author    Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>  2021-08-17 20:21:55 -0700
committer Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>  2021-08-18 09:59:34 -0700
commit    ef13a42ae2161ea914186c51721ffa6bb3709af7 (patch)
tree      22fa30f3603ae08098588d90b8023dcb4a2e839d
parent    6b6ee8b64535938375b5cb8d036bb96464307cd6 (diff)
download  data-kernel-ef13a42ae2161ea914186c51721ffa6bb3709af7.tar.gz
dfc: fix spinlock leak
In the DFC powersave work, the separate spin lock and unlock of multiple
qos structures could get out of sync during SSR, resulting in a spinlock
leak after the work exits. This change consolidates the spinlock
operations to avoid multiple locking and unlocking, and fixes the issue
below:

BUG: workqueue leaked lock or atomic: kworker/0:9/0x00000201/1361
     last function: qmi_rmnet_check_stats_2.cfi_jt [rmnet_core]
1 lock held by kworker/0:9/1361:
 (&qos->qos_lock){....}-{2:2}, at: rmnet_lock_unlock_all_flows+0xa4/0xdc

Change-Id: I10c1687a4f9993363dc631dee0b347faaa1067ab
Acked-by: Weiyi Chen <weiyic@qti.qualcomm.com>
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
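For reference, the splat quoted above is emitted by the sanity check the
workqueue core runs after every work callback returns. The following is a
paraphrased sketch of that check, based on process_one_work() in
kernel/workqueue.c; the exact fields and format vary by kernel version:

/* Paraphrased from process_one_work() in kernel/workqueue.c; not part of
 * this patch, and details differ across kernel versions.  If the work
 * function returns while atomic or while lockdep still records a held
 * lock (here: qos->qos_lock taken by the powersave work), this splat is
 * printed. */
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
	pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
	       "     last function: %ps\n",
	       current->comm, preempt_count(), task_pid_nr(current),
	       worker->current_func);
	debug_show_held_locks(current);
	dump_stack();
}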
-rw-r--r--   core/qmi_rmnet.c    | 87
-rw-r--r--   core/qmi_rmnet.h    | 12
-rw-r--r--   core/rmnet_config.c | 40
-rw-r--r--   core/rmnet_qmi.h    |  4
4 files changed, 31 insertions, 112 deletions
diff --git a/core/qmi_rmnet.c b/core/qmi_rmnet.c
index fa073fb..6fd652f 100644
--- a/core/qmi_rmnet.c
+++ b/core/qmi_rmnet.c
@@ -907,28 +907,10 @@ bool qmi_rmnet_all_flows_enabled(struct net_device *dev)
EXPORT_SYMBOL(qmi_rmnet_all_flows_enabled);
/**
- * qmi_rmnet_lock_unlock_all_flows - lock or unlock all bearers
+ * rmnet_prepare_ps_bearers - get disabled bearers and
+ * reset enabled bearers
*/
-void qmi_rmnet_lock_unlock_all_flows(struct net_device *dev, bool lock)
-{
- struct qos_info *qos;
-
- qos = (struct qos_info *)rmnet_get_qos_pt(dev);
- if (!qos)
- return;
-
- if (lock)
- spin_lock_bh(&qos->qos_lock);
- else
- spin_unlock_bh(&qos->qos_lock);
-}
-EXPORT_SYMBOL(qmi_rmnet_lock_unlock_all_flows);
-
-/**
- * qmi_rmnet_get_disabled_flows - get disabled bearers
- * Needs to be called with qos_lock
- */
-void qmi_rmnet_get_disabled_flows(struct net_device *dev, u8 *num_bearers,
+void qmi_rmnet_prepare_ps_bearers(struct net_device *dev, u8 *num_bearers,
u8 *bearer_id)
{
struct qos_info *qos;
@@ -940,34 +922,9 @@ void qmi_rmnet_get_disabled_flows(struct net_device *dev, u8 *num_bearers,
if (!qos || !num_bearers)
return;
- num_bearers_left = *num_bearers;
-
- list_for_each_entry(bearer, &qos->bearer_head, list) {
- if (!bearer->grant_size && num_bearers_left) {
- if (bearer_id)
- bearer_id[current_num_bearers] =
- bearer->bearer_id;
- current_num_bearers++;
- num_bearers_left--;
- }
- }
-
- *num_bearers = current_num_bearers;
-}
-EXPORT_SYMBOL(qmi_rmnet_get_disabled_flows);
-
-/**
- * qmi_rmnet_reset_enabled_flows - reset enabled bearers for powersave
- * Needs to be called with qos_lock
- */
-void qmi_rmnet_reset_enabled_flows(struct net_device *dev)
-{
- struct qos_info *qos;
- struct rmnet_bearer_map *bearer;
+ spin_lock_bh(&qos->qos_lock);
- qos = (struct qos_info *)rmnet_get_qos_pt(dev);
- if (!qos)
- return;
+ num_bearers_left = *num_bearers;
list_for_each_entry(bearer, &qos->bearer_head, list) {
if (bearer->grant_size) {
@@ -980,10 +937,22 @@ void qmi_rmnet_reset_enabled_flows(struct net_device *dev)
bearer->grant_size = DEFAULT_GRANT;
bearer->grant_thresh =
qmi_rmnet_grant_per(DEFAULT_GRANT);
+ } else if (num_bearers_left) {
+ if (bearer_id)
+ bearer_id[current_num_bearers] =
+ bearer->bearer_id;
+ current_num_bearers++;
+ num_bearers_left--;
+ } else {
+ pr_err("DFC: no bearer space\n");
}
}
+
+ *num_bearers = current_num_bearers;
+
+ spin_unlock_bh(&qos->qos_lock);
}
-EXPORT_SYMBOL(qmi_rmnet_reset_enabled_flows);
+EXPORT_SYMBOL(qmi_rmnet_prepare_ps_bearers);
#ifdef CONFIG_QTI_QMI_DFC
bool qmi_rmnet_flow_is_low_latency(struct net_device *dev,
@@ -1438,25 +1407,19 @@ static void qmi_rmnet_check_stats_2(struct work_struct *work)
}
if (!rxd && !txd) {
- rmnet_lock_unlock_all_flows(real_work->port, true);
+ qmi->ps_ignore_grant = true;
+ qmi->ps_enabled = true;
+ clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
+
+ smp_mb();
num_bearers = sizeof(ps_bearer_id);
memset(ps_bearer_id, 0, sizeof(ps_bearer_id));
- rmnet_get_disabled_flows(real_work->port, &num_bearers,
+ rmnet_prepare_ps_bearers(real_work->port, &num_bearers,
ps_bearer_id);
/* Enter powersave */
- if (dfc_qmap_set_powersave(1, num_bearers, ps_bearer_id)) {
- rmnet_lock_unlock_all_flows(real_work->port, false);
- goto end;
- }
-
- rmnet_reset_enabled_flows(real_work->port);
- qmi->ps_ignore_grant = true;
- qmi->ps_enabled = true;
- clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
-
- rmnet_lock_unlock_all_flows(real_work->port, false);
+ dfc_qmap_set_powersave(1, num_bearers, ps_bearer_id);
if (rmnet_get_powersave_notif(real_work->port))
qmi_rmnet_ps_on_notify(real_work->port);
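Pieced together from the qmi_rmnet.c hunks above, the consolidated helper
reads roughly as follows. This is a reconstruction for readability, not a
verbatim copy of the patched file: names are taken from the added and
retained lines above, locals not visible in the diff are inferred from the
removed code, and per-bearer resets elided between hunks are marked.

/* Reconstruction of the consolidated helper, assembled from the hunks
 * above; not a verbatim copy of the patched file. */
void qmi_rmnet_prepare_ps_bearers(struct net_device *dev, u8 *num_bearers,
				  u8 *bearer_id)
{
	struct qos_info *qos;
	struct rmnet_bearer_map *bearer;
	u8 current_num_bearers = 0;	/* declarations inferred from the
					 * removed code, not shown in diff */
	u8 num_bearers_left = 0;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos || !num_bearers)
		return;

	/* One lock/unlock pair per qos structure, fully inside this call,
	 * so the powersave work never returns with qos_lock held. */
	spin_lock_bh(&qos->qos_lock);

	num_bearers_left = *num_bearers;

	list_for_each_entry(bearer, &qos->bearer_head, list) {
		if (bearer->grant_size) {
			/* enabled bearer: reset grant state for powersave
			 * (further per-bearer resets are elided in the diff) */
			bearer->grant_size = DEFAULT_GRANT;
			bearer->grant_thresh =
				qmi_rmnet_grant_per(DEFAULT_GRANT);
		} else if (num_bearers_left) {
			/* disabled bearer: record its id for the request */
			if (bearer_id)
				bearer_id[current_num_bearers] =
					bearer->bearer_id;
			current_num_bearers++;
			num_bearers_left--;
		} else {
			pr_err("DFC: no bearer space\n");
		}
	}

	*num_bearers = current_num_bearers;

	spin_unlock_bh(&qos->qos_lock);
}
EXPORT_SYMBOL(qmi_rmnet_prepare_ps_bearers);

Because the lock is taken and dropped entirely inside this helper, the
caller in qmi_rmnet_check_stats_2() no longer holds any qos_lock when the
work item completes, which is exactly what the workqueue splat flagged.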
diff --git a/core/qmi_rmnet.h b/core/qmi_rmnet.h
index cee7dca..3833011 100644
--- a/core/qmi_rmnet.h
+++ b/core/qmi_rmnet.h
@@ -33,10 +33,8 @@ int qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
int attr_len);
void qmi_rmnet_enable_all_flows(struct net_device *dev);
bool qmi_rmnet_all_flows_enabled(struct net_device *dev);
-void qmi_rmnet_lock_unlock_all_flows(struct net_device *dev, bool lock);
-void qmi_rmnet_get_disabled_flows(struct net_device *dev, u8 *num_bearers,
+void qmi_rmnet_prepare_ps_bearers(struct net_device *dev, u8 *num_bearers,
u8 *bearer_id);
-void qmi_rmnet_reset_enabled_flows(struct net_device *dev);
#else
static inline void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
{
@@ -60,19 +58,13 @@ qmi_rmnet_all_flows_enabled(struct net_device *dev)
return true;
}
-static inline void qmi_rmnet_lock_unlock_all_flows(struct net_device *dev,
- bool lock)
-{
-}
-
-static inline void qmi_rmnet_get_disabled_flows(struct net_device *dev,
+static inline void qmi_rmnet_prepare_ps_bearers(struct net_device *dev,
u8 *num_bearers, u8 *bearer_id)
{
if (num_bearers)
*num_bearers = 0;
}
-static void qmi_rmnet_reset_enabled_flows(struct net_device *dev);
#endif
#ifdef CONFIG_QTI_QMI_DFC
diff --git a/core/rmnet_config.c b/core/rmnet_config.c
index 7a087e8..25e866e 100644
--- a/core/rmnet_config.c
+++ b/core/rmnet_config.c
@@ -755,24 +755,7 @@ out:
}
EXPORT_SYMBOL(rmnet_all_flows_enabled);
-void rmnet_lock_unlock_all_flows(void *port, bool lock)
-{
- struct rmnet_endpoint *ep;
- unsigned long bkt;
-
- if (unlikely(!port))
- return;
-
- rcu_read_lock();
- hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
- bkt, ep, hlnode) {
- qmi_rmnet_lock_unlock_all_flows(ep->egress_dev, lock);
- }
- rcu_read_unlock();
-}
-EXPORT_SYMBOL(rmnet_lock_unlock_all_flows);
-
-void rmnet_get_disabled_flows(void *port, u8 *num_bearers, u8 *bearer_id)
+void rmnet_prepare_ps_bearers(void *port, u8 *num_bearers, u8 *bearer_id)
{
struct rmnet_endpoint *ep;
unsigned long bkt;
@@ -789,7 +772,7 @@ void rmnet_get_disabled_flows(void *port, u8 *num_bearers, u8 *bearer_id)
hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
bkt, ep, hlnode) {
num_bearers_in_out = number_bearers_left;
- qmi_rmnet_get_disabled_flows(ep->egress_dev,
+ qmi_rmnet_prepare_ps_bearers(ep->egress_dev,
&num_bearers_in_out,
bearer_id ? bearer_id +
current_num_bearers : NULL);
@@ -800,24 +783,7 @@ void rmnet_get_disabled_flows(void *port, u8 *num_bearers, u8 *bearer_id)
*num_bearers = current_num_bearers;
}
-EXPORT_SYMBOL(rmnet_get_disabled_flows);
-
-void rmnet_reset_enabled_flows(void *port)
-{
- struct rmnet_endpoint *ep;
- unsigned long bkt;
-
- if (unlikely(!port))
- return;
-
- rcu_read_lock();
- hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
- bkt, ep, hlnode) {
- qmi_rmnet_reset_enabled_flows(ep->egress_dev);
- }
- rcu_read_unlock();
-}
-EXPORT_SYMBOL(rmnet_reset_enabled_flows);
+EXPORT_SYMBOL(rmnet_prepare_ps_bearers);
int rmnet_get_powersave_notif(void *port)
{
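For completeness, a sketch of the renamed per-port wrapper in
rmnet_config.c, assembled from the hunks above. The null checks and the
accumulation of per-endpoint counts are carried over from the old
rmnet_get_disabled_flows() and are only partly visible in this diff; those
lines are marked as inferred.

/* Sketch of the per-port wrapper, assembled from the hunks above; lines
 * not visible in the diff are inferred from the replaced function. */
void rmnet_prepare_ps_bearers(void *port, u8 *num_bearers, u8 *bearer_id)
{
	struct rmnet_endpoint *ep;
	unsigned long bkt;
	u8 num_bearers_in_out;
	u8 current_num_bearers = 0;
	u8 number_bearers_left = 0;

	if (unlikely(!port || !num_bearers))	/* inferred; elided in diff */
		return;

	number_bearers_left = *num_bearers;

	rcu_read_lock();
	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
			  bkt, ep, hlnode) {
		/* give each endpoint the remaining space and an offset
		 * into the caller's bearer_id array */
		num_bearers_in_out = number_bearers_left;
		qmi_rmnet_prepare_ps_bearers(ep->egress_dev,
					     &num_bearers_in_out,
					     bearer_id ? bearer_id +
					     current_num_bearers : NULL);
		/* per-endpoint accounting inferred; elided in the diff */
		current_num_bearers += num_bearers_in_out;
		number_bearers_left -= num_bearers_in_out;
	}
	rcu_read_unlock();

	*num_bearers = current_num_bearers;
}
EXPORT_SYMBOL(rmnet_prepare_ps_bearers);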
diff --git a/core/rmnet_qmi.h b/core/rmnet_qmi.h
index 1980f16..9a9f5b7 100644
--- a/core/rmnet_qmi.h
+++ b/core/rmnet_qmi.h
@@ -35,9 +35,7 @@ void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
int rmnet_get_powersave_notif(void *port);
struct net_device *rmnet_get_real_dev(void *port);
int rmnet_get_dlmarker_info(void *port);
-void rmnet_lock_unlock_all_flows(void *port, bool lock);
-void rmnet_get_disabled_flows(void *port, u8 *num_bearers, u8 *bearer_id);
-void rmnet_reset_enabled_flows(void *port);
+void rmnet_prepare_ps_bearers(void *port, u8 *num_bearers, u8 *bearer_id);
#else
static inline void *rmnet_get_qmi_pt(void *port)
{