author     Linux Build Service Account <lnxbuild@localhost>   2020-05-01 12:52:01 -0700
committer  Linux Build Service Account <lnxbuild@localhost>   2020-05-01 12:52:01 -0700
commit     d733171bb1cfb674e48d8a2cc2f12f3145dc5466 (patch)
tree       211b95daabd4fc5804f07562a9b283e82d495987
parent     660498e6aa70fc30b4e8004ede00b62ea2f18a45 (diff)
parent     20f73a0d9d368f4659047363ac2179778fd2f1b6 (diff)
download   data-kernel-d733171bb1cfb674e48d8a2cc2f12f3145dc5466.tar.gz

Merge 20f73a0d9d368f4659047363ac2179778fd2f1b6 on remote branch

Change-Id: Icf378eba710e2afc2534a453cff370197035f2dc
-rwxr-xr-x  drivers/rmnet/shs/rmnet_shs_main.c     69
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq.c       22
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq.h        4
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq_genl.c  10
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq_genl.h   2
5 files changed, 75 insertions, 32 deletions
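
This merge replaces the per-flow boolean segment_enable with a segs_per_skb count: rather than either delivering a coalesced GSO skb whole or resegmenting it completely, rmnet_shs can now split it into super-segments that each carry segs_per_skb MSS-sized segments (new TCP flows default to RMNET_SHS_SEGS_PER_SKB_DEFAULT, i.e. 2, while the userspace daemon is connected, and the value drops back to 0 when it disconnects). The patch also lowers rmnet_shs_backlog_max_pkts from 1200 to 1100 and always passes BACKLOG_CHECK to rmnet_shs_core_prio_check(). The standalone sketch below is not driver code; resulting_skbs() is a made-up helper that only summarizes the delivery decision implemented by rmnet_shs_flush_node() and rmnet_shs_skb_partial_segment() in the hunks that follow.

    /*
     * Standalone sketch, not driver code: how many skbs one coalesced GSO
     * skb turns into for a given segs_per_skb, following the logic of
     * rmnet_shs_flush_node() and rmnet_shs_skb_partial_segment() below.
     */
    #include <stdint.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static uint16_t resulting_skbs(uint16_t gso_segs, uint16_t segs_per_skb)
    {
        if (segs_per_skb == 0)            /* segmentation disabled for the flow */
            return 1;                     /* skb is delivered as-is */
        if (segs_per_skb >= gso_segs)     /* partial-segment helper returns NULL */
            return 1;                     /* skb is delivered unsegmented */
        return DIV_ROUND_UP(gso_segs, segs_per_skb);
    }
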
diff --git a/drivers/rmnet/shs/rmnet_shs_main.c b/drivers/rmnet/shs/rmnet_shs_main.c
index bb2f175..0f6ea40 100755
--- a/drivers/rmnet/shs/rmnet_shs_main.c
+++ b/drivers/rmnet/shs/rmnet_shs_main.c
@@ -38,6 +38,7 @@
#define WQ_DELAY 2000000
#define MIN_MS 5
+#define BACKLOG_CHECK 1
#define GET_QTAIL(SD, CPU) (per_cpu(SD, CPU).input_queue_tail)
#define GET_QHEAD(SD, CPU) (per_cpu(SD, CPU).input_queue_head)
@@ -97,7 +98,7 @@ module_param(rmnet_shs_fall_back_timer, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_fall_back_timer,
"Option to enable fall back limit for parking");
-unsigned int rmnet_shs_backlog_max_pkts __read_mostly = 1200;
+unsigned int rmnet_shs_backlog_max_pkts __read_mostly = 1100;
module_param(rmnet_shs_backlog_max_pkts, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_backlog_max_pkts,
"Max pkts in backlog prioritizing");
@@ -387,8 +388,49 @@ static void rmnet_shs_deliver_skb_wq(struct sk_buff *skb)
gro_cells_receive(&priv->gro_cells, skb);
}
+static struct sk_buff *rmnet_shs_skb_partial_segment(struct sk_buff *skb,
+ u16 segments_per_skb)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ struct sk_buff *segments, *tmp;
+ u16 gso_size = shinfo->gso_size;
+ u16 gso_segs = shinfo->gso_segs;
+
+ if (segments_per_skb >= gso_segs) {
+ return NULL;
+ }
+
+ /* Update the numbers for the main skb */
+ shinfo->gso_segs = DIV_ROUND_UP(gso_segs, segments_per_skb);
+ shinfo->gso_size = gso_size * segments_per_skb;
+ segments = __skb_gso_segment(skb, NETIF_F_SG, false);
+ if (unlikely(IS_ERR_OR_NULL(segments))) {
+ /* return to the original state */
+ shinfo->gso_size = gso_size;
+ shinfo->gso_segs = gso_segs;
+ return NULL;
+ }
+
+ /* Mark correct number of segments and correct size in the new skbs */
+ for (tmp = segments; tmp; tmp = tmp->next) {
+ struct skb_shared_info *new_shinfo = skb_shinfo(tmp);
+
+ new_shinfo->gso_size = gso_size;
+ if (gso_segs >= segments_per_skb)
+ new_shinfo->gso_segs = segments_per_skb;
+ else
+ new_shinfo->gso_segs = gso_segs;
+
+ gso_segs -= segments_per_skb;
+ }
+
+ return segments;
+}
+
/* Delivers skbs after segmenting, directly to network stack */
-static void rmnet_shs_deliver_skb_segmented(struct sk_buff *in_skb, u8 ctext)
+static void rmnet_shs_deliver_skb_segmented(struct sk_buff *in_skb,
+ u8 ctext,
+ u16 segs_per_skb)
{
struct sk_buff *skb = NULL;
struct sk_buff *nxt_skb = NULL;
@@ -398,8 +440,9 @@ static void rmnet_shs_deliver_skb_segmented(struct sk_buff *in_skb, u8 ctext)
SHS_TRACE_LOW(RMNET_SHS_DELIVER_SKB, RMNET_SHS_DELIVER_SKB_START,
0x1, 0xDEF, 0xDEF, 0xDEF, in_skb, NULL);
- segs = __skb_gso_segment(in_skb, NETIF_F_SG, false);
- if (unlikely(IS_ERR_OR_NULL(segs))) {
+ segs = rmnet_shs_skb_partial_segment(in_skb, segs_per_skb);
+
+ if (segs == NULL) {
if (ctext == RMNET_RX_CTXT)
netif_receive_skb(in_skb);
else
@@ -408,7 +451,7 @@ static void rmnet_shs_deliver_skb_segmented(struct sk_buff *in_skb, u8 ctext)
return;
}
- /* Send segmeneted skb */
+ /* Send segmented skb */
for ((skb = segs); skb != NULL; skb = nxt_skb) {
nxt_skb = skb->next;
@@ -925,7 +968,7 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
u32 skb_bytes_delivered = 0;
u32 hash2stamp = 0; /* the default value of skb->hash*/
u8 map = 0, maplen = 0;
- u8 segment_enable = 0;
+ u16 segs_per_skb = 0;
if (!node->skb_list.head)
return;
@@ -947,7 +990,7 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
node->skb_list.num_parked_bytes,
node, node->skb_list.head);
- segment_enable = node->hstats->segment_enable;
+ segs_per_skb = (u16) node->hstats->segs_per_skb;
for ((skb = node->skb_list.head); skb != NULL; skb = nxt_skb) {
@@ -959,8 +1002,9 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
skbs_delivered += 1;
skb_bytes_delivered += skb->len;
- if (segment_enable) {
- rmnet_shs_deliver_skb_segmented(skb, ctext);
+ if (segs_per_skb > 0) {
+ rmnet_shs_deliver_skb_segmented(skb, ctext,
+ segs_per_skb);
} else {
if (ctext == RMNET_RX_CTXT)
rmnet_shs_deliver_skb(skb);
@@ -1128,7 +1172,6 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
u32 total_cpu_gro_flushed = 0;
u32 total_node_gro_flushed = 0;
u8 is_flushed = 0;
- u8 cpu_segment = 0;
/* Record a qtail + pkts flushed or move if reqd
* currently only use qtail for non TCP flows
@@ -1142,7 +1185,6 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
for (cpu_num = 0; cpu_num < MAX_CPUS; cpu_num++) {
cpu_tail = rmnet_shs_get_cpu_qtail(cpu_num);
- cpu_segment = 0;
total_cpu_gro_flushed = 0;
skb_seg_pending = 0;
list_for_each_safe(ptr, next,
@@ -1151,8 +1193,7 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
skb_seg_pending += n->skb_list.skb_load;
}
if (rmnet_shs_inst_rate_switch) {
- cpu_segment = rmnet_shs_cpu_node_tbl[cpu_num].seg;
- rmnet_shs_core_prio_check(cpu_num, cpu_segment,
+ rmnet_shs_core_prio_check(cpu_num, BACKLOG_CHECK,
skb_seg_pending);
}
@@ -1195,7 +1236,7 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
rmnet_shs_update_core_load(cpu_num,
total_cpu_gro_flushed);
- rmnet_shs_core_prio_check(cpu_num, cpu_segment, 0);
+ rmnet_shs_core_prio_check(cpu_num, BACKLOG_CHECK, 0);
}
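
For a concrete feel of the arithmetic in rmnet_shs_skb_partial_segment() above: the parent skb's gso_segs is set to the number of resulting skbs and its gso_size to segs_per_skb times the original MSS before __skb_gso_segment() runs, and the follow-up loop then restores the real MSS and segment count on each resulting skb, with the last one carrying the remainder. The userspace sketch below is not kernel code and the MSS of 1400 is just an example value; it prints the distribution for a 10-segment skb split 3 segments at a time (4 skbs with 3, 3, 3 and 1 segments).

    /*
     * Standalone sketch, not kernel code: the segment distribution produced
     * by the gso_segs/gso_size arithmetic in rmnet_shs_skb_partial_segment()
     * for a 10-segment GSO skb (example MSS 1400) and segs_per_skb = 3.
     */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int gso_segs = 10, gso_size = 1400, segs_per_skb = 3;
        unsigned int nskbs = DIV_ROUND_UP(gso_segs, segs_per_skb);
        unsigned int remaining = gso_segs;

        printf("parent asks __skb_gso_segment() for %u skbs of up to %u bytes payload\n",
               nskbs, gso_size * segs_per_skb);

        for (unsigned int i = 0; i < nskbs; i++) {
            /* mirror of the fix-up loop: the last skb keeps the remainder */
            unsigned int segs = remaining >= segs_per_skb ? segs_per_skb
                                                          : remaining;

            printf("skb %u: gso_segs=%u gso_size=%u\n", i, segs, gso_size);
            remaining -= segs;
        }
        return 0;
    }
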
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.c b/drivers/rmnet/shs/rmnet_shs_wq.c
index 53f5826..2678d8b 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq.c
@@ -42,6 +42,8 @@ MODULE_PARM_DESC(rmnet_shs_cpu_prio_dur, "Priority ignore duration (wq intervals
#define PRIO_BACKOFF ((!rmnet_shs_cpu_prio_dur) ? 2 : rmnet_shs_cpu_prio_dur)
+#define RMNET_SHS_SEGS_PER_SKB_DEFAULT (2)
+
unsigned int rmnet_shs_wq_interval_ms __read_mostly = RMNET_SHS_WQ_INTERVAL_MS;
module_param(rmnet_shs_wq_interval_ms, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_wq_interval_ms, "Interval between wq runs (ms)");
@@ -411,7 +413,7 @@ void rmnet_shs_wq_create_new_flow(struct rmnet_shs_skbn_s *node_p)
/* Start TCP flows with segmentation if userspace connected */
if (rmnet_shs_userspace_connected &&
node_p->hstats->skb_tport_proto == IPPROTO_TCP)
- node_p->hstats->segment_enable = 1;
+ node_p->hstats->segs_per_skb = RMNET_SHS_SEGS_PER_SKB_DEFAULT;
node_p->hstats->node = node_p;
node_p->hstats->c_epoch = RMNET_SHS_SEC_TO_NSEC(time.tv_sec) +
@@ -1291,7 +1293,7 @@ int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
}
/* Change flow segmentation, return 1 if set, 0 otherwise */
-int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
+int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 segs_per_skb)
{
struct rmnet_shs_skbn_s *node_p;
struct rmnet_shs_wq_hstat_s *hstat_p;
@@ -1311,22 +1313,22 @@ int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
if (hstat_p->hash != hash_to_set)
continue;
- rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u",
- hash_to_set, seg_enable);
+ rm_err("SHS_HT: >> segmentation on hash 0x%x segs_per_skb %u",
+ hash_to_set, segs_per_skb);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
RMNET_SHS_WQ_FLOW_STATS_SET_FLOW_SEGMENTATION,
- hstat_p->hash, seg_enable,
+ hstat_p->hash, segs_per_skb,
0xDEF, 0xDEF, hstat_p, NULL);
- node_p->hstats->segment_enable = seg_enable;
+ node_p->hstats->segs_per_skb = segs_per_skb;
spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
return 1;
}
spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
- rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u not set - hash not found",
- hash_to_set, seg_enable);
+ rm_err("SHS_HT: >> segmentation on hash 0x%x segs_per_skb %u not set - hash not found",
+ hash_to_set, segs_per_skb);
return 0;
}
@@ -1966,7 +1968,7 @@ void rmnet_shs_wq_filter(void)
continue;
}
- if (hnode->node->hstats->segment_enable) {
+ if (hnode->node->hstats->segs_per_skb > 0) {
rmnet_shs_cpu_node_tbl[cur_cpu].seg++;
}
}
@@ -2003,7 +2005,7 @@ void rmnet_shs_wq_update_stats(void)
}
} else {
/* Disable segmentation if userspace gets disconnected connected */
- hnode->node->hstats->segment_enable = 0;
+ hnode->node->hstats->segs_per_skb = 0;
}
}
}
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.h b/drivers/rmnet/shs/rmnet_shs_wq.h
index aa0265c..50572d2 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq.h
@@ -100,7 +100,7 @@ struct rmnet_shs_wq_hstat_s {
u8 in_use;
u8 is_perm;
u8 is_new_flow;
- u8 segment_enable; /* segment coalesces packets */
+ u8 segs_per_skb; /* segments per skb */
};
struct rmnet_shs_wq_cpu_rx_pkt_q_s {
@@ -288,7 +288,7 @@ void rmnet_shs_wq_refresh_new_flow_list(void);
int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
u32 sugg_type);
-int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable);
+int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 segs_per_skb);
void rmnet_shs_wq_ep_lock_bh(void);
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_genl.c b/drivers/rmnet/shs/rmnet_shs_wq_genl.c
index 2dff48a..9d69a21 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_genl.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq_genl.c
@@ -209,24 +209,24 @@ int rmnet_shs_genl_set_flow_segmentation(struct sk_buff *skb_2, struct genl_info
if (na) {
if (nla_memcpy(&seg_info, na, sizeof(seg_info)) > 0) {
rm_err("SHS_GNL: recv segmentation req "
- "hash_to_set = 0x%x segment_enable = %u",
+ "hash_to_set = 0x%x segs_per_skb = %u",
seg_info.hash_to_set,
- seg_info.segment_enable);
+ seg_info.segs_per_skb);
rc = rmnet_shs_wq_set_flow_segmentation(seg_info.hash_to_set,
- seg_info.segment_enable);
+ seg_info.segs_per_skb);
if (rc == 1) {
rmnet_shs_genl_send_int_to_userspace(info, 0);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR,
RMNET_SHS_WQ_FLOW_SEG_SET_PASS,
- seg_info.hash_to_set, seg_info.segment_enable,
+ seg_info.hash_to_set, seg_info.segs_per_skb,
0xDEF, 0xDEF, NULL, NULL);
} else {
rmnet_shs_genl_send_int_to_userspace(info, -1);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR,
RMNET_SHS_WQ_FLOW_SEG_SET_FAIL,
- seg_info.hash_to_set, seg_info.segment_enable,
+ seg_info.hash_to_set, seg_info.segs_per_skb,
0xDEF, 0xDEF, NULL, NULL);
return 0;
}
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_genl.h b/drivers/rmnet/shs/rmnet_shs_wq_genl.h
index 9901d38..b9cccb9 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_genl.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq_genl.h
@@ -55,7 +55,7 @@ struct rmnet_shs_wq_sugg_info {
struct rmnet_shs_wq_seg_info {
uint32_t hash_to_set;
- uint32_t segment_enable;
+ uint32_t segs_per_skb;
};
/* Function Prototypes */
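
Userspace drives this over the existing generic netlink channel: rmnet_shs_genl_set_flow_segmentation() nla_memcpy()s a struct rmnet_shs_wq_seg_info out of a single attribute and hands hash_to_set and segs_per_skb to rmnet_shs_wq_set_flow_segmentation(). The libnl-3 sketch below shows one way a client could fill and send that payload; the family name, command number, attribute id and genl version (FAMILY_NAME, SET_SEG_CMD, SEG_ATTR, version 1) are placeholders, since the real identifiers are defined outside the hunks shown here.

    /*
     * Hypothetical userspace sketch (libnl-genl-3): fill the
     * rmnet_shs_wq_seg_info payload and send it as one binary attribute,
     * matching the nla_memcpy() in rmnet_shs_genl_set_flow_segmentation().
     * FAMILY_NAME, SET_SEG_CMD and SEG_ATTR are placeholders, not values
     * taken from this patch.
     */
    #include <stdint.h>
    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>

    struct rmnet_shs_wq_seg_info {      /* mirrors rmnet_shs_wq_genl.h */
        uint32_t hash_to_set;
        uint32_t segs_per_skb;
    };

    #define FAMILY_NAME "RMNET_SHS"     /* placeholder */
    #define SET_SEG_CMD 3               /* placeholder */
    #define SEG_ATTR    1               /* placeholder */

    int set_flow_segmentation(uint32_t hash, uint32_t segs_per_skb)
    {
        struct rmnet_shs_wq_seg_info seg = {
            .hash_to_set  = hash,
            .segs_per_skb = segs_per_skb,   /* 0 turns resegmentation off */
        };
        struct nl_sock *sk = nl_socket_alloc();
        struct nl_msg *msg = NULL;
        int family, err = -1;

        if (!sk)
            return -1;
        if (genl_connect(sk) < 0)
            goto out;
        family = genl_ctrl_resolve(sk, FAMILY_NAME);
        if (family < 0)
            goto out;

        msg = nlmsg_alloc();
        if (!msg)
            goto out;
        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                    SET_SEG_CMD, 1);    /* genl version assumed */
        nla_put(msg, SEG_ATTR, sizeof(seg), &seg);
        err = nl_send_auto(sk, msg);    /* kernel acks via genl reply */
        nlmsg_free(msg);
    out:
        nl_socket_free(sk);
        return err < 0 ? -1 : 0;
    }
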