diff options
author | lucaswei <lucaswei@google.com> | 2020-11-12 17:28:13 +0800 |
---|---|---|
committer | lucaswei <lucaswei@google.com> | 2020-11-12 17:28:13 +0800 |
commit | 50165f827a92079d03ba8c513ff0bdff816ef109 (patch) | |
tree | df394534e2ac45cc48e903374e5e6c5256a3049c | |
parent | 3cb19815a576493208cbb1b20f9e9820761661ad (diff) | |
parent | bf2d06debec83ae14c0321a3385c55fb1c5379c6 (diff) | |
download | data-kernel-50165f827a92079d03ba8c513ff0bdff816ef109.tar.gz |
Merge LA.UM.9.12.R2.10.00.00.685.039 via branch 'qcom-msm-4.19-7250' into android-msm-pixel-4.19android-s-preview-3_r0.5android-s-beta-2_r0.5android-s-beta-1_r0.5android-msm-redbull-4.19-s-preview-3android-msm-redbull-4.19-s-beta-2android-msm-redbull-4.19-s-beta-1
Bug: 172988823
Signed-off-by: lucaswei <lucaswei@google.com>
Change-Id: I61f8e65251aabbcd6ee7ed5cdfcdc8ffa861ae97
-rw-r--r-- | drivers/rmnet/shs/rmnet_shs_config.h | 1 | ||||
-rwxr-xr-x | drivers/rmnet/shs/rmnet_shs_main.c | 19 | ||||
-rw-r--r-- | drivers/rmnet/shs/rmnet_shs_wq.c | 10 |
3 files changed, 22 insertions, 8 deletions
diff --git a/drivers/rmnet/shs/rmnet_shs_config.h b/drivers/rmnet/shs/rmnet_shs_config.h index 10b8f58..8d318c1 100644 --- a/drivers/rmnet/shs/rmnet_shs_config.h +++ b/drivers/rmnet/shs/rmnet_shs_config.h @@ -48,6 +48,7 @@ enum rmnet_shs_crit_err_e { RMNET_SHS_WQ_NL_SOCKET_ERR, RMNET_SHS_CPU_FLOWS_BNDS_ERR, RMNET_SHS_OUT_OF_MEM_ERR, + RMNET_SHS_UDP_SEGMENT, RMNET_SHS_CRIT_ERR_MAX }; diff --git a/drivers/rmnet/shs/rmnet_shs_main.c b/drivers/rmnet/shs/rmnet_shs_main.c index dec38c4..c65298b 100755 --- a/drivers/rmnet/shs/rmnet_shs_main.c +++ b/drivers/rmnet/shs/rmnet_shs_main.c @@ -404,6 +404,7 @@ static struct sk_buff *rmnet_shs_skb_partial_segment(struct sk_buff *skb, struct sk_buff *segments, *tmp; u16 gso_size = shinfo->gso_size; u16 gso_segs = shinfo->gso_segs; + unsigned int gso_type = shinfo->gso_type; if (segments_per_skb >= gso_segs) { return NULL; @@ -420,17 +421,27 @@ static struct sk_buff *rmnet_shs_skb_partial_segment(struct sk_buff *skb, return NULL; } - /* Mark correct number of segments and correct size in the new skbs */ + /* No need to set gso info if single segments */ + if (segments_per_skb <= 1) + return segments; + + /* Mark correct number of segments, size, and type in the new skbs */ for (tmp = segments; tmp; tmp = tmp->next) { struct skb_shared_info *new_shinfo = skb_shinfo(tmp); + new_shinfo->gso_type = gso_type; new_shinfo->gso_size = gso_size; + if (gso_segs >= segments_per_skb) new_shinfo->gso_segs = segments_per_skb; else new_shinfo->gso_segs = gso_segs; gso_segs -= segments_per_skb; + + if (gso_segs <= 1) { + break; + } } return segments; @@ -1012,6 +1023,8 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext) skb_bytes_delivered += skb->len; if (segs_per_skb > 0) { + if (node->skb_tport_proto == IPPROTO_UDP) + rmnet_shs_crit_err[RMNET_SHS_UDP_SEGMENT]++; rmnet_shs_deliver_skb_segmented(skb, ctext, segs_per_skb); } else { @@ -1511,7 +1524,7 @@ int rmnet_shs_drop_backlog(struct sk_buff_head *list, int cpu) return 0; } - 
+/* This will run in process context, avoid disabling bh */ static int rmnet_shs_oom_notify(struct notifier_block *self, unsigned long emtpy, void *free) { @@ -1520,7 +1533,6 @@ static int rmnet_shs_oom_notify(struct notifier_block *self, struct sk_buff_head *process_q; struct sk_buff_head *input_q; - local_bh_disable(); for_each_possible_cpu(cpu) { process_q = &GET_PQUEUE(cpu); @@ -1541,7 +1553,6 @@ static int rmnet_shs_oom_notify(struct notifier_block *self, (*nfree)++; } } - local_bh_enable(); return 0; } diff --git a/drivers/rmnet/shs/rmnet_shs_wq.c b/drivers/rmnet/shs/rmnet_shs_wq.c index 6b2a31e..07566a2 100644 --- a/drivers/rmnet/shs/rmnet_shs_wq.c +++ b/drivers/rmnet/shs/rmnet_shs_wq.c @@ -296,6 +296,7 @@ void rmnet_shs_wq_hstat_reset_node(struct rmnet_shs_wq_hstat_s *hnode) hnode->hash = 0; hnode->suggested_cpu = 0; hnode->current_cpu = 0; + hnode->segs_per_skb = 0; hnode->skb_tport_proto = 0; hnode->stat_idx = -1; INIT_LIST_HEAD(&hnode->cpu_node_id); @@ -409,7 +410,8 @@ void rmnet_shs_wq_create_new_flow(struct rmnet_shs_skbn_s *node_p) node_p->hstats->skb_tport_proto = node_p->skb_tport_proto; node_p->hstats->current_cpu = node_p->map_cpu; node_p->hstats->suggested_cpu = node_p->map_cpu; - + /* Set segmentation off by default */ + node_p->hstats->segs_per_skb = 0; /* Start TCP flows with segmentation if userspace connected */ if (rmnet_shs_userspace_connected && node_p->hstats->skb_tport_proto == IPPROTO_TCP) @@ -1939,7 +1941,7 @@ void rmnet_shs_update_cfg_mask(void) } } -void rmnet_shs_wq_filter(void) +noinline void rmnet_shs_wq_filter(void) { int cpu, cur_cpu; int temp; @@ -1964,11 +1966,11 @@ void rmnet_shs_wq_filter(void) rmnet_shs_cpu_rx_filter_flows[temp]++; } cur_cpu = hnode->current_cpu; - if (cur_cpu >= MAX_CPUS) { + if (cur_cpu >= MAX_CPUS || cur_cpu < 0) { continue; } - if (hnode->node->hstats->segs_per_skb > 0) { + if (hnode->segs_per_skb > 0) { rmnet_shs_cpu_node_tbl[cur_cpu].seg++; } } |