author		Robin Peng <robinpeng@google.com>	2020-04-13 10:35:22 +0800
committer	Robin Peng <robinpeng@google.com>	2020-04-13 10:35:22 +0800
commit		4f7299d806677593864d704ed539d149424db8e3 (patch)
tree		bb9ed1b4efeb0a91c26097f808c18694488f0eed
parent		f5bc64353286b8e37b66d706546d6b717d54b1c2 (diff)
parent		d558006995d1438c9c1296ba92ad81c89f7895b5 (diff)
download	data-kernel-4f7299d806677593864d704ed539d149424db8e3.tar.gz
Merge branch 'LA.UM.9.12.R1.10.00.00.597.042' into qcom-msm-4.19-7250
Change-Id: I58f801239dc4d98f395c2ac4fa20e8d5263f3fb1
-rw-r--r--	drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c	100
-rw-r--r--	drivers/rmnet/perf/rmnet_perf_core.c		 14
-rw-r--r--	drivers/rmnet/shs/rmnet_shs.h			  5
-rw-r--r--	drivers/rmnet/shs/rmnet_shs_config.h		  3
-rwxr-xr-x	drivers/rmnet/shs/rmnet_shs_main.c		182
-rw-r--r--	drivers/rmnet/shs/rmnet_shs_wq.c		 85
-rw-r--r--	drivers/rmnet/shs/rmnet_shs_wq.h		  9
-rw-r--r--	drivers/rmnet/shs/rmnet_shs_wq_genl.c		  4
-rw-r--r--	drivers/rmnet/shs/rmnet_shs_wq_genl.h		  3
-rw-r--r--	drivers/rmnet/shs/rmnet_shs_wq_mem.c		287
-rw-r--r--	drivers/rmnet/shs/rmnet_shs_wq_mem.h		 29
11 files changed, 520 insertions, 201 deletions
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c
index 9ea09fa..32b6a57 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c
@@ -51,13 +51,10 @@
 void *ipc_emac_log_ctxt;
 
-#define MAC_ADDR_CFG_FPATH "/data/emac_config.ini"
-static UCHAR dev_addr[ETH_ALEN] = {0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7};
+static UCHAR dev_addr[6] = {0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7};
 struct DWC_ETH_QOS_res_data dwc_eth_qos_res_data = {0, };
 static struct msm_bus_scale_pdata *emac_bus_scale_vec = NULL;
 
-UCHAR config_dev_addr[ETH_ALEN];
-
 ULONG dwc_eth_qos_base_addr;
 ULONG dwc_rgmii_io_csr_base_addr;
 struct DWC_ETH_QOS_prv_data *gDWC_ETH_QOS_prv_data;
@@ -92,6 +89,38 @@ MODULE_PARM_DESC(phy_interrupt_en,
 struct ip_params pparams = {};
 
 #ifdef DWC_ETH_QOS_BUILTIN
+/*!
+ * \brief API to extract MAC Address from given string
+ *
+ * \param[in] pointer to MAC Address string
+ *
+ * \return None
+ */
+void DWC_ETH_QOS_extract_macid(char *mac_addr)
+{
+	char *input = NULL;
+	int i = 0;
+	UCHAR mac_id = 0;
+
+	if (!mac_addr)
+		return;
+
+	/* Extract MAC ID byte by byte */
+	input = strsep(&mac_addr, ":");
+	while(input != NULL && i < DWC_ETH_QOS_MAC_ADDR_LEN) {
+		sscanf(input, "%x", &mac_id);
+		pparams.mac_addr[i++] = mac_id;
+		input = strsep(&mac_addr, ":");
+	}
+	if (!is_valid_ether_addr(pparams.mac_addr)) {
+		EMACERR("Invalid Mac address programmed: %s\n", mac_addr);
+		return;
+	} else
+		pparams.is_valid_mac_addr = true;
+
+	return;
+}
+
 static int __init set_early_ethernet_ipv4(char *ipv4_addr_in)
 {
 	int ret = 1;
@@ -141,25 +170,17 @@ __setup("eipv6=", set_early_ethernet_ipv6);
 static int __init set_early_ethernet_mac(char* mac_addr)
 {
 	int ret = 1;
-	bool valid_mac = false;
-
+	char temp_mac_addr[DWC_ETH_QOS_MAC_ADDR_STR_LEN];
 	pparams.is_valid_mac_addr = false;
+
 	if(!mac_addr)
 		return ret;
 
-	valid_mac = mac_pton(mac_addr, pparams.mac_addr);
-	if(!valid_mac)
-		goto fail;
-
-	valid_mac = is_valid_ether_addr(pparams.mac_addr);
-	if (!valid_mac)
-		goto fail;
+	strlcpy(temp_mac_addr, mac_addr, sizeof(temp_mac_addr));
+	EMACDBG("Early ethernet MAC address assigned: %s\n", temp_mac_addr);
+	temp_mac_addr[DWC_ETH_QOS_MAC_ADDR_STR_LEN-1] = '\0';
 
-	pparams.is_valid_mac_addr = true;
-	return ret;
-
-fail:
-	EMACERR("Invalid Mac address programmed: %s\n", mac_addr);
+	DWC_ETH_QOS_extract_macid(temp_mac_addr);
 	return ret;
 }
 __setup("ermac=", set_early_ethernet_mac);
@@ -1595,47 +1616,6 @@ u32 l3mdev_fib_table1 (const struct net_device *dev)
 
 const struct l3mdev_ops l3mdev_op1 = {.l3mdev_fib_table = l3mdev_fib_table1};
 
-/*!
- * \brief Parse the config file to obtain the MAC address
- *
- * \param[in] None
- *
- * \return None
- *
- */
-static void DWC_ETH_QOS_read_mac_addr_from_config(void)
-{
-	int ret = -ENOENT;
-	void *data = NULL;
-	char *file_path = MAC_ADDR_CFG_FPATH;
-	loff_t size = 0;
-	loff_t max_size = 30;
-
-	EMACDBG("Enter\n");
-
-	ret = kernel_read_file_from_path(file_path, &data, &size,
-					 max_size, READING_POLICY);
-	if (ret < 0) {
-		EMACINFO("unable to open file: %s (%d)\n", file_path, ret);
-		goto ret;
-	}
-
-	if (!mac_pton(data, config_dev_addr) && !is_valid_ether_addr(config_dev_addr)) {
-		EMACERR("Invalid mac addr found in emac_config.ini\n");
-		goto ret;
-	}
-
-	EMACDBG("mac address read from config.ini successfully\n");
-	ether_addr_copy(dev_addr, config_dev_addr);
-
-ret:
-	if (data)
-		vfree(data);
-	return;
-}
-
 static int DWC_ETH_QOS_configure_netdevice(struct platform_device *pdev)
 {
 	struct DWC_ETH_QOS_prv_data *pdata = NULL;
@@ -1661,8 +1641,6 @@ static int DWC_ETH_QOS_configure_netdevice(struct platform_device *pdev)
 
 	if (pparams.is_valid_mac_addr == true)
 		ether_addr_copy(dev_addr, pparams.mac_addr);
-	else
-		DWC_ETH_QOS_read_mac_addr_from_config();
 
 	dev->dev_addr[0] = dev_addr[0];
 	dev->dev_addr[1] = dev_addr[1];
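The new DWC_ETH_QOS_extract_macid() helper above replaces mac_pton() with manual ":"-tokenizing. Below is a standalone userspace sketch of that byte-by-byte parse for reference; the function and macro names here are hypothetical, and it deliberately scans into a full unsigned int before narrowing to a byte (the kernel helper hands sscanf a UCHAR for the %x conversion, which relies on the narrower store working out).

/* Hypothetical userspace mirror of the byte-by-byte MAC parse above. */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

#define MAC_ADDR_LEN 6

static int extract_macid(char *mac_str, unsigned char mac[MAC_ADDR_LEN])
{
	char *input;
	unsigned int byte;
	int i = 0;

	if (!mac_str)
		return -1;

	/* Walk ":"-separated tokens, converting each as hex */
	for (input = strsep(&mac_str, ":");
	     input && i < MAC_ADDR_LEN;
	     input = strsep(&mac_str, ":")) {
		if (sscanf(input, "%x", &byte) != 1)
			return -1;
		mac[i++] = (unsigned char)byte;
	}
	return (i == MAC_ADDR_LEN) ? 0 : -1;
}

int main(void)
{
	char arg[] = "00:55:7b:b5:7d:f7"; /* what ermac= would carry */
	unsigned char mac[MAC_ADDR_LEN];

	if (!extract_macid(arg, mac))
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}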
diff --git a/drivers/rmnet/perf/rmnet_perf_core.c b/drivers/rmnet/perf/rmnet_perf_core.c
index 4e8d4c6..4166c5d 100644
--- a/drivers/rmnet/perf/rmnet_perf_core.c
+++ b/drivers/rmnet/perf/rmnet_perf_core.c
@@ -498,10 +498,6 @@ rmnet_perf_core_handle_map_control_start(struct rmnet_map_dl_ind_hdr *dlhdr)
 	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
 	struct rmnet_perf_core_burst_marker_state *bm_state;
 
-	/* If handling deaggregation, we're already locked */
-	if (!rmnet_perf_core_is_deag_mode())
-		rmnet_perf_core_grab_lock();
-
 	bm_state = perf->core_meta->bm_state;
 	/* if we get two starts in a row, without an end, then we flush
 	 * and carry on
@@ -520,9 +516,6 @@ rmnet_perf_core_handle_map_control_start(struct rmnet_map_dl_ind_hdr *dlhdr)
 	trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_START_DL_MRK,
 			     bm_state->expect_packets, 0xDEF, 0xDEF, 0xDEF,
 			     NULL, NULL);
-
-	if (!rmnet_perf_core_is_deag_mode())
-		rmnet_perf_core_release_lock();
 }
 
 void rmnet_perf_core_handle_map_control_end_v2(struct rmnet_map_dl_ind_trl *dltrl,
@@ -536,10 +529,6 @@ void rmnet_perf_core_handle_map_control_end(struct rmnet_map_dl_ind_trl *dltrl)
 	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
 	struct rmnet_perf_core_burst_marker_state *bm_state;
 
-	/* If handling deaggregation, we're already locked */
-	if (!rmnet_perf_core_is_deag_mode())
-		rmnet_perf_core_grab_lock();
-
 	bm_state = perf->core_meta->bm_state;
 	rmnet_perf_opt_flush_all_flow_nodes();
 	rmnet_perf_core_flush_reason_cnt[RMNET_PERF_CORE_DL_MARKER_FLUSHES]++;
@@ -548,9 +537,6 @@ void rmnet_perf_core_handle_map_control_end(struct rmnet_map_dl_ind_trl *dltrl)
 	bm_state->expect_packets = 0;
 	trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_END_DL_MRK, 0xDEF,
 			     0xDEF, 0xDEF, 0xDEF, NULL, NULL);
-
-	if (!rmnet_perf_core_is_deag_mode())
-		rmnet_perf_core_release_lock();
 }
 
 int rmnet_perf_core_validate_pkt_csum(struct sk_buff *skb,
diff --git a/drivers/rmnet/shs/rmnet_shs.h b/drivers/rmnet/shs/rmnet_shs.h
index b7bf773..99ca7e4 100644
--- a/drivers/rmnet/shs/rmnet_shs.h
+++ b/drivers/rmnet/shs/rmnet_shs.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -156,6 +156,8 @@ enum rmnet_shs_switch_reason_e {
 	RMNET_SHS_SWITCH_WQ_RATE,
 	RMNET_SHS_OOO_PACKET_SWITCH,
 	RMNET_SHS_OOO_PACKET_TOTAL,
+	RMNET_SHS_SWITCH_PACKET_BURST,
+	RMNET_SHS_SWITCH_CORE_BACKLOG,
 	RMNET_SHS_SWITCH_MAX_REASON
 };
 
@@ -195,6 +197,7 @@ struct rmnet_shs_cpu_node_s {
 	u32 qtail;
 	u32 qdiff;
 	u32 parkedlen;
+	u32 seg;
 	u8 prio;
 	u8 wqprio;
 };
diff --git a/drivers/rmnet/shs/rmnet_shs_config.h b/drivers/rmnet/shs/rmnet_shs_config.h
index dc385e4..e55f5f8 100644
--- a/drivers/rmnet/shs/rmnet_shs_config.h
+++ b/drivers/rmnet/shs/rmnet_shs_config.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,7 @@ enum rmnet_shs_crit_err_e {
 	RMNET_SHS_WQ_INVALID_PTR_ERR,
 	RMNET_SHS_WQ_NODE_MALLOC_ERR,
 	RMNET_SHS_WQ_NL_SOCKET_ERR,
+	RMNET_SHS_CPU_FLOWS_BNDS_ERR,
 	RMNET_SHS_CRIT_ERR_MAX
 };
 
diff --git a/drivers/rmnet/shs/rmnet_shs_main.c b/drivers/rmnet/shs/rmnet_shs_main.c
index 2accd29..bb2f175 100755
--- a/drivers/rmnet/shs/rmnet_shs_main.c
+++ b/drivers/rmnet/shs/rmnet_shs_main.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -30,6 +30,8 @@
 #define NS_IN_MS 1000000
 #define LPWR_CLUSTER 0
 #define PERF_CLUSTER 4
+#define DEF_CORE_WAIT 10
+
 #define PERF_CORES 4
 
 #define INVALID_CPU -1
@@ -42,6 +44,8 @@
 #define GET_CTIMER(CPU) rmnet_shs_cfg.core_flush[CPU].core_timer
 
 #define SKB_FLUSH 0
+#define INCREMENT 1
+#define DECREMENT 0
 /* Local Definitions and Declarations */
 DEFINE_SPINLOCK(rmnet_shs_ht_splock);
 DEFINE_HASHTABLE(RMNET_SHS_HT, RMNET_SHS_HT_SIZE);
@@ -65,15 +69,15 @@ unsigned long rmnet_shs_flush_reason[RMNET_SHS_FLUSH_MAX_REASON];
 module_param_array(rmnet_shs_flush_reason, ulong, 0, 0444);
 MODULE_PARM_DESC(rmnet_shs_flush_reason, "rmnet shs skb flush trigger type");
 
-unsigned int rmnet_shs_byte_store_limit __read_mostly = 271800 * 8;
+unsigned int rmnet_shs_byte_store_limit __read_mostly = 271800 * 80;
 module_param(rmnet_shs_byte_store_limit, uint, 0644);
 MODULE_PARM_DESC(rmnet_shs_byte_store_limit, "Maximum byte module will park");
 
-unsigned int rmnet_shs_pkts_store_limit __read_mostly = 2100;
+unsigned int rmnet_shs_pkts_store_limit __read_mostly = 2100 * 8;
 module_param(rmnet_shs_pkts_store_limit, uint, 0644);
 MODULE_PARM_DESC(rmnet_shs_pkts_store_limit, "Maximum pkts module will park");
 
-unsigned int rmnet_shs_max_core_wait __read_mostly = 10;
+unsigned int rmnet_shs_max_core_wait __read_mostly = 45;
 module_param(rmnet_shs_max_core_wait, uint, 0644);
 MODULE_PARM_DESC(rmnet_shs_max_core_wait,
		 "Max wait module will wait during move to perf core in ms");
@@ -93,6 +97,11 @@ module_param(rmnet_shs_fall_back_timer, uint, 0644);
 MODULE_PARM_DESC(rmnet_shs_fall_back_timer,
		 "Option to enable fall back limit for parking");
 
+unsigned int rmnet_shs_backlog_max_pkts __read_mostly = 1200;
+module_param(rmnet_shs_backlog_max_pkts, uint, 0644);
+MODULE_PARM_DESC(rmnet_shs_backlog_max_pkts,
+		 "Max pkts in backlog prioritizing");
+
 unsigned int rmnet_shs_inst_rate_max_pkts __read_mostly = 2500;
 module_param(rmnet_shs_inst_rate_max_pkts, uint, 0644);
 MODULE_PARM_DESC(rmnet_shs_inst_rate_max_pkts,
@@ -110,17 +119,29 @@ unsigned int rmnet_shs_cpu_max_qdiff[MAX_CPUS];
 module_param_array(rmnet_shs_cpu_max_qdiff, uint, 0, 0644);
 MODULE_PARM_DESC(rmnet_shs_cpu_max_qdiff, "Max queue length seen of each core");
 
+unsigned int rmnet_shs_cpu_ooo_count[MAX_CPUS];
+module_param_array(rmnet_shs_cpu_ooo_count, uint, 0, 0644);
+MODULE_PARM_DESC(rmnet_shs_cpu_ooo_count, "OOO count for each cpu");
+
 unsigned int rmnet_shs_cpu_max_coresum[MAX_CPUS];
 module_param_array(rmnet_shs_cpu_max_coresum, uint, 0, 0644);
 MODULE_PARM_DESC(rmnet_shs_cpu_max_coresum, "Max coresum seen of each core");
 
+static void rmnet_shs_change_cpu_num_flows(u16 map_cpu, bool inc)
+{
+	if (map_cpu < MAX_CPUS)
+		(inc) ? cpu_num_flows[map_cpu]++ : cpu_num_flows[map_cpu]--;
+	else
+		rmnet_shs_crit_err[RMNET_SHS_CPU_FLOWS_BNDS_ERR]++;
+}
+
 void rmnet_shs_cpu_node_remove(struct rmnet_shs_skbn_s *node)
 {
	SHS_TRACE_LOW(RMNET_SHS_CPU_NODE, RMNET_SHS_CPU_NODE_FUNC_REMOVE,
		      0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 
	list_del_init(&node->node_id);
-	cpu_num_flows[node->map_cpu]--;
+	rmnet_shs_change_cpu_num_flows(node->map_cpu, DECREMENT);
 
 }
@@ -131,7 +152,7 @@ void rmnet_shs_cpu_node_add(struct rmnet_shs_skbn_s *node,
		      0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 
	list_add(&node->node_id, hd);
-	cpu_num_flows[node->map_cpu]++;
+	rmnet_shs_change_cpu_num_flows(node->map_cpu, INCREMENT);
 }
 
 void rmnet_shs_cpu_node_move(struct rmnet_shs_skbn_s *node,
@@ -141,10 +162,17 @@ void rmnet_shs_cpu_node_move(struct rmnet_shs_skbn_s *node,
		      0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 
	list_move(&node->node_id, hd);
-	cpu_num_flows[node->map_cpu]++;
-	cpu_num_flows[oldcpu]--;
+	rmnet_shs_change_cpu_num_flows(node->map_cpu, INCREMENT);
+	rmnet_shs_change_cpu_num_flows((u16) oldcpu, DECREMENT);
 }
 
+static void rmnet_shs_cpu_ooo(u8 cpu, int count)
+{
+	if (cpu < MAX_CPUS)
+	{
+		rmnet_shs_cpu_ooo_count[cpu] += count;
+	}
+}
 /* Evaluates the incoming transport protocol of the incoming skb. Determines
 * if the skb transport protocol will be supported by SHS module
 */
@@ -282,14 +310,6 @@ static void rmnet_shs_update_core_load(int cpu, int burst)
 
 }
 
-static int rmnet_shs_is_core_loaded(int cpu)
-{
-
-	return rmnet_shs_cfg.core_flush[cpu].coresum >=
-	       rmnet_shs_inst_rate_max_pkts;
-
-}
-
 /* We deliver packets to GRO module only for TCP traffic*/
 static int rmnet_shs_check_skb_can_gro(struct sk_buff *skb)
 {
@@ -423,18 +443,9 @@ int rmnet_shs_flow_num_perf_cores(struct rmnet_shs_skbn_s *node_p)
	return ret;
 }
 
-int rmnet_shs_is_lpwr_cpu(u16 cpu)
+inline int rmnet_shs_is_lpwr_cpu(u16 cpu)
 {
-	int ret = 1;
-	u32 big_cluster_mask = (1 << PERF_CLUSTER) - 1;
-
-	if ((1 << cpu) >= big_cluster_mask)
-		ret = 0;
-
-	SHS_TRACE_LOW(RMNET_SHS_CORE_CFG,
-		      RMNET_SHS_CORE_CFG_CHK_LO_CPU,
-		      ret, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
-	return ret;
+	return !((1 << cpu) & PERF_MASK);
 }
 
 /* Forms a new hash from the incoming hash based on the number of cores
@@ -665,6 +676,22 @@ u32 rmnet_shs_get_cpu_qdiff(u8 cpu_num)
	return ret;
 }
 
+
+static int rmnet_shs_is_core_loaded(int cpu, int backlog_check, int parked_pkts)
+{
+	int ret = 0;
+
+	if (rmnet_shs_cfg.core_flush[cpu].coresum >=
+	    rmnet_shs_inst_rate_max_pkts) {
+		ret = RMNET_SHS_SWITCH_PACKET_BURST;
+	}
+	if (backlog_check && ((rmnet_shs_get_cpu_qdiff(cpu) + parked_pkts) >=
+	    rmnet_shs_backlog_max_pkts))
+		ret = RMNET_SHS_SWITCH_CORE_BACKLOG;
+
+	return ret;
+}
+
 /* Takes a snapshot of absolute value of the CPU Qhead and Qtail counts for
 * a given core.
 *
@@ -784,6 +811,7 @@ int rmnet_shs_node_can_flush_pkts(struct rmnet_shs_skbn_s *node, u8 force_flush)
			rmnet_shs_switch_reason[RMNET_SHS_OOO_PACKET_TOTAL] +=
						(node_qhead -
						cur_cpu_qhead);
+			rmnet_shs_cpu_ooo(cpu_num, node_qhead - cur_cpu_qhead);
		}
		/* Mark gold core as prio to prevent
		 * flows from moving in wq
@@ -866,6 +894,8 @@ void rmnet_shs_flush_core(u8 cpu_num)
	rmnet_shs_cfg.num_bytes_parked -= total_bytes_flush;
	rmnet_shs_cfg.num_pkts_parked -= total_pkts_flush;
	rmnet_shs_cpu_node_tbl[cpu_num].prio = 0;
+	/* Reset coresum in case of instant rate switch */
+	rmnet_shs_cfg.core_flush[cpu_num].coresum = 0;
 
	rmnet_shs_cpu_node_tbl[cpu_num].parkedlen = 0;
	spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
	local_bh_enable();
@@ -1043,6 +1073,35 @@ int rmnet_shs_chk_and_flush_node(struct rmnet_shs_skbn_s *node,
			 node, NULL);
 
	return ret_val;
 }
+
+/* Check if cpu_num should be marked as a priority core and take care of
+ * marking it as priority and configuring all the changes need for a core
+ * switch.
+ */
+static void rmnet_shs_core_prio_check(u8 cpu_num, u8 segmented, u32 parked_pkts)
+{
+	u32 wait = (!rmnet_shs_max_core_wait) ? 1 : rmnet_shs_max_core_wait;
+	int load_reason;
+
+	if ((load_reason = rmnet_shs_is_core_loaded(cpu_num, segmented, parked_pkts)) &&
+	    rmnet_shs_is_lpwr_cpu(cpu_num) &&
+	    !rmnet_shs_cpu_node_tbl[cpu_num].prio) {
+
+		wait = (!segmented) ? DEF_CORE_WAIT : wait;
+		rmnet_shs_cpu_node_tbl[cpu_num].prio = 1;
+		rmnet_shs_boost_cpus();
+		if (hrtimer_active(&GET_CTIMER(cpu_num)))
+			hrtimer_cancel(&GET_CTIMER(cpu_num));
+
+		hrtimer_start(&GET_CTIMER(cpu_num),
+			      ns_to_ktime(wait * NS_IN_MS),
+			      HRTIMER_MODE_REL);
+
+		rmnet_shs_switch_reason[load_reason]++;
+	}
+}
+
 /* Flushes all the packets that have been parked so far across all the flows
 * The order of flushing depends on the CPU<=>flow association
 * The flows associated with low power cores are flushed before flushing
@@ -1063,13 +1122,13 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
	u32 cpu_tail;
	u32 num_pkts_flush = 0;
	u32 num_bytes_flush = 0;
+	u32 skb_seg_pending = 0;
	u32 total_pkts_flush = 0;
	u32 total_bytes_flush = 0;
	u32 total_cpu_gro_flushed = 0;
	u32 total_node_gro_flushed = 0;
-
	u8 is_flushed = 0;
-	u32 wait = (!rmnet_shs_max_core_wait) ? 1 : rmnet_shs_max_core_wait;
+	u8 cpu_segment = 0;
 
	/* Record a qtail + pkts flushed or move if reqd
	 * currently only use qtail for non TCP flows
@@ -1083,10 +1142,22 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
	for (cpu_num = 0; cpu_num < MAX_CPUS; cpu_num++) {
 
		cpu_tail = rmnet_shs_get_cpu_qtail(cpu_num);
-
+		cpu_segment = 0;
		total_cpu_gro_flushed = 0;
+		skb_seg_pending = 0;
+		list_for_each_safe(ptr, next,
+				   &rmnet_shs_cpu_node_tbl[cpu_num].node_list_id) {
+			n = list_entry(ptr, struct rmnet_shs_skbn_s, node_id);
+			skb_seg_pending += n->skb_list.skb_load;
+		}
+		if (rmnet_shs_inst_rate_switch) {
+			cpu_segment = rmnet_shs_cpu_node_tbl[cpu_num].seg;
+			rmnet_shs_core_prio_check(cpu_num, cpu_segment,
+						  skb_seg_pending);
+		}
+
		list_for_each_safe(ptr, next,
-			&rmnet_shs_cpu_node_tbl[cpu_num].node_list_id) {
+			   &rmnet_shs_cpu_node_tbl[cpu_num].node_list_id) {
 
			n = list_entry(ptr, struct rmnet_shs_skbn_s, node_id);
 
			if (n != NULL && n->skb_list.num_parked_skbs) {
@@ -1111,31 +1182,21 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
				}
			}
		}
+
		}
 
		/* If core is loaded set core flows as priority and
		 * start a 10ms hard flush timer
		 */
		if (rmnet_shs_inst_rate_switch) {
+			/* Update cpu load with prev flush for check */
			if (rmnet_shs_is_lpwr_cpu(cpu_num) &&
			    !rmnet_shs_cpu_node_tbl[cpu_num].prio)
				rmnet_shs_update_core_load(cpu_num,
				total_cpu_gro_flushed);
 
-			if (rmnet_shs_is_core_loaded(cpu_num) &&
-			    rmnet_shs_is_lpwr_cpu(cpu_num) &&
-			    !rmnet_shs_cpu_node_tbl[cpu_num].prio) {
-
-				rmnet_shs_cpu_node_tbl[cpu_num].prio = 1;
-				rmnet_shs_boost_cpus();
-				if (hrtimer_active(&GET_CTIMER(cpu_num)))
-					hrtimer_cancel(&GET_CTIMER(cpu_num));
+			rmnet_shs_core_prio_check(cpu_num, cpu_segment, 0);
 
-				hrtimer_start(&GET_CTIMER(cpu_num),
-						ns_to_ktime(wait * NS_IN_MS),
-						HRTIMER_MODE_REL);
-
-			}
		}
 
		if (rmnet_shs_cpu_node_tbl[cpu_num].parkedlen < 0)
@@ -1178,6 +1239,21 @@ void rmnet_shs_flush_table(u8 flsh, u8 ctxt)
	spin_lock_irqsave(&rmnet_shs_ht_splock, ht_flags);
 
	rmnet_shs_flush_lock_table(flsh, ctxt);
+	if (ctxt == RMNET_WQ_CTXT) {
+		/* If packets remain restart the timer in case there are no
+		 * more NET_RX flushes coming so pkts are no lost
+		 */
+		if (rmnet_shs_fall_back_timer &&
+		    rmnet_shs_cfg.num_bytes_parked &&
+		    rmnet_shs_cfg.num_pkts_parked){
+			if (hrtimer_active(&rmnet_shs_cfg.hrtimer_shs))
+				hrtimer_cancel(&rmnet_shs_cfg.hrtimer_shs);
+			hrtimer_start(&rmnet_shs_cfg.hrtimer_shs,
+				      ns_to_ktime(rmnet_shs_timeout * NS_IN_MS),
+				      HRTIMER_MODE_REL);
+		}
+		rmnet_shs_flush_reason[RMNET_SHS_FLUSH_WQ_FB_FLUSH]++;
+	}
 
	spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
 
@@ -1262,21 +1338,6 @@ static void rmnet_flush_buffered(struct work_struct *work)
		local_bh_disable();
		rmnet_shs_flush_table(is_force_flush,
				      RMNET_WQ_CTXT);
-
-		/* If packets remain restart the timer in case there are no
-		 * more NET_RX flushes coming so pkts are no lost
-		 */
-		if (rmnet_shs_fall_back_timer &&
-		    rmnet_shs_cfg.num_bytes_parked &&
-		    rmnet_shs_cfg.num_pkts_parked){
-			if (hrtimer_active(&rmnet_shs_cfg.hrtimer_shs))
-				hrtimer_cancel(&rmnet_shs_cfg.hrtimer_shs);
-
-			hrtimer_start(&rmnet_shs_cfg.hrtimer_shs,
-				      ns_to_ktime(rmnet_shs_timeout * NS_IN_MS),
-				      HRTIMER_MODE_REL);
-		}
-		rmnet_shs_flush_reason[RMNET_SHS_FLUSH_WQ_FB_FLUSH]++;
		local_bh_enable();
	}
	SHS_TRACE_HIGH(RMNET_SHS_FLUSH,
@@ -1649,9 +1710,9 @@ void rmnet_shs_assign(struct sk_buff *skb, struct rmnet_port *port)
		break;
	} while (0);
 
-	spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
 
	if (!is_shs_reqd) {
+		spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
		rmnet_shs_crit_err[RMNET_SHS_MAIN_SHS_NOT_REQD]++;
		rmnet_shs_deliver_skb(skb);
		SHS_TRACE_ERR(RMNET_SHS_ASSIGN,
@@ -1683,6 +1744,7 @@ void rmnet_shs_assign(struct sk_buff *skb, struct rmnet_port *port)
			       RMNET_SHS_FORCE_FLUSH_TIME_NSEC,
			       0xDEF, 0xDEF, 0xDEF, skb, NULL);
	}
+	spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
 
	if (rmnet_shs_cfg.num_pkts_parked >
						rmnet_shs_pkts_store_limit) {
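Note the shape of the rmnet_shs_main.c change: rmnet_shs_is_core_loaded() now reports *why* a core is loaded rather than a plain boolean, and rmnet_shs_core_prio_check() reuses that nonzero value to index the switch-reason counters. A minimal stand-in for that return-reason pattern, with made-up thresholds and names:

/* Sketch only: 0 means "not loaded"; any nonzero value doubles as the
 * index of the counter to bump. Thresholds here are illustrative. */
#include <stdio.h>

enum switch_reason {
	SWITCH_NONE = 0,
	SWITCH_PACKET_BURST,
	SWITCH_CORE_BACKLOG,
	SWITCH_MAX_REASON
};

static unsigned long switch_reason_cnt[SWITCH_MAX_REASON];

static int is_core_loaded(unsigned coresum, unsigned backlog)
{
	int ret = SWITCH_NONE;

	if (coresum >= 2500)	/* instantaneous burst check */
		ret = SWITCH_PACKET_BURST;
	if (backlog >= 1200)	/* backlog check wins if both trip */
		ret = SWITCH_CORE_BACKLOG;
	return ret;
}

int main(void)
{
	int reason = is_core_loaded(3000, 100);

	if (reason)	/* nonzero: boost the core and record why */
		switch_reason_cnt[reason]++;
	printf("burst=%lu backlog=%lu\n",
	       switch_reason_cnt[SWITCH_PACKET_BURST],
	       switch_reason_cnt[SWITCH_CORE_BACKLOG]);
	return 0;
}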
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.c b/drivers/rmnet/shs/rmnet_shs_wq.c
index 5944f16..53f5826 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq.c
@@ -32,23 +32,19 @@ MODULE_LICENSE("GPL v2");
 
 #define RMNET_SHS_FILTER_FLOW_RATE 100
 
 #define PERIODIC_CLEAN 0
-/* FORCE_CLEAN should only used during module de-ini.*/
+/* FORCE_CLEAN should only used during module de-init.*/
 #define FORCE_CLEAN 1
-/* Time to wait (in time ticks) before re-triggering the workqueue
- * 1 tick = 10 ms (Maximum possible resolution)
- * 100 ticks = 1 second
- */
 
 /* Local Definitions and Declarations */
 unsigned int rmnet_shs_cpu_prio_dur __read_mostly = 3;
 module_param(rmnet_shs_cpu_prio_dur, uint, 0644);
-MODULE_PARM_DESC(rmnet_shs_cpu_prio_dur, "Priority ignore duration(ticks)");
+MODULE_PARM_DESC(rmnet_shs_cpu_prio_dur, "Priority ignore duration (wq intervals)");
 
 #define PRIO_BACKOFF ((!rmnet_shs_cpu_prio_dur) ? 2 : rmnet_shs_cpu_prio_dur)
 
-unsigned int rmnet_shs_wq_frequency __read_mostly = RMNET_SHS_WQ_DELAY_TICKS;
-module_param(rmnet_shs_wq_frequency, uint, 0644);
-MODULE_PARM_DESC(rmnet_shs_wq_frequency, "Priodicity of Wq trigger(in ticks)");
+unsigned int rmnet_shs_wq_interval_ms __read_mostly = RMNET_SHS_WQ_INTERVAL_MS;
+module_param(rmnet_shs_wq_interval_ms, uint, 0644);
+MODULE_PARM_DESC(rmnet_shs_wq_interval_ms, "Interval between wq runs (ms)");
 
 unsigned long rmnet_shs_max_flow_inactivity_sec __read_mostly =
 RMNET_SHS_MAX_SKB_INACTIVE_TSEC;
@@ -91,7 +87,7 @@
 module_param_array(rmnet_shs_cpu_rx_flows, uint, 0, 0444);
 MODULE_PARM_DESC(rmnet_shs_cpu_rx_flows, "Num flows processed per core");
 
 unsigned int rmnet_shs_cpu_rx_filter_flows[MAX_CPUS];
-module_param_array(rmnet_shs_cpu_rx_filter_flows, uint, 0, 0644);
+module_param_array(rmnet_shs_cpu_rx_filter_flows, uint, 0, 0444);
 MODULE_PARM_DESC(rmnet_shs_cpu_rx_filter_flows, "Num filtered flows per core");
 
 unsigned long long rmnet_shs_cpu_rx_bytes[MAX_CPUS];
@@ -183,8 +179,7 @@ static struct rmnet_shs_wq_rx_flow_s rmnet_shs_rx_flow_tbl;
 static struct list_head rmnet_shs_wq_hstat_tbl =
				LIST_HEAD_INIT(rmnet_shs_wq_hstat_tbl);
 static int rmnet_shs_flow_dbg_stats_idx_cnt;
-static struct list_head rmnet_shs_wq_ep_tbl =
-				LIST_HEAD_INIT(rmnet_shs_wq_ep_tbl);
+struct list_head rmnet_shs_wq_ep_tbl = LIST_HEAD_INIT(rmnet_shs_wq_ep_tbl);
 
 /* Helper functions to add and remove entries to the table
 * that maintains a list of all endpoints (vnd's) available on this device.
@@ -544,6 +539,17 @@ void rmnet_shs_wq_update_hstat_rps_msk(struct rmnet_shs_wq_hstat_s *hstat_p)
		hstat_p->rps_config_msk = ep->rps_config_msk;
		hstat_p->def_core_msk = ep->default_core_msk;
		hstat_p->pri_core_msk = ep->pri_core_msk;
+
+		/* Update ep tput stats while we're here */
+		if (hstat_p->skb_tport_proto == IPPROTO_TCP) {
+			rm_err("SHS_UDP: adding TCP bps %lu to ep_total %lu ep name %s",
+			       hstat_p->rx_bps, ep->tcp_rx_bps, node_p->dev->name);
+			ep->tcp_rx_bps += hstat_p->rx_bps;
+		} else if (hstat_p->skb_tport_proto == IPPROTO_UDP) {
+			rm_err("SHS_UDP: adding UDP rx_bps %lu to ep_total %lu ep name %s",
+			       hstat_p->rx_bps, ep->udp_rx_bps, node_p->dev->name);
+			ep->udp_rx_bps += hstat_p->rx_bps;
+		}
		break;
	}
 }
@@ -1240,6 +1246,7 @@ int rmnet_shs_wq_check_cpu_move_for_ep(u16 current_cpu, u16 dest_cpu,
 int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
				  u32 sugg_type)
 {
+	unsigned long flags;
	struct rmnet_shs_wq_ep_s *ep;
 
	if (cur_cpu >= MAX_CPUS || dest_cpu >= MAX_CPUS) {
@@ -1251,6 +1258,7 @@ int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
	 * on it if is online, rps mask, isolation, etc. then make
	 * suggestion to change the cpu for the flow by passing its hash
	 */
+	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
		if (!ep)
			continue;
@@ -1272,9 +1280,13 @@ int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
			rm_err("SHS_FDESC: >> flow 0x%x was suggested to"
			       " move from cpu[%d] to cpu[%d] sugg_type [%d]",
			       hash_to_move, cur_cpu, dest_cpu, sugg_type);
+
+			spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
			return 1;
		}
	}
+
+	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
	return 0;
 }
 
@@ -1283,8 +1295,10 @@ int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
 {
	struct rmnet_shs_skbn_s *node_p;
	struct rmnet_shs_wq_hstat_s *hstat_p;
+	unsigned long ht_flags;
	u16 bkt;
 
+	spin_lock_irqsave(&rmnet_shs_ht_splock, ht_flags);
	hash_for_each(RMNET_SHS_HT, bkt, node_p, list) {
		if (!node_p)
			continue;
@@ -1306,8 +1320,10 @@ int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
			      0xDEF, 0xDEF, hstat_p, NULL);
 
		node_p->hstats->segment_enable = seg_enable;
+		spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
		return 1;
	}
+	spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
 
	rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u not set - hash not found",
	       hash_to_set, seg_enable);
@@ -1452,6 +1468,7 @@ void rmnet_shs_wq_eval_cpus_caps_and_flows(struct list_head *cpu_caps,
	rmnet_shs_wq_mem_update_cached_cpu_caps(cpu_caps);
	rmnet_shs_wq_mem_update_cached_sorted_gold_flows(gold_flows);
	rmnet_shs_wq_mem_update_cached_sorted_ss_flows(ss_flows);
+	rmnet_shs_wq_mem_update_cached_netdevs();
 
	rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_RESP_INT);
 
@@ -1614,12 +1631,14 @@ int rmnet_shs_wq_get_lpwr_cpu_new_flow(struct net_device *dev)
	int cpu_assigned = -1;
	u8 is_match_found = 0;
	struct rmnet_shs_wq_ep_s *ep = NULL;
+	unsigned long flags;
 
	if (!dev) {
		rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
		return cpu_assigned;
	}
 
+	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
		if (!ep)
			continue;
@@ -1635,6 +1654,7 @@ int rmnet_shs_wq_get_lpwr_cpu_new_flow(struct net_device *dev)
 
	if (!is_match_found) {
		rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
+		spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
		return cpu_assigned;
	}
 
@@ -1652,6 +1672,7 @@ int rmnet_shs_wq_get_lpwr_cpu_new_flow(struct net_device *dev)
	/* Increment CPU assignment idx to be ready for next flow assignment*/
	if ((cpu_assigned >= 0) || ((ep->new_lo_idx + 1) >= ep->new_lo_max))
		ep->new_lo_idx = ((ep->new_lo_idx + 1) % ep->new_lo_max);
+	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
 
	return cpu_assigned;
 }
@@ -1663,12 +1684,14 @@ int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev)
	u8 hi_idx;
	u8 hi_max;
	u8 is_match_found = 0;
+	unsigned long flags;
 
	if (!dev) {
		rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
		return cpu_assigned;
	}
 
+	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
		if (!ep)
			continue;
@@ -1684,6 +1707,7 @@ int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev)
 
	if (!is_match_found) {
		rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
+		spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
		return cpu_assigned;
	}
 
@@ -1700,6 +1724,7 @@ int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev)
	/* Increment CPU assignment idx to be ready for next flow assignment*/
	if (cpu_assigned >= 0)
		ep->new_hi_idx = ((hi_idx + 1) % hi_max);
+	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
 
	return cpu_assigned;
 }
@@ -1874,6 +1899,11 @@ void rmnet_shs_wq_refresh_ep_masks(void)
		if (!ep->is_ep_active)
			continue;
		rmnet_shs_wq_update_ep_rps_msk(ep);
+
+		/* These tput totals get re-added as we go through each flow */
+		ep->udp_rx_bps = 0;
+		ep->tcp_rx_bps = 0;
+
	}
 }
 
@@ -1909,14 +1939,18 @@ void rmnet_shs_update_cfg_mask(void)
 void rmnet_shs_wq_filter(void)
 {
-	int cpu;
+	int cpu, cur_cpu;
	int temp;
	struct rmnet_shs_wq_hstat_s *hnode = NULL;
 
-	for (cpu = 0; cpu < MAX_CPUS; cpu++)
+	for (cpu = 0; cpu < MAX_CPUS; cpu++) {
		rmnet_shs_cpu_rx_filter_flows[cpu] = 0;
+		rmnet_shs_cpu_node_tbl[cpu].seg = 0;
+	}
 
-	/* Filter out flows with low pkt count */
+	/* Filter out flows with low pkt count and
+	 * mark CPUS with slowstart flows
+	 */
	list_for_each_entry(hnode, &rmnet_shs_wq_hstat_tbl, hstat_node_id) {
 
		if (hnode->in_use == 0)
@@ -1927,6 +1961,14 @@ void rmnet_shs_wq_filter(void)
			temp = hnode->current_cpu;
			rmnet_shs_cpu_rx_filter_flows[temp]++;
		}
+		cur_cpu = hnode->current_cpu;
+		if (cur_cpu >= MAX_CPUS) {
+			continue;
+		}
+
+		if (hnode->node->hstats->segment_enable) {
+			rmnet_shs_cpu_node_tbl[cur_cpu].seg++;
+		}
	}
 }
 
@@ -1981,15 +2023,13 @@ void rmnet_shs_wq_update_stats(void)
	}
 
	rmnet_shs_wq_refresh_new_flow_list();
-	/*Invoke after both the locks are released*/
-	rmnet_shs_wq_cleanup_hash_tbl(PERIODIC_CLEAN);
-	rmnet_shs_wq_debug_print_flows();
	rmnet_shs_wq_filter();
 }
 
 void rmnet_shs_wq_process_wq(struct work_struct *work)
 {
	unsigned long flags;
+	unsigned long jiffies;
 
	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_PROCESS_WQ,
				RMNET_SHS_WQ_PROCESS_WQ_START,
@@ -1999,8 +2039,14 @@ void rmnet_shs_wq_process_wq(struct work_struct *work)
	rmnet_shs_wq_update_stats();
	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
 
+	/*Invoke after both the locks are released*/
+	rmnet_shs_wq_cleanup_hash_tbl(PERIODIC_CLEAN);
+	rmnet_shs_wq_debug_print_flows();
+
+	jiffies = msecs_to_jiffies(rmnet_shs_wq_interval_ms);
+
	queue_delayed_work(rmnet_shs_wq, &rmnet_shs_delayed_wq->wq,
-			   rmnet_shs_wq_frequency);
+			   jiffies);
 
	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_PROCESS_WQ,
				RMNET_SHS_WQ_PROCESS_WQ_END,
@@ -2034,6 +2080,7 @@ void rmnet_shs_wq_exit(void)
		return;
 
	rmnet_shs_wq_mem_deinit();
+	rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_WQ_EXIT);
 
	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_EXIT, RMNET_SHS_WQ_EXIT_START,
				0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
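The workqueue rearm above moves from a raw jiffies count (RMNET_SHS_WQ_DELAY_TICKS = 10) to milliseconds converted via msecs_to_jiffies(), so the interval no longer depends on CONFIG_HZ; the old default of 10 ticks equals the new 100 ms default only on HZ=100 kernels. A quick sanity check of that arithmetic, using a plain-C stand-in for the kernel macro (round-up division, illustration only):

/* Illustrative only: msecs_to_jiffies() is a kernel helper; this mimics
 * its ms -> jiffies rounding for a few common HZ values. */
#include <stdio.h>

static unsigned long msecs_to_jiffies_approx(unsigned int ms, unsigned int hz)
{
	return ((unsigned long)ms * hz + 999) / 1000; /* round up */
}

int main(void)
{
	unsigned int hz_vals[] = { 100, 250, 300, 1000 };
	unsigned int i;

	for (i = 0; i < sizeof(hz_vals) / sizeof(hz_vals[0]); i++)
		printf("HZ=%-4u -> 100 ms = %lu jiffies (old code always queued 10)\n",
		       hz_vals[i], msecs_to_jiffies_approx(100, hz_vals[i]));
	return 0;
}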
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.h b/drivers/rmnet/shs/rmnet_shs_wq.h
index 0d86200..aa0265c 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -31,14 +31,18 @@
 #define RMNET_SHS_NSEC_TO_SEC(x) ((x)/1000000000)
 #define RMNET_SHS_BYTE_TO_BIT(x) ((x)*8)
 #define RMNET_SHS_MIN_HSTAT_NODES_REQD 16
-#define RMNET_SHS_WQ_DELAY_TICKS 10
+#define RMNET_SHS_WQ_INTERVAL_MS 100
 
 extern unsigned long long rmnet_shs_cpu_rx_max_pps_thresh[MAX_CPUS]__read_mostly;
 extern unsigned long long rmnet_shs_cpu_rx_min_pps_thresh[MAX_CPUS]__read_mostly;
 
+extern struct list_head rmnet_shs_wq_ep_tbl;
+
 /* stores wq and end point details */
 struct rmnet_shs_wq_ep_s {
+	u64 tcp_rx_bps;
+	u64 udp_rx_bps;
	struct list_head ep_list_id;
	struct net_device *ep;
	int new_lo_core[MAX_CPUS];
@@ -161,6 +165,7 @@ struct rmnet_shs_wq_cpu_cap_s {
	struct list_head cpu_cap_list;
	u64 pps_capacity;
	u64 avg_pps_capacity;
+	u64 bps;
	u16 cpu_num;
 };
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_genl.c b/drivers/rmnet/shs/rmnet_shs_wq_genl.c
index b28f0c2..2dff48a 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_genl.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq_genl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -349,6 +349,8 @@
 int rmnet_shs_wq_genl_deinit(void)
 {
	int ret;
 
+	rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_WQ_EXIT);
+
	ret = genl_unregister_family(&rmnet_shs_genl_family);
	if(ret != 0){
		rm_err("SHS_GNL: unregister family failed: %i\n",ret);
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_genl.h b/drivers/rmnet/shs/rmnet_shs_wq_genl.h
index 333de48..9901d38 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_genl.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq_genl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,7 @@
 #define RMNET_SHS_GENL_VERSION 1
 #define RMNET_SHS_GENL_FAMILY_NAME "RMNET_SHS"
 #define RMNET_SHS_SYNC_RESP_INT 828 /* Any number, sent after mem update */
+#define RMNET_SHS_SYNC_WQ_EXIT 42
 
 extern int rmnet_shs_userspace_connected;
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_mem.c b/drivers/rmnet/shs/rmnet_shs_wq_mem.c
index 33abf80..062edb7 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_mem.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq_mem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -15,6 +15,7 @@
 
 #include "rmnet_shs_wq_mem.h"
 #include <linux/proc_fs.h>
+#include <linux/refcount.h>
 
 MODULE_LICENSE("GPL v2");
 
@@ -24,6 +25,7 @@ struct proc_dir_entry *shs_proc_dir;
 struct rmnet_shs_wq_cpu_cap_usr_s rmnet_shs_wq_cap_list_usr[MAX_CPUS];
 struct rmnet_shs_wq_gflows_usr_s rmnet_shs_wq_gflows_usr[RMNET_SHS_MAX_USRFLOWS];
 struct rmnet_shs_wq_ssflows_usr_s rmnet_shs_wq_ssflows_usr[RMNET_SHS_MAX_USRFLOWS];
+struct rmnet_shs_wq_netdev_usr_s rmnet_shs_wq_netdev_usr[RMNET_SHS_MAX_NETDEVS];
 
 struct list_head gflows = LIST_HEAD_INIT(gflows);   /* gold flows */
 struct list_head ssflows = LIST_HEAD_INIT(ssflows); /* slow start flows */
@@ -32,6 +34,7 @@ struct list_head cpu_caps = LIST_HEAD_INIT(cpu_caps); /* capacities */
 struct rmnet_shs_mmap_info *cap_shared;
 struct rmnet_shs_mmap_info *gflow_shared;
 struct rmnet_shs_mmap_info *ssflow_shared;
+struct rmnet_shs_mmap_info *netdev_shared;
 
 /* Static Functions and Definitions */
 static void rmnet_shs_vm_open(struct vm_area_struct *vma)
@@ -121,6 +124,32 @@ static int rmnet_shs_vm_fault_ss_flows(struct vm_fault *vmf)
	return 0;
 }
 
+static int rmnet_shs_vm_fault_netdev(struct vm_fault *vmf)
+{
+	struct page *page = NULL;
+	struct rmnet_shs_mmap_info *info;
+
+	rmnet_shs_wq_ep_lock_bh();
+	if (netdev_shared) {
+		info = (struct rmnet_shs_mmap_info *) vmf->vma->vm_private_data;
+		if (info->data) {
+			page = virt_to_page(info->data);
+			get_page(page);
+			vmf->page = page;
+		} else {
+			rmnet_shs_wq_ep_unlock_bh();
+			return VM_FAULT_SIGSEGV;
+		}
+	} else {
+		rmnet_shs_wq_ep_unlock_bh();
+		return VM_FAULT_SIGSEGV;
+	}
+	rmnet_shs_wq_ep_unlock_bh();
+
+	return 0;
+}
+
+
 static const struct vm_operations_struct rmnet_shs_vm_ops_caps = {
	.close = rmnet_shs_vm_close,
	.open = rmnet_shs_vm_open,
@@ -139,6 +168,12 @@ static const struct vm_operations_struct rmnet_shs_vm_ops_ss_flows = {
	.fault = rmnet_shs_vm_fault_ss_flows,
 };
 
+static const struct vm_operations_struct rmnet_shs_vm_ops_netdev = {
+	.close = rmnet_shs_vm_close,
+	.open = rmnet_shs_vm_open,
+	.fault = rmnet_shs_vm_fault_netdev,
+};
+
 static int rmnet_shs_mmap_caps(struct file *filp, struct vm_area_struct *vma)
 {
	vma->vm_ops = &rmnet_shs_vm_ops_caps;
@@ -166,6 +201,15 @@ static int rmnet_shs_mmap_ss_flows(struct file *filp, struct vm_area_struct *vma)
	return 0;
 }
 
+static int rmnet_shs_mmap_netdev(struct file *filp, struct vm_area_struct *vma)
+{
+	vma->vm_ops = &rmnet_shs_vm_ops_netdev;
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_private_data = filp->private_data;
+
+	return 0;
+}
+
 static int rmnet_shs_open_caps(struct inode *inode, struct file *filp)
 {
	struct rmnet_shs_mmap_info *info;
@@ -185,9 +229,12 @@ static int rmnet_shs_open_caps(struct inode *inode, struct file *filp)
		}
 
		cap_shared = info;
+		refcount_set(&cap_shared->refcnt, 1);
		rm_err("SHS_MEM: virt_to_phys = 0x%llx cap_shared = 0x%llx\n",
		       (unsigned long long)virt_to_phys((void *)info),
		       (unsigned long long)virt_to_phys((void *)cap_shared));
+	} else {
+		refcount_inc(&cap_shared->refcnt);
	}
 
	filp->private_data = cap_shared;
@@ -222,10 +269,14 @@ static int rmnet_shs_open_g_flows(struct inode *inode, struct file *filp)
		}
 
		gflow_shared = info;
+		refcount_set(&gflow_shared->refcnt, 1);
		rm_err("SHS_MEM: virt_to_phys = 0x%llx gflow_shared = 0x%llx\n",
		       (unsigned long long)virt_to_phys((void *)info),
		       (unsigned long long)virt_to_phys((void *)gflow_shared));
+	} else {
+		refcount_inc(&gflow_shared->refcnt);
	}
+
	filp->private_data = gflow_shared;
 
	rmnet_shs_wq_ep_unlock_bh();
@@ -256,10 +307,14 @@ static int rmnet_shs_open_ss_flows(struct inode *inode, struct file *filp)
		}
 
		ssflow_shared = info;
+		refcount_set(&ssflow_shared->refcnt, 1);
		rm_err("SHS_MEM: virt_to_phys = 0x%llx ssflow_shared = 0x%llx\n",
		       (unsigned long long)virt_to_phys((void *)info),
		       (unsigned long long)virt_to_phys((void *)ssflow_shared));
+	} else {
+		refcount_inc(&ssflow_shared->refcnt);
	}
+
	filp->private_data = ssflow_shared;
 
	rmnet_shs_wq_ep_unlock_bh();
@@ -271,40 +326,59 @@ fail:
	return -ENOMEM;
 }
 
-static ssize_t rmnet_shs_read(struct file *filp, char __user *buf, size_t len, loff_t *off)
+static int rmnet_shs_open_netdev(struct inode *inode, struct file *filp)
 {
	struct rmnet_shs_mmap_info *info;
-	int ret = 0;
 
-	rm_err("%s", "SHS_MEM: rmnet_shs_read - entry\n");
+	rm_err("%s", "SHS_MEM: rmnet_shs_open netdev - entry\n");
 
	rmnet_shs_wq_ep_lock_bh();
-	info = filp->private_data;
-	ret = min_t(size_t, len, RMNET_SHS_BUFFER_SIZE);
-	if (copy_to_user(buf, info->data, ret))
-		ret = -EFAULT;
-	rmnet_shs_wq_ep_unlock_bh();
+	if (!netdev_shared) {
+		info = kzalloc(sizeof(struct rmnet_shs_mmap_info), GFP_ATOMIC);
+		if (!info)
+			goto fail;
 
-	return ret;
-}
+		info->data = (char *)get_zeroed_page(GFP_ATOMIC);
+		if (!info->data) {
+			kfree(info);
+			goto fail;
+		}
 
-static ssize_t rmnet_shs_write(struct file *filp, const char __user *buf, size_t len, loff_t *off)
-{
-	struct rmnet_shs_mmap_info *info;
-	int ret;
+		netdev_shared = info;
+		refcount_set(&netdev_shared->refcnt, 1);
+		rm_err("SHS_MEM: virt_to_phys = 0x%llx netdev_shared = 0x%llx\n",
+		       (unsigned long long)virt_to_phys((void *)info),
+		       (unsigned long long)virt_to_phys((void *)netdev_shared));
+	} else {
+		refcount_inc(&netdev_shared->refcnt);
+	}
 
-	rm_err("%s", "SHS_MEM: rmnet_shs_write - entry\n");
+	filp->private_data = netdev_shared;
+	rmnet_shs_wq_ep_unlock_bh();
 
-	rmnet_shs_wq_ep_lock_bh();
-	info = filp->private_data;
-	ret = min_t(size_t, len, RMNET_SHS_BUFFER_SIZE);
-	if (copy_from_user(info->data, buf, ret))
-		ret = -EFAULT;
-	else
-		ret = len;
+	return 0;
+
+fail:
	rmnet_shs_wq_ep_unlock_bh();
+	return -ENOMEM;
+}
+
+static ssize_t rmnet_shs_read(struct file *filp, char __user *buf, size_t len, loff_t *off)
+{
+	/*
+	 * Decline to expose file value and simply return benign value
+	 */
+	return RMNET_SHS_READ_VAL;
+}
 
-	return ret;
+static ssize_t rmnet_shs_write(struct file *filp, const char __user *buf, size_t len, loff_t *off)
+{
+	/*
+	 * Returning zero here would result in echo commands hanging
+	 * Instead return len and simply decline to allow echo'd values to
+	 * take effect
+	 */
+	return len;
 }
 
 static int rmnet_shs_release_caps(struct inode *inode, struct file *filp)
@@ -316,10 +390,14 @@ static int rmnet_shs_release_caps(struct inode *inode, struct file *filp)
	rmnet_shs_wq_ep_lock_bh();
	if (cap_shared) {
		info = filp->private_data;
-		free_page((unsigned long)info->data);
-		kfree(info);
-		cap_shared = NULL;
-		filp->private_data = NULL;
+		if (refcount_read(&info->refcnt) <= 1) {
+			free_page((unsigned long)info->data);
+			kfree(info);
+			cap_shared = NULL;
+			filp->private_data = NULL;
+		} else {
+			refcount_dec(&info->refcnt);
+		}
	}
	rmnet_shs_wq_ep_unlock_bh();
 
@@ -335,10 +413,14 @@ static int rmnet_shs_release_g_flows(struct inode *inode, struct file *filp)
	rmnet_shs_wq_ep_lock_bh();
	if (gflow_shared) {
		info = filp->private_data;
-		free_page((unsigned long)info->data);
-		kfree(info);
-		gflow_shared = NULL;
-		filp->private_data = NULL;
+		if (refcount_read(&info->refcnt) <= 1) {
+			free_page((unsigned long)info->data);
+			kfree(info);
+			gflow_shared = NULL;
+			filp->private_data = NULL;
+		} else {
+			refcount_dec(&info->refcnt);
+		}
	}
	rmnet_shs_wq_ep_unlock_bh();
 
@@ -354,10 +436,37 @@ static int rmnet_shs_release_ss_flows(struct inode *inode, struct file *filp)
	rmnet_shs_wq_ep_lock_bh();
	if (ssflow_shared) {
		info = filp->private_data;
-		free_page((unsigned long)info->data);
-		kfree(info);
-		ssflow_shared = NULL;
-		filp->private_data = NULL;
+		if (refcount_read(&info->refcnt) <= 1) {
+			free_page((unsigned long)info->data);
+			kfree(info);
+			ssflow_shared = NULL;
+			filp->private_data = NULL;
+		} else {
+			refcount_dec(&info->refcnt);
+		}
+	}
+	rmnet_shs_wq_ep_unlock_bh();
+
+	return 0;
+}
+
+static int rmnet_shs_release_netdev(struct inode *inode, struct file *filp)
+{
+	struct rmnet_shs_mmap_info *info;
+
+	rm_err("%s", "SHS_MEM: rmnet_shs_release netdev - entry\n");
+
+	rmnet_shs_wq_ep_lock_bh();
+	if (netdev_shared) {
+		info = filp->private_data;
+		if (refcount_read(&info->refcnt) <= 1) {
+			free_page((unsigned long)info->data);
+			kfree(info);
+			netdev_shared = NULL;
+			filp->private_data = NULL;
+		} else {
+			refcount_dec(&info->refcnt);
+		}
	}
	rmnet_shs_wq_ep_unlock_bh();
 
	return 0;
 }
@@ -391,6 +500,14 @@ static const struct file_operations rmnet_shs_ss_flows_fops = {
	.write = rmnet_shs_write,
 };
 
+static const struct file_operations rmnet_shs_netdev_fops = {
+	.owner = THIS_MODULE,
+	.mmap = rmnet_shs_mmap_netdev,
+	.open = rmnet_shs_open_netdev,
+	.release = rmnet_shs_release_netdev,
+	.read = rmnet_shs_read,
+	.write = rmnet_shs_write,
+};
+
 /* Global Functions */
 /* Add a flow to the slow start flow list */
@@ -522,6 +639,7 @@ void rmnet_shs_wq_cpu_caps_list_add(
	if (flows <= 0) {
		cap_node->pps_capacity = pps_uthresh;
		cap_node->avg_pps_capacity = pps_uthresh;
+		cap_node->bps = 0;
		list_add(&cap_node->cpu_cap_list, cpu_caps);
		return;
	}
@@ -542,6 +660,8 @@ void rmnet_shs_wq_cpu_caps_list_add(
		cap_node->avg_pps_capacity = 0;
	}
 
+	cap_node->bps = cpu_node->rx_bps;
+
	list_add(&cap_node->cpu_cap_list, cpu_caps);
 }
 
@@ -593,12 +713,13 @@ void rmnet_shs_wq_mem_update_cached_cpu_caps(struct list_head *cpu_caps)
			break;
 
		rm_err("SHS_SCAPS: > cpu[%d] with pps capacity = %llu | "
-		       "avg pps cap = %llu",
+		       "avg pps cap = %llu bps = %llu",
		       cap_node->cpu_num, cap_node->pps_capacity,
-		       cap_node->avg_pps_capacity);
+		       cap_node->avg_pps_capacity, cap_node->bps);
 
		rmnet_shs_wq_cap_list_usr[idx].avg_pps_capacity = cap_node->avg_pps_capacity;
		rmnet_shs_wq_cap_list_usr[idx].pps_capacity = cap_node->pps_capacity;
+		rmnet_shs_wq_cap_list_usr[idx].bps = cap_node->bps;
		rmnet_shs_wq_cap_list_usr[idx].cpu_num = cap_node->cpu_num;
		idx += 1;
	}
@@ -740,13 +861,97 @@ void rmnet_shs_wq_mem_update_cached_sorted_ss_flows(struct list_head *ss_flows)
	rm_err("SHS_SLOW: num ss flows = %u\n", idx);
 
	/* Copy num ss flows into first 2 bytes,
-	   then copy in the cached gold flow array */
+	   then copy in the cached ss flow array */
	memcpy(((char *)ssflow_shared->data), &idx, sizeof(idx));
	memcpy(((char *)ssflow_shared->data + sizeof(uint16_t)),
	       (void *) &rmnet_shs_wq_ssflows_usr[0],
	       sizeof(rmnet_shs_wq_ssflows_usr));
 }
 
+/* Extract info required from the rmnet_port array then memcpy to shared mem.
+ * > Add number of active netdevices/endpoints at the start.
+ * > After memcpy is complete, send userspace a message indicating that memcpy
+ *   has just completed.
+ * > The netdev is formated like this:
+ *   | num_netdevs | data_format | {rmnet_data0,ip_miss,rx_pkts} | ... |
+ *   |  16 bits    |  32 bits    |                               |     |
+ */
+void rmnet_shs_wq_mem_update_cached_netdevs(void)
+{
+	struct rmnet_priv *priv;
+	struct rmnet_shs_wq_ep_s *ep = NULL;
+	u16 idx = 0;
+	u16 count = 0;
+
+	rm_err("SHS_NETDEV: function enter %u\n", idx);
+	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
+		count += 1;
+		rm_err("SHS_NETDEV: function enter ep %u\n", count);
+		if (!ep)
+			continue;
+
+		if (!ep->is_ep_active) {
+			rm_err("SHS_NETDEV: ep %u is NOT active\n", count);
+			continue;
+		}
+
+		rm_err("SHS_NETDEV: ep %u is active and not null\n", count);
+		if (idx >= RMNET_SHS_MAX_NETDEVS) {
+			break;
+		}
+
+		priv = netdev_priv(ep->ep);
+		if (!priv) {
+			rm_err("SHS_NETDEV: priv for ep %u is null\n", count);
+			continue;
+		}
+
+		rm_err("SHS_NETDEV: ep %u has name = %s \n", count,
+		       ep->ep->name);
+		rm_err("SHS_NETDEV: ep %u has mux_id = %u \n", count,
+		       priv->mux_id);
+		rm_err("SHS_NETDEV: ep %u has ip_miss = %lu \n", count,
+		       priv->stats.coal.close.ip_miss);
+		rm_err("SHS_NETDEV: ep %u has coal_rx_pkts = %lu \n", count,
+		       priv->stats.coal.coal_pkts);
+		rm_err("SHS_NETDEV: ep %u has udp_rx_bps = %lu \n", count,
+		       ep->udp_rx_bps);
+		rm_err("SHS_NETDEV: ep %u has tcp_rx_bps = %lu \n", count,
+		       ep->tcp_rx_bps);
+
+		/* Set netdev name and ip mismatch count */
+		rmnet_shs_wq_netdev_usr[idx].coal_ip_miss = priv->stats.coal.close.ip_miss;
+		rmnet_shs_wq_netdev_usr[idx].hw_evict = priv->stats.coal.close.hw_evict;
+		rmnet_shs_wq_netdev_usr[idx].coal_tcp = priv->stats.coal.coal_tcp;
+		rmnet_shs_wq_netdev_usr[idx].coal_tcp_bytes = priv->stats.coal.coal_tcp_bytes;
+		rmnet_shs_wq_netdev_usr[idx].coal_udp = priv->stats.coal.coal_udp;
+		rmnet_shs_wq_netdev_usr[idx].coal_udp_bytes = priv->stats.coal.coal_udp_bytes;
+		rmnet_shs_wq_netdev_usr[idx].mux_id = priv->mux_id;
+		strlcpy(rmnet_shs_wq_netdev_usr[idx].name,
+			ep->ep->name,
+			sizeof(rmnet_shs_wq_netdev_usr[idx].name));
+
+		/* Set rx pkt from netdev stats */
+		rmnet_shs_wq_netdev_usr[idx].coal_rx_pkts = priv->stats.coal.coal_pkts;
+		rmnet_shs_wq_netdev_usr[idx].tcp_rx_bps = ep->tcp_rx_bps;
+		rmnet_shs_wq_netdev_usr[idx].udp_rx_bps = ep->udp_rx_bps;
+		idx += 1;
+	}
+
+	rm_err("SHS_MEM: netdev_shared = 0x%llx addr = 0x%pK\n",
+	       (unsigned long long)virt_to_phys((void *)netdev_shared), netdev_shared);
+	if (!netdev_shared) {
+		rm_err("%s", "SHS_WRITE: netdev_shared is NULL");
+		return;
+	}
+
+	memcpy(((char *)netdev_shared->data), &idx, sizeof(idx));
+	memcpy(((char *)netdev_shared->data + sizeof(uint16_t)),
+	       (void *) &rmnet_shs_wq_netdev_usr[0],
+	       sizeof(rmnet_shs_wq_netdev_usr));
+}
+
 /* Creates the proc folder and files for shs shared memory */
 void rmnet_shs_wq_mem_init(void)
 {
@@ -755,11 +960,13 @@ void rmnet_shs_wq_mem_init(void)
	proc_create(RMNET_SHS_PROC_CAPS, 0644, shs_proc_dir, &rmnet_shs_caps_fops);
	proc_create(RMNET_SHS_PROC_G_FLOWS, 0644, shs_proc_dir, &rmnet_shs_g_flows_fops);
	proc_create(RMNET_SHS_PROC_SS_FLOWS, 0644, shs_proc_dir, &rmnet_shs_ss_flows_fops);
+	proc_create(RMNET_SHS_PROC_NETDEV, 0644, shs_proc_dir, &rmnet_shs_netdev_fops);
 
	rmnet_shs_wq_ep_lock_bh();
	cap_shared = NULL;
	gflow_shared = NULL;
	ssflow_shared = NULL;
+	netdev_shared = NULL;
	rmnet_shs_wq_ep_unlock_bh();
 }
 
@@ -769,11 +976,13 @@ void rmnet_shs_wq_mem_deinit(void)
	remove_proc_entry(RMNET_SHS_PROC_CAPS, shs_proc_dir);
	remove_proc_entry(RMNET_SHS_PROC_G_FLOWS, shs_proc_dir);
	remove_proc_entry(RMNET_SHS_PROC_SS_FLOWS, shs_proc_dir);
+	remove_proc_entry(RMNET_SHS_PROC_NETDEV, shs_proc_dir);
	remove_proc_entry(RMNET_SHS_PROC_DIR, NULL);
 
	rmnet_shs_wq_ep_lock_bh();
	cap_shared = NULL;
	gflow_shared = NULL;
	ssflow_shared = NULL;
+	netdev_shared = NULL;
	rmnet_shs_wq_ep_unlock_bh();
 }
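The release paths above now free the shared page only when the last opener goes away (refcount_read(&info->refcnt) <= 1), instead of unconditionally on every close. A userspace sketch of that same open/release discipline with a plain atomic counter, all names hypothetical:

/* Sketch of the refcounted shared-buffer lifecycle used by the proc files. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct mmap_info {
	char *data;
	atomic_int refcnt;
};

static struct mmap_info *shared;

static struct mmap_info *shs_open(void)
{
	if (!shared) {
		shared = calloc(1, sizeof(*shared));
		if (!shared)
			return NULL;
		shared->data = calloc(1, 4096);
		atomic_store(&shared->refcnt, 1); /* first opener allocates */
	} else {
		atomic_fetch_add(&shared->refcnt, 1); /* later openers take a ref */
	}
	return shared;
}

static void shs_release(struct mmap_info *info)
{
	/* Free only when the last reference is dropped */
	if (atomic_load(&info->refcnt) <= 1) {
		free(info->data);
		free(info);
		shared = NULL;
	} else {
		atomic_fetch_sub(&info->refcnt, 1);
	}
}

int main(void)
{
	struct mmap_info *a = shs_open();
	struct mmap_info *b = shs_open(); /* refcnt == 2 */
	shs_release(a); /* buffer survives the first close */
	shs_release(b); /* last close frees it */
	printf("shared=%p\n", (void *)shared); /* NULL after last release */
	return 0;
}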
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_mem.h b/drivers/rmnet/shs/rmnet_shs_wq_mem.h
index 2e5e889..e955606 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_mem.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq_mem.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -23,16 +23,23 @@
 #define RMNET_SHS_PROC_CAPS "rmnet_shs_caps"
 #define RMNET_SHS_PROC_G_FLOWS "rmnet_shs_flows"
 #define RMNET_SHS_PROC_SS_FLOWS "rmnet_shs_ss_flows"
+#define RMNET_SHS_PROC_NETDEV "rmnet_shs_netdev"
 
 #define RMNET_SHS_MAX_USRFLOWS (128)
+#define RMNET_SHS_MAX_NETDEVS (40)
+#define RMNET_SHS_IFNAMSIZ (16)
+#define RMNET_SHS_READ_VAL (0)
 
+/* NOTE: Make sure these structs fit in one page */
+/* 26 bytes * 8 max cpus = 208 bytes < 4096 */
 struct __attribute__((__packed__)) rmnet_shs_wq_cpu_cap_usr_s {
	u64 pps_capacity;
	u64 avg_pps_capacity;
-	u64 bps_capacity;
+	u64 bps;
	u16 cpu_num;
 };
 
+/* 30 bytes * 128 max = 3840 bytes < 4096 */
 struct __attribute__((__packed__)) rmnet_shs_wq_gflows_usr_s {
	u64 rx_pps;
	u64 avg_pps;
@@ -41,6 +48,7 @@ struct __attribute__((__packed__)) rmnet_shs_wq_gflows_usr_s {
	u16 cpu_num;
 };
 
+/* 30 bytes * 128 max = 3840 bytes < 4096 */
 struct __attribute__((__packed__)) rmnet_shs_wq_ssflows_usr_s {
	u64 rx_pps;
	u64 avg_pps;
@@ -49,6 +57,21 @@ struct __attribute__((__packed__)) rmnet_shs_wq_ssflows_usr_s {
	u16 cpu_num;
 };
 
+/* 16 + 8*9 + 8 = 89 bytes, 89*40 netdev = 3560 bytes < 4096 */
+struct __attribute__((__packed__)) rmnet_shs_wq_netdev_usr_s {
+	char name[RMNET_SHS_IFNAMSIZ];
+	u64 coal_ip_miss;
+	u64 hw_evict;
+	u64 coal_rx_pkts;
+	u64 coal_tcp;
+	u64 coal_tcp_bytes;
+	u64 coal_udp;
+	u64 coal_udp_bytes;
+	u64 udp_rx_bps;
+	u64 tcp_rx_bps;
+	u8 mux_id;
+};
+
 extern struct list_head gflows;
 extern struct list_head ssflows;
 extern struct list_head cpu_caps;
@@ -58,6 +81,7 @@ enum {RMNET_SHS_BUFFER_SIZE = 4096};
 
 struct rmnet_shs_mmap_info {
	char *data;
+	refcount_t refcnt;
 };
 
 /* Function Definitions */
@@ -81,6 +105,7 @@
 void rmnet_shs_wq_mem_update_cached_cpu_caps(struct list_head *cpu_caps);
 void rmnet_shs_wq_mem_update_cached_sorted_gold_flows(struct list_head *gold_flows);
 void rmnet_shs_wq_mem_update_cached_sorted_ss_flows(struct list_head *ss_flows);
+void rmnet_shs_wq_mem_update_cached_netdevs(void);
 
 void rmnet_shs_wq_mem_init(void);
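For completeness, a hedged sketch of how a userspace client might consume the new netdev node: mmap one read-only page, take the entry count from the first two bytes, then walk the packed rmnet_shs_wq_netdev_usr_s array (mirrored below from the header above). The "/proc/shs/" directory name is an assumption, since the value of RMNET_SHS_PROC_DIR is not shown in this diff.

/* Hypothetical userspace reader for the rmnet_shs_netdev proc file. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define RMNET_SHS_IFNAMSIZ 16
#define RMNET_SHS_MAX_NETDEVS 40

struct __attribute__((__packed__)) rmnet_shs_wq_netdev_usr_s {
	char name[RMNET_SHS_IFNAMSIZ];
	uint64_t coal_ip_miss;
	uint64_t hw_evict;
	uint64_t coal_rx_pkts;
	uint64_t coal_tcp;
	uint64_t coal_tcp_bytes;
	uint64_t coal_udp;
	uint64_t coal_udp_bytes;
	uint64_t udp_rx_bps;
	uint64_t tcp_rx_bps;
	uint8_t mux_id;
};

int main(void)
{
	int fd = open("/proc/shs/rmnet_shs_netdev", O_RDONLY); /* dir name assumed */
	if (fd < 0)
		return 1;

	/* One read-only page; faults are served from netdev_shared->data */
	void *base = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED) {
		close(fd);
		return 1;
	}

	uint16_t count;
	memcpy(&count, base, sizeof(count)); /* first 2 bytes: num_netdevs */
	if (count > RMNET_SHS_MAX_NETDEVS)
		count = RMNET_SHS_MAX_NETDEVS;

	const struct rmnet_shs_wq_netdev_usr_s *nd =
		(const void *)((const char *)base + sizeof(uint16_t));
	for (uint16_t i = 0; i < count; i++)
		printf("%s mux=%u tcp_rx_bps=%llu udp_rx_bps=%llu\n",
		       nd[i].name, (unsigned)nd[i].mux_id,
		       (unsigned long long)nd[i].tcp_rx_bps,
		       (unsigned long long)nd[i].udp_rx_bps);

	munmap(base, 4096);
	close(fd);
	return 0;
}

Per the workqueue path above, the cached array is rewritten each wq pass and userspace is nudged with the RMNET_SHS_SYNC_RESP_INT genl message once the memcpy completes, so a client would typically re-read the page on that notification.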