author		Jonathan Solnit <jsolnit@google.com>	2017-07-27 09:33:05 -0700
committer	Jonathan Solnit <jsolnit@google.com>	2017-07-27 09:33:05 -0700
commit		8ff24327ecca5d30e239b5f4158f9a46d03d84dd (patch)
tree		c38f0b9746dbd57b6dcb8d21b36babc266eaa107
parent		33ebe944be73d7eef354300266d42df693e33438 (diff)
parent		ba055940e942895a54bda19d1f4582a220aa701c (diff)
download	x86_64-android-x86_64-fugu-3.10-oreo-r6.tar.gz
Merge branch 'android-x86_64-fugu-3.10-nyc-mr1' into android-x86_64-fugu-3.10 (refs: android-8.0.0_r0.6, android-x86_64-fugu-3.10-oreo-r6)
September 2017.1
Bug: 63174165
Change-Id: Ie8d685edcbffbea72bcedcd02478b78017775b07
43 files changed, 825 insertions(+), 1052 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 2fe6e767b3d6..d6d1988398fe 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2880,6 +2880,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	spia_pedr=
 	spia_peddr=
 
+	stack_guard_gap=	[MM]
+			override the default stack gap protection. The value
+			is in page units and it defines how many pages prior
+			to (for stacks growing down) resp. after (for stacks
+			growing up) the main stack are reserved for no other
+			mapping. Default value is 256 pages.
+
 	stacktrace	[FTRACE]
 			Enabled the stack tracer on boot up.
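Worked example for the parameter documented above (values illustrative, not from this commit): on a kernel with 4 KiB pages, the 256-page default reserves 256 x 4 KiB = 1 MiB below a downward-growing stack; booting with a larger value doubles that, e.g.:

    # illustrative boot command line: 512 pages x 4 KiB = 2 MiB guard gap
    ... stack_guard_gap=512 ...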
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 2e06d56e987b..cf4ae6958240 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -64,7 +64,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index ea331cfcdc95..f6acc1cbcd09 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -89,7 +89,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -140,7 +140,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index 836f14707a62..efa59f1f8022 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(current->mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			goto success;
 	}
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 358820ba62bc..3fc4cc8b5b36 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -92,7 +92,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 3e99c149271a..b2d248611390 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 	if ((mm->task_size - len) < addr)
 		return 0;
 	vma = find_vma(mm, addr);
-	return (!vma || (addr + len) <= vma->vm_start);
+	return (!vma || (addr + len) <= vm_start_gap(vma));
 }
 
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 6777177807c2..7df7d5944188 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -63,7 +63,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -113,7 +113,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 1e9e320583c7..4591b8050d2e 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -119,7 +119,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -182,7 +182,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index d2b59441ebdd..ce4937025e97 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -118,7 +118,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, HPAGE_SIZE);
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 650ccff8378c..c75eac7a2316 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -297,7 +297,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (current->mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index e1aea7d76552..dcd247787f88 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -128,7 +128,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (end - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -167,7 +167,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index ae1aa71d0115..6adf3d963320 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -341,7 +341,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 5d3f7a119ed1..1ff0b92eeae7 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -86,7 +86,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		/* At this point: (!vmm || addr < vmm->vm_end). */
 		if (TASK_SIZE - len < addr)
 			return -ENOMEM;
-		if (!vmm || addr + len <= vmm->vm_start)
+		if (!vmm || addr + len <= vm_start_gap(vmm))
 			return addr;
 		addr = vmm->vm_end;
 		if (flags & MAP_SHARED)
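All of the architecture hunks above route the old vma->vm_start comparison through the new vm_start_gap() helper. For reference, a sketch of that helper as it appears in the upstream stack-guard-gap fix (include/linux/mm.h); it is shown here for context only and is not one of this merge's hunks:

    /* Sketch of the upstream helper the hunks above call into.
     * For a downward-growing stack, the VMA "starts" stack_guard_gap
     * bytes earlier than vm_start, so a new mapping may not be placed
     * inside that gap. */
    extern unsigned long stack_guard_gap;	/* default: 256 pages */

    static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
    {
    	unsigned long vm_start = vma->vm_start;

    	if (vma->vm_flags & VM_GROWSDOWN) {
    		vm_start -= stack_guard_gap;
    		if (vm_start > vma->vm_start)	/* guard against underflow */
    			vm_start = 0;
    	}
    	return vm_start;
    }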
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 0ca54421ce97..25d113a93c4b 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -303,7 +303,7 @@ err:
  * global one. Requires architecture specific get_dev_cma_area() helper
  * function.
  */
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
 				       unsigned int align)
 {
 	unsigned long mask, pfn, pageno, start = 0;
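The int-to-size_t change above hardens the allocator's page-count argument. A minimal illustration of why a signed 32-bit count is fragile (values illustrative; plain userspace C, not kernel code):

    #include <stdio.h>

    int main(void)
    {
    	/* A page count that overflows a signed int but is perfectly
    	 * representable as size_t. */
    	unsigned long req = 0x80000000ul;	/* 2^31 pages */
    	int as_int = (int)req;		/* implementation-defined, typically negative */
    	size_t as_size = (size_t)req;	/* well-defined, stays large */

    	printf("int: %d, size_t: %zu\n", as_int, as_size);
    	return 0;
    }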
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 81a79b739e97..f21bc46cd1d2 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1767,7 +1767,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
 	if (r)
 		goto out;
 
-	param->data_size = sizeof(*param);
+	param->data_size = offsetof(struct dm_ioctl, data);
 	r = fn(param, input_param_size);
 
 	if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
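The dm-ioctl hunk above shrinks data_size from the whole struct to just its fixed header, so the variable payload area is not reported as valid output unless a handler actually fills it. A sketch of the sizeof/offsetof difference using a simplified stand-in struct (the field layout here is illustrative, not the kernel's exact dm_ioctl ABI):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-in: a fixed header followed by a variable payload. */
    struct dm_ioctl_like {
    	unsigned int version[3];
    	unsigned int data_size;		/* total bytes: header + payload */
    	unsigned int data_start;
    	char data[7];			/* variable-length payload area */
    };

    int main(void)
    {
    	/* sizeof(*param) counts the trailing data[] bytes too, which may be
    	 * uninitialised kernel memory; offsetof(..., data) covers only the
    	 * header that ctl_ioctl() itself populated. */
    	printf("sizeof: %zu, offsetof(data): %zu\n",
    	       sizeof(struct dm_ioctl_like),
    	       offsetof(struct dm_ioctl_like, data));
    	return 0;
    }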
diff --git a/drivers/net/wireless/bcmdhd/bcmevent.c b/drivers/net/wireless/bcmdhd/bcmevent.c
index 7bfc4e855279..235cbff23adb 100644
--- a/drivers/net/wireless/bcmdhd/bcmevent.c
+++ b/drivers/net/wireless/bcmdhd/bcmevent.c
@@ -155,7 +155,6 @@ static const bcmevent_name_str_t bcmevent_names[] = {
 	BCMEVENT_NAME(WLC_E_TXFAIL_THRESH),
 #ifdef GSCAN_SUPPORT
 	{ WLC_E_PFN_GSCAN_FULL_RESULT, "PFN_GSCAN_FULL_RESULT"},
-	{ WLC_E_PFN_SWC, "PFN_SIGNIFICANT_WIFI_CHANGE"}
 #endif /* GSCAN_SUPPORT */
 #ifdef WLBSSLOAD_REPORT
 	BCMEVENT_NAME(WLC_E_BSS_LOAD),
diff --git a/drivers/net/wireless/bcmdhd/dhd_common.c b/drivers/net/wireless/bcmdhd/dhd_common.c
index 70a842a1e36d..9ffb24f1f1d3 100644
--- a/drivers/net/wireless/bcmdhd/dhd_common.c
+++ b/drivers/net/wireless/bcmdhd/dhd_common.c
@@ -1350,7 +1350,6 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data,
 	case WLC_E_PFN_SCAN_NONE:
 	case WLC_E_PFN_SCAN_ALLGONE:
 	case WLC_E_PFN_GSCAN_FULL_RESULT:
-	case WLC_E_PFN_SWC:
 		DHD_EVENT(("PNOEVENT: %s\n", event_name));
 		break;
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.c b/drivers/net/wireless/bcmdhd/dhd_linux.c
index 98aae6eb1715..9f2be37ea7c7 100644
--- a/drivers/net/wireless/bcmdhd/dhd_linux.c
+++ b/drivers/net/wireless/bcmdhd/dhd_linux.c
@@ -5753,7 +5753,6 @@ dhd_preinit_ioctls(dhd_pub_t *dhd)
 #ifdef GSCAN_SUPPORT
 		setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
 		setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
-		setbit(eventmask_msg->mask, WLC_E_PFN_SWC);
 #endif /* GSCAN_SUPPORT */
 #ifdef BT_WIFI_HANDOVER
 		setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
@@ -7647,14 +7646,6 @@ int dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_f
 	return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
 }
 
-/* Linux wrapper to call common dhd_handle_swc_evt */
-void * dhd_dev_swc_scan_event(struct net_device *dev, const void *data, int *send_evt_bytes)
-{
-	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-
-	return (dhd_handle_swc_evt(&dhd->pub, data, send_evt_bytes));
-}
-
 /* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
 void * dhd_dev_hotlist_scan_event(struct net_device *dev,
 	const void *data, int *send_evt_bytes, hotlist_type_t type)
@@ -7666,11 +7657,12 @@ void * dhd_dev_hotlist_scan_event(struct net_device *dev,
 
 /* Linux wrapper to call common dhd_process_full_gscan_result */
 void * dhd_dev_process_full_gscan_result(struct net_device *dev,
-	const void *data, int *send_evt_bytes)
+	const void *data, uint32 len, int *send_evt_bytes)
 {
 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
 
-	return (dhd_process_full_gscan_result(&dhd->pub, data, send_evt_bytes));
+	return (dhd_process_full_gscan_result(&dhd->pub, data, len,
+		send_evt_bytes));
 }
 
 void dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
diff --git a/drivers/net/wireless/bcmdhd/dhd_pno.c b/drivers/net/wireless/bcmdhd/dhd_pno.c
index 17656262837c..4ce02099d283 100644
--- a/drivers/net/wireless/bcmdhd/dhd_pno.c
+++ b/drivers/net/wireless/bcmdhd/dhd_pno.c
@@ -92,6 +92,12 @@
 #define TIME_MIN_DIFF 5
 static wlc_ssid_ext_t * dhd_pno_get_legacy_pno_ssid(dhd_pub_t *dhd,
 	dhd_pno_status_info_t *pno_state);
+
+#define EVENT_DATABUF_MAXLEN	(512 - sizeof(bcm_event_t))
+#define EVENT_MAX_NETCNT \
+	((EVENT_DATABUF_MAXLEN - sizeof(wl_pfn_scanresults_t)) \
+	/ sizeof(wl_pfn_net_info_t) + 1)
+
 #ifdef GSCAN_SUPPORT
 static wl_pfn_gscan_channel_bucket_t *
 dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state,
@@ -943,34 +949,6 @@ exit:
 	return err;
 }
 
-#ifdef GSCAN_SUPPORT
-static int
-_dhd_pno_add_significant_bssid(dhd_pub_t *dhd,
-	wl_pfn_significant_bssid_t *p_pfn_significant_bssid, int nbssid)
-{
-	int err = BCME_OK;
-	NULL_CHECK(dhd, "dhd is NULL", err);
-
-	if (!nbssid) {
-		err = BCME_ERROR;
-		goto exit;
-	}
-
-	NULL_CHECK(p_pfn_significant_bssid, "bssid list is NULL", err);
-
-	err = dhd_iovar(dhd, 0, "pfn_add_swc_bssid",
-		(char *)p_pfn_significant_bssid,
-		sizeof(wl_pfn_significant_bssid_t) * nbssid, NULL, 0,
-		TRUE);
-	if (err < 0) {
-		DHD_ERROR(("%s : failed to execute pfn_significant_bssid %d\n", __FUNCTION__, err));
-		goto exit;
-	}
-exit:
-	return err;
-}
-#endif /* GSCAN_SUPPORT */
-
 int
 dhd_pno_stop_for_ssid(dhd_pub_t *dhd)
 {
@@ -1504,21 +1482,6 @@ static void dhd_pno_reset_cfg_gscan(dhd_pno_params_t *_params,
 		_params->params_gscan.nbssid_hotlist = 0;
 		DHD_PNO(("Flush Hotlist Config\n"));
 	}
-	if (flags & GSCAN_FLUSH_SIGNIFICANT_CFG)
-	{
-		dhd_pno_significant_bssid_t *iter, *next;
-
-		if (_params->params_gscan.nbssid_significant_change > 0) {
-			list_for_each_entry_safe(iter, next,
-				&_params->params_gscan.significant_bssid_list, list) {
-				list_del(&iter->list);
-				kfree(iter);
-			}
-		}
-		_params->params_gscan.nbssid_significant_change = 0;
-		DHD_PNO(("Flush Significant Change Config\n"));
-	}
-
 	return;
 }
@@ -1629,8 +1592,13 @@ void * dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
 			ptr->max_ap_cache_per_scan = 16;
 			ptr->max_rssi_sample_size = PFN_SWC_RSSI_WINDOW_MAX;
 			ptr->max_scan_reporting_threshold = 100;
-			ptr->max_hotlist_aps = PFN_HOTLIST_MAX_NUM_APS;
-			ptr->max_significant_wifi_change_aps = PFN_SWC_MAX_NUM_APS;
+			ptr->max_hotlist_bssids = PFN_HOTLIST_MAX_NUM_APS;
+			ptr->max_hotlist_ssids = 0;
+			ptr->max_significant_wifi_change_aps = 0;
+			ptr->max_bssid_history_entries = 0;
+			ptr->max_epno_ssid_crc32 = MAX_EPNO_SSID_NUM;
+			ptr->max_epno_hidden_ssid = MAX_EPNO_HIDDEN_SSID;
+			ptr->max_white_list_ssid = MAX_WHITELIST_SSID;
 			ret = (void *)ptr;
 			*len = sizeof(dhd_pno_gscan_capabilities_t);
 			break;
@@ -1743,10 +1711,10 @@ int dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
 				INIT_LIST_HEAD(&_params->params_gscan.hotlist_bssid_list);
 
 			if ((_params->params_gscan.nbssid_hotlist +
-					ptr->nbssid) > PFN_SWC_MAX_NUM_APS) {
+				ptr->nbssid) > PFN_SWC_MAX_NUM_APS) {
 				DHD_ERROR(("Excessive number of hotlist APs programmed %d\n",
-					(_params->params_gscan.nbssid_hotlist +
-					ptr->nbssid)));
+				 (_params->params_gscan.nbssid_hotlist +
+				  ptr->nbssid)));
 				err = BCME_RANGE;
 				goto exit;
 			}
@@ -1772,61 +1740,6 @@ int dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
 			_params->params_gscan.lost_ap_window = ptr->lost_ap_window;
 		}
 		break;
-	case DHD_PNO_SIGNIFICANT_SCAN_CFG_ID:
-		{
-			gscan_swc_params_t *ptr = (gscan_swc_params_t *)buf;
-			dhd_pno_significant_bssid_t *_pno_significant_change_bssid;
-			wl_pfn_significant_bssid_t *significant_bssid_ptr;
-
-			if (flush) {
-				dhd_pno_reset_cfg_gscan(_params, _pno_state,
-					GSCAN_FLUSH_SIGNIFICANT_CFG);
-			}
-
-			if (!ptr->nbssid)
-				break;
-
-			if (!_params->params_gscan.nbssid_significant_change)
-				INIT_LIST_HEAD(&_params->params_gscan.significant_bssid_list);
-
-			if ((_params->params_gscan.nbssid_significant_change +
-				ptr->nbssid) > PFN_SWC_MAX_NUM_APS) {
-				DHD_ERROR(("Excessive number of SWC APs programmed %d\n",
-					(_params->params_gscan.nbssid_significant_change +
-					ptr->nbssid)));
-				err = BCME_RANGE;
-				goto exit;
-			}
-
-			for (i = 0, significant_bssid_ptr = ptr->bssid_elem_list;
-				i < ptr->nbssid; i++, significant_bssid_ptr++) {
-				_pno_significant_change_bssid =
-					kzalloc(sizeof(dhd_pno_significant_bssid_t),
-					GFP_KERNEL);
-
-				if (!_pno_significant_change_bssid) {
-					DHD_ERROR(("SWC bssidptr is NULL, cannot kalloc %zd bytes",
-						sizeof(dhd_pno_significant_bssid_t)));
-					err = BCME_NOMEM;
-					goto exit;
-				}
-				memcpy(&_pno_significant_change_bssid->BSSID,
-					&significant_bssid_ptr->macaddr, ETHER_ADDR_LEN);
-				_pno_significant_change_bssid->rssi_low_threshold =
-					significant_bssid_ptr->rssi_low_threshold;
-				_pno_significant_change_bssid->rssi_high_threshold =
-					significant_bssid_ptr->rssi_high_threshold;
-				list_add_tail(&_pno_significant_change_bssid->list,
-					&_params->params_gscan.significant_bssid_list);
-			}
-
-			_params->params_gscan.swc_nbssid_threshold = ptr->swc_threshold;
-			_params->params_gscan.swc_rssi_window_size = ptr->rssi_window;
-			_params->params_gscan.lost_ap_window = ptr->lost_ap_window;
-			_params->params_gscan.nbssid_significant_change += ptr->nbssid;
-
-		}
-		break;
 	case DHD_PNO_SCAN_CFG_ID:
 		{
 			int i, k;
@@ -1925,7 +1838,6 @@ dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params)
 	dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
 	wl_pfn_gscan_channel_bucket_t *ch_bucket = NULL;
 	wl_pfn_gscan_cfg_t *pfn_gscan_cfg_t = NULL;
-	wl_pfn_significant_bssid_t *p_pfn_significant_bssid = NULL;
 	wl_pfn_bssid_t *p_pfn_bssid = NULL;
 	wlc_ssid_ext_t *pssid_list = NULL;
 	dhd_pno_params_t *params_legacy;
@@ -1996,7 +1908,8 @@ dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params)
 	gscan_param_size = sizeof(wl_pfn_gscan_cfg_t) +
 	          (num_buckets_to_fw - 1) * sizeof(wl_pfn_gscan_channel_bucket_t);
-	pfn_gscan_cfg_t = (wl_pfn_gscan_cfg_t *) MALLOC(dhd->osh, gscan_param_size);
+	pfn_gscan_cfg_t = (wl_pfn_gscan_cfg_t *)
+		MALLOCZ(dhd->osh, gscan_param_size);
 
 	if (!pfn_gscan_cfg_t) {
 		DHD_ERROR(("%s: failed to malloc memory of size %d\n",
@@ -2011,16 +1924,6 @@ dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params)
 	else
 		pfn_gscan_cfg_t->buffer_threshold = GSCAN_BATCH_NO_THR_SET;
 
-	if (gscan_params->nbssid_significant_change) {
-		pfn_gscan_cfg_t->swc_nbssid_threshold = gscan_params->swc_nbssid_threshold;
-		pfn_gscan_cfg_t->swc_rssi_window_size = gscan_params->swc_rssi_window_size;
-		pfn_gscan_cfg_t->lost_ap_window = gscan_params->lost_ap_window;
-	} else {
-		pfn_gscan_cfg_t->swc_nbssid_threshold = 0;
-		pfn_gscan_cfg_t->swc_rssi_window_size = 0;
-		pfn_gscan_cfg_t->lost_ap_window = 0;
-	}
-
 	pfn_gscan_cfg_t->flags =
 		(gscan_params->send_all_results_flag & GSCAN_SEND_ALL_RESULTS_MASK);
 	pfn_gscan_cfg_t->count_of_channel_buckets = num_buckets_to_fw;
@@ -2053,38 +1956,6 @@ dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params)
 			__FUNCTION__, err));
 		goto exit;
 	}
-	if (gscan_params->nbssid_significant_change) {
-		dhd_pno_significant_bssid_t *iter, *next;
-
-		p_pfn_significant_bssid = kzalloc(sizeof(wl_pfn_significant_bssid_t) *
-			gscan_params->nbssid_significant_change, GFP_KERNEL);
-		if (p_pfn_significant_bssid == NULL) {
-			DHD_ERROR(("%s : failed to allocate memory %zd\n",
-				__FUNCTION__,
-				sizeof(wl_pfn_significant_bssid_t) *
-				gscan_params->nbssid_significant_change));
-			err = BCME_NOMEM;
-			goto exit;
-		}
-		i = 0;
-		/* convert dhd_pno_significant_bssid_t to wl_pfn_significant_bssid_t */
-		list_for_each_entry_safe(iter, next, &gscan_params->significant_bssid_list, list) {
-			p_pfn_significant_bssid[i].rssi_low_threshold = iter->rssi_low_threshold;
-			p_pfn_significant_bssid[i].rssi_high_threshold = iter->rssi_high_threshold;
-			memcpy(&p_pfn_significant_bssid[i].macaddr, &iter->BSSID, ETHER_ADDR_LEN);
-			i++;
-		}
-
-		DHD_PNO(("nbssid_significant_change %d \n",
-			gscan_params->nbssid_significant_change));
-		err = _dhd_pno_add_significant_bssid(dhd, p_pfn_significant_bssid,
-			gscan_params->nbssid_significant_change);
-		if (err < 0) {
-			DHD_ERROR(("%s : failed to call _dhd_pno_add_significant_bssid(err :%d)\n",
-				__FUNCTION__, err));
-			goto exit;
-		}
-	}
 	if (gscan_params->nbssid_hotlist) {
 		struct dhd_pno_bssid *iter, *next;
@@ -2134,7 +2005,6 @@ exit:
 		}
 	}
 	kfree(pssid_list);
-	kfree(p_pfn_significant_bssid);
 	kfree(p_pfn_bssid);
 	if (pfn_gscan_cfg_t)
 		MFREE(dhd->osh, pfn_gscan_cfg_t, gscan_param_size);
@@ -3354,91 +3224,6 @@ int dhd_retreive_batch_scan_results(dhd_pub_t *dhd)
 	return err;
 }
 
-/* Handle Significant WiFi Change (SWC) event from FW
- * Send event to HAL when all results arrive from FW
- */
-void * dhd_handle_swc_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes)
-{
-	void *ptr = NULL;
-	dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
-	struct dhd_pno_gscan_params *gscan_params;
-	struct dhd_pno_swc_evt_param *params;
-	wl_pfn_swc_results_t *results = (wl_pfn_swc_results_t *)event_data;
-	wl_pfn_significant_net_t *change_array;
-	int i;
-
-	gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
-	params = &(gscan_params->param_significant);
-
-	if (!results->total_count) {
-		*send_evt_bytes = 0;
-		return ptr;
-	}
-
-	if (!params->results_rxed_so_far) {
-		if (!params->change_array) {
-			params->change_array = (wl_pfn_significant_net_t *)
-				kmalloc(sizeof(wl_pfn_significant_net_t) * results->total_count,
-				GFP_KERNEL);
-
-			if (!params->change_array) {
-				DHD_ERROR(("%s Cannot Malloc %zd bytes!!\n", __FUNCTION__,
-					sizeof(wl_pfn_significant_net_t) * results->total_count));
-				*send_evt_bytes = 0;
-				return ptr;
-			}
-		} else {
-			DHD_ERROR(("RX'ed WLC_E_PFN_SWC evt from FW, previous evt not complete!!"));
-			*send_evt_bytes = 0;
-			return ptr;
-		}
-
-	}
-
-	DHD_PNO(("%s: pkt_count %d total_count %d\n", __FUNCTION__,
-		results->pkt_count, results->total_count));
-
-	for (i = 0; i < results->pkt_count; i++) {
-		DHD_PNO(("\t %02x:%02x:%02x:%02x:%02x:%02x\n",
-			results->list[i].BSSID.octet[0],
-			results->list[i].BSSID.octet[1],
-			results->list[i].BSSID.octet[2],
-			results->list[i].BSSID.octet[3],
-			results->list[i].BSSID.octet[4],
-			results->list[i].BSSID.octet[5]));
-	}
-
-	change_array = &params->change_array[params->results_rxed_so_far];
-	if ((params->results_rxed_so_far + results->pkt_count) >
-		results->total_count) {
-		DHD_ERROR(("Error: Invalid data reset the counters!!\n"));
-		*send_evt_bytes = 0;
-		kfree(params->change_array);
-		params->change_array = NULL;
-		return ptr;
-	}
-
-	memcpy(change_array, results->list,
-		sizeof(wl_pfn_significant_net_t) * results->pkt_count);
-	params->results_rxed_so_far += results->pkt_count;
-
-	if (params->results_rxed_so_far == results->total_count) {
-		params->results_rxed_so_far = 0;
-		*send_evt_bytes = sizeof(wl_pfn_significant_net_t) * results->total_count;
-		/* Pack up change buffer to send up and reset
-		 * results_rxed_so_far, after its done.
-		 */
-		ptr = (void *) params->change_array;
-		/* expecting the callee to free this mem chunk */
-		params->change_array = NULL;
-	}
-	else {
-		*send_evt_bytes = 0;
-	}
-
-	return ptr;
-}
-
 void dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type)
 {
 	dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
@@ -3467,7 +3252,8 @@ void dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type)
 }
 
 void *
-dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *data, int *size)
+dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *data, uint32 len,
+	int *size)
 {
 	wl_bss_info_t *bi = NULL;
 	wl_gscan_result_t *gscan_result;
@@ -3476,15 +3262,25 @@
 	uint8 channel;
 	uint32 mem_needed;
 	struct timespec ts;
+	u32 bi_ie_length = 0;
+	u32 bi_ie_offset = 0;
 
 	*size = 0;
-
 	gscan_result = (wl_gscan_result_t *)data;
-
 	if (!gscan_result) {
 		DHD_ERROR(("Invalid gscan result (NULL pointer)\n"));
 		goto exit;
 	}
+
+	if ((len < sizeof(*gscan_result)) ||
+	    (len < dtoh32(gscan_result->buflen)) ||
+	    (dtoh32(gscan_result->buflen) >
+	    (sizeof(*gscan_result) + WL_SCAN_IE_LEN_MAX))) {
+		DHD_ERROR(("%s: invalid gscan buflen:%u\n", __func__,
+			dtoh32(gscan_result->buflen)));
+		goto exit;
+	}
+
 	if (!gscan_result->bss_info) {
 		DHD_ERROR(("Invalid gscan bss info (NULL pointer)\n"));
 		goto exit;
@@ -3496,9 +3292,18 @@
 		DHD_ERROR(("Invalid bss_info length %d: ignoring\n", bi_length));
 		goto exit;
 	}
+
+	bi_ie_offset = dtoh32(bi->ie_offset);
+	bi_ie_length = dtoh32(bi->ie_length);
+	if ((bi_ie_offset + bi_ie_length) > bi_length) {
+		DHD_ERROR(("%s: Invalid ie_length:%u or ie_offset:%u\n",
+			__func__, bi_ie_length, bi_ie_offset));
+		goto exit;
+	}
 	if (bi->SSID_len > DOT11_MAX_SSID_LEN) {
-		DHD_ERROR(("Invalid SSID length %d: trimming it to max\n", bi->SSID_len));
-		bi->SSID_len = DOT11_MAX_SSID_LEN;
+		DHD_ERROR(("%s: Invalid SSID length %u\n",
+			__func__, bi->SSID_len));
+		goto exit;
 	}
 
 	mem_needed = OFFSETOF(wifi_gscan_result_t, ie_data) + bi->ie_length;
@@ -3545,7 +3350,9 @@ void *dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, int *s
 
 	gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
 
-	if (!results->count) {
+	if ((results->count == 0) || (results->count > EVENT_MAX_NETCNT)) {
+		DHD_ERROR(("%s: wrong count:%d\n", __FUNCTION__,
+			results->count));
 		*send_evt_bytes = 0;
 		return ptr;
 	}
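The EVENT_MAX_NETCNT bound introduced above caps results->count against what a 512-byte firmware event can physically carry, so an oversized count from a malicious or buggy firmware image is rejected before anything indexes past the buffer. A worked version of that arithmetic with stand-in sizes (the real sizeof values come from the driver headers and will differ):

    #include <stdio.h>

    int main(void)
    {
    	/* Hypothetical stand-ins for sizeof(bcm_event_t),
    	 * sizeof(wl_pfn_scanresults_t) and sizeof(wl_pfn_net_info_t). */
    	const unsigned event_hdr = 48;
    	const unsigned results_hdr = 20;
    	const unsigned net_info = 28;

    	unsigned databuf_max = 512 - event_hdr;	/* EVENT_DATABUF_MAXLEN */
    	unsigned max_netcnt = (databuf_max - results_hdr) / net_info + 1;

    	/* Any results->count above this cannot fit in the event payload
    	 * and is treated as malformed by dhd_handle_hotlist_scan_evt(). */
    	printf("EVENT_MAX_NETCNT = %u\n", max_netcnt);
    	return 0;
    }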
diff --git a/drivers/net/wireless/bcmdhd/dhd_pno.h b/drivers/net/wireless/bcmdhd/dhd_pno.h
index b1357f9e4e11..d1aa4a788309 100644
--- a/drivers/net/wireless/bcmdhd/dhd_pno.h
+++ b/drivers/net/wireless/bcmdhd/dhd_pno.h
@@ -86,6 +86,7 @@
 #define CHANNEL_BUCKET_EMPTY_INDEX 0xFFFF
 #define GSCAN_RETRY_THRESHOLD 3
+#define MAX_EPNO_SSID_NUM 64
 #endif /* GSCAN_SUPPORT */
 
 enum scan_status {
@@ -301,13 +302,18 @@ typedef struct gscan_results_cache {
 } gscan_results_cache_t;
 
 typedef struct dhd_pno_gscan_capabilities {
-	int max_scan_cache_size;
-	int max_scan_buckets;
-	int max_ap_cache_per_scan;
-	int max_rssi_sample_size;
-	int max_scan_reporting_threshold;
-	int max_hotlist_aps;
-	int max_significant_wifi_change_aps;
+	int max_scan_cache_size;
+	int max_scan_buckets;
+	int max_ap_cache_per_scan;
+	int max_rssi_sample_size;
+	int max_scan_reporting_threshold;
+	int max_hotlist_bssids;
+	int max_hotlist_ssids;
+	int max_significant_wifi_change_aps;
+	int max_bssid_history_entries;
+	int max_epno_ssid_crc32;
+	int max_epno_hidden_ssid;
+	int max_white_list_ssid;
 } dhd_pno_gscan_capabilities_t;
 
 struct dhd_pno_gscan_params {
@@ -357,26 +363,6 @@ typedef struct gscan_hotlist_scan_params {
 	struct bssid_t bssid[1];  /* n bssids to follow */
 } gscan_hotlist_scan_params_t;
 
-/* SWC (Significant WiFi Change) params */
-typedef struct gscan_swc_params {
-	/* Rssi averaging window size */
-	uint8 rssi_window;
-	/* Number of scans that the AP has to be absent before
-	 * being declared LOST
-	 */
-	uint8 lost_ap_window;
-	/* if x Aps have a significant change generate an event. */
-	uint8 swc_threshold;
-	uint8 nbssid;
-	wl_pfn_significant_bssid_t bssid_elem_list[1];
-} gscan_swc_params_t;
-
-typedef struct dhd_pno_significant_bssid {
-	struct ether_addr BSSID;
-	int8 rssi_low_threshold;
-	int8 rssi_high_threshold;
-	struct list_head list;
-} dhd_pno_significant_bssid_t;
 #endif /* GSCAN_SUPPORT */
 typedef union dhd_pno_params {
 	struct dhd_pno_legacy_params params_legacy;
@@ -438,13 +424,11 @@ void dhd_dev_pno_lock_access_batch_results(struct net_device *dev);
 void dhd_dev_pno_unlock_access_batch_results(struct net_device *dev);
 extern int dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush);
 extern int dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time);
-extern void * dhd_dev_swc_scan_event(struct net_device *dev, const void *data,
-	int *send_evt_bytes);
 int dhd_retreive_batch_scan_results(dhd_pub_t *dhd);
 extern void * dhd_dev_hotlist_scan_event(struct net_device *dev,
 	const void *data, int *send_evt_bytes, hotlist_type_t type);
 void * dhd_dev_process_full_gscan_result(struct net_device *dev,
-	const void *data, int *send_evt_bytes);
+	const void *data, uint32 len, int *send_evt_bytes);
 extern int dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev);
 extern void dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type);
 extern void dhd_dev_wait_batch_results_complete(struct net_device *dev);
@@ -483,11 +467,11 @@ extern int dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush);
 extern int dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag);
 extern int dhd_pno_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *buf);
 extern int dhd_dev_retrieve_batch_scan(struct net_device *dev);
-extern void *dhd_handle_swc_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes);
 extern void *dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data,
 	int *send_evt_bytes, hotlist_type_t type);
-extern void *dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *event_data,
-	int *send_evt_bytes);
+extern void *
+dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *event_data,
+	uint32 len, int *send_evt_bytes);
 extern int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd);
 extern void dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type);
 extern void dhd_wait_batch_results_complete(dhd_pub_t *dhd);
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
index 156c3402d1a4..95a2ba4f247a 100644
--- a/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
@@ -236,7 +236,7 @@ typedef union bcm_event_msg_u {
 #define WLC_E_FBT_AUTH_REQ_IND	132	/* FBT Authentication Request Indication */
 #define WLC_E_RSSI_LQM		133	/* Enhancement addition for WLC_E_RSSI */
 #define WLC_E_PFN_GSCAN_FULL_RESULT	134	/* Full probe/beacon (IEs etc) results */
-#define WLC_E_PFN_SWC		135	/* Significant change in rssi of bssids being tracked */
+/* 135 was legacy entry for WLC_E_PFN_SWC can be reused */
 #define WLC_E_PFN_SCAN_COMPLETE	138	/* PFN completed scan of network list */
 #define WLC_E_RMC_EVENT		139	/* RMC event */
 #define WLC_E_LAST		140	/* highest val + 1 for range checking */
diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl.h b/drivers/net/wireless/bcmdhd/include/wlioctl.h
index 890abfec86d7..4c2646fe306e 100644
--- a/drivers/net/wireless/bcmdhd/include/wlioctl.h
+++ b/drivers/net/wireless/bcmdhd/include/wlioctl.h
@@ -2544,6 +2544,9 @@ enum {
 #define PFN_SWC_MAX_NUM_APS	16
 #define PFN_HOTLIST_MAX_NUM_APS	64
 
+#define MAX_EPNO_HIDDEN_SSID	8
+#define MAX_WHITELIST_SSID	2
+
 /* PFN network info structure */
 typedef struct wl_pfn_subnet_info {
 	struct ether_addr BSSID;
@@ -2635,12 +2638,6 @@ typedef struct wl_pfn_bssid {
 	uint16	flags;
 } wl_pfn_bssid_t;
 
-typedef struct wl_pfn_significant_bssid {
-	struct ether_addr	macaddr;
-	int8	rssi_low_threshold;
-	int8	rssi_high_threshold;
-} wl_pfn_significant_bssid_t;
-
 #define WL_PFN_SUPPRESSFOUND_MASK	0x08
 #define WL_PFN_SUPPRESSLOST_MASK	0x10
 #define WL_PFN_RSSI_MASK		0xff00
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.c b/drivers/net/wireless/bcmdhd/wl_cfg80211.c
index c0a97283980e..942a6d5ee4aa 100644
--- a/drivers/net/wireless/bcmdhd/wl_cfg80211.c
+++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.c
@@ -9066,14 +9066,6 @@ wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
 	u32 len = ntoh32(e->datalen);
 
 	switch (event) {
-		case WLC_E_PFN_SWC:
-			ptr = dhd_dev_swc_scan_event(ndev, data, &send_evt_bytes);
-			if (send_evt_bytes) {
-				wl_cfgvendor_send_async_event(wiphy, ndev,
-				    GOOGLE_GSCAN_SIGNIFICANT_EVENT, ptr, send_evt_bytes);
-				kfree(ptr);
-			}
-			break;
 		case WLC_E_PFN_BEST_BATCHING:
 			err = dhd_dev_retrieve_batch_scan(ndev);
 			if (err < 0) {
@@ -9114,10 +9106,13 @@ wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
 			}
 			break;
 		case WLC_E_PFN_GSCAN_FULL_RESULT:
-			ptr = dhd_dev_process_full_gscan_result(ndev, data, &send_evt_bytes);
+			ptr =
+			    dhd_dev_process_full_gscan_result(ndev, data, len,
+				&send_evt_bytes);
 			if (ptr) {
 				wl_cfgvendor_send_async_event(wiphy, ndev,
-				    GOOGLE_SCAN_FULL_RESULTS_EVENT, ptr, send_evt_bytes);
+				    GOOGLE_SCAN_FULL_RESULTS_EVENT, ptr,
+				    send_evt_bytes);
 				kfree(ptr);
 			}
 			break;
@@ -9296,9 +9291,15 @@ wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
 	u32 event = ntoh32(e->event_type);
 	u8 *mgmt_frame;
 	u8 bsscfgidx = e->bsscfgidx;
-	u32 mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t);
+	u32 mgmt_frame_len = ntoh32(e->datalen);
 	u16 channel = ((ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK));
 
+	if (mgmt_frame_len < sizeof(wl_event_rx_frame_data_t)) {
+		WL_ERR(("wrong datalen:%d\n", mgmt_frame_len));
+		return -EINVAL;
+	}
+	mgmt_frame_len -= sizeof(wl_event_rx_frame_data_t);
+
 	memset(&bssid, 0, ETHER_ADDR_LEN);
 
 	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
@@ -9420,7 +9421,11 @@ wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
 		WL_DBG((" Event WLC_E_PROBREQ_MSG received\n"));
 		mgmt_frame = (u8 *)(data);
 		mgmt_frame_len = ntoh32(e->datalen);
-
+		if (mgmt_frame_len < DOT11_MGMT_HDR_LEN) {
+			WL_ERR(("WLC_E_PROBREQ_MSG - wrong datalen:%d\n",
+				mgmt_frame_len));
+			return -EINVAL;
+		}
 		prbreq_ie_len = mgmt_frame_len - DOT11_MGMT_HDR_LEN;
 
 		/* Parse prob_req IEs */
@@ -9658,7 +9663,6 @@ static void wl_init_event_handler(struct bcm_cfg80211 *cfg)
 	cfg->evt_handler[WLC_E_PFN_BEST_BATCHING] = wl_notify_gscan_event;
 	cfg->evt_handler[WLC_E_PFN_SCAN_COMPLETE] = wl_notify_gscan_event;
 	cfg->evt_handler[WLC_E_PFN_GSCAN_FULL_RESULT] = wl_notify_gscan_event;
-	cfg->evt_handler[WLC_E_PFN_SWC] = wl_notify_gscan_event;
 	cfg->evt_handler[WLC_E_PFN_BSSID_NET_FOUND] = wl_notify_gscan_event;
 	cfg->evt_handler[WLC_E_PFN_BSSID_NET_LOST] = wl_notify_gscan_event;
 #endif /* GSCAN_SUPPORT */
@@ -10059,6 +10063,13 @@ static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
 		WL_ERR(("Invalid escan result (NULL pointer)\n"));
 		goto exit;
 	}
+	if ((dtoh32(escan_result->buflen) > ESCAN_BUF_SIZE) ||
+	    (dtoh32(escan_result->buflen) <
+	    sizeof(wl_escan_result_t))) {
+		WL_ERR(("Invalid escan buffer len:%d\n",
+			dtoh32(escan_result->buflen)));
+		goto exit;
+	}
 	if (dtoh16(escan_result->bss_count) != 1) {
 		WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
 		goto exit;
	}
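The wl_cfg80211.c fixes above all follow one pattern: validate an attacker-influenced length against the fixed header it must at least contain before subtracting, because unsigned arithmetic would otherwise wrap to a huge value and index far past the buffer. A minimal, generic sketch of that pattern (illustrative names and types, not the driver's):

    #include <stdint.h>
    #include <stddef.h>

    /* Refuse the frame if datalen cannot even hold the fixed header,
     * then subtract safely; without the check, datalen - hdr_len on a
     * short frame wraps around to ~4 GiB. */
    static int payload_len(uint32_t datalen, size_t hdr_len, uint32_t *out)
    {
    	if (datalen < hdr_len)
    		return -1;	/* would underflow: reject as malformed */
    	*out = datalen - (uint32_t)hdr_len;
    	return 0;
    }

    int main(void)
    {
    	uint32_t out;
    	return payload_len(10, 24, &out) == -1 ? 0 : 1;	/* short frame rejected */
    }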
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgvendor.c b/drivers/net/wireless/bcmdhd/wl_cfgvendor.c
index 676eeafbc859..55094340b00d 100644
--- a/drivers/net/wireless/bcmdhd/wl_cfgvendor.c
+++ b/drivers/net/wireless/bcmdhd/wl_cfgvendor.c
@@ -846,105 +846,6 @@ static int wl_cfgvendor_set_batch_scan_cfg(struct wiphy *wiphy,
 	return err;
 }
 
-static int wl_cfgvendor_significant_change_cfg(struct wiphy *wiphy,
-	struct wireless_dev *wdev, const void *data, int len)
-{
-	int err = 0;
-	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
-	gscan_swc_params_t *significant_params;
-	int tmp, tmp1, tmp2, type, j = 0;
-	const struct nlattr *outer, *inner, *iter;
-	uint8 flush = 0;
-	wl_pfn_significant_bssid_t *pbssid;
-	uint16 num_bssid = 0;
-	uint16 max_buf_size = sizeof(gscan_swc_params_t) +
-		sizeof(wl_pfn_significant_bssid_t) * (PFN_SWC_MAX_NUM_APS - 1);
-
-	significant_params = kzalloc(max_buf_size, GFP_KERNEL);
-
-	if (!significant_params) {
-		WL_ERR(("Cannot Malloc mem size:%d\n", len));
-		return BCME_NOMEM;
-	}
-
-
-	nla_for_each_attr(iter, data, len, tmp2) {
-		type = nla_type(iter);
-
-		switch (type) {
-			case GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH:
-				flush = nla_get_u8(iter);
-				break;
-			case GSCAN_ATTRIBUTE_RSSI_SAMPLE_SIZE:
-				significant_params->rssi_window = nla_get_u16(iter);
-				break;
-			case GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE:
-				significant_params->lost_ap_window = nla_get_u16(iter);
-				break;
-			case GSCAN_ATTRIBUTE_MIN_BREACHING:
-				significant_params->swc_threshold = nla_get_u16(iter);
-				break;
-			case GSCAN_ATTRIBUTE_NUM_BSSID:
-				num_bssid = nla_get_u16(iter);
-				if (num_bssid > PFN_SWC_MAX_NUM_APS) {
-					WL_ERR(("ovar max SWC bssids:%d\n",
-						num_bssid));
-					err = BCME_BADARG;
-					goto exit;
-				}
-				break;
-			case GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_BSSIDS:
-				if (num_bssid == 0) {
-					WL_ERR(("num_bssid : 0\n"));
-					err = BCME_BADARG;
-					goto exit;
-				}
-				pbssid = significant_params->bssid_elem_list;
-				nla_for_each_nested(outer, iter, tmp) {
-					if (j >= num_bssid) {
-						j++;
-						break;
-					}
-					nla_for_each_nested(inner, outer, tmp1) {
-						switch (nla_type(inner)) {
-							case GSCAN_ATTRIBUTE_BSSID:
-								memcpy(&(pbssid[j].macaddr),
-									nla_data(inner),
-									ETHER_ADDR_LEN);
-								break;
-							case GSCAN_ATTRIBUTE_RSSI_HIGH:
-								pbssid[j].rssi_high_threshold =
-									(int8) nla_get_u8(inner);
-								break;
-							case GSCAN_ATTRIBUTE_RSSI_LOW:
-								pbssid[j].rssi_low_threshold =
-									(int8) nla_get_u8(inner);
-								break;
-						}
-					}
-					j++;
-				}
-				break;
-		}
-	}
-	if (j != num_bssid) {
-		WL_ERR(("swc bssids count:%d not matched to num_bssid:%d\n",
-			j, num_bssid));
-		err = BCME_BADARG;
-		goto exit;
-	}
-	significant_params->nbssid = j;
-
-	if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
-		DHD_PNO_SIGNIFICANT_SCAN_CFG_ID, significant_params, flush) < 0) {
-		WL_ERR(("Could not set GSCAN significant cfg\n"));
-		err = -EINVAL;
-		goto exit;
-	}
-exit:
-	kfree(significant_params);
-	return err;
-}
 #endif /* GSCAN_SUPPORT */
 
 #ifdef RTT_SUPPORT
@@ -1449,14 +1350,6 @@ static const struct wiphy_vendor_command wl_vendor_cmds [] = {
 	{
 		{
 			.vendor_id = OUI_GOOGLE,
-			.subcmd = GSCAN_SUBCMD_SET_SIGNIFICANT_CHANGE_CONFIG
-		},
-		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-		.doit = wl_cfgvendor_significant_change_cfg
-	},
-	{
-		{
-			.vendor_id = OUI_GOOGLE,
 			.subcmd = GSCAN_SUBCMD_GET_SCAN_RESULTS
 		},
 		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 02809cceceb4..9d1d13aa51a1 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -486,7 +486,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
 			old_hdr->result = EIO;
 			break;
 		case DID_ERROR:
-			old_hdr->result = (srp->sense_b[0] == 0 && 
+			old_hdr->result = (srp->sense_b[0] == 0 &&
 					   hp->masked_status == GOOD) ? 0 : EIO;
 			break;
 		default:
@@ -832,8 +832,10 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 			return -ENXIO;
 		if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
 			return -EFAULT;
+		mutex_lock(&sfp->parentdp->open_rel_lock);
 		result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
 				 1, read_only, 1, &srp);
+		mutex_unlock(&sfp->parentdp->open_rel_lock);
 		if (result < 0)
 			return result;
 		result = wait_event_interruptible(sfp->read_wait,
@@ -873,8 +875,10 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 			sfp->low_dma = 1;
 			if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
 				val = (int) sfp->reserve.bufflen;
+				mutex_lock(&sfp->parentdp->open_rel_lock);
 				sg_remove_scat(&sfp->reserve);
 				sg_build_reserve(sfp, val);
+				mutex_unlock(&sfp->parentdp->open_rel_lock);
 			}
 		} else {
 			if (sdp->detached)
@@ -942,15 +946,17 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 		result = get_user(val, ip);
 		if (result)
 			return result;
-		if (val < 0)
-			return -EINVAL;
+		if (val < 0)
+			return -EINVAL;
 		val = min_t(int, val,
 			    queue_max_sectors(sdp->device->request_queue) * 512);
 		if (val != sfp->reserve.bufflen) {
 			if (sg_res_in_use(sfp) || sfp->mmap_called)
 				return -EBUSY;
+			mutex_lock(&sfp->parentdp->open_rel_lock);
 			sg_remove_scat(&sfp->reserve);
 			sg_build_reserve(sfp, val);
+			mutex_unlock(&sfp->parentdp->open_rel_lock);
 		}
 		return 0;
 	case SG_GET_RESERVED_SIZE:
@@ -1003,8 +1009,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 				if (srp) {
 					rinfo[val].req_state = srp->done + 1;
 					rinfo[val].problem =
-					    srp->header.masked_status & 
-					    srp->header.host_status & 
+					    srp->header.masked_status &
+					    srp->header.host_status &
 					    srp->header.driver_status;
 					if (srp->done)
 						rinfo[val].duration =
@@ -1025,7 +1031,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 				}
 			}
 			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
-			result = __copy_to_user(p, rinfo, 
+			result = __copy_to_user(p, rinfo,
 					SZ_SG_REQ_INFO * SG_MAX_QUEUE);
 			result = result ? -EFAULT : 0;
 			kfree(rinfo);
@@ -1127,14 +1133,14 @@ static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned lon
 		return -ENXIO;
 
 	sdev = sdp->device;
-	if (sdev->host->hostt->compat_ioctl) { 
+	if (sdev->host->hostt->compat_ioctl) {
 		int ret;
 
 		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
 
 		return ret;
 	}
-	
+
 	return -ENOIOCTLCMD;
 }
 #endif
@@ -1594,7 +1600,7 @@ init_sg(void)
 	else
 		def_reserved_size = sg_big_buff;
 
-	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), 
+	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
 				    SG_MAX_DEVS, "sg");
 	if (rc)
 		return rc;
@@ -2234,7 +2240,7 @@ static const struct file_operations adio_fops = {
 };
 
 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
-static ssize_t sg_proc_write_dressz(struct file *filp, 
+static ssize_t sg_proc_write_dressz(struct file *filp,
 		const char __user *buffer, size_t count, loff_t *off);
 static const struct file_operations dressz_fops = {
 	.owner = THIS_MODULE,
@@ -2374,7 +2380,7 @@ static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
 	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
 }
 
-static ssize_t 
+static ssize_t
 sg_proc_write_adio(struct file *filp, const char __user *buffer,
 		   size_t count, loff_t *off)
 {
@@ -2395,7 +2401,7 @@ static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
 	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
 }
 
-static ssize_t 
+static ssize_t
 sg_proc_write_dressz(struct file *filp, const char __user *buffer,
 		     size_t count, loff_t *off)
 {
@@ -2552,7 +2558,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
 			hp = &srp->header;
 			new_interface = (hp->interface_id == '\0') ? 0 : 1;
 			if (srp->res_used) {
-				if (new_interface && 
+				if (new_interface &&
 				    (SG_FLAG_MMAP_IO & hp->flags))
 					cp = "     mmap>> ";
 				else
@@ -2566,7 +2572,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
 			seq_printf(s, cp);
 			blen = srp->data.bufflen;
 			usg = srp->data.k_use_sg;
-			seq_printf(s, srp->done ? 
+			seq_printf(s, srp->done ?
 				   ((1 == srp->done) ?  "rcv:" : "fin:") : "act:");
 			seq_printf(s, " id=%d blen=%d",
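The substantive sg.c hunks (the rest are whitespace cleanups) serialise teardown and rebuild of the per-fd reserve buffer under open_rel_lock, so a concurrent open/release path cannot observe the buffer half-destroyed. A distilled userspace sketch of that pattern (generic mutex and resource names, not the sg driver's types):

    #include <pthread.h>
    #include <stdlib.h>

    /* Distilled version of the locking added around sg_remove_scat()/
     * sg_build_reserve(): destroy and rebuild a shared buffer only while
     * holding the same lock the open/release paths take. */
    struct reserve {
    	pthread_mutex_t open_rel_lock;
    	void *buf;
    	size_t len;
    };

    static void resize_reserve(struct reserve *r, size_t new_len)
    {
    	pthread_mutex_lock(&r->open_rel_lock);
    	free(r->buf);			/* cf. sg_remove_scat() */
    	r->buf = malloc(new_len);	/* cf. sg_build_reserve() */
    	r->len = r->buf ? new_len : 0;
    	pthread_mutex_unlock(&r->open_rel_lock);
    }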
diff --git a/drivers/staging/imgtec/config_kernel.h b/drivers/staging/imgtec/config_kernel.h
index 9381f4cc8020..5d327d79643d 100644
--- a/drivers/staging/imgtec/config_kernel.h
+++ b/drivers/staging/imgtec/config_kernel.h
@@ -82,11 +82,13 @@
 #define PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD 90
 #define PVRSRV_ENABLE_MEMTRACK_STATS_FILE
 #define PVR_LINUX_PHYSMEM_MAX_POOL_PAGES 5120
-#define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 10240
+#define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 16384
+#define PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES
 #define PVR_DIRTY_BYTES_FLUSH_THRESHOLD 1048576
-#define PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD 256
-#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 2
-#define PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD 16384
+#define PVR_DIRTY_PAGECOUNT_FLUSH_THRESHOLD 256
+#define PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD 256
+#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 2
+#define PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD 16384
 #define SUPPORT_KERNEL_SRVINIT
 #define SUPPORT_NATIVE_FENCE_SYNC
 #define PVR_DRM_NAME "pvr"
-EFAULT : 0; kfree(rinfo); @@ -1127,14 +1133,14 @@ static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned lon return -ENXIO; sdev = sdp->device; - if (sdev->host->hostt->compat_ioctl) { + if (sdev->host->hostt->compat_ioctl) { int ret; ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg); return ret; } - + return -ENOIOCTLCMD; } #endif @@ -1594,7 +1600,7 @@ init_sg(void) else def_reserved_size = sg_big_buff; - rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), + rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS, "sg"); if (rc) return rc; @@ -2234,7 +2240,7 @@ static const struct file_operations adio_fops = { }; static int sg_proc_single_open_dressz(struct inode *inode, struct file *file); -static ssize_t sg_proc_write_dressz(struct file *filp, +static ssize_t sg_proc_write_dressz(struct file *filp, const char __user *buffer, size_t count, loff_t *off); static const struct file_operations dressz_fops = { .owner = THIS_MODULE, @@ -2374,7 +2380,7 @@ static int sg_proc_single_open_adio(struct inode *inode, struct file *file) return single_open(file, sg_proc_seq_show_int, &sg_allow_dio); } -static ssize_t +static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, size_t count, loff_t *off) { @@ -2395,7 +2401,7 @@ static int sg_proc_single_open_dressz(struct inode *inode, struct file *file) return single_open(file, sg_proc_seq_show_int, &sg_big_buff); } -static ssize_t +static ssize_t sg_proc_write_dressz(struct file *filp, const char __user *buffer, size_t count, loff_t *off) { @@ -2552,7 +2558,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) hp = &srp->header; new_interface = (hp->interface_id == '\0') ? 0 : 1; if (srp->res_used) { - if (new_interface && + if (new_interface && (SG_FLAG_MMAP_IO & hp->flags)) cp = " mmap>> "; else @@ -2566,7 +2572,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) seq_printf(s, cp); blen = srp->data.bufflen; usg = srp->data.k_use_sg; - seq_printf(s, srp->done ? + seq_printf(s, srp->done ? ((1 == srp->done) ? 
"rcv:" : "fin:") : "act:"); seq_printf(s, " id=%d blen=%d", diff --git a/drivers/staging/imgtec/config_kernel.h b/drivers/staging/imgtec/config_kernel.h index 9381f4cc8020..5d327d79643d 100644 --- a/drivers/staging/imgtec/config_kernel.h +++ b/drivers/staging/imgtec/config_kernel.h @@ -82,11 +82,13 @@ #define PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD 90 #define PVRSRV_ENABLE_MEMTRACK_STATS_FILE #define PVR_LINUX_PHYSMEM_MAX_POOL_PAGES 5120 -#define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 10240 +#define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 16384 +#define PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES #define PVR_DIRTY_BYTES_FLUSH_THRESHOLD 1048576 -#define PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD 256 -#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 2 -#define PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD 16384 +#define PVR_DIRTY_PAGECOUNT_FLUSH_THRESHOLD 256 +#define PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD 256 +#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 2 +#define PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD 16384 #define SUPPORT_KERNEL_SRVINIT #define SUPPORT_NATIVE_FENCE_SYNC #define PVR_DRM_NAME "pvr" diff --git a/drivers/staging/imgtec/rogue/physmem_osmem_linux.c b/drivers/staging/imgtec/rogue/physmem_osmem_linux.c index 6231b443dd97..6b4a365b2832 100644 --- a/drivers/staging/imgtec/rogue/physmem_osmem_linux.c +++ b/drivers/staging/imgtec/rogue/physmem_osmem_linux.c @@ -90,6 +90,8 @@ static IMG_UINT32 g_uiMaxOrder = PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM; #define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 0 static IMG_UINT32 g_uiMaxOrder = 0; #endif +/* Defines how many pages should be mapped at once to the kernel */ +#define PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES 1024 /* 4 MB */ /* Get/Set/Mask out alloc_page/dma_alloc flag */ #define DMA_GET_ADDR(x) (((x) >> 1) << 1) @@ -197,7 +199,11 @@ _ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode, IMG_BOOL bFlush, IMG_UINT32 ui32CPUCacheFlags); -/* A struct for our page pool holding an array of pages. +static inline unsigned int +_GetGFPFlags(IMG_BOOL bZero, + PVRSRV_DEVICE_NODE *psDevNode); + +/* A struct for our page pool holding an array of zeroed (!) pages. * We always put units of page arrays to the pool but are * able to take individual pages */ typedef struct @@ -207,10 +213,19 @@ typedef struct /* How many items are still in the page array */ IMG_UINT32 uiItemsRemaining; + /* Array of the actual pages */ struct page **ppsPageArray; } LinuxPagePoolEntry; +/* CleanupThread structure to put allocation in page pool */ +typedef struct +{ + PVRSRV_CLEANUP_THREAD_WORK sCleanupWork; + IMG_UINT32 ui32CPUCacheMode; + LinuxPagePoolEntry *psPoolEntry; +} LinuxCleanupData; + /* A struct for the unpinned items */ typedef struct { @@ -222,19 +237,36 @@ typedef struct static struct kmem_cache *g_psLinuxPagePoolCache = NULL; static struct kmem_cache *g_psLinuxPageArray = NULL; -/* Track what is live */ +/* Track what is live, all protected by pool lock. + * x86 needs two page pools because we have to change the memory attributes + * of the pages which is expensive due to an implicit flush. + * See set_pages_array_uc/wc/wb. */ static IMG_UINT32 g_ui32UnpinPageCount = 0; -static IMG_UINT32 g_ui32PagePoolEntryCount = 0; - -/* Pool entry limits */ +static IMG_UINT32 g_ui32PagePoolUCCount = 0; +#if defined(CONFIG_X86) +static IMG_UINT32 g_ui32PagePoolWCCount = 0; +#endif +/* Tracks asynchronous tasks currently accessing the page pool. + * It is incremented if a defer free task + * is created. Both will decrement the value when they finished the work. 
+ * The atomic prevents piling up of deferred work in case the deferred thread + * cannot keep up with the applicaiton.*/ +static ATOMIC_T g_iPoolCleanTasks; +/* We don't want too many asynchronous threads trying to access the page pool + * at the same time */ +#define PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS 128 + +/* Defines how many pages the page cache should hold. */ #if defined(PVR_LINUX_PHYSMEM_MAX_POOL_PAGES) static const IMG_UINT32 g_ui32PagePoolMaxEntries = PVR_LINUX_PHYSMEM_MAX_POOL_PAGES; -static const IMG_UINT32 g_ui32PagePoolMaxEntries_5Percent= PVR_LINUX_PHYSMEM_MAX_POOL_PAGES / 20; #else static const IMG_UINT32 g_ui32PagePoolMaxEntries = 0; -static const IMG_UINT32 g_ui32PagePoolMaxEntries_5Percent = 0; #endif +/* We double check if we would exceed this limit if we are below MAX_POOL_PAGES + and want to add an allocation to the pool. + This prevents big allocations being given back to the OS just because they + exceed the MAX_POOL_PAGES limit even though the pool is currently empty. */ #if defined(PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES) static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES; #else @@ -242,16 +274,14 @@ static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = 0; #endif #if defined(CONFIG_X86) -#define PHYSMEM_OSMEM_NUM_OF_POOLS 3 +#define PHYSMEM_OSMEM_NUM_OF_POOLS 2 static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = { - PVRSRV_MEMALLOCFLAG_CPU_CACHED, PVRSRV_MEMALLOCFLAG_CPU_UNCACHED, PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE }; #else -#define PHYSMEM_OSMEM_NUM_OF_POOLS 2 +#define PHYSMEM_OSMEM_NUM_OF_POOLS 1 static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = { - PVRSRV_MEMALLOCFLAG_CPU_CACHED, PVRSRV_MEMALLOCFLAG_CPU_UNCACHED }; #endif @@ -260,11 +290,20 @@ static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = { static DEFINE_MUTEX(g_sPagePoolMutex); /* List holding the page array pointers: */ -static LIST_HEAD(g_sPagePoolList_WB); static LIST_HEAD(g_sPagePoolList_WC); static LIST_HEAD(g_sPagePoolList_UC); static LIST_HEAD(g_sUnpinList); +static inline IMG_UINT32 +_PagesInPoolUnlocked(void) +{ + IMG_UINT32 uiCnt = g_ui32PagePoolUCCount; +#if defined(CONFIG_X86) + uiCnt += g_ui32PagePoolWCCount; +#endif + return uiCnt; +} + static inline void _PagePoolLock(void) { @@ -332,7 +371,8 @@ _RemoveUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData) static inline IMG_BOOL _GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags, - struct list_head **ppsPoolHead) + struct list_head **ppsPoolHead, + IMG_UINT32 **ppuiCounter) { switch(PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags)) { @@ -345,20 +385,23 @@ _GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags, */ *ppsPoolHead = &g_sPagePoolList_WC; + *ppuiCounter = &g_ui32PagePoolWCCount; break; #endif case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: *ppsPoolHead = &g_sPagePoolList_UC; - break; - - case PVRSRV_MEMALLOCFLAG_CPU_CACHED: - *ppsPoolHead = &g_sPagePoolList_WB; + *ppuiCounter = &g_ui32PagePoolUCCount; break; default: - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get pages from pool, " - "unknown CPU caching mode.", __func__)); + PVR_DPF((PVR_DBG_ERROR, + "%s: Unknown CPU caching mode. " + "Using default UC pool.", + __func__)); + *ppsPoolHead = &g_sPagePoolList_UC; + *ppuiCounter = &g_ui32PagePoolUCCount; + PVR_ASSERT(0); return IMG_FALSE; } return IMG_TRUE; @@ -366,15 +409,11 @@ _GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags, static struct shrinker g_sShrinker; -/* Returning the number of pages that still reside in the page pool. 
- * Do not count excess pages that will be freed by the defer free thread. */ +/* Returning the number of pages that still reside in the page pool. */ static unsigned long _GetNumberOfPagesInPoolUnlocked(void) { - unsigned int uiEntryCount; - - uiEntryCount = (g_ui32PagePoolEntryCount > g_ui32PagePoolMaxEntries) ? g_ui32PagePoolMaxEntries : g_ui32PagePoolEntryCount; - return uiEntryCount + g_ui32UnpinPageCount; + return _PagesInPoolUnlocked() + g_ui32UnpinPageCount; } /* Linux shrinker function that informs the OS about how many pages we are caching and @@ -472,12 +511,6 @@ _ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShr } e_exit: - if (list_empty(&g_sPagePoolList_WC) && - list_empty(&g_sPagePoolList_UC) && - list_empty(&g_sPagePoolList_WB)) - { - PVR_ASSERT(g_ui32PagePoolEntryCount == 0); - } if (list_empty(&g_sUnpinList)) { PVR_ASSERT(g_ui32UnpinPageCount == 0); @@ -539,6 +572,8 @@ void LinuxInitPhysmem(void) register_shrinker(&g_sShrinker); } _PagePoolUnlock(); + + OSAtomicWrite(&g_iPoolCleanTasks, 0); } /* Unregister the shrinker and remove all pages from the pool that are still left */ @@ -546,14 +581,21 @@ void LinuxDeinitPhysmem(void) { IMG_UINT32 uiPagesFreed; + if (OSAtomicRead(&g_iPoolCleanTasks) > 0) + { + PVR_DPF((PVR_DBG_WARNING, "Still deferred cleanup tasks running " + "while deinitialising memory subsystem.")); + } + _PagePoolLock(); - if (_FreePagesFromPoolUnlocked(g_ui32PagePoolEntryCount, &uiPagesFreed) != PVRSRV_OK) + if (_FreePagesFromPoolUnlocked(IMG_UINT32_MAX, &uiPagesFreed) != PVRSRV_OK) { - PVR_DPF((PVR_DBG_ERROR, "Unable to free all pages from page pool when deinitialising.")); + PVR_DPF((PVR_DBG_ERROR, "Unable to free all pages from page pool when " + "deinitialising memory subsystem.")); PVR_ASSERT(0); } - PVR_ASSERT(g_ui32PagePoolEntryCount == 0); + PVR_ASSERT(_PagesInPoolUnlocked() == 0); /* Free the page cache */ kmem_cache_destroy(g_psLinuxPagePoolCache); @@ -619,6 +661,7 @@ _DumpPoolStructure(void) LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; struct list_head *psPoolHead = NULL; IMG_UINT32 j; + IMG_UINT32 *puiCounter; printk("\n"); /* Empty all pools */ @@ -628,7 +671,7 @@ _DumpPoolStructure(void) printk("pool = %u \n", j); /* Get the correct list for this caching mode */ - if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead)) + if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter)) { break; } @@ -645,121 +688,6 @@ _DumpPoolStructure(void) #endif } -/* Will take excess pages from the pool with acquired pool lock and then free - * them without pool lock being held. - * Designed to run in the deferred free thread. 
*/ -static PVRSRV_ERROR -_FreeExcessPagesFromPool(void) -{ - PVRSRV_ERROR eError = PVRSRV_OK; - LIST_HEAD(sPagePoolFreeList); - LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; - struct list_head *psPoolHead = NULL; - IMG_UINT32 i, j, uiPoolIdx; - static IMG_UINT8 uiPoolAccessRandomiser; - IMG_BOOL bDone = IMG_FALSE; - - /* Make sure all pools are drained over time */ - uiPoolAccessRandomiser++; - - /* Empty all pools */ - for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++) - { - uiPoolIdx = (j + uiPoolAccessRandomiser) % PHYSMEM_OSMEM_NUM_OF_POOLS; - - /* Just lock down to collect pool entries and unlock again before freeing them */ - _PagePoolLock(); - - /* Get the correct list for this caching mode */ - if (!_GetPoolListHead(g_aui32CPUCacheFlags[uiPoolIdx], &psPoolHead)) - { - _PagePoolUnlock(); - break; - } - - /* Traverse pool in reverse order to remove items that exceeded - * the pool size first */ - list_for_each_entry_safe_reverse(psPagePoolEntry, - psTempPoolEntry, - psPoolHead, - sPagePoolItem) - { - /* Go to free the pages if we collected enough */ - if (g_ui32PagePoolEntryCount <= g_ui32PagePoolMaxEntries) - { - bDone = IMG_TRUE; - break; - } - - /* Move item to free list so we can free it later without the pool lock */ - list_del(&psPagePoolEntry->sPagePoolItem); - list_add(&psPagePoolEntry->sPagePoolItem, &sPagePoolFreeList); - - /* Update counters */ - g_ui32PagePoolEntryCount -= psPagePoolEntry->uiItemsRemaining; - -#if defined(PVRSRV_ENABLE_PROCESS_STATS) - /* MemStats usually relies on having the bridge lock held, however - * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and - * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so - * the page pool lock is used to ensure these calls are mutually - * exclusive - */ - PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * psPagePoolEntry->uiItemsRemaining); -#endif - } - - _PagePoolUnlock(); - - - /* Free the pages that we removed from the pool */ - list_for_each_entry_safe(psPagePoolEntry, - psTempPoolEntry, - &sPagePoolFreeList, - sPagePoolItem) - { -#if defined(CONFIG_X86) - /* Set the correct page caching attributes on x86 */ - if (!PVRSRV_CHECK_CPU_CACHED(g_aui32CPUCacheFlags[uiPoolIdx])) - { - int ret; - ret = set_pages_array_wb(psPagePoolEntry->ppsPageArray, - psPagePoolEntry->uiItemsRemaining); - if (ret) - { - PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__)); - eError = PVRSRV_ERROR_FAILED_TO_FREE_PAGES; - goto e_exit; - } - } -#endif - /* Free the actual pages */ - for (i = 0; i < psPagePoolEntry->uiItemsRemaining; i++) - { - __free_pages(psPagePoolEntry->ppsPageArray[i], 0); - psPagePoolEntry->ppsPageArray[i] = NULL; - } - - /* Free the pool entry and page array*/ - list_del(&psPagePoolEntry->sPagePoolItem); - OSFreeMemNoStats(psPagePoolEntry->ppsPageArray); - kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry); - } - - /* Stop if all excess pages were removed */ - if (bDone) - { - eError = PVRSRV_OK; - goto e_exit; - } - - } - -e_exit: - _DumpPoolStructure(); - return eError; -} - /* Free a certain number of pages from the page pool. * Mainly used in error paths or at deinitialisation to * empty the whole pool. 
*/ @@ -771,6 +699,7 @@ _FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree, LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; struct list_head *psPoolHead = NULL; IMG_UINT32 i, j; + IMG_UINT32 *puiCounter; *puiPagesFreed = uiMaxPagesToFree; @@ -779,7 +708,7 @@ _FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree, { /* Get the correct list for this caching mode */ - if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead)) + if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter)) { break; } @@ -829,7 +758,7 @@ _FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree, /* Reduce counters */ uiMaxPagesToFree -= uiItemsToFree; - g_ui32PagePoolEntryCount -= uiItemsToFree; + *puiCounter -= uiItemsToFree; psPagePoolEntry->uiItemsRemaining -= uiItemsToFree; #if defined(PVRSRV_ENABLE_PROCESS_STATS) @@ -875,11 +804,12 @@ _GetPagesFromPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags, LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; struct list_head *psPoolHead = NULL; IMG_UINT32 i; + IMG_UINT32 *puiCounter; *puiNumReceivedPages = 0; /* Get the correct list for this caching mode */ - if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead)) + if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter)) { return; } @@ -890,7 +820,7 @@ _GetPagesFromPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags, return; } - PVR_ASSERT(g_ui32PagePoolEntryCount > 0); + PVR_ASSERT(*puiCounter > 0); /* Receive pages from the pool */ list_for_each_entry_safe(psPagePoolEntry, @@ -924,7 +854,7 @@ _GetPagesFromPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags, exit_ok: /* Update counters */ - g_ui32PagePoolEntryCount -= *puiNumReceivedPages; + *puiCounter -= *puiNumReceivedPages; #if defined(PVRSRV_ENABLE_PROCESS_STATS) /* MemStats usually relies on having the bridge lock held, however @@ -940,9 +870,6 @@ exit_ok: return; } -/* When is it worth waiting for the page pool? */ -#define PVR_LINUX_PHYSMEM_MIN_PAGES_TO_WAIT_FOR_POOL 64 - /* Same as _GetPagesFromPoolUnlocked but handles locking and * checks first whether pages from the pool are a valid option. */ static inline void @@ -954,171 +881,191 @@ _GetPagesFromPoolLocked(PVRSRV_DEVICE_NODE *psDevNode, struct page **ppsPageArray, IMG_UINT32 *puiPagesFromPool) { +#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) + PVR_UNREFERENCED_PARAMETER(bZero); +#else + /* Don't get pages from pool if it doesn't provide zeroed pages */ + if (bZero) + { + return; + } +#endif + /* The page pool stores only order 0 pages. If we need zeroed memory we - * directly allocate from the OS because it is faster than doing it ourselves. */ - if (uiOrder == 0 && !bZero) + * directly allocate from the OS because it is faster than + * doing it within the driver. */ + if (uiOrder == 0 && + !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags)) { - if (uiPagesToAlloc < PVR_LINUX_PHYSMEM_MIN_PAGES_TO_WAIT_FOR_POOL) - { - /* In case the request is a few pages, just try to acquire the pool lock */ - if (_PagePoolTrylock() == 0) - { - return; - } - } - else - { - /* It is worth waiting if many pages were requested. - * Freeing an item to the pool is very fast and - * the defer free thread will release the lock regularly. 
*/ - _PagePoolLock(); - } + _PagePoolLock(); _GetPagesFromPoolUnlocked(ui32CPUCacheFlags, uiPagesToAlloc, ppsPageArray, puiPagesFromPool); _PagePoolUnlock(); - - /* Do cache maintenance so allocations from the pool can be - * considered clean */ - if (PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags) && - PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags)) - { - _ApplyCacheMaintenance(psDevNode, - ppsPageArray, - *puiPagesFromPool, - IMG_FALSE); - } } return; } -/* Defer free function to remove excess pages from the page pool. - * We do not need the bridge lock for this function */ +/* Takes a page array and maps it into the kernel to write zeros */ static PVRSRV_ERROR -_CleanupThread_FreePoolPages(void *pvData) +_ZeroPageArray(IMG_UINT32 uiNumToClean, + struct page **ppsCleanArray, + pgprot_t pgprot) { - PVRSRV_ERROR eError; + IMG_CPU_VIRTADDR pvAddr; + IMG_UINT32 uiMaxPagesToMap = PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES; - /* Free all that is necessary */ - eError = _FreeExcessPagesFromPool(); - if(eError != PVRSRV_OK) + /* Map and fill the pages with zeros. + * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_SIZE + * at a time. */ + while (uiNumToClean != 0) { - PVR_DPF((PVR_DBG_ERROR, "%s: _FreeExcessPagesFromPool failed", __func__)); - goto e_exit; - } + IMG_UINT32 uiToClean = (uiNumToClean >= uiMaxPagesToMap) ? + uiMaxPagesToMap : + uiNumToClean; - OSFreeMem(pvData); + pvAddr = vm_map_ram(ppsCleanArray, uiToClean, -1, pgprot); + if (!pvAddr) + { + if (uiMaxPagesToMap <= 1) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Out of vmalloc memory, " + "unable to map pages for zeroing.", + __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + else + { + /* Half the pages to map at once and try again. */ + uiMaxPagesToMap = uiMaxPagesToMap >> 1; + continue; + } + } -e_exit: - return eError; + OSDeviceMemSet(pvAddr, 0, PAGE_SIZE * uiToClean); + vm_unmap_ram(pvAddr, uiToClean); + + ppsCleanArray = &(ppsCleanArray[uiToClean]); + uiNumToClean -= uiToClean; + } + + return PVRSRV_OK; } -/* Signal the defer free thread that there are pages in the pool to be cleaned up. - * MUST NOT HOLD THE PAGE POOL LOCK! */ -static void -_SignalDeferFree(void) +static PVRSRV_ERROR +_CleanupThread_CleanPages(void *pvData) { - PVRSRV_CLEANUP_THREAD_WORK *psCleanupThreadFn; - psCleanupThreadFn = OSAllocMem(sizeof(*psCleanupThreadFn)); - - if(!psCleanupThreadFn) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Failed to get memory for deferred page pool cleanup. " - "Trying to free pages immediately", - __FUNCTION__)); - goto e_oom_exit; - } + PVRSRV_ERROR eError; + LinuxCleanupData *psCleanupData = (LinuxCleanupData*) pvData; + LinuxPagePoolEntry *psPagePoolEntry = psCleanupData->psPoolEntry; + struct list_head *psPoolHead = NULL; + pgprot_t pgprot; + IMG_UINT32 *puiCounter = NULL; - psCleanupThreadFn->pfnFree = _CleanupThread_FreePoolPages; - psCleanupThreadFn->pvData = psCleanupThreadFn; - psCleanupThreadFn->ui32RetryCount = CLEANUP_THREAD_RETRY_COUNT_DEFAULT; - psCleanupThreadFn->bDependsOnHW = IMG_FALSE; - /* We must not hold the pool lock when calling AddWork because it might call us back to - * free pooled pages directly when unloading the driver */ - PVRSRVCleanupThreadAddWork(psCleanupThreadFn); - return; + /* Get the correct pool for this caching mode. */ + _GetPoolListHead(psCleanupData->ui32CPUCacheMode , &psPoolHead, &puiCounter); -e_oom_exit: + switch(PVRSRV_CPU_CACHE_MODE(psCleanupData->ui32CPUCacheMode)) { - /* In case we are not able to signal the defer free thread - * we have to cleanup the pool now. 
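_ZeroPageArray() above is the heart of the new batched zeroing: map up to PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES pages into one virtual range, memset the whole range, and halve the batch whenever vmalloc space is too fragmented for the mapping to succeed. The same strategy condensed, assuming ordinary cached PAGE_KERNEL mappings (DEMO_MAX_BATCH is a stand-in constant):

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/string.h>
    #include <linux/vmalloc.h>

    #define DEMO_MAX_BATCH 1024u

    static int demo_zero_pages(struct page **pages, unsigned int count)
    {
        unsigned int batch = DEMO_MAX_BATCH;

        while (count) {
            unsigned int chunk = min(count, batch);
            void *vaddr = vm_map_ram(pages, chunk, -1, PAGE_KERNEL);

            if (!vaddr) {
                if (batch <= 1)
                    return -ENOMEM; /* even a single page won't map */
                batch >>= 1;        /* shrink the batch and retry */
                continue;
            }
            memset(vaddr, 0, (size_t)chunk * PAGE_SIZE);
            vm_unmap_ram(vaddr, chunk);
            pages += chunk;
            count -= chunk;
        }
        return 0;
    }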
*/ - IMG_UINT32 uiPagesFreed; + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: +#if defined(CONFIG_X86) + /* For x86 we can only map with the same attributes + * as in the PAT settings*/ + pgprot = pgprot_noncached(PAGE_KERNEL); + break; +#endif - _PagePoolLock(); - if (_FreePagesFromPoolUnlocked(g_ui32PagePoolEntryCount - g_ui32PagePoolMaxEntries, - &uiPagesFreed) != PVRSRV_OK) - { - PVR_DPF((PVR_DBG_ERROR, - "%s: Unable to free pooled pages!", - __FUNCTION__)); - } - _PagePoolUnlock(); + case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE: + pgprot = pgprot_writecombine(PAGE_KERNEL); + break; - return; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Unknown caching mode to set page protection flags.", + __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto eExit; } -} - -/* Moves a page array to the page pool. - * - * If this function is successful the ppsPageArray is unusable and needs to be - * reallocated in case the _PMR_OSPAGEARRAY_DATA_ will be reused. - * This function expects cached pages to be not in the cache anymore, - * invalidate them before, ideally without using the pool lock. */ -static IMG_BOOL -_PutPagesToPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags, - struct page **ppsPageArray, - IMG_UINT32 uiEntriesInArray) -{ - LinuxPagePoolEntry *psPagePoolEntry; - struct list_head *psPoolHead = NULL; - /* Check if there is still space in the pool */ - if ( (g_ui32PagePoolEntryCount + uiEntriesInArray) >= - (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxExcessEntries) ) +#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) + /* Map and fill the pages with zeros. + * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_SIZE + * at a time. */ + eError = _ZeroPageArray(psPagePoolEntry->uiItemsRemaining, + psPagePoolEntry->ppsPageArray, + pgprot); + if (eError != PVRSRV_OK) { - return IMG_FALSE; + goto eExit; } +#endif - /* Get the correct list for this caching mode */ - if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead)) - { - return IMG_FALSE; - } - /* Fill the new pool entry structure and add it to the pool list */ - psPagePoolEntry = kmem_cache_alloc(g_psLinuxPagePoolCache, GFP_KERNEL); - psPagePoolEntry->ppsPageArray = ppsPageArray; - psPagePoolEntry->uiItemsRemaining = uiEntriesInArray; + /* Lock down pool and add item */ + _PagePoolLock(); + + /* Pool counters were already updated so don't do it here again*/ list_add_tail(&psPagePoolEntry->sPagePoolItem, psPoolHead); - /* Update counters */ - g_ui32PagePoolEntryCount += uiEntriesInArray; + _DumpPoolStructure(); + _PagePoolUnlock(); + + OSFreeMem(pvData); + OSAtomicDecrement(&g_iPoolCleanTasks); + + return PVRSRV_OK; + +eExit: + /* If this was the last retry, give up and free pages to OS */ + if ((psCleanupData->sCleanupWork.ui32RetryCount - 1) == 0) + { + IMG_UINT32 i; + + PVR_DPF((PVR_DBG_ERROR, + "%s: Deferred task error, freeing pages to OS.", + __func__)); + _PagePoolLock(); + + *puiCounter -= psPagePoolEntry->uiItemsRemaining; #if defined(PVRSRV_ENABLE_PROCESS_STATS) - /* MemStats usually relies on having the bridge lock held, however - * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and - * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so - * the page pool lock is used to ensure these calls are mutually - * exclusive - */ - PVRSRVStatsIncrMemAllocPoolStat(PAGE_SIZE * uiEntriesInArray); + /* MemStats usually relies on having the bridge lock held, however + * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and + * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so + * the page pool lock is 
used to ensure these calls are mutually + * exclusive + */ + PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * psCleanupData->psPoolEntry->uiItemsRemaining); #endif - _DumpPoolStructure(); - return IMG_TRUE; + _PagePoolUnlock(); + + for (i = 0; i < psCleanupData->psPoolEntry->uiItemsRemaining; i++) + { + _FreeOSPage(0, + IMG_TRUE, + psPagePoolEntry->ppsPageArray[i]); + } + OSFreeMemNoStats(psPagePoolEntry->ppsPageArray); + kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry); + OSFreeMem(psCleanupData); + + OSAtomicDecrement(&g_iPoolCleanTasks); + } + + return eError; } -/* Minimal amount of pages that will go to the pool, everything below is freed directly */ -#define PVR_LINUX_PHYSMEM_MIN_PAGES_TO_ADD_TO_POOL 16 -/* Same as _PutPagesToPoolUnlocked but handles locking and checks whether the pages are - * suitable to be stored in the page pool. */ +/* Put page array to the page pool. + Handles locking and checks whether the pages are + * suitable to be stored in the pool. */ static inline IMG_BOOL _PutPagesToPoolLocked(IMG_UINT32 ui32CPUCacheFlags, struct page **ppsPageArray, @@ -1126,45 +1073,127 @@ _PutPagesToPoolLocked(IMG_UINT32 ui32CPUCacheFlags, IMG_UINT32 uiOrder, IMG_UINT32 uiNumPages) { + LinuxCleanupData *psCleanupData; + PVRSRV_CLEANUP_THREAD_WORK *psCleanupThreadFn; + if (uiOrder == 0 && !bUnpinned && - uiNumPages >= PVR_LINUX_PHYSMEM_MIN_PAGES_TO_ADD_TO_POOL) + !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags)) { + IMG_UINT32 uiEntries; + IMG_UINT32 *puiCounter; + struct list_head *psPoolHead; + + _PagePoolLock(); - /* Try to quickly move page array to the pool */ - if (_PutPagesToPoolUnlocked(ui32CPUCacheFlags, - ppsPageArray, - uiNumPages) ) + uiEntries = _PagesInPoolUnlocked(); + + /* Check for number of current page pool entries and whether + we have other asynchronous tasks in-flight */ + if ( (uiEntries < g_ui32PagePoolMaxEntries) && + ((uiEntries + uiNumPages) < + (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxExcessEntries) )) { - if (g_ui32PagePoolEntryCount > (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxEntries_5Percent)) + if (OSAtomicIncrement(&g_iPoolCleanTasks) <= + PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS) { - /* Signal defer free to clean up excess pages from pool. - * Allow a little excess before signalling to avoid oscillating behaviour */ + psCleanupData = OSAllocMem(sizeof(*psCleanupData)); + + if(!psCleanupData) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get memory for deferred page pool cleanup. " + "Trying to free pages immediately", + __FUNCTION__)); + goto eDecrement; + } + + psCleanupThreadFn = &psCleanupData->sCleanupWork; + psCleanupData->ui32CPUCacheMode = ui32CPUCacheFlags; + psCleanupData->psPoolEntry = kmem_cache_alloc(g_psLinuxPagePoolCache, GFP_KERNEL); + + if(!psCleanupData->psPoolEntry) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get memory for deferred page pool cleanup. 
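The cap enforced here by OSAtomicIncrement(&g_iPoolCleanTasks) bounds how many deferred clean tasks can be in flight at once, and every error path has to undo the increment (the eDecrement ladder below). The same bounded hand-off reduced to plain workqueue code, all names invented:

    #include <linux/atomic.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    #define DEMO_MAX_CLEAN_TASKS 2

    static atomic_t demo_clean_tasks = ATOMIC_INIT(0);

    struct demo_clean_work {
        struct work_struct work;
        /* pages to scrub elided */
    };

    static void demo_clean_fn(struct work_struct *work)
    {
        struct demo_clean_work *cw =
            container_of(work, struct demo_clean_work, work);

        /* zero the pages, insert them into the pool, then: */
        kfree(cw);
        atomic_dec(&demo_clean_tasks);
    }

    static bool demo_try_defer_clean(void)
    {
        struct demo_clean_work *cw;

        if (atomic_inc_return(&demo_clean_tasks) > DEMO_MAX_CLEAN_TASKS)
            goto undo;              /* too many tasks already queued */
        cw = kzalloc(sizeof(*cw), GFP_KERNEL);
        if (!cw)
            goto undo;              /* caller frees synchronously */
        INIT_WORK(&cw->work, demo_clean_fn);
        schedule_work(&cw->work);
        return true;

    undo:
        atomic_dec(&demo_clean_tasks);
        return false;
    }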
" + "Trying to free pages immediately", + __FUNCTION__)); + goto eFreeCleanupData; + } + + if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get correct page pool", + __FUNCTION__)); + goto eFreePoolEntry; + } + + /* Increase counter here to avoid deferred cleanup tasks piling up */ + *puiCounter = *puiCounter + uiNumPages; + + psCleanupData->psPoolEntry->ppsPageArray = ppsPageArray; + psCleanupData->psPoolEntry->uiItemsRemaining = uiNumPages; + + psCleanupThreadFn->pfnFree = _CleanupThread_CleanPages; + psCleanupThreadFn->pvData = psCleanupData; + psCleanupThreadFn->ui32RetryCount = CLEANUP_THREAD_RETRY_COUNT_DEFAULT; + psCleanupThreadFn->bDependsOnHW = IMG_FALSE; + #if defined(PVRSRV_ENABLE_PROCESS_STATS) + /* MemStats usually relies on having the bridge lock held, however + * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and + * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so + * the page pool lock is used to ensure these calls are mutually + * exclusive + */ + PVRSRVStatsIncrMemAllocPoolStat(PAGE_SIZE * uiNumPages); + #endif + + /* We must not hold the pool lock when calling AddWork because it might call us back to + * free pooled pages directly when unloading the driver */ _PagePoolUnlock(); - _SignalDeferFree(); + + PVRSRVCleanupThreadAddWork(psCleanupThreadFn); + + } else { - _PagePoolUnlock(); + goto eDecrement; } - /* All done */ - return IMG_TRUE; } - - /* Could not move pages to pool, continue and free them now */ - _PagePoolUnlock(); + else + { + goto eUnlock; + } + } + else + { + goto eExitFalse; } + return IMG_TRUE; + +eFreePoolEntry: + OSFreeMem(psCleanupData->psPoolEntry); +eFreeCleanupData: + OSFreeMem(psCleanupData); +eDecrement: + OSAtomicDecrement(&g_iPoolCleanTasks); +eUnlock: + _PagePoolUnlock(); +eExitFalse: return IMG_FALSE; } /* Get the GFP flags that we pass to the page allocator */ static inline unsigned int -_GetGFPFlags(PMR_OSPAGEARRAY_DATA *psPageArrayData) +_GetGFPFlags(IMG_BOOL bZero, + PVRSRV_DEVICE_NODE *psDevNode) { - struct device *psDev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice; + struct device *psDev = psDevNode->psDevConfig->pvOSDevice; unsigned int gfp_flags = 0; gfp_flags = GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC; @@ -1181,7 +1210,7 @@ _GetGFPFlags(PMR_OSPAGEARRAY_DATA *psPageArrayData) gfp_flags |= __GFP_HIGHMEM; } - if (psPageArrayData->bZero) + if (bZero) { gfp_flags |= __GFP_ZERO; } @@ -1381,7 +1410,8 @@ _ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode, IMG_BOOL bFlush) { PVRSRV_ERROR eError = PVRSRV_ERROR_RETRY; - IMG_UINT32 ui32Idx; + void * pvAddr; + if ((uiNumPages << PAGE_SHIFT) >= PVR_DIRTY_BYTES_FLUSH_THRESHOLD) { @@ -1389,38 +1419,71 @@ _ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode, eError = OSCPUOperation(PVRSRV_CACHE_OP_FLUSH); } + if (eError != PVRSRV_OK) { - for (ui32Idx = 0; ui32Idx < uiNumPages; ++ui32Idx) + + if (OSCPUCacheOpAddressType(PVRSRV_CACHE_OP_FLUSH) == PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL) { - IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd; - void *pvPageVAddr; + pgprot_t pgprot = PAGE_KERNEL; - pvPageVAddr = kmap(ppsPage[ui32Idx]); - sCPUPhysAddrStart.uiAddr = page_to_phys(ppsPage[ui32Idx]); - sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE; + IMG_UINT32 uiNumToClean = uiNumPages; + struct page **ppsCleanArray = ppsPage; - /* If we're zeroing, we need to make sure the cleared memory is pushed out - of the cache before the cache lines are invalidated */ - if (bFlush) + /* Map and flush 
page. + * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_SIZE + * at a time. */ + while (uiNumToClean != 0) { + IMG_UINT32 uiToClean = (uiNumToClean >= PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES) ? + PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES : + uiNumToClean; + IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd; + + pvAddr = vm_map_ram(ppsCleanArray, uiToClean, -1, pgprot); + if (!pvAddr) + { + PVR_DPF((PVR_DBG_ERROR, + "Unable to flush page cache for new allocation, skipping flush.")); + return; + } + OSFlushCPUCacheRangeKM(psDevNode, - pvPageVAddr, - pvPageVAddr + PAGE_SIZE, + pvAddr, + pvAddr + PAGE_SIZE, sCPUPhysAddrStart, sCPUPhysAddrEnd); + + vm_unmap_ram(pvAddr, uiToClean); + + ppsCleanArray = &(ppsCleanArray[uiToClean]); + uiNumToClean -= uiToClean; } - else + } + else + { + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < uiNumPages; ++ui32Idx) { - OSInvalidateCPUCacheRangeKM(psDevNode, - pvPageVAddr, - pvPageVAddr + PAGE_SIZE, - sCPUPhysAddrStart, - sCPUPhysAddrEnd); - } + IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd; - kunmap(ppsPage[ui32Idx]); + pvAddr = kmap(ppsPage[ui32Idx]); + sCPUPhysAddrStart.uiAddr = page_to_phys(ppsPage[ui32Idx]); + sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE; + + /* If we're zeroing, we need to make sure the cleared memory is pushed out + of the cache before the cache lines are invalidated */ + OSFlushCPUCacheRangeKM(psDevNode, + pvAddr, + pvAddr + PAGE_SIZE, + sCPUPhysAddrStart, + sCPUPhysAddrEnd); + + kunmap(ppsPage[ui32Idx]); + } } + } } @@ -1484,26 +1547,24 @@ _ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode, } else #endif - /* Cache maintenance if: - * cached && (cleanFlag || bFlush) - * OR - * uncached || write-combine - */ - if ( (bCPUCached && (PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags) || bFlush)) || - bCPUUncached || bCPUWriteCombine ) { - /* We can be given pages which still remain in the cache. - In order to make sure that the data we write through our mappings - doesn't get overwritten by later cache evictions we invalidate the - pages that are given to us. - - Note: - This still seems to be true if we request cold pages, it's just less - likely to be in the cache. */ - _ApplyCacheMaintenance(psDevNode, - ppsPage, - uiNumPages, - bFlush); + if ( bFlush || + bCPUUncached || bCPUWriteCombine || + (bCPUCached && PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags)) ) + { + /* We can be given pages which still remain in the cache. + In order to make sure that the data we write through our mappings + doesn't get overwritten by later cache evictions we invalidate the + pages that are given to us. + + Note: + This still seems to be true if we request cold pages, it's just less + likely to be in the cache. */ + _ApplyCacheMaintenance(psDevNode, + ppsPage, + uiNumPages, + bFlush); + } } } @@ -1637,7 +1698,8 @@ _AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) IMG_UINT32 uiPagesToAlloc; IMG_UINT32 uiPagesFromPool = 0; - unsigned int gfp_flags = _GetGFPFlags(psPageArrayData); + unsigned int gfp_flags = _GetGFPFlags(ui32MinOrder ? 
psPageArrayData->bZero : IMG_FALSE, /* Zero all pages later as batch */ + psPageArrayData->psDevNode); IMG_UINT32 ui32GfpFlags; IMG_UINT32 ui32HighOrderGfpFlags = ((gfp_flags & ~__GFP_RECLAIM) | __GFP_NORETRY); @@ -1795,6 +1857,19 @@ _AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) } } + if (psPageArrayData->bZero && ui32MinOrder == 0) + { + eError = _ZeroPageArray(uiPagesToAlloc - uiPagesFromPool, + ppsPageAttributeArray, + PAGE_KERNEL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to zero pages")); + goto e_free_pages; + } + } + + /* Do the cache management as required */ eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode, ppsPageAttributeArray, @@ -1883,9 +1958,10 @@ _AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, PVRSRV_ERROR eError; IMG_UINT32 i; struct page **ppsPageArray = psPageArrayData->pagearray; - IMG_UINT32 uiOrder; + IMG_UINT32 uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT; IMG_UINT32 uiPagesFromPool = 0; - unsigned int gfp_flags = _GetGFPFlags(psPageArrayData); + unsigned int gfp_flags = _GetGFPFlags(uiOrder ? psPageArrayData->bZero : IMG_FALSE, /* Zero pages later as batch */ + psPageArrayData->psDevNode); /* We use this page array to receive pages from the pool and then reuse it afterwards to * store pages that need their cache attribute changed on x86*/ @@ -1902,8 +1978,6 @@ _AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, goto e_exit; } - uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT; - /* Check the requested number of pages if they fit in the page array */ if(psPageArrayData->uiTotalNumPages < \ (psPageArrayData->iNumPagesAllocated + uiPagesToAlloc)) @@ -1991,6 +2065,18 @@ _AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, } EnableOOMKiller(); + if (psPageArrayData->bZero && uiOrder == 0) + { + eError = _ZeroPageArray(uiTempPageArrayIndex, + ppsTempPageArray, + PAGE_KERNEL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to zero pages")); + goto e_free_pages; + } + } + /* Do the cache management as required */ eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode, ppsTempPageArray, @@ -2284,22 +2370,6 @@ _FreeOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, ppsPageArray = psPageArrayData->pagearray; uiOrder = psPageArrayData->uiLog2DevPageSize - PAGE_SHIFT; - /* Poison if necessary */ - if (psPageArrayData->bPoisonOnFree) - { - for (i = 0; i < uiNumPages; i ++) - { - uiPageIndex = pai32FreeIndices ? 
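The gfp split above is the standard opportunistic high-order trick: for orders above zero, strip direct reclaim and forbid retries so a large allocation either succeeds cheaply or fails fast, leaving the caller to fall back to order-0 pages. As a standalone helper (flag names as in the hunk; not the driver's code):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static struct page *demo_try_high_order(gfp_t base, unsigned int order)
    {
        /* No direct reclaim, no retries, no warning spam: if the buddy
         * allocator cannot satisfy this order cheaply, return NULL so
         * the caller assembles the buffer from order-0 pages instead. */
        gfp_t cheap = (base & ~__GFP_RECLAIM) | __GFP_NORETRY | __GFP_NOWARN;

        return alloc_pages(cheap, order);
    }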
pai32FreeIndices[i] : i ; - if(INVALID_PAGE != ppsPageArray[uiPageIndex]) - { - _PoisonPages(ppsPageArray[uiPageIndex], - 0, - _FreePoison, - _FreePoisonSize); - } - } - } - /* Put pages in a contiguous array so further processing is easier */ uiTempIdx = 0; for (i = 0; i < uiNumPages; i++) @@ -2324,6 +2394,18 @@ _FreeOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, goto exit_ok; } + /* Poison if necessary */ + if (psPageArrayData->bPoisonOnFree) + { + for (i = 0; i < uiTempIdx; i ++) + { + _PoisonPages(ppsTempPageArray[i], + 0, + _FreePoison, + _FreePoisonSize); + } + } + /* Free pages and reset page caching attributes on x86 */ #if defined(CONFIG_X86) if (uiTempIdx != 0 && psPageArrayData->bUnsetMemoryType == IMG_TRUE) @@ -2369,17 +2451,6 @@ _FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) /* We really should have something to free before we call this */ PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0); - /* Poison pages if necessary */ - if (psPageArrayData->bPoisonOnFree) - { - for (i = 0; i < uiNumPages; i++) - { - _PoisonPages(ppsPageArray[i], - 0, - _FreePoison, - _FreePoisonSize); - } - } /* Try to move the page array to the pool */ bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags, @@ -2393,6 +2464,18 @@ _FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) goto exit_ok; } + /* Poison pages if necessary */ + if (psPageArrayData->bPoisonOnFree) + { + for (i = 0; i < uiNumPages; i++) + { + _PoisonPages(ppsPageArray[i], + 0, + _FreePoison, + _FreePoisonSize); + } + } + if (psPageArrayData->bIsCMA) { IMG_UINT32 uiDevNumPages = uiNumPages >> uiOrder; @@ -2877,7 +2960,7 @@ PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv, IMG_UINT32 ui32Index = 0; IMG_UINT32 uiAllocpgidx ; IMG_UINT32 uiFreepgidx; - IMG_UINT32 ui32Order = psPMRPageArrayData->uiLog2DevPageSize - PAGE_SHIFT; + /* Check SPARSE flags and calculate pages to allocate and free */ if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH)) @@ -3036,27 +3119,6 @@ PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv, psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx; psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; } - - /* Be sure to honour the attributes associated with the allocation - * such as zeroing, poisoning etc. */ - if (psPMRPageArrayData->bPoisonOnAlloc) - { - _PoisonPages(psPageArray[uiAllocpgidx], - ui32Order, - _AllocPoison, - _AllocPoisonSize); - } - else - { - if (psPMRPageArrayData->bZero) - { - char a = 0; - _PoisonPages(psPageArray[uiAllocpgidx], - ui32Order, - &a, - 1); - } - } } /* Free the additional free pages */ @@ -3204,6 +3266,12 @@ PhysmemNewOSRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode, goto errorOnParam; } +#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) + /* Overwrite flags and always zero pages that could go back to UM */ + bZero = IMG_TRUE; + bPoisonOnAlloc = IMG_FALSE; +#endif + /* Create Array structure that hold the physical pages */ eError = _AllocOSPageArray(psDevNode, uiChunkSize, diff --git a/drivers/staging/imgtec/rogue/pvrsrv.c b/drivers/staging/imgtec/rogue/pvrsrv.c index 3c9e322df8b6..96741550c34c 100644 --- a/drivers/staging/imgtec/rogue/pvrsrv.c +++ b/drivers/staging/imgtec/rogue/pvrsrv.c @@ -124,6 +124,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * a day to check for any missed clean-up. */ #define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT 28800000000ULL +/*! 
When unloading try a few times to free everything remaining on the list */ +#define CLEANUP_THREAD_UNLOAD_RETRY 4 #define PVRSRV_PROC_HANDLE_BASE_INIT 10 @@ -304,6 +306,7 @@ static void CleanupThread(void *pvData) IMG_HANDLE hOSEvent; PVRSRV_ERROR eRc; IMG_BOOL bUseGlobalEO = IMG_FALSE; + IMG_UINT32 uiUnloadRetry = 0; /* Store the process id (pid) of the clean-up thread */ psPVRSRVData->cleanupThreadPid = OSGetCurrentProcessID(); @@ -322,11 +325,20 @@ static void CleanupThread(void *pvData) /* While the driver is in a good state and is not being unloaded * try to free any deferred items when signalled */ - while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && - (!psPVRSRVData->bUnload)) + while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK)) { IMG_HANDLE hEvent; + if (psPVRSRVData->bUnload) + { + if (dllist_is_empty(&psPVRSRVData->sCleanupThreadWorkList) || + uiUnloadRetry > CLEANUP_THREAD_UNLOAD_RETRY) + { + break; + } + uiUnloadRetry++; + } + /* Wait until signalled for deferred clean up OR wait for a * short period if the previous deferred clean up was not able * to release all the resources before trying again. diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 256b3808305f..616eca97459e 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -659,6 +659,20 @@ has_zeroout: ret = check_block_validity(inode, map); if (ret != 0) return ret; + + /* + * Inodes with freshly allocated blocks where contents will be + * visible after transaction commit must be on transaction's + * ordered data list. + */ + if (map->m_flags & EXT4_MAP_NEW && + !(map->m_flags & EXT4_MAP_UNWRITTEN) && + !IS_NOQUOTA(inode) && + ext4_should_order_data(inode)) { + ret = ext4_jbd2_file_inode(handle, inode); + if (ret) + return ret; + } } return retval; } @@ -1114,15 +1128,6 @@ static int ext4_write_end(struct file *file, int i_size_changed = 0; trace_ext4_write_end(inode, pos, len, copied); - if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) { - ret = ext4_jbd2_file_inode(handle, inode); - if (ret) { - unlock_page(page); - page_cache_release(page); - goto errout; - } - } - if (ext4_has_inline_data(inode)) { ret = ext4_write_inline_data_end(inode, pos, len, copied, page); diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 4e5f332f15d9..db7d89cea2ce 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -169,7 +169,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index e7e7d26b1dc9..3f6badbf38c6 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -329,11 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) /* We don't show the stack guard page in /proc/maps */ start = vma->vm_start; - if (stack_guard_page_start(vma, start)) - start += PAGE_SIZE; end = vma->vm_end; - if (stack_guard_page_end(vma, end)) - end -= PAGE_SIZE; seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", start, diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index 01b5c84be828..6d0bd9e16a11 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h @@ -71,7 +71,7 @@ void dma_contiguous_reserve(phys_addr_t addr_limit); int dma_declare_contiguous(struct device *dev, phys_addr_t size, phys_addr_t base, phys_addr_t limit); -struct page 
*dma_alloc_from_contiguous(struct device *dev, int count, +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int order); bool dma_release_from_contiguous(struct device *dev, struct page *pages, int count); @@ -90,7 +90,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size, } static inline -struct page *dma_alloc_from_contiguous(struct device *dev, int count, +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int order) { return NULL; diff --git a/include/linux/mm.h b/include/linux/mm.h index 153c2ffc94fb..f3a6c724054c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1069,34 +1069,6 @@ int set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); int clear_page_dirty_for_io(struct page *page); -/* Is the vma a continuation of the stack vma above it? */ -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) -{ - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); -} - -static inline int stack_guard_page_start(struct vm_area_struct *vma, - unsigned long addr) -{ - return (vma->vm_flags & VM_GROWSDOWN) && - (vma->vm_start == addr) && - !vma_growsdown(vma->vm_prev, addr); -} - -/* Is the vma a continuation of the stack vma below it? */ -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) -{ - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); -} - -static inline int stack_guard_page_end(struct vm_area_struct *vma, - unsigned long addr) -{ - return (vma->vm_flags & VM_GROWSUP) && - (vma->vm_end == addr) && - !vma_growsup(vma->vm_next, addr); -} - extern pid_t vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group); @@ -1627,6 +1599,7 @@ unsigned long ra_submit(struct file_ra_state *ra, struct address_space *mapping, struct file *filp); +extern unsigned long stack_guard_gap; /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); @@ -1655,6 +1628,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m return vma; } +static inline unsigned long vm_start_gap(struct vm_area_struct *vma) +{ + unsigned long vm_start = vma->vm_start; + + if (vma->vm_flags & VM_GROWSDOWN) { + vm_start -= stack_guard_gap; + if (vm_start > vma->vm_start) + vm_start = 0; + } + return vm_start; +} + +static inline unsigned long vm_end_gap(struct vm_area_struct *vma) +{ + unsigned long vm_end = vma->vm_end; + + if (vma->vm_flags & VM_GROWSUP) { + vm_end += stack_guard_gap; + if (vm_end < vma->vm_end) + vm_end = -PAGE_SIZE; + } + return vm_end; +} + static inline unsigned long vma_pages(struct vm_area_struct *vma) { return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; diff --git a/mm/memory.c b/mm/memory.c index 52ed899e2777..8ece1d3b49be 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1654,12 +1654,6 @@ no_page_table: return page; } -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) -{ - return stack_guard_page_start(vma, addr) || - stack_guard_page_end(vma, addr+PAGE_SIZE); -} - /** * __get_user_pages() - pin user pages in memory * @tsk: task_struct of target task @@ -1827,11 +1821,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, int ret; unsigned int fault_flags = 0; - /* For mlock, just skip the stack guard page. 
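The arithmetic in vm_start_gap()/vm_end_gap() above saturates rather than wraps: a stack mapped within stack_guard_gap of address 0 reports a gap start of 0 instead of a huge bogus address. A standalone check of that clamping, assuming a 64-bit unsigned long and 4 KiB pages:

    #include <assert.h>

    #define DEMO_PAGE_SIZE 4096UL
    static unsigned long demo_guard_gap = 256UL * DEMO_PAGE_SIZE;

    /* Mirrors vm_start_gap() for a VM_GROWSDOWN mapping. */
    static unsigned long demo_start_gap(unsigned long vm_start)
    {
        unsigned long s = vm_start - demo_guard_gap;

        if (s > vm_start)   /* unsigned wrap: the gap would cross 0 */
            s = 0;
        return s;
    }

    int main(void)
    {
        /* Typical stack: the whole guard gap fits below vm_start. */
        assert(demo_start_gap(0x7f0000000000UL) ==
               0x7f0000000000UL - demo_guard_gap);
        /* Stack close to NULL: clamp to 0 instead of wrapping. */
        assert(demo_start_gap(16 * DEMO_PAGE_SIZE) == 0);
        return 0;
    }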
*/ - if (foll_flags & FOLL_MLOCK) { - if (stack_guard_page(vma, start)) - goto next_page; - } if (foll_flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (nonblocking) @@ -3206,40 +3195,6 @@ out_release: } /* - * This is like a special single-page "expand_{down|up}wards()", - * except we must first make sure that 'address{-|+}PAGE_SIZE' - * doesn't hit another vma. - */ -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) -{ - address &= PAGE_MASK; - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { - struct vm_area_struct *prev = vma->vm_prev; - - /* - * Is there a mapping abutting this one below? - * - * That's only ok if it's the same stack mapping - * that has gotten split.. - */ - if (prev && prev->vm_end == address) - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; - - expand_downwards(vma, address - PAGE_SIZE); - } - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { - struct vm_area_struct *next = vma->vm_next; - - /* As VM_GROWSDOWN but s/below/above/ */ - if (next && next->vm_start == address + PAGE_SIZE) - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; - - expand_upwards(vma, address + PAGE_SIZE); - } - return 0; -} - -/* * We enter with non-exclusive mmap_sem (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. * We return with mmap_sem still held, but pte unmapped and unlocked. @@ -3258,10 +3213,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, if (vma->vm_flags & VM_SHARED) return VM_FAULT_SIGBUS; - /* Check if we need to add a guard page to the stack */ - if (check_stack_guard_page(vma, address) < 0) - return VM_FAULT_SIGBUS; - /* Use the zero-page for reads */ if (!(flags & FAULT_FLAG_WRITE)) { entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 28fb17845f70..20fce89e788c 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1543,7 +1543,6 @@ asmlinkage long compat_sys_get_mempolicy(int __user *policy, asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, compat_ulong_t maxnode) { - long err = 0; unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; DECLARE_BITMAP(bm, MAX_NUMNODES); @@ -1552,14 +1551,13 @@ asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) { - err = compat_get_bitmap(bm, nmask, nr_bits); + if (compat_get_bitmap(bm, nmask, nr_bits)) + return -EFAULT; nm = compat_alloc_user_space(alloc_size); - err |= copy_to_user(nm, bm, alloc_size); + if (copy_to_user(nm, bm, alloc_size)) + return -EFAULT; } - if (err) - return -EFAULT; - return sys_set_mempolicy(mode, nm, nr_bits+1); } @@ -1567,7 +1565,6 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, compat_ulong_t mode, compat_ulong_t __user *nmask, compat_ulong_t maxnode, compat_ulong_t flags) { - long err = 0; unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; nodemask_t bm; @@ -1576,14 +1573,13 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) { - err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); + if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) + return -EFAULT; nm = compat_alloc_user_space(alloc_size); - err |= copy_to_user(nm, nodes_addr(bm), alloc_size); + if (copy_to_user(nm, nodes_addr(bm), alloc_size)) + return -EFAULT; } - if 
(err) - return -EFAULT; - return sys_mbind(start, len, mode, nm, nr_bits+1, flags); } diff --git a/mm/mmap.c b/mm/mmap.c index 4d1bcc6844dd..c6e1e9d36273 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -275,6 +275,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) unsigned long rlim, retval; unsigned long newbrk, oldbrk; struct mm_struct *mm = current->mm; + struct vm_area_struct *next; unsigned long min_brk; bool populate; @@ -320,7 +321,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) } /* Check against existing mmap mappings. */ - if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) + next = find_vma(mm, oldbrk); + if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) goto out; /* Ok, looks good - let it rip. */ @@ -343,10 +345,22 @@ out: static long vma_compute_subtree_gap(struct vm_area_struct *vma) { - unsigned long max, subtree_gap; - max = vma->vm_start; - if (vma->vm_prev) - max -= vma->vm_prev->vm_end; + unsigned long max, prev_end, subtree_gap; + + /* + * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we + * allow two stack_guard_gaps between them here, and when choosing + * an unmapped area; whereas when expanding we only require one. + * That's a little inconsistent, but keeps the code here simpler. + */ + max = vm_start_gap(vma); + if (vma->vm_prev) { + prev_end = vm_end_gap(vma->vm_prev); + if (max > prev_end) + max -= prev_end; + else + max = 0; + } if (vma->vm_rb.rb_left) { subtree_gap = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb)->rb_subtree_gap; @@ -430,7 +444,7 @@ void validate_mm(struct mm_struct *mm) list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_verify(avc); vma_unlock_anon_vma(vma); - highest_address = vma->vm_end; + highest_address = vm_end_gap(vma); vma = vma->vm_next; i++; } @@ -598,7 +612,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, if (vma->vm_next) vma_gap_update(vma->vm_next); else - mm->highest_vm_end = vma->vm_end; + mm->highest_vm_end = vm_end_gap(vma); /* * vma->vm_prev wasn't known when we followed the rbtree to find the @@ -847,7 +861,7 @@ again: remove_next = 1 + (end > next->vm_end); vma_gap_update(vma); if (end_changed) { if (!next) - mm->highest_vm_end = end; + mm->highest_vm_end = vm_end_gap(vma); else if (!adjust_next) vma_gap_update(next); } @@ -890,7 +904,7 @@ again: remove_next = 1 + (end > next->vm_end); else if (next) vma_gap_update(next); else - mm->highest_vm_end = end; + WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); } if (insert && file) uprobe_mmap(insert); @@ -1689,7 +1703,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) while (true) { /* Visit left subtree if it looks promising */ - gap_end = vma->vm_start; + gap_end = vm_start_gap(vma); if (gap_end >= low_limit && vma->vm_rb.rb_left) { struct vm_area_struct *left = rb_entry(vma->vm_rb.rb_left, @@ -1700,12 +1714,13 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) } } - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; + gap_start = vma->vm_prev ? 
vm_end_gap(vma->vm_prev) : 0; check_current: /* Check if current node has a suitable gap */ if (gap_start > high_limit) return -ENOMEM; - if (gap_end >= low_limit && gap_end - gap_start >= length) + if (gap_end >= low_limit && + gap_end > gap_start && gap_end - gap_start >= length) goto found; /* Visit right subtree if it looks promising */ @@ -1727,8 +1742,8 @@ check_current: vma = rb_entry(rb_parent(prev), struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_left) { - gap_start = vma->vm_prev->vm_end; - gap_end = vma->vm_start; + gap_start = vm_end_gap(vma->vm_prev); + gap_end = vm_start_gap(vma); goto check_current; } } @@ -1792,7 +1807,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) while (true) { /* Visit right subtree if it looks promising */ - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; + gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; if (gap_start <= high_limit && vma->vm_rb.rb_right) { struct vm_area_struct *right = rb_entry(vma->vm_rb.rb_right, @@ -1805,10 +1820,11 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) check_current: /* Check if current node has a suitable gap */ - gap_end = vma->vm_start; + gap_end = vm_start_gap(vma); if (gap_end < low_limit) return -ENOMEM; - if (gap_start <= high_limit && gap_end - gap_start >= length) + if (gap_start <= high_limit && + gap_end > gap_start && gap_end - gap_start >= length) goto found; /* Visit left subtree if it looks promising */ @@ -1831,7 +1847,7 @@ check_current: struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_right) { gap_start = vma->vm_prev ? - vma->vm_prev->vm_end : 0; + vm_end_gap(vma->vm_prev) : 0; goto check_current; } } @@ -1869,7 +1885,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct vm_unmapped_area_info info; if (len > TASK_SIZE - mmap_min_addr) @@ -1880,9 +1896,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) return addr; } @@ -1914,7 +1931,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; unsigned long addr = addr0; struct vm_unmapped_area_info info; @@ -1929,9 +1946,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) return addr; } @@ -2071,7 +2089,8 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr, * update accounting. This is shared with both the * grow-up and grow-down cases. 
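The added gap_end > gap_start test above matters because, with guard gaps folded into both ends, a computed gap can now be inverted; without the test, the unsigned subtraction would wrap and an impossible gap would look enormous. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long gap_start = 0x201000UL, gap_end = 0x200000UL;
        unsigned long length = 0x1000UL;

        /* Old test: underflow turns the inverted gap into ~2^64. */
        printf("naive: %d\n", gap_end - gap_start >= length);  /* 1 */
        /* New test: reject inverted gaps before subtracting.  */
        printf("fixed: %d\n", gap_end > gap_start &&
                              gap_end - gap_start >= length);  /* 0 */
        return 0;
    }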
*/ -static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) +static int acct_stack_growth(struct vm_area_struct *vma, + unsigned long size, unsigned long grow) { struct mm_struct *mm = vma->vm_mm; struct rlimit *rlim = current->signal->rlim; @@ -2123,32 +2142,40 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns */ int expand_upwards(struct vm_area_struct *vma, unsigned long address) { - int error; + struct vm_area_struct *next; + unsigned long gap_addr; + int error = 0; if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; - /* - * We must make sure the anon_vma is allocated - * so that the anon_vma locking is not a noop. - */ + /* Guard against wrapping around to address 0. */ + address &= PAGE_MASK; + address += PAGE_SIZE; + if (!address) + return -ENOMEM; + + /* Enforce stack_guard_gap */ + gap_addr = address + stack_guard_gap; + if (gap_addr < address) + return -ENOMEM; + next = vma->vm_next; + if (next && next->vm_start < gap_addr) { + if (!(next->vm_flags & VM_GROWSUP)) + return -ENOMEM; + /* Check that both stack segments have the same anon_vma? */ + } + + /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; - vma_lock_anon_vma(vma); /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. - * Also guard against wrapping around to address 0. */ - if (address < PAGE_ALIGN(address+4)) - address = PAGE_ALIGN(address+4); - else { - vma_unlock_anon_vma(vma); - return -ENOMEM; - } - error = 0; + vma_lock_anon_vma(vma); /* Somebody else might have raced and expanded it already */ if (address > vma->vm_end) { @@ -2179,7 +2206,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) if (vma->vm_next) vma_gap_update(vma->vm_next); else - vma->vm_mm->highest_vm_end = address; + vma->vm_mm->highest_vm_end = vm_end_gap(vma); spin_unlock(&vma->vm_mm->page_table_lock); perf_event_mmap(vma); @@ -2199,27 +2226,36 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) int expand_downwards(struct vm_area_struct *vma, unsigned long address) { + struct vm_area_struct *prev; + unsigned long gap_addr; int error; - /* - * We must make sure the anon_vma is allocated - * so that the anon_vma locking is not a noop. - */ - if (unlikely(anon_vma_prepare(vma))) - return -ENOMEM; - address &= PAGE_MASK; error = security_mmap_addr(address); if (error) return error; - vma_lock_anon_vma(vma); + /* Enforce stack_guard_gap */ + gap_addr = address - stack_guard_gap; + if (gap_addr > address) + return -ENOMEM; + prev = vma->vm_prev; + if (prev && prev->vm_end > gap_addr) { + if (!(prev->vm_flags & VM_GROWSDOWN)) + return -ENOMEM; + /* Check that both stack segments have the same anon_vma? */ + } + + /* We must make sure the anon_vma is allocated. */ + if (unlikely(anon_vma_prepare(vma))) + return -ENOMEM; /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. 
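expand_upwards() above now refuses to grow a stack to within stack_guard_gap of the next mapping unless that mapping is itself a grow-up stack (the split-stack case). The rule as a standalone predicate over simplified types (all names invented):

    #include <stdbool.h>

    struct demo_vma { unsigned long start, end; bool grows_up; };

    static bool demo_may_expand_up(unsigned long address, unsigned long gap,
                                   const struct demo_vma *next)
    {
        unsigned long gap_addr = address + gap;

        if (gap_addr < address)
            return false;   /* gap would wrap past the address-space top */
        if (next && next->start < gap_addr && !next->grows_up)
            return false;   /* would close on a non-stack mapping */
        return true;
    }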
*/ + vma_lock_anon_vma(vma); /* Somebody else might have raced and expanded it already */ if (address < vma->vm_start) { @@ -2261,28 +2297,25 @@ int expand_downwards(struct vm_area_struct *vma, return error; } -/* - * Note how expand_stack() refuses to expand the stack all the way to - * abut the next virtual mapping, *unless* that mapping itself is also - * a stack mapping. We want to leave room for a guard page, after all - * (the guard page itself is not added here, that is done by the - * actual page faulting logic) - * - * This matches the behavior of the guard page logic (see mm/memory.c: - * check_stack_guard_page()), which only allows the guard page to be - * removed under these circumstances. - */ +/* enforced gap between the expanding stack and other mappings. */ +unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; + +static int __init cmdline_parse_stack_guard_gap(char *p) +{ + unsigned long val; + char *endptr; + + val = simple_strtoul(p, &endptr, 10); + if (!*endptr) + stack_guard_gap = val << PAGE_SHIFT; + + return 0; +} +__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); + #ifdef CONFIG_STACK_GROWSUP int expand_stack(struct vm_area_struct *vma, unsigned long address) { - struct vm_area_struct *next; - - address &= PAGE_MASK; - next = vma->vm_next; - if (next && next->vm_start == address + PAGE_SIZE) { - if (!(next->vm_flags & VM_GROWSUP)) - return -ENOMEM; - } return expand_upwards(vma, address); } @@ -2304,14 +2337,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) #else int expand_stack(struct vm_area_struct *vma, unsigned long address) { - struct vm_area_struct *prev; - - address &= PAGE_MASK; - prev = vma->vm_prev; - if (prev && prev->vm_end == address) { - if (!(prev->vm_flags & VM_GROWSDOWN)) - return -ENOMEM; - } return expand_downwards(vma, address); } @@ -2408,7 +2433,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, vma->vm_prev = prev; vma_gap_update(vma); } else - mm->highest_vm_end = prev ? prev->vm_end : 0; + mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; tail_vma->vm_next = NULL; if (mm->unmap_area == arch_unmap_area) addr = prev ? 
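With the default of 256 pages, the gap kept clear below (or above) an expanding stack comes to 1 MiB on a 4 KiB-page system, against the single page the old scheme reserved; stack_guard_gap= on the kernel command line rescales it. The arithmetic, for the record:

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_shift = 12;  /* 4 KiB pages assumed */
        unsigned long gap = 256UL << page_shift;

        /* prints: 1048576 bytes (1 MiB) */
        printf("%lu bytes (%lu MiB)\n", gap, gap >> 20);
        return 0;
    }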
prev->vm_end : mm->mmap_base; diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 86eedbaf037f..6298599cd974 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -483,6 +483,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, newsk->sk_backlog_rcv = dccp_v4_do_rcv; newnp->pktoptions = NULL; newnp->opt = NULL; + newnp->ipv6_mc_list = NULL; + newnp->ipv6_ac_list = NULL; + newnp->ipv6_fl_list = NULL; newnp->mcast_oif = inet6_iif(skb); newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; @@ -558,6 +561,10 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, /* Clone RX bits */ newnp->rxopt.all = np->rxopt.all; + newnp->ipv6_mc_list = NULL; + newnp->ipv6_ac_list = NULL; + newnp->ipv6_fl_list = NULL; + /* Clone pktoptions received with SYN */ newnp->pktoptions = NULL; if (ireq6->pktopts != NULL) { diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index e6c353b665d5..316a3c0764d5 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -690,6 +690,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk, inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port; newsk->sk_write_space = sk_stream_write_space; + inet_sk(newsk)->mc_list = NULL; + newsk->sk_mark = inet_rsk(req)->ir_mark; newicsk->icsk_retransmits = 0; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 399e2cd2cdea..666f634c17b2 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -727,6 +727,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, ret = -EAGAIN; break; } + /* if __tcp_splice_read() got nothing while we have + * an skb in receive queue, we do not want to loop. + * This might happen with URG data. + */ + if (!skb_queue_empty(&sk->sk_receive_queue)) + break; sk_wait_data(sk, &timeo); if (signal_pending(current)) { ret = sock_intr_errno(timeo); diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 98580b30f274..b1588bfad2c9 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -364,35 +364,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev) static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, - u8 type, u8 code, int offset, __be32 info) + u8 type, u8 code, int offset, __be32 info) { - const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data; - __be16 *p = (__be16 *)(skb->data + offset); - int grehlen = offset + 4; + const struct gre_base_hdr *greh; + const struct ipv6hdr *ipv6h; + int grehlen = sizeof(*greh); struct ip6_tnl *t; + int key_off = 0; __be16 flags; + __be32 key; - flags = p[0]; - if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) { - if (flags&(GRE_VERSION|GRE_ROUTING)) - return; - if (flags&GRE_KEY) { - grehlen += 4; - if (flags&GRE_CSUM) - grehlen += 4; - } + if (!pskb_may_pull(skb, offset + grehlen)) + return; + greh = (const struct gre_base_hdr *)(skb->data + offset); + flags = greh->flags; + if (flags & (GRE_VERSION | GRE_ROUTING)) + return; + if (flags & GRE_CSUM) + grehlen += 4; + if (flags & GRE_KEY) { + key_off = grehlen + offset; + grehlen += 4; } - /* If only 8 bytes returned, keyed message will be dropped here */ - if (!pskb_may_pull(skb, grehlen)) + if (!pskb_may_pull(skb, offset + grehlen)) return; ipv6h = (const struct ipv6hdr *)skb->data; - p = (__be16 *)(skb->data + offset); + greh = (const struct gre_base_hdr *)(skb->data + offset); + key = key_off ? *(__be32 *)(skb->data + key_off) : 0; t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr, - flags & GRE_KEY ? 
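The ip6gre_err() rework above is a bounds-checking pattern in its own right: pull the fixed header first, grow the expected length according to the flag bits, then pull again before touching the optional key. The same shape against a plain buffer (userspace sketch; the key is left in network byte order, and only the C and K bits of the first GRE byte are modelled):

    #include <stdint.h>
    #include <string.h>

    /* Returns 0 and stores the key (0 when absent), -1 if truncated. */
    static int demo_parse_gre_key(const uint8_t *pkt, size_t len,
                                  uint32_t *key)
    {
        size_t need = 4;            /* fixed part: flags + protocol */
        size_t key_off = 0;

        if (len < need)
            return -1;
        if (pkt[0] & 0x80)          /* C bit: checksum word present */
            need += 4;
        if (pkt[0] & 0x20) {        /* K bit: key word present */
            key_off = need;
            need += 4;
        }
        if (len < need)             /* re-check after growing `need` */
            return -1;
        *key = 0;
        if (key_off)
            memcpy(key, pkt + key_off, 4);
        return 0;
    }

    int main(void)
    {
        /* Minimal keyed GRE header: K bit set, key = de:ad:be:ef. */
        const uint8_t pkt[8] = { 0x20, 0x00, 0x86, 0xdd,
                                 0xde, 0xad, 0xbe, 0xef };
        uint32_t key = 0;

        return demo_parse_gre_key(pkt, sizeof(pkt), &key);
    }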
- *(((__be32 *)p) + (grehlen / 4) - 1) : 0, - p[1]); + key, greh->protocol); if (t == NULL) return; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index f478169238d2..bdd90f2a4952 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1135,6 +1135,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, newtp->af_specific = &tcp_sock_ipv6_mapped_specific; #endif + newnp->ipv6_mc_list = NULL; newnp->ipv6_ac_list = NULL; newnp->ipv6_fl_list = NULL; newnp->pktoptions = NULL; @@ -1202,6 +1203,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, First: no IPv4 options. */ newinet->inet_opt = NULL; + newnp->ipv6_mc_list = NULL; newnp->ipv6_ac_list = NULL; newnp->ipv6_fl_list = NULL; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index d8f90dc91404..13a6d9084d85 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1290,13 +1290,16 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) return -EINVAL; } + mutex_lock(&fanout_mutex); + + err = -EINVAL; if (!po->running) - return -EINVAL; + goto out; + err = -EALREADY; if (po->fanout) - return -EALREADY; + goto out; - mutex_lock(&fanout_mutex); match = NULL; list_for_each_entry(f, &fanout_list, list) { if (f->id == id && @@ -1352,17 +1355,16 @@ static void fanout_release(struct sock *sk) struct packet_sock *po = pkt_sk(sk); struct packet_fanout *f; - f = po->fanout; - if (!f) - return; - mutex_lock(&fanout_mutex); - po->fanout = NULL; + f = po->fanout; + if (f) { + po->fanout = NULL; - if (atomic_dec_and_test(&f->sk_ref)) { - list_del(&f->list); - dev_remove_pack(&f->prot_hook); - kfree(f); + if (atomic_dec_and_test(&f->sk_ref)) { + list_del(&f->list); + dev_remove_pack(&f->prot_hook); + kfree(f); + } } mutex_unlock(&fanout_mutex); } diff --git a/sound/core/timer.c b/sound/core/timer.c index ee81c947c24b..0641d14bc423 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -1534,6 +1534,7 @@ static int snd_timer_user_tselect(struct file *file, if (err < 0) goto __err; + tu->qhead = tu->qtail = tu->qused = 0; kfree(tu->queue); tu->queue = NULL; kfree(tu->tqueue); @@ -1858,6 +1859,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, tu = file->private_data; unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read); + mutex_lock(&tu->ioctl_lock); spin_lock_irq(&tu->qlock); while ((long)count - result >= unit) { while (!tu->qused) { @@ -1873,7 +1875,9 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, add_wait_queue(&tu->qchange_sleep, &wait); spin_unlock_irq(&tu->qlock); + mutex_unlock(&tu->ioctl_lock); schedule(); + mutex_lock(&tu->ioctl_lock); spin_lock_irq(&tu->qlock); remove_wait_queue(&tu->qchange_sleep, &wait); @@ -1912,6 +1916,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, } spin_unlock_irq(&tu->qlock); _error: + mutex_unlock(&tu->ioctl_lock); return result > 0 ? result : err; }
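Last, the snd_timer change deserves a closing note: snd_timer_user_read() now holds ioctl_lock for the whole read but releases it, together with the queue spinlock, around schedule(), so a reader blocked waiting for timer events cannot starve concurrent ioctls. The sleep-and-relock shape in isolation (kernel-style sketch, invented names, signal handling elided):

    #include <linux/mutex.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    static DEFINE_MUTEX(demo_ioctl_lock);
    static DEFINE_SPINLOCK(demo_qlock);
    static DECLARE_WAIT_QUEUE_HEAD(demo_qchange);
    static unsigned int demo_qused;

    static void demo_wait_for_event(void)
    {
        DEFINE_WAIT(wait);

        mutex_lock(&demo_ioctl_lock);
        spin_lock_irq(&demo_qlock);
        while (!demo_qused) {
            prepare_to_wait(&demo_qchange, &wait, TASK_INTERRUPTIBLE);

            /* Drop every lock before sleeping... */
            spin_unlock_irq(&demo_qlock);
            mutex_unlock(&demo_ioctl_lock);

            schedule();

            /* ...and retake them in the same order afterwards. */
            mutex_lock(&demo_ioctl_lock);
            spin_lock_irq(&demo_qlock);
            finish_wait(&demo_qchange, &wait);
        }
        demo_qused--;   /* consume one event */
        spin_unlock_irq(&demo_qlock);
        mutex_unlock(&demo_ioctl_lock);
    }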