Diffstat (limited to 'mali_pixel/memory_group_manager.c')
-rw-r--r-- | mali_pixel/memory_group_manager.c | 519
1 file changed, 420 insertions, 99 deletions
diff --git a/mali_pixel/memory_group_manager.c b/mali_pixel/memory_group_manager.c
index 5c98a5d..0cde4e0 100644
--- a/mali_pixel/memory_group_manager.c
+++ b/mali_pixel/memory_group_manager.c
@@ -8,7 +8,7 @@
  */
 
 #include <linux/atomic.h>
-#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_MALI_MEMORY_GROUP_MANAGER_DEBUG_FS
 #include <linux/debugfs.h>
 #endif
 #include <linux/fs.h>
@@ -19,27 +19,41 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/version.h>
+#include <linux/limits.h>
 
 #include <linux/memory_group_manager.h>
 
 #include <soc/google/pt.h>
 
+#include <uapi/gpu/arm/midgard/platform/pixel/pixel_memory_group_manager.h>
+
+
+#define ORDER_SMALL_PAGE 0
+#define ORDER_LARGE_PAGE 9
+
+/* Borr does not have "real" PBHA support. However, since we only use a 36-bit PA on the bus,
+ * AxADDR[39:36] is wired up to the GPU AxUSER[PBHA] field seen by the rest of the system.
+ * Those AxADDR bits come from [39:36] in the page descriptor.
+ *
+ * Odin and Turse have "real" PBHA support using a dedicated output signal and page descriptor field.
+ * The AxUSER[PBHA] field is driven by the GPU's PBHA signal, and AxADDR[39:36] is dropped.
+ * The page descriptor PBHA field is [62:59].
+ *
+ * We could write to both of these locations, as each SoC only reads from its respective PBHA
+ * location with the other being ignored or dropped.
+ *
+ * b/148988078 contains confirmation of the above description.
+ */
+#if IS_ENABLED(CONFIG_SOC_GS101)
 #define PBHA_BIT_POS  (36)
+#else
+#define PBHA_BIT_POS  (59)
+#endif
 #define PBHA_BIT_MASK (0xf)
 
 #define MGM_PBHA_DEFAULT 0
-#define GROUP_ID_TO_PT_IDX(x) ((x)-1)
 
-/* The Mali driver requires that allocations made on one of the groups
- * are not treated specially.
- */
-#define MGM_RESERVED_GROUP_ID 0
-
-/* Imported memory is handled by the allocator of the memory, and the Mali
- * DDK will request a group_id for such memory via mgm_get_import_memory_id().
- * We specify which group we want to use for this here.
- */
-#define MGM_IMPORTED_MEMORY_GROUP_ID (MEMORY_GROUP_MANAGER_NR_GROUPS - 1)
+#define MGM_SENTINEL_PT_SIZE U64_MAX
 
 #define INVALID_GROUP_ID(group_id) \
 	(WARN_ON((group_id) < 0) || \
@@ -68,8 +82,12 @@ static inline vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma,
  * @lp_size: The number of allocated large(2MB) pages
  * @insert_pfn: The number of calls to map pages for CPU access.
  * @update_gpu_pte: The number of calls to update GPU page table entries.
- * @ptid: The partition ID for this group
+ * @ptid: The active partition ID for this group
  * @pbha: The PBHA bits assigned to this group,
+ * @base_pt: The base partition ID available to this group.
+ * @pt_num: The number of partitions available to this group.
+ * @active_pt_idx: The relative index for the partition backing the group.
+ *                 Different from the absolute ptid.
  * @state: The lifecycle state of the partition associated with this group
  * This structure allows page allocation information to be displayed via
  * debugfs. Display is organized per group with small and large sized pages.
@@ -77,11 +95,17 @@ static inline vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma,
 struct mgm_group {
 	atomic_t size;
 	atomic_t lp_size;
+#ifdef CONFIG_MALI_MEMORY_GROUP_MANAGER_DEBUG_FS
 	atomic_t insert_pfn;
 	atomic_t update_gpu_pte;
+#endif
 	ptid_t ptid;
 	ptpbha_t pbha;
+
+	u32 base_pt;
+	u32 pt_num;
+	u32 active_pt_idx;
 	enum {
 		MGM_GROUP_STATE_NEW = 0,
 		MGM_GROUP_STATE_ENABLED = 10,
@@ -91,10 +115,23 @@ struct mgm_group {
 };
 
 /**
+ * struct partition_stats - Structure for tracking sizing of a partition
+ *
+ * @capacity: The total capacity of each partition
+ * @size: The current size of each partition
+ */
+struct partition_stats {
+	u64 capacity;
+	atomic64_t size;
+};
+
+/**
  * struct mgm_groups - Structure for groups of memory group manager
  *
  * @groups: To keep track of the number of allocated pages of all groups
 * @ngroups: Number of groups actually used
+ * @npartitions: Number of partitions used by all groups combined
+ * @pt_stats: The sizing info for each partition
 * @dev: device attached
 * @pt_handle: Link to SLC partition data
 * @kobj: &sruct kobject used for linking to pixel_stats_sysfs node
@@ -106,10 +143,12 @@ struct mgm_group {
 struct mgm_groups {
 	struct mgm_group groups[MEMORY_GROUP_MANAGER_NR_GROUPS];
 	size_t ngroups;
+	size_t npartitions;
+	struct partition_stats *pt_stats;
 	struct device *dev;
 	struct pt_handle *pt_handle;
 	struct kobject kobj;
-#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_MALI_MEMORY_GROUP_MANAGER_DEBUG_FS
 	struct dentry *mgm_debugfs_root;
 #endif
 };
@@ -118,7 +157,7 @@ struct mgm_groups {
  * DebugFS
  */
 
-#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_MALI_MEMORY_GROUP_MANAGER_DEBUG_FS
 
 static int mgm_debugfs_state_get(void *data, u64 *val)
 {
@@ -249,15 +288,14 @@ static int mgm_debugfs_init(struct mgm_groups *mgm_data)
 	return 0;
 }
 
-#endif /* CONFIG_DEBUG_FS */
+#endif /* CONFIG_MALI_MEMORY_GROUP_MANAGER_DEBUG_FS */
 
 /*
  * Pixel Stats sysfs
 */
-extern struct kobject *pixel_stat_gpu_kobj;
+#ifdef CONFIG_MALI_PIXEL_STATS
 
-#define ORDER_SMALL_PAGE 0
-#define ORDER_LARGE_PAGE 9
+extern struct kobject *pixel_stat_gpu_kobj;
 
 #define MGM_ATTR_RO(_name) \
 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
@@ -343,41 +381,81 @@ static void mgm_sysfs_term(struct mgm_groups *data)
 	kobject_put(&data->kobj);
 }
 
+#else /* CONFIG_MALI_PIXEL_STATS */
+
+static int mgm_sysfs_init(struct mgm_groups *data)
+{
+	return 0;
+}
+
+static void mgm_sysfs_term(struct mgm_groups *data)
+{}
+
+#endif /* CONFIG_MALI_PIXEL_STATS */
+
+static int group_pt_id(struct mgm_groups *data, enum pixel_mgm_group_id group_id, int pt_index)
+{
+	struct mgm_group *group = &data->groups[group_id];
+	if (WARN_ON_ONCE(pt_index >= group->pt_num))
+		return 0;
+
+	return group->base_pt + pt_index;
+}
+
+static int group_active_pt_id(struct mgm_groups *data, enum pixel_mgm_group_id group_id)
+{
+	return group_pt_id(data, group_id, data->groups[group_id].active_pt_idx);
+}
+
 static atomic64_t total_gpu_pages = ATOMIC64_INIT(0);
 
-static void update_size(struct memory_group_manager_device *mgm_dev, int
-		group_id, int order, bool alloc)
+static atomic_t* get_size_counter(struct memory_group_manager_device* mgm_dev, int group_id, int order)
 {
-	static DEFINE_RATELIMIT_STATE(gpu_alloc_rs, 10*HZ, 1);
+	static atomic_t err_atomic;
 	struct mgm_groups *data = mgm_dev->data;
 
 	switch (order) {
 	case ORDER_SMALL_PAGE:
-		if (alloc) {
-			atomic_inc(&data->groups[group_id].size);
-			atomic64_inc(&total_gpu_pages);
-		} else {
-			WARN_ON(atomic_read(&data->groups[group_id].size) == 0);
-			atomic_dec(&data->groups[group_id].size);
-			atomic64_dec(&total_gpu_pages);
-		}
-		break;
-
+		return &data->groups[group_id].size;
 	case ORDER_LARGE_PAGE:
-		if (alloc) {
-			atomic_inc(&data->groups[group_id].lp_size);
-			atomic64_add(1 << ORDER_LARGE_PAGE, &total_gpu_pages);
-		} else {
-			WARN_ON(atomic_read(
-				&data->groups[group_id].lp_size) == 0);
-			atomic_dec(&data->groups[group_id].lp_size);
-			atomic64_sub(1 << ORDER_LARGE_PAGE, &total_gpu_pages);
-		}
-		break;
-
+		return &data->groups[group_id].lp_size;
 	default:
 		dev_err(data->dev, "Unknown order(%d)\n", order);
-		break;
+		return &err_atomic;
+	}
+}
+
+static void update_size(struct memory_group_manager_device *mgm_dev, int
+		group_id, int order, bool alloc)
+{
+	static DEFINE_RATELIMIT_STATE(gpu_alloc_rs, 10*HZ, 1);
+	atomic_t* size = get_size_counter(mgm_dev, group_id, order);
+
+	if (alloc) {
+		atomic_inc(size);
+		atomic64_add(1 << order, &total_gpu_pages);
+	} else {
+		if (atomic_dec_return(size) < 0) {
+			/* b/289501175
+			 * Pages are often 'migrated' to the SLC group, which needs special
+			 * accounting.
+			 *
+			 * TODO: Remove after SLC MGM decoupling b/290354607
+			 */
+			if (!WARN_ON(group_id != MGM_SLC_GROUP_ID)) {
+				/* Undo the dec, and instead decrement the reserved group counter.
+				 * This is still making the assumption that the migration came from
+				 * the reserved group. Currently this is always true, however it
+				 * might not be in future. It would be invasive and costly to track
+				 * where every page came from, so instead this will be fixed as part
+				 * of the b/290354607 effort.
+				 */
+				atomic_inc(size);
+				update_size(mgm_dev, MGM_RESERVED_GROUP_ID, order, alloc);
+				return;
+			}
+		}
+		atomic64_sub(1 << order, &total_gpu_pages);
 	}
 
 	if (atomic64_read(&total_gpu_pages) >= (4 << (30 - PAGE_SHIFT)) &&
@@ -385,6 +463,185 @@ static void update_size(struct memory_group_manager_device *mgm_dev, int
 		pr_warn("total_gpu_pages %lld\n", atomic64_read(&total_gpu_pages));
 }
 
+static void pt_size_invalidate(struct mgm_groups* data, int pt_idx)
+{
+	/* Set the size to a known sentinel value so that we can later detect an update */
+	atomic64_set(&data->pt_stats[pt_idx].size, MGM_SENTINEL_PT_SIZE);
+}
+
+static void pt_size_init(struct mgm_groups* data, int pt_idx, size_t size)
+{
+	/* The resize callback may have already been executed, which would have set
+	 * the correct size. Only update the size if this has not happened.
+	 * We can tell that no resize took place if the size is still a sentinel.
+	 */
+	atomic64_cmpxchg(&data->pt_stats[pt_idx].size, MGM_SENTINEL_PT_SIZE, size);
+}
+
+static void validate_ptid(struct mgm_groups* data, enum pixel_mgm_group_id group_id, int ptid)
+{
+	if (ptid == -EINVAL)
+		dev_err(data->dev, "Failed to get partition for group: %d\n", group_id);
+	else
+		dev_info(data->dev, "pt_client_mutate returned ptid=%d for group=%d", ptid, group_id);
+}
+
+static void update_group(struct mgm_groups* data,
+                         enum pixel_mgm_group_id group_id,
+                         int ptid,
+                         int relative_pt_idx)
+{
+	int const abs_pt_idx = group_pt_id(data, group_id, relative_pt_idx);
+	int const pbha = pt_pbha(data->dev->of_node, abs_pt_idx);
+
+	if (pbha == PT_PBHA_INVALID)
+		dev_err(data->dev, "Failed to get PBHA for group: %d\n", group_id);
+	else
+		dev_info(data->dev, "pt_pbha returned PBHA=%d for group=%d", pbha, group_id);
+
+	data->groups[group_id].ptid = ptid;
+	data->groups[group_id].pbha = pbha;
+	data->groups[group_id].state = MGM_GROUP_STATE_ENABLED;
+	data->groups[group_id].active_pt_idx = relative_pt_idx;
+}
+
+static void disable_partition(struct mgm_groups* data, enum pixel_mgm_group_id group_id)
+{
+	int const active_idx = group_active_pt_id(data, group_id);
+
+	/* Skip if not already enabled */
+	if (data->groups[group_id].state != MGM_GROUP_STATE_ENABLED)
+		return;
+
+	pt_client_disable_no_free(data->pt_handle, active_idx);
+	data->groups[group_id].state = MGM_GROUP_STATE_DISABLED_NOT_FREED;
+
+	pt_size_invalidate(data, active_idx);
+	pt_size_init(data, active_idx, 0);
+}
+
+static void enable_partition(struct mgm_groups* data, enum pixel_mgm_group_id group_id)
+{
+	int ptid;
+	size_t size = 0;
+	int const active_idx = group_active_pt_id(data, group_id);
+
+	/* Skip if already enabled */
+	if (data->groups[group_id].state == MGM_GROUP_STATE_ENABLED)
+		return;
+
+	pt_size_invalidate(data, active_idx);
+
+	ptid = pt_client_enable_size(data->pt_handle, active_idx, &size);
+
+	validate_ptid(data, group_id, ptid);
+
+	update_group(data, group_id, ptid, data->groups[group_id].active_pt_idx);
+
+	pt_size_init(data, active_idx, size);
+}
+
+static void set_group_partition(struct mgm_groups* data,
+                                enum pixel_mgm_group_id group_id,
+                                int new_pt_index)
+{
+	int ptid;
+	size_t size = 0;
+	int const active_idx = group_active_pt_id(data, group_id);
+	int const new_idx = group_pt_id(data, group_id, new_pt_index);
+
+	/* Early out if no changes are needed */
+	if (new_idx == active_idx)
+		return;
+
+	pt_size_invalidate(data, new_idx);
+
+	ptid = pt_client_mutate_size(data->pt_handle, active_idx, new_idx, &size);
+
+	validate_ptid(data, group_id, ptid);
+
+	update_group(data, group_id, ptid, new_pt_index);
+
+	pt_size_init(data, new_idx, size);
+	/* Reset old partition size */
+	atomic64_set(&data->pt_stats[active_idx].size, data->pt_stats[active_idx].capacity);
+}
+
+u64 pixel_mgm_query_group_size(struct memory_group_manager_device* mgm_dev,
+                               enum pixel_mgm_group_id group_id)
+{
+	struct mgm_groups *data;
+	struct mgm_group *group;
+	u64 size = 0;
+
+	/* Early out if the group doesn't exist */
+	if (INVALID_GROUP_ID(group_id))
+		goto done;
+
+	data = mgm_dev->data;
+	group = &data->groups[group_id];
+
+	/* Early out if the group has no partitions */
+	if (group->pt_num == 0)
+		goto done;
+
+	size = atomic64_read(&data->pt_stats[group_active_pt_id(data, group_id)].size);
+
+done:
+	return size;
+}
+EXPORT_SYMBOL(pixel_mgm_query_group_size);
+
+void pixel_mgm_resize_group_to_fit(struct memory_group_manager_device* mgm_dev,
+                                   enum pixel_mgm_group_id group_id,
+                                   u64 demand)
+{
+	struct mgm_groups *data;
+	struct mgm_group *group;
+	s64 diff, cur_size, min_diff = S64_MAX;
+	int pt_idx;
+
+	/* Early out if the group doesn't exist */
+	if (INVALID_GROUP_ID(group_id))
+		goto done;
+
+	data = mgm_dev->data;
+	group = &data->groups[group_id];
+
+	/* Early out if the group has no partitions */
+	if (group->pt_num == 0)
+		goto done;
+
+	/* We can disable the partition if there's no demand */
+	if (demand == 0)
+	{
+		disable_partition(data, group_id);
+		goto done;
+	}
+
+	/* Calculate best partition to use, by finding the nearest capacity */
+	for (pt_idx = 0; pt_idx < group->pt_num; ++pt_idx)
+	{
+		cur_size = data->pt_stats[group_pt_id(data, group_id, pt_idx)].capacity;
+		diff = abs(demand - cur_size);
+
+		if (diff > min_diff)
+			break;
+
+		min_diff = diff;
+	}
+
+	/* Ensure the partition is enabled before trying to mutate it */
+	enable_partition(data, group_id);
+	set_group_partition(data, group_id, pt_idx - 1);
+
+done:
+	dev_dbg(data->dev, "%s: resized memory_group_%d for demand: %lldB", __func__, group_id, demand);
+
+	return;
+}
+EXPORT_SYMBOL(pixel_mgm_resize_group_to_fit);
+
 static struct page *mgm_alloc_page(
 	struct memory_group_manager_device *mgm_dev, int group_id,
 	gfp_t gfp_mask, unsigned int order)
@@ -400,7 +657,7 @@ static struct page *mgm_alloc_page(
 		return NULL;
 
 	if (WARN_ON_ONCE((group_id != MGM_RESERVED_GROUP_ID) &&
-			(GROUP_ID_TO_PT_IDX(group_id) >= data->ngroups)))
+			(group_active_pt_id(data, group_id) >= data->npartitions)))
 		return NULL;
 
 	/* We don't expect to be allocting pages into the group used for
@@ -413,38 +670,9 @@
 	 * ensure that we have enabled the relevant partitions for it.
 	 */
 	if (group_id != MGM_RESERVED_GROUP_ID) {
-		int ptid, pbha;
 		switch (data->groups[group_id].state) {
 		case MGM_GROUP_STATE_NEW:
-			ptid = pt_client_enable(data->pt_handle,
-				GROUP_ID_TO_PT_IDX(group_id));
-			if (ptid == -EINVAL) {
-				dev_err(data->dev,
-					"Failed to get partition for group: "
-					"%d\n", group_id);
-			} else {
-				dev_info(data->dev,
-					"pt_client_enable returned ptid=%d for"
-					" group=%d",
-					ptid, group_id);
-			}
-
-			pbha = pt_pbha(data->dev->of_node,
-				GROUP_ID_TO_PT_IDX(group_id));
-			if (pbha == PT_PBHA_INVALID) {
-				dev_err(data->dev,
-					"Failed to get PBHA for group: %d\n",
-					group_id);
-			} else {
-				dev_info(data->dev,
-					"pt_pbha returned PBHA=%d for group=%d",
-					pbha, group_id);
-			}
-
-			data->groups[group_id].ptid = ptid;
-			data->groups[group_id].pbha = pbha;
-			data->groups[group_id].state = MGM_GROUP_STATE_ENABLED;
-
+			enable_partition(data, group_id);
 			break;
 		case MGM_GROUP_STATE_ENABLED:
 		case MGM_GROUP_STATE_DISABLED_NOT_FREED:
@@ -534,7 +762,7 @@ static u64 mgm_update_gpu_pte(
 
 	switch (group_id) {
 	case MGM_RESERVED_GROUP_ID:
-	case MGM_IMPORTED_MEMORY_GROUP_ID:
+	case MGM_IMPORTED_MEMORY_GROUP_ID:
 		/* The reserved group doesn't set PBHA bits */
 		/* TODO: Determine what to do with imported memory */
 		break;
@@ -558,7 +786,35 @@ static u64 mgm_update_gpu_pte(
 		}
 	}
 
+#ifdef CONFIG_MALI_MEMORY_GROUP_MANAGER_DEBUG_FS
 	atomic_inc(&data->groups[group_id].update_gpu_pte);
+#endif
+
+	return pte;
+}
+
+static u64 mgm_pte_to_original_pte(struct memory_group_manager_device *mgm_dev, int group_id,
+				   int mmu_level, u64 pte)
+{
+	struct mgm_groups *const data = mgm_dev->data;
+	u64 old_pte;
+
+	if (INVALID_GROUP_ID(group_id))
+		return pte;
+
+	switch (group_id) {
+	case MGM_RESERVED_GROUP_ID:
+	case MGM_IMPORTED_MEMORY_GROUP_ID:
+		/* The reserved group doesn't set PBHA bits */
+		/* TODO: Determine what to do with imported memory */
+		break;
+	default:
+		/* All other groups will have PBHA bits, so clear them */
+		old_pte = pte;
+		pte &= ~((u64)PBHA_BIT_MASK << PBHA_BIT_POS);
+		dev_dbg(data->dev, "%s: group_id=%d pte=0x%llx -> 0x%llx\n", __func__, group_id,
+			old_pte, pte);
+	}
 
 	return pte;
 }
@@ -582,57 +838,105 @@ static vm_fault_t mgm_vmf_insert_pfn_prot(
 
 	fault = vmf_insert_pfn_prot(vma, addr, pfn, prot);
 
-	if (fault == VM_FAULT_NOPAGE)
-		atomic_inc(&data->groups[group_id].insert_pfn);
-	else
+	if (fault != VM_FAULT_NOPAGE)
 		dev_err(data->dev, "vmf_insert_pfn_prot failed\n");
+#ifdef CONFIG_MALI_MEMORY_GROUP_MANAGER_DEBUG_FS
+	else
+		atomic_inc(&data->groups[group_id].insert_pfn);
+#endif
 
 	return fault;
 }
 
 static void mgm_resize_callback(void *data, int id, size_t size_allocated)
 {
-	/* Currently we don't do anything on partition resize */
 	struct mgm_groups *const mgm_data = (struct mgm_groups *)data;
 
-	dev_dbg(mgm_data->dev, "Resize callback called, size_allocated: %zu\n",
-		size_allocated);
+	dev_dbg(mgm_data->dev, "Resize callback called, size_allocated: %zu\n", size_allocated);
+
+	/* Update the partition size for the group */
+	atomic64_set(&mgm_data->pt_stats[id].size, size_allocated);
 }
 
 static int mgm_initialize_data(struct mgm_groups *mgm_data)
 {
 	int i, ret;
 
-	const int ngroups = of_property_count_strings(mgm_data->dev->of_node, "pt_id");
+	/* +1 to include the required default group */
+	const int ngroups = of_property_count_strings(mgm_data->dev->of_node, "groups") + 1;
+
 	if (WARN_ON(ngroups < 0) ||
 	    WARN_ON(ngroups > MEMORY_GROUP_MANAGER_NR_GROUPS)) {
 		mgm_data->ngroups = 0;
 	} else {
 		mgm_data->ngroups = ngroups;
 	}
+	mgm_data->npartitions = of_property_count_strings(mgm_data->dev->of_node, "pt_id");
+
+	mgm_data->pt_stats = kzalloc(mgm_data->npartitions * sizeof(struct partition_stats), GFP_KERNEL);
+	if (mgm_data->pt_stats == NULL) {
+		dev_err(mgm_data->dev, "failed to allocate space for pt_stats");
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	for (i = 0; i < mgm_data->npartitions; i++) {
+		struct partition_stats* stats;
+		u32 capacity_kb;
+		ret = of_property_read_u32_index(mgm_data->dev->of_node, "pt_size", i, &capacity_kb);
+		if (ret) {
+			dev_err(mgm_data->dev, "failed to read pt_size[%d]", i);
+			continue;
+		}
+
+		stats = &mgm_data->pt_stats[i];
+		// Convert from KB to bytes
+		stats->capacity = (u64)capacity_kb << 10;
+		atomic64_set(&stats->size, stats->capacity);
+	}
 
 	for (i = 0; i < MEMORY_GROUP_MANAGER_NR_GROUPS; i++) {
 		atomic_set(&mgm_data->groups[i].size, 0);
 		atomic_set(&mgm_data->groups[i].lp_size, 0);
+#ifdef CONFIG_MALI_MEMORY_GROUP_MANAGER_DEBUG_FS
 		atomic_set(&mgm_data->groups[i].insert_pfn, 0);
 		atomic_set(&mgm_data->groups[i].update_gpu_pte, 0);
+#endif
 
 		mgm_data->groups[i].pbha = MGM_PBHA_DEFAULT;
+		mgm_data->groups[i].base_pt = 0;
+		mgm_data->groups[i].pt_num = 0;
+		mgm_data->groups[i].active_pt_idx = 0;
 		mgm_data->groups[i].state = MGM_GROUP_STATE_NEW;
 	}
 
+	/* Discover the partitions belonging to each memory group, skipping the reserved group */
+	for (i = 1; i < mgm_data->ngroups; i++) {
+		/* Device tree has no description for the reserved group */
+		int const dt_idx = i - 1;
+
+		int err = of_property_read_u32_index(
+			mgm_data->dev->of_node, "group_base_pt", dt_idx, &mgm_data->groups[i].base_pt);
+		if (err) {
+			dev_warn(mgm_data->dev, "failed to read base pt index for group %d", i);
+			continue;
+		}
+
+		err = of_property_read_u32_index(
+			mgm_data->dev->of_node, "group_pt_num", dt_idx, &mgm_data->groups[i].pt_num);
+		if (err)
+			dev_warn(mgm_data->dev, "failed to read pt number for group %d", i);
+	}
+
 	/*
 	 * Initialize SLC partitions. We don't enable partitions until
 	 * we actually allocate memory to the corresponding memory
 	 * group
 	 */
-	mgm_data->pt_handle = pt_client_register(
-		mgm_data->dev->of_node,
-		(void *)mgm_data, &mgm_resize_callback);
+	mgm_data->pt_handle =
+		pt_client_register(mgm_data->dev->of_node, (void*)mgm_data, &mgm_resize_callback);
 
 	if (IS_ERR(mgm_data->pt_handle)) {
 		ret = PTR_ERR(mgm_data->pt_handle);
 		dev_err(mgm_data->dev, "pt_client_register returned %d\n", ret);
-		return ret;
+		goto out_err;
 	}
 
 	/* We don't use PBHA bits for the reserved memory group, and so
@@ -640,13 +944,26 @@ static int mgm_initialize_data(struct mgm_groups *mgm_data)
 	 */
 	mgm_data->groups[MGM_RESERVED_GROUP_ID].state = MGM_GROUP_STATE_ENABLED;
 
-	ret = mgm_debugfs_init(mgm_data);
-	if (ret)
-		goto out;
+	if ((ret = mgm_debugfs_init(mgm_data)))
+		goto out_err;
 
-	ret = mgm_sysfs_init(mgm_data);
+	if ((ret = mgm_sysfs_init(mgm_data)))
+		goto out_err;
+
+#ifdef CONFIG_MALI_PIXEL_GPU_SLC
+	/* We enable the SLC partition by default to support dynamic SLC caching.
+	 * Enabling will initialize the partition, by querying the pbha and assigning a ptid.
+	 * We then immediately disable the partition, effectively resizing the group to zero,
+	 * whilst still retaining other properties such as pbha.
+	 */
+	enable_partition(mgm_data, MGM_SLC_GROUP_ID);
+	disable_partition(mgm_data, MGM_SLC_GROUP_ID);
+#endif
 
-out:
+	return ret;
+
+out_err:
+	kfree(mgm_data->pt_stats);
 	return ret;
 }
 
@@ -677,8 +994,10 @@ static void mgm_term_data(struct mgm_groups *data)
 			break;
 
 		case MGM_GROUP_STATE_ENABLED:
+			pt_client_disable(data->pt_handle, group_active_pt_id(data, i));
+			break;
 		case MGM_GROUP_STATE_DISABLED_NOT_FREED:
-			pt_client_free(data->pt_handle, group->ptid);
+			pt_client_free(data->pt_handle, group_active_pt_id(data, i));
 			break;
 
 		default:
@@ -704,12 +1023,14 @@ static int memory_group_manager_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	mgm_dev->owner = THIS_MODULE;
-	mgm_dev->ops.mgm_alloc_page = mgm_alloc_page;
-	mgm_dev->ops.mgm_free_page = mgm_free_page;
-	mgm_dev->ops.mgm_get_import_memory_id =
-		mgm_get_import_memory_id;
-	mgm_dev->ops.mgm_vmf_insert_pfn_prot = mgm_vmf_insert_pfn_prot;
-	mgm_dev->ops.mgm_update_gpu_pte = mgm_update_gpu_pte;
+	mgm_dev->ops = (struct memory_group_manager_ops){
+		.mgm_alloc_page = mgm_alloc_page,
+		.mgm_free_page = mgm_free_page,
+		.mgm_get_import_memory_id = mgm_get_import_memory_id,
+		.mgm_update_gpu_pte = mgm_update_gpu_pte,
+		.mgm_pte_to_original_pte = mgm_pte_to_original_pte,
+		.mgm_vmf_insert_pfn_prot = mgm_vmf_insert_pfn_prot,
+	};
 
 	mgm_data = kzalloc(sizeof(*mgm_data), GFP_KERNEL);
 	if (!mgm_data) {
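
The reworked update_size() above counts pages in units of 1 << order and, on a free that would drive a group counter negative, reattributes the free from the SLC group to the reserved group (the b/289501175 workaround). Below is a standalone userspace sketch of that reattribution logic using C11 atomics; the group names and the track_free() helper are illustrative, not part of the driver.

#include <stdatomic.h>
#include <stdio.h>

enum { RESERVED_GROUP, SLC_GROUP, NR_GROUPS };

static _Atomic int group_pages[NR_GROUPS];

static void track_free(int group_id, int npages)
{
	if (atomic_fetch_sub(&group_pages[group_id], npages) - npages < 0 &&
	    group_id == SLC_GROUP) {
		/* The page was migrated reserved -> SLC without the counters
		 * being moved: undo the decrement and charge the free to the
		 * reserved group instead, as update_size() does. */
		atomic_fetch_add(&group_pages[group_id], npages);
		track_free(RESERVED_GROUP, npages);
	}
}

int main(void)
{
	atomic_store(&group_pages[RESERVED_GROUP], 8);

	track_free(SLC_GROUP, 1); /* SLC counter is 0: free is reattributed */

	printf("reserved=%d slc=%d\n",
	       atomic_load(&group_pages[RESERVED_GROUP]),
	       atomic_load(&group_pages[SLC_GROUP])); /* reserved=7 slc=0 */
	return 0;
}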
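pt_size_invalidate()/pt_size_init() cooperate with mgm_resize_callback() through a sentinel: the size is parked on MGM_SENTINEL_PT_SIZE before a partition is enabled or mutated, and the enable-time size is installed with a compare-exchange so it never overwrites a size that a racing resize callback has already published. A minimal userspace sketch of the same pattern (illustrative names, C11 atomics standing in for the kernel's atomic64 API):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SENTINEL_PT_SIZE UINT64_MAX

static _Atomic uint64_t pt_size;

/* Mirrors pt_size_invalidate(): park the counter on a sentinel. */
static void size_invalidate(void)
{
	atomic_store(&pt_size, SENTINEL_PT_SIZE);
}

/* Mirrors mgm_resize_callback(): the callback is always authoritative. */
static void resize_callback(uint64_t size_allocated)
{
	atomic_store(&pt_size, size_allocated);
}

/* Mirrors pt_size_init(): only install the enable-time size if no
 * resize callback ran in between, i.e. the sentinel is still there. */
static void size_init(uint64_t size)
{
	uint64_t expected = SENTINEL_PT_SIZE;
	atomic_compare_exchange_strong(&pt_size, &expected, size);
}

int main(void)
{
	size_invalidate();
	resize_callback(4096); /* callback wins the race... */
	size_init(1024);       /* ...so this cmpxchg is a no-op */
	printf("pt size = %llu\n",
	       (unsigned long long)atomic_load(&pt_size)); /* prints 4096 */
	return 0;
}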
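pixel_mgm_resize_group_to_fit() picks the partition whose capacity is nearest to the demand, breaking out of the loop as soon as the distance starts growing and then selecting pt_idx - 1. That early break only finds the true nearest capacity if the pt_size entries are listed in ascending order, which this sketch assumes:

#include <stdint.h>
#include <stdio.h>

/* Returns the index of the partition whose capacity is nearest to the
 * demand, mirroring the loop in pixel_mgm_resize_group_to_fit(). With
 * capacities sorted ascending, distances shrink toward the best fit
 * and grow monotonically afterwards, so the first increase ends the
 * search. */
static int best_fit_partition(const int64_t *capacity, int pt_num, int64_t demand)
{
	int64_t diff, min_diff = INT64_MAX;
	int pt_idx;

	for (pt_idx = 0; pt_idx < pt_num; ++pt_idx) {
		diff = demand > capacity[pt_idx] ? demand - capacity[pt_idx]
						 : capacity[pt_idx] - demand;
		if (diff > min_diff)
			break; /* distances started growing again */

		min_diff = diff;
	}

	/* The loop always overshoots by one, hence pt_idx - 1; if it runs
	 * to completion, the largest partition wins, as in the driver. */
	return pt_idx - 1;
}

int main(void)
{
	const int64_t caps_bytes[] = { 1 << 20, 4 << 20, 8 << 20 };

	printf("%d\n", best_fit_partition(caps_bytes, 3, 3 << 20)); /* 1 */
	return 0;
}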
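Finally, the PBHA handling: mgm_update_gpu_pte() packs a group's PBHA value into bits [PBHA_BIT_POS+3:PBHA_BIT_POS] of the PTE, and the new mgm_pte_to_original_pte() strips them again. Only the clearing direction appears in this diff, so the set direction below is an assumption that mirrors it; the helpers are standalone and not driver code.

#include <assert.h>
#include <stdint.h>

/* 36 on gs101 (Borr); 59 on SoCs with a dedicated PBHA descriptor field */
#define PBHA_BIT_POS  36
#define PBHA_BIT_MASK 0xfULL

/* Assumed set direction: the mirror image of the clear shown in the diff. */
static uint64_t pte_set_pbha(uint64_t pte, uint64_t pbha)
{
	pte &= ~(PBHA_BIT_MASK << PBHA_BIT_POS); /* drop any stale value */
	return pte | ((pbha & PBHA_BIT_MASK) << PBHA_BIT_POS);
}

/* What mgm_pte_to_original_pte() does for PBHA-carrying groups. */
static uint64_t pte_clear_pbha(uint64_t pte)
{
	return pte & ~(PBHA_BIT_MASK << PBHA_BIT_POS);
}

int main(void)
{
	uint64_t pte = 0x12345000u; /* arbitrary PTE-like value */

	/* Round-trip: encoding then clearing recovers the original PTE. */
	assert(pte_clear_pbha(pte_set_pbha(pte, 0x9)) == pte);
	assert(((pte_set_pbha(pte, 0x9) >> PBHA_BIT_POS) & 0xf) == 0x9);
	return 0;
}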