author     Suren Baghdasaryan <surenb@google.com>    2023-05-23 18:33:20 -0700
committer  Will McVicker <willmcvicker@google.com>   2023-08-16 12:15:51 -0700
commit     de85b3c05698f1ce2829d3ff977dee90be48b2d8 (patch)
tree       797d2789b04f0f63905330f511af57755c37d054
parent     eaab819dab16d5fb3ea0df1e4141fe8e8c36ad2d (diff)
mali_kbase: replace direct vm_flags modifications with modifier functions
With the introduction of [1] in 6.4 kernels, direct vm_flags modifications
are disallowed. Use the modifier functions to change VMA flags instead.

[1] https://lkml.kernel.org/r/20230126193752.297968-3-surenb@google.com

Bug: 161210518
Change-Id: Iaba9394269ec1df917d1c7d450bff9c71642b70f
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
-rw-r--r--  mali_kbase/mali_kbase_mem_linux.c  22
1 file changed, 11 insertions(+), 11 deletions(-)
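For context, the mechanical pattern applied throughout the diff below is: every
direct write of vma->vm_flags ("|= flags" / "&= ~flags") becomes a call to the
vm_flags_set()/vm_flags_clear() wrappers, while reads of vma->vm_flags stay as
they are. A minimal, self-contained sketch of that pattern follows; the
my_dev_mmap() handler and the particular flags are illustrative assumptions,
not code taken from mali_kbase.

/*
 * Minimal sketch (not from mali_kbase): an mmap handler that changes VMA
 * flags through the wrapper API instead of writing vma->vm_flags directly.
 * my_dev_mmap() and the chosen flags are illustrative assumptions.
 */
#include <linux/fs.h>
#include <linux/mm.h>

static int my_dev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* Old form: vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP | ... */
	vm_flags_set(vma, VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO);

	/* Old form: vma->vm_flags &= ~VM_MAYWRITE.  Drop VM_MAYWRITE when the
	 * mapping was not requested writable, so it cannot later be
	 * mprotect()ed to PROT_WRITE.
	 */
	if (!(vma->vm_flags & VM_WRITE))
		vm_flags_clear(vma, VM_MAYWRITE);

	return 0;
}

Reading vm_flags directly is still fine; only modifications go through the
wrappers, which assert that mmap_lock is held for write. Inside an
fops->mmap() handler that assertion is already satisfied, since the caller
takes mmap_lock for write before invoking it.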
diff --git a/mali_kbase/mali_kbase_mem_linux.c b/mali_kbase/mali_kbase_mem_linux.c
index 0b323b0..23d55b2 100644
--- a/mali_kbase/mali_kbase_mem_linux.c
+++ b/mali_kbase/mali_kbase_mem_linux.c
@@ -2598,7 +2598,7 @@ static int kbase_cpu_mmap(struct kbase_context *kctx,
* See MIDBASE-1057
*/
- vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO;
+ vm_flags_set(vma, VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO);
vma->vm_ops = &kbase_vm_ops;
vma->vm_private_data = map;
@@ -2627,11 +2627,11 @@ static int kbase_cpu_mmap(struct kbase_context *kctx,
}
if (!kaddr) {
- vma->vm_flags |= VM_PFNMAP;
+ vm_flags_set(vma, VM_PFNMAP);
} else {
WARN_ON(aligned_offset);
/* MIXEDMAP so we can vfree the kaddr early and not track it after map time */
- vma->vm_flags |= VM_MIXEDMAP;
+ vm_flags_set(vma, VM_MIXEDMAP);
/* vmalloc remaping is easy... */
err = remap_vmalloc_range(vma, kaddr, 0);
WARN_ON(err);
@@ -2847,9 +2847,9 @@ int kbase_context_mmap(struct kbase_context *const kctx,
dev_dbg(dev, "kbase_mmap\n");
if (!(vma->vm_flags & VM_READ))
- vma->vm_flags &= ~VM_MAYREAD;
+ vm_flags_clear(vma, VM_MAYREAD);
if (!(vma->vm_flags & VM_WRITE))
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_clear(vma, VM_MAYWRITE);
if (nr_pages == 0) {
err = -EINVAL;
@@ -3283,8 +3283,8 @@ static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_
spin_unlock(&kctx->mm_update_lock);
/* no real access */
- vma->vm_flags &= ~(VM_READ | VM_MAYREAD | VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+ vm_flags_clear(vma, VM_READ | VM_MAYREAD | VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
+ vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
vma->vm_ops = &kbase_vm_special_ops;
vma->vm_private_data = kctx;
@@ -3480,13 +3480,13 @@ static int kbase_csf_cpu_mmap_user_io_pages(struct kbase_context *kctx,
if (err)
goto map_failed;
- vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO;
+ vm_flags_set(vma, VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO);
/* TODO use VM_MIXEDMAP, since it is more appropriate as both types of
* memory with and without "struct page" backing are being inserted here.
* Hw Doorbell pages comes from the device register area so kernel does
* not use "struct page" for them.
*/
- vma->vm_flags |= VM_PFNMAP;
+ vm_flags_set(vma, VM_PFNMAP);
vma->vm_ops = &kbase_csf_user_io_pages_vm_ops;
vma->vm_private_data = queue;
@@ -3592,12 +3592,12 @@ static int kbase_csf_cpu_mmap_user_reg_page(struct kbase_context *kctx,
/* Map uncached */
vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO;
+ vm_flags_set(vma, VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO);
/* User register page comes from the device register area so
* "struct page" isn't available for it.
*/
- vma->vm_flags |= VM_PFNMAP;
+ vm_flags_set(vma, VM_PFNMAP);
kctx->csf.user_reg_vma = vma;
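If the same driver source also has to build against kernels that predate the
wrapper API, one common out-of-tree approach (not part of this patch) is a
small compatibility shim that supplies the wrappers as trivial inline helpers.
The version cutoff below is an assumption to verify against the target tree:
upstream, vm_flags_set()/vm_flags_clear() first appear in v6.3, while the
commit message above refers to 6.4 kernels.

/*
 * Hypothetical compatibility shim, not part of this patch: provide the
 * vm_flags wrappers on older kernels so driver code can call them
 * unconditionally.  Check the version cutoff against the target tree.
 */
#include <linux/version.h>
#include <linux/mm.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
static inline void vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
{
	/* Direct modification is still allowed on these older kernels. */
	vma->vm_flags |= flags;
}

static inline void vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags)
{
	vma->vm_flags &= ~flags;
}
#endif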