aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKalesh Singh <kaleshsingh@google.com>2024-04-04 22:21:32 -0700
committerKalesh Singh <kaleshsingh@google.com>2024-05-02 22:14:25 +0000
commit6815ef31953426c6fea60b033481938baf860658 (patch)
tree0a3a3dbd77868dbfc3d013c87c3fa3c25b4facc0
parent6b9e4046753a61c8a5814419b3424778f34c0a9d (diff)
downloadhikey-linaro-6815ef31953426c6fea60b033481938baf860658.tar.gz
ANDROID: 16K: Use MADV_DONTNEED to save VMA padding pages.
When performing LOAD segment extension, the dynamic linker knows what portion of the VMA is padding. In order for the kernel to implement mitigations that ensure app compatibility, the extent of the padding must be made available to the kernel. To achieve this, reuse MADV_DONTNEED on single VMAs to hint the padding range to the kernel. This information is then stored in vm_flag bits. This allows userspace (dynamic linker) to set the padding pages on the VMA without a need for new out-of-tree UAPI. Bug: 330117029 Bug: 327600007 Bug: 330767927 Bug: 328266487 Bug: 329803029 Change-Id: I3421de32ab38ad3cb0fbce73ecbd8f7314287cde Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
-rw-r--r--include/linux/pgsize_migration.h8
-rw-r--r--mm/madvise.c3
-rw-r--r--mm/pgsize_migration.c56
3 files changed, 67 insertions, 0 deletions
diff --git a/include/linux/pgsize_migration.h b/include/linux/pgsize_migration.h
index 60f719d44107..fd1e74ea4283 100644
--- a/include/linux/pgsize_migration.h
+++ b/include/linux/pgsize_migration.h
@@ -45,6 +45,9 @@ extern void vma_set_pad_pages(struct vm_area_struct *vma,
unsigned long nr_pages);
extern unsigned long vma_pad_pages(struct vm_area_struct *vma);
+
+extern void madvise_vma_pad_pages(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
#else /* PAGE_SIZE != SZ_4K || !defined(CONFIG_64BIT) */
static inline void vma_set_pad_pages(struct vm_area_struct *vma,
unsigned long nr_pages)
@@ -55,6 +58,11 @@ static inline unsigned long vma_pad_pages(struct vm_area_struct *vma)
{
return 0;
}
+
+static inline void madvise_vma_pad_pages(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
#endif /* PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT) */
static inline unsigned long vma_data_pages(struct vm_area_struct *vma)
diff --git a/mm/madvise.c b/mm/madvise.c
index b2a9177d8993..fa716030ba1d 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -11,6 +11,7 @@
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
+#include <linux/pgsize_migration.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
@@ -824,6 +825,8 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
+ madvise_vma_pad_pages(vma, start, end);
+
zap_page_range_single(vma, start, end - start, NULL);
return 0;
}
diff --git a/mm/pgsize_migration.c b/mm/pgsize_migration.c
index 54b29a8269bf..ce77c7af86de 100644
--- a/mm/pgsize_migration.c
+++ b/mm/pgsize_migration.c
@@ -121,5 +121,61 @@ unsigned long vma_pad_pages(struct vm_area_struct *vma)
return vma->vm_flags >> VM_PAD_SHIFT;
}
+
+static __always_inline bool str_has_suffix(const char *str, const char *suffix)
+{
+ size_t str_len = strlen(str);
+ size_t suffix_len = strlen(suffix);
+
+ if (str_len < suffix_len)
+ return false;
+
+ return !strncmp(str + str_len - suffix_len, suffix, suffix_len);
+}
+
+/*
+ * Saves the number of padding pages for an ELF segment mapping
+ * in vm_flags.
+ *
+ * The number of padding pages is deduced from the madvise DONTNEED range [start, end)
+ * if the following conditions are met:
+ * 1) The range is enclosed by a single VMA
+ * 2) The range ends at the end address of the VMA
+ * 3) The range starts at an address greater than the start address of the VMA
+ * 4) The number of the pages in the range does not exceed VM_TOTAL_PAD_PAGES.
+ * 5) The VMA is a regular file backed VMA (filemap_fault)
+ * 6) The file backing the VMA is a shared library (*.so)
+ */
+void madvise_vma_pad_pages(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ unsigned long nr_pad_pages;
+
+ if (!is_pgsize_migration_enabled())
+ return;
+
+ /* Only handle this for file backed VMAs */
+ if (!vma->vm_file || !vma->vm_ops || vma->vm_ops->fault != filemap_fault)
+ return;
+
+
+ /* Limit this to only shared libraries (*.so) */
+ if (!str_has_suffix(vma->vm_file->f_path.dentry->d_name.name, ".so"))
+ return;
+
+	/*
+	 * If the madvise range ends at the end of the VMA, save the number of
+	 * pages in vm_flags (only 4 bits are needed for 16kB-aligned ELFs).
+	 */
+ if (start <= vma->vm_start || end != vma->vm_end)
+ return;
+
+ nr_pad_pages = (end - start) >> PAGE_SHIFT;
+
+ if (!nr_pad_pages || nr_pad_pages > VM_TOTAL_PAD_PAGES)
+ return;
+
+ vma_set_pad_pages(vma, nr_pad_pages);
+}
#endif /* PAGE_SIZE == SZ_4K */
#endif /* CONFIG_64BIT */