mm/vma: make vma_is_accessible() available for general use
Let's move the vma_is_accessible() helper to include/linux/mm.h, which makes it available for general use. While here, this replaces all remaining open encodings for the VMA access check with vma_is_accessible(). Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> Acked-by: Guo Ren <guoren@kernel.org> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Guo Ren <guoren@kernel.org> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Paul Burton <paulburton@kernel.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Paul Mackerras <paulus@samba.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Cc: Rich Felker <dalias@libc.org> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> Cc: Arnaldo Carvalho de Melo <acme@kernel.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Nick Piggin <npiggin@gmail.com> Cc: Paul Mackerras <paulus@ozlabs.org> Cc: Will Deacon <will@kernel.org> Link: http://lkml.kernel.org/r/1582520593-30704-3-git-send-email-anshuman.khandual@arm.com Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Juhyung Park <qkrwngud825@gmail.com> Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com> Change-Id: I91740a7c971e7298fad5a3e117b1559adf546e6c
This commit is contained in:
parent
9afaeff153
commit
f9f893e9ec
@@ -125,7 +125,7 @@ good_area:
|
||||
case 1: /* read, present */
|
||||
goto acc_err;
|
||||
case 0: /* read, not present */
|
||||
if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
|
||||
if (!vma_is_accessible(vma))
|
||||
goto acc_err;
|
||||
}
|
||||
|
||||
|
@@ -142,7 +142,7 @@ good_area:
|
||||
goto bad_area;
|
||||
}
|
||||
} else {
|
||||
if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
|
||||
if (!vma_is_accessible(vma))
|
||||
goto bad_area;
|
||||
}
|
||||
}
|
||||
|
@@ -313,7 +313,7 @@ static bool access_error(bool is_write, bool is_exec,
|
||||
return false;
|
||||
}
|
||||
|
||||
if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
|
||||
if (unlikely(!vma_is_accessible(vma)))
|
||||
return true;
|
||||
/*
|
||||
* We should ideally do the vma pkey access check here. But in the
|
||||
|
@@ -327,7 +327,7 @@ int __execute_only_pkey(struct mm_struct *mm)
|
||||
static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
|
||||
{
|
||||
/* Do this check first since the vm_flags should be hot */
|
||||
if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC)
|
||||
if ((vma_is_accessible(vma)) != VM_EXEC)
|
||||
return false;
|
||||
|
||||
return (vma_pkey(vma) == vma->vm_mm->context.execute_only_pkey);
|
||||
|
@@ -372,7 +372,7 @@ static inline int access_error(int error_code, struct vm_area_struct *vma)
|
||||
return 1;
|
||||
|
||||
/* read, not present: */
|
||||
if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
|
||||
if (unlikely(!vma_is_accessible(vma)))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
|
@@ -1191,7 +1191,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
|
||||
return 1;
|
||||
|
||||
/* read, not present: */
|
||||
if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
|
||||
if (unlikely(!vma_is_accessible(vma)))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
|
@@ -75,7 +75,7 @@ int __execute_only_pkey(struct mm_struct *mm)
|
||||
static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
|
||||
{
|
||||
/* Do this check first since the vm_flags should be hot */
|
||||
if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC)
|
||||
if (vma_is_accessible(vma))
|
||||
return false;
|
||||
if (vma_pkey(vma) != vma->vm_mm->context.execute_only_pkey)
|
||||
return false;
|
||||
|
@@ -1655,6 +1655,11 @@ static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool vma_is_accessible(struct vm_area_struct *vma)
|
||||
{
|
||||
return vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SHMEM
|
||||
/*
|
||||
* The vma_is_shmem is not inline because it is used only by slow
|
||||
|
@@ -2683,7 +2683,7 @@ void task_numa_work(struct callback_head *work)
|
||||
* Skip inaccessible VMAs to avoid any confusion between
|
||||
* PROT_NONE and NUMA hinting ptes
|
||||
*/
|
||||
if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
|
||||
if (!vma_is_accessible(vma))
|
||||
continue;
|
||||
|
||||
do {
|
||||
|
2
mm/gup.c
2
mm/gup.c
@@ -1243,7 +1243,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
|
||||
* We want mlock to succeed for regions that have any permissions
|
||||
* other than PROT_NONE.
|
||||
*/
|
||||
if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
|
||||
if (vma_is_accessible(vma))
|
||||
gup_flags |= FOLL_FORCE;
|
||||
|
||||
/*
|
||||
|
@@ -4206,11 +4206,6 @@ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
|
||||
return VM_FAULT_FALLBACK;
|
||||
}
|
||||
|
||||
static inline bool vma_is_accessible(struct vm_area_struct *vma)
|
||||
{
|
||||
return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
|
||||
}
|
||||
|
||||
static vm_fault_t create_huge_pud(struct vm_fault *vmf)
|
||||
{
|
||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
||||
|
@@ -642,8 +642,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
|
||||
|
||||
if (flags & MPOL_MF_LAZY) {
|
||||
/* Similar to task_numa_work, skip inaccessible VMAs */
|
||||
if (!is_vm_hugetlb_page(vma) &&
|
||||
(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
|
||||
if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
|
||||
!(vma->vm_flags & VM_MIXEDMAP))
|
||||
change_prot_numa(vma, start, endvma);
|
||||
return 1;
|
||||
|
@@ -2332,8 +2332,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
|
||||
gap_addr = TASK_SIZE;
|
||||
|
||||
next = vma->vm_next;
|
||||
if (next && next->vm_start < gap_addr &&
|
||||
(next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
|
||||
if (next && next->vm_start < gap_addr && vma_is_accessible(next)) {
|
||||
if (!(next->vm_flags & VM_GROWSUP))
|
||||
return -ENOMEM;
|
||||
/* Check that both stack segments have the same anon_vma? */
|
||||
@@ -2414,7 +2413,7 @@ int expand_downwards(struct vm_area_struct *vma,
|
||||
prev = vma->vm_prev;
|
||||
/* Check that both stack segments have the same anon_vma? */
|
||||
if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
|
||||
(prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
|
||||
vma_is_accessible(prev)) {
|
||||
if (address - prev->vm_end < stack_guard_gap)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user