Revert "mm: protect mremap() against SPF handler"

This reverts commit d91303f0a1.

Change-Id: I584833c397d4c5ea35a47cf05b7b70c1bd82269a
Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>
UtsavBalar1231 2022-04-28 19:19:48 +05:30 committed by spakkkk
parent 9e95a24d59
commit 4a01adb0b2
3 changed files with 17 additions and 72 deletions


@@ -2336,29 +2336,16 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
struct vm_area_struct *expand, bool keep_locked);
struct vm_area_struct *expand);
static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
return __vma_adjust(vma, start, end, pgoff, insert, NULL, false);
return __vma_adjust(vma, start, end, pgoff, insert, NULL);
}
extern struct vm_area_struct *__vma_merge(struct mm_struct *mm,
extern struct vm_area_struct *vma_merge(struct mm_struct *,
struct vm_area_struct *prev, unsigned long addr, unsigned long end,
unsigned long vm_flags, struct anon_vma *anon, struct file *file,
pgoff_t pgoff, struct mempolicy *mpol, struct vm_userfaultfd_ctx uff,
const char __user *user, bool keep_locked);
static inline struct vm_area_struct *vma_merge(struct mm_struct *mm,
struct vm_area_struct *prev, unsigned long addr, unsigned long end,
unsigned long vm_flags, struct anon_vma *anon, struct file *file,
pgoff_t off, struct mempolicy *pol, struct vm_userfaultfd_ctx uff,
const char __user *user)
{
return __vma_merge(mm, prev, addr, end, vm_flags, anon, file, off,
pol, uff, user, false);
}
unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
struct mempolicy *, struct vm_userfaultfd_ctx, const char __user *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
unsigned long addr, int new_below);


@@ -684,7 +684,7 @@ static inline void __vma_unlink_prev(struct mm_struct *mm,
*/
int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
struct vm_area_struct *expand, bool keep_locked)
struct vm_area_struct *expand)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
@@ -780,12 +780,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
importer->anon_vma = exporter->anon_vma;
error = anon_vma_clone(importer, exporter);
if (error) {
if (next && next != vma)
vm_write_end(next);
vm_write_end(vma);
if (error)
return error;
}
}
}
again:
@@ -980,8 +976,7 @@ again:
if (next && next != vma)
vm_write_end(next);
if (!keep_locked)
vm_write_end(vma);
vm_write_end(vma);
validate_mm(mm);
@@ -1122,13 +1117,13 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
* parameter) may establish ptes with the wrong permissions of NNNN
* instead of the right permissions of XXXX.
*/
struct vm_area_struct *__vma_merge(struct mm_struct *mm,
struct vm_area_struct *vma_merge(struct mm_struct *mm,
struct vm_area_struct *prev, unsigned long addr,
unsigned long end, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file,
pgoff_t pgoff, struct mempolicy *policy,
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
const char __user *anon_name, bool keep_locked)
const char __user *anon_name)
{
pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
struct vm_area_struct *area, *next;
@@ -1178,11 +1173,10 @@ struct vm_area_struct *__vma_merge(struct mm_struct *mm,
/* cases 1, 6 */
err = __vma_adjust(prev, prev->vm_start,
next->vm_end, prev->vm_pgoff, NULL,
prev, keep_locked);
prev);
} else /* cases 2, 5, 7 */
err = __vma_adjust(prev, prev->vm_start,
end, prev->vm_pgoff, NULL, prev,
keep_locked);
end, prev->vm_pgoff, NULL, prev);
if (err)
return NULL;
khugepaged_enter_vma_merge(prev, vm_flags);
@@ -1200,12 +1194,10 @@ struct vm_area_struct *__vma_merge(struct mm_struct *mm,
anon_name)) {
if (prev && addr < prev->vm_end) /* case 4 */
err = __vma_adjust(prev, prev->vm_start,
addr, prev->vm_pgoff, NULL, next,
keep_locked);
addr, prev->vm_pgoff, NULL, next);
else { /* cases 3, 8 */
err = __vma_adjust(area, addr, next->vm_end,
next->vm_pgoff - pglen, NULL, next,
keep_locked);
next->vm_pgoff - pglen, NULL, next);
/*
* In case 3 area is already equal to next and
* this is a noop, but in case 8 "area" has
@@ -3229,21 +3221,9 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
return NULL; /* should never get here */
/* There is 3 cases to manage here in
* AAAA AAAA AAAA AAAA
* PPPP.... PPPP......NNNN PPPP....NNNN PP........NN
* PPPPPPPP(A) PPPP..NNNNNNNN(B) PPPPPPPPPPPP(1) NULL
* PPPPPPPPNNNN(2)
* PPPPNNNNNNNN(3)
*
* new_vma == prev in case A,1,2
* new_vma == next in case B,3
*/
new_vma = __vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
vma->anon_vma, vma->vm_file, pgoff,
vma_policy(vma), vma->vm_userfaultfd_ctx,
vma_get_anon_name(vma), true);
new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
if (new_vma) {
/*
* Source vma may have been merged into new_vma
@@ -3281,15 +3261,6 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
get_file(new_vma->vm_file);
if (new_vma->vm_ops && new_vma->vm_ops->open)
new_vma->vm_ops->open(new_vma);
/*
* As the VMA is linked right now, it may be hit by the
* speculative page fault handler. But we don't want it to
* to start mapping page in this area until the caller has
* potentially move the pte from the moved VMA. To prevent
* that we protect it right now, and let the caller unprotect
* it once the move is done.
*/
vm_write_begin(new_vma);
vma_link(mm, new_vma, prev, rb_link, rb_parent);
*need_rmap_locks = false;
}
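
The comments deleted in the copy_vma() hunks above (and in the move_vma() hunks below) explain the protocol this revert removes: copy_vma() handed back a new_vma that was already write-protected so the speculative page fault handler could not start installing PTEs in the destination range before move_vma() had moved them over, and move_vma() released that protection once the move was done. The following is a minimal userspace sketch of that protect/unprotect idea, assuming only that vm_write_begin()/vm_write_end() in the speculative page fault series behave like a seqcount writer section; the fake_vma structure and function names here are illustrative, not the kernel API.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for a VMA guarded by a per-VMA sequence count. */
struct fake_vma {
	atomic_uint seq;                /* odd while a writer is updating */
	unsigned long vm_start, vm_end;
};

static void vma_write_begin(struct fake_vma *v)
{
	atomic_fetch_add_explicit(&v->seq, 1, memory_order_release); /* seq becomes odd */
}

static void vma_write_end(struct fake_vma *v)
{
	atomic_fetch_add_explicit(&v->seq, 1, memory_order_release); /* seq becomes even */
}

/* A speculative reader only trusts its snapshot if seq was even and unchanged. */
static int vma_read_stable(struct fake_vma *v, unsigned long *start, unsigned long *end)
{
	unsigned int before = atomic_load_explicit(&v->seq, memory_order_acquire);

	if (before & 1)
		return 0;       /* writer in progress: fall back to the slow path */
	*start = v->vm_start;
	*end = v->vm_end;
	return atomic_load_explicit(&v->seq, memory_order_acquire) == before;
}

int main(void)
{
	struct fake_vma dst = { .seq = 0 };
	unsigned long s, e;

	/* "copy_vma": publish the destination VMA already write-protected. */
	vma_write_begin(&dst);
	dst.vm_start = 0x1000;
	dst.vm_end   = 0x2000;

	/* A speculative fault arriving now must refuse to use it. */
	printf("speculative read allowed: %d\n", vma_read_stable(&dst, &s, &e));

	/* "move_vma": page tables have been moved, let readers in again. */
	vma_write_end(&dst);
	printf("speculative read allowed: %d\n", vma_read_stable(&dst, &s, &e));
	return 0;
}

After this revert, copy_vma() links the new VMA without opening such a write section and move_vma() no longer closes one, which is exactly what the removed hunks above and below show.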


@@ -297,14 +297,6 @@ static unsigned long move_vma(struct vm_area_struct *vma,
if (!new_vma)
return -ENOMEM;
/* new_vma is returned protected by copy_vma, to prevent speculative
* page fault to be done in the destination area before we move the pte.
* Now, we must also protect the source VMA since we don't want pages
* to be mapped in our back while we are copying the PTEs.
*/
if (vma != new_vma)
vm_write_begin(vma);
moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
need_rmap_locks);
if (moved_len < old_len) {
@@ -321,8 +313,6 @@ static unsigned long move_vma(struct vm_area_struct *vma,
*/
move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
true);
if (vma != new_vma)
vm_write_end(vma);
vma = new_vma;
old_len = new_len;
old_addr = new_addr;
@@ -331,10 +321,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
mremap_userfaultfd_prep(new_vma, uf);
arch_remap(mm, old_addr, old_addr + old_len,
new_addr, new_addr + new_len);
if (vma != new_vma)
vm_write_end(vma);
}
vm_write_end(new_vma);
/* Conceal VM_ACCOUNT so old reservation is not undone */
if (vm_flags & VM_ACCOUNT) {