From b982086907494c5a5e66d876328e43eada041e06 Mon Sep 17 00:00:00 2001
From: UtsavBalar1231
Date: Tue, 26 Apr 2022 21:57:01 +0530
Subject: [PATCH] Revert "mm: Enhance per process reclaim to consider shared pages"

This reverts commit 5a83f94ad7cac48dc445aa8f62e380835397ff7e.

Signed-off-by: UtsavBalar1231
---
 fs/proc/task_mmu.c   |  2 +-
 include/linux/rmap.h |  9 +++------
 mm/ksm.c             |  1 -
 mm/memory-failure.c  |  2 +-
 mm/memory_hotplug.c  |  4 +---
 mm/migrate.c         |  4 ++--
 mm/rmap.c            | 24 +-----------------------
 mm/vmscan.c          | 12 ++----------
 8 files changed, 11 insertions(+), 47 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index a06ebb03e27d..af41c09f1f15 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1715,7 +1715,7 @@ cont:
 			break;
 		}
 		pte_unmap_unlock(pte - 1, ptl);
-		reclaim_pages_from_list(&page_list, vma);
+		reclaim_pages_from_list(&page_list);
 
 		if (addr != end)
 			goto cont;
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 4e21a2cbf144..5e47a0c02a4d 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -14,8 +14,7 @@
 
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
-extern unsigned long reclaim_pages_from_list(struct list_head *page_list,
-				struct vm_area_struct *vma);
+extern unsigned long reclaim_pages_from_list(struct list_head *page_list);
 
 /*
  * The anon_vma heads a list of private "related" vmas, to scan if
@@ -209,8 +208,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
 int page_referenced(struct page *, int is_locked,
 			struct mem_cgroup *memcg, unsigned long *vm_flags);
 
-bool try_to_unmap(struct page *page, enum ttu_flags flags,
-			struct vm_area_struct *vma);
+bool try_to_unmap(struct page *, enum ttu_flags flags);
 
 /* Avoid racy checks */
 #define PVMW_SYNC		(1 << 0)
@@ -277,7 +275,6 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
  */
 struct rmap_walk_control {
 	void *arg;
-	struct vm_area_struct *target_vma;
 	/*
 	 * Return false if page table scanning in rmap_walk should be stopped.
 	 * Otherwise, return true.
@@ -306,7 +303,7 @@ static inline int page_referenced(struct page *page, int is_locked,
 	return 0;
 }
 
-#define try_to_unmap(page, refs, vma) false
+#define try_to_unmap(page, refs) false
 
 static inline int page_mkclean(struct page *page)
 {
diff --git a/mm/ksm.c b/mm/ksm.c
index 9ee40e4f3061..ba2285c8286e 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2603,7 +2603,6 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 	stable_node = page_stable_node(page);
 	if (!stable_node)
 		return;
-
 again:
 	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
 		struct anon_vma *anon_vma = rmap_item->anon_vma;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 3cff488f28c5..3da3c63dccd1 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1033,7 +1033,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	if (kill)
 		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
 
-	unmap_success = try_to_unmap(hpage, ttu, NULL);
+	unmap_success = try_to_unmap(hpage, ttu);
 	if (!unmap_success)
 		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
 		       pfn, page_mapcount(hpage));
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index f57fa439a40d..f195982aaabb 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1445,9 +1445,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			if (WARN_ON(PageLRU(page)))
 				isolate_lru_page(page);
 			if (page_mapped(page))
-				try_to_unmap(page,
-					TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS,
-					NULL);
+				try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
 			continue;
 		}
 
diff --git a/mm/migrate.c b/mm/migrate.c
index 4e352cbf02b5..c90fdb4fa677 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1106,7 +1106,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
 				page);
 		try_to_unmap(page,
-			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS, NULL);
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 		page_was_mapped = 1;
 	}
 
@@ -1332,7 +1332,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 
 	if (page_mapped(hpage)) {
 		try_to_unmap(hpage,
-			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS, NULL);
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 		page_was_mapped = 1;
 	}
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 1ddb8f4de0ae..9ecbd7a8fd19 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1720,24 +1720,19 @@ static int page_not_mapped(struct page *page)
  * try_to_unmap - try to remove all page table mappings to a page
  * @page: the page to get unmapped
  * @flags: action and flags
- * @vma : target vma for reclaim
  *
  * Tries to remove all the page table entries which are mapping this
  * page, used in the pageout path. Caller must hold the page lock.
- * If @vma is not NULL, this function try to remove @page from only @vma
- * without peeking all mapped vma for @page.
  *
  * If unmap is successful, return true. Otherwise, false.
  */
-bool try_to_unmap(struct page *page, enum ttu_flags flags,
-			struct vm_area_struct *vma)
+bool try_to_unmap(struct page *page, enum ttu_flags flags)
 {
 	struct rmap_walk_control rwc = {
 		.rmap_one = try_to_unmap_one,
 		.arg = (void *)flags,
 		.done = page_not_mapped,
 		.anon_lock = page_lock_anon_vma_read,
-		.target_vma = vma,
 	};
 
 	/*
@@ -1782,7 +1777,6 @@ void try_to_munlock(struct page *page)
 		.arg = (void *)TTU_MUNLOCK,
 		.done = page_not_mapped,
 		.anon_lock = page_lock_anon_vma_read,
-		.target_vma = NULL,
 
 	};
 
@@ -1844,13 +1838,6 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 	pgoff_t pgoff_start, pgoff_end;
 	struct anon_vma_chain *avc;
 
-	if (rwc->target_vma) {
-		unsigned long address = vma_address(page, rwc->target_vma);
-
-		rwc->rmap_one(page, rwc->target_vma, address, rwc->arg);
-		return;
-	}
-
 	if (locked) {
 		anon_vma = page_anon_vma(page);
 		/* anon_vma disappear under us? */
@@ -1858,7 +1845,6 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 	} else {
 		anon_vma = rmap_walk_anon_lock(page, rwc);
 	}
-
 	if (!anon_vma)
 		return;
 
@@ -1904,7 +1890,6 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 	struct address_space *mapping = page_mapping(page);
 	pgoff_t pgoff_start, pgoff_end;
 	struct vm_area_struct *vma;
-	unsigned long address;
 
 	/*
 	 * The page lock not only makes sure that page->mapping cannot
@@ -1921,13 +1906,6 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
 	if (!locked)
 		i_mmap_lock_read(mapping);
-
-	if (rwc->target_vma) {
-		address = vma_address(page, rwc->target_vma);
-		rwc->rmap_one(page, rwc->target_vma, address, rwc->arg);
-		goto done;
-	}
-
 	vma_interval_tree_foreach(vma, &mapping->i_mmap,
 			pgoff_start, pgoff_end) {
 		unsigned long address = vma_address(page, vma);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a9b79780fedc..063129a55847 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -127,12 +127,6 @@ struct scan_control {
 		unsigned int file_taken;
 		unsigned int taken;
 	} nr;
-	/*
-	 * Reclaim pages from a vma. If the page is shared by other tasks
-	 * it is zapped from a vma without reclaim so it ends up remaining
-	 * on memory until last task zap it.
-	 */
-	struct vm_area_struct *target_vma;
 };
 
 #ifdef ARCH_HAS_PREFETCH
@@ -1342,7 +1336,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			if (unlikely(PageTransHuge(page)))
 				flags |= TTU_SPLIT_HUGE_PMD;
 
-			if (!try_to_unmap(page, flags, sc->target_vma)) {
+			if (!try_to_unmap(page, flags)) {
 				nr_unmap_fail++;
 				goto activate_locked;
 			}
@@ -1559,8 +1553,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 }
 
 #ifdef CONFIG_PROCESS_RECLAIM
-unsigned long reclaim_pages_from_list(struct list_head *page_list,
-			struct vm_area_struct *vma)
+unsigned long reclaim_pages_from_list(struct list_head *page_list)
 {
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
@@ -1568,7 +1561,6 @@ unsigned long reclaim_pages_from_list(struct list_head *page_list,
 		.may_writepage = 1,
 		.may_unmap = 1,
 		.may_swap = 1,
-		.target_vma = vma,
 	};
 
 	unsigned long nr_reclaimed;
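
Note for reviewers (summary only, not part of the diff; strip this note before feeding the patch to git am): the sketch below restates the API change this revert undoes, drawn directly from the hunks above. With the reverted enhancement in place, callers could pin the reverse-map walk to a single VMA; after the revert, the walk always visits every VMA that maps the page.

	/* Before this revert (per-process reclaim enhancement applied):
	 * a non-NULL vma restricted the rmap walk to that one mapping,
	 * while NULL meant "walk every mapping", as in the hwpoison and
	 * migration callers above.
	 */
	bool try_to_unmap(struct page *page, enum ttu_flags flags,
			  struct vm_area_struct *vma);
	unsigned long reclaim_pages_from_list(struct list_head *page_list,
			  struct vm_area_struct *vma);

	/* After this revert (upstream signatures restored): */
	bool try_to_unmap(struct page *page, enum ttu_flags flags);
	unsigned long reclaim_pages_from_list(struct list_head *page_list);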