Revert "mm: Enhance per process reclaim to consider shared pages"

This reverts commit 5a83f94ad7.

Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>
Authored by UtsavBalar1231 on 2022-04-26 21:57:01 +05:30; committed by spakkkk
parent 9a6187a01f
commit b982086907
8 changed files with 11 additions and 47 deletions
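
For orientation, here is a condensed before/after sketch of the interfaces this revert touches, assembled from the hunks below. It is a reading aid only: the struct types are forward-declared stubs, the enum values are placeholders, and the *_pre_revert names are invented so both forms can sit in one file.

#include <stdbool.h>

struct list_head;
struct page;
struct vm_area_struct;

/* Placeholder flag values; the kernel's real ttu_flags differ. */
enum ttu_flags { TTU_IGNORE_MLOCK = 0x1, TTU_IGNORE_ACCESS = 0x2 };

/* Before the revert: callers could restrict unmapping/reclaim to one vma. */
unsigned long reclaim_pages_from_list_pre_revert(struct list_head *page_list,
						 struct vm_area_struct *vma);
bool try_to_unmap_pre_revert(struct page *page, enum ttu_flags flags,
			     struct vm_area_struct *vma);

/* After the revert: back to the stock forms; the whole rmap walk runs. */
unsigned long reclaim_pages_from_list(struct list_head *page_list);
bool try_to_unmap(struct page *page, enum ttu_flags flags);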


@@ -1715,7 +1715,7 @@ cont:
 			break;
 	}
 	pte_unmap_unlock(pte - 1, ptl);
-	reclaim_pages_from_list(&page_list, vma);
+	reclaim_pages_from_list(&page_list);
 	if (addr != end)
 		goto cont;


@@ -14,8 +14,7 @@
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
-extern unsigned long reclaim_pages_from_list(struct list_head *page_list,
-					struct vm_area_struct *vma);
+extern unsigned long reclaim_pages_from_list(struct list_head *page_list);
 /*
  * The anon_vma heads a list of private "related" vmas, to scan if
@@ -209,8 +208,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
 int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *memcg, unsigned long *vm_flags);
-bool try_to_unmap(struct page *page, enum ttu_flags flags,
-			struct vm_area_struct *vma);
+bool try_to_unmap(struct page *, enum ttu_flags flags);
 /* Avoid racy checks */
 #define PVMW_SYNC		(1 << 0)
@@ -277,7 +275,6 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
  */
 struct rmap_walk_control {
	void *arg;
-	struct vm_area_struct *target_vma;
	/*
	 * Return false if page table scanning in rmap_walk should be stopped.
	 * Otherwise, return true.
@@ -306,7 +303,7 @@ static inline int page_referenced(struct page *page, int is_locked,
	return 0;
 }
-#define try_to_unmap(page, refs, vma) false
+#define try_to_unmap(page, refs) false
 static inline int page_mkclean(struct page *page)
 {


@@ -2603,7 +2603,6 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 	stable_node = page_stable_node(page);
 	if (!stable_node)
 		return;
-again:
 	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
 		struct anon_vma *anon_vma = rmap_item->anon_vma;


@@ -1033,7 +1033,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	if (kill)
 		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
-	unmap_success = try_to_unmap(hpage, ttu, NULL);
+	unmap_success = try_to_unmap(hpage, ttu);
 	if (!unmap_success)
 		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
 		       pfn, page_mapcount(hpage));


@@ -1445,9 +1445,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			if (WARN_ON(PageLRU(page)))
 				isolate_lru_page(page);
 			if (page_mapped(page))
-				try_to_unmap(page,
-					TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS,
-					NULL);
+				try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
 			continue;
 		}


@@ -1106,7 +1106,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
 				page);
 		try_to_unmap(page,
-			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS, NULL);
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 		page_was_mapped = 1;
 	}
@@ -1332,7 +1332,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	if (page_mapped(hpage)) {
 		try_to_unmap(hpage,
-			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS, NULL);
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 		page_was_mapped = 1;
 	}


@@ -1720,24 +1720,19 @@ static int page_not_mapped(struct page *page)
  * try_to_unmap - try to remove all page table mappings to a page
  * @page: the page to get unmapped
  * @flags: action and flags
- * @vma : target vma for reclaim
  *
  * Tries to remove all the page table entries which are mapping this
  * page, used in the pageout path. Caller must hold the page lock.
- * If @vma is not NULL, this function try to remove @page from only @vma
- * without peeking all mapped vma for @page.
  *
  * If unmap is successful, return true. Otherwise, false.
  */
-bool try_to_unmap(struct page *page, enum ttu_flags flags,
-		struct vm_area_struct *vma)
+bool try_to_unmap(struct page *page, enum ttu_flags flags)
 {
 	struct rmap_walk_control rwc = {
 		.rmap_one = try_to_unmap_one,
 		.arg = (void *)flags,
 		.done = page_not_mapped,
 		.anon_lock = page_lock_anon_vma_read,
-		.target_vma = vma,
 	};
 	/*
@@ -1782,7 +1777,6 @@ void try_to_munlock(struct page *page)
 		.arg = (void *)TTU_MUNLOCK,
 		.done = page_not_mapped,
 		.anon_lock = page_lock_anon_vma_read,
-		.target_vma = NULL,
 	};
@@ -1844,13 +1838,6 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 	pgoff_t pgoff_start, pgoff_end;
 	struct anon_vma_chain *avc;
-	if (rwc->target_vma) {
-		unsigned long address = vma_address(page, rwc->target_vma);
-		rwc->rmap_one(page, rwc->target_vma, address, rwc->arg);
-		return;
-	}
 	if (locked) {
 		anon_vma = page_anon_vma(page);
 		/* anon_vma disappear under us? */
@@ -1858,7 +1845,6 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 	} else {
 		anon_vma = rmap_walk_anon_lock(page, rwc);
 	}
 	if (!anon_vma)
 		return;
@@ -1904,7 +1890,6 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 	struct address_space *mapping = page_mapping(page);
 	pgoff_t pgoff_start, pgoff_end;
 	struct vm_area_struct *vma;
-	unsigned long address;
 	/*
 	 * The page lock not only makes sure that page->mapping cannot
@@ -1921,13 +1906,6 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
 	if (!locked)
 		i_mmap_lock_read(mapping);
-	if (rwc->target_vma) {
-		address = vma_address(page, rwc->target_vma);
-		rwc->rmap_one(page, rwc->target_vma, address, rwc->arg);
-		goto done;
-	}
 	vma_interval_tree_foreach(vma, &mapping->i_mmap,
 			pgoff_start, pgoff_end) {
 		unsigned long address = vma_address(page, vma);
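
The rmap.c hunks above are the core of the revert: with rwc->target_vma gone, try_to_unmap again walks every vma that maps the page rather than unmapping only the reclaiming task's vma. Below is a minimal, self-contained C model of that short-circuit; the types, the toy walker, and rmap_walk_model are simplified stand-ins, not kernel code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures shown in this diff. */
struct vm_area_struct { int id; };
struct page { int id; };

struct rmap_walk_control {
	void *arg;
	struct vm_area_struct *target_vma;	/* field removed by this revert */
	bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
			 unsigned long address, void *arg);
};

static bool unmap_one(struct page *page, struct vm_area_struct *vma,
		      unsigned long address, void *arg)
{
	(void)address;
	(void)arg;
	printf("unmap page %d from vma %d\n", page->id, vma->id);
	return true;
}

/* Toy stand-in for the anon_vma / i_mmap interval-tree walks in rmap_walk. */
static void rmap_walk_model(struct page *page, struct rmap_walk_control *rwc,
			    struct vm_area_struct **vmas, size_t n)
{
	/* Behaviour removed by the revert: visit only the target vma. */
	if (rwc->target_vma) {
		rwc->rmap_one(page, rwc->target_vma, 0, rwc->arg);
		return;
	}
	/* Behaviour after the revert: every vma mapping the page is visited. */
	for (size_t i = 0; i < n; i++)
		rwc->rmap_one(page, vmas[i], 0, rwc->arg);
}

int main(void)
{
	struct page p = { .id = 1 };
	struct vm_area_struct a = { .id = 10 }, b = { .id = 20 };
	struct vm_area_struct *vmas[] = { &a, &b };	/* page shared by two vmas */
	struct rmap_walk_control rwc = { .rmap_one = unmap_one, .target_vma = &b };

	rmap_walk_model(&p, &rwc, vmas, 2);	/* pre-revert: vma 20 only */
	rwc.target_vma = NULL;
	rmap_walk_model(&p, &rwc, vmas, 2);	/* post-revert: vma 10 and vma 20 */
	return 0;
}

The scan_control comment removed in the vmscan.c hunk below gives the original motivation for the targeted path: a page shared by other tasks was only zapped from the caller's vma, so it stayed resident until the last mapping disappeared.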


@@ -127,12 +127,6 @@ struct scan_control {
 		unsigned int file_taken;
 		unsigned int taken;
 	} nr;
-	/*
-	 * Reclaim pages from a vma. If the page is shared by other tasks
-	 * it is zapped from a vma without reclaim so it ends up remaining
-	 * on memory until last task zap it.
-	 */
-	struct vm_area_struct *target_vma;
 };
 #ifdef ARCH_HAS_PREFETCH
@@ -1342,7 +1336,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (unlikely(PageTransHuge(page)))
 			flags |= TTU_SPLIT_HUGE_PMD;
-		if (!try_to_unmap(page, flags, sc->target_vma)) {
+		if (!try_to_unmap(page, flags)) {
 			nr_unmap_fail++;
 			goto activate_locked;
 		}
@@ -1559,8 +1553,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 }
 #ifdef CONFIG_PROCESS_RECLAIM
-unsigned long reclaim_pages_from_list(struct list_head *page_list,
-					struct vm_area_struct *vma)
+unsigned long reclaim_pages_from_list(struct list_head *page_list)
 {
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
@@ -1568,7 +1561,6 @@ unsigned long reclaim_pages_from_list(struct list_head *page_list,
 		.may_writepage = 1,
 		.may_unmap = 1,
 		.may_swap = 1,
-		.target_vma = vma,
 	};
 	unsigned long nr_reclaimed;