From a0afac9df4bc8bb5b5ba4a52e1a4ca5e4a50b317 Mon Sep 17 00:00:00 2001
From: Yu Zhao
Date: Fri, 2 Oct 2020 01:43:39 -0600
Subject: [PATCH] BACKPORT: mm/swap.c: don't pass "enum lru_list" to
 del_page_from_lru_list()

The parameter is redundant in the sense that it can be potentially
extracted from the "struct page" parameter by page_lru(). We need to
make sure that existing PageActive() or PageUnevictable() remains
until the function returns. A few places don't conform, and simple
reordering fixes them.

This patch may have left page_off_lru() seemingly odd, and we'll take
care of it in the next patch.

Link: https://lore.kernel.org/linux-mm/20201207220949.830352-6-yuzhao@google.com/
Link: https://lkml.kernel.org/r/20210122220600.906146-6-yuzhao@google.com
Signed-off-by: Yu Zhao
Cc: Alex Shi
Cc: Hugh Dickins
Cc: Johannes Weiner
Cc: Matthew Wilcox
Cc: Michal Hocko
Cc: Roman Gushchin
Cc: Vladimir Davydov
Cc: Vlastimil Babka
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
(cherry picked from commit 46ae6b2cc2a47904a368d238425531ea91f3a2a5)
Bug: 228114874
Change-Id: I1e14dcbf4111b39cf155ed3512423448865eb324
---
 include/linux/mm_inline.h |  5 +++--
 mm/compaction.c           |  2 +-
 mm/memcontrol.c           |  2 +-
 mm/mlock.c                |  2 +-
 mm/swap.c                 | 21 +++++++++------------
 mm/vmscan.c               | 15 ++++++---------
 6 files changed, 21 insertions(+), 26 deletions(-)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 4fd0a14a3d71..7afe798dd8ff 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -125,9 +125,10 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
 }
 
 static __always_inline void del_page_from_lru_list(struct page *page,
-				struct lruvec *lruvec, enum lru_list lru)
+				struct lruvec *lruvec)
 {
 	list_del(&page->lru);
-	update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
+	update_lru_size(lruvec, page_lru(page), page_zonenum(page),
+			-hpage_nr_pages(page));
 }
 #endif
diff --git a/mm/compaction.c b/mm/compaction.c
index 012c38ceb0c0..9348005a4b70 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -879,7 +879,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		VM_BUG_ON_PAGE(PageCompound(page), page);
 
 		/* Successfully isolated */
-		del_page_from_lru_list(page, lruvec, page_lru(page));
+		del_page_from_lru_list(page, lruvec);
 		inc_node_page_state(page,
 				NR_ISOLATED_ANON + page_is_file_cache(page));
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2e1f8ab307e0..b103508c9d29 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2392,7 +2392,7 @@ static void lock_page_lru(struct page *page, int *isolated)
 
 		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
 		ClearPageLRU(page);
-		del_page_from_lru_list(page, lruvec, page_lru(page));
+		del_page_from_lru_list(page, lruvec);
 		*isolated = 1;
 	} else
 		*isolated = 0;
diff --git a/mm/mlock.c b/mm/mlock.c
index ef2abafbc18f..ca87032bda0c 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -115,7 +115,7 @@ static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
 		if (getpage)
 			get_page(page);
 		ClearPageLRU(page);
-		del_page_from_lru_list(page, lruvec, page_lru(page));
+		del_page_from_lru_list(page, lruvec);
 		return true;
 	}
 
diff --git a/mm/swap.c b/mm/swap.c
index 0dabf4e3a78f..b5f50e4a5775 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -68,7 +68,8 @@ static void __page_cache_release(struct page *page)
 		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
 		VM_BUG_ON_PAGE(!PageLRU(page), page);
 		__ClearPageLRU(page);
-		del_page_from_lru_list(page, lruvec, page_off_lru(page));
+		del_page_from_lru_list(page, lruvec);
+		page_off_lru(page);
 		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 	}
 	__ClearPageWaiters(page);
@@ -222,7 +223,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
 	int *pgmoved = arg;
 
 	if (PageLRU(page) && !PageUnevictable(page)) {
-		del_page_from_lru_list(page, lruvec, page_lru(page));
+		del_page_from_lru_list(page, lruvec);
 		ClearPageActive(page);
 		add_page_to_lru_list_tail(page, lruvec);
 		(*pgmoved)++;
@@ -277,9 +278,8 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 {
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		int file = page_is_file_cache(page);
-		int lru = page_lru_base_type(page);
 
-		del_page_from_lru_list(page, lruvec, lru);
+		del_page_from_lru_list(page, lruvec);
 		SetPageActive(page);
 		add_page_to_lru_list(page, lruvec);
 		trace_mm_lru_activate(page);
@@ -495,7 +495,7 @@ void lru_cache_add_active_or_unevictable(struct page *page,
 static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 			      void *arg)
 {
-	int lru, file;
+	int file;
 	bool active;
 
 	if (!PageLRU(page))
@@ -510,9 +510,8 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 
 	active = PageActive(page);
 	file = page_is_file_cache(page);
-	lru = page_lru_base_type(page);
 
-	del_page_from_lru_list(page, lruvec, lru + active);
+	del_page_from_lru_list(page, lruvec);
 	ClearPageActive(page);
 	ClearPageReferenced(page);
 
@@ -560,10 +559,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 {
 	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page) && !PageUnevictable(page)) {
-		bool active = PageActive(page);
-
-		del_page_from_lru_list(page, lruvec,
-				       LRU_INACTIVE_ANON + active);
+		del_page_from_lru_list(page, lruvec);
 		ClearPageActive(page);
 		ClearPageReferenced(page);
 		/*
@@ -825,7 +821,8 @@ void release_pages(struct page **pages, int nr)
 			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
 			VM_BUG_ON_PAGE(!PageLRU(page), page);
 			__ClearPageLRU(page);
-			del_page_from_lru_list(page, lruvec, page_off_lru(page));
+			del_page_from_lru_list(page, lruvec);
+			page_off_lru(page);
 		}
 
 		__ClearPageWaiters(page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 05d2a78aea43..e2d034826edb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1837,10 +1837,9 @@ int isolate_lru_page(struct page *page)
 		spin_lock_irq(zone_lru_lock(zone));
 		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
 		if (PageLRU(page)) {
-			int lru = page_lru(page);
 			get_page(page);
 			ClearPageLRU(page);
-			del_page_from_lru_list(page, lruvec, lru);
+			del_page_from_lru_list(page, lruvec);
 			ret = 0;
 		}
 		spin_unlock_irq(zone_lru_lock(zone));
@@ -1897,7 +1896,6 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 	 */
 	while (!list_empty(page_list)) {
 		struct page *page = lru_to_page(page_list);
-		int lru;
 
 		VM_BUG_ON_PAGE(PageLRU(page), page);
 		list_del(&page->lru);
@@ -1911,18 +1909,17 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
 		SetPageLRU(page);
-		lru = page_lru(page);
 		add_page_to_lru_list(page, lruvec);
 
-		if (is_active_lru(lru)) {
-			int file = is_file_lru(lru);
+		if (PageActive(page)) {
+			int file = page_is_file_cache(page);
 			int numpages = hpage_nr_pages(page);
 			reclaim_stat->recent_rotated[file] += numpages;
 		}
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
+			del_page_from_lru_list(page, lruvec);
 			__ClearPageActive(page);
-			del_page_from_lru_list(page, lruvec, lru);
 
 			if (unlikely(PageCompound(page))) {
 				spin_unlock_irq(&pgdat->lru_lock);
@@ -2108,8 +2105,8 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
 
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
+			del_page_from_lru_list(page, lruvec);
 			__ClearPageActive(page);
-			del_page_from_lru_list(page, lruvec, lru);
 
 			if (unlikely(PageCompound(page))) {
 				spin_unlock_irq(&pgdat->lru_lock);
@@ -4291,8 +4288,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 
 		if (page_evictable(page)) {
 			VM_BUG_ON_PAGE(PageActive(page), page);
+			del_page_from_lru_list(page, lruvec);
 			ClearPageUnevictable(page);
-			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
 			add_page_to_lru_list(page, lruvec);
 			pgrescued++;
 		}
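
Side note on the ordering requirement in the commit message: below is a minimal
standalone C sketch of why callers must keep PageActive()/PageUnevictable()
intact until del_page_from_lru_list() returns once the target list is derived
from the page's own flags. The struct, flag fields, counters and helpers here
are illustrative stand-ins for struct page, the per-lruvec size counters and
page_lru()/update_lru_size(); they are not the kernel implementation, only the
ordering argument carries over.

/*
 * Userspace sketch (assumed names, not kernel code): models the accounting
 * that del_page_from_lru_list() does when the lru list is derived from the
 * page flags rather than passed in by the caller.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum lru_list { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON, LRU_UNEVICTABLE, NR_LRU_LISTS };

struct page { bool active; bool unevictable; };

static long lru_sizes[NR_LRU_LISTS];	/* stand-in for per-lruvec counters */

/* Stand-in for page_lru(): the page's own flags pick the list. */
static enum lru_list page_lru(const struct page *page)
{
	if (page->unevictable)
		return LRU_UNEVICTABLE;
	return page->active ? LRU_ACTIVE_ANON : LRU_INACTIVE_ANON;
}

/* New-style delete: no "enum lru_list" argument, the flags decide. */
static void del_page_from_lru_list(const struct page *page)
{
	lru_sizes[page_lru(page)]--;
}

int main(void)
{
	struct page page = { .active = true, .unevictable = false };

	lru_sizes[LRU_ACTIVE_ANON] = 1;	/* page sits on the active list */

	/* Correct order: delete first, then clear the flag. Clearing first
	 * would make page_lru() return LRU_INACTIVE_ANON and leave the
	 * active counter at 1 while the inactive counter went negative. */
	del_page_from_lru_list(&page);
	page.active = false;		/* ClearPageActive() equivalent */

	assert(lru_sizes[LRU_ACTIVE_ANON] == 0);
	printf("active list size after correct ordering: %ld\n",
	       lru_sizes[LRU_ACTIVE_ANON]);
	return 0;
}

This mirrors the reordering the patch actually performs: deriving the list
inside the helper removes the possibility of a caller passing an enum that
disagrees with the page flags, at the cost of requiring the flag clears to
happen only after the delete, which is why del_page_from_lru_list() moves
ahead of __ClearPageActive()/ClearPageUnevictable() in putback_inactive_pages(),
move_active_pages_to_lru() and check_move_unevictable_pages() above.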