ANDROID: GKI: cma: redirect page allocation to CMA

CMA pages are designed to be used as fallback for movable allocations
and cannot be used for non-movable allocations. If CMA pages are
utilized poorly, non-movable allocations may end up getting starved if
all regular movable pages are allocated and the only pages left are
CMA. Always using CMA pages first creates unacceptable performance
problems. As a midway alternative, use CMA pages for certain
userspace allocations. The userspace pages can be migrated or dropped
quickly, which gives decent utilization.
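
As a rough sketch (illustrative only, not a call site touched by this
patch), a movable userspace-backed allocation opts in with the new flag:

        /* hypothetical caller: movable user page that may live in CMA */
        struct page *page = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_CMA);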

Change-Id: I6165dda01b705309eebabc6dfa67146b7a95c174
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Heesub Shin <heesub.shin@samsung.com>
[lauraa@codeaurora.org: Missing CONFIG_CMA guards, add commit text]
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
[lmark@codeaurora.org: resolve conflicts relating to MIGRATE_HIGHATOMIC]
Signed-off-by: Liam Mark <lmark@codeaurora.org>
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
[swatsrid@codeaurora.org: Fix merge conflicts]
Signed-off-by: Swathi Sridhar <swatsrid@codeaurora.org>
(cherry picked from commit 46f8fca539)
Signed-off-by: Mark Salyzyn <salyzyn@google.com>
Bug: 150378964

--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h

@@ -44,6 +44,7 @@ struct vm_area_struct;
 #else
 #define ___GFP_NOLOCKDEP 0
 #endif
+#define ___GFP_CMA 0x1000000u
 /* If the above are modified, __GFP_BITS_SHIFT may need updating */

 /*
@@ -57,6 +58,7 @@ struct vm_area_struct;
 #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
 #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
 #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
+#define __GFP_CMA ((__force gfp_t)___GFP_CMA)
 #define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)

 /**
@@ -217,8 +219,13 @@ struct vm_area_struct;
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)

 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (25)
+#ifdef CONFIG_LOCKDEP
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+#else
+#define __GFP_BITS_MASK (((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) & \
+		~0x800000u)
+#endif

 /**
  * DOC: Useful GFP flag combinations
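
To sanity-check the new bit layout, the arithmetic from the hunk above can
be reproduced in a small standalone program (userspace C, illustrative
only; the macro names are local to this sketch):

#include <stdio.h>

#define GFP_CMA_BIT     0x1000000u /* ___GFP_CMA, bit 24 */
#define GFP_LOCKDEP_BIT 0x800000u  /* ___GFP_NOLOCKDEP with CONFIG_LOCKDEP, bit 23 */
#define GFP_BITS_SHIFT  25

int main(void)
{
        unsigned int mask_lockdep   = (1u << GFP_BITS_SHIFT) - 1;
        unsigned int mask_nolockdep = mask_lockdep & ~GFP_LOCKDEP_BIT;

        /* The CMA bit stays inside the mask either way; the lockdep bit
         * is only part of the mask when CONFIG_LOCKDEP is enabled. */
        printf("CONFIG_LOCKDEP=y: mask=0x%x cma=%d lockdep=%d\n",
               mask_lockdep, !!(mask_lockdep & GFP_CMA_BIT),
               !!(mask_lockdep & GFP_LOCKDEP_BIT));
        printf("CONFIG_LOCKDEP=n: mask=0x%x cma=%d lockdep=%d\n",
               mask_nolockdep, !!(mask_nolockdep & GFP_CMA_BIT),
               !!(mask_nolockdep & GFP_LOCKDEP_BIT));
        return 0;
}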

--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h

@@ -181,7 +181,12 @@ static inline struct page *
 alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
                                        unsigned long vaddr)
 {
+#ifndef CONFIG_CMA
        return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+#else
+       return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+                                               vaddr);
+#endif
 }

 static inline void clear_highpage(struct page *page)
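
For context (unchanged by this diff), the anonymous page-fault path in
mm/memory.c is one of the callers that obtains zeroed user pages through
this helper, so on CONFIG_CMA kernels those pages now carry __GFP_CMA;
roughly:

        /* in do_anonymous_page(), sketch of the existing call */
        page = alloc_zeroed_user_highpage_movable(vma, vmf->address);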

--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h

@@ -387,6 +387,10 @@ struct zone {
        struct pglist_data *zone_pgdat;
        struct per_cpu_pageset __percpu *pageset;

+#ifdef CONFIG_CMA
+       bool cma_alloc;
+#endif
+
 #ifndef CONFIG_SPARSEMEM
        /*
         * Flags for a pageblock_nr_pages block. See pageblock-flags.h.

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c

@@ -2579,14 +2579,30 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 retry:
        page = __rmqueue_smallest(zone, order, migratetype);
-       if (unlikely(!page)) {
-               if (migratetype == MIGRATE_MOVABLE)
-                       page = __rmqueue_cma_fallback(zone, order);

-               if (!page && __rmqueue_fallback(zone, order, migratetype,
-                                                       alloc_flags))
-                       goto retry;
-       }
+       if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype,
+                                                       alloc_flags))
+               goto retry;

        trace_mm_page_alloc_zone_locked(page, order, migratetype);
        return page;
 }
+
+static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
+                                 int migratetype)
+{
+       struct page *page = 0;
+
+retry:
+#ifdef CONFIG_CMA
+       if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc)
+               page = __rmqueue_cma_fallback(zone, order);
+       else
+#endif
+               page = __rmqueue_smallest(zone, order, migratetype);
+
+       if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype))
+               goto retry;
+
+       trace_mm_page_alloc_zone_locked(page, order, migratetype);
+       return page;
+}
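
The selection policy above can be modelled in a few lines of standalone C
(illustrative only; zone_model and pick_source are invented for the sketch,
and the fallback path is simplified): CMA-eligible movable requests try CMA
pageblocks first unless a contiguous allocation is in progress, everything
else stays on the regular free lists.

#include <stdbool.h>
#include <stdio.h>

enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE };

struct zone_model {
        int free_cma;      /* free pages left in CMA pageblocks */
        int free_regular;  /* free pages left in regular pageblocks */
        bool cma_alloc;    /* models zone->cma_alloc */
};

/* Simplified stand-in for the __rmqueue_cma() decision above; the real
 * code falls back via __rmqueue_smallest()/__rmqueue_fallback(). */
static const char *pick_source(struct zone_model *z, enum migratetype mt)
{
        if (mt == MIGRATE_MOVABLE && !z->cma_alloc && z->free_cma > 0) {
                z->free_cma--;
                return "cma";
        }
        if (z->free_regular > 0) {
                z->free_regular--;
                return "regular";
        }
        return "none";
}

int main(void)
{
        struct zone_model z = { .free_cma = 2, .free_regular = 1 };

        printf("movable   -> %s\n", pick_source(&z, MIGRATE_MOVABLE));   /* cma */
        z.cma_alloc = true;
        printf("movable   -> %s\n", pick_source(&z, MIGRATE_MOVABLE));   /* regular */
        printf("unmovable -> %s\n", pick_source(&z, MIGRATE_UNMOVABLE)); /* none */
        return 0;
}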
@@ -2599,14 +2615,19 @@ retry:
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        unsigned long count, struct list_head *list,
-                       int migratetype, unsigned int alloc_flags)
+                       int migratetype, unsigned int alloc_flags, int cma)
 {
        int i, alloced = 0;

        spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
-               struct page *page = __rmqueue(zone, order, migratetype,
-                                                       alloc_flags);
+               struct page *page;
+
+               if (cma)
+                       page = __rmqueue_cma(zone, order, migratetype);
+               else
+                       page = __rmqueue(zone, order, migratetype, alloc_flags);
+
                if (unlikely(page == NULL))
                        break;
@@ -3064,7 +3085,7 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
                        unsigned int alloc_flags,
                        struct per_cpu_pages *pcp,
-                       struct list_head *list)
+                       struct list_head *list, gfp_t gfp_flags)
 {
        struct page *page;
@@ -3072,7 +3093,8 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
                if (list_empty(list)) {
                        pcp->count += rmqueue_bulk(zone, 0,
                                        pcp->batch, list,
-                                       migratetype, alloc_flags);
+                                       migratetype, alloc_flags,
+                                       gfp_flags & __GFP_CMA);
                        if (unlikely(list_empty(list)))
                                return NULL;
                }
@@ -3099,7 +3121,8 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
        local_irq_save(flags);
        pcp = &this_cpu_ptr(zone->pageset)->pcp;
        list = &pcp->lists[migratetype];
-       page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
+       page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list,
+                                gfp_flags);
        if (page) {
                __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
                zone_statistics(preferred_zone, zone);
@@ -3140,8 +3163,13 @@ struct page *rmqueue(struct zone *preferred_zone,
                        if (page)
                                trace_mm_page_alloc_zone_locked(page, order, migratetype);
                }
-               if (!page)
-                       page = __rmqueue(zone, order, migratetype, alloc_flags);
+               if (!page) {
+                       if (gfp_flags & __GFP_CMA)
+                               page = __rmqueue_cma(zone, order, migratetype);
+                       else
+                               page = __rmqueue(zone, order, migratetype,
+                                                alloc_flags);
+               }
        } while (page && check_new_pages(page, order));
        spin_unlock(&zone->lock);
        if (!page)
@@ -8159,6 +8187,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
        if (ret)
                return ret;

+       cc.zone->cma_alloc = 1;
        /*
         * In case of -EBUSY, we'd like to know which page causes problem.
         * So, just fall through. test_pages_isolated() has a tracepoint
@@ -8241,6 +8270,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 done:
        undo_isolate_page_range(pfn_max_align_down(start),
                                pfn_max_align_up(end), migratetype);
+       cc.zone->cma_alloc = 0;
        return ret;
 }
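
The cma_alloc flag toggled above is the other half of the policy: while
alloc_contig_range() is emptying a CMA region, __rmqueue_cma() stops
steering new movable allocations into CMA pageblocks. A tiny standalone
illustration of that bracketing (userspace C, names invented for the
sketch):

#include <stdbool.h>
#include <stdio.h>

static bool cma_alloc; /* models zone->cma_alloc */

/* Same test __rmqueue_cma() applies to movable requests. */
static const char *movable_source(void)
{
        return cma_alloc ? "regular pageblocks" : "CMA pageblocks";
}

/* Mirrors the bracketing added to alloc_contig_range() above. */
static void contig_alloc(void)
{
        cma_alloc = true;  /* cc.zone->cma_alloc = 1 */
        printf("during: movable -> %s\n", movable_source());
        cma_alloc = false; /* cc.zone->cma_alloc = 0 */
}

int main(void)
{
        printf("before: movable -> %s\n", movable_source());
        contig_alloc();
        printf("after:  movable -> %s\n", movable_source());
        return 0;
}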