From 1c86bcc13800b9a798bcec4141c883474c132b05 Mon Sep 17 00:00:00 2001
From: Rick Yiu
Date: Sat, 18 Apr 2020 15:36:24 +0800
Subject: [PATCH] sched: separate capacity margin for boosted tasks

With the introduction of the placement hint patch, boosted tasks will
not be scheduled on big cores. We tune the capacity margin to let
important boosted tasks get scheduled on big cores. However, the
capacity margin affects all groups of tasks, so non-boosted tasks also
get more chances to be scheduled on big cores. Solve this by using a
separate capacity margin for boosted tasks.

Bug: 152925197
Test: margin set correctly
Signed-off-by: Rick Yiu
Change-Id: I0e059c56efa9bc8513f0ef4b0f6ab8f5d04a592a
---
 kernel/sched/fair.c  | 25 ++++++++++++++++++-------
 kernel/sched/sched.h |  6 ++++--
 2 files changed, 22 insertions(+), 9 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 01e23b190efa..8d582811ff1c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -28,6 +28,7 @@
 
 #ifdef CONFIG_SMP
 static inline bool task_fits_max(struct task_struct *p, int cpu);
+static inline unsigned long boosted_task_util(struct task_struct *task);
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_SCHED_WALT
@@ -165,10 +166,16 @@ unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
  * (default: ~20%)
  */
 unsigned int capacity_margin = 1280;
-unsigned int sched_capacity_margin_up[NR_CPUS] = {
-	[0 ... NR_CPUS-1] = 1078}; /* ~5% margin */
-unsigned int sched_capacity_margin_down[NR_CPUS] = {
-	[0 ... NR_CPUS-1] = 1205}; /* ~15% margin */
+unsigned int sched_capacity_margin_up[CPU_NR] = {
+	[0 ... CPU_NR-1] = 1078}; /* ~5% margin */
+unsigned int sched_capacity_margin_down[CPU_NR] = {
+	[0 ... CPU_NR-1] = 1205}; /* ~15% margin */
+unsigned int sched_capacity_margin_up_boosted[CPU_NR] = {
+	3658, 3658, 3658, 3658, 3658, 3658, 1078, 1024
+}; /* ~72% margin for small, ~5% for big, 0% for big+ */
+unsigned int sched_capacity_margin_down_boosted[CPU_NR] = {
+	3658, 3658, 3658, 3658, 3658, 3658, 3658, 3658
+}; /* not used for small cores, ~72% margin for big and big+ */
 
 #ifdef CONFIG_SCHED_WALT
 /* 1ms default for 20ms window size scaled to 1024 */
@@ -3966,11 +3973,15 @@ static inline bool task_fits_capacity(struct task_struct *p,
 	 * CPU.
 	 */
 	if (capacity_orig_of(task_cpu(p)) > capacity_orig_of(cpu))
-		margin = sched_capacity_margin_down[cpu];
+		margin = schedtune_task_boost(p) > 0 ?
+			sched_capacity_margin_down_boosted[task_cpu(p)] :
+			sched_capacity_margin_down[task_cpu(p)];
 	else
-		margin = sched_capacity_margin_up[task_cpu(p)];
+		margin = schedtune_task_boost(p) > 0 ?
+			sched_capacity_margin_up_boosted[task_cpu(p)] :
+			sched_capacity_margin_up[task_cpu(p)];
 
-	return capacity * 1024 > uclamp_task_util(p) * margin;
+	return capacity * 1024 > boosted_task_util(p) * margin;
 }
 
 static inline bool task_fits_max(struct task_struct *p, int cpu)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 30e636196c71..2b80c96bf91e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -87,9 +87,11 @@
 struct rq;
 struct cpuidle_state;
 
+#define CPU_NR 8
+
 extern __read_mostly bool sched_predl;
-extern unsigned int sched_capacity_margin_up[NR_CPUS];
-extern unsigned int sched_capacity_margin_down[NR_CPUS];
+extern unsigned int sched_capacity_margin_up[CPU_NR];
+extern unsigned int sched_capacity_margin_down[CPU_NR];
 
 struct sched_walt_cpu_load {
 	unsigned long nl;
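
Not part of the patch, for reviewers only: a minimal userspace sketch of
the fitting check touched above, showing how the margin constants map to
headroom percentages (headroom = 1 - 1024/margin, so 1078 is ~5%, 1205 is
~15%, 3658 is ~72%). It mirrors the "capacity * 1024 > util * margin"
comparison from task_fits_capacity(); the helper name task_fits() and the
example capacity/utilization values are assumptions for illustration.

/*
 * Reviewer's sketch (not part of the patch): mirrors the
 * "capacity * 1024 > util * margin" test in task_fits_capacity().
 * Margin values are copied from the arrays in the patch; task_fits(),
 * the example capacity and the example util are hypothetical.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

static int task_fits(unsigned long util, unsigned long capacity,
		     unsigned long margin)
{
	/* Same per-CPU comparison the patch performs. */
	return capacity * SCHED_CAPACITY_SCALE > util * margin;
}

int main(void)
{
	const unsigned long margins[] = { 1078, 1205, 3658 };
	const unsigned long capacity = 1024;	/* a full-capacity core */
	const unsigned long util = 300;		/* example task utilization */

	for (int i = 0; i < 3; i++) {
		/* headroom = 1 - 1024/margin, rounded to whole percent */
		unsigned long pct = 100 -
			(100 * SCHED_CAPACITY_SCALE + margins[i] / 2) / margins[i];

		printf("margin %4lu (~%2lu%% headroom): util %lu on capacity %lu -> %s\n",
		       margins[i], pct, util, capacity,
		       task_fits(util, capacity, margins[i]) ? "fits" : "does not fit");
	}
	return 0;
}

With these inputs a util-300 task fits under the 1078 and 1205 margins but
not under 3658, which is the intended effect: the ~72% entries in
sched_capacity_margin_up_boosted keep boosted tasks from fitting on small
cores, the matching entries in sched_capacity_margin_down_boosted keep
them from migrating back down, and the 1024 (0% margin) entry lets any
boosted task fit on the big+ core.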