sched: separate capacity margin for boosted tasks

With the introduction of the placement hint patch, boosted tasks will
not be scheduled on big cores. We tune the capacity margin to let
important boosted tasks get scheduled on big cores. However, the
capacity margin affects all groups of tasks, so non-boosted tasks also
get more chances to be scheduled on big cores. Solve this by
introducing a separate capacity margin for boosted tasks.

Bug: 152925197
Test: margin set correctly
Signed-off-by: Rick Yiu <rickyiu@google.com>
Change-Id: I0e059c56efa9bc8513f0ef4b0f6ab8f5d04a592a
Rick Yiu authored on 2020-04-18 15:36:24 +08:00, committed by spakkkk
parent b8399d8efc
commit 1c86bcc138
2 changed files with 22 additions and 9 deletions
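
The diff below hinges on one comparison: a task fits a candidate CPU when that CPU's capacity, scaled by 1024, exceeds the task's utilization multiplied by a margin indexed by the task's current CPU, and with this patch boosted tasks consult a separate margin table. A margin of 1078 is roughly 1024/0.95 (~5% headroom), 1205 is roughly 1024/0.85 (~15%), and 3658 is roughly 1024/0.28 (~72%). The following minimal userspace sketch illustrates the selection; the task_fits()/margin_up names, the example capacity, and the utilization values are illustrative stand-ins, while the margin constants, the 8-CPU layout, and the shape of the comparison are taken from the patch.

/*
 * Minimal userspace sketch (not kernel code) of the boosted-vs-non-boosted
 * margin selection this patch adds. Builds with gcc/clang (the range
 * designator is a GNU extension, mirroring the patch's initializer style).
 */
#include <stdbool.h>
#include <stdio.h>

#define CPU_NR 8
#define SCHED_CAPACITY_SCALE 1024

/* ~5% headroom: 1078 ~= 1024 / 0.95 (patch value for non-boosted up-migration) */
static unsigned int margin_up[CPU_NR] = {
        [0 ... CPU_NR - 1] = 1078
};

/* ~72% headroom (3658 ~= 1024 / 0.28) on CPUs 0-5, ~5% on CPU 6, 0% on CPU 7 */
static unsigned int margin_up_boosted[CPU_NR] = {
        3658, 3658, 3658, 3658, 3658, 3658, 1078, 1024
};

/* util and boosted stand in for uclamp_task_util() and schedtune_task_boost() */
static bool task_fits(unsigned long util, bool boosted,
                      int src_cpu, unsigned long dst_capacity)
{
        unsigned int margin = boosted ? margin_up_boosted[src_cpu]
                                      : margin_up[src_cpu];

        /* same shape as the patched task_fits_capacity() comparison */
        return dst_capacity * SCHED_CAPACITY_SCALE > util * margin;
}

int main(void)
{
        /* util-300 task on little CPU 0, candidate little CPU of capacity 404:    */
        /* non-boosted: 404*1024 = 413696 > 300*1078 = 323400  -> fits the little  */
        /* boosted:     404*1024 = 413696 > 300*3658 = 1097400 -> must go bigger   */
        printf("non-boosted fits little: %d\n", task_fits(300, false, 0, 404));
        printf("boosted fits little:     %d\n", task_fits(300, true, 0, 404));
        return 0;
}

With a single shared margin this extra headroom would apply to every task; splitting the table keeps non-boosted tasks on the ~5% margin while boosted tasks see the little cores as too small and spill onto the big cores.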


@@ -28,6 +28,7 @@
 #ifdef CONFIG_SMP
 static inline bool task_fits_max(struct task_struct *p, int cpu);
+static inline unsigned long boosted_task_util(struct task_struct *task);
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_SCHED_WALT
@@ -165,10 +166,16 @@ unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
  * (default: ~20%)
  */
 unsigned int capacity_margin = 1280;
-unsigned int sched_capacity_margin_up[NR_CPUS] = {
-        [0 ... NR_CPUS-1] = 1078}; /* ~5% margin */
-unsigned int sched_capacity_margin_down[NR_CPUS] = {
-        [0 ... NR_CPUS-1] = 1205}; /* ~15% margin */
+unsigned int sched_capacity_margin_up[CPU_NR] = {
+        [0 ... CPU_NR-1] = 1078}; /* ~5% margin */
+unsigned int sched_capacity_margin_down[CPU_NR] = {
+        [0 ... CPU_NR-1] = 1205}; /* ~15% margin */
+unsigned int sched_capacity_margin_up_boosted[CPU_NR] = {
+        3658, 3658, 3658, 3658, 3658, 3658, 1078, 1024
+}; /* 72% margin for small, 5% for big, 0% for big+ */
+unsigned int sched_capacity_margin_down_boosted[CPU_NR] = {
+        3658, 3658, 3658, 3658, 3658, 3658, 3658, 3658
+}; /* not used for small cores, 72% margin for big, 72% margin for big+ */
 
 #ifdef CONFIG_SCHED_WALT
 /* 1ms default for 20ms window size scaled to 1024 */
@@ -3966,11 +3973,15 @@ static inline bool task_fits_capacity(struct task_struct *p,
          * CPU.
          */
         if (capacity_orig_of(task_cpu(p)) > capacity_orig_of(cpu))
-                margin = sched_capacity_margin_down[cpu];
+                margin = schedtune_task_boost(p) > 0 ?
+                        sched_capacity_margin_down_boosted[task_cpu(p)] :
+                        sched_capacity_margin_down[task_cpu(p)];
         else
-                margin = sched_capacity_margin_up[task_cpu(p)];
+                margin = schedtune_task_boost(p) > 0 ?
+                        sched_capacity_margin_up_boosted[task_cpu(p)] :
+                        sched_capacity_margin_up[task_cpu(p)];
 
-        return capacity * 1024 > uclamp_task_util(p) * margin;
+        return capacity * 1024 > uclamp_task(p) * margin;
 }
 
 static inline bool task_fits_max(struct task_struct *p, int cpu)


@@ -87,9 +87,11 @@
 struct rq;
 struct cpuidle_state;
 
+#define CPU_NR 8
+
 extern __read_mostly bool sched_predl;
-extern unsigned int sched_capacity_margin_up[NR_CPUS];
-extern unsigned int sched_capacity_margin_down[NR_CPUS];
+extern unsigned int sched_capacity_margin_up[CPU_NR];
+extern unsigned int sched_capacity_margin_down[CPU_NR];
 
 struct sched_walt_cpu_load {
         unsigned long nl;