ANDROID: sched: EAS: take cstate into account when selecting idle core

Introduce a new sysctl, 'sched_cstate_aware', to control this
behaviour. When enabled, select_idle_sibling() in CFS chooses the
idle CPU in the sibling group with the lowest idle state index;
idle state indices are assumed to increase with sleep depth and
hence with wakeup latency. In this way we attempt to minimise
wakeup latency when an idle CPU is required.
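
For reference, a minimal sketch of how such a knob is typically
exposed through the kernel sysctl table (the real registration is
outside this hunk; the table name, default value and handler below
are assumptions, not part of this patch):

#include <linux/sysctl.h>

/* Assumed default: cstate-aware idle selection enabled. */
unsigned int sysctl_sched_cstate_aware = 1;

/*
 * Hypothetical table entry; the real one sits alongside the other
 * scheduler knobs (e.g. kern_table in kernel/sysctl.c) and may differ.
 */
static struct ctl_table cstate_aware_table[] = {
	{
		.procname	= "sched_cstate_aware",
		.data		= &sysctl_sched_cstate_aware,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};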

Signed-off-by: Srinath Sridharan <srinathsr@google.com>

Includes:
sched: EAS: fix select_idle_sibling
When sysctl_sched_cstate_aware is enabled, the best_idle CPU is never
chosen in the original flow because it jumps straight to 'done'.
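
Assuming the knob ends up in the usual location for scheduler
sysctls, it can be flipped at runtime; a small illustrative
user-space snippet follows (the /proc path is inferred from the
sysctl name and is an assumption, not confirmed by this patch):

#include <stdio.h>

int main(void)
{
	/* 1 = prefer the shallowest-idle-state CPU, 0 = legacy selection */
	const char *path = "/proc/sys/kernel/sched_cstate_aware";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "1\n");
	fclose(f);
	return 0;
}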

Bug: 30107557
Bug: 144961676
Change-Id: Ie09c2e3960cafbb976f8d472747faefab3b4d6ac
Signed-off-by: martin_liu <martin_liu@htc.com>
Signed-off-by: Andres Oportus <andresoportus@google.com>
[refactored and fixed conflicts]
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Jimmy Shiu <jimmyshiu@google.com>

@@ -6653,7 +6653,8 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 /*
  * Try and locate an idle core/thread in the LLC cache domain.
  */
-static int select_idle_sibling(struct task_struct *p, int prev, int target)
+static inline int __select_idle_sibling(struct task_struct *p, int prev,
+					int target)
 {
 	struct sched_domain *sd;
 	int i, recent_used_cpu;
@@ -6702,6 +6703,82 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	return target;
 }
 
+static inline int select_idle_sibling_cstate_aware(struct task_struct *p,
+						   int prev, int target)
+{
+	struct sched_domain *sd;
+	struct sched_group *sg;
+	int best_idle_cpu = -1;
+	int best_idle_cstate = INT_MAX;
+	unsigned long best_idle_capacity = ULONG_MAX;
+	int i;
+
+	/*
+	 * Iterate the domains and find an eligible idle cpu.
+	 */
+	sd = rcu_dereference(per_cpu(sd_llc, target));
+	for_each_lower_domain(sd) {
+		sg = sd->groups;
+		do {
+			if (!cpumask_intersects(sched_group_span(sg),
+						&p->cpus_allowed))
+				goto next;
+
+			for_each_cpu_and(i, &p->cpus_allowed,
+					 sched_group_span(sg)) {
+				int idle_idx;
+				unsigned long new_usage;
+				unsigned long capacity_orig;
+
+				if (!idle_cpu(i))
+					goto next;
+
+				/* figure out if the task can fit here at all */
+				new_usage = boosted_task_util(p);
+				capacity_orig = capacity_orig_of(i);
+
+				if (new_usage > capacity_orig)
+					goto next;
+
+				/* if the task fits without changing OPP and we
+				 * intended to use this CPU, just proceed
+				 */
+				if (i == target &&
+				    new_usage <= capacity_curr_of(target)) {
+					return target;
+				}
+
+				/* otherwise select CPU with shallowest idle
+				 * state to reduce wakeup latency.
+				 */
+				idle_idx = idle_get_state_idx(cpu_rq(i));
+
+				if (idle_idx < best_idle_cstate &&
+				    capacity_orig <= best_idle_capacity) {
+					best_idle_cpu = i;
+					best_idle_cstate = idle_idx;
+					best_idle_capacity = capacity_orig;
+				}
+			}
+	next:
+			sg = sg->next;
+		} while (sg != sd->groups);
+	}
+
+	if (best_idle_cpu >= 0)
+		target = best_idle_cpu;
+
+	return target;
+}
+
+static int select_idle_sibling(struct task_struct *p, int prev, int target)
+{
+	if (!sysctl_sched_cstate_aware)
+		return __select_idle_sibling(p, prev, target);
+
+	return select_idle_sibling_cstate_aware(p, prev, target);
+}
+
 /*
  * cpu_util_without: compute cpu utilization without any contributions from *p
  * @cpu: the CPU which utilization is requested