diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6c3781eaab6b..cc77636cd9c1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7044,6 +7044,7 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 	int isolated_candidate = -1;
 	unsigned int target_nr_rtg_high_prio = UINT_MAX;
 	bool rtg_high_prio_task = task_rtg_high_prio(p);
+	int mid_cap_orig_cpu = cpu_rq(smp_processor_id())->rd->mid_cap_orig_cpu;
 
 	/*
 	 * In most cases, target_capacity tracks capacity_orig of the most
@@ -7196,11 +7197,16 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 			/*
 			 * Case A.1: IDLE CPU
 			 * Return the best IDLE CPU we find:
-			 * - for boosted tasks: the CPU with the highest
-			 *   performance (i.e. biggest capacity_orig)
+			 * - for boosted tasks: if the task fits in mid
+			 *   cluster, prefer the first mid cluster cpu
+			 *   due to cpuset design, then other mid cluster
+			 *   cpus. Otherwise, choose max cluster cpu.
 			 * - for !boosted tasks: the most energy
 			 *   efficient CPU (i.e. smallest capacity_orig)
 			 */
+			if (boosted && mid_cap_orig_cpu != -1 &&
+				best_idle_cpu == mid_cap_orig_cpu)
+				break;
 			if (idle_cpu(i)) {
 				if (boosted &&
 				    capacity_orig < target_capacity)
@@ -7251,11 +7257,11 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 
 				/*
 				 * If utilization is the same between CPUs,
-				 * break the ties with WALT's cumulative
-				 * demand
+				 * break the ties with cumulative demand,
+				 * also prefer lower order cpu.
 				 */
 				if (new_util == best_active_util &&
-				    new_util_cuml > best_active_cuml_util)
+				    new_util_cuml >= best_active_cuml_util)
 					continue;
 				min_wake_util = wake_util;
 				best_active_util = new_util;
@@ -7410,7 +7416,13 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 		    (boosted && (best_idle_cpu != -1 || target_cpu != -1 ||
 		     (fbt_env->strict_max && most_spare_cap_cpu != -1)))) {
 			if (boosted) {
-				if (!next_group_higher_cap)
+				/*
+				 * For boosted task, stop searching when an idle
+				 * cpu is found in mid cluster.
+				 */
+				if ((mid_cap_orig_cpu != -1 &&
+					best_idle_cpu >= mid_cap_orig_cpu) ||
+				    !next_group_higher_cap)
 					break;
 			} else {
 				if (next_group_higher_cap)
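
The snippet below is a minimal, self-contained userspace sketch (not kernel code; the struct, values, and CPU numbers are made up for illustration) of the effect of changing the cumulative-demand tie-break from ">" to ">=": candidates are scanned in ascending CPU order, and with ">=" an equally-loaded later candidate no longer displaces the current best, so the lower-numbered CPU is kept on a tie.

/* Hypothetical illustration of the ">=" tie-break; not the kernel code. */
#include <stdio.h>

struct candidate {
	int cpu;
	unsigned long util;       /* stand-in for new_util */
	unsigned long util_cuml;  /* stand-in for new_util_cuml */
};

int main(void)
{
	struct candidate cands[] = {
		{ 4, 100, 300 },
		{ 5, 100, 300 },  /* same util and cumulative demand as cpu 4 */
		{ 6, 100, 250 },  /* same util, lower cumulative demand */
	};
	unsigned long best_util = ~0UL, best_cuml = ~0UL;
	int best_cpu = -1;

	for (int i = 0; i < 3; i++) {
		struct candidate *c = &cands[i];

		if (c->util > best_util)
			continue;
		/* ">=" skips an equal candidate, keeping the lower-numbered CPU */
		if (c->util == best_util && c->util_cuml >= best_cuml)
			continue;
		best_util = c->util;
		best_cuml = c->util_cuml;
		best_cpu = c->cpu;
	}
	/* cpu 4 wins the tie against cpu 5; cpu 6 wins overall (lower cuml) */
	printf("best_cpu=%d\n", best_cpu);
	return 0;
}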