diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index f00fe85e6997..538e1caedffe 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1598,6 +1598,28 @@ TRACE_EVENT_CONDITION(sched_overutilized,
 		__entry->overutilized ? 1 : 0, __entry->cpulist)
 );
 
+TRACE_EVENT(sched_capacity_update,
+
+	TP_PROTO(int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, capacity)
+		__field(unsigned int, capacity_orig)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->capacity = capacity_of(cpu);
+		__entry->capacity_orig = capacity_orig_of(cpu);
+	),
+
+	TP_printk("cpu=%d capacity=%u capacity_orig=%u",
+		__entry->cpu, __entry->capacity, __entry->capacity_orig)
+);
+
 /*
  * Tracepoint for sched_get_nr_running_avg
  */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7c43d2391e2e..b23e9a805e37 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9544,19 +9544,29 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
 	unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
 	struct sched_group *sdg = sd->groups;
+	bool update = false;
 
 	capacity *= arch_scale_max_freq_capacity(sd, cpu);
 	capacity >>= SCHED_CAPACITY_SHIFT;
 
 	capacity = min(capacity, thermal_cap(cpu));
-	cpu_rq(cpu)->cpu_capacity_orig = capacity;
+	if (cpu_rq(cpu)->cpu_capacity_orig != capacity) {
+		cpu_rq(cpu)->cpu_capacity_orig = capacity;
+		update = true;
+	}
 
 	capacity = scale_rt_capacity(cpu, capacity);
 
 	if (!capacity)
 		capacity = 1;
 
-	cpu_rq(cpu)->cpu_capacity = capacity;
+	if (cpu_rq(cpu)->cpu_capacity != capacity) {
+		cpu_rq(cpu)->cpu_capacity = capacity;
+		update = true;
+	}
+	if (update)
+		trace_sched_capacity_update(cpu);
+
 	sdg->sgc->capacity = capacity;
 	sdg->sgc->min_capacity = capacity;
 	sdg->sgc->max_capacity = capacity;
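
For reference, a minimal sketch of consuming the new tracepoint from a
module. This assumes the event is exported to modules (e.g. via
EXPORT_TRACEPOINT_SYMBOL_GPL, which is not part of this patch), and the
probe/function names below are purely illustrative. The
register_trace_sched_capacity_update() helper is generated by the
TRACE_EVENT() macro above; the probe receives the TP_PROTO arguments
after a leading void *data cookie:

	#include <linux/module.h>
	#include <trace/events/sched.h>

	/*
	 * Called on every capacity change. Only the TP_PROTO argument
	 * (cpu) is passed in; the capacity values themselves are read
	 * by TP_fast_assign when the event is recorded.
	 */
	static void capacity_probe(void *data, int cpu)
	{
		pr_debug("cpu%d capacity changed\n", cpu);
	}

	static int __init capacity_tracer_init(void)
	{
		return register_trace_sched_capacity_update(capacity_probe, NULL);
	}

	static void __exit capacity_tracer_exit(void)
	{
		unregister_trace_sched_capacity_update(capacity_probe, NULL);
		/* Wait for in-flight probes before module text goes away. */
		tracepoint_synchronize_unregister();
	}

	module_init(capacity_tracer_init);
	module_exit(capacity_tracer_exit);
	MODULE_LICENSE("GPL");

No module is needed just to log the event: since it lives in the sched
event group, it can also be enabled through tracefs, e.g.
"echo 1 > /sys/kernel/tracing/events/sched/sched_capacity_update/enable",
after which the TP_printk() output appears in the trace buffer.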