Merge "trace: Add new trace event for DCVSH"

This commit is contained in:
qctecmdr 2019-09-12 15:41:04 -07:00 committed by Gerrit - the friendly Code Review server
commit d9e8ee12c4
2 changed files with 162 additions and 0 deletions

View File

@@ -5,28 +5,42 @@
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/energy_model.h>
#include <linux/sched.h>
#include <linux/cpu_cooling.h>
#define CREATE_TRACE_POINTS
#include <trace/events/dcvsh.h>
#define LUT_MAX_ENTRIES 40U
#define CORE_COUNT_VAL(val) (((val) & (GENMASK(18, 16))) >> 16)
#define LUT_ROW_SIZE 32
#define CLK_HW_DIV 2
#define EQ_IRQ_STATUS BIT(0)
#define LT_IRQ_STATUS BIT(1)
#define MAX_FN_SIZE 12
#define LIMITS_POLLING_DELAY_MS 10
#define CYCLE_CNTR_OFFSET(c, m, acc_count) \
(acc_count ? ((c - cpumask_first(m) + 1) * 4) : 0)
/* Indices into reg_bases[]; per-target byte offsets come from the
 * cpufreq_qcom_*_offsets[] tables (e.g. cpufreq_qcom_epss_std_offsets).
 */
enum {
REG_ENABLE,
REG_FREQ_LUT_TABLE,
REG_VOLT_LUT_TABLE,
REG_PERF_STATE,
REG_CYCLE_CNTR,
REG_LLM_DCVS_VC_VOTE, /* LLM DCVS voltage-corner vote (read in limits_mitigation_notify) */
REG_INTR_EN, /* DCVSH interrupt enable */
REG_INTR_CLR, /* DCVSH interrupt clear */
REG_INTR_STATUS, /* DCVSH interrupt status */
REG_ARRAY_SIZE, /* count of entries above, not a real register */
};
@@ -41,6 +55,11 @@ struct cpufreq_qcom {
unsigned int max_cores;
unsigned long xo_rate;
unsigned long cpu_hw_rate;
struct delayed_work freq_poll_work;
struct mutex dcvsh_lock;
int dcvsh_irq;
char dcvsh_irq_name[MAX_FN_SIZE];
bool is_irq_enabled;
};
struct cpufreq_counter {
@@ -63,11 +82,92 @@ static const u16 cpufreq_qcom_epss_std_offsets[REG_ARRAY_SIZE] = {
[REG_VOLT_LUT_TABLE] = 0x200,
[REG_PERF_STATE] = 0x320,
[REG_CYCLE_CNTR] = 0x3c4,
[REG_LLM_DCVS_VC_VOTE] = 0x024,
[REG_INTR_EN] = 0x304,
[REG_INTR_CLR] = 0x308,
[REG_INTR_STATUS] = 0x30C,
};
static struct cpufreq_counter qcom_cpufreq_counter[NR_CPUS];
static struct cpufreq_qcom *qcom_freq_domain_map[NR_CPUS];
/*
 * Translate the current LLM DCVS voltage-corner vote into a frequency
 * limit, publish it to the scheduler, and emit a dcvsh trace event.
 * Returns the limited frequency, or 0 if no LUT entry matches the vote.
 */
static unsigned long limits_mitigation_notify(struct cpufreq_qcom *c)
{
	unsigned long limit_freq;
	u32 voted_vc;
	int idx;

	/* Bits 13:8 of the vote register carry the limited voltage corner. */
	voted_vc = readl_relaxed(c->reg_bases[REG_LLM_DCVS_VC_VOTE]) &
			GENMASK(13, 8);

	/* Look up the LUT row whose voltage corner matches the vote. */
	for (idx = 0; idx < LUT_MAX_ENTRIES; idx++) {
		if (c->table[idx].driver_data != voted_vc)
			continue;

		limit_freq = c->table[idx].frequency;
		sched_update_cpu_freq_min_max(&c->related_cpus, 0, limit_freq);
		trace_dcvsh_freq(cpumask_first(&c->related_cpus), limit_freq);
		return limit_freq;
	}

	/* No matching corner in the LUT. */
	return 0;
}
/*
 * Deferred-work poller that runs while a DCVSH thermal/limits mitigation
 * is active. Re-reads the limited frequency each period; once the limit
 * returns to cpuinfo.max_freq it acks the LT interrupt and re-arms the
 * IRQ, switching back from polling to interrupt-driven operation.
 */
static void limits_dcvsh_poll(struct work_struct *work)
{
	struct cpufreq_qcom *c = container_of(work, struct cpufreq_qcom,
						freq_poll_work.work);
	struct cpufreq_policy *policy;
	unsigned long freq_limit;
	u32 regval, cpu;

	mutex_lock(&c->dcvsh_lock);

	cpu = cpumask_first(&c->related_cpus);

	/*
	 * cpufreq_cpu_get_raw() returns NULL when the policy is not
	 * available (e.g. during CPU hotplug or driver teardown).
	 * Retry later instead of dereferencing a NULL policy.
	 */
	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		mod_delayed_work(system_highpri_wq, &c->freq_poll_work,
				msecs_to_jiffies(LIMITS_POLLING_DELAY_MS));
		goto out;
	}

	freq_limit = limits_mitigation_notify(c);

	if (freq_limit != policy->cpuinfo.max_freq || !freq_limit) {
		/* Still throttled (or no LUT match) - keep polling. */
		mod_delayed_work(system_highpri_wq, &c->freq_poll_work,
				msecs_to_jiffies(LIMITS_POLLING_DELAY_MS));
	} else {
		/* Mitigation lifted: ack the LT event and re-arm the IRQ. */
		regval = readl_relaxed(c->reg_bases[REG_INTR_CLR]);
		regval &= ~LT_IRQ_STATUS;
		writel_relaxed(regval, c->reg_bases[REG_INTR_CLR]);

		c->is_irq_enabled = true;
		enable_irq(c->dcvsh_irq);
	}

out:
	mutex_unlock(&c->dcvsh_lock);
}
/*
 * Threaded IRQ handler for the DCVSH limits interrupt. On a
 * "limited/throttle" (LT) event it masks the IRQ and hands off to the
 * polling worker, which re-enables the IRQ once mitigation ends.
 */
static irqreturn_t dcvsh_handle_isr(int irq, void *data)
{
	struct cpufreq_qcom *c = data;
	u32 status;

	status = readl_relaxed(c->reg_bases[REG_INTR_STATUS]);

	/* Only the LT (below-threshold) event is handled here. */
	if (status & LT_IRQ_STATUS) {
		mutex_lock(&c->dcvsh_lock);
		if (c->is_irq_enabled) {
			/*
			 * Mask further interrupts and switch to polling
			 * until the frequency limit is lifted.
			 */
			c->is_irq_enabled = false;
			disable_irq_nosync(c->dcvsh_irq);
			limits_mitigation_notify(c);
			mod_delayed_work(system_highpri_wq, &c->freq_poll_work,
					msecs_to_jiffies(LIMITS_POLLING_DELAY_MS));
		}
		mutex_unlock(&c->dcvsh_lock);
	}

	return IRQ_HANDLED;
}
static u64 qcom_cpufreq_get_cpu_cycle_counter(int cpu)
{
struct cpufreq_counter *cpu_counter;
@@ -183,6 +283,21 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
em_register_perf_domain(policy->cpus, ret, &em_cb);
if (c->dcvsh_irq > 0) {
snprintf(c->dcvsh_irq_name, sizeof(c->dcvsh_irq_name),
"dcvsh-irq-%d", policy->cpu);
ret = devm_request_threaded_irq(cpu_dev, c->dcvsh_irq, NULL,
dcvsh_handle_isr, IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
IRQF_NO_SUSPEND | IRQF_SHARED, c->dcvsh_irq_name, c);
if (ret) {
dev_err(cpu_dev, "Failed to register irq %d\n", ret);
return ret;
}
c->is_irq_enabled = true;
writel_relaxed(LT_IRQ_STATUS, c->reg_bases[REG_INTR_EN]);
}
return 0;
}
@@ -241,6 +356,7 @@ static int qcom_cpufreq_hw_read_lut(struct platform_device *pdev,
struct device *dev = &pdev->dev;
void __iomem *base_freq, *base_volt;
u32 data, src, lval, i, core_count, prev_cc, prev_freq, cur_freq, volt;
u32 vc;
unsigned long cpu;
c->table = devm_kcalloc(dev, LUT_MAX_ENTRIES + 1,
@@ -259,12 +375,14 @@ static int qcom_cpufreq_hw_read_lut(struct platform_device *pdev,
data = readl_relaxed(base_volt + i * lut_row_size);
volt = (data & GENMASK(11, 0)) * 1000;
vc = data & GENMASK(21, 16);
if (src)
c->table[i].frequency = c->xo_rate * lval / 1000;
else
c->table[i].frequency = c->cpu_hw_rate / 1000;
c->table[i].driver_data = vc;
cur_freq = c->table[i].frequency;
dev_dbg(dev, "index=%d freq=%d, core_count %d\n",
@@ -385,6 +503,15 @@ static int qcom_cpu_resources_init(struct platform_device *pdev,
return ret;
}
if (of_find_property(dev->of_node, "interrupts", NULL)) {
c->dcvsh_irq = of_irq_get(dev->of_node, index);
if (c->dcvsh_irq > 0) {
mutex_init(&c->dcvsh_lock);
INIT_DEFERRABLE_WORK(&c->freq_poll_work,
limits_dcvsh_poll);
}
}
for_each_cpu(cpu_r, &c->related_cpus)
qcom_freq_domain_map[cpu_r] = c;

View File

@@ -0,0 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM dcvsh
#if !defined(_TRACE_DCVSH_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DCVSH_H
#include <linux/tracepoint.h>
/*
 * dcvsh_freq - emitted from limits_mitigation_notify() when a hardware
 * (LLM DCVS) mitigation changes the maximum frequency of a CPU cluster.
 * @cpu:  first CPU of the affected cluster (cpumask_first of related_cpus)
 * @freq: new maximum frequency from the LUT (kHz, per cpufreq convention
 *        -- TODO confirm against qcom_cpufreq_hw_read_lut scaling)
 */
TRACE_EVENT(dcvsh_freq,
TP_PROTO(unsigned long cpu, unsigned long freq),
TP_ARGS(cpu, freq),
TP_STRUCT__entry(
__field(unsigned long, cpu)
__field(unsigned long, freq)
),
TP_fast_assign(
__entry->cpu = cpu;
__entry->freq = freq;
),
TP_printk("cpu:%lu max frequency:%lu", __entry->cpu, __entry->freq)
);
#endif /* _TRACE_DCVSH_H */
/* This part must be outside protection */
#include <trace/define_trace.h>