cpuidle: lpm-levels: Remove debug event logging

A measurably significant amount of CPU time is spent on logging events
for debugging purposes in lpm_cpuidle_enter. Kill the useless logging to
reduce overhead.

Signed-off-by: Danny Lin <danny@kdrag0n.dev>
Danny Lin authored on 2019-05-05 21:59:37 -07:00; committed by spakkkk
parent 5d5815b023
commit 864548d421
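
For context, the cost being removed is the pattern visible in the diff below: update_debug_pc_event() takes a global spinlock, reads the arch counter, and fills a ring-buffer record on every idle entry and exit. What follows is a rough userspace sketch of that pattern for ballparking the per-event cost; it is not the kernel code and not part of this change. The names (dbg_event, log_event, dbg_lock) are invented for illustration, and only the 0x100 ring size and the record fields mirror the driver's lpm_debug/num_dbg_elements. Build with gcc -O2 -pthread and run it for a rough uncontended per-event figure; in the driver the same lock is shared by every CPU's idle path, so contention only adds to it.

/*
 * Rough userspace analogue of the removed update_debug_pc_event() path:
 * a global-lock-protected ring of timestamped debug records, written
 * once per simulated idle entry/exit.  Illustrative only.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NUM_DBG_ELEMENTS 0x100		/* mirrors num_dbg_elements */

struct dbg_event {			/* mirrors struct lpm_debug */
	uint64_t time;
	int cpu;
	uint32_t arg1, arg2, arg3, arg4;
};

static struct dbg_event ring[NUM_DBG_ELEMENTS];
static pthread_spinlock_t dbg_lock;
static unsigned int dbg_index;

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* One logged event: lock, timestamp, fill the record, unlock. */
static void log_event(uint32_t a1, uint32_t a2, uint32_t a3, uint32_t a4)
{
	struct dbg_event *e;

	pthread_spin_lock(&dbg_lock);	/* single global lock, as in the driver */
	e = &ring[dbg_index++ & (NUM_DBG_ELEMENTS - 1)];
	e->time = now_ns();		/* stands in for arch_counter_get_cntvct() */
	e->cpu = 0;
	e->arg1 = a1;
	e->arg2 = a2;
	e->arg3 = a3;
	e->arg4 = a4;
	pthread_spin_unlock(&dbg_lock);
}

int main(void)
{
	const int iters = 1000000;
	uint64_t start, end;
	int i;

	pthread_spin_init(&dbg_lock, PTHREAD_PROCESS_PRIVATE);

	start = now_ns();
	for (i = 0; i < iters; i++)
		log_event((uint32_t)i, 0xdeaffeed, 0xdeaffeed, 1);
	end = now_ns();

	/* Uncontended, single-thread cost per event. */
	printf("avg cost per logged event: %.1f ns\n",
	       (double)(end - start) / iters);
	return 0;
}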


@@ -34,7 +34,6 @@
#include <soc/qcom/event_timer.h>
#include <soc/qcom/lpm_levels.h>
#include <soc/qcom/lpm-stats.h>
#include <soc/qcom/minidump.h>
#include <asm/arch_timer.h>
#include <asm/suspend.h>
#include <asm/cpuidle.h>
@@ -48,30 +47,6 @@
#define PSCI_POWER_STATE(reset) (reset << 30)
#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)

enum {
	MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
	MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
};

enum debug_event {
	CPU_ENTER,
	CPU_EXIT,
	CLUSTER_ENTER,
	CLUSTER_EXIT,
	CPU_HP_STARTING,
	CPU_HP_DYING,
};

struct lpm_debug {
	u64 time;
	enum debug_event evt;
	int cpu;
	uint32_t arg1;
	uint32_t arg2;
	uint32_t arg3;
	uint32_t arg4;
};

static struct system_pm_ops *sys_pm_ops;
@@ -108,9 +83,6 @@ static bool suspend_in_progress;
static struct hrtimer lpm_hrtimer;
static DEFINE_PER_CPU(struct hrtimer, histtimer);
static DEFINE_PER_CPU(struct hrtimer, biastimer);
static struct lpm_debug *lpm_debug;
static phys_addr_t lpm_debug_phys;
static const int num_dbg_elements = 0x100;
static void cluster_unprepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
@@ -282,38 +254,10 @@ int lpm_get_latency(struct latency_level *level, uint32_t *latency)
}
EXPORT_SYMBOL(lpm_get_latency);
static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
		uint32_t arg2, uint32_t arg3, uint32_t arg4)
{
	struct lpm_debug *dbg;
	int idx;
	static DEFINE_SPINLOCK(debug_lock);
	static int pc_event_index;

	if (!lpm_debug)
		return;

	spin_lock(&debug_lock);
	idx = pc_event_index++;
	dbg = &lpm_debug[idx & (num_dbg_elements - 1)];

	dbg->evt = event;
	dbg->time = arch_counter_get_cntvct();
	dbg->cpu = raw_smp_processor_id();
	dbg->arg1 = arg1;
	dbg->arg2 = arg2;
	dbg->arg3 = arg3;
	dbg->arg4 = arg4;
	spin_unlock(&debug_lock);
}
static int lpm_dying_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;

	update_debug_pc_event(CPU_HP_DYING, cpu,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], false);
	cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
	return 0;
}
@@ -322,9 +266,6 @@ static int lpm_starting_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;

	update_debug_pc_event(CPU_HP_STARTING, cpu,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], false);
	cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false,
			0, true);
	return 0;
@@ -1107,9 +1048,6 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
		return -EPERM;

	if (idx != cluster->default_level) {
		update_debug_pc_event(CLUSTER_ENTER, idx,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
		trace_cluster_enter(cluster->cluster_name, idx,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
@@ -1263,9 +1201,6 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
	if (sys_pm_ops && sys_pm_ops->exit)
		sys_pm_ops->exit(success);

	update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
	trace_cluster_exit(cluster->cluster_name, cluster->last_level,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
@@ -1377,15 +1312,11 @@ static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
	affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
	state_id += power_state + affinity_level + cpu->levels[idx].psci_id;

	update_debug_pc_event(CPU_ENTER, state_id,
			0xdeaffeed, 0xdeaffeed, from_idle);
	stop_critical_timings();

	success = !arm_cpuidle_suspend(state_id);

	start_critical_timings();
	update_debug_pc_event(CPU_EXIT, state_id,
			success, 0xdeaffeed, from_idle);

	if (from_idle && cpu->levels[idx].use_bc_timer)
		tick_broadcast_exit();
@@ -1773,11 +1704,9 @@ static const struct platform_s2idle_ops lpm_s2idle_ops = {
static int lpm_probe(struct platform_device *pdev)
{
	int ret;
	int size;
	unsigned int cpu;
	struct hrtimer *cpu_histtimer;
	struct kobject *module_kobj = NULL;
	struct md_region md_entry;

	get_online_cpus();
	lpm_root_node = lpm_of_parse_cluster(pdev);
@@ -1809,10 +1738,6 @@ static int lpm_probe(struct platform_device *pdev)
	cluster_timer_init(lpm_root_node);

	size = num_dbg_elements * sizeof(struct lpm_debug);
	lpm_debug = dma_alloc_coherent(&pdev->dev, size,
			&lpm_debug_phys, GFP_KERNEL);

	register_cluster_lpm_stats(lpm_root_node, NULL);

	ret = cluster_cpuidle_register(lpm_root_node);
@@ -1843,15 +1768,6 @@ static int lpm_probe(struct platform_device *pdev)
	set_update_ipi_history_callback(update_ipi_history);

	/* Add lpm_debug to Minidump*/
	strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
	md_entry.virt_addr = (uintptr_t)lpm_debug;
	md_entry.phys_addr = lpm_debug_phys;
	md_entry.size = size;
	md_entry.id = MINIDUMP_DEFAULT_ID;
	if (msm_minidump_add_region(&md_entry) < 0)
		pr_info("Failed to add lpm_debug in Minidump\n");

	return 0;
failed:
	free_cluster_node(lpm_root_node);