From aaa6f4cef9020b96dec4938853c8ac6dcd66c9ad Mon Sep 17 00:00:00 2001
From: Pavankumar Kondeti
Date: Thu, 16 Jan 2020 08:57:36 +0530
Subject: [PATCH] sched: walt: fix use after free in walt_task_dump()

When SCHED_BUG_ON() is hit, we print the current task of every online
CPU. Accessing the task running on a remote CPU is racy, since it can
be freed while we dereference it. Make this safe by taking a reference
on the task struct and by also checking the task's exiting flag. The
WALT CPU load pointers are freed only after the exiting flag is set, so
this check prevents us from accessing the freed memory.

Change-Id: I2fcbd37e9e4f096da5372be041aec2b60da26341
Signed-off-by: Pavankumar Kondeti
---
 kernel/sched/walt.h | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index 967623724756..b459946aea7d 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __WALT_H
@@ -364,6 +364,15 @@ static inline void walt_rq_dump(int cpu)
 	struct task_struct *tsk = cpu_curr(cpu);
 	int i;
 
+	/*
+	 * Increment the task reference so that it can't be
+	 * freed on a remote CPU. Since we are going to
+	 * enter panic, there is no need to decrement the
+	 * task reference. Decrementing the task reference
+	 * can't be done in atomic context, especially with
+	 * rq locks held.
+	 */
+	get_task_struct(tsk);
 	printk_deferred("CPU:%d nr_running:%u current: %d (%s)\n",
 			cpu, rq->nr_running, tsk->pid, tsk->comm);
 
@@ -388,7 +397,8 @@ static inline void walt_rq_dump(int cpu)
 		printk_deferred("rq->load_subs[%d].new_subs=%llu)\n",
 				i, rq->load_subs[i].new_subs);
 	}
-	walt_task_dump(tsk);
+	if (!exiting_task(tsk))
+		walt_task_dump(tsk);
 	SCHED_PRINT(sched_capacity_margin_up[cpu]);
 	SCHED_PRINT(sched_capacity_margin_down[cpu]);
 }
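
Note: the pattern the fix relies on can be sketched roughly as below. This is an
illustrative sketch only and is not part of the patch: dump_remote_curr() is a
hypothetical helper, and the snippet assumes a scheduler-internal context (like
walt.h in kernel/sched/) where cpu_curr() is visible.

/*
 * Illustrative sketch (not part of the patch). dump_remote_curr() is a
 * hypothetical helper; get_task_struct()/put_task_struct() and cpu_curr()
 * are the kernel primitives the patch builds on.
 */
#include <linux/sched.h>
#include <linux/sched/task.h>

static void dump_remote_curr(int cpu)
{
	struct task_struct *tsk = cpu_curr(cpu);

	/* Pin the task so it cannot be freed while we dereference it. */
	get_task_struct(tsk);

	pr_info("CPU:%d current: %d (%s)\n", cpu, tsk->pid, tsk->comm);

	/*
	 * In a normal (non-panic) path the reference is dropped afterwards,
	 * from a context where no rq locks are held. The patch deliberately
	 * skips this step because the system is about to panic.
	 */
	put_task_struct(tsk);
}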