mm: Perform PID map reads on the little CPU cluster
PID map reads for processes with thousands of mappings are performed extensively by certain Android apps, burning through CPU time on higher-performance CPUs even though reading PID maps is never a performance-critical task. We can relieve the load on the important CPUs by moving PID map reads to the little CPUs via sched_migrate_to_cpumask_*(). Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> Signed-off-by: dreamisbaka <jolinux.g@gmail.com>
This commit is contained in:
parent
c2c3304ca2
commit
e23d4ea590
@@ -298,6 +298,7 @@ struct proc_maps_private {
 #ifdef CONFIG_NUMA
 	struct mempolicy *task_mempolicy;
 #endif
+	unsigned long old_cpus_allowed;
 } __randomize_layout;

 struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode);
@@ -181,6 +181,9 @@ static void vma_stop(struct proc_maps_private *priv)
 	release_task_mempolicy(priv);
 	up_read(&mm->mmap_sem);
 	mmput(mm);
+
+	sched_migrate_to_cpumask_end(to_cpumask(&priv->old_cpus_allowed),
+				     cpu_lp_mask);
 }

 static struct vm_area_struct *
@@ -217,6 +220,9 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
 	if (!mm || !mmget_not_zero(mm))
 		return NULL;

+	sched_migrate_to_cpumask_start(to_cpumask(&priv->old_cpus_allowed),
+				       cpu_lp_mask);
+
 	if (down_read_killable(&mm->mmap_sem)) {
 		mmput(mm);
 		return ERR_PTR(-EINTR);
|
Loading…
Reference in New Issue
Block a user