qos: Replace expensive cpumask usage with raw bitwise operations

cpumask_set_cpu() uses the set_bit() helper which, on kernels prior to
4.19 that rely on the generic atomic bitops, takes a spinlock to
guarantee atomicity. This is expensive and unnecessary, especially
since the qos functions are hot code paths. The rest of the cpumask
helpers go through the bitmap API, which is likewise more expensive
than a few simple operations on a single word.

Since we're operating with a CPU count that can fit within a word,
replace the expensive cpumask operations with raw bitwise operations
wherever possible to make the pm_qos framework more efficient.

Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
Author:       Sultan Alsawaf, 2020-12-13 16:28:09 -08:00
Committed by: spakkkk
Parent:       73a40230d3
Commit:       b6bbd4193e
8 changed files with 28 additions and 52 deletions
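
To make the pattern concrete before the diffs, here is a minimal, self-contained sketch (not part of the commit; the type and function names are hypothetical) of how a per-request CPU mask can live in a single atomic word and be manipulated with plain bitwise operations, assuming every possible CPU bit fits in one word:

/*
 * Illustrative sketch only: one atomic word holding one bit per CPU,
 * updated with raw bitwise operations instead of the cpumask helpers.
 */
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/types.h>

struct example_req {
        atomic_t cpus_affine;           /* one bit per CPU: CPU0 == BIT(0) */
};

static void example_affine_to_cpus(struct example_req *req,
                                   const unsigned int *cpus, int count)
{
        int i;

        /* Start from an empty mask, then OR in one bit per requested CPU. */
        atomic_set(&req->cpus_affine, 0);
        for (i = 0; i < count; i++)
                atomic_or(BIT(cpus[i]), &req->cpus_affine);
}

static bool example_mask_is_empty(const struct example_req *req)
{
        /* A zero word means "no CPUs requested"; no bitmap scan needed. */
        return !atomic_read(&req->cpus_affine);
}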

View File

@@ -4327,7 +4327,6 @@ static int fastrpc_internal_control(struct fastrpc_file *fl,
 {
         int err = 0;
         unsigned int latency;
-        cpumask_t mask;
         struct fastrpc_apps *me = &gfa;
         u32 len = me->silvercores.corecount, i = 0;
@@ -4345,11 +4344,11 @@ static int fastrpc_internal_control(struct fastrpc_file *fl,
         VERIFY(err, latency != 0);
         if (err)
                 goto bail;
-        cpumask_clear(&mask);
+        atomic_set(&fl->pm_qos_req.cpus_affine, 0);
         for (i = 0; i < len; i++)
-                cpumask_set_cpu(me->silvercores.coreno[i], &mask);
+                atomic_or(BIT(me->silvercores.coreno[i]),
+                          &fl->pm_qos_req.cpus_affine);
         fl->pm_qos_req.type = PM_QOS_REQ_AFFINE_CORES;
-        cpumask_copy(&fl->pm_qos_req.cpus_affine, &mask);
         if (!fl->qos_request) {
                 pm_qos_add_request(&fl->pm_qos_req,

View File

@@ -5281,7 +5281,6 @@ int kgsl_of_property_read_ddrtype(struct device_node *node, const char *base,
 int kgsl_device_platform_probe(struct kgsl_device *device)
 {
         int status = -EINVAL;
-        int cpu;
         status = _register_device(device);
         if (status)
@@ -5373,11 +5372,8 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
         qos->type = PM_QOS_REQ_AFFINE_CORES;
-        cpumask_empty(&qos->cpus_affine);
-        for_each_possible_cpu(cpu) {
-                if ((1 << cpu) & device->pwrctrl.l2pc_cpus_mask)
-                        cpumask_set_cpu(cpu, &qos->cpus_affine);
-        }
+        atomic_set(&device->pwrctrl.l2pc_cpus_qos.cpus_affine,
+                   device->pwrctrl.l2pc_cpus_mask);
         pm_qos_add_request(&device->pwrctrl.l2pc_cpus_qos,
                         PM_QOS_CPU_DMA_LATENCY,

View File

@@ -56,7 +56,7 @@ enum pm_qos_req_type {
 struct pm_qos_request {
         enum pm_qos_req_type type;
-        struct cpumask cpus_affine;
+        atomic_t cpus_affine;
 #ifdef CONFIG_SMP
         uint32_t irq;
         /* Internal structure members */
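
Since cpus_affine becomes an atomic_t (a 32-bit int), the conversion implicitly assumes that one bit per possible CPU fits in 32 bits. A hypothetical compile-time guard, shown only as a sketch and not present in the commit, could document that assumption:

/* Sketch only: make the "CPU count fits in a word" assumption explicit. */
#include <linux/build_bug.h>
#include <linux/threads.h>

static inline void example_check_cpus_affine_width(void)
{
        /* atomic_t holds 32 bits, so every possible CPU bit must fit there. */
        BUILD_BUG_ON(NR_CPUS > 32);
}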

View File

@@ -51,6 +51,8 @@
 #include <linux/export.h>
 #include <trace/events/power.h>
+
+#define CPUMASK_ALL (BIT(NR_CPUS) - 1)
 /*
  * locking rule: all changes to constraints or notifiers lists
  * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
@@ -267,7 +269,7 @@ static const struct file_operations pm_qos_debug_fops = {
 };
 static inline int pm_qos_set_value_for_cpus(struct pm_qos_constraints *c,
-                struct cpumask *cpus)
+                unsigned long *cpus)
 {
         struct pm_qos_request *req = NULL;
         int cpu;
@@ -282,7 +284,9 @@ static inline int pm_qos_set_value_for_cpus(struct pm_qos_constraints *c,
                 return -EINVAL;
         plist_for_each_entry(req, &c->list, node) {
-                for_each_cpu(cpu, &req->cpus_affine) {
+                unsigned long affined_cpus = atomic_read(&req->cpus_affine);
+
+                for_each_cpu(cpu, to_cpumask(&affined_cpus)) {
                         switch (c->type) {
                         case PM_QOS_MIN:
                                 if (qos_val[cpu] > req->node.prio)
@@ -300,7 +304,7 @@ static inline int pm_qos_set_value_for_cpus(struct pm_qos_constraints *c,
         for_each_possible_cpu(cpu) {
                 if (c->target_per_cpu[cpu] != qos_val[cpu])
-                        cpumask_set_cpu(cpu, cpus);
+                        *cpus |= BIT(cpu);
                 c->target_per_cpu[cpu] = qos_val[cpu];
         }
@@ -323,7 +327,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
 {
         unsigned long flags;
         int prev_value, curr_value, new_value;
-        struct cpumask cpus;
+        unsigned long cpus = 0;
         int ret;
         spin_lock_irqsave(&pm_qos_lock, flags);
@@ -355,7 +359,6 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
         }
         curr_value = pm_qos_get_value(c);
-        cpumask_clear(&cpus);
         pm_qos_set_value(c, curr_value);
         ret = pm_qos_set_value_for_cpus(c, &cpus);
@@ -368,7 +371,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
          * to update the new qos restriction for the cores
          */
-        if (!cpumask_empty(&cpus) ||
+        if (cpus ||
             (ret && prev_value != curr_value)) {
                 ret = 1;
                 if (c->notifiers)
@@ -533,7 +536,6 @@ static void pm_qos_work_fn(struct work_struct *work)
 #ifdef CONFIG_SMP
 static void pm_qos_irq_release(struct kref *ref)
 {
-        unsigned long flags;
         struct irq_affinity_notify *notify = container_of(ref,
                         struct irq_affinity_notify, kref);
         struct pm_qos_request *req = container_of(notify,
@@ -541,10 +543,7 @@ static void pm_qos_irq_release(struct kref *ref)
         struct pm_qos_constraints *c =
                 pm_qos_array[req->pm_qos_class]->constraints;
-        spin_lock_irqsave(&pm_qos_lock, flags);
-        cpumask_setall(&req->cpus_affine);
-        spin_unlock_irqrestore(&pm_qos_lock, flags);
+        atomic_set(&req->cpus_affine, CPUMASK_ALL);
         pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ,
                         c->default_value);
 }
@@ -552,16 +551,12 @@ static void pm_qos_irq_release(struct kref *ref)
 static void pm_qos_irq_notify(struct irq_affinity_notify *notify,
                 const cpumask_t *mask)
 {
-        unsigned long flags;
         struct pm_qos_request *req = container_of(notify,
                         struct pm_qos_request, irq_notify);
         struct pm_qos_constraints *c =
                 pm_qos_array[req->pm_qos_class]->constraints;
-        spin_lock_irqsave(&pm_qos_lock, flags);
-        cpumask_copy(&req->cpus_affine, mask);
-        spin_unlock_irqrestore(&pm_qos_lock, flags);
+        atomic_set(&req->cpus_affine, *cpumask_bits(mask));
         pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ, req->node.prio);
 }
 #endif
@@ -592,9 +587,8 @@ void pm_qos_add_request(struct pm_qos_request *req,
         switch (req->type) {
         case PM_QOS_REQ_AFFINE_CORES:
-                if (cpumask_empty(&req->cpus_affine)) {
+                if (!atomic_cmpxchg_relaxed(&req->cpus_affine, 0, CPUMASK_ALL)) {
                         req->type = PM_QOS_REQ_ALL_CORES;
-                        cpumask_setall(&req->cpus_affine);
                         WARN(1, "Affine cores not set for request with affinity flag\n");
                 }
                 break;
@@ -610,14 +604,14 @@ void pm_qos_add_request(struct pm_qos_request *req,
                         mask = desc->irq_data.common->affinity;
                         /* Get the current affinity */
-                        cpumask_copy(&req->cpus_affine, mask);
+                        atomic_set(&req->cpus_affine, *cpumask_bits(mask));
                         req->irq_notify.irq = req->irq;
                         req->irq_notify.notify = pm_qos_irq_notify;
                         req->irq_notify.release = pm_qos_irq_release;
                 } else {
                         req->type = PM_QOS_REQ_ALL_CORES;
-                        cpumask_setall(&req->cpus_affine);
+                        atomic_set(&req->cpus_affine, CPUMASK_ALL);
                         WARN(1, "IRQ-%d not set for request with affinity flag\n",
                                         req->irq);
                 }
@@ -627,7 +621,7 @@ void pm_qos_add_request(struct pm_qos_request *req,
                 WARN(1, "Unknown request type %d\n", req->type);
                 /* fall through */
         case PM_QOS_REQ_ALL_CORES:
-                cpumask_setall(&req->cpus_affine);
+                atomic_set(&req->cpus_affine, CPUMASK_ALL);
                 break;
         }
@@ -647,7 +641,7 @@ void pm_qos_add_request(struct pm_qos_request *req,
                 if (ret) {
                         WARN(1, "IRQ affinity notify set failed\n");
                         req->type = PM_QOS_REQ_ALL_CORES;
-                        cpumask_setall(&req->cpus_affine);
+                        atomic_set(&req->cpus_affine, CPUMASK_ALL);
                         pm_qos_update_target(
                                 pm_qos_array[pm_qos_class]->constraints,
                                 &req->node, PM_QOS_UPDATE_REQ, value);
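
The hunks above bridge between raw words and the cpumask API with cpumask_bits() and to_cpumask(). Below is an illustrative sketch only (hypothetical helpers, not part of the commit), assuming NR_CPUS <= BITS_PER_LONG so a single word covers the whole mask:

#include <linux/cpumask.h>
#include <linux/kernel.h>

/* Collapse a real cpumask into one word, as atomic_set(..., *cpumask_bits(mask)) does above. */
static inline unsigned long example_mask_to_word(const struct cpumask *mask)
{
        return *cpumask_bits(mask);
}

/* Walk a raw word with the regular iterator, as pm_qos_set_value_for_cpus() does above. */
static int example_lowest_prio_cpu(unsigned long word, const s32 *prio)
{
        int cpu, best = -1;
        s32 best_prio = S32_MAX;

        /* to_cpumask() lets for_each_cpu() treat the local word as a tiny bitmap. */
        for_each_cpu(cpu, to_cpumask(&word)) {
                if (prio[cpu] < best_prio) {
                        best_prio = prio[cpu];
                        best = cpu;
                }
        }
        return best;
}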

View File

@@ -5186,15 +5186,10 @@ err:
 static int msm_fe_qos_prepare(struct snd_pcm_substream *substream)
 {
-        cpumask_t mask;
         if (pm_qos_request_active(&substream->latency_pm_qos_req))
                 pm_qos_remove_request(&substream->latency_pm_qos_req);
-        cpumask_clear(&mask);
-        cpumask_set_cpu(1, &mask); /* affine to core 1 */
-        cpumask_set_cpu(2, &mask); /* affine to core 2 */
-        cpumask_copy(&substream->latency_pm_qos_req.cpus_affine, &mask);
+        atomic_set(&substream->latency_pm_qos_req.cpus_affine, BIT(1) | BIT(2));
         substream->latency_pm_qos_req.type = PM_QOS_REQ_AFFINE_CORES;

View File

@@ -333,7 +333,6 @@ static void _sde_encoder_pm_qos_add_request(struct drm_encoder *drm_enc,
         struct pm_qos_request *req;
         u32 cpu_mask;
         u32 cpu_dma_latency;
-        int cpu;
         if (!sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
                 return;
@@ -343,11 +342,7 @@ static void _sde_encoder_pm_qos_add_request(struct drm_encoder *drm_enc,
         req = &sde_enc->pm_qos_cpu_req;
         req->type = PM_QOS_REQ_AFFINE_CORES;
-        cpumask_empty(&req->cpus_affine);
-        for_each_possible_cpu(cpu) {
-                if ((1 << cpu) & cpu_mask)
-                        cpumask_set_cpu(cpu, &req->cpus_affine);
-        }
+        atomic_set(&req->cpus_affine, cpu_mask);
         pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, cpu_dma_latency);
         SDE_EVT32_VERBOSE(DRMID(drm_enc), cpu_mask, cpu_dma_latency);

View File

@@ -3277,12 +3277,13 @@ void sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms,
         req = &sde_kms->pm_qos_irq_req;
         req->type = PM_QOS_REQ_AFFINE_CORES;
-        req->cpus_affine = sde_kms->irq_cpu_mask;
+        atomic_set(&req->cpus_affine,
+                        *cpumask_bits(&sde_kms->irq_cpu_mask));
         cpu_irq_latency = sde_kms->catalog->perf.cpu_irq_latency;
         if (pm_qos_request_active(req))
                 pm_qos_update_request(req, cpu_irq_latency);
-        else if (!cpumask_empty(&req->cpus_affine)) {
+        else if (atomic_read(&req->cpus_affine)) {
                 /** If request is not active yet and mask is not empty
                  * then it needs to be added initially
                  */

View File

@@ -1280,11 +1280,7 @@ void sde_rotator_pm_qos_add(struct sde_rot_data_type *rot_mdata)
         req = &rot_mdata->pm_qos_rot_cpu_req;
         req->type = PM_QOS_REQ_AFFINE_CORES;
-        cpumask_empty(&req->cpus_affine);
-        for_each_possible_cpu(cpu) {
-                if ((1 << cpu) & cpu_mask)
-                        cpumask_set_cpu(cpu, &req->cpus_affine);
-        }
+        atomic_set(&req->cpus_affine, cpu_mask);
         pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY,
                 PM_QOS_DEFAULT_VALUE);