irq: Make irq affinity function cpu isolation aware

Prohibit setting the affinity of an IRQ to an isolated core.
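
Both hunks below apply the same two-step mask computation: drop isolated CPUs from the
candidate affinity first; only if no online CPU remains in that set, fall back to the
online non-isolated CPUs, and finally to the plain online mask when every online CPU is
isolated. A minimal userspace sketch of that logic (a 64-bit word stands in for
struct cpumask; the mask values and the effective_affinity() helper are illustrative,
not kernel API):

  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative stand-ins for cpu_online_mask / cpu_isolated_mask. */
  static const uint64_t online_mask   = 0x0f;  /* CPUs 0-3 online   */
  static const uint64_t isolated_mask = 0x0c;  /* CPUs 2-3 isolated */

  /*
   * Prefer the requested CPUs minus the isolated ones; if none of those
   * are online, fall back to the online non-isolated CPUs, and only if
   * every online CPU is isolated use the plain online mask.
   */
  static uint64_t effective_affinity(uint64_t requested)
  {
          uint64_t available = requested & ~isolated_mask;

          if (!(available & online_mask)) {
                  available = online_mask & ~isolated_mask;
                  if (!available)
                          available = online_mask;
          }
          return available;
  }

  int main(void)
  {
          /* CPUs 0-1 requested: unaffected, prints 0x3. */
          printf("0x%llx\n", (unsigned long long)effective_affinity(0x3));
          /* Only isolated CPUs 2-3 requested: falls back to 0x3. */
          printf("0x%llx\n", (unsigned long long)effective_affinity(0xc));
          return 0;
  }

The managed-interrupt path in the second file (shut the IRQ down rather than rewrite its
affinity) is untouched; only the fallback mask selection changes.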

Change-Id: I2a2f75250eb410ace1e54772e4807b9fe620041a
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
[markivx: Ported to 4.14, fixed conflict with managed irq]
Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
commit c934db5672 (parent 28b9ddf7e1)
Author: Olav Haugan <ohaugan@codeaurora.org>
Date:   2016-05-29 19:35:54 -07:00
Commit: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>

 2 files changed, 20 insertions(+), 2 deletions(-)


@@ -37,6 +37,7 @@
 #include <linux/kallsyms.h>
 #include <linux/proc_fs.h>
 #include <linux/export.h>
+#include <linux/cpumask.h>
 
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/hardware/cache-uniphier.h>
@@ -117,6 +118,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	const struct cpumask *affinity = irq_data_get_affinity_mask(d);
 	struct irq_chip *c;
 	bool ret = false;
+	struct cpumask available_cpus;
 
 	/*
 	 * If this is a per-CPU interrupt, or the affinity does not
@@ -125,8 +127,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
 		return false;
 
+	cpumask_copy(&available_cpus, affinity);
+	cpumask_andnot(&available_cpus, &available_cpus, cpu_isolated_mask);
+	affinity = &available_cpus;
+
 	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-		affinity = cpu_online_mask;
+		cpumask_andnot(&available_cpus, cpu_online_mask,
+			       cpu_isolated_mask);
+		if (cpumask_empty(affinity))
+			affinity = cpu_online_mask;
 		ret = true;
 	}
 


@@ -12,6 +12,7 @@
 #include <linux/interrupt.h>
 #include <linux/ratelimit.h>
 #include <linux/irq.h>
+#include <linux/cpumask.h>
 
 #include "internals.h"
 
@@ -57,6 +58,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	const struct cpumask *affinity;
 	bool brokeaff = false;
 	int err;
+	struct cpumask available_cpus;
 
 	/*
 	 * IRQ chip might be already torn down, but the irq descriptor is
@@ -109,6 +111,10 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	if (maskchip && chip->irq_mask)
 		chip->irq_mask(d);
 
+	cpumask_copy(&available_cpus, affinity);
+	cpumask_andnot(&available_cpus, &available_cpus, cpu_isolated_mask);
+	affinity = &available_cpus;
+
 	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
 		/*
 		 * If the interrupt is managed, then shut it down and leave
@@ -119,7 +125,10 @@
 			irq_shutdown(desc);
 			return false;
 		}
-		affinity = cpu_online_mask;
+		cpumask_andnot(&available_cpus, cpu_online_mask,
+			       cpu_isolated_mask);
+		if (cpumask_empty(affinity))
+			affinity = cpu_online_mask;
 		brokeaff = true;
 	}
 	/*