Support for interrupt distribution design for SMP system solutions.
With this feature enabled, the SPI interrupts would be routed to
all the cores rather than the boot core to achieve better
load balance of interrupt handling.
That is, interrupts might be serviced simultaneously on different CPUs.
Signed-off-by: Hanks Chen <hanks.chen@xxxxxxxxxxxx>
---
drivers/irqchip/Kconfig | 12 ++++
drivers/irqchip/irq-gic-v3.c | 107 +++++++++++++++++++++--------
include/linux/irqchip/arm-gic-v3.h | 1 +
kernel/irq/cpuhotplug.c | 22 ++++++
kernel/irq/manage.c | 7 ++
5 files changed, 122 insertions(+), 27 deletions(-)
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index c6098eee0c7c..c88ee7731e92 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -597,4 +597,16 @@ config MST_IRQ
help
Support MStar Interrupt Controller.
+config ARM_IRQ_TARGET_ALL
+ bool "Distribute interrupts across processors on SMP system"
+ depends on SMP && ARM_GIC_V3
+ help
+ Support for interrupt distribution design for
+ SMP system solutions. With this feature enabled, the
+ SPI interrupts would be routed to all the cores rather
+ than the boot CPU to achieve better load balance of interrupt
+ handling.
+
+ If you don't know what to do here, say N.
+
endmenu
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 16fecc0febe8..62a878ce4681 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -381,6 +381,12 @@ static inline bool gic_supports_nmi(void)
static_branch_likely(&supports_pseudo_nmis);
}
+static inline bool gic_supports_1n(void)
+{
+ return (IS_ENABLED(CONFIG_ARM_IRQ_TARGET_ALL) &&
+ !(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_No1N));
+}
+
static int gic_irq_set_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which, bool val)
{
@@ -716,6 +722,7 @@ static void __init gic_dist_init(void)
{
unsigned int i;
u64 affinity;
+
void __iomem *base = gic_data.dist_base;
u32 val;
@@ -759,16 +766,27 @@ static void __init gic_dist_init(void)
/* Enable distributor with ARE, Group1 */
writel_relaxed(val, base + GICD_CTLR);
- /*
- * Set all global interrupts to the boot CPU only. ARE must be
- * enabled.
- */
- affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
- for (i = 32; i < GIC_LINE_NR; i++)
- gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
+ if (!gic_supports_1n()) {
+ /*
+ * Set all global interrupts to the boot CPU only. ARE must be
+ * enabled.
+ */
+ affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
+ for (i = 32; i < GIC_LINE_NR; i++)
+ gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
- for (i = 0; i < GIC_ESPI_NR; i++)
- gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
+ for (i = 0; i < GIC_ESPI_NR; i++)
+ gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
+ } else {
+ /* default set target all for all SPI */
+ for (i = 32; i < GIC_LINE_NR; i++)
+ gic_write_irouter(GICD_IROUTER_SPI_MODE_ANY,
+ base + GICD_IROUTER + i * 8);
+
+ for (i = 0; i < GIC_ESPI_NR; i++)
+ gic_write_irouter(GICD_IROUTER_SPI_MODE_ANY,
+ base + GICD_IROUTERnE + i * 8);
+ }
}
static int gic_iterate_rdists(int (*fn)(struct redist_region *, void
__iomem *))
@@ -1191,29 +1209,64 @@ static int gic_set_affinity(struct irq_data
*d, const struct cpumask *mask_val,
if (gic_irq_in_rdist(d))
return -EINVAL;
- /* If interrupt was enabled, disable it first */
- enabled = gic_peek_irq(d, GICD_ISENABLER);
- if (enabled)
- gic_mask_irq(d);
+ if (!gic_supports_1n()) {
+ /* If interrupt was enabled, disable it first */
+ enabled = gic_peek_irq(d, GICD_ISENABLER);
+ if (enabled)
+ gic_mask_irq(d);
- offset = convert_offset_index(d, GICD_IROUTER, &index);
- reg = gic_dist_base(d) + offset + (index * 8);
- val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
+ offset = convert_offset_index(d, GICD_IROUTER, &index);
+ reg = gic_dist_base(d) + offset + (index * 8);
+ val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
- gic_write_irouter(val, reg);
+ gic_write_irouter(val, reg);
- /*
- * If the interrupt was enabled, enabled it again. Otherwise,
- * just wait for the distributor to have digested our changes.
- */
- if (enabled)
- gic_unmask_irq(d);
- else
- gic_dist_wait_for_rwp();
+ /*
+ * If the interrupt was enabled, enable it again. Otherwise,
+ * just wait for the distributor to have digested our changes.
+ */
+ if (enabled)
+ gic_unmask_irq(d);
+ else
+ gic_dist_wait_for_rwp();
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ } else {
+ /*
+ * no need to update when:
+ * input mask is equal to the current setting
+ */
+ if (cpumask_equal(irq_data_get_affinity_mask(d), mask_val))
+ return IRQ_SET_MASK_OK_NOCOPY;
+
+ /* If interrupt was enabled, disable it first */
+ enabled = gic_peek_irq(d, GICD_ISENABLER);
+ if (enabled)
+ gic_mask_irq(d);
+
+ offset = convert_offset_index(d, GICD_IROUTER, &index);
+ reg = gic_dist_base(d) + offset + (index * 8);
- irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ /* GICv3 supports target is 1 or all */
+ if (cpumask_weight(mask_val) > 1)
+ val = GICD_IROUTER_SPI_MODE_ANY;
+ else
+ val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
+
+ gic_write_irouter(val, reg);
+
+ /*
+ * If the interrupt was enabled, enable it again. Otherwise,
+ * just wait for the distributor to have digested our changes.
+ */
+ if (enabled)
+ gic_unmask_irq(d);
+ else
+ gic_dist_wait_for_rwp();
+ }
- return IRQ_SET_MASK_OK_DONE;
+ return IRQ_SET_MASK_OK;
}
#else
#define gic_set_affinity NULL
diff --git a/include/linux/irqchip/arm-gic-v3.h
b/include/linux/irqchip/arm-gic-v3.h
index f6d092fdb93d..c24336d506a3 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -80,6 +80,7 @@
#define GICD_CTLR_ENABLE_SS_G0 (1U << 0)
#define GICD_TYPER_RSS (1U << 26)
+#define GICD_TYPER_No1N (1U << 25)
#define GICD_TYPER_LPIS (1U << 17)
#define GICD_TYPER_MBIS (1U << 16)
#define GICD_TYPER_ESPI (1U << 8)
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 02236b13b359..779512e44960 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -87,6 +87,18 @@ static bool migrate_one_irq(struct irq_desc *desc)
return false;
}
+#ifdef CONFIG_ARM_IRQ_TARGET_ALL
+ /*
+ * No move required, if interrupt is 1 of N IRQ.
+ * write current cpu_online_mask into affinity mask.
+ */
+ if (cpumask_weight(desc->irq_common_data.affinity) > 1) {
+ cpumask_copy(desc->irq_common_data.affinity, cpu_online_mask);
+
+ return false;
+ }
+#endif
+
/*
* Complete an eventually pending irq move cleanup. If this
* interrupt was moved in hard irq context, then the vectors need
@@ -191,6 +203,16 @@ static void irq_restore_affinity_of_irq(struct
irq_desc *desc, unsigned int cpu)
struct irq_data *data = irq_desc_get_irq_data(desc);
const struct cpumask *affinity = irq_data_get_affinity_mask(data);
+#ifdef CONFIG_ARM_IRQ_TARGET_ALL
+ /*
+ * No restore required, if interrupt is 1 of N IRQ.
+ */
+ if (cpumask_weight(affinity) > 1) {
+ cpumask_set_cpu(cpu, irq_data_get_affinity_mask(data));
+ return;
+ }
+#endif
+
if (!irqd_affinity_is_managed(data) || !desc->action ||
!irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
return;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c460e0496006..770b97e326bd 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -270,7 +270,14 @@ int irq_do_set_affinity(struct irq_data *data,
const struct cpumask *mask,
switch (ret) {
case IRQ_SET_MASK_OK:
case IRQ_SET_MASK_OK_DONE:
+#ifndef CONFIG_ARM_IRQ_TARGET_ALL
cpumask_copy(desc->irq_common_data.affinity, mask);
+#else
+ if (cpumask_weight(mask) > 1)
+ cpumask_copy(desc->irq_common_data.affinity, cpu_online_mask);
+ else
+ cpumask_copy(desc->irq_common_data.affinity, mask);
+#endif
fallthrough;
case IRQ_SET_MASK_OK_NOCOPY:
irq_validate_effective_affinity(data);