Re: [PATCH 2/6] irqchip/apple-aic: Add Fast IPI support

From: Marc Zyngier
Date: Sun Dec 12 2021 - 07:21:39 EST


On Thu, 09 Dec 2021 04:32:45 +0000,
Hector Martin <marcan@xxxxxxxxx> wrote:
>
> The newer AICv2 present in t600x SoCs does not have legacy IPI support
> at all. Since t8103 also supports Fast IPIs, implement support for this
> first. The legacy IPI code is left as a fallback, so it can be
> potentially used by older SoCs in the future.
>
> The vIPI code is shared; only the IPI firing/acking bits change for Fast
> IPIs.
>
> Signed-off-by: Hector Martin <marcan@xxxxxxxxx>
> ---
> drivers/irqchip/irq-apple-aic.c | 112 ++++++++++++++++++++++++++++----
> 1 file changed, 98 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
> index 3759dc36cc8f..1aa63580cae4 100644
> --- a/drivers/irqchip/irq-apple-aic.c
> +++ b/drivers/irqchip/irq-apple-aic.c
> @@ -24,7 +24,7 @@
> * - Default "this CPU" register view and explicit per-CPU views
> *
> * In addition, this driver also handles FIQs, as these are routed to the same
> - * IRQ vector. These are used for Fast IPIs (TODO), the ARMv8 timer IRQs, and
> + * IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and
> * performance counters (TODO).
> *
> * Implementation notes:
> @@ -106,7 +106,6 @@
>
> /*
> * IMP-DEF sysregs that control FIQ sources
> - * Note: sysreg-based IPIs are not supported yet.
> */
>
> /* Core PMC control register */
> @@ -155,6 +154,10 @@
> #define SYS_IMP_APL_UPMSR_EL1 sys_reg(3, 7, 15, 6, 4)
> #define UPMSR_IACT BIT(0)
>
> +/* MPIDR fields */
> +#define MPIDR_CPU GENMASK(7, 0)
> +#define MPIDR_CLUSTER GENMASK(15, 8)

This should be defined in terms of MPIDR_AFFINITY_LEVEL() and co.
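Something like this, using the generic arm64 MPIDR accessors:

u64 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
u64 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);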

> +
> #define AIC_NR_FIQ 4
> #define AIC_NR_SWIPI 32
>
> @@ -173,12 +176,42 @@
> #define AIC_TMR_EL02_PHYS AIC_TMR_GUEST_PHYS
> #define AIC_TMR_EL02_VIRT AIC_TMR_GUEST_VIRT
>
> +struct aic_info {
> + int version;
> +
> + /* Features */
> + bool fast_ipi;
> +};
> +
> +static const struct aic_info aic1_info = {
> + .version = 1,
> +};
> +
> +static const struct aic_info aic1_fipi_info = {
> + .version = 1,
> +
> + .fast_ipi = true,

Do you anticipate multiple feature flags like this? If so, maybe we
should consider biting the bullet and making this an unsigned long
populated with discrete flags.

Not something we need to decide now though.
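
A minimal sketch of what I have in mind (the flag name is purely
illustrative):

#define AIC_FEAT_FAST_IPI	BIT(0)

struct aic_info {
	int version;
	unsigned long features;
};

static const struct aic_info aic1_fipi_info = {
	.version	= 1,
	.features	= AIC_FEAT_FAST_IPI,
};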

> +};
> +
> +static const struct of_device_id aic_info_match[] = {
> + {
> + .compatible = "apple,t8103-aic",
> + .data = &aic1_fipi_info,
> + },
> + {
> + .compatible = "apple,aic",
> + .data = &aic1_info,
> + },
> + {}
> +};
> +
> struct aic_irq_chip {
> void __iomem *base;
> struct irq_domain *hw_domain;
> struct irq_domain *ipi_domain;
> int nr_hw;
> - int ipi_hwirq;
> +
> + struct aic_info info;
> };
>
> static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);
> @@ -387,8 +420,12 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
> */
>
> if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
> - pr_err_ratelimited("Fast IPI fired. Acking.\n");
> - write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
> + if (aic_irqc->info.fast_ipi) {

On the other hand, this is likely to hit on the fast path. Given that
we know at probe time whether we support SR-based IPIs, we can turn
this into a static key and save a few fetches on every IPI. The same
applies everywhere this flag is checked at runtime.
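
Roughly (untested sketch, the key name is only illustrative):

static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);

/* at probe time, for AICs without SR-based IPIs */
if (!irqc->info.fast_ipi)
	static_branch_disable(&use_fast_ipi);

/* and the check in the FIQ handler becomes */
if (static_branch_likely(&use_fast_ipi))
	aic_handle_ipi(regs);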

> + aic_handle_ipi(regs);
> + } else {
> + pr_err_ratelimited("Fast IPI fired. Acking.\n");
> + write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
> + }
> }
>
> if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
> @@ -564,6 +601,21 @@ static const struct irq_domain_ops aic_irq_domain_ops = {
> * IPI irqchip
> */
>
> +static void aic_ipi_send_fast(int cpu)
> +{
> + u64 mpidr = cpu_logical_map(cpu);
> + u64 my_mpidr = cpu_logical_map(smp_processor_id());

This is the equivalent of reading MPIDR_EL1. My gut feeling is that
accessing the sysreg directly is a bit faster than a percpu lookup, a
function call and another memory access.
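Something like:

u64 my_mpidr = read_cpuid_mpidr();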

> + u64 cluster = FIELD_GET(MPIDR_CLUSTER, mpidr);

See my earlier comment. This really should be:

u64 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

rather than inventing a new decoding scheme.

> + u64 idx = FIELD_GET(MPIDR_CPU, mpidr);
> +
> + if (FIELD_GET(MPIDR_CLUSTER, my_mpidr) == cluster)
> + write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx),
> + SYS_IMP_APL_IPI_RR_LOCAL_EL1);
> + else
> + write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
> + SYS_IMP_APL_IPI_RR_GLOBAL_EL1);

Don't you need an ISB, either here or in the two callers? At the
moment, I don't see what will force the execution of these writes, and
they could be arbitrarily delayed.
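
i.e. an isb() following the write, something like (where val stands in
for the FIELD_PREP() expression above):

write_sysreg_s(val, SYS_IMP_APL_IPI_RR_LOCAL_EL1);
isb();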

> +}
> +
> static void aic_ipi_mask(struct irq_data *d)
> {
> u32 irq_bit = BIT(irqd_to_hwirq(d));
> @@ -589,8 +641,12 @@ static void aic_ipi_unmask(struct irq_data *d)
> * If a pending vIPI was unmasked, raise a HW IPI to ourselves.
> * No barriers needed here since this is a self-IPI.
> */
> - if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit)
> - aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
> + if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) {
> + if (ic->info.fast_ipi)
> + aic_ipi_send_fast(smp_processor_id());

nit: if this is common enough, maybe having a dedicated
aic_ipi_send_self_fast() helper would be better. Needs evaluation though.
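
Purely illustrative (untested, and assuming the MPIDR_AFFINITY_LEVEL()
decoding discussed above):

static void aic_ipi_send_self_fast(void)
{
	u64 mpidr = read_cpuid_mpidr();

	/* A self-IPI always stays within our own cluster */
	write_sysreg_s(FIELD_PREP(IPI_RR_CPU, MPIDR_AFFINITY_LEVEL(mpidr, 0)),
		       SYS_IMP_APL_IPI_RR_LOCAL_EL1);
	isb();
}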

> + else
> + aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
> + }
> }
>
> static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
> @@ -618,8 +674,12 @@ static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
> smp_mb__after_atomic();
>
> if (!(pending & irq_bit) &&
> - (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit))
> - send |= AIC_IPI_SEND_CPU(cpu);
> + (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) {
> + if (ic->info.fast_ipi)
> + aic_ipi_send_fast(cpu);
> + else
> + send |= AIC_IPI_SEND_CPU(cpu);
> + }
> }
>
> /*
> @@ -651,8 +711,16 @@ static void aic_handle_ipi(struct pt_regs *regs)
> /*
> * Ack the IPI. We need to order this after the AIC event read, but
> * that is enforced by normal MMIO ordering guarantees.
> + *
> + * For the Fast IPI case, this needs to be ordered before the vIPI
> + * handling below, so we need to isb();
> */
> - aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
> + if (aic_irqc->info.fast_ipi) {
> + write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
> + isb();
> + } else {
> + aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
> + }
>
> /*
> * The mask read does not need to be ordered. Only we can change
> @@ -680,7 +748,8 @@ static void aic_handle_ipi(struct pt_regs *regs)
> * No ordering needed here; at worst this just changes the timing of
> * when the next IPI will be delivered.
> */
> - aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
> + if (!aic_irqc->info.fast_ipi)
> + aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
> }
>
> static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
> @@ -779,8 +848,12 @@ static int aic_init_cpu(unsigned int cpu)
> * by AIC during processing). We manage masks at the vIPI level.
> */
> aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
> - aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
> - aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
> + if (!aic_irqc->info.fast_ipi) {
> + aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
> + aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
> + } else {
> + aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
> + }
>
> /* Initialize the local mask state */
> __this_cpu_write(aic_fiq_unmasked, 0);
> @@ -800,6 +873,7 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
> void __iomem *regs;
> u32 info;
> struct aic_irq_chip *irqc;
> + const struct of_device_id *match;
>
> regs = of_iomap(node, 0);
> if (WARN_ON(!regs))
> @@ -809,9 +883,16 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
> if (!irqc)
> return -ENOMEM;
>
> - aic_irqc = irqc;
> irqc->base = regs;
>
> + match = of_match_node(aic_info_match, node);
> + if (!match)
> + return -ENODEV;
> +
> + irqc->info = *(struct aic_info *)match->data;

Why the copy? All the data is const, and isn't going away.
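
i.e. store a pointer in aic_irq_chip instead:

const struct aic_info *info;

and simply do

irqc->info = match->data;

with the users becoming irqc->info->fast_ipi and co.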

> +
> + aic_irqc = irqc;
> +
> info = aic_ic_read(irqc, AIC_INFO);
> irqc->nr_hw = FIELD_GET(AIC_INFO_NR_HW, info);
>
> @@ -846,6 +927,9 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
> if (!is_kernel_in_hyp_mode())
> pr_info("Kernel running in EL1, mapping interrupts");
>
> + if (irqc->info.fast_ipi)
> + pr_info("Using Fast IPIs");
> +
> cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
> "irqchip/apple-aic/ipi:starting",
> aic_init_cpu, NULL);

Thanks,

M.

--
Without deviation from the norm, progress is not possible.