[PATCH 4.4 335/342] x86/irq: Call irq_force_complete_move with irq descriptor

From: Greg Kroah-Hartman
Date: Tue Mar 01 2016 - 19:04:38 EST


4.4-stable review patch. If anyone has any objections, please let me know.

------------------

From: Thomas Gleixner <tglx@xxxxxxxxxxxxx>

commit 90a2282e23f0522e4b3f797ad447c5e91bf7fe32 upstream.

First of all, there is no point in looking up the irq descriptor again, but we
also need the descriptor for the final cleanup race fix in the next
patch. Make that change separate. No functional difference.
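
For illustration only (not part of the patch): the caller-side shape this
enables is roughly the sketch below. fixup_irqs() already walks descriptors
via for_each_irq_desc(), so it can hand the descriptor it holds straight to
irq_force_complete_move() instead of passing the irq number and forcing the
callee to repeat the lookup.

	/*
	 * Sketch only: the caller already resolved the descriptor once,
	 * so the callee can derive irq_data/cfg from it directly.
	 */
	struct irq_desc *desc = irq_to_desc(irq);	/* single lookup, by the caller */

	if (desc)
		irq_force_complete_move(desc);		/* was: irq_force_complete_move(irq) */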

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Tested-by: Borislav Petkov <bp@xxxxxxxxx>
Tested-by: Joe Lawrence <joe.lawrence@xxxxxxxxxxx>
Cc: Jiang Liu <jiang.liu@xxxxxxxxxxxxxxx>
Cc: Jeremiah Mahler <jmmahler@xxxxxxxxx>
Cc: andy.shevchenko@xxxxxxxxx
Cc: Guenter Roeck <linux@xxxxxxxxxxxx>
Link: http://lkml.kernel.org/r/20151231160107.125211743@xxxxxxxxxxxxx
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
arch/x86/include/asm/irq.h | 5 +++--
arch/x86/kernel/apic/vector.c | 11 +++++++----
arch/x86/kernel/irq.c | 2 +-
3 files changed, 11 insertions(+), 7 deletions(-)

--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);

#define __ARCH_HAS_DO_SOFTIRQ

+struct irq_desc;
+
#ifdef CONFIG_HOTPLUG_CPU
#include <linux/cpumask.h>
extern int check_irq_vectors_for_cpu_disable(void);
extern void fixup_irqs(void);
-extern void irq_force_complete_move(int);
+extern void irq_force_complete_move(struct irq_desc *desc);
#endif

#ifdef CONFIG_HAVE_KVM
@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_h
extern void (*x86_platform_ipi_callback)(void);
extern void native_init_IRQ(void);

-struct irq_desc;
extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);

extern __visible unsigned int do_IRQ(struct pt_regs *regs);
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -628,10 +628,14 @@ void irq_complete_move(struct irq_cfg *c
__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

-void irq_force_complete_move(int irq)
+/*
+ * Called with @desc->lock held and interrupts disabled.
+ */
+void irq_force_complete_move(struct irq_desc *desc)
{
- struct irq_cfg *cfg = irq_cfg(irq);
- struct apic_chip_data *data;
+ struct irq_data *irqdata = irq_desc_get_irq_data(desc);
+ struct apic_chip_data *data = apic_chip_data(irqdata);
+ struct irq_cfg *cfg = data ? &data->cfg : NULL;

if (!cfg)
return;
@@ -645,7 +649,6 @@ void irq_force_complete_move(int irq)
* the way out.
*/
raw_spin_lock(&vector_lock);
- data = container_of(cfg, struct apic_chip_data, cfg);
cpumask_clear_cpu(smp_processor_id(), data->old_domain);
raw_spin_unlock(&vector_lock);
}
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -462,7 +462,7 @@ void fixup_irqs(void)
* non intr-remapping case, we can't wait till this interrupt
* arrives at this cpu before completing the irq move.
*/
- irq_force_complete_move(irq);
+ irq_force_complete_move(desc);

if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
break_affinity = 1;