[patch 41/52] x86/apic: Remove unused callbacks

From: Thomas Gleixner
Date: Wed Sep 13 2017 - 17:37:23 EST


Now that the old vector allocator is gone, the cpu_mask_to_apicid() and
vector_allocation_domain() callbacks and their implementations are unused.
Remove them.
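
For reference, destination APIC IDs are now derived via the calc_dest_apicid()
callback. Its default and logical flat helpers, quoted from the apic_common.c
hunk below, are:

	u32 apic_default_calc_apicid(unsigned int cpu)
	{
		return per_cpu(x86_cpu_to_apicid, cpu);
	}

	u32 apic_flat_calc_apicid(unsigned int cpu)
	{
		return 1U << cpu;
	}

The x2apic cluster and UV variants keep their own calc_dest_apicid()
implementations (x2apic_calc_apicid() and apic_uv_calc_apicid()), as visible in
the respective hunks.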

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
arch/x86/kernel/apic/apic_common.c    | 48 ----------------------------------
arch/x86/kernel/apic/apic_flat_64.c   |  4 --
arch/x86/kernel/apic/apic_noop.c      | 10 -------
arch/x86/kernel/apic/apic_numachip.c  |  4 --
arch/x86/kernel/apic/bigsmp_32.c      |  2 -
arch/x86/kernel/apic/probe_32.c       |  2 -
arch/x86/kernel/apic/x2apic_cluster.c | 48 ----------------------------------
arch/x86/kernel/apic/x2apic_phys.c    |  2 -
arch/x86/kernel/apic/x2apic_uv_x.c    | 14 ---------
arch/x86/kernel/vsmp_64.c             | 19 -------------
arch/x86/xen/apic.c                   |  2 -
11 files changed, 155 deletions(-)

--- a/arch/x86/kernel/apic/apic_common.c
+++ b/arch/x86/kernel/apic/apic_common.c
@@ -11,64 +11,16 @@ u32 apic_default_calc_apicid(unsigned in
return per_cpu(x86_cpu_to_apicid, cpu);
}

-int default_cpu_mask_to_apicid(const struct cpumask *msk, struct irq_data *irqd,
- unsigned int *apicid)
-{
- unsigned int cpu = cpumask_first(msk);
-
- if (cpu >= nr_cpu_ids)
- return -EINVAL;
- *apicid = per_cpu(x86_cpu_to_apicid, cpu);
- irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
- return 0;
-}
-
u32 apic_flat_calc_apicid(unsigned int cpu)
{
return 1U << cpu;
}

-int flat_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqd,
- unsigned int *apicid)
-
-{
- struct cpumask *effmsk = irq_data_get_effective_affinity_mask(irqd);
- unsigned long cpu_mask = cpumask_bits(mask)[0] & APIC_ALL_CPUS;
-
- if (!cpu_mask)
- return -EINVAL;
- *apicid = (unsigned int)cpu_mask;
- cpumask_bits(effmsk)[0] = cpu_mask;
- return 0;
-}
-
bool default_check_apicid_used(physid_mask_t *map, int apicid)
{
return physid_isset(apicid, *map);
}

-void flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
- const struct cpumask *mask)
-{
- /*
- * Careful. Some cpus do not strictly honor the set of cpus
- * specified in the interrupt destination when using lowest
- * priority interrupt delivery mode.
- *
- * In particular there was a hyperthreading cpu observed to
- * deliver interrupts to the wrong hyperthread when only one
- * hyperthread was specified in the interrupt desitination.
- */
- cpumask_clear(retmask);
- cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
-void default_vector_allocation_domain(int cpu, struct cpumask *retmask,
- const struct cpumask *mask)
-{
- cpumask_copy(retmask, cpumask_of(cpu));
-}
-
void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
*retmap = *phys_map;
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -158,7 +158,6 @@ static struct apic apic_flat __ro_after_
.dest_logical = APIC_DEST_LOGICAL,
.check_apicid_used = NULL,

- .vector_allocation_domain = flat_vector_allocation_domain,
.init_apic_ldr = flat_init_apic_ldr,

.ioapic_phys_id_map = NULL,
@@ -171,7 +170,6 @@ static struct apic apic_flat __ro_after_
.get_apic_id = flat_get_apic_id,
.set_apic_id = set_apic_id,

- .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
.calc_dest_apicid = apic_flat_calc_apicid,

.send_IPI = default_send_IPI_single,
@@ -253,7 +251,6 @@ static struct apic apic_physflat __ro_af
.dest_logical = 0,
.check_apicid_used = NULL,

- .vector_allocation_domain = default_vector_allocation_domain,
/* not needed, but shouldn't hurt: */
.init_apic_ldr = flat_init_apic_ldr,

@@ -267,7 +264,6 @@ static struct apic apic_physflat __ro_af
.get_apic_id = flat_get_apic_id,
.set_apic_id = set_apic_id,

- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
.calc_dest_apicid = apic_default_calc_apicid,

.send_IPI = default_send_IPI_single_phys,
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -83,14 +83,6 @@ static int noop_apic_id_registered(void)
return physid_isset(0, phys_cpu_present_map);
}

-static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask,
- const struct cpumask *mask)
-{
- if (cpu != 0)
- pr_warning("APIC: Vector allocated for non-BSP cpu\n");
- cpumask_copy(retmask, cpumask_of(cpu));
-}
-
static u32 noop_apic_read(u32 reg)
{
WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_APIC) && !disable_apic);
@@ -125,7 +117,6 @@ struct apic apic_noop __ro_after_init =
.dest_logical = APIC_DEST_LOGICAL,
.check_apicid_used = default_check_apicid_used,

- .vector_allocation_domain = noop_vector_allocation_domain,
.init_apic_ldr = noop_init_apic_ldr,

.ioapic_phys_id_map = default_ioapic_phys_id_map,
@@ -141,7 +132,6 @@ struct apic apic_noop __ro_after_init =
.get_apic_id = noop_get_apic_id,
.set_apic_id = NULL,

- .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
.calc_dest_apicid = apic_flat_calc_apicid,

.send_IPI = noop_send_IPI,
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -253,7 +253,6 @@ static const struct apic apic_numachip1
.dest_logical = 0,
.check_apicid_used = NULL,

- .vector_allocation_domain = default_vector_allocation_domain,
.init_apic_ldr = flat_init_apic_ldr,

.ioapic_phys_id_map = NULL,
@@ -266,7 +265,6 @@ static const struct apic apic_numachip1
.get_apic_id = numachip1_get_apic_id,
.set_apic_id = numachip1_set_apic_id,

- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
.calc_dest_apicid = apic_default_calc_apicid,

.send_IPI = numachip_send_IPI_one,
@@ -304,7 +302,6 @@ static const struct apic apic_numachip2
.dest_logical = 0,
.check_apicid_used = NULL,

- .vector_allocation_domain = default_vector_allocation_domain,
.init_apic_ldr = flat_init_apic_ldr,

.ioapic_phys_id_map = NULL,
@@ -317,7 +314,6 @@ static const struct apic apic_numachip2
.get_apic_id = numachip2_get_apic_id,
.set_apic_id = numachip2_set_apic_id,

- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
.calc_dest_apicid = apic_default_calc_apicid,

.send_IPI = numachip_send_IPI_one,
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -158,7 +158,6 @@ static struct apic apic_bigsmp __ro_afte
.dest_logical = 0,
.check_apicid_used = bigsmp_check_apicid_used,

- .vector_allocation_domain = default_vector_allocation_domain,
.init_apic_ldr = bigsmp_init_apic_ldr,

.ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
@@ -171,7 +170,6 @@ static struct apic apic_bigsmp __ro_afte
.get_apic_id = bigsmp_get_apic_id,
.set_apic_id = NULL,

- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
.calc_dest_apicid = apic_default_calc_apicid,

.send_IPI = default_send_IPI_single_phys,
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -113,7 +113,6 @@ static struct apic apic_default __ro_aft
.dest_logical = APIC_DEST_LOGICAL,
.check_apicid_used = default_check_apicid_used,

- .vector_allocation_domain = flat_vector_allocation_domain,
.init_apic_ldr = default_init_apic_ldr,

.ioapic_phys_id_map = default_ioapic_phys_id_map,
@@ -126,7 +125,6 @@ static struct apic apic_default __ro_aft
.get_apic_id = default_get_apic_id,
.set_apic_id = NULL,

- .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
.calc_dest_apicid = apic_flat_calc_apicid,

.send_IPI = default_send_IPI_single,
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -91,29 +91,6 @@ static void x2apic_send_IPI_all(int vect
__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}

-static int
-x2apic_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqdata,
- unsigned int *apicid)
-{
- struct cpumask *effmsk = irq_data_get_effective_affinity_mask(irqdata);
- struct cluster_mask *cmsk;
- unsigned int cpu;
- u32 dest = 0;
-
- cpu = cpumask_first(mask);
- if (cpu >= nr_cpu_ids)
- return -EINVAL;
-
- cmsk = per_cpu(cluster_masks, cpu);
- cpumask_clear(effmsk);
- for_each_cpu_and(cpu, &cmsk->mask, mask) {
- dest |= per_cpu(x86_cpu_to_logical_apicid, cpu);
- cpumask_set_cpu(cpu, effmsk);
- }
- *apicid = dest;
- return 0;
-}
-
static u32 x2apic_calc_apicid(unsigned int cpu)
{
return per_cpu(x86_cpu_to_logical_apicid, cpu);
@@ -198,29 +175,6 @@ static int x2apic_cluster_probe(void)
return 1;
}

-/*
- * Each x2apic cluster is an allocation domain.
- */
-static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
- const struct cpumask *mask)
-{
- struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);
-
- /*
- * To minimize vector pressure, default case of boot, device bringup
- * etc will use a single cpu for the interrupt destination.
- *
- * On explicit migration requests coming from irqbalance etc,
- * interrupts will be routed to the x2apic cluster (cluster-id
- * derived from the first cpu in the mask) members specified
- * in the mask.
- */
- if (cpumask_equal(mask, cpu_online_mask))
- cpumask_copy(retmask, cpumask_of(cpu));
- else
- cpumask_and(retmask, mask, &cmsk->mask);
-}
-
static struct apic apic_x2apic_cluster __ro_after_init = {

.name = "cluster x2apic",
@@ -236,7 +190,6 @@ static struct apic apic_x2apic_cluster _
.dest_logical = APIC_DEST_LOGICAL,
.check_apicid_used = NULL,

- .vector_allocation_domain = cluster_vector_allocation_domain,
.init_apic_ldr = init_x2apic_ldr,

.ioapic_phys_id_map = NULL,
@@ -249,7 +202,6 @@ static struct apic apic_x2apic_cluster _
.get_apic_id = x2apic_get_apic_id,
.set_apic_id = x2apic_set_apic_id,

- .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
.calc_dest_apicid = x2apic_calc_apicid,

.send_IPI = x2apic_send_IPI,
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -151,7 +151,6 @@ static struct apic apic_x2apic_phys __ro
.dest_logical = 0,
.check_apicid_used = NULL,

- .vector_allocation_domain = default_vector_allocation_domain,
.init_apic_ldr = init_x2apic_ldr,

.ioapic_phys_id_map = NULL,
@@ -164,7 +163,6 @@ static struct apic apic_x2apic_phys __ro
.get_apic_id = x2apic_get_apic_id,
.set_apic_id = x2apic_set_apic_id,

- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
.calc_dest_apicid = apic_default_calc_apicid,

.send_IPI = x2apic_send_IPI,
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -525,18 +525,6 @@ static void uv_init_apic_ldr(void)
{
}

-static int
-uv_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqdata,
- unsigned int *apicid)
-{
- int ret = default_cpu_mask_to_apicid(mask, irqdata, apicid);
-
- if (!ret)
- *apicid |= uv_apicid_hibits;
-
- return ret;
-}
-
static u32 apic_uv_calc_apicid(unsigned int cpu)
{
return apic_default_calc_apicid(cpu) | uv_apicid_hibits;
@@ -593,7 +581,6 @@ static struct apic apic_x2apic_uv_x __ro
.dest_logical = APIC_DEST_LOGICAL,
.check_apicid_used = NULL,

- .vector_allocation_domain = default_vector_allocation_domain,
.init_apic_ldr = uv_init_apic_ldr,

.ioapic_phys_id_map = NULL,
@@ -606,7 +593,6 @@ static struct apic apic_x2apic_uv_x __ro
.get_apic_id = x2apic_get_apic_id,
.set_apic_id = set_apic_id,

- .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
.calc_dest_apicid = apic_uv_calc_apicid,

.send_IPI = uv_send_IPI_one,
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -26,9 +26,6 @@

#define TOPOLOGY_REGISTER_OFFSET 0x10

-/* Flag below is initialized once during vSMP PCI initialization. */
-static int irq_routing_comply = 1;
-
#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
/*
* Interrupt control on vSMPowered systems:
@@ -105,9 +102,6 @@ static void __init set_vsmp_pv_ops(void)
if (cap & ctl & BIT(8)) {
ctl &= ~BIT(8);

- /* Interrupt routing set to ignore */
- irq_routing_comply = 0;
-
#ifdef CONFIG_PROC_FS
/* Don't let users change irq affinity via procfs */
no_irq_affinity = 1;
@@ -211,23 +205,10 @@ static int apicid_phys_pkg_id(int initia
return hard_smp_processor_id() >> index_msb;
}

-/*
- * In vSMP, all cpus should be capable of handling interrupts, regardless of
- * the APIC used.
- */
-static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
- const struct cpumask *mask)
-{
- cpumask_setall(retmask);
-}
-
static void vsmp_apic_post_init(void)
{
/* need to update phys_pkg_id */
apic->phys_pkg_id = apicid_phys_pkg_id;
-
- if (!irq_routing_comply)
- apic->vector_allocation_domain = fill_vector_allocation_domain;
}

void __init vsmp_init(void)
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -164,7 +164,6 @@ static struct apic xen_pv_apic = {
/* .dest_logical - default_send_IPI_ use it but we use our own. */
.check_apicid_used = default_check_apicid_used, /* Used on 32-bit */

- .vector_allocation_domain = flat_vector_allocation_domain,
.init_apic_ldr = xen_noop, /* setup_local_APIC calls it */

.ioapic_phys_id_map = default_ioapic_phys_id_map, /* Used on 32-bit */
@@ -177,7 +176,6 @@ static struct apic xen_pv_apic = {
.get_apic_id = xen_get_apic_id,
.set_apic_id = xen_set_apic_id, /* Can be NULL on 32-bit. */

- .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
.calc_dest_apicid = apic_flat_calc_apicid,

#ifdef CONFIG_SMP