[PATCH 06 of 12] xen: add rebind_evtchn_irq

From: Jeremy Fitzhardinge
Date: Fri May 23 2008 - 09:45:47 EST


Add rebind_evtchn_irq(), which rebinds a device driver's existing
irq to a new event channel on restore. Since the new event channel
will be masked and bound to vcpu0, we update the state accordingly and
unmask the irq once everything is set up.
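
For illustration only, a sketch of how a frontend driver's resume path
might call this; struct example_front_info, its fields and
example_front_resume() are hypothetical and not part of this patch:

	#include <xen/events.h>

	/* Hypothetical per-device state kept by an illustrative frontend. */
	struct example_front_info {
		int irq;	/* irq bound before suspend/restore */
		int evtchn;	/* event channel, reallocated on resume */
	};

	static void example_front_resume(struct example_front_info *info,
					 int new_evtchn)
	{
		/* The pre-suspend event channel is gone after restore;
		   point the driver's existing irq at the new channel. */
		info->evtchn = new_evtchn;
		rebind_evtchn_irq(new_evtchn, info->irq);
	}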

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@xxxxxxxxxx>
---
drivers/xen/events.c | 27 +++++++++++++++++++++++++++
include/xen/events.h | 1 +
2 files changed, 28 insertions(+)

diff --git a/drivers/xen/events.c b/drivers/xen/events.c
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -557,6 +557,33 @@
 	put_cpu();
 }
 
+/* Rebind a new event channel to an existing irq. */
+void rebind_evtchn_irq(int evtchn, int irq)
+{
+	/* Make sure the irq is masked, since the new event channel
+	   will also be masked. */
+	disable_irq(irq);
+
+	spin_lock(&irq_mapping_update_lock);
+
+	/* After resume the irq<->evtchn mappings are all cleared out */
+	BUG_ON(evtchn_to_irq[evtchn] != -1);
+	/* Expect irq to have been bound before,
+	   so the bindcount should be non-0 */
+	BUG_ON(irq_bindcount[irq] == 0);
+
+	evtchn_to_irq[evtchn] = irq;
+	irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+
+	spin_unlock(&irq_mapping_update_lock);
+
+	/* new event channels are always bound to cpu 0 */
+	irq_set_affinity(irq, cpumask_of_cpu(0));
+
+	/* Unmask the event channel. */
+	enable_irq(irq);
+}
+
 /* Rebind an evtchn so that it gets delivered to a specific cpu */
 static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 {
diff --git a/include/xen/events.h b/include/xen/events.h
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -32,6 +32,7 @@

 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
 int resend_irq_on_evtchn(unsigned int irq);
+void rebind_evtchn_irq(int evtchn, int irq);
 
 static inline void notify_remote_via_evtchn(int port)
 {

