[PATCH] sched: schedule_raw_spin_unlock() and schedule_spin_unlock()

From: Kirill Tkhai
Date: Fri Jun 14 2013 - 10:41:00 EST


Add helpers to replace the repeating patterns:

1) raw_spin_unlock_irq(lock);
   schedule();

2) raw_spin_unlock_irqrestore(lock, flags);
   schedule();

(and the same for spinlock_t)

They allow us to avoid an excess preempt_schedule(), which can otherwise
happen on a preemptible kernel.
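
For illustration only (the waitqueue and its lock below are hypothetical,
not part of this patch), a typical sleep site that currently does

	set_current_state(TASK_INTERRUPTIBLE);
	raw_spin_unlock_irq(&wq->lock);
	schedule();

would become

	set_current_state(TASK_INTERRUPTIBLE);
	schedule_raw_spin_unlock(&wq->lock);

With CONFIG_PREEMPT the helper keeps preemption disabled across the unlock,
so the unlock itself cannot trigger preempt_schedule() right before the
explicit schedule() call; without CONFIG_PREEMPT it simply expands to the
old two-line sequence.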

Signed-off-by: Kirill Tkhai <tkhai@xxxxxxxxx>
CC: Steven Rostedt <rostedt@xxxxxxxxxxx>
CC: Ingo Molnar <mingo@xxxxxxxxxx>
CC: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
---
include/linux/sched.h | 20 ++++++++++++++++++++
kernel/sched/core.c | 24 ++++++++++++++++++++++++
2 files changed, 44 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8b10339..0ae79f3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -310,6 +310,26 @@ extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

+#ifdef CONFIG_PREEMPT
+extern void schedule_raw_spin_unlock(raw_spinlock_t *lock);
+/* See comment for schedule_raw_spin_unlock() */
+static inline void schedule_spin_unlock(spinlock_t *lock)
+{
+	schedule_raw_spin_unlock(&lock->rlock);
+}
+#else
+static inline void schedule_raw_spin_unlock(raw_spinlock_t *lock)
+{
+	raw_spin_unlock_irq(lock);
+	schedule();
+}
+static inline void schedule_spin_unlock(spinlock_t *lock)
+{
+	spin_unlock_irq(lock);
+	schedule();
+}
+#endif
+
struct nsproxy;
struct user_namespace;

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 58453b8..381e493 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3125,6 +3125,30 @@ asmlinkage void __sched preempt_schedule_irq(void)
exception_exit(prev_state);
}

+/*
+ * schedule_raw_spin_unlock - unlock a raw_spinlock and call schedule()
+ *
+ * Should be used instead of the constructions
+ * 1) raw_spin_unlock_irq(lock);
+ *    schedule();
+ * or
+ * 2) raw_spin_unlock_irqrestore(lock, flags);
+ *    schedule();
+ * wherever they occur.
+ *
+ * It avoids an excess preempt_schedule() during the unlock, which can
+ * otherwise happen on a preemptible kernel.
+ * Returns with irqs enabled.
+ */
+void __sched schedule_raw_spin_unlock(raw_spinlock_t *lock)
+{
+	preempt_disable();
+	raw_spin_unlock_irq(lock);
+	sched_preempt_enable_no_resched();
+	schedule();
+}
+EXPORT_SYMBOL(schedule_raw_spin_unlock);
+
#endif /* CONFIG_PREEMPT */

int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
--