linux/debian/patches/features/all/rt/wait-simple-version.patch

Subject: wait-simple: Simple waitqueue implementation
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 12 Dec 2011 12:29:04 +0100

wait_queue is a Swiss army knife and in most cases the complexity is
not needed. For RT, waitqueues are a constant source of trouble, as we
can't convert the head lock to a raw spinlock due to fancy and
long-lasting callbacks.

Provide a slim version, which allows RT to replace wait queues. This
should go mainline as well, as it lowers memory consumption and
runtime overhead.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/wait-simple.h |  152 ++++++++++++++++++++++++++++++++++++++++
 kernel/Makefile             |    2 
 kernel/wait-simple.c        |   63 ++++++++++++++++++
 3 files changed, 216 insertions(+), 1 deletion(-)
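
To make the intended conversion concrete, here is a minimal, hypothetical sketch of how a classic waitqueue user would move to the new API (foo_wait, foo_done and the functions are made-up names; the mapping wait_event()/wake_up() -> swait_event()/swait_wake() follows from the header added below):

/* Illustrative sketch only -- not part of the patch. */
#include <linux/wait-simple.h>

static DEFINE_SWAIT_HEAD(foo_wait);	/* was: DECLARE_WAIT_QUEUE_HEAD(foo_wait); */
static bool foo_done;

static void foo_wait_for_data(void)
{
	/* was: wait_event(foo_wait, foo_done); */
	swait_event(foo_wait, foo_done);
}

static void foo_data_arrived(void)
{
	foo_done = true;
	/* was: wake_up(&foo_wait); */
	swait_wake(&foo_wait);
}
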
Index: linux-3.2/include/linux/wait-simple.h
===================================================================
--- /dev/null
+++ linux-3.2/include/linux/wait-simple.h
@@ -0,0 +1,152 @@
+#ifndef _LINUX_WAIT_SIMPLE_H
+#define _LINUX_WAIT_SIMPLE_H
+
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+#include <asm/current.h>
+
+struct swaiter {
+	struct task_struct *task;
+	struct list_head node;
+};
+
+#define DEFINE_SWAITER(name) \
+	struct swaiter name = { \
+		.task = current, \
+		.node = LIST_HEAD_INIT((name).node), \
+	}
+
+struct swait_head {
+	raw_spinlock_t lock;
+	struct list_head list;
+};
+
+#define DEFINE_SWAIT_HEAD(name) \
+	struct swait_head name = { \
+		.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+		.list = LIST_HEAD_INIT((name).list), \
+	}
+
+extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key);
+
+#define init_swait_head(swh) \
+	do { \
+		static struct lock_class_key __key; \
+		\
+		__init_swait_head((swh), &__key); \
+	} while (0)
+
+/*
+ * Waiter functions
+ */
+static inline bool swaiter_enqueued(struct swaiter *w)
+{
+	return w->task != NULL;
+}
+
+extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state);
+extern void swait_finish(struct swait_head *head, struct swaiter *w);
+
+/*
+ * Adds w to head->list. Must be called with head->lock locked.
+ */
+static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w)
+{
+	list_add(&w->node, &head->list);
+}
+
+/*
+ * Removes w from head->list. Must be called with head->lock locked.
+ */
+static inline void __swait_dequeue(struct swaiter *w)
+{
+	list_del_init(&w->node);
+}
+
+/*
+ * Wakeup functions
+ */
+extern void __swait_wake(struct swait_head *head, unsigned int state);
+
+static inline void swait_wake(struct swait_head *head)
+{
+	__swait_wake(head, TASK_NORMAL);
+}
+
+/*
+ * Event API
+ */
+
+#define __swait_event(wq, condition) \
+do { \
+	DEFINE_SWAITER(__wait); \
+	\
+	for (;;) { \
+		swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
+		if (condition) \
+			break; \
+		schedule(); \
+	} \
+	swait_finish(&wq, &__wait); \
+} while (0)
+
+/**
+ * swait_event - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define swait_event(wq, condition) \
+do { \
+	if (condition) \
+		break; \
+	__swait_event(wq, condition); \
+} while (0)
+
+#define __swait_event_timeout(wq, condition, ret) \
+do { \
+	DEFINE_SWAITER(__wait); \
+	\
+	for (;;) { \
+		swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
+		if (condition) \
+			break; \
+		ret = schedule_timeout(ret); \
+		if (!ret) \
+			break; \
+	} \
+	swait_finish(&wq, &__wait); \
+} while (0)
+
+/**
+ * swait_event_timeout - sleep until a condition gets true or a timeout elapses
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function returns 0 if the @timeout elapsed, and the remaining
+ * jiffies if the condition evaluated to true before the timeout elapsed.
+ */
+#define swait_event_timeout(wq, condition, timeout) \
+({ \
+	long __ret = timeout; \
+	if (!(condition)) \
+		__swait_event_timeout(wq, condition, __ret); \
+	__ret; \
+})
+
+#endif
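
The swait_event*() macros above are thin wrappers around the low-level waiter API (DEFINE_SWAITER(), swait_prepare(), swait_finish()), so callers with special requirements can open-code the same loop. A hypothetical sketch that drops a caller-held lock across the sleep (my_lock and my_cond are placeholders, not part of this patch):

/* Illustrative sketch only -- not part of the patch. */
static void my_wait_for_cond(struct swait_head *head, spinlock_t *my_lock,
			     bool *my_cond)
{
	DEFINE_SWAITER(w);

	for (;;) {
		/* Enqueue and set the task state under head->lock. */
		swait_prepare(head, &w, TASK_UNINTERRUPTIBLE);
		if (*my_cond)
			break;
		spin_unlock(my_lock);	/* do not sleep with the lock held */
		schedule();
		spin_lock(my_lock);
	}
	/* Set TASK_RUNNING and dequeue unless a wakeup already did. */
	swait_finish(head, &w);
}
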
Index: linux-3.2/kernel/Makefile
===================================================================
--- linux-3.2.orig/kernel/Makefile
+++ linux-3.2/kernel/Makefile
@@ -10,7 +10,7 @@ obj-y = sched.o fork.o exec_domain.o
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \
 	    hrtimer.o nsproxy.o srcu.o semaphore.o \
 	    notifier.o ksysfs.o sched_clock.o cred.o \
-	    async.o range.o
+	    async.o range.o wait-simple.o
 obj-y += groups.o
 
 ifdef CONFIG_FUNCTION_TRACER
Index: linux-3.2/kernel/wait-simple.c
===================================================================
--- /dev/null
+++ linux-3.2/kernel/wait-simple.c
@@ -0,0 +1,63 @@
+/*
+ * Simple waitqueues without fancy flags and callbacks
+ *
+ * (C) 2011 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Based on kernel/wait.c
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/wait-simple.h>
+
+void __init_swait_head(struct swait_head *head, struct lock_class_key *key)
+{
+	raw_spin_lock_init(&head->lock);
+	lockdep_set_class(&head->lock, key);
+	INIT_LIST_HEAD(&head->list);
+}
+EXPORT_SYMBOL_GPL(__init_swait_head);
+
+void swait_prepare(struct swait_head *head, struct swaiter *w, int state)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&head->lock, flags);
+	w->task = current;
+	__swait_enqueue(head, w);
+	set_current_state(state);
+	raw_spin_unlock_irqrestore(&head->lock, flags);
+}
+EXPORT_SYMBOL_GPL(swait_prepare);
+
+void swait_finish(struct swait_head *head, struct swaiter *w)
+{
+	unsigned long flags;
+
+	__set_current_state(TASK_RUNNING);
+	if (w->task) {
+		raw_spin_lock_irqsave(&head->lock, flags);
+		__swait_dequeue(w);
+		raw_spin_unlock_irqrestore(&head->lock, flags);
+	}
+}
+EXPORT_SYMBOL_GPL(swait_finish);
+
+void __swait_wake(struct swait_head *head, unsigned int state)
+{
+	struct swaiter *curr, *next;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&head->lock, flags);
+
+	list_for_each_entry_safe(curr, next, &head->list, node) {
+		if (wake_up_state(curr->task, state)) {
+			__swait_dequeue(curr);
+			curr->task = NULL;
+		}
+	}
+
+	raw_spin_unlock_irqrestore(&head->lock, flags);
+}
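
Note that __swait_wake() above only dequeues a waiter when wake_up_state() reports that it actually woke the task, and it clears w->task while doing so; that is what allows swait_finish() to skip taking the lock in the common wakeup path. A state-filtered wrapper in the style of wake_up_interruptible() is not part of this patch, but would be a one-liner on top of this primitive, e.g.:

/* Illustrative sketch only -- not part of the patch. */
static inline void swait_wake_interruptible(struct swait_head *head)
{
	/* Wake only waiters that sleep in TASK_INTERRUPTIBLE. */
	__swait_wake(head, TASK_INTERRUPTIBLE);
}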