Subject: seqlock: Provide seq_spin_* functions
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 27 Feb 2012 17:55:11 +0100

In some cases it's desirable to lock the seqlock w/o changing the
seqcount. Provide functions for this, so we can avoid open coded
constructs.

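For example (usage sketch only; "foo" stands for any seqlock_t
pointer), a writer which merely needs mutual exclusion currently has
to open code the access to the lock member:

	spin_lock(&foo->lock);
	/* ... modify data which no seqcount reader looks at ... */
	spin_unlock(&foo->lock);

With the new helpers this becomes:

	seq_spin_lock(foo);
	/* ... */
	seq_spin_unlock(foo);
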
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
 include/linux/seqlock.h |   64 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)

Index: rt/include/linux/seqlock.h
===================================================================
--- rt.orig/include/linux/seqlock.h
+++ rt/include/linux/seqlock.h
@@ -188,6 +188,19 @@ static inline unsigned read_seqretry(con
 }
 
 /*
+ * Ditto w/o barriers
+ */
+static inline unsigned __read_seqbegin(const seqlock_t *sl)
+{
+	return __read_seqcount_begin(&sl->seqcount);
+}
+
+static inline unsigned __read_seqretry(const seqlock_t *sl, unsigned start)
+{
+	return __read_seqcount_retry(&sl->seqcount, start);
+}
+
+/*
  * Lock out other writers and update the count.
  * Acts like a normal spin_lock/unlock.
  * Don't need preempt_disable() because that is in the spin_lock already.
@@ -247,4 +260,55 @@ write_sequnlock_irqrestore(seqlock_t *sl
 	spin_unlock_irqrestore(&sl->lock, flags);
 }
 
+/*
+ * Instead of open coding a spinlock and a seqcount, the following
+ * functions allow serializing on the seqlock w/o touching the seqcount.
+ */
+static inline void seq_spin_lock(seqlock_t *sl)
+{
+	spin_lock(&sl->lock);
+}
+
+static inline int seq_spin_trylock(seqlock_t *sl)
+{
+	return spin_trylock(&sl->lock);
+}
+
+static inline void seq_spin_unlock(seqlock_t *sl)
+{
+	spin_unlock(&sl->lock);
+}
+
+static inline void assert_seq_spin_locked(seqlock_t *sl)
+{
+	assert_spin_locked(&sl->lock);
+}
+
+static inline void seq_spin_lock_nested(seqlock_t *sl, int subclass)
+{
+	spin_lock_nested(&sl->lock, subclass);
+}
+
+/*
+ * For writers which need to take/release the lock w/o updating the
+ * seqcount for whatever reason, the following functions allow updating
+ * the count after the lock has been acquired or before it is released.
+ */
+static inline void write_seqlock_begin(seqlock_t *sl)
+{
+	assert_spin_locked(&sl->lock);
+	write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_seqlock_end(seqlock_t *sl)
+{
+	assert_spin_locked(&sl->lock);
+	write_seqcount_end(&sl->seqcount);
+}
+
+static inline void write_seqlock_barrier(seqlock_t *sl)
+{
+	write_seqcount_barrier(&sl->seqcount);
+}
+
 #endif /* __LINUX_SEQLOCK_H */
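
[ Usage sketch, not part of the patch; "foo" and update_data() are
  made up for illustration. A writer which takes the lock early, but
  only wants readers to retry around the actual data update, would do:

	seq_spin_lock(foo);
	/* lookups, allocations, error handling ... */
	write_seqlock_begin(foo);
	update_data(foo);
	write_seqlock_end(foo);
	seq_spin_unlock(foo);

  write_seqlock_begin/end assert that the lock is held, so the count
  is only ever modified under the lock. ]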