linux/debian/patches-rt/0195-seqlock-Prevent-rt-sta...

From 5d67b6df1840dc09f2377137df24cf0830324714 Mon Sep 17 00:00:00 2001
Message-Id: <5d67b6df1840dc09f2377137df24cf0830324714.1601675152.git.zanussi@kernel.org>
In-Reply-To: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
References: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 22 Feb 2012 12:03:30 +0100
Subject: [PATCH 195/333] seqlock: Prevent rt starvation
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.148-rt64.tar.xz

If a low prio writer gets preempted while holding the seqlock write
locked, a high prio reader spins forever on RT.

To prevent this, let the reader grab the spinlock, so it blocks and
eventually boosts the writer. This way the writer can proceed and
endless spinning is prevented.

For seqcount writers we disable preemption over the update code
path. Thanks to Al Viro for disentangling some VFS code to make that
possible.

Nicholas Mc Guire:
- spin_lock+unlock => spin_unlock_wait
- __write_seqcount_begin => __raw_write_seqcount_begin

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
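[ For orientation, not part of the original patch: a minimal user-space
  sketch of the two read-side strategies described above, with pthreads
  standing in for the kernel primitives. All names here are invented; a
  PI mutex (PTHREAD_PRIO_INHERIT) would provide the boosting that the
  RT spinlock gives in the kernel. ]

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct demo_seqlock {
	atomic_uint sequence;		/* odd while a write is in flight */
	pthread_mutex_t lock;		/* writer-side lock */
};

/*
 * Classic reader: busy-waits until the sequence is even. If the writer
 * was preempted mid-update by this higher-priority reader on the same
 * CPU, the loop never terminates - the starvation the patch fixes.
 */
static unsigned read_begin_spinning(struct demo_seqlock *sl)
{
	unsigned ret;

	while ((ret = atomic_load_explicit(&sl->sequence,
					   memory_order_acquire)) & 1)
		;			/* unbounded spin */
	return ret;
}

/*
 * RT-safe reader: when a write is in flight, block on the writer's
 * lock. With priority inheritance the preempted writer is boosted,
 * finishes its update, and the reader can proceed.
 */
static unsigned read_begin_boosting(struct demo_seqlock *sl)
{
	unsigned ret;

	while ((ret = atomic_load_explicit(&sl->sequence,
					   memory_order_acquire)) & 1) {
		pthread_mutex_lock(&sl->lock);	/* block, boost writer */
		pthread_mutex_unlock(&sl->lock);
	}
	return ret;
}

int main(void)
{
	struct demo_seqlock sl = {
		.sequence = 0,
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	/* A complete write: sequence goes odd, data changes, even again. */
	pthread_mutex_lock(&sl.lock);
	atomic_fetch_add(&sl.sequence, 1);
	/* ... update the protected data here ... */
	atomic_fetch_add(&sl.sequence, 1);
	pthread_mutex_unlock(&sl.lock);

	printf("seq after write: %u\n", read_begin_boosting(&sl));
	(void)read_begin_spinning;	/* kept only for comparison */
	return 0;
}

[ The second loop is the shape of the RT read_seqbegin() added below;
  in the kernel the lock+unlock pair is hidden behind
  spin_unlock_wait(). ]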
 include/linux/seqlock.h | 57 ++++++++++++++++++++++++++++++++---------
 include/net/neighbour.h |  6 +++---
 2 files changed, 48 insertions(+), 15 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index bcf4cf26b8c8..689ed53016c7 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -221,20 +221,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 	return __read_seqcount_retry(s, start);
 }
 
-
-
-static inline void raw_write_seqcount_begin(seqcount_t *s)
+static inline void __raw_write_seqcount_begin(seqcount_t *s)
 {
 	s->sequence++;
 	smp_wmb();
 }
 
-static inline void raw_write_seqcount_end(seqcount_t *s)
+static inline void raw_write_seqcount_begin(seqcount_t *s)
+{
+	preempt_disable_rt();
+	__raw_write_seqcount_begin(s);
+}
+
+static inline void __raw_write_seqcount_end(seqcount_t *s)
 {
 	smp_wmb();
 	s->sequence++;
 }
 
+static inline void raw_write_seqcount_end(seqcount_t *s)
+{
+	__raw_write_seqcount_end(s);
+	preempt_enable_rt();
+}
+
 /**
  * raw_write_seqcount_barrier - do a seq write barrier
  * @s: pointer to seqcount_t
@@ -428,10 +438,33 @@ typedef struct {
 /*
  * Read side functions for starting and finalizing a read side section.
  */
+#ifndef CONFIG_PREEMPT_RT_FULL
 static inline unsigned read_seqbegin(const seqlock_t *sl)
 {
 	return read_seqcount_begin(&sl->seqcount);
 }
+#else
+/*
+ * Starvation safe read side for RT
+ */
+static inline unsigned read_seqbegin(seqlock_t *sl)
+{
+	unsigned ret;
+
+repeat:
+	ret = READ_ONCE(sl->seqcount.sequence);
+	if (unlikely(ret & 1)) {
+		/*
+		 * Take the lock and let the writer proceed (i.e. evtl
+		 * boost it), otherwise we could loop here forever.
+		 */
+		spin_unlock_wait(&sl->lock);
+		goto repeat;
+	}
+	smp_rmb();
+	return ret;
+}
+#endif
 
 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 {
@@ -446,36 +479,36 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 static inline void write_seqlock(seqlock_t *sl)
 {
 	spin_lock(&sl->lock);
-	write_seqcount_begin(&sl->seqcount);
+	__raw_write_seqcount_begin(&sl->seqcount);
 }
 
 static inline void write_sequnlock(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	__raw_write_seqcount_end(&sl->seqcount);
 	spin_unlock(&sl->lock);
 }
 
 static inline void write_seqlock_bh(seqlock_t *sl)
 {
 	spin_lock_bh(&sl->lock);
-	write_seqcount_begin(&sl->seqcount);
+	__raw_write_seqcount_begin(&sl->seqcount);
 }
 
 static inline void write_sequnlock_bh(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	__raw_write_seqcount_end(&sl->seqcount);
 	spin_unlock_bh(&sl->lock);
 }
 
 static inline void write_seqlock_irq(seqlock_t *sl)
 {
 	spin_lock_irq(&sl->lock);
-	write_seqcount_begin(&sl->seqcount);
+	__raw_write_seqcount_begin(&sl->seqcount);
 }
 
 static inline void write_sequnlock_irq(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	__raw_write_seqcount_end(&sl->seqcount);
 	spin_unlock_irq(&sl->lock);
 }
 
@@ -484,7 +517,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sl->lock, flags);
-	write_seqcount_begin(&sl->seqcount);
+	__raw_write_seqcount_begin(&sl->seqcount);
 	return flags;
 }
 
@@ -494,7 +527,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 static inline void
 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
 {
-	write_seqcount_end(&sl->seqcount);
+	__raw_write_seqcount_end(&sl->seqcount);
 	spin_unlock_irqrestore(&sl->lock, flags);
 }
 
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 5ce035984a4d..1166fc17b757 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -451,7 +451,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
 }
 #endif
 
-static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
+static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
 {
 	unsigned int hh_alen = 0;
 	unsigned int seq;
@@ -493,7 +493,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
 
 static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
 {
-	const struct hh_cache *hh = &n->hh;
+	struct hh_cache *hh = &n->hh;
 
 	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
 		return neigh_hh_output(hh, skb);
@@ -534,7 +534,7 @@ struct neighbour_cb {
 
 #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
 
-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
+static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
 				     const struct net_device *dev)
 {
 	unsigned int seq;
--
2.17.1
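
[ Usage note, not part of the patch: the seqlock API is unchanged for
  its users, only the internals differ. A hypothetical writer/reader
  pair; the demo_* names are invented for illustration. ]

#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(demo_lock);
static u64 demo_a, demo_b;

static void demo_update(u64 a, u64 b)
{
	/*
	 * After the patch this takes demo_lock.lock (a sleeping
	 * "spinlock" on RT) and bumps the sequence via
	 * __raw_write_seqcount_begin(), relying on the lock rather
	 * than preemption disabling to serialize the write side.
	 */
	write_seqlock(&demo_lock);
	demo_a = a;
	demo_b = b;
	write_sequnlock(&demo_lock);
}

static void demo_snapshot(u64 *a, u64 *b)
{
	unsigned int seq;

	do {
		/*
		 * On PREEMPT_RT_FULL, if a write is in flight this
		 * waits via spin_unlock_wait(), which blocks on
		 * demo_lock.lock and can boost a preempted writer
		 * instead of spinning.
		 */
		seq = read_seqbegin(&demo_lock);
		*a = demo_a;
		*b = demo_b;
	} while (read_seqretry(&demo_lock, seq));
}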