From c8dd410ff81f11189662f9bbfcdea62cd979bd7a Mon Sep 17 00:00:00 2001
Message-Id: <c8dd410ff81f11189662f9bbfcdea62cd979bd7a.1592846147.git.zanussi@kernel.org>
In-Reply-To: <07cd0dbc80b976663c80755496a03f288decfe5a.1592846146.git.zanussi@kernel.org>
References: <07cd0dbc80b976663c80755496a03f288decfe5a.1592846146.git.zanussi@kernel.org>
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 13 Jan 2016 15:55:02 +0100
Subject: [PATCH 198/330] net: move xmit_recursion to per-task variable on -RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.127-rt55.tar.xz
A softirq on -RT can be preempted. That means one task is in
__dev_queue_xmit(), gets preempted, and another task may enter
__dev_queue_xmit() as well. netperf together with a bridge device
will then trigger the `recursion alert` because each task increments
the xmit_recursion variable, which is per-CPU.
A virtual device like br0 is required to trigger this warning.

This patch moves the lock owner and the recursion counter from per-CPU
to per-task so that the recursion is counted properly on -RT. The owner
is now a task pointer instead of a CPU number.
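
As an illustration (a sketch, not part of the patch itself): on one
CPU, the counts of several preempted senders pile up in the per-CPU
counter, while a per-task counter only ever reflects the calling
task's own nesting depth:

    /*
     * task A                          task B
     * __dev_queue_xmit()
     *   __this_cpu_inc(xmit_recursion);  // per-CPU count: 1
     *   <preempted>
     *                                 __dev_queue_xmit()
     *                                   __this_cpu_inc(xmit_recursion); // 2
     *                                   <preempted>
     * ...
     * Enough preempted senders push the per-CPU counter past
     * XMIT_RECURSION_LIMIT (10) although no task recursed that deeply;
     * with current->xmit_recursion each task sees only its own count.
     * Likewise, a per-task xmit_lock_owner cannot be mistaken for lock
     * recursion when the real owner is merely preempted on this CPU.
     */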
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/netdevice.h | 95 ++++++++++++++++++++++++++++++++++++---
 include/linux/sched.h     |  3 ++
 net/core/dev.c            | 15 ++++---
 net/core/filter.c         |  6 +--
 4 files changed, 104 insertions(+), 15 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b816eb0bc1c4..5de4b66e11fe 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -587,7 +587,11 @@ struct netdev_queue {
* write-mostly part
*/
spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ struct task_struct *xmit_lock_owner;
+#else
int xmit_lock_owner;
+#endif
/*
* Time (in jiffies) of last Tx
*/
@@ -2620,14 +2624,53 @@ void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
-DECLARE_PER_CPU(int, xmit_recursion);
#define XMIT_RECURSION_LIMIT 10
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int dev_recursion_level(void)
+{
+ return current->xmit_recursion;
+}
+
+static inline int xmit_rec_read(void)
+{
+ return current->xmit_recursion;
+}
+
+static inline void xmit_rec_inc(void)
+{
+ current->xmit_recursion++;
+}
+
+static inline void xmit_rec_dec(void)
+{
+ current->xmit_recursion--;
+}
+
+#else
+
+DECLARE_PER_CPU(int, xmit_recursion);
static inline int dev_recursion_level(void)
{
return this_cpu_read(xmit_recursion);
}
+static inline int xmit_rec_read(void)
+{
+ return __this_cpu_read(xmit_recursion);
+}
+
+static inline void xmit_rec_inc(void)
+{
+ __this_cpu_inc(xmit_recursion);
+}
+
+static inline void xmit_rec_dec(void)
+{
+ __this_cpu_dec(xmit_recursion);
+}
+#endif
+
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
@@ -3805,10 +3848,48 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
return (1U << debug_value) - 1;
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+ txq->xmit_lock_owner = current;
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+ txq->xmit_lock_owner = NULL;
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+ if (txq->xmit_lock_owner != NULL)
+ return true;
+ return false;
+}
+
+#else
+
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+ txq->xmit_lock_owner = cpu;
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+ txq->xmit_lock_owner = -1;
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+ if (txq->xmit_lock_owner != -1)
+ return true;
+ return false;
+}
+#endif
+
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
- txq->xmit_lock_owner = cpu;
+ netdev_queue_set_owner(txq, cpu);
}
static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -3825,32 +3906,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
- txq->xmit_lock_owner = smp_processor_id();
+ netdev_queue_set_owner(txq, smp_processor_id());
}
static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
bool ok = spin_trylock(&txq->_xmit_lock);
if (likely(ok))
- txq->xmit_lock_owner = smp_processor_id();
+ netdev_queue_set_owner(txq, smp_processor_id());
return ok;
}
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ netdev_queue_clear_owner(txq);
spin_unlock(&txq->_xmit_lock);
}
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ netdev_queue_clear_owner(txq);
spin_unlock_bh(&txq->_xmit_lock);
}
static inline void txq_trans_update(struct netdev_queue *txq)
{
- if (txq->xmit_lock_owner != -1)
+ if (netdev_queue_has_owner(txq))
txq->trans_start = jiffies;
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4a4f03a8d8d4..37334ec1ab3e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1216,6 +1216,9 @@ struct task_struct {
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
+#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int xmit_recursion;
#endif
int pagefault_disabled;
#ifdef CONFIG_MMU
diff --git a/net/core/dev.c b/net/core/dev.c
index c0e089fde4b5..b223a825affc 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3540,8 +3540,10 @@ static void skb_update_prio(struct sk_buff *skb)
#define skb_update_prio(skb)
#endif
+#ifndef CONFIG_PREEMPT_RT_FULL
DEFINE_PER_CPU(int, xmit_recursion);
EXPORT_SYMBOL(xmit_recursion);
+#endif
/**
* dev_loopback_xmit - loop back @skb
@@ -3832,9 +3834,12 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
if (dev->flags & IFF_UP) {
int cpu = smp_processor_id(); /* ok because BHs are off */
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (txq->xmit_lock_owner != current) {
+#else
if (txq->xmit_lock_owner != cpu) {
- if (unlikely(__this_cpu_read(xmit_recursion) >
- XMIT_RECURSION_LIMIT))
+#endif
+ if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
goto recursion_alert;
skb = validate_xmit_skb(skb, dev, &again);
@@ -3844,9 +3849,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
- __this_cpu_inc(xmit_recursion);
+ xmit_rec_inc();
skb = dev_hard_start_xmit(skb, dev, txq, &rc);
- __this_cpu_dec(xmit_recursion);
+ xmit_rec_dec();
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
@@ -8578,7 +8583,7 @@ static void netdev_init_one_queue(struct net_device *dev,
/* Initialize queue lock */
spin_lock_init(&queue->_xmit_lock);
netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
- queue->xmit_lock_owner = -1;
+ netdev_queue_clear_owner(queue);
netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
queue->dev = dev;
#ifdef CONFIG_BQL
diff --git a/net/core/filter.c b/net/core/filter.c
index 40b3af05c883..205cd1bb9bc2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2000,7 +2000,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
int ret;
- if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
+ if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT)) {
net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
kfree_skb(skb);
return -ENETDOWN;
@@ -2009,9 +2009,9 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
skb->dev = dev;
skb->tstamp = 0;
- __this_cpu_inc(xmit_recursion);
+ xmit_rec_inc();
ret = dev_queue_xmit(skb);
- __this_cpu_dec(xmit_recursion);
+ xmit_rec_dec();
return ret;
}
--
2.17.1