From c8dd410ff81f11189662f9bbfcdea62cd979bd7a Mon Sep 17 00:00:00 2001
Message-Id: <c8dd410ff81f11189662f9bbfcdea62cd979bd7a.1592846147.git.zanussi@kernel.org>
In-Reply-To: <07cd0dbc80b976663c80755496a03f288decfe5a.1592846146.git.zanussi@kernel.org>
References: <07cd0dbc80b976663c80755496a03f288decfe5a.1592846146.git.zanussi@kernel.org>
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 13 Jan 2016 15:55:02 +0100
Subject: [PATCH 198/330] net: move xmit_recursion to per-task variable on -RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.127-rt55.tar.xz

A softirq on -RT can be preempted. That means one task is in
__dev_queue_xmit(), gets preempted, and another task may enter
__dev_queue_xmit() as well. netperf together with a bridge device
will then trigger the `recursion alert` because each task increments
the per-CPU xmit_recursion variable. A virtual device like br0 is
required to trigger this warning.

This patch moves the lock owner and counter to be per task instead of
per-CPU so the recursion is counted properly on -RT. The owner is now
a task rather than a CPU number.
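
Reduced to its core, the -RT path after this patch looks as follows
(an illustrative sketch of the hunks below, with locking and error
handling omitted): the owner check compares task pointers and the
counter lives in task_struct, so a preempting sender starts from its
own count of zero:

  if (txq->xmit_lock_owner != current) {  /* owner is a task, not a CPU id */
          if (dev_xmit_recursion())       /* current->xmit_recursion > limit */
                  goto recursion_alert;
          dev_xmit_recursion_inc();       /* current->xmit_recursion++ */
          skb = dev_hard_start_xmit(skb, dev, txq, &rc);
          dev_xmit_recursion_dec();       /* current->xmit_recursion-- */
  }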

Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
[bwh: Forward-ported to 4.19.131:
 - Define dev_xmit_recursion{,_inc,_dec}() instead of
   xmit_rec_{read,inc,dec}(), matching the functions introduced by
   "net: place xmit recursion in softnet data"
 - Make the softnet_data::xmit.recursion field, rather than the global
   percpu variable xmit_recursion, dependent on !CONFIG_PREEMPT_RT_FULL]
---
 include/linux/netdevice.h | 95 ++++++++++++++++++++++++++++++++++++---
 include/linux/sched.h     |  3 ++
 net/core/dev.c            | 15 ++++---
 net/core/filter.c         |  6 +--
 4 files changed, 104 insertions(+), 15 deletions(-)

--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -587,7 +587,11 @@ struct netdev_queue {
 	 * write-mostly part
 	 */
 	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct task_struct	*xmit_lock_owner;
+#else
 	int			xmit_lock_owner;
+#endif
 	/*
 	 * Time (in jiffies) of last Tx
 	 */
@@ -2961,7 +2965,9 @@ struct softnet_data {
 #endif
 	/* written and read only by owning cpu: */
 	struct {
+#ifndef CONFIG_PREEMPT_RT_FULL
 		u16 recursion;
+#endif
 		u8  more;
 	} xmit;
 #ifdef CONFIG_RPS
@@ -3000,12 +3006,36 @@ static inline void input_queue_tail_incr

 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

+#define XMIT_RECURSION_LIMIT	8
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int dev_recursion_level(void)
+{
+	return current->xmit_recursion;
+}
+
+static inline bool dev_xmit_recursion(void)
+{
+	return unlikely(current->xmit_recursion > XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+	current->xmit_recursion++;
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+	current->xmit_recursion--;
+}
+
+#else
+
 static inline int dev_recursion_level(void)
 {
 	return this_cpu_read(softnet_data.xmit.recursion);
 }

-#define XMIT_RECURSION_LIMIT	8
 static inline bool dev_xmit_recursion(void)
 {
 	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
@@ -3022,6 +3052,8 @@ static inline void dev_xmit_recursion_de
 	__this_cpu_dec(softnet_data.xmit.recursion);
 }

+#endif
+
 void __netif_schedule(struct Qdisc *q);
 void netif_schedule_queue(struct netdev_queue *txq);

@@ -3824,10 +3856,48 @@ static inline u32 netif_msg_init(int deb
 	return (1U << debug_value) - 1;
 }

+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+	txq->xmit_lock_owner = current;
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = NULL;
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+	if (txq->xmit_lock_owner != NULL)
+		return true;
+	return false;
+}
+
+#else
+
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+	txq->xmit_lock_owner = cpu;
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+	if (txq->xmit_lock_owner != -1)
+		return true;
+	return false;
+}
+#endif
+
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
 	spin_lock(&txq->_xmit_lock);
-	txq->xmit_lock_owner = cpu;
+	netdev_queue_set_owner(txq, cpu);
 }

 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -3844,32 +3914,32 @@ static inline void __netif_tx_release(st
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
 	spin_lock_bh(&txq->_xmit_lock);
-	txq->xmit_lock_owner = smp_processor_id();
+	netdev_queue_set_owner(txq, smp_processor_id());
 }

 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 {
 	bool ok = spin_trylock(&txq->_xmit_lock);
 	if (likely(ok))
-		txq->xmit_lock_owner = smp_processor_id();
+		netdev_queue_set_owner(txq, smp_processor_id());
 	return ok;
 }

 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	netdev_queue_clear_owner(txq);
 	spin_unlock(&txq->_xmit_lock);
 }

 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	netdev_queue_clear_owner(txq);
 	spin_unlock_bh(&txq->_xmit_lock);
 }

 static inline void txq_trans_update(struct netdev_queue *txq)
 {
-	if (txq->xmit_lock_owner != -1)
+	if (netdev_queue_has_owner(txq))
 		txq->trans_start = jiffies;
 }

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1217,6 +1217,9 @@ struct task_struct {
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 	unsigned long			task_state_change;
 #endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+	int				xmit_recursion;
+#endif
 	int				pagefault_disabled;
 #ifdef CONFIG_MMU
 	struct task_struct		*oom_reaper_list;
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3825,7 +3825,11 @@ static int __dev_queue_xmit(struct sk_bu
 	if (dev->flags & IFF_UP) {
 		int cpu = smp_processor_id(); /* ok because BHs are off */

+#ifdef CONFIG_PREEMPT_RT_FULL
+		if (txq->xmit_lock_owner != current) {
+#else
 		if (txq->xmit_lock_owner != cpu) {
+#endif
 			if (dev_xmit_recursion())
 				goto recursion_alert;

@@ -8572,7 +8576,7 @@ static void netdev_init_one_queue(struct
 	/* Initialize queue lock */
 	spin_lock_init(&queue->_xmit_lock);
 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
-	queue->xmit_lock_owner = -1;
+	netdev_queue_clear_owner(queue);
 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
 	queue->dev = dev;
 #ifdef CONFIG_BQL