From 70506d966f14b76fc42746009b663a48cd7e2287 Mon Sep 17 00:00:00 2001
Message-Id: <70506d966f14b76fc42746009b663a48cd7e2287.1601675153.git.zanussi@kernel.org>
In-Reply-To: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
References: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 26 Jun 2019 11:59:44 +0200
Subject: [PATCH 276/333] futex: Make the futex_hash_bucket lock raw
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.148-rt64.tar.xz

[ Upstream commit f646521aadedab78801c9befe193e2e8a0c99298 ]

Since commit 1a1fb985f2e2b ("futex: Handle early deadlock return
correctly") we can deadlock while attempting to acquire the HB lock
after failing to acquire the futex lock itself. The RT waiter (for the
futex lock) is still enqueued, and acquiring the HB lock may build up a
lock chain which leads to a deadlock if the owner of the futex lock
holds the HB lock.

Make the hash bucket lock raw so it does not participate in the
lock chain.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 kernel/futex.c | 88 +++++++++++++++++++++++++-------------------------
 1 file changed, 44 insertions(+), 44 deletions(-)

diff --git a/kernel/futex.c b/kernel/futex.c
index 9900c3f30124..8e8658df69d9 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -243,7 +243,7 @@ struct futex_q {
         struct plist_node list;
 
         struct task_struct *task;
-        spinlock_t *lock_ptr;
+        raw_spinlock_t *lock_ptr;
         union futex_key key;
         struct futex_pi_state *pi_state;
         struct rt_mutex_waiter *rt_waiter;
@@ -264,7 +264,7 @@ static const struct futex_q futex_q_init = {
  */
 struct futex_hash_bucket {
         atomic_t waiters;
-        spinlock_t lock;
+        raw_spinlock_t lock;
         struct plist_head chain;
 } ____cacheline_aligned_in_smp;
 
@@ -928,7 +928,7 @@ void exit_pi_state_list(struct task_struct *curr)
                 }
                 raw_spin_unlock_irq(&curr->pi_lock);
 
-                spin_lock(&hb->lock);
+                raw_spin_lock(&hb->lock);
                 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
                 raw_spin_lock(&curr->pi_lock);
                 /*
@@ -938,7 +938,7 @@ void exit_pi_state_list(struct task_struct *curr)
                 if (head->next != next) {
                         /* retain curr->pi_lock for the loop invariant */
                         raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
-                        spin_unlock(&hb->lock);
+                        raw_spin_unlock(&hb->lock);
                         put_pi_state(pi_state);
                         continue;
                 }
@@ -950,7 +950,7 @@
 
                 raw_spin_unlock(&curr->pi_lock);
                 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
-                spin_unlock(&hb->lock);
+                raw_spin_unlock(&hb->lock);
 
                 rt_mutex_futex_unlock(&pi_state->pi_mutex);
                 put_pi_state(pi_state);
@@ -1444,7 +1444,7 @@ static void __unqueue_futex(struct futex_q *q)
 {
         struct futex_hash_bucket *hb;
 
-        if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
+        if (WARN_ON_SMP(!q->lock_ptr || !raw_spin_is_locked(q->lock_ptr))
             || WARN_ON(plist_node_empty(&q->list)))
                 return;
 
@@ -1572,21 +1572,21 @@ static inline void
 double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
 {
         if (hb1 <= hb2) {
-                spin_lock(&hb1->lock);
+                raw_spin_lock(&hb1->lock);
                 if (hb1 < hb2)
-                        spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
+                        raw_spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
         } else { /* hb1 > hb2 */
-                spin_lock(&hb2->lock);
-                spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
+                raw_spin_lock(&hb2->lock);
+                raw_spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
         }
 }
 
 static inline void
 double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
 {
-        spin_unlock(&hb1->lock);
+        raw_spin_unlock(&hb1->lock);
         if (hb1 != hb2)
-                spin_unlock(&hb2->lock);
+                raw_spin_unlock(&hb2->lock);
 }
 
 /*
@@ -1614,7 +1614,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
         if (!hb_waiters_pending(hb))
                 goto out_put_key;
 
-        spin_lock(&hb->lock);
+        raw_spin_lock(&hb->lock);
 
         plist_for_each_entry_safe(this, next, &hb->chain, list) {
                 if (match_futex (&this->key, &key)) {
@@ -1633,7 +1633,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
                 }
         }
 
-        spin_unlock(&hb->lock);
+        raw_spin_unlock(&hb->lock);
         wake_up_q(&wake_q);
 out_put_key:
         put_futex_key(&key);
@@ -2238,7 +2238,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 
         q->lock_ptr = &hb->lock;
 
-        spin_lock(&hb->lock); /* implies smp_mb(); (A) */
+        raw_spin_lock(&hb->lock); /* implies smp_mb(); (A) */
         return hb;
 }
 
@@ -2246,7 +2246,7 @@ static inline void
 queue_unlock(struct futex_hash_bucket *hb)
         __releases(&hb->lock)
 {
-        spin_unlock(&hb->lock);
+        raw_spin_unlock(&hb->lock);
         hb_waiters_dec(hb);
 }
 
@@ -2285,7 +2285,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
         __releases(&hb->lock)
 {
         __queue_me(q, hb);
-        spin_unlock(&hb->lock);
+        raw_spin_unlock(&hb->lock);
 }
 
 /**
@@ -2301,41 +2301,41 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
  */
 static int unqueue_me(struct futex_q *q)
 {
-        spinlock_t *lock_ptr;
+        raw_spinlock_t *lock_ptr;
         int ret = 0;
 
         /* In the common case we don't take the spinlock, which is nice. */
retry:
         /*
-         * q->lock_ptr can change between this read and the following spin_lock.
-         * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
-         * optimizing lock_ptr out of the logic below.
+         * q->lock_ptr can change between this read and the following
+         * raw_spin_lock. Use READ_ONCE to forbid the compiler from reloading
+         * q->lock_ptr and optimizing lock_ptr out of the logic below.
          */
         lock_ptr = READ_ONCE(q->lock_ptr);
         if (lock_ptr != NULL) {
-                spin_lock(lock_ptr);
+                raw_spin_lock(lock_ptr);
                 /*
                  * q->lock_ptr can change between reading it and
-                 * spin_lock(), causing us to take the wrong lock. This
+                 * raw_spin_lock(), causing us to take the wrong lock. This
                  * corrects the race condition.
                  *
                  * Reasoning goes like this: if we have the wrong lock,
                  * q->lock_ptr must have changed (maybe several times)
-                 * between reading it and the spin_lock(). It can
-                 * change again after the spin_lock() but only if it was
-                 * already changed before the spin_lock(). It cannot,
+                 * between reading it and the raw_spin_lock(). It can
+                 * change again after the raw_spin_lock() but only if it was
+                 * already changed before the raw_spin_lock(). It cannot,
                  * however, change back to the original value. Therefore
                  * we can detect whether we acquired the correct lock.
                  */
                 if (unlikely(lock_ptr != q->lock_ptr)) {
-                        spin_unlock(lock_ptr);
+                        raw_spin_unlock(lock_ptr);
                         goto retry;
                 }
                 __unqueue_futex(q);
 
                 BUG_ON(q->pi_state);
 
-                spin_unlock(lock_ptr);
+                raw_spin_unlock(lock_ptr);
                 ret = 1;
         }
 
@@ -2357,7 +2357,7 @@ static void unqueue_me_pi(struct futex_q *q)
         put_pi_state(q->pi_state);
         q->pi_state = NULL;
 
-        spin_unlock(q->lock_ptr);
+        raw_spin_unlock(q->lock_ptr);
 }
 
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
@@ -2490,7 +2490,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
          */
handle_err:
         raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
-        spin_unlock(q->lock_ptr);
+        raw_spin_unlock(q->lock_ptr);
 
         switch (err) {
         case -EFAULT:
@@ -2508,7 +2508,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
                 break;
         }
 
-        spin_lock(q->lock_ptr);
+        raw_spin_lock(q->lock_ptr);
         raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
 
         /*
@@ -2604,7 +2604,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
         /*
          * The task state is guaranteed to be set before another task can
          * wake it. set_current_state() is implemented using smp_store_mb() and
-         * queue_me() calls spin_unlock() upon completion, both serializing
+         * queue_me() calls raw_spin_unlock() upon completion, both serializing
          * access to the hash list and forcing another memory barrier.
          */
         set_current_state(TASK_INTERRUPTIBLE);
@@ -2895,7 +2895,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
          * before __rt_mutex_start_proxy_lock() is done.
          */
         raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
-        spin_unlock(q.lock_ptr);
+        raw_spin_unlock(q.lock_ptr);
         /*
          * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
          * such that futex_unlock_pi() is guaranteed to observe the waiter when
@@ -2916,7 +2916,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
                 ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
 
cleanup:
-        spin_lock(q.lock_ptr);
+        raw_spin_lock(q.lock_ptr);
         /*
          * If we failed to acquire the lock (deadlock/signal/timeout), we must
          * first acquire the hb->lock before removing the lock from the
@@ -3017,7 +3017,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
                 return ret;
 
         hb = hash_futex(&key);
-        spin_lock(&hb->lock);
+        raw_spin_lock(&hb->lock);
 
         /*
          * Check waiters first. We do not trust user space values at
@@ -3051,7 +3051,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
                  * rt_waiter. Also see the WARN in wake_futex_pi().
                  */
                 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
-                spin_unlock(&hb->lock);
+                raw_spin_unlock(&hb->lock);
 
                 /* drops pi_state->pi_mutex.wait_lock */
                 ret = wake_futex_pi(uaddr, uval, pi_state);
@@ -3090,7 +3090,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
          * owner.
          */
         if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
-                spin_unlock(&hb->lock);
+                raw_spin_unlock(&hb->lock);
                 switch (ret) {
                 case -EFAULT:
                         goto pi_faulted;
@@ -3110,7 +3110,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
         ret = (curval == uval) ? 0 : -EAGAIN;
 
 out_unlock:
-        spin_unlock(&hb->lock);
+        raw_spin_unlock(&hb->lock);
 out_putkey:
         put_futex_key(&key);
         return ret;
@@ -3284,9 +3284,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
         /* Queue the futex_q, drop the hb lock, wait for wakeup. */
         futex_wait_queue_me(hb, &q, to);
 
-        spin_lock(&hb->lock);
+        raw_spin_lock(&hb->lock);
         ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
-        spin_unlock(&hb->lock);
+        raw_spin_unlock(&hb->lock);
         if (ret)
                 goto out_put_keys;
 
@@ -3306,7 +3306,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
          * did a lock-steal - fix up the PI-state in that case.
          */
         if (q.pi_state && (q.pi_state->owner != current)) {
-                spin_lock(q.lock_ptr);
+                raw_spin_lock(q.lock_ptr);
                 ret = fixup_pi_state_owner(uaddr2, &q, current);
                 if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
                         pi_state = q.pi_state;
@@ -3317,7 +3317,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                          * the requeue_pi() code acquired for us.
                          */
                         put_pi_state(q.pi_state);
-                        spin_unlock(q.lock_ptr);
+                        raw_spin_unlock(q.lock_ptr);
                 }
         } else {
                 struct rt_mutex *pi_mutex;
@@ -3331,7 +3331,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                 pi_mutex = &q.pi_state->pi_mutex;
                 ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
 
-                spin_lock(q.lock_ptr);
+                raw_spin_lock(q.lock_ptr);
                 if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
                         ret = 0;
 
@@ -3998,7 +3998,7 @@ static int __init futex_init(void)
         for (i = 0; i < futex_hashsize; i++) {
                 atomic_set(&futex_queues[i].waiters, 0);
                 plist_head_init(&futex_queues[i].chain);
-                spin_lock_init(&futex_queues[i].lock);
+                raw_spin_lock_init(&futex_queues[i].lock);
         }
 
         return 0;
--
2.17.1
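
A note on what the conversion buys: on a PREEMPT_RT kernel a plain
spinlock_t is substituted with a sleeping, rtmutex-based lock and can
therefore take part in priority-inheritance lock chains, while a
raw_spinlock_t always keeps true spinning semantics. Below is a minimal
standalone sketch of the converted pattern; it is illustrative only, not
taken from the patch, and the example_* names are invented:

#include <linux/spinlock.h>

/*
 * Hypothetical bucket mirroring the futex_hash_bucket conversion above.
 * Because the lock is a raw_spinlock_t, it remains a spinning lock even
 * on PREEMPT_RT and cannot become a link in an rtmutex lock chain.
 */
struct example_bucket {
        raw_spinlock_t lock;
        int nr_waiters;
};

static struct example_bucket example_bucket;

static void example_bucket_init(void)
{
        raw_spin_lock_init(&example_bucket.lock);
        example_bucket.nr_waiters = 0;
}

static void example_bucket_add_waiter(void)
{
        /* Raw-spinlock sections must stay short and must not sleep. */
        raw_spin_lock(&example_bucket.lock);
        example_bucket.nr_waiters++;
        raw_spin_unlock(&example_bucket.lock);
}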
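
The double_lock_hb()/double_unlock_hb() hunk also preserves the standard
ABBA-avoidance idiom: when two buckets must be held at once, they are
taken in address order, and SINGLE_DEPTH_NESTING tells lockdep that the
nested acquisition of the same lock class is intentional. A
self-contained sketch of that idiom, again under an invented
example_bucket type rather than the real futex structures:

#include <linux/spinlock.h>

struct example_bucket {
        raw_spinlock_t lock;
};

/*
 * Always lock the lower-addressed bucket first so that two callers
 * handed the same pair in opposite order cannot deadlock (ABBA).
 */
static void example_double_lock(struct example_bucket *b1,
                                struct example_bucket *b2)
{
        if (b1 <= b2) {
                raw_spin_lock(&b1->lock);
                if (b1 < b2)
                        raw_spin_lock_nested(&b2->lock, SINGLE_DEPTH_NESTING);
        } else {
                raw_spin_lock(&b2->lock);
                raw_spin_lock_nested(&b1->lock, SINGLE_DEPTH_NESTING);
        }
}

static void example_double_unlock(struct example_bucket *b1,
                                  struct example_bucket *b2)
{
        raw_spin_unlock(&b1->lock);
        if (b1 != b2)
                raw_spin_unlock(&b2->lock);
}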