From d98c7d8371e95d28a96e6b0364cf896cd2de136f Mon Sep 17 00:00:00 2001
Message-Id: <d98c7d8371e95d28a96e6b0364cf896cd2de136f.1592846147.git.zanussi@kernel.org>
In-Reply-To: <07cd0dbc80b976663c80755496a03f288decfe5a.1592846146.git.zanussi@kernel.org>
References: <07cd0dbc80b976663c80755496a03f288decfe5a.1592846146.git.zanussi@kernel.org>
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 16:36:39 +0200
Subject: [PATCH 141/330] rtmutex: export lockdep-less version of rt_mutex's
 lock, trylock and unlock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.127-rt55.tar.xz

Required for lock implementation ontop of rtmutex.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/locking/rtmutex.c        | 67 +++++++++++++++++++++------------
 kernel/locking/rtmutex_common.h |  3 ++
 2 files changed, 46 insertions(+), 24 deletions(-)

diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 921345c31161..d732976d0f05 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1494,12 +1494,33 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
 	rt_mutex_postunlock(&wake_q);
 }
 
-static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
+int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
 {
 	might_sleep();
+	return rt_mutex_fastlock(lock, state, rt_mutex_slowlock);
+}
+
+/**
+ * rt_mutex_lock_state - lock a rt_mutex with a given state
+ *
+ * @lock:	The rt_mutex to be locked
+ * @state:	The state to set when blocking on the rt_mutex
+ */
+static inline int __sched rt_mutex_lock_state(struct rt_mutex *lock,
+					      unsigned int subclass, int state)
+{
+	int ret;
 
 	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+	ret = __rt_mutex_lock_state(lock, state);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+
+static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
+{
+	rt_mutex_lock_state(lock, subclass, TASK_UNINTERRUPTIBLE);
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -1540,16 +1561,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
  */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
-	int ret;
-
-	might_sleep();
-
-	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
-	if (ret)
-		mutex_release(&lock->dep_map, 1, _RET_IP_);
-
-	return ret;
+	return rt_mutex_lock_state(lock, 0, TASK_INTERRUPTIBLE);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
@@ -1575,13 +1587,10 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
  * Returns:
  *  0          on success
  * -EINTR      when interrupted by a signal
- * -EDEADLK    when the lock would deadlock (when deadlock detection is on)
  */
 int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
 {
-	might_sleep();
-
-	return rt_mutex_fastlock(lock, TASK_KILLABLE, rt_mutex_slowlock);
+	return rt_mutex_lock_state(lock, 0, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
 
@@ -1616,6 +1625,18 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
 
+int __sched __rt_mutex_trylock(struct rt_mutex *lock)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (WARN_ON_ONCE(in_irq() || in_nmi()))
+#else
+	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
+#endif
+		return 0;
+
+	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+}
+
 /**
  * rt_mutex_trylock - try to lock a rt_mutex
  *
@@ -1631,14 +1652,7 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
 	int ret;
 
-#ifdef CONFIG_PREEMPT_RT_FULL
-	if (WARN_ON_ONCE(in_irq() || in_nmi()))
-#else
-	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
-#endif
-		return 0;
-
-	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	ret = __rt_mutex_trylock(lock);
 	if (ret)
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 
@@ -1646,6 +1660,11 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
 }
 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 
+void __sched __rt_mutex_unlock(struct rt_mutex *lock)
+{
+	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
+}
+
 /**
  * rt_mutex_unlock - unlock a rt_mutex
  *
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index cb9815f0c766..5955ad2aa2a8 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -162,6 +162,9 @@ extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
 extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
 
 /* RW semaphore special interface */
+extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
+extern int __rt_mutex_trylock(struct rt_mutex *lock);
+extern void __rt_mutex_unlock(struct rt_mutex *lock);
 
 int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
 				     struct hrtimer_sleeper *timeout,
 				     enum rtmutex_chainwalk chwalk,
-- 
2.17.1