From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 12 Oct 2017 17:34:38 +0200
Subject: rtmutex: add ww_mutex addon for mutex-rt
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
Git-commit: da77ceac3d20f27310a07a7c346a4ee6b40d6c28
Patch-mainline: Queued in subsystem maintainer repository
References: SLE Realtime Extension

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Daniel Wagner <dwagner@suse.de>
---
 kernel/locking/rtmutex.c        |  269 ++++++++++++++++++++++++++++++++++++++--
 kernel/locking/rtmutex_common.h |    2 
 kernel/locking/rwsem-rt.c       |    2 
 3 files changed, 259 insertions(+), 14 deletions(-)

--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -24,6 +24,7 @@
 #include <linux/sched/wake_q.h>
 #include <linux/sched/debug.h>
 #include <linux/timer.h>
+#include <linux/ww_mutex.h>
 
 #include "rtmutex_common.h"
 
@@ -1243,6 +1244,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
 
 #endif /* PREEMPT_RT */
 
+#ifdef CONFIG_PREEMPT_RT
+static inline int __sched
+__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
+	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
+
+	if (!hold_ctx)
+		return 0;
+
+	if (unlikely(ctx == hold_ctx))
+		return -EALREADY;
+
+	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
+	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
+#ifdef CONFIG_DEBUG_MUTEXES
+		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
+		ctx->contending_lock = ww;
+#endif
+		return -EDEADLK;
+	}
+
+	return 0;
+}
+#else
+static inline int __sched
+__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	BUG();
+	return 0;
+}
+
+#endif
+
 static inline int
 try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 		     struct rt_mutex_waiter *waiter)
@@ -1521,7 +1556,8 @@ void rt_mutex_init_waiter(struct rt_mute
 static int __sched
 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		    struct hrtimer_sleeper *timeout,
-		    struct rt_mutex_waiter *waiter)
+		    struct rt_mutex_waiter *waiter,
+		    struct ww_acquire_ctx *ww_ctx)
 {
 	int ret = 0;
 
@@ -1539,6 +1575,12 @@ static int __sched
 			break;
 		}
 
+		if (ww_ctx && ww_ctx->acquired > 0) {
+			ret = __mutex_lock_check_stamp(lock, ww_ctx);
+			if (ret)
+				break;
+		}
+
 		raw_spin_unlock_irq(&lock->wait_lock);
 
 		debug_rt_mutex_print_deadlock(waiter);
@@ -1573,16 +1615,106 @@ static void rt_mutex_handle_deadlock(int
 	}
 }
 
+static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
+						   struct ww_acquire_ctx *ww_ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	/*
+	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
+	 * but released with a normal mutex_unlock in this call.
+	 *
+	 * This should never happen, always use ww_mutex_unlock.
+	 */
+	DEBUG_LOCKS_WARN_ON(ww->ctx);
+
+	/*
+	 * Not quite done after calling ww_acquire_done() ?
+	 */
+	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
+
+	if (ww_ctx->contending_lock) {
+		/*
+		 * After -EDEADLK you tried to
+		 * acquire a different ww_mutex? Bad!
+		 */
+		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
+
+		/*
+		 * You called ww_mutex_lock after receiving -EDEADLK,
+		 * but 'forgot' to unlock everything else first?
+		 */
+		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
+		ww_ctx->contending_lock = NULL;
+	}
+
+	/*
+	 * Naughty, using a different class will lead to undefined behavior!
+	 */
+	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
+#endif
+	ww_ctx->acquired++;
+}
+
+#ifdef CONFIG_PREEMPT_RT
+static void ww_mutex_account_lock(struct rt_mutex *lock,
+				  struct ww_acquire_ctx *ww_ctx)
+{
+	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
+	struct rt_mutex_waiter *waiter, *n;
+
+	/*
+	 * This branch gets optimized out for the common case,
+	 * and is only important for ww_mutex_lock.
+	 */
+	ww_mutex_lock_acquired(ww, ww_ctx);
+	ww->ctx = ww_ctx;
+
+	/*
+	 * Give any possible sleeping processes the chance to wake up,
+	 * so they can recheck if they have to back off.
+	 */
+	rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters.rb_root,
+					     tree_entry) {
+		/* XXX debug rt mutex waiter wakeup */
+
+		BUG_ON(waiter->lock != lock);
+		rt_mutex_wake_waiter(waiter);
+	}
+}
+
+#else
+
+static void ww_mutex_account_lock(struct rt_mutex *lock,
+				  struct ww_acquire_ctx *ww_ctx)
+{
+	BUG();
+}
+#endif
+
 int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
 				     struct hrtimer_sleeper *timeout,
 				     enum rtmutex_chainwalk chwalk,
+				     struct ww_acquire_ctx *ww_ctx,
 				     struct rt_mutex_waiter *waiter)
 {
 	int ret;
 
+#ifdef CONFIG_PREEMPT_RT
+	if (ww_ctx) {
+		struct ww_mutex *ww;
+
+		ww = container_of(lock, struct ww_mutex, base.lock);
+		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
+			return -EALREADY;
+	}
+#endif
+
 	/* Try to acquire the lock again: */
-	if (try_to_take_rt_mutex(lock, current, NULL))
+	if (try_to_take_rt_mutex(lock, current, NULL)) {
+		if (ww_ctx)
+			ww_mutex_account_lock(lock, ww_ctx);
 		return 0;
+	}
 
 	set_current_state(state);
 
@@ -1592,14 +1724,24 @@ int __sched rt_mutex_slowlock_locked(str
 
 	ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
 
-	if (likely(!ret))
+	if (likely(!ret)) {
 		/* sleep on the mutex */
-		ret = __rt_mutex_slowlock(lock, state, timeout, waiter);
+		ret = __rt_mutex_slowlock(lock, state, timeout, waiter,
+					  ww_ctx);
+	} else if (ww_ctx) {
+		/* ww_mutex received EDEADLK, let it become EALREADY */
+		ret = __mutex_lock_check_stamp(lock, ww_ctx);
+		BUG_ON(!ret);
+	}
 
 	if (unlikely(ret)) {
 		__set_current_state(TASK_RUNNING);
 		remove_waiter(lock, waiter);
-		rt_mutex_handle_deadlock(ret, chwalk, waiter);
+		/* ww_mutex wants to report EDEADLK/EALREADY, let it */
+		if (!ww_ctx)
+			rt_mutex_handle_deadlock(ret, chwalk, waiter);
+	} else if (ww_ctx) {
+		ww_mutex_account_lock(lock, ww_ctx);
 	}
 
 	/*
@@ -1616,7 +1758,8 @@ int __sched rt_mutex_slowlock_locked(str
 static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  struct hrtimer_sleeper *timeout,
-		  enum rtmutex_chainwalk chwalk)
+		  enum rtmutex_chainwalk chwalk,
+		  struct ww_acquire_ctx *ww_ctx)
 {
 	struct rt_mutex_waiter waiter;
 	unsigned long flags;
@@ -1634,7 +1777,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
 	 */
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
-	ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, &waiter);
+	ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx,
+				       &waiter);
 
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
@@ -1764,29 +1908,33 @@ static bool __sched rt_mutex_slowunlock(
  */
 static inline int
 rt_mutex_fastlock(struct rt_mutex *lock, int state,
+		  struct ww_acquire_ctx *ww_ctx,
 		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
-				enum rtmutex_chainwalk chwalk))
+				enum rtmutex_chainwalk chwalk,
+				struct ww_acquire_ctx *ww_ctx))
 {
 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 		return 0;
 
-	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);
 }
 
 static inline int
 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout,
			enum rtmutex_chainwalk chwalk,
+			struct ww_acquire_ctx *ww_ctx,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
-				      enum rtmutex_chainwalk chwalk))
+				      enum rtmutex_chainwalk chwalk,
+				      struct ww_acquire_ctx *ww_ctx))
 {
 	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
 	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 		return 0;
 
-	return slowfn(lock, state, timeout, chwalk);
+	return slowfn(lock, state, timeout, chwalk, ww_ctx);
 }
 
 static inline int
@@ -1831,7 +1979,7 @@ rt_mutex_fastunlock(struct rt_mutex *loc
 int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
 {
 	might_sleep();
-	return rt_mutex_fastlock(lock, state, rt_mutex_slowlock);
+	return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock);
 }
 
 /**
@@ -1951,6 +2099,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
 	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
 				      RT_MUTEX_MIN_CHAINWALK,
+				      NULL,
 				      rt_mutex_slowlock);
 	if (ret)
 		mutex_release(&lock->dep_map, _RET_IP_);
@@ -2320,7 +2469,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
 	raw_spin_lock_irq(&lock->wait_lock);
 	/* sleep on the mutex */
 	set_current_state(TASK_INTERRUPTIBLE);
-	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
+	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
@@ -2390,3 +2539,97 @@ bool rt_mutex_cleanup_proxy_lock(struct
 
 	return cleanup;
 }
+
+static inline int
+ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	unsigned tmp;
+
+	if (ctx->deadlock_inject_countdown-- == 0) {
+		tmp = ctx->deadlock_inject_interval;
+		if (tmp > UINT_MAX/4)
+			tmp = UINT_MAX;
+		else
+			tmp = tmp*2 + tmp + tmp/2;
+
+		ctx->deadlock_inject_interval = tmp;
+		ctx->deadlock_inject_countdown = tmp;
+		ctx->contending_lock = lock;
+
+		ww_mutex_unlock(lock);
+
+		return -EDEADLK;
+	}
+#endif
+
+	return 0;
+}
+
+#ifdef CONFIG_PREEMPT_RT
+int __sched
+ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	int ret;
+
+	might_sleep();
+
+	mutex_acquire_nest(&lock->base.dep_map, 0, 0,
+			   ctx ? &ctx->dep_map : NULL, _RET_IP_);
+	ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0,
+				ctx);
+	if (ret)
+		mutex_release(&lock->base.dep_map, _RET_IP_);
+	else if (!ret && ctx && ctx->acquired > 1)
+		return ww_mutex_deadlock_injection(lock, ctx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
+
+int __sched
+ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	int ret;
+
+	might_sleep();
+
+	mutex_acquire_nest(&lock->base.dep_map, 0, 0,
+			   ctx ? &ctx->dep_map : NULL, _RET_IP_);
+	ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0,
+				ctx);
+	if (ret)
+		mutex_release(&lock->base.dep_map, _RET_IP_);
+	else if (!ret && ctx && ctx->acquired > 1)
+		return ww_mutex_deadlock_injection(lock, ctx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ww_mutex_lock);
+
+void __sched ww_mutex_unlock(struct ww_mutex *lock)
+{
+	/*
+	 * The unlocking fastpath is the 0->1 transition from 'locked'
+	 * into 'unlocked' state:
+	 */
+	if (lock->ctx) {
+#ifdef CONFIG_DEBUG_MUTEXES
+		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
+#endif
+		if (lock->ctx->acquired > 0)
+			lock->ctx->acquired--;
+		lock->ctx = NULL;
+	}
+
+	mutex_release(&lock->base.dep_map, _RET_IP_);
+	__rt_mutex_unlock(&lock->base.lock);
+}
+EXPORT_SYMBOL(ww_mutex_unlock);
+
+int __rt_mutex_owner_current(struct rt_mutex *lock)
+{
+	return rt_mutex_owner(lock) == current;
+}
+EXPORT_SYMBOL(__rt_mutex_owner_current);
+#endif
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -165,6 +165,7 @@ extern void rt_mutex_postunlock(struct w
				struct wake_q_head *wake_sleeper_q);
 
 /* RW semaphore special interface */
+struct ww_acquire_ctx;
 
 extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
 extern int __rt_mutex_trylock(struct rt_mutex *lock);
@@ -172,6 +173,7 @@ extern void __rt_mutex_unlock(struct rt_
 int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
 				     struct hrtimer_sleeper *timeout,
 				     enum rtmutex_chainwalk chwalk,
+				     struct ww_acquire_ctx *ww_ctx,
 				     struct rt_mutex_waiter *waiter);
 void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock,
					  struct rt_mutex_waiter *waiter,
--- a/kernel/locking/rwsem-rt.c
+++ b/kernel/locking/rwsem-rt.c
@@ -131,7 +131,7 @@ static int __sched __down_read_common(st
 	 */
 	rt_mutex_init_waiter(&waiter, false);
 	ret = rt_mutex_slowlock_locked(m, state, NULL, RT_MUTEX_MIN_CHAINWALK,
-				       &waiter);
+				       NULL, &waiter);
 	/*
 	 * The slowlock() above is guaranteed to return with the rtmutex (for
 	 * ret = 0) is now held, so there can't be a writer active. Increment