locking/mutex: Make contention tracepoints more consistent wrt adaptive spinning

Have the trace_contention_*() tracepoints consistently include
adaptive spinning. In order to differentiate between the spinning and
non-spinning states, add LCB_F_MUTEX and combine it with LCB_F_SPIN.

The consequence is that a mutex contention can now trigger multiple
_begin() tracepoints before triggering an _end().

Additionally, this fixes one path where mutex would trigger _end()
without ever seeing a _begin().
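
For consumers of these tracepoints, the flags word now tells the two
phases apart: LCB_F_MUTEX | LCB_F_SPIN for the optimistic-spin attempts
and plain LCB_F_MUTEX for the sleeping wait. A minimal userspace sketch
of decoding that, mirroring the LCB_F_* values from
include/trace/events/lock.h (the helper itself is illustrative and not
part of this patch):

  /* Illustrative only: mirrors the LCB_F_* bits from
   * include/trace/events/lock.h; not part of the kernel patch. */
  #include <stdio.h>

  #define LCB_F_SPIN	(1U << 0)
  #define LCB_F_MUTEX	(1U << 5)

  static const char *mutex_phase(unsigned int flags)
  {
  	if (!(flags & LCB_F_MUTEX))
  		return "not a mutex";
  	return (flags & LCB_F_SPIN) ? "optimistic spin" : "sleeping wait";
  }

  int main(void)
  {
  	/* One contended mutex_lock() may now emit, in order:
  	 *   contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN)
  	 *   contention_begin(lock, LCB_F_MUTEX)
  	 *   ... possibly further begin events while waiting ...
  	 *   contention_end(lock, ret)
  	 */
  	printf("%s\n", mutex_phase(LCB_F_MUTEX | LCB_F_SPIN));	/* optimistic spin */
  	printf("%s\n", mutex_phase(LCB_F_MUTEX));		/* sleeping wait */
  	return 0;
  }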

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Author: Peter Zijlstra <peterz@infradead.org>
Date:   2022-03-30 13:06:54 +02:00
commit dc1f7893a7 (parent ee042be16c)
2 changed files, 15 insertions(+), 5 deletions(-)

diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h

@@ -14,6 +14,7 @@
 #define LCB_F_WRITE	(1U << 2)
 #define LCB_F_RT	(1U << 3)
 #define LCB_F_PERCPU	(1U << 4)
+#define LCB_F_MUTEX	(1U << 5)
 
 #ifdef CONFIG_LOCKDEP
@@ -113,7 +114,8 @@ TRACE_EVENT(contention_begin,
 			{ LCB_F_READ,		"READ" },
 			{ LCB_F_WRITE,		"WRITE" },
 			{ LCB_F_RT,		"RT" },
-			{ LCB_F_PERCPU,		"PERCPU" }
+			{ LCB_F_PERCPU,		"PERCPU" },
+			{ LCB_F_MUTEX,		"MUTEX" }
 		  ))
 );

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c

@@ -602,12 +602,14 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 	preempt_disable();
 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
+	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
 	if (__mutex_trylock(lock) ||
 	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
 		/* got the lock, yay! */
 		lock_acquired(&lock->dep_map, ip);
 		if (ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
+		trace_contention_end(lock, 0);
 		preempt_enable();
 		return 0;
 	}
@@ -644,7 +646,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 	}
 
 	set_current_state(state);
-	trace_contention_begin(lock, 0);
+	trace_contention_begin(lock, LCB_F_MUTEX);
 	for (;;) {
 		bool first;
@@ -684,10 +686,16 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 		 * state back to RUNNING and fall through the next schedule(),
 		 * or we must see its unlock and acquire.
 		 */
-		if (__mutex_trylock_or_handoff(lock, first) ||
-		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
+		if (__mutex_trylock_or_handoff(lock, first))
 			break;
 
+		if (first) {
+			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
+			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
+				break;
+			trace_contention_begin(lock, LCB_F_MUTEX);
+		}
+
 		raw_spin_lock(&lock->wait_lock);
 	}
 	raw_spin_lock(&lock->wait_lock);
@@ -723,8 +731,8 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 err:
 	__set_current_state(TASK_RUNNING);
 	__mutex_remove_waiter(lock, &waiter);
+	trace_contention_end(lock, ret);
 err_early_kill:
-	trace_contention_end(lock, ret);
 	raw_spin_unlock(&lock->wait_lock);
 	debug_mutex_free_waiter(&waiter);
 	mutex_release(&lock->dep_map, ip);
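
Because a single contended acquisition can now fire several _begin()
events before its one _end(), tooling that measures contention time
should treat a repeated _begin() for the same lock as a phase change
rather than a new contention. A rough consumer-side sketch of that
bookkeeping (the names and the one-slot-per-task simplification are
hypothetical; real tools would key a map by task and lock address):

  /* Illustrative consumer-side pairing of contention_begin/_end events;
   * not part of the kernel patch. */
  #include <stdbool.h>

  struct contention_slot {
  	void *lock;			/* lock address from the tracepoint */
  	unsigned long long t_begin;	/* timestamp of the FIRST begin */
  	unsigned int flags;		/* flags from the most recent begin */
  	bool active;
  };

  static void on_contention_begin(struct contention_slot *slot, void *lock,
  				  unsigned int flags, unsigned long long now)
  {
  	if (slot->active && slot->lock == lock) {
  		/* Second begin for the same acquisition (e.g. MUTEX|SPIN
  		 * followed by MUTEX): keep the original start time and
  		 * only record the new phase. */
  		slot->flags = flags;
  		return;
  	}
  	slot->lock = lock;
  	slot->t_begin = now;
  	slot->flags = flags;
  	slot->active = true;
  }

  static unsigned long long on_contention_end(struct contention_slot *slot,
  					      void *lock, unsigned long long now)
  {
  	if (!slot->active || slot->lock != lock)
  		return 0;			/* unmatched end: ignore */
  	slot->active = false;
  	return now - slot->t_begin;		/* total contention time */
  }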