
Commit c5b6aba

Maxim Levitsky authored and bonzini committed
locking/mutex: implement mutex_trylock_nest_lock
Despite the fact that several lockdep-related checks are skipped when
calling trylock* versions of the locking primitives, for example
mutex_trylock, each time the mutex is acquired a held_lock is still
placed onto the lockdep stack by __lock_acquire(), which is called
regardless of whether the trylock* or regular locking API was used.

This means that if the caller successfully acquires more than
MAX_LOCK_DEPTH locks of the same class, even when using mutex_trylock,
lockdep will still complain that the maximum depth of the held lock
stack has been reached and will disable itself.

For example, the following error currently occurs in the ARM version
of KVM once the code tries to lock all vCPUs of a VM configured with
more than MAX_LOCK_DEPTH vCPUs, a situation that can easily happen on
modern systems, where having more than 48 CPUs is common and it is
also common to run VMs with vCPU counts approaching that number:

[ 328.171264] BUG: MAX_LOCK_DEPTH too low!
[ 328.175227] turning off the locking correctness validator.
[ 328.180726] Please attach the output of /proc/lock_stat to the bug report
[ 328.187531] depth: 48 max: 48!
[ 328.190678] 48 locks held by qemu-kvm/11664:
[ 328.194957] #0: ffff800086de5ba0 (&kvm->lock){+.+.}-{3:3}, at: kvm_ioctl_create_device+0x174/0x5b0
[ 328.204048] #1: ffff0800e78800b8 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0
[ 328.212521] #2: ffff07ffeee51e98 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0
[ 328.220991] #3: ffff0800dc7d80b8 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0
[ 328.229463] #4: ffff07ffe0c980b8 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0
[ 328.237934] #5: ffff0800a3883c78 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0
[ 328.246405] #6: ffff07fffbe480b8 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0

Luckily, in all instances that require locking all vCPUs, the
'kvm->lock' is taken a priori, and that fact makes it possible to use
the little-known lockdep feature called a 'nest_lock' to avoid this
warning and the subsequent lockdep self-disablement.

When a nest_lock is provided to lockdep's lock_acquire(), lockdep
detects that the top of the held-lock stack already contains a lock of
the same class and increments its reference counter instead of pushing
a new held_lock item onto that stack. See __lock_acquire() for more
information.

Signed-off-by: Maxim Levitsky <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Message-ID: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
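For context, here is a sketch of how a caller is expected to use the new
API, modeled on the lock_all_vcpus() pattern visible in the log above.
The helper body, the unlock path, and the -EINTR return value are
illustrative assumptions, not the actual KVM code:

/*
 * Illustrative sketch only (not the actual KVM code): acquiring every
 * vcpu->mutex while kvm->lock is already held.  Passing &kvm->lock as
 * the nest lock makes lockdep fold the repeated vcpu->mutex
 * acquisitions into a single held_lock entry with a reference count,
 * so the held-lock stack never exceeds MAX_LOCK_DEPTH.
 */
static int lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i, j;

	lockdep_assert_held(&kvm->lock);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock_nest_lock(&vcpu->mutex, &kvm->lock))
			goto out_unlock;
	}
	return 0;

out_unlock:
	/* Drop the mutexes taken so far, in order, stopping at vCPU i. */
	kvm_for_each_vcpu(j, vcpu, kvm) {
		if (j == i)
			break;
		mutex_unlock(&vcpu->mutex);
	}
	return -EINTR;
}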
1 parent e9f1703 commit c5b6aba

File tree: 2 files changed (+26, -3 lines)

include/linux/mutex.h

Lines changed: 15 additions & 0 deletions

@@ -193,7 +193,22 @@ extern void mutex_lock_io(struct mutex *lock);
  *
  * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
  */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+
+#define mutex_trylock_nest_lock(lock, nest_lock)			\
+(									\
+	typecheck(struct lockdep_map *, &(nest_lock)->dep_map),		\
+	_mutex_trylock_nest_lock(lock, &(nest_lock)->dep_map)		\
+)
+
+#define mutex_trylock(lock) _mutex_trylock_nest_lock(lock, NULL)
+#else
 extern int mutex_trylock(struct mutex *lock);
+#define mutex_trylock_nest_lock(lock, nest_lock) mutex_trylock(lock)
+#endif
+
 extern void mutex_unlock(struct mutex *lock);
 
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
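The mutex_trylock_nest_lock() macro pairs typecheck() with the comma
operator: passing a nest_lock that lacks a suitable dep_map fails at
compile time, while the construct as a whole still evaluates to
_mutex_trylock_nest_lock()'s return value. Below is a minimal userspace
sketch of that pattern. typecheck() follows include/linux/typecheck.h
in spirit; the lock types and the _trylock_nest_lock() helper are made
up for illustration (relies on the GCC/Clang statement-expression and
typeof extensions):

#include <stdio.h>

/*
 * Userspace re-creation of the kernel's typecheck() macro.  Comparing
 * the addresses of two dummy variables makes the compiler warn (or
 * fail with -Werror) when the types differ; the trailing 1 gives the
 * construct a value so it can sit on the left of a comma operator.
 */
#define typecheck(type, x)			\
({						\
	type __dummy;				\
	typeof(x) __dummy2;			\
	(void)(&__dummy == &__dummy2);		\
	1;					\
})

struct lockdep_map { int dummy; };
struct outer_lock { struct lockdep_map dep_map; };

static int _trylock_nest_lock(struct lockdep_map *nest_lock)
{
	return nest_lock != NULL;	/* stand-in for the real trylock */
}

/* Same shape as mutex_trylock_nest_lock(): typecheck, then call. */
#define trylock_nest_lock(nest_lock)					\
(									\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map),		\
	_trylock_nest_lock(&(nest_lock)->dep_map)			\
)

int main(void)
{
	struct outer_lock outer;

	printf("%d\n", trylock_nest_lock(&outer));	/* prints 1 */
	return 0;
}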

kernel/locking/mutex.c

Lines changed: 11 additions & 3 deletions

@@ -1062,6 +1062,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
 
 #endif
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /**
  * mutex_trylock - try to acquire the mutex, without waiting
  * @lock: the mutex to be acquired
@@ -1077,18 +1078,25 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
  * mutex must be released by the same task that acquired it.
  */
 int __sched mutex_trylock(struct mutex *lock)
+{
+	MUTEX_WARN_ON(lock->magic != lock);
+	return __mutex_trylock(lock);
+}
+EXPORT_SYMBOL(mutex_trylock);
+#else
+int __sched _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock)
 {
 	bool locked;
 
 	MUTEX_WARN_ON(lock->magic != lock);
-
 	locked = __mutex_trylock(lock);
 	if (locked)
-		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+		mutex_acquire_nest(&lock->dep_map, 0, 1, nest_lock, _RET_IP_);
 
 	return locked;
 }
-EXPORT_SYMBOL(mutex_trylock);
+EXPORT_SYMBOL(_mutex_trylock_nest_lock);
+#endif
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 int __sched
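The commit message's closing paragraph points at __lock_acquire(); the
short-circuit it describes looks roughly like the following, abridged
and paraphrased from kernel/locking/lockdep.c (the exact code differs
between kernel versions):

	/* Abridged/paraphrased from __lock_acquire(); not verbatim. */
	if (depth) {
		hlock = curr->held_locks + depth - 1;
		if (hlock->class_idx == class_idx && nest_lock) {
			/*
			 * Same class already on top of the held-lock
			 * stack and a nest lock was given: bump the
			 * reference count instead of pushing a new
			 * held_lock entry.
			 */
			if (!hlock->references)
				hlock->references++;
			hlock->references += references;

			/* Overflow of the reference counter. */
			if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
				return 0;

			return 2;
		}
	}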
