locking/rwlocks: introduce write_lock_nested
In preparation for converting bit_spin_lock to rwlock in zsmalloc so that multiple writers of zspages can run at the same time, but those zspages are supposed to be different zspage instances; thus, it is not a deadlock. This patch adds write_lock_nested to support the case for LOCKDEP. [minchan@kernel.org: fix write_lock_nested for RT] Link: https://lkml.kernel.org/r/YZfrMTAXV56HFWJY@google.com [bigeasy@linutronix.de: fixup write_lock_nested() implementation] Link: https://lkml.kernel.org/r/20211123170134.y6xb7pmpgdn4m3bn@linutronix.de Link: https://lkml.kernel.org/r/20211115185909.3949505-8-minchan@kernel.org Signed-off-by: Minchan Kim <minchan@kernel.org> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Cc: Mike Galbraith <umgwanakikbuti@gmail.com> Cc: Sergey Senozhatsky <senozhatsky@chromium.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Naresh Kamboju <naresh.kamboju@linaro.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
c4549b8711
commit
4a57d6bbae
|
@ -55,6 +55,12 @@ do { \
|
|||
#define write_lock(lock) _raw_write_lock(lock)
|
||||
#define read_lock(lock) _raw_read_lock(lock)
|
||||
|
||||
/*
 * write_lock_nested(lock, subclass) - acquire a write lock, annotating it
 * with a lockdep @subclass so that taking two write locks of the same lock
 * class (but different instances) does not trigger a false-positive
 * deadlock report.
 *
 * With CONFIG_DEBUG_LOCK_ALLOC the subclass is forwarded to the
 * lockdep-aware _raw_write_lock_nested(); without lockdep the subclass
 * carries no information and this degenerates to a plain write_lock().
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define write_lock_nested(lock, subclass) _raw_write_lock_nested(lock, subclass)
#else
/* No lockdep: the subclass is irrelevant, take the plain write lock. */
#define write_lock_nested(lock, subclass) _raw_write_lock(lock)
#endif
|
||||
|
||||
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
|
||||
|
||||
#define read_lock_irqsave(lock, flags) \
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
|
||||
void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
|
||||
void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
|
||||
void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
|
||||
void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
|
||||
void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
|
||||
void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
|
||||
|
@ -209,6 +210,13 @@ static inline void __raw_write_lock(rwlock_t *lock)
|
|||
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
|
||||
}
|
||||
|
||||
/*
 * Acquire @lock for writing, annotating the acquisition with a lockdep
 * @subclass.  Mirrors __raw_write_lock() (visible in the enclosing hunk),
 * differing only in the subclass passed to rwlock_acquire().
 */
static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass)
{
	preempt_disable();
	/* @subclass distinguishes this acquisition for lockdep nesting checks */
	rwlock_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	/* Try the fast path first; fall back to the contended slow path. */
	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
|
||||
|
||||
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
|
||||
|
||||
static inline void __raw_write_unlock(rwlock_t *lock)
|
||||
|
|
|
@ -28,6 +28,7 @@ extern void rt_read_lock(rwlock_t *rwlock);
|
|||
extern int rt_read_trylock(rwlock_t *rwlock);
|
||||
extern void rt_read_unlock(rwlock_t *rwlock);
|
||||
extern void rt_write_lock(rwlock_t *rwlock);
|
||||
extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass);
|
||||
extern int rt_write_trylock(rwlock_t *rwlock);
|
||||
extern void rt_write_unlock(rwlock_t *rwlock);
|
||||
|
||||
|
@ -83,6 +84,15 @@ static __always_inline void write_lock(rwlock_t *rwlock)
|
|||
rt_write_lock(rwlock);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * PREEMPT_RT variant of write_lock_nested(): forward @subclass to the
 * lockdep-aware rt_write_lock_nested().
 */
static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass)
{
	rt_write_lock_nested(rwlock, subclass);
}
#else
/*
 * No lockdep: the subclass carries no information.  The comma expression
 * evaluates @subclass (avoiding unused-argument warnings / preserving any
 * side effects) and then takes the plain RT write lock.
 */
#define write_lock_nested(lock, subclass) rt_write_lock(((void)(subclass), (lock)))
#endif
|
||||
|
||||
static __always_inline void write_lock_bh(rwlock_t *rwlock)
|
||||
{
|
||||
local_bh_disable();
|
||||
|
|
|
@ -59,6 +59,7 @@
|
|||
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
|
||||
#define _raw_read_lock(lock) __LOCK(lock)
|
||||
#define _raw_write_lock(lock) __LOCK(lock)
|
||||
#define _raw_write_lock_nested(lock, subclass) __LOCK(lock)
|
||||
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
|
||||
#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
|
||||
#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
|
||||
|
|
|
@ -300,6 +300,16 @@ void __lockfunc _raw_write_lock(rwlock_t *lock)
|
|||
__raw_write_lock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_write_lock);
|
||||
|
||||
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Without lockdep there is no __raw_write_lock_nested(); evaluate and
 * discard @subclass via the comma expression and take the plain write lock.
 */
#define __raw_write_lock_nested(lock, subclass) __raw_write_lock(((void)(subclass), (lock)))
#endif

/*
 * Out-of-line entry point for write_lock_nested(): acquire @lock for
 * writing with lockdep @subclass annotation.
 */
void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass)
{
	__raw_write_lock_nested(lock, subclass);
}
EXPORT_SYMBOL(_raw_write_lock_nested);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
|
||||
|
|
|
@ -239,6 +239,18 @@ void __sched rt_write_lock(rwlock_t *rwlock)
|
|||
}
|
||||
EXPORT_SYMBOL(rt_write_lock);
|
||||
|
||||
#ifdef CONFIG_DEBUG_LOCK_ALLOC
|
||||
void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass)
|
||||
{
|
||||
rtlock_might_resched();
|
||||
rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_);
|
||||
rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
|
||||
rcu_read_lock();
|
||||
migrate_disable();
|
||||
}
|
||||
EXPORT_SYMBOL(rt_write_lock_nested);
|
||||
#endif
|
||||
|
||||
void __sched rt_read_unlock(rwlock_t *rwlock)
|
||||
{
|
||||
rwlock_release(&rwlock->dep_map, _RET_IP_);
|
||||
|
|
Loading…
Reference in New Issue