locking/rtmutex: Add mutex variant for RT
Add the necessary defines, helpers and API functions for replacing struct mutex on a PREEMPT_RT enabled kernel with an rtmutex based variant. No functional change when CONFIG_PREEMPT_RT=n. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Ingo Molnar <mingo@kernel.org> Link: https://lore.kernel.org/r/20210815211305.081517417@linutronix.de
This commit is contained in:
parent
f8635d509d
commit
bb630f9f7a
4 changed files with 187 additions and 16 deletions
|
|
@ -454,3 +454,125 @@ void rt_mutex_debug_task_free(struct task_struct *task)
|
|||
DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PREEMPT_RT
|
||||
/* Mutexes */
|
||||
/*
 * __mutex_rt_init - initialize the rtmutex based struct mutex variant
 * @mutex: mutex to initialize
 * @name:  lock name, used for the lockdep map
 * @key:   lockdep class key for this lock
 */
void __mutex_rt_init(struct mutex *mutex, const char *name,
		     struct lock_class_key *key)
{
	/* Complain (with lock debugging) if @mutex sits in freed memory */
	debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
	/* Register with lockdep; LD_WAIT_SLEEP marks it a sleeping lock */
	lockdep_init_map_wait(&mutex->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL(__mutex_rt_init);
|
||||
|
||||
static __always_inline int __mutex_lock_common(struct mutex *lock,
|
||||
unsigned int state,
|
||||
unsigned int subclass,
|
||||
struct lockdep_map *nest_lock,
|
||||
unsigned long ip)
|
||||
{
|
||||
int ret;
|
||||
|
||||
might_sleep();
|
||||
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
|
||||
ret = __rt_mutex_lock(&lock->rtmutex, state);
|
||||
if (ret)
|
||||
mutex_release(&lock->dep_map, ip);
|
||||
else
|
||||
lock_acquired(&lock->dep_map, ip);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_LOCK_ALLOC
|
||||
/* Lock @lock uninterruptibly, with an explicit lockdep @subclass */
void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);
|
||||
|
||||
/*
 * Lock @lock uninterruptibly, telling lockdep that nesting is serialized
 * by @nest_lock (the caller is expected to hold it).
 */
void __sched _mutex_lock_nest_lock(struct mutex *lock,
				   struct lockdep_map *nest_lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
|
||||
|
||||
/*
 * Like mutex_lock_nested(), but the sleep is interruptible by signals.
 * Returns 0 on success or a negative error code from the rtmutex layer.
 */
int __sched mutex_lock_interruptible_nested(struct mutex *lock,
					    unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
|
||||
|
||||
/*
 * Like mutex_lock_nested(), but the sleep is interruptible by fatal
 * signals only. Returns 0 on success or a negative error code.
 */
int __sched mutex_lock_killable_nested(struct mutex *lock,
				       unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
|
||||
|
||||
/*
 * Lock @lock uninterruptibly with lockdep @subclass, accounting the wait
 * as I/O wait for the scheduler.
 */
void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	/* Mark the task as blocked on I/O for the duration of the wait */
	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
|
||||
|
||||
#else /* CONFIG_DEBUG_LOCK_ALLOC */
|
||||
|
||||
/* Acquire @lock, sleeping uninterruptibly until it is available */
void __sched mutex_lock(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock);
|
||||
|
||||
/*
 * Acquire @lock; the sleep may be interrupted by signals.
 * Returns 0 on success or a negative error code.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_interruptible);
|
||||
|
||||
/*
 * Acquire @lock; the sleep may be interrupted by fatal signals only.
 * Returns 0 on success or a negative error code.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_killable);
|
||||
|
||||
/*
 * Acquire @lock uninterruptibly, accounting the wait as I/O wait for
 * the scheduler.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	/* Mark the task as blocked on I/O for the duration of the wait */
	int token = io_schedule_prepare();

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL(mutex_lock_io);
|
||||
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
|
||||
|
||||
/*
 * mutex_trylock - try to acquire @lock without sleeping
 *
 * Returns the result of __rt_mutex_trylock(): nonzero when the lock was
 * acquired, 0 otherwise. With CONFIG_DEBUG_RT_MUTEXES enabled, calling
 * this from other than task context warns once and fails the attempt.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int locked;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	locked = __rt_mutex_trylock(&lock->rtmutex);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
|
||||
|
||||
/* Release @lock: drop the lockdep annotation, then unlock the rtmutex */
void __sched mutex_unlock(struct mutex *lock)
{
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL(mutex_unlock);
|
||||
|
||||
#endif /* CONFIG_PREEMPT_RT */
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue