From: Andi Kleen
Date: Sat, 8 Feb 2014 07:52:03 +0000 (+0100)
Subject: asmlinkage, mutex: Mark __visible
X-Git-Tag: firefly_0821_release~176^2~4223^2~18
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=22d9fd3411c693ccae5f5c2280fb1f9bb106ad4f;p=firefly-linux-kernel-4.4.55.git

asmlinkage, mutex: Mark __visible

Various kernel/locking/mutex.c functions can be called from inline
assembler, so they should all be global and __visible.

Cc: Ingo Molnar
Signed-off-by: Andi Kleen
Link: http://lkml.kernel.org/r/1391845930-28580-7-git-send-email-ak@linux.intel.com
Signed-off-by: H. Peter Anvin
---

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 4dd6e4c219de..adbc0d0f314b 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -67,8 +67,7 @@ EXPORT_SYMBOL(__mutex_init);
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static __used noinline void __sched
-__mutex_lock_slowpath(atomic_t *lock_count);
+__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
 
 /**
  * mutex_lock - acquire the mutex
@@ -225,7 +224,8 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 }
 #endif
 
-static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
+__visible __used noinline
+void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /**
  * mutex_unlock - release the mutex
@@ -746,7 +746,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 /*
  * Release the lock, slowpath:
  */
-static __used noinline void
+__visible void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
 	__mutex_unlock_common_slowpath(lock_count, 1);
@@ -803,7 +803,7 @@ int __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
-static __used noinline void __sched
+__visible void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
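
A note on why __visible is needed at all: the mutex fastpaths are
hand-written inline assembler (on x86, e.g. the fastpath macros in
arch/x86/include/asm/mutex_64.h, which emit a literal "call" to the
slowpath by symbol name). The compiler never sees that call in its
call graph, so a function referenced only this way looks dead to it.
Below is a minimal user-space sketch of the failure mode, not kernel
code: the names my_fastpath and my_slowpath are invented for
illustration, GCC on x86-64 is assumed, and __visible is spelled out
the way the kernel's compiler headers define it.

/*
 * Minimal user-space sketch (not kernel code) of the problem this
 * patch addresses.  Assumptions: GCC on x86-64; my_slowpath and
 * my_fastpath are hypothetical names; __visible is written out the
 * way the kernel defines it for whole-program/LTO builds.
 */
#include <stdio.h>

#define __visible __attribute__((externally_visible))

/*
 * Referenced only by name inside the asm string below.  Were this
 * "static" (and not __used), the compiler would see no caller and
 * could drop it; under -flto/-fwhole-program it may localize and
 * rename even a global symbol, so the hand-written "call" would
 * fail to link either way.  externally_visible pins the symbol.
 */
__visible void my_slowpath(void)
{
	puts("slowpath taken");
}

void my_fastpath(void)
{
	/*
	 * A call the compiler cannot see in its call graph, like the
	 * x86 mutex fastpath macros.  The clobber list must name
	 * every register a C call may trash (SSE registers omitted
	 * here since the callee uses none).  Real user-space code
	 * would also want -mno-red-zone, as the "call" writes below
	 * %rsp; the kernel already builds with that flag, and this
	 * function keeps nothing in the red zone anyway.
	 */
	asm volatile("call my_slowpath"
		     : /* no outputs */
		     : /* no inputs */
		     : "rax", "rcx", "rdx", "rsi", "rdi",
		       "r8", "r9", "r10", "r11", "memory", "cc");
}

int main(void)
{
	my_fastpath();
	return 0;
}

To reproduce the effect the patch guards against, build the sketch
with whole-program optimization, e.g. "gcc -O2 -fwhole-program
sketch.c": with the attribute in place the symbol survives; without
it the compiler may discard or localize my_slowpath, leaving the
hand-written call unresolved at link time.  The kernel hits the same
issue under LTO, which is why the slowpaths above are made global and
marked __visible rather than static __used.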