/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

EXPORT_SYMBOL(__init_rwsem);
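
/*
 * Illustrative usage (not part of this file's API surface): callers do
 * not invoke __init_rwsem() directly, but go through the init_rwsem()
 * wrapper, which supplies the lock_class_key, or DECLARE_RWSEM() for a
 * statically allocated semaphore.  The names below are examples only:
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	struct rw_semaphore dynamic_sem;
 *	init_rwsem(&dynamic_sem);
 */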

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	unsigned int flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};

/* Wake types for __rwsem_do_wake().  Note that RWSEM_WAKE_NO_ACTIVE and
 * RWSEM_WAKE_READ_OWNED imply that the spinlock must have been kept held
 * since the rwsem value was observed.
 */
#define RWSEM_WAKE_ANY        0 /* Wake whatever's at head of wait list */
#define RWSEM_WAKE_NO_ACTIVE  1 /* rwsem was observed with no active thread */
#define RWSEM_WAKE_READ_OWNED 2 /* rwsem was observed to be read owned */
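
/*
 * Worked example of the count arithmetic the paths below depend on,
 * assuming the common 32-bit bias values from an arch's asm/rwsem.h
 * (illustrative; the exact constants are per-architecture):
 *
 *	RWSEM_ACTIVE_MASK	0x0000ffff
 *	RWSEM_ACTIVE_BIAS	0x00000001
 *	RWSEM_WAITING_BIAS	(-0x00010000)
 *	RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
 *	RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *
 * RWSEM_WAITING_BIAS is applied once when the wait list goes non-empty,
 * however many waiters queue up behind it.  Sample counts:
 *
 *	0x00000000	unlocked
 *	0x00000003	three readers active, nobody queued
 *	0xffff0001	one writer active (its bias spans both parts),
 *			or one reader active with waiters queued
 *	0xffff0000	no lock holders, but waiters queued
 */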

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wake_type is not RWSEM_WAKE_READ_OWNED
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	signed long oldcount, woken, loop, adjustment;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
		goto readers_only;

	if (wake_type == RWSEM_WAKE_READ_OWNED)
		/* Another active reader was observed, so wakeup is not
		 * likely to succeed.  Save the atomic op.
		 */
		goto out;

	/* There's a writer at the front of the queue - try to grant it the
	 * write lock.  However, we only wake this writer if we can transition
	 * the active part of the count from 0 -> 1.
	 */
	adjustment = RWSEM_ACTIVE_WRITE_BIAS;
	if (waiter->list.next == &sem->wait_list)
		adjustment -= RWSEM_WAITING_BIAS;

 try_again_write:
	oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
	if (oldcount & RWSEM_ACTIVE_MASK)
		/* Someone grabbed the sem already */
		goto undo_write;

	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
	 * It is allocated on the waiter's stack and may become invalid at
	 * any time after that point (due to a wakeup from another source).
	 */
	list_del(&waiter->list);
	tsk = waiter->task;
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	goto out;

 readers_only:
	/* If we come here from up_xxxx(), another thread might have reached
	 * rwsem_down_failed_common() before we acquired the spinlock and
	 * woken up a waiter, making it now active.  We prefer to check for
	 * this first in order to not spend too much time with the spinlock
	 * held if we're not going to be able to wake up readers in the end.
	 *
	 * Note that we do not need to update the rwsem count: any writer
	 * trying to acquire rwsem will run rwsem_down_write_failed() due
	 * to the waiting threads and block trying to acquire the spinlock.
	 *
	 * We use a dummy atomic update in order to acquire the cache line
	 * exclusively since we expect to succeed and run the final rwsem
	 * count adjustment pretty soon.
	 */
	if (wake_type == RWSEM_WAKE_ANY &&
	    (rwsem_atomic_update(0, sem) & RWSEM_ACTIVE_MASK))
		/* Someone grabbed the sem already */
		goto out;

	/* Grant an infinite number of read locks to the readers at the front
	 * of the queue.  Note we increment the 'active part' of the count by
	 * the number of readers before waking any processes up.
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
				    struct rwsem_waiter, list);

	} while (waiter->flags & RWSEM_WAITING_FOR_READ);

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS;
	if (waiter->flags & RWSEM_WAITING_FOR_READ)
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;

	rwsem_atomic_add(adjustment, sem);

	next = sem->wait_list.next;
	for (loop = woken; loop > 0; loop--) {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	}

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;

	/* undo the change to the active count, but check for a transition
	 * 1->0 */
 undo_write:
	if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
		goto out;
	goto try_again_write;
}
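
/*
 * A concrete run of the writer-grant retry above, using the illustrative
 * 32-bit bias values sketched earlier: with the queued writer as the only
 * waiter, adjustment = RWSEM_ACTIVE_WRITE_BIAS - RWSEM_WAITING_BIAS = +1.
 * If a fast-path reader slipped in first, oldcount has an active bit set
 * and we branch to undo_write.  Backing the adjustment out re-checks the
 * active part: if it has meanwhile dropped to zero (the reader already
 * released), we loop to try_again_write instead of leaving a grantable
 * writer asleep.
 */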

/*
 * wait for a lock to be granted
 */
static struct rw_semaphore __sched *
rwsem_down_failed_common(struct rw_semaphore *sem,
			 struct rwsem_waiter *waiter, signed long adjustment)
{
	struct task_struct *tsk = current;
	signed long count;

	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	spin_lock_irq(&sem->wait_lock);
	waiter->task = tsk;
	get_task_struct(tsk);

	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter->list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* if there are no active locks, wake the front queued process(es) up */
	if (!(count & RWSEM_ACTIVE_MASK))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);

	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter->task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

	return sem;
}
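
/*
 * Note on the sleep/wake handshake above: waiter.task doubles as the
 * grant flag.  __rwsem_do_wake() reads the task pointer, issues an
 * smp_mb() and only then zeroes waiter->task, so once the loop above
 * observes NULL (locklessly) the waker is done touching the waiter
 * block and this stack frame can safely unwind.
 */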

/*
 * wait for the read lock to be granted
 */
asmregparm struct rw_semaphore __sched *
rwsem_down_read_failed(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;

	waiter.flags = RWSEM_WAITING_FOR_READ;
	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_READ_BIAS);
	return sem;
}

/*
 * wait for the write lock to be granted
 */
asmregparm struct rw_semaphore __sched *
rwsem_down_write_failed(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;

	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_WRITE_BIAS);
	return sem;
}
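
/*
 * For orientation, a sketch of how an architecture's fast path lands in
 * the functions above (illustrative pseudo-code only; the real
 * asm/rwsem.h fast paths are typically hand-written assembly):
 *
 *	static inline void __down_read(struct rw_semaphore *sem)
 *	{
 *		if (rwsem_atomic_update(RWSEM_ACTIVE_READ_BIAS, sem) < 0)
 *			rwsem_down_read_failed(sem);
 *	}
 *
 * The caller optimistically charges its bias; only if the result shows a
 * writer active or waiters queued does it take the slow path.
 */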

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}

EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);
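
/*
 * Caller-side view of the slow paths exported above (sketch; my_sem is
 * an example name, not an identifier from this file):
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_write(&my_sem);       -- may enter rwsem_down_write_failed()
 *	...modify protected data...
 *	downgrade_write(&my_sem);  -- may enter rwsem_downgrade_wake()
 *	...continue reading it...
 *	up_read(&my_sem);          -- may enter rwsem_wake()
 */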