/*
 * Source: drivers/md/bcache/closure.c
 * (from firefly-linux-kernel-4.4.55.git; merge tag 'kvm-3.10-2' of
 *  git://git.kernel.org/pub/scm/virt/kvm/kvm)
 */
1 /*
2  * Asynchronous refcounty things
3  *
4  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
5  * Copyright 2012 Google, Inc.
6  */
7
8 #include <linux/debugfs.h>
9 #include <linux/module.h>
10 #include <linux/seq_file.h>
11
12 #include "closure.h"
13
14 void closure_queue(struct closure *cl)
15 {
16         struct workqueue_struct *wq = cl->wq;
17         if (wq) {
18                 INIT_WORK(&cl->work, cl->work.func);
19                 BUG_ON(!queue_work(wq, &cl->work));
20         } else
21                 cl->fn(cl);
22 }
23 EXPORT_SYMBOL_GPL(closure_queue);
24
/*
 * CL_FIELD - switch-case helper: if the closure's runtime type is
 * TYPE_<type>, return a pointer to <field> in the enclosing struct.
 */
#define CL_FIELD(type, field)					\
	case TYPE_ ## type:					\
	return &container_of(cl, struct type, cl)->field

/*
 * Return the closure's embedded wait list, or NULL if this closure
 * type does not carry one.
 */
static struct closure_waitlist *closure_waitlist(struct closure *cl)
{
	switch (cl->type) {
		CL_FIELD(closure_with_waitlist, wait);
		CL_FIELD(closure_with_waitlist_and_timer, wait);
	default:
		return NULL;
	}
}
38
/*
 * Return the closure's embedded timer, or NULL if this closure type
 * does not carry one.
 */
static struct timer_list *closure_timer(struct closure *cl)
{
	switch (cl->type) {
		CL_FIELD(closure_with_timer, timer);
		CL_FIELD(closure_with_waitlist_and_timer, timer);
	default:
		return NULL;
	}
}
48
/*
 * Common tail of closure_put()/closure_sub(): @flags is the value of
 * cl->remaining *after* the atomic decrement/subtract.
 *
 * When the refcount portion reaches zero, either requeue the closure
 * (it has a continuation function and is not being destroyed) or tear
 * it down: wake any waiters, run the destructor, and drop the ref on
 * the parent closure.
 */
static inline void closure_put_after_sub(struct closure *cl, int flags)
{
	int r = flags & CLOSURE_REMAINING_MASK;

	/* Guard bits catch over/underflow of the refcount field. */
	BUG_ON(flags & CLOSURE_GUARD_MASK);
	/* At refcount zero only DESTRUCTOR/BLOCKING may still be set. */
	BUG_ON(!r && (flags & ~(CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING)));

	/* Must deliver precisely one wakeup */
	if (r == 1 && (flags & CLOSURE_SLEEPING))
		wake_up_process(cl->task);

	if (!r) {
		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			/* CLOSURE_BLOCKING might be set - clear it */
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
		} else {
			struct closure *parent = cl->parent;
			struct closure_waitlist *wait = closure_waitlist(cl);

			closure_debug_destroy(cl);

			/* -1 marks the closure unlocked/free (see closure_trylock). */
			atomic_set(&cl->remaining, -1);

			if (wait)
				closure_wake_up(wait);

			/* With CLOSURE_DESTRUCTOR set, fn is the destructor. */
			if (cl->fn)
				cl->fn(cl);

			if (parent)
				closure_put(parent);
		}
	}
}
85
86 /* For clearing flags with the same atomic op as a put */
87 void closure_sub(struct closure *cl, int v)
88 {
89         closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
90 }
91 EXPORT_SYMBOL_GPL(closure_sub);
92
93 void closure_put(struct closure *cl)
94 {
95         closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
96 }
97 EXPORT_SYMBOL_GPL(closure_put);
98
/*
 * Record (debug builds only) the call site that put this closure on a
 * wait list; callers pass 0 when taking the closure back off.
 */
static void set_waiting(struct closure *cl, unsigned long f)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
	cl->waiting_on = f;
#endif
}
105
106 void __closure_wake_up(struct closure_waitlist *wait_list)
107 {
108         struct llist_node *list;
109         struct closure *cl;
110         struct llist_node *reverse = NULL;
111
112         list = llist_del_all(&wait_list->list);
113
114         /* We first reverse the list to preserve FIFO ordering and fairness */
115
116         while (list) {
117                 struct llist_node *t = list;
118                 list = llist_next(list);
119
120                 t->next = reverse;
121                 reverse = t;
122         }
123
124         /* Then do the wakeups */
125
126         while (reverse) {
127                 cl = container_of(reverse, struct closure, list);
128                 reverse = llist_next(reverse);
129
130                 set_waiting(cl, 0);
131                 closure_sub(cl, CLOSURE_WAITING + 1);
132         }
133 }
134 EXPORT_SYMBOL_GPL(__closure_wake_up);
135
/*
 * Put @cl on @list unless it is already on a wait list.
 *
 * Takes a ref on the closure (folded into the CLOSURE_WAITING add);
 * __closure_wake_up() drops it. Returns false if the closure was
 * already waiting.
 */
bool closure_wait(struct closure_waitlist *list, struct closure *cl)
{
	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
		return false;

	set_waiting(cl, _RET_IP_);
	/* CLOSURE_WAITING + 1: set the flag and take a ref in one op. */
	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
	llist_add(&cl->list, &list->list);

	return true;
}
EXPORT_SYMBOL_GPL(closure_wait);
148
/**
 * closure_sync() - sleep until a closure has nothing left to wait on
 *
 * Sleeps until the refcount hits 1 - the thread that's running the closure owns
 * the last refcount.
 */
void closure_sync(struct closure *cl)
{
	while (1) {
		/* Flag ourselves as sleeping before re-checking remaining. */
		__closure_start_sleep(cl);
		closure_set_ret_ip(cl);

		if ((atomic_read(&cl->remaining) &
		     CLOSURE_REMAINING_MASK) == 1)
			break;

		schedule();
	}

	__closure_end_sleep(cl);
}
EXPORT_SYMBOL_GPL(closure_sync);
171
/**
 * closure_trylock() - try to acquire the closure, without waiting
 * @cl:		closure to lock
 *
 * Returns true if the closure was successfully locked.
 */
bool closure_trylock(struct closure *cl, struct closure *parent)
{
	/* A free (unlocked) closure has remaining == -1. */
	if (atomic_cmpxchg(&cl->remaining, -1,
			   CLOSURE_REMAINING_INITIALIZER) != -1)
		return false;

	closure_set_ret_ip(cl);

	smp_mb();
	cl->parent = parent;
	if (parent)
		closure_get(parent);

	closure_debug_create(cl);
	return true;
}
EXPORT_SYMBOL_GPL(closure_trylock);
195
/*
 * Acquire @cl, sleeping on @wait_list until it becomes free
 * (remaining == -1).
 */
void __closure_lock(struct closure *cl, struct closure *parent,
		    struct closure_waitlist *wait_list)
{
	struct closure wait;
	closure_init_stack(&wait);

	while (1) {
		if (closure_trylock(cl, parent))
			return;

		closure_wait_event_sync(wait_list, &wait,
					atomic_read(&cl->remaining) == -1);
	}
}
EXPORT_SYMBOL_GPL(__closure_lock);
211
/*
 * Timer callback: drop the CLOSURE_TIMER flag and the ref that
 * __closure_delay() took.
 */
static void closure_delay_timer_fn(unsigned long data)
{
	struct closure *cl = (struct closure *) data;
	closure_sub(cl, CLOSURE_TIMER + 1);
}
217
218 void do_closure_timer_init(struct closure *cl)
219 {
220         struct timer_list *timer = closure_timer(cl);
221
222         init_timer(timer);
223         timer->data     = (unsigned long) cl;
224         timer->function = closure_delay_timer_fn;
225 }
226 EXPORT_SYMBOL_GPL(do_closure_timer_init);
227
/*
 * Arm @timer to fire after @delay jiffies, taking a ref on @cl (folded
 * into the CLOSURE_TIMER add). Returns false if a delay is already
 * pending on this closure.
 */
bool __closure_delay(struct closure *cl, unsigned long delay,
		     struct timer_list *timer)
{
	if (atomic_read(&cl->remaining) & CLOSURE_TIMER)
		return false;

	BUG_ON(timer_pending(timer));

	timer->expires	= jiffies + delay;

	/* CLOSURE_TIMER + 1: set the flag and take a ref in one op. */
	atomic_add(CLOSURE_TIMER + 1, &cl->remaining);
	add_timer(timer);
	return true;
}
EXPORT_SYMBOL_GPL(__closure_delay);
243
244 void __closure_flush(struct closure *cl, struct timer_list *timer)
245 {
246         if (del_timer(timer))
247                 closure_sub(cl, CLOSURE_TIMER + 1);
248 }
249 EXPORT_SYMBOL_GPL(__closure_flush);
250
251 void __closure_flush_sync(struct closure *cl, struct timer_list *timer)
252 {
253         if (del_timer_sync(timer))
254                 closure_sub(cl, CLOSURE_TIMER + 1);
255 }
256 EXPORT_SYMBOL_GPL(__closure_flush_sync);
257
258 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
259
/* Global registry of live closures, dumped via debugfs below. */
static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);

/*
 * Mark @cl alive and add it to the global registry. BUGs on
 * double-create (magic already CLOSURE_MAGIC_ALIVE).
 */
void closure_debug_create(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_ALIVE;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_add(&cl->all, &closure_list);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL_GPL(closure_debug_create);
275
/*
 * Mark @cl dead and remove it from the global registry. BUGs if the
 * closure was never created or was already destroyed.
 */
void closure_debug_destroy(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_DEAD;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_del(&cl->all);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL_GPL(closure_debug_destroy);
288
/* debugfs dentry for the "closures" file. */
static struct dentry *debug;

/* Local copy of the workqueue-internal accessor for work->data bits. */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

/*
 * seq_file show callback: dump every live closure with its address,
 * callback, parent, refcount, flag letters, and (debug builds) the
 * call site it is waiting from.
 */
static int debug_seq_show(struct seq_file *f, void *data)
{
	struct closure *cl;
	spin_lock_irq(&closure_list_lock);

	list_for_each_entry(cl, &closure_list, all) {
		int r = atomic_read(&cl->remaining);

		seq_printf(f, "%p: %pF -> %pf p %p r %i ",
			   cl, (void *) cl->ip, cl->fn, cl->parent,
			   r & CLOSURE_REMAINING_MASK);

		/* One letter per state flag; "Q" if the work item is queued. */
		seq_printf(f, "%s%s%s%s%s%s\n",
			   test_bit(WORK_STRUCT_PENDING,
				    work_data_bits(&cl->work)) ? "Q" : "",
			   r & CLOSURE_RUNNING  ? "R" : "",
			   r & CLOSURE_BLOCKING ? "B" : "",
			   r & CLOSURE_STACK    ? "S" : "",
			   r & CLOSURE_SLEEPING ? "Sl" : "",
			   r & CLOSURE_TIMER    ? "T" : "");

		if (r & CLOSURE_WAITING)
			seq_printf(f, " W %pF\n",
				   (void *) cl->waiting_on);

		seq_printf(f, "\n");
	}

	spin_unlock_irq(&closure_list_lock);
	return 0;
}
324
/* debugfs open: bind a single-shot seq_file to debug_seq_show(). */
static int debug_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_seq_show, NULL);
}
329
330 static const struct file_operations debug_ops = {
331         .owner          = THIS_MODULE,
332         .open           = debug_seq_open,
333         .read           = seq_read,
334         .release        = single_release
335 };
336
/* Create the read-only debugfs "closures" file at the debugfs root. */
void __init closure_debug_init(void)
{
	debug = debugfs_create_file("closures", 0400, NULL, NULL, &debug_ops);
}
341
342 #endif
343
/* Module metadata. */
MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
MODULE_LICENSE("GPL");