FS-Cache: Handle a new operation submitted against a killed object
fs/fscache/object.c (firefly-linux-kernel-4.4.55.git)
1 /* FS-Cache object state machine handler
2  *
3  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  *
11  * See Documentation/filesystems/caching/object.txt for a description of the
12  * object state machine and the in-kernel representations.
13  */
14
15 #define FSCACHE_DEBUG_LEVEL COOKIE
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/prefetch.h>
19 #include "internal.h"
20
21 static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int);
22 static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int);
23 static const struct fscache_state *fscache_drop_object(struct fscache_object *, int);
24 static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int);
25 static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int);
26 static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int);
27 static const struct fscache_state *fscache_kill_object(struct fscache_object *, int);
28 static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int);
29 static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int);
30 static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
31 static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
32 static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
33
34 #define __STATE_NAME(n) fscache_osm_##n
35 #define STATE(n) (&__STATE_NAME(n))
36
37 /*
38  * Define a work state.  Work states are execution states.  No event processing
39  * is performed by them.  The function attached to a work state returns a
40  * pointer indicating the next state to which the state machine should
41  * transition.  Returning NO_TRANSIT repeats the current state, but goes back
42  * to the scheduler first.
43  */
44 #define WORK_STATE(n, sn, f) \
45         const struct fscache_state __STATE_NAME(n) = {                  \
46                 .name = #n,                                             \
47                 .short_name = sn,                                       \
48                 .work = f                                               \
49         }
50
51 /*
52  * Returns from work states.
53  */
54 #define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })
55
56 #define NO_TRANSIT ((struct fscache_state *)NULL)
57
58 /*
59  * Define a wait state.  Wait states are event processing states.  No execution
60  * is performed by them.  Wait states are just tables of "if event X occurs,
61  * clear it and transition to state Y".  The dispatcher returns to the
62  * scheduler if none of the events in which the wait state has an interest are
63  * currently pending.
64  */
65 #define WAIT_STATE(n, sn, ...) \
66         const struct fscache_state __STATE_NAME(n) = {                  \
67                 .name = #n,                                             \
68                 .short_name = sn,                                       \
69                 .work = NULL,                                           \
70                 .transitions = { __VA_ARGS__, { 0, NULL } }             \
71         }
72
73 #define TRANSIT_TO(state, emask) \
74         { .events = (emask), .transit_to = STATE(state) }
75
76 /*
77  * The object state machine.
78  */
79 static WORK_STATE(INIT_OBJECT,          "INIT", fscache_initialise_object);
80 static WORK_STATE(PARENT_READY,         "PRDY", fscache_parent_ready);
81 static WORK_STATE(ABORT_INIT,           "ABRT", fscache_abort_initialisation);
82 static WORK_STATE(LOOK_UP_OBJECT,       "LOOK", fscache_look_up_object);
83 static WORK_STATE(CREATE_OBJECT,        "CRTO", fscache_look_up_object);
84 static WORK_STATE(OBJECT_AVAILABLE,     "AVBL", fscache_object_available);
85 static WORK_STATE(JUMPSTART_DEPS,       "JUMP", fscache_jumpstart_dependents);
86
87 static WORK_STATE(INVALIDATE_OBJECT,    "INVL", fscache_invalidate_object);
88 static WORK_STATE(UPDATE_OBJECT,        "UPDT", fscache_update_object);
89
90 static WORK_STATE(LOOKUP_FAILURE,       "LCFL", fscache_lookup_failure);
91 static WORK_STATE(KILL_OBJECT,          "KILL", fscache_kill_object);
92 static WORK_STATE(KILL_DEPENDENTS,      "KDEP", fscache_kill_dependents);
93 static WORK_STATE(DROP_OBJECT,          "DROP", fscache_drop_object);
94 static WORK_STATE(OBJECT_DEAD,          "DEAD", (void*)2UL);
95
96 static WAIT_STATE(WAIT_FOR_INIT,        "?INI",
97                   TRANSIT_TO(INIT_OBJECT,       1 << FSCACHE_OBJECT_EV_NEW_CHILD));
98
99 static WAIT_STATE(WAIT_FOR_PARENT,      "?PRN",
100                   TRANSIT_TO(PARENT_READY,      1 << FSCACHE_OBJECT_EV_PARENT_READY));
101
102 static WAIT_STATE(WAIT_FOR_CMD,         "?CMD",
103                   TRANSIT_TO(INVALIDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_INVALIDATE),
104                   TRANSIT_TO(UPDATE_OBJECT,     1 << FSCACHE_OBJECT_EV_UPDATE),
105                   TRANSIT_TO(JUMPSTART_DEPS,    1 << FSCACHE_OBJECT_EV_NEW_CHILD));
106
107 static WAIT_STATE(WAIT_FOR_CLEARANCE,   "?CLR",
108                   TRANSIT_TO(KILL_OBJECT,       1 << FSCACHE_OBJECT_EV_CLEARED));
109
110 /*
111  * Out-of-band event transition tables.  These are for handling unexpected
112  * events, such as an I/O error.  If an OOB event occurs, the state machine
113  * clears and disables the event and forces a transition to the nominated work
114  * state (a currently executing work state will complete first).
115  *
116  * In such a situation, object->state remembers the state the machine should
117  * have been in/gone to and returning NO_TRANSIT returns to that.
118  */
119 static const struct fscache_transition fscache_osm_init_oob[] = {
120            TRANSIT_TO(ABORT_INIT,
121                       (1 << FSCACHE_OBJECT_EV_ERROR) |
122                       (1 << FSCACHE_OBJECT_EV_KILL)),
123            { 0, NULL }
124 };
125
126 static const struct fscache_transition fscache_osm_lookup_oob[] = {
127            TRANSIT_TO(LOOKUP_FAILURE,
128                       (1 << FSCACHE_OBJECT_EV_ERROR) |
129                       (1 << FSCACHE_OBJECT_EV_KILL)),
130            { 0, NULL }
131 };
132
133 static const struct fscache_transition fscache_osm_run_oob[] = {
134            TRANSIT_TO(KILL_OBJECT,
135                       (1 << FSCACHE_OBJECT_EV_ERROR) |
136                       (1 << FSCACHE_OBJECT_EV_KILL)),
137            { 0, NULL }
138 };
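/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * how an OOB event reaches this machinery.  A cache backend that hits a fatal
 * error raises FSCACHE_OBJECT_EV_ERROR on the object; the dispatcher below
 * then forces a transition to the work state nominated in whichever OOB table
 * is currently installed (ABORT_INIT, LOOKUP_FAILURE or KILL_OBJECT).
 */
#if 0	/* example only */
	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
#endif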
139
140 static int  fscache_get_object(struct fscache_object *);
141 static void fscache_put_object(struct fscache_object *);
142 static bool fscache_enqueue_dependents(struct fscache_object *, int);
143 static void fscache_dequeue_object(struct fscache_object *);
144
145 /*
146  * Notify the parent when an operation that we had outstanding against it
147  * completes.
148  */
149 static inline void fscache_done_parent_op(struct fscache_object *object)
150 {
151         struct fscache_object *parent = object->parent;
152
153         _enter("OBJ%x {OBJ%x,%x}",
154                object->debug_id, parent->debug_id, parent->n_ops);
155
156         spin_lock_nested(&parent->lock, 1);
157         parent->n_obj_ops--;
158         parent->n_ops--;
159         if (parent->n_ops == 0)
160                 fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
161         spin_unlock(&parent->lock);
162 }
163
164 /*
165  * Object state machine dispatcher.
166  */
167 static void fscache_object_sm_dispatcher(struct fscache_object *object)
168 {
169         const struct fscache_transition *t;
170         const struct fscache_state *state, *new_state;
171         unsigned long events, event_mask;
172         int event = -1;
173
174         ASSERT(object != NULL);
175
176         _enter("{OBJ%x,%s,%lx}",
177                object->debug_id, object->state->name, object->events);
178
179         event_mask = object->event_mask;
180 restart:
181         object->event_mask = 0; /* Mask normal event handling */
182         state = object->state;
183 restart_masked:
184         events = object->events;
185
186         /* Handle any out-of-band events (typically an error) */
187         if (events & object->oob_event_mask) {
188                 _debug("{OBJ%x} oob %lx",
189                        object->debug_id, events & object->oob_event_mask);
190                 for (t = object->oob_table; t->events; t++) {
191                         if (events & t->events) {
192                                 state = t->transit_to;
193                                 ASSERT(state->work != NULL);
194                                 event = fls(events & t->events) - 1;
195                                 __clear_bit(event, &object->oob_event_mask);
196                                 clear_bit(event, &object->events);
197                                 goto execute_work_state;
198                         }
199                 }
200         }
201
202         /* Wait states are just transition tables */
203         if (!state->work) {
204                 if (events & event_mask) {
205                         for (t = state->transitions; t->events; t++) {
206                                 if (events & t->events) {
207                                         new_state = t->transit_to;
208                                         event = fls(events & t->events) - 1;
209                                         clear_bit(event, &object->events);
210                                         _debug("{OBJ%x} ev %d: %s -> %s",
211                                                object->debug_id, event,
212                                                state->name, new_state->name);
213                                         object->state = state = new_state;
214                                         goto execute_work_state;
215                                 }
216                         }
217
218                         /* The event mask didn't include all the tabled bits */
219                         BUG();
220                 }
221                 /* Randomly woke up */
222                 goto unmask_events;
223         }
224
225 execute_work_state:
226         _debug("{OBJ%x} exec %s", object->debug_id, state->name);
227
228         new_state = state->work(object, event);
229         event = -1;
230         if (new_state == NO_TRANSIT) {
231                 _debug("{OBJ%x} %s notrans", object->debug_id, state->name);
232                 fscache_enqueue_object(object);
233                 event_mask = object->oob_event_mask;
234                 goto unmask_events;
235         }
236
237         _debug("{OBJ%x} %s -> %s",
238                object->debug_id, state->name, new_state->name);
239         object->state = state = new_state;
240
241         if (state->work) {
242                 if (unlikely(state->work == ((void *)2UL))) {
243                         _leave(" [dead]");
244                         return;
245                 }
246                 goto restart_masked;
247         }
248
249         /* Transited to wait state */
250         event_mask = object->oob_event_mask;
251         for (t = state->transitions; t->events; t++)
252                 event_mask |= t->events;
253
254 unmask_events:
255         object->event_mask = event_mask;
256         smp_mb();
257         events = object->events;
258         if (events & event_mask)
259                 goto restart;
260         _leave(" [msk %lx]", event_mask);
261 }
262
263 /*
264  * execute an object
265  */
266 static void fscache_object_work_func(struct work_struct *work)
267 {
268         struct fscache_object *object =
269                 container_of(work, struct fscache_object, work);
270         unsigned long start;
271
272         _enter("{OBJ%x}", object->debug_id);
273
274         start = jiffies;
275         fscache_object_sm_dispatcher(object);
276         fscache_hist(fscache_objs_histogram, start);
277         fscache_put_object(object);
278 }
279
280 /**
281  * fscache_object_init - Initialise a cache object description
282  * @object: Object description
283  * @cookie: Cookie object will be attached to
284  * @cache: Cache in which backing object will be found
285  *
286  * Initialise a cache object description to its basic values.
287  *
288  * See Documentation/filesystems/caching/backend-api.txt for a complete
289  * description.
290  */
291 void fscache_object_init(struct fscache_object *object,
292                          struct fscache_cookie *cookie,
293                          struct fscache_cache *cache)
294 {
295         const struct fscache_transition *t;
296
297         atomic_inc(&cache->object_count);
298
299         object->state = STATE(WAIT_FOR_INIT);
300         object->oob_table = fscache_osm_init_oob;
301         object->flags = 1 << FSCACHE_OBJECT_IS_LIVE;
302         spin_lock_init(&object->lock);
303         INIT_LIST_HEAD(&object->cache_link);
304         INIT_HLIST_NODE(&object->cookie_link);
305         INIT_WORK(&object->work, fscache_object_work_func);
306         INIT_LIST_HEAD(&object->dependents);
307         INIT_LIST_HEAD(&object->dep_link);
308         INIT_LIST_HEAD(&object->pending_ops);
309         object->n_children = 0;
310         object->n_ops = object->n_in_progress = object->n_exclusive = 0;
311         object->events = 0;
312         object->store_limit = 0;
313         object->store_limit_l = 0;
314         object->cache = cache;
315         object->cookie = cookie;
316         object->parent = NULL;
317 #ifdef CONFIG_FSCACHE_OBJECT_LIST
318         RB_CLEAR_NODE(&object->objlist_link);
319 #endif
320
321         object->oob_event_mask = 0;
322         for (t = object->oob_table; t->events; t++)
323                 object->oob_event_mask |= t->events;
324         object->event_mask = object->oob_event_mask;
325         for (t = object->state->transitions; t->events; t++)
326                 object->event_mask |= t->events;
327 }
328 EXPORT_SYMBOL(fscache_object_init);
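/*
 * Editor's note: an illustrative sketch (not part of the original file) of a
 * cache backend's ->alloc_object() op using fscache_object_init().  The
 * "mycache" type and names are hypothetical; a real backend such as
 * cachefiles does the equivalent with its own object record.
 */
#if 0	/* example only */
struct mycache_object {
	struct fscache_object	fscache;	/* generic object record */
	atomic_t		usage;		/* backend's own refcount */
};

static struct fscache_object *mycache_alloc_object(struct fscache_cache *cache,
						   struct fscache_cookie *cookie)
{
	struct mycache_object *object;

	object = kzalloc(sizeof(*object), GFP_KERNEL);
	if (!object)
		return NULL;

	/* Set up the generic part: the object starts in WAIT_FOR_INIT and is
	 * counted in cache->object_count. */
	fscache_object_init(&object->fscache, cookie, cache);
	atomic_set(&object->usage, 1);
	return &object->fscache;
}
#endif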
329
330 /*
331  * Abort object initialisation before we start it.
332  */
333 static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object,
334                                                                 int event)
335 {
336         _enter("{OBJ%x},%d", object->debug_id, event);
337
338         object->oob_event_mask = 0;
339         fscache_dequeue_object(object);
340         return transit_to(KILL_OBJECT);
341 }
342
343 /*
344  * initialise an object
345  * - check the specified object's parent to see if we can make use of it
346  *   immediately to do a creation
347  * - we may need to start the process of creating a parent and we need to wait
348  *   for the parent's lookup and creation to complete if it's not there yet
349  */
350 static const struct fscache_state *fscache_initialise_object(struct fscache_object *object,
351                                                              int event)
352 {
353         struct fscache_object *parent;
354         bool success;
355
356         _enter("{OBJ%x},%d", object->debug_id, event);
357
358         ASSERT(list_empty(&object->dep_link));
359
360         parent = object->parent;
361         if (!parent) {
362                 _leave(" [no parent]");
363                 return transit_to(DROP_OBJECT);
364         }
365
366         _debug("parent: %s of:%lx", parent->state->name, parent->flags);
367
368         if (fscache_object_is_dying(parent)) {
369                 _leave(" [bad parent]");
370                 return transit_to(DROP_OBJECT);
371         }
372
373         if (fscache_object_is_available(parent)) {
374                 _leave(" [ready]");
375                 return transit_to(PARENT_READY);
376         }
377
378         _debug("wait");
379
380         spin_lock(&parent->lock);
381         fscache_stat(&fscache_n_cop_grab_object);
382         success = false;
383         if (fscache_object_is_live(parent) &&
384             object->cache->ops->grab_object(object)) {
385                 list_add(&object->dep_link, &parent->dependents);
386                 success = true;
387         }
388         fscache_stat_d(&fscache_n_cop_grab_object);
389         spin_unlock(&parent->lock);
390         if (!success) {
391                 _leave(" [grab failed]");
392                 return transit_to(DROP_OBJECT);
393         }
394
395         /* fscache_acquire_non_index_cookie() uses this
396          * to wake the chain up */
397         fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD);
398         _leave(" [wait]");
399         return transit_to(WAIT_FOR_PARENT);
400 }
401
402 /*
403  * Once the parent object is ready, we should kick off our lookup op.
404  */
405 static const struct fscache_state *fscache_parent_ready(struct fscache_object *object,
406                                                         int event)
407 {
408         struct fscache_object *parent = object->parent;
409
410         _enter("{OBJ%x},%d", object->debug_id, event);
411
412         ASSERT(parent != NULL);
413
414         spin_lock(&parent->lock);
415         parent->n_ops++;
416         parent->n_obj_ops++;
417         object->lookup_jif = jiffies;
418         spin_unlock(&parent->lock);
419
420         _leave("");
421         return transit_to(LOOK_UP_OBJECT);
422 }
423
424 /*
425  * look an object up in the cache from which it was allocated
426  * - we hold an "access lock" on the parent object, so the parent object cannot
427  *   be withdrawn by either party till we've finished
428  */
429 static const struct fscache_state *fscache_look_up_object(struct fscache_object *object,
430                                                           int event)
431 {
432         struct fscache_cookie *cookie = object->cookie;
433         struct fscache_object *parent = object->parent;
434         int ret;
435
436         _enter("{OBJ%x},%d", object->debug_id, event);
437
438         object->oob_table = fscache_osm_lookup_oob;
439
440         ASSERT(parent != NULL);
441         ASSERTCMP(parent->n_ops, >, 0);
442         ASSERTCMP(parent->n_obj_ops, >, 0);
443
444         /* make sure the parent is still available */
445         ASSERT(fscache_object_is_available(parent));
446
447         if (fscache_object_is_dying(parent) ||
448             test_bit(FSCACHE_IOERROR, &object->cache->flags) ||
449             !fscache_use_cookie(object)) {
450                 _leave(" [unavailable]");
451                 return transit_to(LOOKUP_FAILURE);
452         }
453
454         _debug("LOOKUP \"%s\" in \"%s\"",
455                cookie->def->name, object->cache->tag->name);
456
457         fscache_stat(&fscache_n_object_lookups);
458         fscache_stat(&fscache_n_cop_lookup_object);
459         ret = object->cache->ops->lookup_object(object);
460         fscache_stat_d(&fscache_n_cop_lookup_object);
461
462         fscache_unuse_cookie(object);
463
464         if (ret == -ETIMEDOUT) {
465                 /* probably stuck behind another object, so move this one to
466                  * the back of the queue */
467                 fscache_stat(&fscache_n_object_lookups_timed_out);
468                 _leave(" [timeout]");
469                 return NO_TRANSIT;
470         }
471
472         if (ret < 0) {
473                 _leave(" [error]");
474                 return transit_to(LOOKUP_FAILURE);
475         }
476
477         _leave(" [ok]");
478         return transit_to(OBJECT_AVAILABLE);
479 }
480
481 /**
482  * fscache_object_lookup_negative - Note negative cookie lookup
483  * @object: Object pointing to cookie to mark
484  *
485  * Note negative lookup, permitting those waiting to read data from an already
486  * existing backing object to continue as there's no data for them to read.
487  */
488 void fscache_object_lookup_negative(struct fscache_object *object)
489 {
490         struct fscache_cookie *cookie = object->cookie;
491
492         _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
493
494         if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
495                 fscache_stat(&fscache_n_object_lookups_negative);
496
497                 /* Allow write requests to begin stacking up and read
498                  * requests to begin returning ENODATA.
499                  */
500                 set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
501                 clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
502
503                 _debug("wake up lookup %p", &cookie->flags);
504                 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
505                 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
506         }
507         _leave("");
508 }
509 EXPORT_SYMBOL(fscache_object_lookup_negative);
510
511 /**
512  * fscache_obtained_object - Note successful object lookup or creation
513  * @object: Object pointing to cookie to mark
514  *
515  * Note successful lookup and/or creation, permitting those waiting to write
516  * data to a backing object to continue.
517  *
518  * Note that after calling this, an object's cookie may be relinquished by the
519  * netfs, and so must be accessed with object lock held.
520  */
521 void fscache_obtained_object(struct fscache_object *object)
522 {
523         struct fscache_cookie *cookie = object->cookie;
524
525         _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
526
527         /* if we were still looking up, then we must have a positive lookup
528          * result, in which case there may be data available */
529         if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
530                 fscache_stat(&fscache_n_object_lookups_positive);
531
532                 /* We do (presumably) have data */
533                 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
534                 clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
535
536                 /* Allow write requests to begin stacking up and read requests
537                  * to begin shovelling data.
538                  */
539                 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
540                 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
541         } else {
542                 fscache_stat(&fscache_n_object_created);
543         }
544
545         set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
546         _leave("");
547 }
548 EXPORT_SYMBOL(fscache_obtained_object);
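/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * how a backend's ->lookup_object() op pairs with the two helpers above,
 * reusing the hypothetical struct mycache_object from the earlier sketch.
 * All mycache_* names are hypothetical.
 */
#if 0	/* example only */
static int mycache_lookup_object(struct fscache_object *_object)
{
	struct mycache_object *object =
		container_of(_object, struct mycache_object, fscache);

	if (!mycache_find_on_disk(object)) {
		/* Negative lookup: let readers see ENODATA immediately and
		 * let writers start queueing up whilst we create the object
		 * on disk. */
		fscache_object_lookup_negative(&object->fscache);

		if (mycache_create_on_disk(object) < 0)
			return -ENOBUFS;
	}

	/* Lookup/creation complete: reads and writes may now proceed. */
	fscache_obtained_object(&object->fscache);
	return 0;
}
#endif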
549
550 /*
551  * handle an object that has just become available
552  */
553 static const struct fscache_state *fscache_object_available(struct fscache_object *object,
554                                                             int event)
555 {
556         _enter("{OBJ%x},%d", object->debug_id, event);
557
558         object->oob_table = fscache_osm_run_oob;
559
560         spin_lock(&object->lock);
561
562         fscache_done_parent_op(object);
563         if (object->n_in_progress == 0) {
564                 if (object->n_ops > 0) {
565                         ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
566                         fscache_start_operations(object);
567                 } else {
568                         ASSERT(list_empty(&object->pending_ops));
569                 }
570         }
571         spin_unlock(&object->lock);
572
573         fscache_stat(&fscache_n_cop_lookup_complete);
574         object->cache->ops->lookup_complete(object);
575         fscache_stat_d(&fscache_n_cop_lookup_complete);
576
577         fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
578         fscache_stat(&fscache_n_object_avail);
579
580         _leave("");
581         return transit_to(JUMPSTART_DEPS);
582 }
583
584 /*
585  * Wake up this object's dependent objects now that we've become available.
586  */
587 static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object,
588                                                                 int event)
589 {
590         _enter("{OBJ%x},%d", object->debug_id, event);
591
592         if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY))
593                 return NO_TRANSIT; /* Not finished; requeue */
594         return transit_to(WAIT_FOR_CMD);
595 }
596
597 /*
598  * Handle lookup or creation failure.
599  */
600 static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object,
601                                                           int event)
602 {
603         struct fscache_cookie *cookie;
604
605         _enter("{OBJ%x},%d", object->debug_id, event);
606
607         object->oob_event_mask = 0;
608
609         fscache_stat(&fscache_n_cop_lookup_complete);
610         object->cache->ops->lookup_complete(object);
611         fscache_stat_d(&fscache_n_cop_lookup_complete);
612
613         set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags);
614
615         cookie = object->cookie;
616         set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
617         if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
618                 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
619
620         fscache_done_parent_op(object);
621         return transit_to(KILL_OBJECT);
622 }
623
624 /*
625  * Wait for completion of all active operations on this object and the death of
626  * all child objects of this object.
627  */
628 static const struct fscache_state *fscache_kill_object(struct fscache_object *object,
629                                                        int event)
630 {
631         _enter("{OBJ%x,%d,%d},%d",
632                object->debug_id, object->n_ops, object->n_children, event);
633
634         clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
635         object->oob_event_mask = 0;
636
637         if (list_empty(&object->dependents) &&
638             object->n_ops == 0 &&
639             object->n_children == 0)
640                 return transit_to(DROP_OBJECT);
641
642         if (object->n_in_progress == 0) {
643                 spin_lock(&object->lock);
644                 if (object->n_ops > 0 && object->n_in_progress == 0)
645                         fscache_start_operations(object);
646                 spin_unlock(&object->lock);
647         }
648
649         if (!list_empty(&object->dependents))
650                 return transit_to(KILL_DEPENDENTS);
651
652         return transit_to(WAIT_FOR_CLEARANCE);
653 }
654
655 /*
656  * Kill dependent objects.
657  */
658 static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object,
659                                                            int event)
660 {
661         _enter("{OBJ%x},%d", object->debug_id, event);
662
663         if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL))
664                 return NO_TRANSIT; /* Not finished */
665         return transit_to(WAIT_FOR_CLEARANCE);
666 }
667
668 /*
669  * Drop an object's attachments
670  */
671 static const struct fscache_state *fscache_drop_object(struct fscache_object *object,
672                                                        int event)
673 {
674         struct fscache_object *parent = object->parent;
675         struct fscache_cookie *cookie = object->cookie;
676         struct fscache_cache *cache = object->cache;
677         bool awaken = false;
678
679         _enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event);
680
681         ASSERT(cookie != NULL);
682         ASSERT(!hlist_unhashed(&object->cookie_link));
683
684         /* Make sure the cookie no longer points here and that the netfs isn't
685          * waiting for us.
686          */
687         spin_lock(&cookie->lock);
688         hlist_del_init(&object->cookie_link);
689         if (hlist_empty(&cookie->backing_objects) &&
690             test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
691                 awaken = true;
692         spin_unlock(&cookie->lock);
693
694         if (awaken)
695                 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
696
697         /* Prevent a race with our last child, which has to signal EV_CLEARED
698          * before dropping our spinlock.
699          */
700         spin_lock(&object->lock);
701         spin_unlock(&object->lock);
702
703         /* Discard from the cache's collection of objects */
704         spin_lock(&cache->object_list_lock);
705         list_del_init(&object->cache_link);
706         spin_unlock(&cache->object_list_lock);
707
708         fscache_stat(&fscache_n_cop_drop_object);
709         cache->ops->drop_object(object);
710         fscache_stat_d(&fscache_n_cop_drop_object);
711
712         /* The parent object wants to know when all its dependents have gone */
713         if (parent) {
714                 _debug("release parent OBJ%x {%d}",
715                        parent->debug_id, parent->n_children);
716
717                 spin_lock(&parent->lock);
718                 parent->n_children--;
719                 if (parent->n_children == 0)
720                         fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
721                 spin_unlock(&parent->lock);
722                 object->parent = NULL;
723         }
724
725         /* this just shifts the object release to the work processor */
726         fscache_put_object(object);
727         fscache_stat(&fscache_n_object_dead);
728
729         _leave("");
730         return transit_to(OBJECT_DEAD);
731 }
732
733 /*
734  * get a ref on an object
735  */
736 static int fscache_get_object(struct fscache_object *object)
737 {
738         int ret;
739
740         fscache_stat(&fscache_n_cop_grab_object);
741         ret = object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
742         fscache_stat_d(&fscache_n_cop_grab_object);
743         return ret;
744 }
745
746 /*
747  * Discard a ref on an object
748  */
749 static void fscache_put_object(struct fscache_object *object)
750 {
751         fscache_stat(&fscache_n_cop_put_object);
752         object->cache->ops->put_object(object);
753         fscache_stat_d(&fscache_n_cop_put_object);
754 }
755
756 /**
757  * fscache_object_destroy - Note that a cache object is about to be destroyed
758  * @object: The object to be destroyed
759  *
760  * Note the imminent destruction and deallocation of a cache object record.
761  */
762 void fscache_object_destroy(struct fscache_object *object)
763 {
764         fscache_objlist_remove(object);
765
766         /* We can get rid of the cookie now */
767         fscache_cookie_put(object->cookie);
768         object->cookie = NULL;
769 }
770 EXPORT_SYMBOL(fscache_object_destroy);
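/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * where fscache_object_destroy() is typically called: in the backend's
 * ->put_object() op, on release of the final ref.  It reuses the hypothetical
 * struct mycache_object from the earlier sketch.
 */
#if 0	/* example only */
static void mycache_put_object(struct fscache_object *_object)
{
	struct mycache_object *object =
		container_of(_object, struct mycache_object, fscache);

	if (atomic_dec_and_test(&object->usage)) {
		/* Detach from the object list and release the cookie ref
		 * before freeing the backend's record. */
		fscache_object_destroy(&object->fscache);
		kfree(object);
	}
}
#endif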
771
772 /*
773  * enqueue an object for metadata-type processing
774  */
775 void fscache_enqueue_object(struct fscache_object *object)
776 {
777         _enter("{OBJ%x}", object->debug_id);
778
779         if (fscache_get_object(object) >= 0) {
780                 wait_queue_head_t *cong_wq =
781                         &get_cpu_var(fscache_object_cong_wait);
782
783                 if (queue_work(fscache_object_wq, &object->work)) {
784                         if (fscache_object_congested())
785                                 wake_up(cong_wq);
786                 } else
787                         fscache_put_object(object);
788
789                 put_cpu_var(fscache_object_cong_wait);
790         }
791 }
792
793 /**
794  * fscache_object_sleep_till_congested - Sleep until object wq is congested
795  * @timeoutp: Scheduler sleep timeout
796  *
797  * Allow an object handler to sleep until the object workqueue is congested.
798  *
799  * The caller must set up a wake up event before calling this and must have set
800  * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
801  * condition before calling this function as no test is made here.
802  *
803  * %true is returned if the object wq is congested, %false otherwise.
804  */
805 bool fscache_object_sleep_till_congested(signed long *timeoutp)
806 {
807         wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
808         DEFINE_WAIT(wait);
809
810         if (fscache_object_congested())
811                 return true;
812
813         add_wait_queue_exclusive(cong_wq, &wait);
814         if (!fscache_object_congested())
815                 *timeoutp = schedule_timeout(*timeoutp);
816         finish_wait(cong_wq, &wait);
817
818         return fscache_object_congested();
819 }
820 EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);
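/*
 * Editor's note: an illustrative fragment (not part of the original file) of
 * the calling pattern the kerneldoc above describes: the caller supplies its
 * own waitqueue and condition and loops until the condition comes true, the
 * timeout runs out or the object workqueue becomes congested.  The waitqueue
 * and condition names are hypothetical.
 */
#if 0	/* example only */
	signed long timeout = 60 * HZ;
	DEFINE_WAIT(wait);
	bool requeue = false;

	do {
		prepare_to_wait(&my_wait_queue, &wait, TASK_UNINTERRUPTIBLE);
		if (my_condition_has_come_true())
			break;
		requeue = fscache_object_sleep_till_congested(&timeout);
	} while (timeout > 0 && !requeue);
	finish_wait(&my_wait_queue, &wait);
#endif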
821
822 /*
823  * Enqueue the dependents of an object for metadata-type processing.
824  *
825  * If we don't manage to finish the list before the scheduler wants to run
826  * again then return false immediately.  We return true if the list was
827  * cleared.
828  */
829 static bool fscache_enqueue_dependents(struct fscache_object *object, int event)
830 {
831         struct fscache_object *dep;
832         bool ret = true;
833
834         _enter("{OBJ%x}", object->debug_id);
835
836         if (list_empty(&object->dependents))
837                 return true;
838
839         spin_lock(&object->lock);
840
841         while (!list_empty(&object->dependents)) {
842                 dep = list_entry(object->dependents.next,
843                                  struct fscache_object, dep_link);
844                 list_del_init(&dep->dep_link);
845
846                 fscache_raise_event(dep, event);
847                 fscache_put_object(dep);
848
849                 if (!list_empty(&object->dependents) && need_resched()) {
850                         ret = false;
851                         break;
852                 }
853         }
854
855         spin_unlock(&object->lock);
856         return ret;
857 }
858
859 /*
860  * remove an object from whatever queue it's waiting on
861  */
862 static void fscache_dequeue_object(struct fscache_object *object)
863 {
864         _enter("{OBJ%x}", object->debug_id);
865
866         if (!list_empty(&object->dep_link)) {
867                 spin_lock(&object->parent->lock);
868                 list_del_init(&object->dep_link);
869                 spin_unlock(&object->parent->lock);
870         }
871
872         _leave("");
873 }
874
875 /**
876  * fscache_check_aux - Ask the netfs whether an object on disk is still valid
877  * @object: The object to ask about
878  * @data: The auxiliary data for the object
879  * @datalen: The size of the auxiliary data
880  *
881  * This function consults the netfs about the coherency state of an object.
882  * The caller must be holding a ref on cookie->n_active (held by
883  * fscache_look_up_object() on behalf of the cache backend during object lookup
884  * and creation).
885  */
886 enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
887                                         const void *data, uint16_t datalen)
888 {
889         enum fscache_checkaux result;
890
891         if (!object->cookie->def->check_aux) {
892                 fscache_stat(&fscache_n_checkaux_none);
893                 return FSCACHE_CHECKAUX_OKAY;
894         }
895
896         result = object->cookie->def->check_aux(object->cookie->netfs_data,
897                                                 data, datalen);
898         switch (result) {
899                 /* entry okay as is */
900         case FSCACHE_CHECKAUX_OKAY:
901                 fscache_stat(&fscache_n_checkaux_okay);
902                 break;
903
904                 /* entry requires update */
905         case FSCACHE_CHECKAUX_NEEDS_UPDATE:
906                 fscache_stat(&fscache_n_checkaux_update);
907                 break;
908
909                 /* entry requires deletion */
910         case FSCACHE_CHECKAUX_OBSOLETE:
911                 fscache_stat(&fscache_n_checkaux_obsolete);
912                 break;
913
914         default:
915                 BUG();
916         }
917
918         return result;
919 }
920 EXPORT_SYMBOL(fscache_check_aux);
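/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * the netfs side of the coherency check above: a ->check_aux() method on the
 * cookie definition, which fscache_check_aux() invokes with the auxiliary
 * data the backend found on disk.  The mynetfs types are hypothetical.
 */
#if 0	/* example only */
static enum fscache_checkaux mynetfs_check_aux(void *cookie_netfs_data,
					       const void *data,
					       uint16_t datalen)
{
	struct mynetfs_inode *inode = cookie_netfs_data;
	const struct mynetfs_aux *aux = data;

	if (datalen != sizeof(*aux))
		return FSCACHE_CHECKAUX_OBSOLETE;

	/* The cached copy is stale if the version we recorded moved on */
	if (aux->data_version != inode->data_version)
		return FSCACHE_CHECKAUX_OBSOLETE;

	return FSCACHE_CHECKAUX_OKAY;
}
#endif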
921
922 /*
923  * Asynchronously invalidate an object.
924  */
925 static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object,
926                                                               int event)
927 {
928         struct fscache_operation *op;
929         struct fscache_cookie *cookie = object->cookie;
930
931         _enter("{OBJ%x},%d", object->debug_id, event);
932
933         /* We're going to need the cookie.  If the cookie is not available then
934          * retire the object instead.
935          */
936         if (!fscache_use_cookie(object)) {
937                 ASSERT(object->cookie->stores.rnode == NULL);
938                 set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
939                 _leave(" [no cookie]");
940                 return transit_to(KILL_OBJECT);
941         }
942
943         /* Reject any new read/write ops and abort any that are pending. */
944         fscache_invalidate_writes(cookie);
945         clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
946         fscache_cancel_all_ops(object);
947
948         /* Now we have to wait for in-progress reads and writes */
949         op = kzalloc(sizeof(*op), GFP_KERNEL);
950         if (!op)
951                 goto nomem;
952
953         fscache_operation_init(op, object->cache->ops->invalidate_object, NULL);
954         op->flags = FSCACHE_OP_ASYNC |
955                 (1 << FSCACHE_OP_EXCLUSIVE) |
956                 (1 << FSCACHE_OP_UNUSE_COOKIE);
957
958         spin_lock(&cookie->lock);
959         if (fscache_submit_exclusive_op(object, op) < 0)
960                 goto submit_op_failed;
961         spin_unlock(&cookie->lock);
962         fscache_put_operation(op);
963
964         /* Once we've completed the invalidation, we know there will be no data
965          * stored in the cache and thus we can reinstate the data-check-skip
966          * optimisation.
967          */
968         set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
969
970         /* We can allow read and write requests to come in once again.  They'll
971          * queue up behind our exclusive invalidation operation.
972          */
973         if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
974                 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
975         _leave(" [ok]");
976         return transit_to(UPDATE_OBJECT);
977
978 nomem:
979         clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
980         fscache_unuse_cookie(object);
981         _leave(" [ENOMEM]");
982         return transit_to(KILL_OBJECT);
983
984 submit_op_failed:
985         clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
986         spin_unlock(&cookie->lock);
987         fscache_unuse_cookie(object);
988         kfree(op);
989         _leave(" [EIO]");
990         return transit_to(KILL_OBJECT);
991 }
992
993 static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object,
994                                                              int event)
995 {
996         const struct fscache_state *s;
997
998         fscache_stat(&fscache_n_invalidates_run);
999         fscache_stat(&fscache_n_cop_invalidate_object);
1000         s = _fscache_invalidate_object(object, event);
1001         fscache_stat_d(&fscache_n_cop_invalidate_object);
1002         return s;
1003 }
1004
1005 /*
1006  * Asynchronously update an object.
1007  */
1008 static const struct fscache_state *fscache_update_object(struct fscache_object *object,
1009                                                          int event)
1010 {
1011         _enter("{OBJ%x},%d", object->debug_id, event);
1012
1013         fscache_stat(&fscache_n_updates_run);
1014         fscache_stat(&fscache_n_cop_update_object);
1015         object->cache->ops->update_object(object);
1016         fscache_stat_d(&fscache_n_cop_update_object);
1017
1018         _leave("");
1019         return transit_to(WAIT_FOR_CMD);
1020 }
1021
1022 /**
1023  * fscache_object_retrying_stale - Note retrying stale object
1024  * @object: The object that will be retried
1025  *
1026  * Note that an object lookup found an on-disk object that was adjudged to be
1027  * stale and has been deleted.  The lookup will be retried.
1028  */
1029 void fscache_object_retrying_stale(struct fscache_object *object)
1030 {
1031         fscache_stat(&fscache_n_cache_no_space_reject);
1032 }
1033 EXPORT_SYMBOL(fscache_object_retrying_stale);
1034
1035 /**
1036  * fscache_object_mark_killed - Note that an object was killed
1037  * @object: The object that was killed
1038  * @why: The reason the object was killed.
1039  *
1040  * Note that an object was killed.  If it was already marked as killed by the
1041  * cache, an error is logged; otherwise the statistic matching @why is bumped.
1042  */
1043 void fscache_object_mark_killed(struct fscache_object *object,
1044                                 enum fscache_why_object_killed why)
1045 {
1046         if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) {
1047                 pr_err("Error: Object already killed by cache [%s]\n",
1048                        object->cache->identifier);
1049                 return;
1050         }
1051
1052         switch (why) {
1053         case FSCACHE_OBJECT_NO_SPACE:
1054                 fscache_stat(&fscache_n_cache_no_space_reject);
1055                 break;
1056         case FSCACHE_OBJECT_IS_STALE:
1057                 fscache_stat(&fscache_n_cache_stale_objects);
1058                 break;
1059         case FSCACHE_OBJECT_WAS_RETIRED:
1060                 fscache_stat(&fscache_n_cache_retired_objects);
1061                 break;
1062         case FSCACHE_OBJECT_WAS_CULLED:
1063                 fscache_stat(&fscache_n_cache_culled_objects);
1064                 break;
1065         }
1066 }
1067 EXPORT_SYMBOL(fscache_object_mark_killed);
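/*
 * Editor's note: an illustrative sketch (not part of the original file) of a
 * backend recording why it is giving up on an object.  The mycache names are
 * hypothetical; the enum values are the ones handled above.
 */
#if 0	/* example only */
static int mycache_make_space_for(struct fscache_object *object)
{
	if (mycache_reserve_blocks(object))
		return 0;		/* enough space was found */

	/* Record the reason; the caller then fails the lookup and the object
	 * is killed through the LOOKUP_FAILURE/KILL_OBJECT states above. */
	fscache_object_mark_killed(object, FSCACHE_OBJECT_NO_SPACE);
	return -ENOBUFS;
}
#endif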