/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);

/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the object.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
        _enter("{OBJ%x OP%x,%u}",
               op->object->debug_id, op->debug_id, atomic_read(&op->usage));

        ASSERT(list_empty(&op->pend_link));
        ASSERT(op->processor != NULL);
        ASSERT(fscache_object_is_available(op->object));
        ASSERTCMP(atomic_read(&op->usage), >, 0);
        ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);

        fscache_stat(&fscache_n_op_enqueue);
        switch (op->flags & FSCACHE_OP_TYPE) {
        case FSCACHE_OP_ASYNC:
                _debug("queue async");
                atomic_inc(&op->usage);
                if (!queue_work(fscache_op_wq, &op->work))
                        fscache_put_operation(op);
                break;
        case FSCACHE_OP_MYTHREAD:
                _debug("queue for caller's attention");
                break;
        default:
                printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
                       op->flags);
                BUG();
                break;
        }
}
EXPORT_SYMBOL(fscache_enqueue_operation);
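
/* A minimal usage sketch, loosely modelled on the attribute-change op
 * elsewhere in fscache (my_op_processor() is a hypothetical processor
 * function, not part of this API):
 *
 *      fscache_operation_init(op, my_op_processor, NULL);
 *      op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
 *      if (fscache_submit_exclusive_op(object, op) < 0)
 *              goto nobufs;
 *
 * Once the op is started, my_op_processor() runs on fscache_op_wq (see
 * fscache_op_work_func() below) and must finish by calling
 * fscache_op_complete(); the workqueue then drops the queue's ref via
 * fscache_put_operation().
 */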

/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
                           struct fscache_operation *op)
{
        ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);

        op->state = FSCACHE_OP_ST_IN_PROGRESS;
        object->n_in_progress++;
        if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
                wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
        if (op->processor)
                fscache_enqueue_operation(op);
        fscache_stat(&fscache_n_op_run);
}

/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
                                struct fscache_operation *op)
{
        int ret;

        _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

        ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
        ASSERTCMP(atomic_read(&op->usage), >, 0);

        spin_lock(&object->lock);
        ASSERTCMP(object->n_ops, >=, object->n_in_progress);
        ASSERTCMP(object->n_ops, >=, object->n_exclusive);
        ASSERT(list_empty(&op->pend_link));

        op->state = FSCACHE_OP_ST_PENDING;
        if (fscache_object_is_active(object)) {
                op->object = object;
                object->n_ops++;
                object->n_exclusive++;  /* reads and writes must wait */

                if (object->n_in_progress > 0) {
                        atomic_inc(&op->usage);
                        list_add_tail(&op->pend_link, &object->pending_ops);
                        fscache_stat(&fscache_n_op_pend);
                } else if (!list_empty(&object->pending_ops)) {
                        atomic_inc(&op->usage);
                        list_add_tail(&op->pend_link, &object->pending_ops);
                        fscache_stat(&fscache_n_op_pend);
                        fscache_start_operations(object);
                } else {
                        ASSERTCMP(object->n_in_progress, ==, 0);
                        fscache_run_op(object, op);
                }

                /* need to issue a new write op after this */
                clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
                ret = 0;
        } else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
                op->object = object;
                object->n_ops++;
                object->n_exclusive++;  /* reads and writes must wait */
                atomic_inc(&op->usage);
                list_add_tail(&op->pend_link, &object->pending_ops);
                fscache_stat(&fscache_n_op_pend);
                ret = 0;
        } else {
                /* If we're in any other state, there must have been an I/O
                 * error of some nature.
                 */
                ASSERT(test_bit(FSCACHE_IOERROR, &object->cache->flags));
                ret = -EIO;
        }

        spin_unlock(&object->lock);
        return ret;
}
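
/* Note the invariant the branches above maintain: an exclusive op only
 * starts when object->n_in_progress is 0, and while it is queued or running
 * n_exclusive > 0 makes fscache_submit_op() queue every other op.  At any
 * instant an object is thus running either one exclusive op or only
 * non-exclusive ops.
 */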

/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
                                                 struct fscache_operation *op,
                                                 const struct fscache_state *ostate)
{
        static bool once_only;
        struct fscache_operation *p;
        unsigned n;

        if (once_only)
                return;
        once_only = true;

        kdebug("unexpected submission OP%x [OBJ%x %s]",
               op->debug_id, object->debug_id, object->state->name);
        kdebug("objstate=%s [%s]", object->state->name, ostate->name);
        kdebug("objflags=%lx", object->flags);
        kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
        kdebug("ops=%u inp=%u exc=%u",
               object->n_ops, object->n_in_progress, object->n_exclusive);

        if (!list_empty(&object->pending_ops)) {
                n = 0;
                list_for_each_entry(p, &object->pending_ops, pend_link) {
                        ASSERTCMP(p->object, ==, object);
                        kdebug("%p %p", p->processor, p->release);
                        n++;
                }
                kdebug("n=%u", n);
        }

        dump_stack();
}

/*
 * submit an operation for an object
 * - ops may be submitted only in the following object states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
                      struct fscache_operation *op)
{
        const struct fscache_state *ostate;
        int ret;

        _enter("{OBJ%x OP%x},{%u}",
               object->debug_id, op->debug_id, atomic_read(&op->usage));

        ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
        ASSERTCMP(atomic_read(&op->usage), >, 0);

        spin_lock(&object->lock);
        ASSERTCMP(object->n_ops, >=, object->n_in_progress);
        ASSERTCMP(object->n_ops, >=, object->n_exclusive);
        ASSERT(list_empty(&op->pend_link));

        ostate = object->state;
        smp_rmb();

        op->state = FSCACHE_OP_ST_PENDING;
        if (fscache_object_is_active(object)) {
                op->object = object;
                object->n_ops++;

                if (object->n_exclusive > 0) {
                        atomic_inc(&op->usage);
                        list_add_tail(&op->pend_link, &object->pending_ops);
                        fscache_stat(&fscache_n_op_pend);
                } else if (!list_empty(&object->pending_ops)) {
                        atomic_inc(&op->usage);
                        list_add_tail(&op->pend_link, &object->pending_ops);
                        fscache_stat(&fscache_n_op_pend);
                        fscache_start_operations(object);
                } else {
                        ASSERTCMP(object->n_exclusive, ==, 0);
                        fscache_run_op(object, op);
                }
                ret = 0;
        } else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
                op->object = object;
                object->n_ops++;
                atomic_inc(&op->usage);
                list_add_tail(&op->pend_link, &object->pending_ops);
                fscache_stat(&fscache_n_op_pend);
                ret = 0;
        } else if (fscache_object_is_dying(object)) {
                fscache_stat(&fscache_n_op_rejected);
                op->state = FSCACHE_OP_ST_CANCELLED;
                ret = -ENOBUFS;
        } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
                fscache_report_unexpected_submission(object, op, ostate);
                ASSERT(!fscache_object_is_active(object));
                op->state = FSCACHE_OP_ST_CANCELLED;
                ret = -ENOBUFS;
        } else {
                op->state = FSCACHE_OP_ST_CANCELLED;
                ret = -ENOBUFS;
        }

        spin_unlock(&object->lock);
        return ret;
}
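
/* The contract implemented above: 0 means the op was started or queued and
 * is now counted in object->n_ops; -ENOBUFS means the op was moved to
 * FSCACHE_OP_ST_CANCELLED before ever starting (object dying, cache I/O
 * error, or an unexpected object state), and the submitter still owns its
 * reference and must dispose of the op itself.
 */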

/*
 * queue an object for withdrawal on error, aborting all following
 * asynchronous operations
 */
void fscache_abort_object(struct fscache_object *object)
{
        _enter("{OBJ%x}", object->debug_id);

        fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * jump start the operation processing on an object
 */
void fscache_start_operations(struct fscache_object *object)
{
        struct fscache_operation *op;
        bool stop = false;

        ASSERT(spin_is_locked(&object->lock));

        while (!list_empty(&object->pending_ops) && !stop) {
                op = list_entry(object->pending_ops.next,
                                struct fscache_operation, pend_link);

                if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
                        if (object->n_in_progress > 0)
                                break;
                        stop = true;
                }
                list_del_init(&op->pend_link);
                fscache_run_op(object, op);

                /* the pending queue was holding a ref on the object */
                fscache_put_operation(op);
        }

        ASSERTCMP(object->n_in_progress, <=, object->n_ops);

        _debug("woke %d ops on OBJ%x",
               object->n_in_progress, object->debug_id);
}
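
/* Two distinct stops occur in the loop above: an exclusive op at the head
 * of the queue stays queued (break) while anything is still in progress,
 * but once it can start it is run and stop then ends the loop so that
 * nothing else is released to run alongside it.
 */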

/*
 * cancel an operation that's pending on an object
 */
int fscache_cancel_op(struct fscache_operation *op,
                      void (*do_cancel)(struct fscache_operation *))
{
        struct fscache_object *object = op->object;
        int ret;

        _enter("{OBJ%x OP%x}", op->object->debug_id, op->debug_id);

        ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
        ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
        ASSERTCMP(atomic_read(&op->usage), >, 0);

        spin_lock(&object->lock);

        ret = -EBUSY;
        if (op->state == FSCACHE_OP_ST_PENDING) {
                ASSERT(!list_empty(&op->pend_link));
                fscache_stat(&fscache_n_op_cancelled);
                list_del_init(&op->pend_link);
                if (do_cancel)
                        do_cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;
                if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
                        object->n_exclusive--;
                if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
                        wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
                fscache_put_operation(op);
                ret = 0;
        }

        spin_unlock(&object->lock);
        _leave(" = %d", ret);
        return ret;
}
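
/* A sketch of the expected caller pattern (hypothetical, not a caller in
 * this file): if fscache_cancel_op() returns 0, the op never ran and the
 * pending queue's ref has been dropped; -EBUSY means the op already left
 * FSCACHE_OP_ST_PENDING, so the caller must instead wait for
 * FSCACHE_OP_WAITING to clear or for the op to complete.
 */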

/*
 * Cancel all pending operations on an object
 */
void fscache_cancel_all_ops(struct fscache_object *object)
{
        struct fscache_operation *op;

        _enter("OBJ%x", object->debug_id);

        spin_lock(&object->lock);

        while (!list_empty(&object->pending_ops)) {
                op = list_entry(object->pending_ops.next,
                                struct fscache_operation, pend_link);
                fscache_stat(&fscache_n_op_cancelled);
                list_del_init(&op->pend_link);

                ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
                op->state = FSCACHE_OP_ST_CANCELLED;

                if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
                        object->n_exclusive--;
                if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
                        wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
                fscache_put_operation(op);
                cond_resched_lock(&object->lock);
        }

        spin_unlock(&object->lock);
        _leave("");
}

/*
 * Record the completion or cancellation of an in-progress operation.
 */
void fscache_op_complete(struct fscache_operation *op, bool cancelled)
{
        struct fscache_object *object = op->object;

        _enter("OBJ%x", object->debug_id);

        ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
        ASSERTCMP(object->n_in_progress, >, 0);
        ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
                    object->n_exclusive, >, 0);
        ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
                    object->n_in_progress, ==, 1);

        spin_lock(&object->lock);

        op->state = cancelled ?
                FSCACHE_OP_ST_CANCELLED : FSCACHE_OP_ST_COMPLETE;

        if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
                object->n_exclusive--;
        object->n_in_progress--;
        if (object->n_in_progress == 0)
                fscache_start_operations(object);

        spin_unlock(&object->lock);
        _leave("");
}
EXPORT_SYMBOL(fscache_op_complete);
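
/* State lifecycle recap: INITIALISED -> PENDING (submission) ->
 * IN_PROGRESS (fscache_run_op()) -> COMPLETE or CANCELLED
 * (fscache_op_complete()) -> DEAD (final fscache_put_operation()).
 * Completing the last in-progress op is what restarts the pending queue.
 */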

/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 */
void fscache_put_operation(struct fscache_operation *op)
{
        struct fscache_object *object;
        struct fscache_cache *cache;

        _enter("{OBJ%x OP%x,%d}",
               op->object->debug_id, op->debug_id, atomic_read(&op->usage));

        ASSERTCMP(atomic_read(&op->usage), >, 0);

        if (!atomic_dec_and_test(&op->usage))
                return;

        _debug("PUT OP");
        ASSERTIFCMP(op->state != FSCACHE_OP_ST_COMPLETE,
                    op->state, ==, FSCACHE_OP_ST_CANCELLED);
        op->state = FSCACHE_OP_ST_DEAD;

        fscache_stat(&fscache_n_op_release);

        if (op->release) {
                op->release(op);
                op->release = NULL;
        }

        object = op->object;

        if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) {
                if (atomic_dec_and_test(&object->n_reads)) {
                        clear_bit(FSCACHE_COOKIE_WAITING_ON_READS,
                                  &object->cookie->flags);
                        wake_up_bit(&object->cookie->flags,
                                    FSCACHE_COOKIE_WAITING_ON_READS);
                }
        }

        /* now... we may get called with the object spinlock held, so we
         * complete the cleanup here only if we can immediately acquire the
         * lock, and defer it otherwise */
        if (!spin_trylock(&object->lock)) {
                _debug("defer put");
                fscache_stat(&fscache_n_op_deferred_release);

                cache = object->cache;
                spin_lock(&cache->op_gc_list_lock);
                list_add_tail(&op->pend_link, &cache->op_gc_list);
                spin_unlock(&cache->op_gc_list_lock);
                schedule_work(&cache->op_gc);
                _leave(" [defer]");
                return;
        }

        ASSERTCMP(object->n_ops, >, 0);
        object->n_ops--;
        if (object->n_ops == 0)
                fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

        spin_unlock(&object->lock);

        kfree(op);
        _leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);
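
/* Note on the deferred path above: a dead op can no longer be on the
 * object's pending list (submission asserts pend_link is empty), so
 * pend_link is safely reused to chain the op onto cache->op_gc_list until
 * fscache_operation_gc() can take object->lock from a clean context.
 */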

/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
        struct fscache_operation *op;
        struct fscache_object *object;
        struct fscache_cache *cache =
                container_of(work, struct fscache_cache, op_gc);
        int count = 0;

        _enter("");

        do {
                spin_lock(&cache->op_gc_list_lock);
                if (list_empty(&cache->op_gc_list)) {
                        spin_unlock(&cache->op_gc_list_lock);
                        break;
                }

                op = list_entry(cache->op_gc_list.next,
                                struct fscache_operation, pend_link);
                list_del(&op->pend_link);
                spin_unlock(&cache->op_gc_list_lock);

                object = op->object;
                spin_lock(&object->lock);

                _debug("GC DEFERRED REL OBJ%x OP%x",
                       object->debug_id, op->debug_id);
                fscache_stat(&fscache_n_op_gc);

                ASSERTCMP(atomic_read(&op->usage), ==, 0);
                ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);

                ASSERTCMP(object->n_ops, >, 0);
                object->n_ops--;
                if (object->n_ops == 0)
                        fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

                spin_unlock(&object->lock);
                kfree(op);

        } while (count++ < 20);

        if (!list_empty(&cache->op_gc_list))
                schedule_work(&cache->op_gc);

        _leave("");
}
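
/* The loop above deliberately handles at most ~20 deferred releases per
 * work item and reschedules itself if the list is still populated, so a
 * long backlog cannot monopolise the shared workqueue.
 */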

/*
 * execute an operation using fscache_op_wq to provide processing context -
 * the caller holds a ref to this object, so we don't need to hold one
 */
void fscache_op_work_func(struct work_struct *work)
{
        struct fscache_operation *op =
                container_of(work, struct fscache_operation, work);
        unsigned long start;

        _enter("{OBJ%x OP%x,%d}",
               op->object->debug_id, op->debug_id, atomic_read(&op->usage));

        ASSERT(op->processor != NULL);
        start = jiffies;
        op->processor(op);
        fscache_hist(fscache_ops_histogram, start);
        fscache_put_operation(op);
        _leave("");
}