/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted;
 * rather, as keys are inserted we only sort the pages that have not yet been
 * written. When garbage collection is run, we re-sort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Plugging?
 *
 * If a data write is smaller than the hard sector size of the SSD, round up
 * the offset in the open bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing IO on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

#define MAX_NEED_GC             64
#define MAX_SAVE_PRIO           72

#define PTR_DIRTY_BIT           (((uint64_t) 1 << 36))

#define PTR_HASH(c, k)                                                  \
        (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

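/*
 * insert_lock() is true at the depths where the insertion will actually
 * happen: op->lock is the highest btree level we take a write lock at;
 * levels above it are traversed with read locks.
 */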
#define insert_lock(s, b)       ((b)->level <= (s)->lock)

/*
 * These macros are for recursing down the btree - they handle the details of
 * locking and looking up nodes in the cache for you. They're best treated as
 * mere syntax when reading code that uses them.
 *
 * op->lock determines whether we take a read or a write lock at a given depth.
 * If you've got a read lock and find that you need a write lock (i.e. you're
 * going to have to split), set op->lock and return -EINTR; btree_root() will
 * call you again and you'll have the correct lock.
 */

/**
 * btree - recurse down the btree on a specified key
 * @fn:         function to call, which will be passed the child node
 * @key:        key to recurse on
 * @b:          parent btree node
 * @op:         pointer to struct btree_op
 */
#define btree(fn, key, b, op, ...)                                      \
({                                                                      \
        int _r, l = (b)->level - 1;                                     \
        bool _w = l <= (op)->lock;                                      \
        struct btree *_child = bch_btree_node_get((b)->c, op, key, l, _w);\
        if (!IS_ERR(_child)) {                                          \
                _child->parent = (b);                                   \
                _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);       \
                rw_unlock(_w, _child);                                  \
        } else                                                          \
                _r = PTR_ERR(_child);                                   \
        _r;                                                             \
})
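
/*
 * A usage sketch (with a hypothetical @fn): from code holding a lock on @b,
 *
 *	ret = btree(fn, key, b, op);
 *
 * looks up and locks the child of @b that @key points into, calls
 * bch_btree_fn(child, op), unlocks the child and evaluates to the result.
 */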

/**
 * btree_root - call a function on the root of the btree
 * @fn:         function to call, which will be passed the child node
 * @c:          cache set
 * @op:         pointer to struct btree_op
 */
#define btree_root(fn, c, op, ...)                                      \
({                                                                      \
        int _r = -EINTR;                                                \
        do {                                                            \
                struct btree *_b = (c)->root;                           \
                bool _w = insert_lock(op, _b);                          \
                rw_lock(_w, _b, _b->level);                             \
                if (_b == (c)->root &&                                  \
                    _w == insert_lock(op, _b)) {                        \
                        _b->parent = NULL;                              \
                        _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);   \
                }                                                       \
                rw_unlock(_w, _b);                                      \
                bch_cannibalize_unlock(c);                              \
                if (_r == -EINTR)                                       \
                        schedule();                                     \
        } while (_r == -EINTR);                                         \
                                                                        \
        finish_wait(&(c)->btree_cache_wait, &(op)->wait);               \
        _r;                                                             \
})
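
/*
 * Usage sketch: thanks to the -EINTR retry convention above, garbage
 * collection can be kicked off with just
 *
 *	ret = btree_root(gc_root, c, &op, &writes, &stats);
 *
 * and btree_root() re-locks the (possibly new) root and calls
 * bch_btree_gc_root() again whenever it returned -EINTR.
 */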

static inline struct bset *write_block(struct btree *b)
{
        return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
}

static void bch_btree_init_next(struct btree *b)
{
        /* If not a leaf node, always sort */
        if (b->level && b->keys.nsets)
                bch_btree_sort(&b->keys, &b->c->sort);
        else
                bch_btree_sort_lazy(&b->keys, &b->c->sort);

        if (b->written < btree_blocks(b))
                bch_bset_init_next(&b->keys, write_block(b),
                                   bset_magic(&b->c->sb));
}

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
        unsigned i;

        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(c, k, i))
                        atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

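/*
 * The checksum is seeded with the node's first pointer, so a bset read back
 * from the wrong bucket fails verification; it covers everything in the bset
 * after the 64 bit csum field itself (hence the "(void *) i + 8" below).
 */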
static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
        uint64_t crc = b->key.ptr[0];
        void *data = (void *) i + 8, *end = bset_bkey_last(i);

        crc = bch_crc64_update(crc, data, end - data);
        return crc ^ 0xffffffffffffffffULL;
}

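/*
 * Validate each bset in a freshly read node (version, size, magic, checksum),
 * push the good ones onto an iterator, and merge them into a single sorted
 * set with bch_btree_sort_and_fix_extents().
 */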
void bch_btree_node_read_done(struct btree *b)
{
        const char *err = "bad btree header";
        struct bset *i = btree_bset_first(b);
        struct btree_iter *iter;

        iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
        iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
        iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
        iter->b = &b->keys;
#endif

        if (!i->seq)
                goto err;

        for (;
             b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
             i = write_block(b)) {
                err = "unsupported bset version";
                if (i->version > BCACHE_BSET_VERSION)
                        goto err;

                err = "bad btree header";
                if (b->written + set_blocks(i, block_bytes(b->c)) >
                    btree_blocks(b))
                        goto err;

                err = "bad magic";
                if (i->magic != bset_magic(&b->c->sb))
                        goto err;

                err = "bad checksum";
                switch (i->version) {
                case 0:
                        if (i->csum != csum_set(i))
                                goto err;
                        break;
                case BCACHE_BSET_VERSION:
                        if (i->csum != btree_csum_set(b, i))
                                goto err;
                        break;
                }

                err = "empty set";
                if (i != b->keys.set[0].data && !i->keys)
                        goto err;

                bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

                b->written += set_blocks(i, block_bytes(b->c));
        }

        err = "corrupted btree";
        for (i = write_block(b);
             bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
             i = ((void *) i) + block_bytes(b->c))
                if (i->seq == b->keys.set[0].data->seq)
                        goto err;

        bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);

        i = b->keys.set[0].data;
        err = "short btree key";
        if (b->keys.set[0].size &&
            bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
                goto err;

        if (b->written < btree_blocks(b))
                bch_bset_init_next(&b->keys, write_block(b),
                                   bset_magic(&b->c->sb));
out:
        mempool_free(iter, b->c->fill_iter);
        return;
err:
        set_btree_node_io_error(b);
        bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
                            err, PTR_BUCKET_NR(b->c, &b->key, 0),
                            bset_block_offset(b, i), i->keys);
        goto out;
}

static void btree_node_read_endio(struct bio *bio, int error)
{
        struct closure *cl = bio->bi_private;
        closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
        uint64_t start_time = local_clock();
        struct closure cl;
        struct bio *bio;

        trace_bcache_btree_read(b);

        closure_init_stack(&cl);

        bio = bch_bbio_alloc(b->c);
        bio->bi_rw      = REQ_META|READ_SYNC;
        bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
        bio->bi_end_io  = btree_node_read_endio;
        bio->bi_private = &cl;

        bch_bio_map(bio, b->keys.set[0].data);

        bch_submit_bbio(bio, b->c, &b->key, 0);
        closure_sync(&cl);

        if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                set_btree_node_io_error(b);

        bch_bbio_free(bio, b->c);

        if (btree_node_io_error(b))
                goto err;

        bch_btree_node_read_done(b);
        bch_time_stats_update(&b->c->btree_read_time, start_time);

        return;
err:
        bch_cache_set_error(b->c, "io error reading bucket %zu",
                            PTR_BUCKET_NR(b->c, &b->key, 0));
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
        if (w->prio_blocked &&
            !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
                wake_up_allocators(b->c);

        if (w->journal) {
                atomic_dec_bug(w->journal);
                __closure_wake_up(&b->c->journal.wait);
        }

        w->prio_blocked = 0;
        w->journal      = NULL;
}

static void btree_node_write_unlock(struct closure *cl)
{
        struct btree *b = container_of(cl, struct btree, io);

        up(&b->io_mutex);
}

static void __btree_node_write_done(struct closure *cl)
{
        struct btree *b = container_of(cl, struct btree, io);
        struct btree_write *w = btree_prev_write(b);

        bch_bbio_free(b->bio, b->c);
        b->bio = NULL;
        btree_complete_write(b, w);

        if (btree_node_dirty(b))
                schedule_delayed_work(&b->work, 30 * HZ);

        closure_return_with_destructor(cl, btree_node_write_unlock);
}

static void btree_node_write_done(struct closure *cl)
{
        struct btree *b = container_of(cl, struct btree, io);
        struct bio_vec *bv;
        int n;

        bio_for_each_segment_all(bv, b->bio, n)
                __free_page(bv->bv_page);

        __btree_node_write_done(cl);
}

static void btree_node_write_endio(struct bio *bio, int error)
{
        struct closure *cl = bio->bi_private;
        struct btree *b = container_of(cl, struct btree, io);

        if (error)
                set_btree_node_io_error(b);

        bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
        closure_put(cl);
}

static void do_btree_node_write(struct btree *b)
{
        struct closure *cl = &b->io;
        struct bset *i = btree_bset_last(b);
        BKEY_PADDED(key) k;

        i->version      = BCACHE_BSET_VERSION;
        i->csum         = btree_csum_set(b, i);

        BUG_ON(b->bio);
        b->bio = bch_bbio_alloc(b->c);

        b->bio->bi_end_io       = btree_node_write_endio;
        b->bio->bi_private      = cl;
        b->bio->bi_rw           = REQ_META|WRITE_SYNC|REQ_FUA;
        b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
        bch_bio_map(b->bio, i);

        /*
         * If we're appending to a leaf node, we don't technically need FUA -
         * this write just needs to be persisted before the next journal write,
         * which will be marked FLUSH|FUA.
         *
         * Similarly if we're writing a new btree root - the pointer is going to
         * be in the next journal entry.
         *
         * But if we're writing a new btree node (that isn't a root) or
         * appending to a non leaf btree node, we need either FUA or a flush
         * when we write the parent with the new pointer. FUA is cheaper than a
         * flush, and writes appending to leaf nodes aren't blocking anything so
         * just make all btree node writes FUA to keep things sane.
         */

        bkey_copy(&k.key, &b->key);
        SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
                       bset_sector_offset(&b->keys, i));

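        /*
         * If we can allocate a bounce buffer, copy the bset into it so the
         * write can complete asynchronously while the in-memory bset keeps
         * changing; if the allocation fails, write out of the node's own
         * memory and wait for the IO synchronously instead.
         */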
        if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
                int j;
                struct bio_vec *bv;
                void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));

                bio_for_each_segment_all(bv, b->bio, j)
                        memcpy(page_address(bv->bv_page),
                               base + j * PAGE_SIZE, PAGE_SIZE);

                bch_submit_bbio(b->bio, b->c, &k.key, 0);

                continue_at(cl, btree_node_write_done, NULL);
        } else {
                b->bio->bi_vcnt = 0;
                bch_bio_map(b->bio, i);

                bch_submit_bbio(b->bio, b->c, &k.key, 0);

                closure_sync(cl);
                continue_at_nobarrier(cl, __btree_node_write_done, NULL);
        }
}

void __bch_btree_node_write(struct btree *b, struct closure *parent)
{
        struct bset *i = btree_bset_last(b);

        lockdep_assert_held(&b->write_lock);

        trace_bcache_btree_write(b);

        BUG_ON(current->bio_list);
        BUG_ON(b->written >= btree_blocks(b));
        BUG_ON(b->written && !i->keys);
        BUG_ON(btree_bset_first(b)->seq != i->seq);
        bch_check_keys(&b->keys, "writing");

        cancel_delayed_work(&b->work);

        /* If caller isn't waiting for write, parent refcount is cache set */
        down(&b->io_mutex);
        closure_init(&b->io, parent ?: &b->c->cl);

        clear_bit(BTREE_NODE_dirty,      &b->flags);
        change_bit(BTREE_NODE_write_idx, &b->flags);

        do_btree_node_write(b);

        atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
                        &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

        b->written += set_blocks(i, block_bytes(b->c));
}

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
        unsigned nsets = b->keys.nsets;

        lockdep_assert_held(&b->lock);

        __bch_btree_node_write(b, parent);

        /*
         * do verify if there was more than one set initially (i.e. we did a
         * sort) and we sorted down to a single set:
         */
        if (nsets && !b->keys.nsets)
                bch_btree_verify(b);

        bch_btree_init_next(b);
}

static void bch_btree_node_write_sync(struct btree *b)
{
        struct closure cl;

        closure_init_stack(&cl);

        mutex_lock(&b->write_lock);
        bch_btree_node_write(b, &cl);
        mutex_unlock(&b->write_lock);

        closure_sync(&cl);
}

static void btree_node_write_work(struct work_struct *w)
{
        struct btree *b = container_of(to_delayed_work(w), struct btree, work);

        mutex_lock(&b->write_lock);
        if (btree_node_dirty(b))
                __bch_btree_node_write(b, NULL);
        mutex_unlock(&b->write_lock);
}

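/*
 * Mark a leaf dirty after an insert: pin the journal entry the keys came
 * from so it isn't reclaimed before this node is written, schedule a
 * delayed write, and force an immediate write if the set has grown too big.
 */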
static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
        struct bset *i = btree_bset_last(b);
        struct btree_write *w = btree_current_write(b);

        lockdep_assert_held(&b->write_lock);

        BUG_ON(!b->written);
        BUG_ON(!i->keys);

        if (!btree_node_dirty(b))
                schedule_delayed_work(&b->work, 30 * HZ);

        set_btree_node_dirty(b);

        if (journal_ref) {
                if (w->journal &&
                    journal_pin_cmp(b->c, w->journal, journal_ref)) {
                        atomic_dec_bug(w->journal);
                        w->journal = NULL;
                }

                if (!w->journal) {
                        w->journal = journal_ref;
                        atomic_inc(w->journal);
                }
        }

        /* Force write if set is too big */
        if (set_bytes(i) > PAGE_SIZE - 48 &&
            !current->bio_list)
                bch_btree_node_write(b, NULL);
}

/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

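/*
 * The reserve is the number of nodes we must keep cached so that allocating
 * memory for a new btree node can always succeed; see the comment in
 * bch_mca_scan().
 */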
#define mca_reserve(c)  (((c->root && c->root->level)           \
                          ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)                                         \
        max_t(int, 0, c->btree_cache_used - mca_reserve(c))

static void mca_data_free(struct btree *b)
{
        BUG_ON(b->io_mutex.count != 1);

        bch_btree_keys_free(&b->keys);

        b->c->btree_cache_used--;
        list_move(&b->list, &b->c->btree_cache_freed);
}

static void mca_bucket_free(struct btree *b)
{
        BUG_ON(btree_node_dirty(b));

        b->key.ptr[0] = 0;
        hlist_del_init_rcu(&b->hash);
        list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned btree_order(struct bkey *k)
{
        return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
        if (!bch_btree_keys_alloc(&b->keys,
                                  max_t(unsigned,
                                        ilog2(b->c->btree_pages),
                                        btree_order(k)),
                                  gfp)) {
                b->c->btree_cache_used++;
                list_move(&b->list, &b->c->btree_cache);
        } else {
                list_move(&b->list, &b->c->btree_cache_freed);
        }
}

static struct btree *mca_bucket_alloc(struct cache_set *c,
                                      struct bkey *k, gfp_t gfp)
{
        struct btree *b = kzalloc(sizeof(struct btree), gfp);
        if (!b)
                return NULL;

        init_rwsem(&b->lock);
        lockdep_set_novalidate_class(&b->lock);
        mutex_init(&b->write_lock);
        lockdep_set_novalidate_class(&b->write_lock);
        INIT_LIST_HEAD(&b->list);
        INIT_DELAYED_WORK(&b->work, btree_node_write_work);
        b->c = c;
        sema_init(&b->io_mutex, 1);

        mca_data_alloc(b, k, gfp);
        return b;
}

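/*
 * Try to reclaim a btree node for reuse: take its write lock without
 * blocking, and flush it first if it's dirty and @flush is set. Returns
 * -ENOMEM if the node is busy or its allocation is smaller than @min_order.
 */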
static int mca_reap(struct btree *b, unsigned min_order, bool flush)
{
        struct closure cl;

        closure_init_stack(&cl);
        lockdep_assert_held(&b->c->bucket_lock);

        if (!down_write_trylock(&b->lock))
                return -ENOMEM;

        BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);

        if (b->keys.page_order < min_order)
                goto out_unlock;

        if (!flush) {
                if (btree_node_dirty(b))
                        goto out_unlock;

                if (down_trylock(&b->io_mutex))
                        goto out_unlock;
                up(&b->io_mutex);
        }

        mutex_lock(&b->write_lock);
        if (btree_node_dirty(b))
                __bch_btree_node_write(b, &cl);
        mutex_unlock(&b->write_lock);

        closure_sync(&cl);

        /* wait for any in flight btree write */
        down(&b->io_mutex);
        up(&b->io_mutex);

        return 0;
out_unlock:
        rw_unlock(true, b);
        return -ENOMEM;
}

static unsigned long bch_mca_scan(struct shrinker *shrink,
                                  struct shrink_control *sc)
{
        struct cache_set *c = container_of(shrink, struct cache_set, shrink);
        struct btree *b, *t;
        unsigned long i, nr = sc->nr_to_scan;
        unsigned long freed = 0;

        if (c->shrinker_disabled)
                return SHRINK_STOP;

        if (c->btree_cache_alloc_lock)
                return SHRINK_STOP;

        /* Return -1 if we can't do anything right now */
        if (sc->gfp_mask & __GFP_IO)
                mutex_lock(&c->bucket_lock);
        else if (!mutex_trylock(&c->bucket_lock))
                return -1;

        /*
         * It's _really_ critical that we don't free too many btree nodes - we
         * have to always leave ourselves a reserve. The reserve is how we
         * guarantee that allocating memory for a new btree node can always
         * succeed, so that inserting keys into the btree can always succeed and
         * IO can always make forward progress:
         */
        nr /= c->btree_pages;
        nr = min_t(unsigned long, nr, mca_can_free(c));

        i = 0;
        list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
                if (freed >= nr)
                        break;

                if (++i > 3 &&
                    !mca_reap(b, 0, false)) {
                        mca_data_free(b);
                        rw_unlock(true, b);
                        freed++;
                }
        }

        for (i = 0; (nr--) && i < c->btree_cache_used; i++) {
                if (list_empty(&c->btree_cache))
                        goto out;

                b = list_first_entry(&c->btree_cache, struct btree, list);
                list_rotate_left(&c->btree_cache);

                if (!b->accessed &&
                    !mca_reap(b, 0, false)) {
                        mca_bucket_free(b);
                        mca_data_free(b);
                        rw_unlock(true, b);
                        freed++;
                } else
                        b->accessed = 0;
        }
out:
        mutex_unlock(&c->bucket_lock);
        return freed;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
                                   struct shrink_control *sc)
{
        struct cache_set *c = container_of(shrink, struct cache_set, shrink);

        if (c->shrinker_disabled)
                return 0;

        if (c->btree_cache_alloc_lock)
                return 0;

        return mca_can_free(c) * c->btree_pages;
}

void bch_btree_cache_free(struct cache_set *c)
{
        struct btree *b;
        struct closure cl;
        closure_init_stack(&cl);

        if (c->shrink.list.next)
                unregister_shrinker(&c->shrink);

        mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
        if (c->verify_data)
                list_move(&c->verify_data->list, &c->btree_cache);

        free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
#endif

        list_splice(&c->btree_cache_freeable,
                    &c->btree_cache);

        while (!list_empty(&c->btree_cache)) {
                b = list_first_entry(&c->btree_cache, struct btree, list);

                if (btree_node_dirty(b))
                        btree_complete_write(b, btree_current_write(b));
                clear_bit(BTREE_NODE_dirty, &b->flags);

                mca_data_free(b);
        }

        while (!list_empty(&c->btree_cache_freed)) {
                b = list_first_entry(&c->btree_cache_freed,
                                     struct btree, list);
                list_del(&b->list);
                cancel_delayed_work_sync(&b->work);
                kfree(b);
        }

        mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
        unsigned i;

        for (i = 0; i < mca_reserve(c); i++)
                if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
                        return -ENOMEM;

        list_splice_init(&c->btree_cache,
                         &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
        mutex_init(&c->verify_lock);

        c->verify_ondisk = (void *)
                __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));

        c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

        if (c->verify_data &&
            c->verify_data->keys.set->data)
                list_del_init(&c->verify_data->list);
        else
                c->verify_data = NULL;
#endif

        c->shrink.count_objects = bch_mca_count;
        c->shrink.scan_objects = bch_mca_scan;
        c->shrink.seeks = 4;
        c->shrink.batch = c->btree_pages * 2;
        register_shrinker(&c->shrink);

        return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
        return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
        struct btree *b;

        rcu_read_lock();
        hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
                if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
                        goto out;
        b = NULL;
out:
        rcu_read_unlock();
        return b;
}

static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
        struct task_struct *old;

        old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
        if (old && old != current) {
                if (op)
                        prepare_to_wait(&c->btree_cache_wait, &op->wait,
                                        TASK_UNINTERRUPTIBLE);
                return -EINTR;
        }

        return 0;
}

static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
                                     struct bkey *k)
{
        struct btree *b;

        trace_bcache_btree_cache_cannibalize(c);

        if (mca_cannibalize_lock(c, op))
                return ERR_PTR(-EINTR);

        list_for_each_entry_reverse(b, &c->btree_cache, list)
                if (!mca_reap(b, btree_order(k), false))
                        return b;

        list_for_each_entry_reverse(b, &c->btree_cache, list)
                if (!mca_reap(b, btree_order(k), true))
                        return b;

        WARN(1, "btree cache cannibalize failed\n");
        return ERR_PTR(-ENOMEM);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a time,
 * or we'll deadlock. We use an open coded mutex to ensure that, which
 * mca_cannibalize_lock() takes. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
static void bch_cannibalize_unlock(struct cache_set *c)
{
        if (c->btree_cache_alloc_lock == current) {
                c->btree_cache_alloc_lock = NULL;
                wake_up(&c->btree_cache_wait);
        }
}

static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
                               struct bkey *k, int level)
{
        struct btree *b;

        BUG_ON(current->bio_list);

        lockdep_assert_held(&c->bucket_lock);

        if (mca_find(c, k))
                return NULL;

        /* btree_node_free() doesn't free memory; it sticks the node on the
         * end of the list. Check if there are any freed nodes there:
         */
        list_for_each_entry(b, &c->btree_cache_freeable, list)
                if (!mca_reap(b, btree_order(k), false))
                        goto out;

        /* We never free struct btree itself, just the memory that holds the on
         * disk node. Check the freed list before allocating a new one:
         */
        list_for_each_entry(b, &c->btree_cache_freed, list)
                if (!mca_reap(b, 0, false)) {
                        mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
                        if (!b->keys.set[0].data)
                                goto err;
                        else
                                goto out;
                }

        b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
        if (!b)
                goto err;

        BUG_ON(!down_write_trylock(&b->lock));
        if (!b->keys.set->data)
                goto err;
out:
        BUG_ON(b->io_mutex.count != 1);

        bkey_copy(&b->key, k);
        list_move(&b->list, &c->btree_cache);
        hlist_del_init_rcu(&b->hash);
        hlist_add_head_rcu(&b->hash, mca_hash(c, k));

        lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
        b->parent       = (void *) ~0UL;
        b->flags        = 0;
        b->written      = 0;
        b->level        = level;

        if (!b->level)
                bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
                                    &b->c->expensive_debug_checks);
        else
                bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
                                    &b->c->expensive_debug_checks);

        return b;
err:
        if (b)
                rw_unlock(true, b);

        b = mca_cannibalize(c, op, k);
        if (!IS_ERR(b))
                goto out;

        return b;
}

/**
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
                                 struct bkey *k, int level, bool write)
{
        int i = 0;
        struct btree *b;

        BUG_ON(level < 0);
retry:
        b = mca_find(c, k);

        if (!b) {
                if (current->bio_list)
                        return ERR_PTR(-EAGAIN);

                mutex_lock(&c->bucket_lock);
                b = mca_alloc(c, op, k, level);
                mutex_unlock(&c->bucket_lock);

                if (!b)
                        goto retry;
                if (IS_ERR(b))
                        return b;

                bch_btree_node_read(b);

                if (!write)
                        downgrade_write(&b->lock);
        } else {
                rw_lock(write, b, level);
                if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
                        rw_unlock(write, b);
                        goto retry;
                }
                BUG_ON(b->level != level);
        }

        b->accessed = 1;

        for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
                prefetch(b->keys.set[i].tree);
                prefetch(b->keys.set[i].data);
        }

        for (; i <= b->keys.nsets; i++)
                prefetch(b->keys.set[i].data);

        if (btree_node_io_error(b)) {
                rw_unlock(write, b);
                return ERR_PTR(-EIO);
        }

        BUG_ON(!b->written);

        return b;
}

static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
{
        struct btree *b;

        mutex_lock(&c->bucket_lock);
        b = mca_alloc(c, NULL, k, level);
        mutex_unlock(&c->bucket_lock);

        if (!IS_ERR_OR_NULL(b)) {
                bch_btree_node_read(b);
                rw_unlock(true, b);
        }
}

/* Btree alloc */

static void btree_node_free(struct btree *b)
{
        trace_bcache_btree_node_free(b);

        BUG_ON(b == b->c->root);

        mutex_lock(&b->write_lock);

        if (btree_node_dirty(b))
                btree_complete_write(b, btree_current_write(b));
        clear_bit(BTREE_NODE_dirty, &b->flags);

        mutex_unlock(&b->write_lock);

        cancel_delayed_work(&b->work);

        mutex_lock(&b->c->bucket_lock);
        bch_bucket_free(b->c, &b->key);
        mca_bucket_free(b);
        mutex_unlock(&b->c->bucket_lock);
}

struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
                                     int level, bool wait)
{
        BKEY_PADDED(key) k;
        struct btree *b = ERR_PTR(-EAGAIN);

        mutex_lock(&c->bucket_lock);
retry:
        if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
                goto err;

        bkey_put(c, &k.key);
        SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

        b = mca_alloc(c, op, &k.key, level);
        if (IS_ERR(b))
                goto err_free;

        if (!b) {
                cache_bug(c,
                        "Tried to allocate bucket that was in btree cache");
                goto retry;
        }

        b->accessed = 1;
        bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));

        mutex_unlock(&c->bucket_lock);

        trace_bcache_btree_node_alloc(b);
        return b;
err_free:
        bch_bucket_free(c, &k.key);
err:
        mutex_unlock(&c->bucket_lock);

        trace_bcache_btree_node_alloc_fail(b);
        return b;
}

static struct btree *bch_btree_node_alloc(struct cache_set *c,
                                          struct btree_op *op, int level)
{
        return __bch_btree_node_alloc(c, op, level, op != NULL);
}

static struct btree *btree_node_alloc_replacement(struct btree *b,
                                                  struct btree_op *op)
{
        struct btree *n = bch_btree_node_alloc(b->c, op, b->level);
        if (!IS_ERR_OR_NULL(n)) {
                mutex_lock(&n->write_lock);
                bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
                bkey_copy_key(&n->key, &b->key);
                mutex_unlock(&n->write_lock);
        }

        return n;
}

static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
        unsigned i;

        mutex_lock(&b->c->bucket_lock);

        atomic_inc(&b->c->prio_blocked);

        bkey_copy(k, &b->key);
        bkey_copy_key(k, &ZERO_KEY);

        for (i = 0; i < KEY_PTRS(k); i++)
                SET_PTR_GEN(k, i,
                            bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
                                        PTR_BUCKET(b->c, &b->key, i)));

        mutex_unlock(&b->c->bucket_lock);
}

static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
        struct cache_set *c = b->c;
        struct cache *ca;
        unsigned i, reserve = (c->root->level - b->level) * 2 + 1;

        mutex_lock(&c->bucket_lock);

        for_each_cache(ca, c, i)
                if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
                        if (op)
                                prepare_to_wait(&c->btree_cache_wait, &op->wait,
                                                TASK_UNINTERRUPTIBLE);
                        mutex_unlock(&c->bucket_lock);
                        return -EINTR;
                }

        mutex_unlock(&c->bucket_lock);

        return mca_cannibalize_lock(b->c, op);
}

/* Garbage collection */

static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
                                    struct bkey *k)
{
        uint8_t stale = 0;
        unsigned i;
        struct bucket *g;

        /*
         * ptr_invalid() can't return true for the keys that mark btree nodes as
         * freed, but since ptr_bad() returns true we'll never actually use them
         * for anything and thus we don't want to mark their pointers here
         */
        if (!bkey_cmp(k, &ZERO_KEY))
                return stale;

        for (i = 0; i < KEY_PTRS(k); i++) {
                if (!ptr_available(c, k, i))
                        continue;

                g = PTR_BUCKET(c, k, i);

                if (gen_after(g->last_gc, PTR_GEN(k, i)))
                        g->last_gc = PTR_GEN(k, i);

                if (ptr_stale(c, k, i)) {
                        stale = max(stale, ptr_stale(c, k, i));
                        continue;
                }

                cache_bug_on(GC_MARK(g) &&
                             (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
                             c, "inconsistent ptrs: mark = %llu, level = %i",
                             GC_MARK(g), level);

                if (level)
                        SET_GC_MARK(g, GC_MARK_METADATA);
                else if (KEY_DIRTY(k))
                        SET_GC_MARK(g, GC_MARK_DIRTY);
                else if (!GC_MARK(g))
                        SET_GC_MARK(g, GC_MARK_RECLAIMABLE);

                /* guard against overflow */
                SET_GC_SECTORS_USED(g, min_t(unsigned,
                                             GC_SECTORS_USED(g) + KEY_SIZE(k),
                                             MAX_GC_SECTORS_USED));

                BUG_ON(!GC_SECTORS_USED(g));
        }

        return stale;
}

#define btree_mark_key(b, k)    __bch_btree_mark_key(b->c, b->level, k)

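/*
 * Variant of btree_mark_key() for before normal gc state is initialized: it
 * also resets the bucket's gen and prio from the key before marking it.
 */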
void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
{
        unsigned i;

        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(c, k, i) &&
                    !ptr_stale(c, k, i)) {
                        struct bucket *b = PTR_BUCKET(c, k, i);

                        b->gen = PTR_GEN(k, i);

                        if (level && bkey_cmp(k, &ZERO_KEY))
                                b->prio = BTREE_PRIO;
                        else if (!level && b->prio == BTREE_PRIO)
                                b->prio = INITIAL_PRIO;
                }

        __bch_btree_mark_key(c, level, k);
}

static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
        uint8_t stale = 0;
        unsigned keys = 0, good_keys = 0;
        struct bkey *k;
        struct btree_iter iter;
        struct bset_tree *t;

        gc->nodes++;

        for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
                stale = max(stale, btree_mark_key(b, k));
                keys++;

                if (bch_ptr_bad(&b->keys, k))
                        continue;

                gc->key_bytes += bkey_u64s(k);
                gc->nkeys++;
                good_keys++;

                gc->data += KEY_SIZE(k);
        }

        for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
                btree_bug_on(t->size &&
                             bset_written(&b->keys, t) &&
                             bkey_cmp(&b->key, &t->end) < 0,
                             b, "found short btree key in gc");

        if (b->c->gc_always_rewrite)
                return true;

        if (stale > 10)
                return true;

        if ((keys - good_keys) * 2 > keys)
                return true;

        return false;
}

#define GC_MERGE_NODES  4U

struct gc_merge_info {
        struct btree    *b;
        unsigned        keys;
};

static int bch_btree_insert_node(struct btree *, struct btree_op *,
                                 struct keylist *, atomic_t *, struct bkey *);

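/*
 * Try to coalesce the current window of adjacent nodes into fewer nodes:
 * only worth doing if the combined keys would fit in nodes - 1 nodes, each
 * at most two thirds full. Keys are packed towards the higher-index new
 * nodes, and new_nodes[0], which ends up empty, is freed.
 */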
static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
                             struct gc_stat *gc, struct gc_merge_info *r)
{
        unsigned i, nodes = 0, keys = 0, blocks;
        struct btree *new_nodes[GC_MERGE_NODES];
        struct keylist keylist;
        struct closure cl;
        struct bkey *k;

        bch_keylist_init(&keylist);

        if (btree_check_reserve(b, NULL))
                return 0;

        memset(new_nodes, 0, sizeof(new_nodes));
        closure_init_stack(&cl);

        while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
                keys += r[nodes++].keys;

        blocks = btree_default_blocks(b->c) * 2 / 3;

        if (nodes < 2 ||
            __set_blocks(b->keys.set[0].data, keys,
                         block_bytes(b->c)) > blocks * (nodes - 1))
                return 0;

        for (i = 0; i < nodes; i++) {
                new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
                if (IS_ERR_OR_NULL(new_nodes[i]))
                        goto out_nocoalesce;
        }

        /*
         * We have to check the reserve here, after we've allocated our new
         * nodes, to make sure the insert below will succeed - we also check
         * before as an optimization to potentially avoid a bunch of expensive
         * allocs/sorts
         */
        if (btree_check_reserve(b, NULL))
                goto out_nocoalesce;

        for (i = 0; i < nodes; i++)
                mutex_lock(&new_nodes[i]->write_lock);

        for (i = nodes - 1; i > 0; --i) {
                struct bset *n1 = btree_bset_first(new_nodes[i]);
                struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
                struct bkey *k, *last = NULL;

                keys = 0;

                if (i > 1) {
                        for (k = n2->start;
                             k < bset_bkey_last(n2);
                             k = bkey_next(k)) {
                                if (__set_blocks(n1, n1->keys + keys +
                                                 bkey_u64s(k),
                                                 block_bytes(b->c)) > blocks)
                                        break;

                                last = k;
                                keys += bkey_u64s(k);
                        }
                } else {
                        /*
                         * Last node we're not getting rid of - we're getting
                         * rid of the node at r[0]. Have to try and fit all of
                         * the remaining keys into this node; we can't ensure
                         * they will always fit due to rounding and variable
                         * length keys (shouldn't be possible in practice,
                         * though)
                         */
                        if (__set_blocks(n1, n1->keys + n2->keys,
                                         block_bytes(b->c)) >
                            btree_blocks(new_nodes[i]))
                                goto out_nocoalesce;

                        keys = n2->keys;
                        /* Take the key of the node we're getting rid of */
                        last = &r->b->key;
                }

                BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
                       btree_blocks(new_nodes[i]));

                if (last)
                        bkey_copy_key(&new_nodes[i]->key, last);

                memcpy(bset_bkey_last(n1),
                       n2->start,
                       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);

                n1->keys += keys;
                r[i].keys = n1->keys;

                memmove(n2->start,
                        bset_bkey_idx(n2, keys),
                        (void *) bset_bkey_last(n2) -
                        (void *) bset_bkey_idx(n2, keys));

                n2->keys -= keys;

                if (__bch_keylist_realloc(&keylist,
                                          bkey_u64s(&new_nodes[i]->key)))
                        goto out_nocoalesce;

                bch_btree_node_write(new_nodes[i], &cl);
                bch_keylist_add(&keylist, &new_nodes[i]->key);
        }

        for (i = 0; i < nodes; i++)
                mutex_unlock(&new_nodes[i]->write_lock);

        closure_sync(&cl);

        /* We emptied out this node */
        BUG_ON(btree_bset_first(new_nodes[0])->keys);
        btree_node_free(new_nodes[0]);
        rw_unlock(true, new_nodes[0]);

        for (i = 0; i < nodes; i++) {
                if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
                        goto out_nocoalesce;

                make_btree_freeing_key(r[i].b, keylist.top);
                bch_keylist_push(&keylist);
        }

        bch_btree_insert_node(b, op, &keylist, NULL, NULL);
        BUG_ON(!bch_keylist_empty(&keylist));

        for (i = 0; i < nodes; i++) {
                btree_node_free(r[i].b);
                rw_unlock(true, r[i].b);

                r[i].b = new_nodes[i];
        }

        memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
        r[nodes - 1].b = ERR_PTR(-EINTR);

        trace_bcache_btree_gc_coalesce(nodes);
        gc->nodes--;

        bch_keylist_free(&keylist);

        /* Invalidated our iterator */
        return -EINTR;

out_nocoalesce:
        closure_sync(&cl);

        while ((k = bch_keylist_pop(&keylist)))
                if (!bkey_cmp(k, &ZERO_KEY))
                        atomic_dec(&b->c->prio_blocked);
        /* Drain the keylist before freeing it, not after */
        bch_keylist_free(&keylist);

        for (i = 0; i < nodes; i++)
                if (!IS_ERR_OR_NULL(new_nodes[i])) {
                        btree_node_free(new_nodes[i]);
                        rw_unlock(true, new_nodes[i]);
                }
        return 0;
}

static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
                                 struct btree *replace)
{
        struct keylist keys;
        struct btree *n;

        if (btree_check_reserve(b, NULL))
                return 0;

        n = btree_node_alloc_replacement(replace, NULL);
        if (IS_ERR_OR_NULL(n))
                return 0;

        /* recheck reserve after allocating replacement node */
        if (btree_check_reserve(b, NULL)) {
                btree_node_free(n);
                rw_unlock(true, n);
                return 0;
        }

        bch_btree_node_write_sync(n);

        bch_keylist_init(&keys);
        bch_keylist_add(&keys, &n->key);

        make_btree_freeing_key(replace, keys.top);
        bch_keylist_push(&keys);

        bch_btree_insert_node(b, op, &keys, NULL, NULL);
        BUG_ON(!bch_keylist_empty(&keys));

        btree_node_free(replace);
        rw_unlock(true, n);

        /* Invalidated our iterator */
        return -EINTR;
}

static unsigned btree_gc_count_keys(struct btree *b)
{
        struct bkey *k;
        struct btree_iter iter;
        unsigned ret = 0;

        for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
                ret += bkey_u64s(k);

        return ret;
}

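/*
 * Walk one level of the btree keeping a sliding window of the last
 * GC_MERGE_NODES nodes in r[], so btree_gc_coalesce() can try to merge
 * neighbours; returns -EINTR when the iterator was invalidated and the
 * caller should restart from c->gc_done.
 */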
1506 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1507                             struct closure *writes, struct gc_stat *gc)
1508 {
1509         int ret = 0;
1510         bool should_rewrite;
1511         struct bkey *k;
1512         struct btree_iter iter;
1513         struct gc_merge_info r[GC_MERGE_NODES];
1514         struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1515
1516         bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1517
1518         for (i = r; i < r + ARRAY_SIZE(r); i++)
1519                 i->b = ERR_PTR(-EINTR);
1520
1521         while (1) {
1522                 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1523                 if (k) {
1524                         r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1525                                                   true);
1526                         if (IS_ERR(r->b)) {
1527                                 ret = PTR_ERR(r->b);
1528                                 break;
1529                         }
1530
1531                         r->keys = btree_gc_count_keys(r->b);
1532
1533                         ret = btree_gc_coalesce(b, op, gc, r);
1534                         if (ret)
1535                                 break;
1536                 }
1537
1538                 if (!last->b)
1539                         break;
1540
1541                 if (!IS_ERR(last->b)) {
1542                         should_rewrite = btree_gc_mark_node(last->b, gc);
1543                         if (should_rewrite) {
1544                                 ret = btree_gc_rewrite_node(b, op, last->b);
1545                                 if (ret)
1546                                         break;
1547                         }
1548
1549                         if (last->b->level) {
1550                                 ret = btree_gc_recurse(last->b, op, writes, gc);
1551                                 if (ret)
1552                                         break;
1553                         }
1554
1555                         bkey_copy_key(&b->c->gc_done, &last->b->key);
1556
1557                         /*
1558                          * Must flush leaf nodes before gc ends, since replace
1559                          * operations aren't journalled
1560                          */
1561                         mutex_lock(&last->b->write_lock);
1562                         if (btree_node_dirty(last->b))
1563                                 bch_btree_node_write(last->b, writes);
1564                         mutex_unlock(&last->b->write_lock);
1565                         rw_unlock(true, last->b);
1566                 }
1567
1568                 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1569                 r->b = NULL;
1570
1571                 if (need_resched()) {
1572                         ret = -EAGAIN;
1573                         break;
1574                 }
1575         }
1576
1577         for (i = r; i < r + ARRAY_SIZE(r); i++)
1578                 if (!IS_ERR_OR_NULL(i->b)) {
1579                         mutex_lock(&i->b->write_lock);
1580                         if (btree_node_dirty(i->b))
1581                                 bch_btree_node_write(i->b, writes);
1582                         mutex_unlock(&i->b->write_lock);
1583                         rw_unlock(true, i->b);
1584                 }
1585
1586         return ret;
1587 }
1588
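/*
 * bch_btree_gc_root - garbage collect from the root node
 *
 * If the root itself should be rewritten, replace it and return -EINTR
 * so the caller restarts gc from the new root; otherwise mark the root's
 * key and recurse via btree_gc_recurse().
 */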
1589 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1590                              struct closure *writes, struct gc_stat *gc)
1591 {
1592         struct btree *n = NULL;
1593         int ret = 0;
1594         bool should_rewrite;
1595
1596         should_rewrite = btree_gc_mark_node(b, gc);
1597         if (should_rewrite) {
1598                 n = btree_node_alloc_replacement(b, NULL);
1599
1600                 if (!IS_ERR_OR_NULL(n)) {
1601                         bch_btree_node_write_sync(n);
1602
1603                         bch_btree_set_root(n);
1604                         btree_node_free(b);
1605                         rw_unlock(true, n);
1606
1607                         return -EINTR;
1608                 }
1609         }
1610
1611         __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1612
1613         if (b->level) {
1614                 ret = btree_gc_recurse(b, op, writes, gc);
1615                 if (ret)
1616                         return ret;
1617         }
1618
1619         bkey_copy_key(&b->c->gc_done, &b->key);
1620
1621         return ret;
1622 }
1623
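/*
 * btree_gc_start - invalidate the old gc marks before a new pass
 *
 * Under bucket_lock: resets gc_done to ZERO_KEY, saves each bucket's
 * current gen in last_gc, and clears the mark and sector count of every
 * unpinned bucket so they can be recomputed while walking the btree.
 */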
1624 static void btree_gc_start(struct cache_set *c)
1625 {
1626         struct cache *ca;
1627         struct bucket *b;
1628         unsigned i;
1629
1630         if (!c->gc_mark_valid)
1631                 return;
1632
1633         mutex_lock(&c->bucket_lock);
1634
1635         c->gc_mark_valid = 0;
1636         c->gc_done = ZERO_KEY;
1637
1638         for_each_cache(ca, c, i)
1639                 for_each_bucket(b, ca) {
1640                         b->last_gc = b->gen;
1641                         if (!atomic_read(&b->pin)) {
1642                                 SET_GC_MARK(b, 0);
1643                                 SET_GC_SECTORS_USED(b, 0);
1644                         }
1645                 }
1646
1647         mutex_unlock(&c->bucket_lock);
1648 }
1649
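/*
 * bch_btree_gc_finish - finish a gc pass and count reclaimable buckets
 *
 * Re-marks what the btree walk can't see: the uuid, superblock and prio
 * buckets are metadata, and buckets pointed to by dirty writeback keys
 * must not be reclaimed. Also recomputes c->need_gc from the bucket gc
 * gens. Returns how many buckets are unmarked or reclaimable.
 */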
1650 static size_t bch_btree_gc_finish(struct cache_set *c)
1651 {
1652         size_t available = 0;
1653         struct bucket *b;
1654         struct cache *ca;
1655         unsigned i;
1656
1657         mutex_lock(&c->bucket_lock);
1658
1659         set_gc_sectors(c);
1660         c->gc_mark_valid = 1;
1661         c->need_gc      = 0;
1662
1663         for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1664                 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1665                             GC_MARK_METADATA);
1666
1667         /* don't reclaim buckets to which writeback keys point */
1668         rcu_read_lock();
1669         for (i = 0; i < c->nr_uuids; i++) {
1670                 struct bcache_device *d = c->devices[i];
1671                 struct cached_dev *dc;
1672                 struct keybuf_key *w, *n;
1673                 unsigned j;
1674
1675                 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1676                         continue;
1677                 dc = container_of(d, struct cached_dev, disk);
1678
1679                 spin_lock(&dc->writeback_keys.lock);
1680                 rbtree_postorder_for_each_entry_safe(w, n,
1681                                         &dc->writeback_keys.keys, node)
1682                         for (j = 0; j < KEY_PTRS(&w->key); j++)
1683                                 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1684                                             GC_MARK_DIRTY);
1685                 spin_unlock(&dc->writeback_keys.lock);
1686         }
1687         rcu_read_unlock();
1688
1689         for_each_cache(ca, c, i) {
1690                 uint64_t *k;
1691
1692                 ca->invalidate_needs_gc = 0;
1693
1694                 for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
1695                         SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1696
1697                 for (k = ca->prio_buckets;
1698                      k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
1699                         SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1700
1701                 for_each_bucket(b, ca) {
1702                         c->need_gc      = max(c->need_gc, bucket_gc_gen(b));
1703
1704                         if (atomic_read(&b->pin))
1705                                 continue;
1706
1707                         BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1708
1709                         if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1710                                 available++;
1711                 }
1712         }
1713
1714         mutex_unlock(&c->bucket_lock);
1715         return available;
1716 }
1717
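/*
 * bch_btree_gc - run a complete gc pass
 *
 * Retries the root walk from c->gc_done whenever it returns nonzero
 * (-EAGAIN from rescheduling, -EINTR after a root rewrite), then counts
 * available buckets, wakes the allocators, updates the gc stats and
 * kicks off moving gc.
 */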
1718 static void bch_btree_gc(struct cache_set *c)
1719 {
1720         int ret;
1721         unsigned long available;
1722         struct gc_stat stats;
1723         struct closure writes;
1724         struct btree_op op;
1725         uint64_t start_time = local_clock();
1726
1727         trace_bcache_gc_start(c);
1728
1729         memset(&stats, 0, sizeof(struct gc_stat));
1730         closure_init_stack(&writes);
1731         bch_btree_op_init(&op, SHRT_MAX);
1732
1733         btree_gc_start(c);
1734
1735         do {
1736                 ret = btree_root(gc_root, c, &op, &writes, &stats);
1737                 closure_sync(&writes);
1738
1739                 if (ret && ret != -EAGAIN)
1740                         pr_warn("gc failed!");
1741         } while (ret);
1742
1743         available = bch_btree_gc_finish(c);
1744         wake_up_allocators(c);
1745
1746         bch_time_stats_update(&c->btree_gc_time, start_time);
1747
1748         stats.key_bytes *= sizeof(uint64_t);
1749         stats.data      <<= 9;
1750         stats.in_use    = (c->nbuckets - available) * 100 / c->nbuckets;
1751         memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1752
1753         trace_bcache_gc_end(c);
1754
1755         bch_moving_gc(c);
1756 }
1757
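/*
 * The gc thread runs a pass and then sleeps until it is woken up, or
 * reruns immediately if any cache set its invalidate_needs_gc flag while
 * gc was running.
 */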
1758 static int bch_gc_thread(void *arg)
1759 {
1760         struct cache_set *c = arg;
1761         struct cache *ca;
1762         unsigned i;
1763
1764         while (1) {
1765 again:
1766                 bch_btree_gc(c);
1767
1768                 set_current_state(TASK_INTERRUPTIBLE);
1769                 if (kthread_should_stop())
1770                         break;
1771
1772                 mutex_lock(&c->bucket_lock);
1773
1774                 for_each_cache(ca, c, i)
1775                         if (ca->invalidate_needs_gc) {
1776                                 mutex_unlock(&c->bucket_lock);
1777                                 set_current_state(TASK_RUNNING);
1778                                 goto again;
1779                         }
1780
1781                 mutex_unlock(&c->bucket_lock);
1782
1783                 try_to_freeze();
1784                 schedule();
1785         }
1786
1787         return 0;
1788 }
1789
1790 int bch_gc_thread_start(struct cache_set *c)
1791 {
1792         c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
1793         if (IS_ERR(c->gc_thread))
1794                 return PTR_ERR(c->gc_thread);
1795
1796         set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
1797         return 0;
1798 }
1799
1800 /* Initial partial gc */
1801
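/*
 * Walk the btree at registration time, marking every valid key so bucket
 * gens and sector counts are correct before the allocator starts; the
 * next child node is prefetched while the previous one is recursed into.
 */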
1802 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1803 {
1804         int ret = 0;
1805         struct bkey *k, *p = NULL;
1806         struct btree_iter iter;
1807
1808         for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1809                 bch_initial_mark_key(b->c, b->level, k);
1810
1811         bch_initial_mark_key(b->c, b->level + 1, &b->key);
1812
1813         if (b->level) {
1814                 bch_btree_iter_init(&b->keys, &iter, NULL);
1815
1816                 do {
1817                         k = bch_btree_iter_next_filter(&iter, &b->keys,
1818                                                        bch_ptr_bad);
1819                         if (k)
1820                                 btree_node_prefetch(b->c, k, b->level - 1);
1821
1822                         if (p)
1823                                 ret = btree(check_recurse, p, b, op);
1824
1825                         p = k;
1826                 } while (p && !ret);
1827         }
1828
1829         return ret;
1830 }
1831
1832 int bch_btree_check(struct cache_set *c)
1833 {
1834         struct btree_op op;
1835
1836         bch_btree_op_init(&op, SHRT_MAX);
1837
1838         return btree_root(check_recurse, c, &op);
1839 }
1840
1841 void bch_initial_gc_finish(struct cache_set *c)
1842 {
1843         struct cache *ca;
1844         struct bucket *b;
1845         unsigned i;
1846
1847         bch_btree_gc_finish(c);
1848
1849         mutex_lock(&c->bucket_lock);
1850
1851         /*
1852          * We need to put some unused buckets directly on the prio freelist in
1853          * order to get the allocator thread started - it needs freed buckets in
1854          * order to rewrite the prios and gens, and it needs to rewrite prios
1855          * and gens in order to free buckets.
1856          *
1857          * This is only safe for buckets that have no live data in them, which
1858          * there should always be some of.
1859          */
1860         for_each_cache(ca, c, i) {
1861                 for_each_bucket(b, ca) {
1862                         if (fifo_full(&ca->free[RESERVE_PRIO]))
1863                                 break;
1864
1865                         if (bch_can_invalidate_bucket(ca, b) &&
1866                             !GC_MARK(b)) {
1867                                 __bch_invalidate_one_bucket(ca, b);
1868                                 fifo_push(&ca->free[RESERVE_PRIO],
1869                                           b - ca->buckets);
1870                         }
1871                 }
1872         }
1873
1874         mutex_unlock(&c->bucket_lock);
1875 }
1876
1877 /* Btree insertion */
1878
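/*
 * Insert a single key into @b; @k must sort before the node's end key.
 * Returns false if nothing was inserted (e.g. @replace_key didn't
 * match).
 */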
1879 static bool btree_insert_key(struct btree *b, struct bkey *k,
1880                              struct bkey *replace_key)
1881 {
1882         unsigned status;
1883
1884         BUG_ON(bkey_cmp(k, &b->key) > 0);
1885
1886         status = bch_btree_insert_key(&b->keys, k, replace_key);
1887         if (status != BTREE_INSERT_STATUS_NO_INSERT) {
1888                 bch_check_keys(&b->keys, "%u for %s", status,
1889                                replace_key ? "replace" : "insert");
1890
1891                 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
1892                                               status);
1893                 return true;
1894         }
1895         return false;
1896 }
1897
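/*
 * How many u64s of key space are left for insertions in @b. Extent
 * nodes keep KEY_MAX_U64S in reserve, since an insert landing in the
 * middle of an existing extent adds a key for the split-off piece.
 */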
1898 static size_t insert_u64s_remaining(struct btree *b)
1899 {
1900         long ret = bch_btree_keys_u64s_remaining(&b->keys);
1901
1902         /*
1903          * Might land in the middle of an existing extent and have to split it
1904          */
1905         if (b->keys.ops->is_extents)
1906                 ret -= KEY_MAX_U64S;
1907
1908         return max(ret, 0L);
1909 }
1910
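/*
 * Insert the keys from @insert_keys that belong in @b: keys entirely
 * within the node go in whole; a key straddling the node's end key is
 * split, the front half inserted here and the rest left for the next
 * node. Sets op->insert_collision if nothing was inserted.
 */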
1911 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1912                                   struct keylist *insert_keys,
1913                                   struct bkey *replace_key)
1914 {
1915         bool ret = false;
1916         int oldsize = bch_count_data(&b->keys);
1917
1918         while (!bch_keylist_empty(insert_keys)) {
1919                 struct bkey *k = insert_keys->keys;
1920
1921                 if (bkey_u64s(k) > insert_u64s_remaining(b))
1922                         break;
1923
1924                 if (bkey_cmp(k, &b->key) <= 0) {
1925                         if (!b->level)
1926                                 bkey_put(b->c, k);
1927
1928                         ret |= btree_insert_key(b, k, replace_key);
1929                         bch_keylist_pop_front(insert_keys);
1930                 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
1931                         BKEY_PADDED(key) temp;
1932                         bkey_copy(&temp.key, insert_keys->keys);
1933
1934                         bch_cut_back(&b->key, &temp.key);
1935                         bch_cut_front(&b->key, insert_keys->keys);
1936
1937                         ret |= btree_insert_key(b, &temp.key, replace_key);
1938                         break;
1939                 } else {
1940                         break;
1941                 }
1942         }
1943
1944         if (!ret)
1945                 op->insert_collision = true;
1946
1947         BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
1948
1949         BUG_ON(bch_count_data(&b->keys) < oldsize);
1950         return ret;
1951 }
1952
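/*
 * Split (or just compact) @b: a replacement node n1 is always allocated,
 * and if it would be more than ~80% full we split, keeping roughly the
 * first 3/5 of the keys in n1 and moving the rest to a second node n2;
 * a new root n3 is allocated when @b had no parent. The new nodes' keys
 * are then inserted into the parent, and @b is freed.
 */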
1953 static int btree_split(struct btree *b, struct btree_op *op,
1954                        struct keylist *insert_keys,
1955                        struct bkey *replace_key)
1956 {
1957         bool split;
1958         struct btree *n1, *n2 = NULL, *n3 = NULL;
1959         uint64_t start_time = local_clock();
1960         struct closure cl;
1961         struct keylist parent_keys;
1962
1963         closure_init_stack(&cl);
1964         bch_keylist_init(&parent_keys);
1965
1966         if (btree_check_reserve(b, op)) {
1967                 if (!b->level)
1968                         return -EINTR;
1969                 else
1970                         WARN(1, "insufficient reserve for split\n");
1971         }
1972
1973         n1 = btree_node_alloc_replacement(b, op);
1974         if (IS_ERR(n1))
1975                 goto err;
1976
1977         split = set_blocks(btree_bset_first(n1),
1978                            block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
1979
1980         if (split) {
1981                 unsigned keys = 0;
1982
1983                 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
1984
1985                 n2 = bch_btree_node_alloc(b->c, op, b->level);
1986                 if (IS_ERR(n2))
1987                         goto err_free1;
1988
1989                 if (!b->parent) {
1990                         n3 = bch_btree_node_alloc(b->c, op, b->level + 1);
1991                         if (IS_ERR(n3))
1992                                 goto err_free2;
1993                 }
1994
1995                 mutex_lock(&n1->write_lock);
1996                 mutex_lock(&n2->write_lock);
1997
1998                 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
1999
2000                 /*
2001                  * Has to be a linear search because we don't have an auxiliary
2002                  * search tree yet
2003                  */
2004
2005                 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2006                         keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2007                                                         keys));
2008
2009                 bkey_copy_key(&n1->key,
2010                               bset_bkey_idx(btree_bset_first(n1), keys));
2011                 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2012
2013                 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2014                 btree_bset_first(n1)->keys = keys;
2015
2016                 memcpy(btree_bset_first(n2)->start,
2017                        bset_bkey_last(btree_bset_first(n1)),
2018                        btree_bset_first(n2)->keys * sizeof(uint64_t));
2019
2020                 bkey_copy_key(&n2->key, &b->key);
2021
2022                 bch_keylist_add(&parent_keys, &n2->key);
2023                 bch_btree_node_write(n2, &cl);
2024                 mutex_unlock(&n2->write_lock);
2025                 rw_unlock(true, n2);
2026         } else {
2027                 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2028
2029                 mutex_lock(&n1->write_lock);
2030                 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2031         }
2032
2033         bch_keylist_add(&parent_keys, &n1->key);
2034         bch_btree_node_write(n1, &cl);
2035         mutex_unlock(&n1->write_lock);
2036
2037         if (n3) {
2038                 /* Depth increases, make a new root */
2039                 mutex_lock(&n3->write_lock);
2040                 bkey_copy_key(&n3->key, &MAX_KEY);
2041                 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2042                 bch_btree_node_write(n3, &cl);
2043                 mutex_unlock(&n3->write_lock);
2044
2045                 closure_sync(&cl);
2046                 bch_btree_set_root(n3);
2047                 rw_unlock(true, n3);
2048         } else if (!b->parent) {
2049                 /* Root filled up but didn't need to be split */
2050                 closure_sync(&cl);
2051                 bch_btree_set_root(n1);
2052         } else {
2053                 /* Split a non-root node */
2054                 closure_sync(&cl);
2055                 make_btree_freeing_key(b, parent_keys.top);
2056                 bch_keylist_push(&parent_keys);
2057
2058                 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2059                 BUG_ON(!bch_keylist_empty(&parent_keys));
2060         }
2061
2062         btree_node_free(b);
2063         rw_unlock(true, n1);
2064
2065         bch_time_stats_update(&b->c->btree_split_time, start_time);
2066
2067         return 0;
2068 err_free2:
2069         bkey_put(b->c, &n2->key);
2070         btree_node_free(n2);
2071         rw_unlock(true, n2);
2072 err_free1:
2073         bkey_put(b->c, &n1->key);
2074         btree_node_free(n1);
2075         rw_unlock(true, n1);
2076 err:
2077         WARN(1, "bcache: btree split failed (level %u)", b->level);
2078
2079         if (n3 == ERR_PTR(-EAGAIN) ||
2080             n2 == ERR_PTR(-EAGAIN) ||
2081             n1 == ERR_PTR(-EAGAIN))
2082                 return -EAGAIN;
2083
2084         return -ENOMEM;
2085 }
2086
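/*
 * Insert @insert_keys into @b, splitting if there isn't enough room.
 * When a split is needed but can't be done here, returns -EAGAIN if
 * we're running under generic_make_request() (current->bio_list is set)
 * and must not block, or -EINTR if the caller has to retry holding the
 * root lock; op->lock is raised accordingly.
 */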
2087 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2088                                  struct keylist *insert_keys,
2089                                  atomic_t *journal_ref,
2090                                  struct bkey *replace_key)
2091 {
2092         struct closure cl;
2093
2094         BUG_ON(b->level && replace_key);
2095
2096         closure_init_stack(&cl);
2097
2098         mutex_lock(&b->write_lock);
2099
2100         if (write_block(b) != btree_bset_last(b) &&
2101             b->keys.last_set_unwritten)
2102                 bch_btree_init_next(b); /* just wrote a set */
2103
2104         if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2105                 mutex_unlock(&b->write_lock);
2106                 goto split;
2107         }
2108
2109         BUG_ON(write_block(b) != btree_bset_last(b));
2110
2111         if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2112                 if (!b->level)
2113                         bch_btree_leaf_dirty(b, journal_ref);
2114                 else
2115                         bch_btree_node_write(b, &cl);
2116         }
2117
2118         mutex_unlock(&b->write_lock);
2119
2120         /* wait for btree node write if necessary, after unlock */
2121         closure_sync(&cl);
2122
2123         return 0;
2124 split:
2125         if (current->bio_list) {
2126                 op->lock = b->c->root->level + 1;
2127                 return -EAGAIN;
2128         } else if (op->lock <= b->c->root->level) {
2129                 op->lock = b->c->root->level + 1;
2130                 return -EINTR;
2131         } else {
2132                 /* Invalidated all iterators */
2133                 int ret = btree_split(b, op, insert_keys, replace_key);
2134
2135                 if (bch_keylist_empty(insert_keys))
2136                         return 0;
2137                 else if (!ret)
2138                         return -EINTR;
2139                 return ret;
2140         }
2141 }
2142
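/*
 * Insert a "check key" for a cache miss: a key whose single pointer is
 * random bytes on the special PTR_CHECK_DEV, so that a later replace of
 * this key fails if something else wrote to the range in the meantime.
 * If @b was only read locked (op->lock == -1), the lock is upgraded;
 * we bail out with -EINTR if the node changed under us.
 */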
2143 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2144                                struct bkey *check_key)
2145 {
2146         int ret = -EINTR;
2147         uint64_t btree_ptr = b->key.ptr[0];
2148         unsigned long seq = b->seq;
2149         struct keylist insert;
2150         bool upgrade = op->lock == -1;
2151
2152         bch_keylist_init(&insert);
2153
2154         if (upgrade) {
2155                 rw_unlock(false, b);
2156                 rw_lock(true, b, b->level);
2157
2158                 if (b->key.ptr[0] != btree_ptr ||
2159                     b->seq != seq + 1)
2160                         goto out;
2161         }
2162
2163         SET_KEY_PTRS(check_key, 1);
2164         get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2165
2166         SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2167
2168         bch_keylist_add(&insert, check_key);
2169
2170         ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2171
2172         BUG_ON(!ret && !bch_keylist_empty(&insert));
2173 out:
2174         if (upgrade)
2175                 downgrade_write(&b->lock);
2176         return ret;
2177 }
2178
2179 struct btree_insert_op {
2180         struct btree_op op;
2181         struct keylist  *keys;
2182         atomic_t        *journal_ref;
2183         struct bkey     *replace_key;
2184 };
2185
2186 static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2187 {
2188         struct btree_insert_op *op = container_of(b_op,
2189                                         struct btree_insert_op, op);
2190
2191         int ret = bch_btree_insert_node(b, &op->op, op->keys,
2192                                         op->journal_ref, op->replace_key);
2193         if (ret && !bch_keylist_empty(op->keys))
2194                 return ret;
2195         else
2196                 return MAP_DONE;
2197 }
2198
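/*
 * bch_btree_insert - insert keys into the btree, splitting nodes as
 * needed
 *
 * Maps btree_insert_fn() over the leaf nodes covered by @keys until the
 * keylist is drained; on error the remaining keys are dropped with
 * bkey_put(). Returns -ESRCH on an insert collision (a replace that
 * didn't match). An illustrative sketch of a caller, where @k stands for
 * a bkey built by the caller (not code from this file):
 *
 *	struct keylist keys;
 *	int ret;
 *
 *	bch_keylist_init(&keys);
 *	bch_keylist_add(&keys, k);
 *	ret = bch_btree_insert(c, &keys, NULL, NULL);
 */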
2199 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2200                      atomic_t *journal_ref, struct bkey *replace_key)
2201 {
2202         struct btree_insert_op op;
2203         int ret = 0;
2204
2205         BUG_ON(current->bio_list);
2206         BUG_ON(bch_keylist_empty(keys));
2207
2208         bch_btree_op_init(&op.op, 0);
2209         op.keys         = keys;
2210         op.journal_ref  = journal_ref;
2211         op.replace_key  = replace_key;
2212
2213         while (!ret && !bch_keylist_empty(keys)) {
2214                 op.op.lock = 0;
2215                 ret = bch_btree_map_leaf_nodes(&op.op, c,
2216                                                &START_KEY(keys->keys),
2217                                                btree_insert_fn);
2218         }
2219
2220         if (ret) {
2221                 struct bkey *k;
2222
2223                 pr_err("error %i", ret);
2224
2225                 while ((k = bch_keylist_pop(keys)))
2226                         bkey_put(c, k);
2227         } else if (op.op.insert_collision)
2228                 ret = -ESRCH;
2229
2230         return ret;
2231 }
2232
2233 void bch_btree_set_root(struct btree *b)
2234 {
2235         unsigned i;
2236         struct closure cl;
2237
2238         closure_init_stack(&cl);
2239
2240         trace_bcache_btree_set_root(b);
2241
2242         BUG_ON(!b->written);
2243
2244         for (i = 0; i < KEY_PTRS(&b->key); i++)
2245                 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2246
2247         mutex_lock(&b->c->bucket_lock);
2248         list_del_init(&b->list);
2249         mutex_unlock(&b->c->bucket_lock);
2250
2251         b->c->root = b;
2252
2253         bch_journal_meta(b->c, &cl);
2254         closure_sync(&cl);
2255 }
2256
2257 /* Map across nodes or keys */
2258
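/*
 * The map functions walk the btree in sorted order starting from @from,
 * calling @fn until it returns something other than MAP_CONTINUE.
 * Children are visited before their parent, so with MAP_ALL_NODES an
 * interior node's callback runs after all of its children's.
 */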
2259 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2260                                        struct bkey *from,
2261                                        btree_map_nodes_fn *fn, int flags)
2262 {
2263         int ret = MAP_CONTINUE;
2264
2265         if (b->level) {
2266                 struct bkey *k;
2267                 struct btree_iter iter;
2268
2269                 bch_btree_iter_init(&b->keys, &iter, from);
2270
2271                 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2272                                                        bch_ptr_bad))) {
2273                         ret = btree(map_nodes_recurse, k, b,
2274                                     op, from, fn, flags);
2275                         from = NULL;
2276
2277                         if (ret != MAP_CONTINUE)
2278                                 return ret;
2279                 }
2280         }
2281
2282         if (!b->level || flags == MAP_ALL_NODES)
2283                 ret = fn(op, b);
2284
2285         return ret;
2286 }
2287
2288 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2289                           struct bkey *from, btree_map_nodes_fn *fn, int flags)
2290 {
2291         return btree_root(map_nodes_recurse, c, op, from, fn, flags);
2292 }
2293
2294 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2295                                       struct bkey *from, btree_map_keys_fn *fn,
2296                                       int flags)
2297 {
2298         int ret = MAP_CONTINUE;
2299         struct bkey *k;
2300         struct btree_iter iter;
2301
2302         bch_btree_iter_init(&b->keys, &iter, from);
2303
2304         while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2305                 ret = !b->level
2306                         ? fn(op, b, k)
2307                         : btree(map_keys_recurse, k, b, op, from, fn, flags);
2308                 from = NULL;
2309
2310                 if (ret != MAP_CONTINUE)
2311                         return ret;
2312         }
2313
2314         if (!b->level && (flags & MAP_END_KEY))
2315                 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2316                                      KEY_OFFSET(&b->key), 0));
2317
2318         return ret;
2319 }
2320
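/*
 * bch_btree_map_keys - call @fn on every key >= @from, in sorted order.
 * With MAP_END_KEY, @fn is additionally called with a zero size key at
 * each leaf node's end key; see bch_refill_keybuf() below for a typical
 * caller.
 */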
2321 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2322                        struct bkey *from, btree_map_keys_fn *fn, int flags)
2323 {
2324         return btree_root(map_keys_recurse, c, op, from, fn, flags);
2325 }
2326
2327 /* Keybuf code */
2328
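/*
 * A keybuf buffers up a batch of keys matching a predicate, refilled by
 * scanning the btree - writeback, for instance, keeps the dirty keys
 * it's flushing in one. The keys live in an rbtree so overlaps are easy
 * to find, and a non-NULL w->private marks a key as currently in use.
 */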
2329 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2330 {
2331         /* Overlapping keys compare equal */
2332         if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2333                 return -1;
2334         if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2335                 return 1;
2336         return 0;
2337 }
2338
2339 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2340                                             struct keybuf_key *r)
2341 {
2342         return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2343 }
2344
2345 struct refill {
2346         struct btree_op op;
2347         unsigned        nr_found;
2348         struct keybuf   *buf;
2349         struct bkey     *end;
2350         keybuf_pred_fn  *pred;
2351 };
2352
2353 static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2354                             struct bkey *k)
2355 {
2356         struct refill *refill = container_of(op, struct refill, op);
2357         struct keybuf *buf = refill->buf;
2358         int ret = MAP_CONTINUE;
2359
2360         if (bkey_cmp(k, refill->end) >= 0) {
2361                 ret = MAP_DONE;
2362                 goto out;
2363         }
2364
2365         if (!KEY_SIZE(k)) /* end key */
2366                 goto out;
2367
2368         if (refill->pred(buf, k)) {
2369                 struct keybuf_key *w;
2370
2371                 spin_lock(&buf->lock);
2372
2373                 w = array_alloc(&buf->freelist);
2374                 if (!w) {
2375                         spin_unlock(&buf->lock);
2376                         return MAP_DONE;
2377                 }
2378
2379                 w->private = NULL;
2380                 bkey_copy(&w->key, k);
2381
2382                 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2383                         array_free(&buf->freelist, w);
2384                 else
2385                         refill->nr_found++;
2386
2387                 if (array_freelist_empty(&buf->freelist))
2388                         ret = MAP_DONE;
2389
2390                 spin_unlock(&buf->lock);
2391         }
2392 out:
2393         buf->last_scanned = *k;
2394         return ret;
2395 }
2396
2397 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2398                        struct bkey *end, keybuf_pred_fn *pred)
2399 {
2400         struct bkey start = buf->last_scanned;
2401         struct refill refill;
2402
2403         cond_resched();
2404
2405         bch_btree_op_init(&refill.op, -1);
2406         refill.nr_found = 0;
2407         refill.buf      = buf;
2408         refill.end      = end;
2409         refill.pred     = pred;
2410
2411         bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2412                            refill_keybuf_fn, MAP_END_KEY);
2413
2414         trace_bcache_keyscan(refill.nr_found,
2415                              KEY_INODE(&start), KEY_OFFSET(&start),
2416                              KEY_INODE(&buf->last_scanned),
2417                              KEY_OFFSET(&buf->last_scanned));
2418
2419         spin_lock(&buf->lock);
2420
2421         if (!RB_EMPTY_ROOT(&buf->keys)) {
2422                 struct keybuf_key *w;
2423                 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2424                 buf->start      = START_KEY(&w->key);
2425
2426                 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2427                 buf->end        = w->key;
2428         } else {
2429                 buf->start      = MAX_KEY;
2430                 buf->end        = MAX_KEY;
2431         }
2432
2433         spin_unlock(&buf->lock);
2434 }
2435
2436 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2437 {
2438         rb_erase(&w->node, &buf->keys);
2439         array_free(&buf->freelist, w);
2440 }
2441
2442 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2443 {
2444         spin_lock(&buf->lock);
2445         __bch_keybuf_del(buf, w);
2446         spin_unlock(&buf->lock);
2447 }
2448
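/*
 * Returns true if any buffered key overlapping the range from @start to
 * @end is currently in use (w->private set); overlapping keys that
 * aren't in use are dropped from the keybuf.
 */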
2449 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2450                                   struct bkey *end)
2451 {
2452         bool ret = false;
2453         struct keybuf_key *p, *w, s;
2454         s.key = *start;
2455
2456         if (bkey_cmp(end, &buf->start) <= 0 ||
2457             bkey_cmp(start, &buf->end) >= 0)
2458                 return false;
2459
2460         spin_lock(&buf->lock);
2461         w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2462
2463         while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2464                 p = w;
2465                 w = RB_NEXT(w, node);
2466
2467                 if (p->private)
2468                         ret = true;
2469                 else
2470                         __bch_keybuf_del(buf, p);
2471         }
2472
2473         spin_unlock(&buf->lock);
2474         return ret;
2475 }
2476
2477 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2478 {
2479         struct keybuf_key *w;
2480
2481         spin_lock(&buf->lock);
2482         w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2483
2484         while (w && w->private)
2485                 w = RB_NEXT(w, node);
2486
2487         if (w)
2488                 w->private = ERR_PTR(-EINTR);
2489
2490         spin_unlock(&buf->lock);
2491         return w;
2492 }
2493
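/*
 * Like bch_keybuf_next(), but refills the keybuf from the btree when it
 * runs dry; returns NULL once last_scanned has passed @end.
 */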
2494 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2495                                           struct keybuf *buf,
2496                                           struct bkey *end,
2497                                           keybuf_pred_fn *pred)
2498 {
2499         struct keybuf_key *ret;
2500
2501         while (1) {
2502                 ret = bch_keybuf_next(buf);
2503                 if (ret)
2504                         break;
2505
2506                 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2507                         pr_debug("scan finished");
2508                         break;
2509                 }
2510
2511                 bch_refill_keybuf(c, buf, end, pred);
2512         }
2513
2514         return ret;
2515 }
2516
2517 void bch_keybuf_init(struct keybuf *buf)
2518 {
2519         buf->last_scanned       = MAX_KEY;
2520         buf->keys               = RB_ROOT;
2521
2522         spin_lock_init(&buf->lock);
2523         array_allocator_init(&buf->freelist);
2524 }