drivers/md/bcache/debug.c
/*
 * Assorted bcache debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

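/* Root of the bcache debugfs hierarchy, created by bch_debug_init() */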
static struct dentry *debug;

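/*
 * Return a human readable reason why @k is invalid or stale, or an empty
 * string if the key and all of its pointers check out.
 */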
const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
        unsigned i;

        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(c, k, i)) {
                        struct cache *ca = PTR_CACHE(c, k, i);
                        size_t bucket = PTR_BUCKET_NR(c, k, i);
                        size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

                        if (KEY_SIZE(k) + r > c->sb.bucket_size)
                                return "bad, length too big";
                        if (bucket < ca->sb.first_bucket)
                                return "bad, short offset";
                        if (bucket >= ca->sb.nbuckets)
                                return "bad, offset past end of device";
                        if (ptr_stale(c, k, i))
                                return "stale";
                }

        if (!bkey_cmp(k, &ZERO_KEY))
                return "bad, null key";
        if (!KEY_PTRS(k))
                return "bad, no pointers";
        if (!KEY_SIZE(k))
                return "zeroed key";
        return "";
}

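/*
 * Format @k into @buf, e.g. "5:102400 len 512 -> [0:2048 gen 3] dirty";
 * returns the number of characters written (scnprintf() semantics).
 */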
int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
{
        unsigned i = 0;
        char *out = buf, *end = buf + size;

#define p(...)  (out += scnprintf(out, end - out, __VA_ARGS__))

        p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k));

        if (KEY_PTRS(k))
                while (1) {
                        p("%llu:%llu gen %llu",
                          PTR_DEV(k, i), PTR_OFFSET(k, i), PTR_GEN(k, i));

                        if (++i == KEY_PTRS(k))
                                break;

                        p(", ");
                }

        p("]");

        if (KEY_DIRTY(k))
                p(" dirty");
        if (KEY_CSUM(k))
                p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
        return out - buf;
}

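/* Format the bucket number and level of @b, e.g. "42 level 1/2" */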
int bch_btree_to_text(char *buf, size_t size, const struct btree *b)
{
        return scnprintf(buf, size, "%zu level %i/%i",
                         PTR_BUCKET_NR(b->c, &b->key, 0),
                         b->level, b->c->root ? b->c->root->level : -1);
}

#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)

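/*
 * True if @k sorts after the key that follows it: on leaf nodes @k is
 * compared against the start of the next extent, since extents must not
 * overlap; on interior nodes it is compared against the next key itself.
 */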
static bool skipped_backwards(struct btree *b, struct bkey *k)
{
        return bkey_cmp(k, (!b->level)
                        ? &START_KEY(bkey_next(k))
                        : bkey_next(k)) > 0;
}

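/*
 * Dump every key in @i to the console, annotated with the bucket each
 * pointer refers to and anything bch_ptr_status() complains about.
 */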
static void dump_bset(struct btree *b, struct bset *i)
{
        struct bkey *k;
        unsigned j;
        char buf[80];

        for (k = i->start; k < end(i); k = bkey_next(k)) {
                bch_bkey_to_text(buf, sizeof(buf), k);
                printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
                       (uint64_t *) k - i->d, i->keys, buf);

                for (j = 0; j < KEY_PTRS(k); j++) {
                        size_t n = PTR_BUCKET_NR(b->c, k, j);
                        printk(" bucket %zu", n);

                        if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
                                printk(" prio %i",
                                       PTR_BUCKET(b->c, k, j)->prio);
                }

                printk(" %s\n", bch_ptr_status(b->c, k));

                if (bkey_next(k) < end(i) &&
                    skipped_backwards(b, k))
                        printk(KERN_ERR "Key skipped backwards\n");
        }
}

#endif

#ifdef CONFIG_BCACHE_DEBUG

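/*
 * Re-read btree node @b from disk into the cache set's preallocated
 * verify_data node and compare it against @new, the freshly sorted
 * in-memory version; dumps all three views and panics on a mismatch.
 */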
void bch_btree_verify(struct btree *b, struct bset *new)
{
        struct btree *v = b->c->verify_data;
        struct closure cl;
        closure_init_stack(&cl);

        if (!b->c->verify)
                return;

        closure_wait_event(&b->io.wait, &cl,
                           atomic_read(&b->io.cl.remaining) == -1);

        mutex_lock(&b->c->verify_lock);

        bkey_copy(&v->key, &b->key);
        v->written = 0;
        v->level = b->level;

        bch_btree_node_read(v);
        closure_wait_event(&v->io.wait, &cl,
                           atomic_read(&v->io.cl.remaining) == -1);

        if (new->keys != v->sets[0].data->keys ||
            memcmp(new->start,
                   v->sets[0].data->start,
                   (void *) end(new) - (void *) new->start)) {
                unsigned i, j;

                console_lock();

                printk(KERN_ERR "*** original memory node:\n");
                for (i = 0; i <= b->nsets; i++)
                        dump_bset(b, b->sets[i].data);

                printk(KERN_ERR "*** sorted memory node:\n");
                dump_bset(b, new);

                printk(KERN_ERR "*** on disk node:\n");
                dump_bset(v, v->sets[0].data);

                for (j = 0; j < new->keys; j++)
                        if (new->d[j] != v->sets[0].data->d[j])
                                break;

                console_unlock();
                panic("verify failed at %u\n", j);
        }

        mutex_unlock(&b->c->verify_lock);
}

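/* Completion for the verify read: drop the ref taken at submit time */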
static void data_verify_endio(struct bio *bio, int error)
{
        struct closure *cl = bio->bi_private;
        closure_put(cl);
}

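/*
 * Re-submit a clone of @s->orig_bio with freshly allocated pages and
 * memcmp() the result against the data the original read returned,
 * complaining on any mismatch.
 */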
void bch_data_verify(struct search *s)
{
        char name[BDEVNAME_SIZE];
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
        struct closure *cl = &s->cl;
        struct bio *check;
        struct bio_vec *bv;
        int i;

        if (!s->unaligned_bvec)
                bio_for_each_segment(bv, s->orig_bio, i)
                        bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;

        check = bio_clone(s->orig_bio, GFP_NOIO);
        if (!check)
                return;

        if (bio_alloc_pages(check, GFP_NOIO))
                goto out_put;

        check->bi_rw            = READ_SYNC;
        check->bi_private       = cl;
        check->bi_end_io        = data_verify_endio;

        closure_bio_submit(check, cl, &dc->disk);
        closure_sync(cl);

        bio_for_each_segment(bv, s->orig_bio, i) {
                void *p1 = kmap(bv->bv_page);
                void *p2 = kmap(check->bi_io_vec[i].bv_page);

                if (memcmp(p1 + bv->bv_offset,
                           p2 + bv->bv_offset,
                           bv->bv_len))
                        printk(KERN_ERR
                               "bcache (%s): verify failed at sector %llu\n",
                               bdevname(dc->bdev, name),
                               (uint64_t) s->orig_bio->bi_sector);

                kunmap(bv->bv_page);
                kunmap(check->bi_io_vec[i].bv_page);
        }

        __bio_for_each_segment(bv, check, i, 0)
                __free_page(bv->bv_page);
out_put:
        bio_put(check);
}

#endif

#ifdef CONFIG_BCACHE_EDEBUG

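/* Sum of KEY_SIZE() over all keys in a leaf node (0 for interior nodes) */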
unsigned bch_count_data(struct btree *b)
{
        unsigned ret = 0;
        struct btree_iter iter;
        struct bkey *k;

        if (!b->level)
                for_each_key(b, k, &iter)
                        ret += KEY_SIZE(k);
        return ret;
}

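/* Dump every bset in @b to the console, then panic with the given message */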
static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
                                   va_list args)
{
        unsigned i;
        char buf[80];

        console_lock();

        for (i = 0; i <= b->nsets; i++)
                dump_bset(b, b->sets[i].data);

        vprintk(fmt, args);

        console_unlock();

        bch_btree_to_text(buf, sizeof(buf), b);
        panic("at %s\n", buf);
}

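/* Verify that no key in @i sorts after its successor, panicking if one does */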
void bch_check_key_order_msg(struct btree *b, struct bset *i,
                             const char *fmt, ...)
{
        struct bkey *k;

        if (!i->keys)
                return;

        for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k))
                if (skipped_backwards(b, k)) {
                        va_list args;
                        va_start(args, fmt);

                        vdump_bucket_and_panic(b, fmt, args);
                        va_end(args);
                }
}

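/*
 * Walk all keys in a leaf node, checking both sort order and that no
 * two extents overlap; panics with a full dump of the node on failure.
 */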
void bch_check_keys(struct btree *b, const char *fmt, ...)
{
        va_list args;
        struct bkey *k, *p = NULL;
        struct btree_iter iter;

        if (b->level)
                return;

        for_each_key(b, k, &iter) {
                if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) {
                        printk(KERN_ERR "Keys out of order:\n");
                        goto bug;
                }

                if (bch_ptr_invalid(b, k))
                        continue;

                if (p && bkey_cmp(p, &START_KEY(k)) > 0) {
                        printk(KERN_ERR "Overlapping keys:\n");
                        goto bug;
                }
                p = k;
        }
        return;
bug:
        va_start(args, fmt);
        vdump_bucket_and_panic(b, fmt, args);
        va_end(args);
}

#endif

#ifdef CONFIG_DEBUG_FS

/* XXX: cache set refcounting */

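/*
 * State for an open debugfs dump file: formatted keys are staged in @buf
 * and drained by successive read() calls.
 */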
struct dump_iterator {
        char                    buf[PAGE_SIZE];
        size_t                  bytes;
        struct cache_set        *c;
        struct keybuf           keys;
};

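/* Keybuf predicate that matches every key, so the dump walks the whole keyspace */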
static bool dump_pred(struct keybuf *buf, struct bkey *k)
{
        return true;
}

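/*
 * debugfs read(): drain whatever is staged in the iterator's buffer, then
 * refill it one formatted key at a time by rescanning the keybuf.
 */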
static ssize_t bch_dump_read(struct file *file, char __user *buf,
                             size_t size, loff_t *ppos)
{
        struct dump_iterator *i = file->private_data;
        ssize_t ret = 0;
        char kbuf[80];

        while (size) {
                struct keybuf_key *w;
                unsigned bytes = min(i->bytes, size);

                if (copy_to_user(buf, i->buf, bytes))
                        return -EFAULT;

                ret      += bytes;
                buf      += bytes;
                size     -= bytes;
                i->bytes -= bytes;
                memmove(i->buf, i->buf + bytes, i->bytes);

                if (i->bytes)
                        break;

                w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred);
                if (!w)
                        break;

                bch_bkey_to_text(kbuf, sizeof(kbuf), &w->key);
                i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
                bch_keybuf_del(&i->keys, w);
        }

        return ret;
}

static int bch_dump_open(struct inode *inode, struct file *file)
{
        struct cache_set *c = inode->i_private;
        struct dump_iterator *i;

        i = kzalloc(sizeof(struct dump_iterator), GFP_KERNEL);
        if (!i)
                return -ENOMEM;

        file->private_data = i;
        i->c = c;
        bch_keybuf_init(&i->keys);
        i->keys.last_scanned = KEY(0, 0, 0);

        return 0;
}

static int bch_dump_release(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}

static const struct file_operations cache_set_debug_ops = {
        .owner          = THIS_MODULE,
        .open           = bch_dump_open,
        .read           = bch_dump_read,
        .release        = bch_dump_release
};

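/* Create a per cache set debugfs dump file, named after the set's UUID */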
void bch_debug_init_cache_set(struct cache_set *c)
{
        if (!IS_ERR_OR_NULL(debug)) {
                char name[50];
                snprintf(name, sizeof(name), "bcache-%pU", c->sb.set_uuid);

                c->debug = debugfs_create_file(name, 0400, debug, c,
                                               &cache_set_debug_ops);
        }
}

#endif

void bch_debug_exit(void)
{
        if (!IS_ERR_OR_NULL(debug))
                debugfs_remove_recursive(debug);
}

int __init bch_debug_init(struct kobject *kobj)
{
        debug = debugfs_create_dir("bcache", NULL);
        return 0;
}