2 * btree.c - NILFS B-tree.
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * Written by Koji Sato <koji@osrg.net>.
23 #include <linux/slab.h>
24 #include <linux/string.h>
25 #include <linux/errno.h>
26 #include <linux/pagevec.h>
/*
 * nilfs_btree_alloc_path - allocate a btree traversal path
 *
 * Allocates a path array from the nilfs_btree_path slab cache and
 * resets every level (DATA..MAX-1) to an empty state: no buffer heads,
 * index 0, invalid old/new ptr requests, no pending operation.
 * NOTE(review): the allocation-failure check and the final return of
 * @path are elided in this listing — confirm against the full source.
 */
34 static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
36 struct nilfs_btree_path *path;
37 int level = NILFS_BTREE_LEVEL_DATA;
39 path = kmem_cache_alloc(nilfs_btree_path_cache, GFP_NOFS);
43 for (; level < NILFS_BTREE_LEVEL_MAX; level++) {
44 path[level].bp_bh = NULL;
45 path[level].bp_sib_bh = NULL;
46 path[level].bp_index = 0;
/* NILFS_BMAP_INVALID_PTR marks "no block pointer reserved yet" */
47 path[level].bp_oldreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
48 path[level].bp_newreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
49 path[level].bp_op = NULL;
/*
 * nilfs_btree_free_path - release a btree traversal path
 *
 * Drops the buffer-head reference held at each level (brelse(NULL) is
 * a no-op, so untouched levels are safe) and returns the path array to
 * the slab cache.
 */
56 static void nilfs_btree_free_path(struct nilfs_btree_path *path)
58 int level = NILFS_BTREE_LEVEL_DATA;
60 for (; level < NILFS_BTREE_LEVEL_MAX; level++)
61 brelse(path[level].bp_bh);
63 kmem_cache_free(nilfs_btree_path_cache, path);
67 * B-tree node operations
/*
 * nilfs_btree_get_block - read in a btree node block
 * @btree: bmap the node belongs to
 * @ptr:   block pointer (btnode cache key)
 * @bhp:   place to store the resulting buffer head
 *
 * Submits a read through the per-inode btnode cache; -EEXIST from the
 * submit path means the block was already cached and is treated as
 * success.  A block that fails nilfs_btree_broken_node_block() has its
 * uptodate flag cleared so it is not trusted.
 * NOTE(review): the read-wait, error-return and success paths between
 * the visible lines are elided in this listing.
 */
69 static int nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
70 struct buffer_head **bhp)
72 struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
73 struct buffer_head *bh;
76 err = nilfs_btnode_submit_block(btnc, ptr, 0, bhp);
/* -EEXIST: block already resident in the btnode cache — not an error */
78 return err == -EEXIST ? 0 : err;
82 if (!buffer_uptodate(bh)) {
86 if (nilfs_btree_broken_node_block(bh)) {
87 clear_buffer_uptodate(bh);
/*
 * nilfs_btree_get_new_block - create a new btree node block
 * @btree: bmap the node will belong to
 * @ptr:   block pointer to create the block at
 * @bhp:   place to store the new buffer head
 *
 * Creates a fresh block in the btnode cache and marks it volatile
 * (i.e. not yet assigned a permanent disk location).
 * NOTE(review): the error check on the returned bh and the assignment
 * to *bhp are elided in this listing.
 */
94 static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
95 __u64 ptr, struct buffer_head **bhp)
97 struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
98 struct buffer_head *bh;
100 bh = nilfs_btnode_create_block(btnc, ptr);
104 set_buffer_nilfs_volatile(bh);
/* Return the raw bn_flags field of an on-disk btree node header. */
110 nilfs_btree_node_get_flags(const struct nilfs_btree_node *node)
112 return node->bn_flags;
/* Store @flags into the node header (single-byte field, no byte swap). */
116 nilfs_btree_node_set_flags(struct nilfs_btree_node *node, int flags)
118 node->bn_flags = flags;
/* Nonzero iff @node is the root node embedded in the bmap. */
121 static inline int nilfs_btree_node_root(const struct nilfs_btree_node *node)
123 return nilfs_btree_node_get_flags(node) & NILFS_BTREE_NODE_ROOT;
/* Return the tree level stored in the node header. */
127 nilfs_btree_node_get_level(const struct nilfs_btree_node *node)
129 return node->bn_level;
/* Store @level into the node header. */
133 nilfs_btree_node_set_level(struct nilfs_btree_node *node, int level)
135 node->bn_level = level;
/* Return the child count, converting from on-disk little endian. */
139 nilfs_btree_node_get_nchildren(const struct nilfs_btree_node *node)
141 return le16_to_cpu(node->bn_nchildren);
/* Store the child count, converting to on-disk little endian. */
145 nilfs_btree_node_set_nchildren(struct nilfs_btree_node *node, int nchildren)
147 node->bn_nchildren = cpu_to_le16(nchildren);
/* Non-root node size in bytes: one filesystem block of the owning inode. */
150 static inline int nilfs_btree_node_size(const struct nilfs_bmap *btree)
152 return 1 << btree->b_inode->i_blkbits;
/*
 * Minimum legal child count for @node: roots have their own (smaller)
 * limit, non-root nodes derive theirs from the block size.
 */
156 nilfs_btree_node_nchildren_min(const struct nilfs_btree_node *node,
157 const struct nilfs_bmap *btree)
159 return nilfs_btree_node_root(node) ?
160 NILFS_BTREE_ROOT_NCHILDREN_MIN :
161 NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree));
/*
 * Maximum legal child count for @node: the fixed root limit, or the
 * per-block-size limit for non-root nodes.
 */
165 nilfs_btree_node_nchildren_max(const struct nilfs_btree_node *node,
166 const struct nilfs_bmap *btree)
168 return nilfs_btree_node_root(node) ?
169 NILFS_BTREE_ROOT_NCHILDREN_MAX :
170 NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree));
/*
 * Address of the key array, which starts right after the node header;
 * non-root nodes carry extra pad bytes before the keys.
 */
173 static inline __le64 *
174 nilfs_btree_node_dkeys(const struct nilfs_btree_node *node)
176 return (__le64 *)((char *)(node + 1) +
177 (nilfs_btree_node_root(node) ?
178 0 : NILFS_BTREE_NODE_EXTRA_PAD_SIZE))
/*
 * Address of the pointer array: it follows the key array, which is
 * always sized for the node's maximum child count.
 */
181 static inline __le64 *
182 nilfs_btree_node_dptrs(const struct nilfs_btree_node *node,
183 const struct nilfs_bmap *btree)
185 return (__le64 *)(nilfs_btree_node_dkeys(node) +
186 nilfs_btree_node_nchildren_max(node, btree));
/* Fetch the key at @index, converting from little endian. */
190 nilfs_btree_node_get_key(const struct nilfs_btree_node *node, int index)
192 return le64_to_cpu(*(nilfs_btree_node_dkeys(node) + index));
/* Store @key at @index, converting to little endian. */
196 nilfs_btree_node_set_key(struct nilfs_btree_node *node, int index, __u64 key)
198 *(nilfs_btree_node_dkeys(node) + index) = cpu_to_le64(key);
/* Fetch the child pointer at @index, converting from little endian. */
202 nilfs_btree_node_get_ptr(const struct nilfs_bmap *btree,
203 const struct nilfs_btree_node *node, int index)
205 return le64_to_cpu(*(nilfs_btree_node_dptrs(node, btree) + index));
/* Store child pointer @ptr at @index, converting to little endian. */
209 nilfs_btree_node_set_ptr(struct nilfs_bmap *btree,
210 struct nilfs_btree_node *node, int index, __u64 ptr)
212 *(nilfs_btree_node_dptrs(node, btree) + index) = cpu_to_le64(ptr);
/*
 * nilfs_btree_node_init - initialize a btree node header and payload
 * @flags/@level/@nchildren: header values to install
 * @keys/@ptrs: optional arrays of @nchildren entries copied (with byte
 * swap to little endian) into the node; may be NULL when nchildren==0.
 */
215 static void nilfs_btree_node_init(struct nilfs_bmap *btree,
216 struct nilfs_btree_node *node,
217 int flags, int level, int nchildren,
218 const __u64 *keys, const __u64 *ptrs)
224 nilfs_btree_node_set_flags(node, flags);
225 nilfs_btree_node_set_level(node, level);
226 nilfs_btree_node_set_nchildren(node, nchildren);
228 dkeys = nilfs_btree_node_dkeys(node);
229 dptrs = nilfs_btree_node_dptrs(node, btree);
230 for (i = 0; i < nchildren; i++) {
231 dkeys[i] = cpu_to_le64(keys[i]);
232 dptrs[i] = cpu_to_le64(ptrs[i]);
236 /* Assume the buffer heads corresponding to left and right are locked. */
/*
 * Move the first @n key/ptr pairs of @right onto the tail of @left,
 * then compact @right.  memcpy is safe for the cross-node copies
 * (distinct buffers); memmove is required for the in-place shift.
 * NOTE(review): the lines updating lnchildren/rnchildren by +/-n
 * before the set_nchildren calls are elided in this listing.
 */
237 static void nilfs_btree_node_move_left(struct nilfs_bmap *btree,
238 struct nilfs_btree_node *left,
239 struct nilfs_btree_node *right,
242 __le64 *ldkeys, *rdkeys;
243 __le64 *ldptrs, *rdptrs;
244 int lnchildren, rnchildren;
246 ldkeys = nilfs_btree_node_dkeys(left);
247 ldptrs = nilfs_btree_node_dptrs(left, btree);
248 lnchildren = nilfs_btree_node_get_nchildren(left);
250 rdkeys = nilfs_btree_node_dkeys(right);
251 rdptrs = nilfs_btree_node_dptrs(right, btree);
252 rnchildren = nilfs_btree_node_get_nchildren(right);
254 memcpy(ldkeys + lnchildren, rdkeys, n * sizeof(*rdkeys));
255 memcpy(ldptrs + lnchildren, rdptrs, n * sizeof(*rdptrs));
256 memmove(rdkeys, rdkeys + n, (rnchildren - n) * sizeof(*rdkeys));
257 memmove(rdptrs, rdptrs + n, (rnchildren - n) * sizeof(*rdptrs));
261 nilfs_btree_node_set_nchildren(left, lnchildren);
262 nilfs_btree_node_set_nchildren(right, rnchildren);
265 /* Assume that the buffer heads corresponding to left and right are locked. */
/*
 * Mirror of move_left: shift @right's entries up by @n, then copy the
 * last @n key/ptr pairs of @left into the vacated slots at the front
 * of @right.
 * NOTE(review): the lnchildren/rnchildren adjustment lines before the
 * set_nchildren calls are elided in this listing.
 */
266 static void nilfs_btree_node_move_right(struct nilfs_bmap *btree,
267 struct nilfs_btree_node *left,
268 struct nilfs_btree_node *right,
271 __le64 *ldkeys, *rdkeys;
272 __le64 *ldptrs, *rdptrs;
273 int lnchildren, rnchildren;
275 ldkeys = nilfs_btree_node_dkeys(left);
276 ldptrs = nilfs_btree_node_dptrs(left, btree);
277 lnchildren = nilfs_btree_node_get_nchildren(left);
279 rdkeys = nilfs_btree_node_dkeys(right);
280 rdptrs = nilfs_btree_node_dptrs(right, btree);
281 rnchildren = nilfs_btree_node_get_nchildren(right);
283 memmove(rdkeys + n, rdkeys, rnchildren * sizeof(*rdkeys));
284 memmove(rdptrs + n, rdptrs, rnchildren * sizeof(*rdptrs));
285 memcpy(rdkeys, ldkeys + lnchildren - n, n * sizeof(*rdkeys));
286 memcpy(rdptrs, ldptrs + lnchildren - n, n * sizeof(*rdptrs));
290 nilfs_btree_node_set_nchildren(left, lnchildren);
291 nilfs_btree_node_set_nchildren(right, rnchildren);
294 /* Assume that the buffer head corresponding to node is locked. */
/*
 * Insert (@key, @ptr) at @index, shifting later entries up by one.
 * NOTE(review): the nchildren++ before set_nchildren is elided in
 * this listing.
 */
295 static void nilfs_btree_node_insert(struct nilfs_bmap *btree,
296 struct nilfs_btree_node *node,
297 __u64 key, __u64 ptr, int index)
303 dkeys = nilfs_btree_node_dkeys(node);
304 dptrs = nilfs_btree_node_dptrs(node, btree);
305 nchildren = nilfs_btree_node_get_nchildren(node);
/* shift only when inserting before the current tail */
306 if (index < nchildren) {
307 memmove(dkeys + index + 1, dkeys + index,
308 (nchildren - index) * sizeof(*dkeys));
309 memmove(dptrs + index + 1, dptrs + index,
310 (nchildren - index) * sizeof(*dptrs));
312 dkeys[index] = cpu_to_le64(key);
313 dptrs[index] = cpu_to_le64(ptr);
315 nilfs_btree_node_set_nchildren(node, nchildren);
318 /* Assume that the buffer head corresponding to node is locked. */
/*
 * Remove the entry at @index, optionally reporting the removed key and
 * pointer via @keyp/@ptrp, and close the gap by shifting later entries
 * down.
 * NOTE(review): the NULL checks that store @key/@ptr through
 * @keyp/@ptrp, and the nchildren-- before set_nchildren, are elided in
 * this listing.
 */
319 static void nilfs_btree_node_delete(struct nilfs_bmap *btree,
320 struct nilfs_btree_node *node,
321 __u64 *keyp, __u64 *ptrp, int index)
329 dkeys = nilfs_btree_node_dkeys(node);
330 dptrs = nilfs_btree_node_dptrs(node, btree);
331 key = le64_to_cpu(dkeys[index]);
332 ptr = le64_to_cpu(dptrs[index]);
333 nchildren = nilfs_btree_node_get_nchildren(node);
339 if (index < nchildren - 1) {
340 memmove(dkeys + index, dkeys + index + 1,
341 (nchildren - index - 1) * sizeof(*dkeys));
342 memmove(dptrs + index, dptrs + index + 1,
343 (nchildren - index - 1) * sizeof(*dptrs));
346 nilfs_btree_node_set_nchildren(node, nchildren);
/*
 * nilfs_btree_node_lookup - binary-search a node for @key
 * @indexp: receives the child index to descend into (or insert at)
 *
 * Returns nonzero when an exact key match is found.  For internal
 * nodes (level > NODE_MIN) the index is adjusted so it designates the
 * child whose subtree covers @key even on a miss.
 * NOTE(review): the equal/greater branches of the comparison, the
 * index adjustment arithmetic, the *indexp store and the return are
 * elided in this listing.
 */
349 static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node,
350 __u64 key, int *indexp)
353 int index, low, high, s;
357 high = nilfs_btree_node_get_nchildren(node) - 1;
360 while (low <= high) {
361 index = (low + high) / 2;
362 nkey = nilfs_btree_node_get_key(node, index);
366 } else if (nkey < key) {
376 if (nilfs_btree_node_get_level(node) > NILFS_BTREE_LEVEL_NODE_MIN) {
377 if (s > 0 && index > 0)
/**
389 * nilfs_btree_node_broken - verify consistency of btree node
390 * @node: btree node block to be examined
391 * @size: node size (in bytes)
392 * @blocknr: block number
 *
394 * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
 */
396 static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
397 size_t size, sector_t blocknr)
399 int level, flags, nchildren;
402 level = nilfs_btree_node_get_level(node);
403 flags = nilfs_btree_node_get_flags(node);
404 nchildren = nilfs_btree_node_get_nchildren(node);
/*
 * A non-root node block must have a sane level, must NOT carry the
 * ROOT flag, and its child count must fit the block size.
 * NOTE(review): the lower nchildren bound check (line 409) and the
 * ret assignments/return are elided in this listing.
 */
406 if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
407 level >= NILFS_BTREE_LEVEL_MAX ||
408 (flags & NILFS_BTREE_NODE_ROOT) ||
410 nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) {
411 printk(KERN_CRIT "NILFS: bad btree node (blocknr=%llu): "
412 "level = %d, flags = 0x%x, nchildren = %d\n",
413 (unsigned long long)blocknr, level, flags, nchildren);
/* Validate the btree node held in @bh; nonzero means the node is broken. */
419 int nilfs_btree_broken_node_block(struct buffer_head *bh)
421 return nilfs_btree_node_broken((struct nilfs_btree_node *)bh->b_data,
422 bh->b_size, bh->b_blocknr);
/* The root node lives inline in the bmap union, not in a disk block. */
425 static inline struct nilfs_btree_node *
426 nilfs_btree_get_root(const struct nilfs_bmap *btree)
428 return (struct nilfs_btree_node *)btree->b_u.u_data;
/* Node backing the buffer head cached at @level of the path. */
431 static inline struct nilfs_btree_node *
432 nilfs_btree_get_nonroot_node(const struct nilfs_btree_path *path, int level)
434 return (struct nilfs_btree_node *)path[level].bp_bh->b_data;
/* Node backing the sibling buffer head cached at @level of the path. */
437 static inline struct nilfs_btree_node *
438 nilfs_btree_get_sib_node(const struct nilfs_btree_path *path, int level)
440 return (struct nilfs_btree_node *)path[level].bp_sib_bh->b_data;
/* Tree height = root level + 1 (the data level counts as level 0). */
443 static inline int nilfs_btree_height(const struct nilfs_bmap *btree)
445 return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1;
/*
 * Node at @level of the path: the inline root when @level is the top
 * of the tree, otherwise the cached non-root block.
 */
448 static inline struct nilfs_btree_node *
449 nilfs_btree_get_node(const struct nilfs_bmap *btree,
450 const struct nilfs_btree_path *path,
453 return (level == nilfs_btree_height(btree) - 1) ?
454 nilfs_btree_get_root(btree) :
455 nilfs_btree_get_nonroot_node(path, level);
/*
 * Sanity check used while walking down: the level recorded in the node
 * header must match the level we expect at this depth; a mismatch is
 * reported and treated as metadata corruption.
 * NOTE(review): the return statements are elided in this listing.
 */
459 nilfs_btree_bad_node(struct nilfs_btree_node *node, int level)
461 if (unlikely(nilfs_btree_node_get_level(node) != level)) {
463 printk(KERN_CRIT "NILFS: btree level mismatch: %d != %d\n",
464 nilfs_btree_node_get_level(node), level);
/*
 * nilfs_btree_do_lookup - walk from the root down to @minlevel for @key
 * @path: records, per level, the buffer head and child index visited
 * @ptrp: if non-NULL, receives the block pointer found at @minlevel
 *
 * Starts at the inline root and descends one level at a time, doing a
 * binary search in each node and reading child blocks through
 * nilfs_btree_get_block().  Returns 0 on success, -ENOENT when the key
 * is absent, or a block-read error.
 * NOTE(review): several error-return and miss-handling lines between
 * the visible lines are elided in this listing.
 */
470 static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree,
471 struct nilfs_btree_path *path,
472 __u64 key, __u64 *ptrp, int minlevel)
474 struct nilfs_btree_node *node;
476 int level, index, found, ncmax, ret;
478 node = nilfs_btree_get_root(btree);
479 level = nilfs_btree_node_get_level(node);
480 if (level < minlevel || nilfs_btree_node_get_nchildren(node) <= 0)
483 found = nilfs_btree_node_lookup(node, key, &index);
484 ptr = nilfs_btree_node_get_ptr(btree, node, index);
/* the root has no buffer head; only its index is recorded */
485 path[level].bp_bh = NULL;
486 path[level].bp_index = index;
488 ncmax = NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree));
490 for (level--; level >= minlevel; level--) {
491 ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
494 node = nilfs_btree_get_nonroot_node(path, level);
495 if (nilfs_btree_bad_node(node, level))
498 found = nilfs_btree_node_lookup(node, key, &index);
502 ptr = nilfs_btree_node_get_ptr(btree, node, index);
/* a miss may only legitimately happen at the bottom node level */
504 WARN_ON(found || level != NILFS_BTREE_LEVEL_NODE_MIN);
506 ptr = NILFS_BMAP_INVALID_PTR;
508 path[level].bp_index = index;
/*
 * nilfs_btree_do_lookup_last - find the rightmost (largest-key) entry
 * @keyp/@ptrp: receive the last key and its block pointer
 *
 * Descends the tree always taking the last child of every node.
 * NOTE(review): error returns from nilfs_btree_get_block(), the
 * NULL checks on @keyp/@ptrp and the final return are elided in this
 * listing.
 */
519 static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree,
520 struct nilfs_btree_path *path,
521 __u64 *keyp, __u64 *ptrp)
523 struct nilfs_btree_node *node;
525 int index, level, ret;
527 node = nilfs_btree_get_root(btree);
528 index = nilfs_btree_node_get_nchildren(node) - 1;
531 level = nilfs_btree_node_get_level(node);
532 ptr = nilfs_btree_node_get_ptr(btree, node, index);
533 path[level].bp_bh = NULL;
534 path[level].bp_index = index;
536 for (level--; level > 0; level--) {
537 ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
540 node = nilfs_btree_get_nonroot_node(path, level);
541 if (nilfs_btree_bad_node(node, level))
543 index = nilfs_btree_node_get_nchildren(node) - 1;
544 ptr = nilfs_btree_node_get_ptr(btree, node, index);
545 path[level].bp_index = index;
549 *keyp = nilfs_btree_node_get_key(node, index);
/*
 * nilfs_btree_lookup - bmap operation: look up @key down to @level
 *
 * Thin wrapper: allocates a path, runs nilfs_btree_do_lookup(), frees
 * the path and propagates its result.
 * NOTE(review): the allocation-failure check and final return are
 * elided in this listing.
 */
556 static int nilfs_btree_lookup(const struct nilfs_bmap *btree,
557 __u64 key, int level, __u64 *ptrp)
559 struct nilfs_btree_path *path;
562 path = nilfs_btree_alloc_path();
566 ret = nilfs_btree_do_lookup(btree, path, key, ptrp, level);
568 nilfs_btree_free_path(path);
/*
 * nilfs_btree_lookup_contig - look up @key and count contiguous blocks
 * @maxblocks: upper bound on the extent length to report
 *
 * After the initial lookup, scans forward through the bottom-level
 * node (and, when exhausted, its right sibling reached via the parent)
 * counting entries whose keys and pointers are both consecutive.  For
 * virtual-block-number bmaps each pointer is translated through the
 * DAT before the contiguity test.  Returns the extent length (cnt) or
 * a negative error.
 * NOTE(review): the cnt initialisation, several error/exit paths and
 * the final return are elided in this listing.
 */
573 static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
574 __u64 key, __u64 *ptrp, unsigned maxblocks)
576 struct nilfs_btree_path *path;
577 struct nilfs_btree_node *node;
578 struct inode *dat = NULL;
581 int level = NILFS_BTREE_LEVEL_NODE_MIN;
582 int ret, cnt, index, maxlevel;
584 path = nilfs_btree_alloc_path();
588 ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level);
/* virtual block numbers must be mapped to disk blocks via the DAT */
592 if (NILFS_BMAP_USE_VBN(btree)) {
593 dat = nilfs_bmap_get_dat(btree);
594 ret = nilfs_dat_translate(dat, ptr, &blocknr);
600 if (cnt == maxblocks)
603 maxlevel = nilfs_btree_height(btree) - 1;
604 node = nilfs_btree_get_node(btree, path, level);
605 index = path[level].bp_index + 1;
/* scan forward within the current bottom-level node */
607 while (index < nilfs_btree_node_get_nchildren(node)) {
608 if (nilfs_btree_node_get_key(node, index) !=
611 ptr2 = nilfs_btree_node_get_ptr(btree, node, index);
613 ret = nilfs_dat_translate(dat, ptr2, &blocknr);
/* stop when blocks are no longer physically consecutive */
618 if (ptr2 != ptr + cnt || ++cnt == maxblocks)
623 if (level == maxlevel)
626 /* look-up right sibling node */
627 node = nilfs_btree_get_node(btree, path, level + 1);
628 index = path[level + 1].bp_index + 1;
629 if (index >= nilfs_btree_node_get_nchildren(node) ||
630 nilfs_btree_node_get_key(node, index) != key + cnt)
632 ptr2 = nilfs_btree_node_get_ptr(btree, node, index);
633 path[level + 1].bp_index = index;
/* swap the cached bottom-level block for the sibling */
635 brelse(path[level].bp_bh);
636 path[level].bp_bh = NULL;
637 ret = nilfs_btree_get_block(btree, ptr2, &path[level].bp_bh);
640 node = nilfs_btree_get_nonroot_node(path, level);
642 path[level].bp_index = index;
648 nilfs_btree_free_path(path);
/*
 * nilfs_btree_promote_key - propagate a new first key up the tree
 *
 * When the entry at index 0 of a node changes, its parent's separator
 * key must be updated; the loop keeps climbing while the updated slot
 * is itself index 0, and finally patches the root if reached.
 * NOTE(review): the do { opener of the do/while loop is elided in
 * this listing.
 */
652 static void nilfs_btree_promote_key(struct nilfs_bmap *btree,
653 struct nilfs_btree_path *path,
654 int level, __u64 key)
656 if (level < nilfs_btree_height(btree) - 1) {
658 nilfs_btree_node_set_key(
659 nilfs_btree_get_nonroot_node(path, level),
660 path[level].bp_index, key);
661 if (!buffer_dirty(path[level].bp_bh))
662 nilfs_btnode_mark_dirty(path[level].bp_bh);
663 } while ((path[level].bp_index == 0) &&
664 (++level < nilfs_btree_height(btree) - 1));
/* root */
668 if (level == nilfs_btree_height(btree) - 1) {
669 nilfs_btree_node_set_key(nilfs_btree_get_root(btree),
670 path[level].bp_index, key);
/*
 * nilfs_btree_do_insert - plain insertion at one level (no rebalance)
 *
 * Inserts (@key, @ptr) at the recorded index; for non-root nodes the
 * block is dirtied and, when the new entry became the node's first
 * key, the separator in the parent is promoted.
 */
674 static void nilfs_btree_do_insert(struct nilfs_bmap *btree,
675 struct nilfs_btree_path *path,
676 int level, __u64 *keyp, __u64 *ptrp)
678 struct nilfs_btree_node *node;
680 if (level < nilfs_btree_height(btree) - 1) {
681 node = nilfs_btree_get_nonroot_node(path, level);
682 nilfs_btree_node_insert(btree, node, *keyp, *ptrp,
683 path[level].bp_index);
684 if (!buffer_dirty(path[level].bp_bh))
685 nilfs_btnode_mark_dirty(path[level].bp_bh);
687 if (path[level].bp_index == 0)
688 nilfs_btree_promote_key(btree, path, level + 1,
689 nilfs_btree_node_get_key(node,
692 node = nilfs_btree_get_root(btree);
693 nilfs_btree_node_insert(btree, node, *keyp, *ptrp,
694 path[level].bp_index);
/*
 * nilfs_btree_carry_left - make room by shifting entries to the left
 * sibling, then insert
 *
 * Moves n entries so the two siblings end up roughly balanced.  If the
 * insertion point falls inside the moved range, the insert retargets
 * the left sibling (path swaps bp_bh for bp_sib_bh and the parent
 * index is decremented); otherwise only the local index shifts by n.
 * NOTE(review): the "move = 1; n++" style adjustment inside the
 * insert-point branch and the if/else around the two path fixups are
 * elided in this listing.
 */
698 static void nilfs_btree_carry_left(struct nilfs_bmap *btree,
699 struct nilfs_btree_path *path,
700 int level, __u64 *keyp, __u64 *ptrp)
702 struct nilfs_btree_node *node, *left;
703 int nchildren, lnchildren, n, move;
705 node = nilfs_btree_get_nonroot_node(path, level);
706 left = nilfs_btree_get_sib_node(path, level);
707 nchildren = nilfs_btree_node_get_nchildren(node);
708 lnchildren = nilfs_btree_node_get_nchildren(left);
/* n = how many entries to shift so both siblings are near-equal */
711 n = (nchildren + lnchildren + 1) / 2 - lnchildren;
712 if (n > path[level].bp_index) {
713 /* move insert point */
718 nilfs_btree_node_move_left(btree, left, node, n);
720 if (!buffer_dirty(path[level].bp_bh))
721 nilfs_btnode_mark_dirty(path[level].bp_bh);
722 if (!buffer_dirty(path[level].bp_sib_bh))
723 nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
725 nilfs_btree_promote_key(btree, path, level + 1,
726 nilfs_btree_node_get_key(node, 0));
/* insert lands in the left sibling: retarget the path */
729 brelse(path[level].bp_bh);
730 path[level].bp_bh = path[level].bp_sib_bh;
731 path[level].bp_sib_bh = NULL;
732 path[level].bp_index += lnchildren;
733 path[level + 1].bp_index--;
735 brelse(path[level].bp_sib_bh);
736 path[level].bp_sib_bh = NULL;
737 path[level].bp_index -= n;
740 nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
/*
 * nilfs_btree_carry_right - make room by shifting entries to the right
 * sibling, then insert
 *
 * Mirror of carry_left.  After the move, the right sibling's new first
 * key is promoted into the parent (the parent index is temporarily
 * bumped for the promotion).  If the insertion point moved with the
 * shifted entries, the path retargets the right sibling.
 * NOTE(review): the insert-point adjustment inside the first branch
 * and the if/else around the two path fixups are elided in this
 * listing.
 */
743 static void nilfs_btree_carry_right(struct nilfs_bmap *btree,
744 struct nilfs_btree_path *path,
745 int level, __u64 *keyp, __u64 *ptrp)
747 struct nilfs_btree_node *node, *right;
748 int nchildren, rnchildren, n, move;
750 node = nilfs_btree_get_nonroot_node(path, level);
751 right = nilfs_btree_get_sib_node(path, level);
752 nchildren = nilfs_btree_node_get_nchildren(node);
753 rnchildren = nilfs_btree_node_get_nchildren(right);
756 n = (nchildren + rnchildren + 1) / 2 - rnchildren;
757 if (n > nchildren - path[level].bp_index) {
758 /* move insert point */
763 nilfs_btree_node_move_right(btree, node, right, n);
765 if (!buffer_dirty(path[level].bp_bh))
766 nilfs_btnode_mark_dirty(path[level].bp_bh);
767 if (!buffer_dirty(path[level].bp_sib_bh))
768 nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
/* promote the right sibling's new first key via its parent slot */
770 path[level + 1].bp_index++;
771 nilfs_btree_promote_key(btree, path, level + 1,
772 nilfs_btree_node_get_key(right, 0));
773 path[level + 1].bp_index--;
776 brelse(path[level].bp_bh);
777 path[level].bp_bh = path[level].bp_sib_bh;
778 path[level].bp_sib_bh = NULL;
779 path[level].bp_index -= nilfs_btree_node_get_nchildren(node);
780 path[level + 1].bp_index++;
782 brelse(path[level].bp_sib_bh);
783 path[level].bp_sib_bh = NULL;
786 nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
/*
 * nilfs_btree_split - split a full node into itself plus a new right
 * sibling, then insert
 *
 * Half the entries move to the freshly created sibling (bp_sib_bh).
 * Depending on where the insertion point lands, the new entry is
 * inserted directly into the sibling, or via nilfs_btree_do_insert()
 * into the remaining node.  On return, *keyp/*ptrp carry the sibling's
 * first key and its newly allocated block pointer, which the caller
 * inserts into the parent (hence the parent index increment).
 * NOTE(review): the branch structure (if/else) joining the three
 * outcomes and some path fixups are elided in this listing.
 */
789 static void nilfs_btree_split(struct nilfs_bmap *btree,
790 struct nilfs_btree_path *path,
791 int level, __u64 *keyp, __u64 *ptrp)
793 struct nilfs_btree_node *node, *right;
796 int nchildren, n, move;
798 node = nilfs_btree_get_nonroot_node(path, level);
799 right = nilfs_btree_get_sib_node(path, level);
800 nchildren = nilfs_btree_node_get_nchildren(node);
803 n = (nchildren + 1) / 2;
804 if (n > nchildren - path[level].bp_index) {
809 nilfs_btree_node_move_right(btree, node, right, n);
811 if (!buffer_dirty(path[level].bp_bh))
812 nilfs_btnode_mark_dirty(path[level].bp_bh);
813 if (!buffer_dirty(path[level].bp_sib_bh))
814 nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
816 newkey = nilfs_btree_node_get_key(right, 0);
817 newptr = path[level].bp_newreq.bpr_ptr;
/* insert point moved into the new right sibling */
820 path[level].bp_index -= nilfs_btree_node_get_nchildren(node);
821 nilfs_btree_node_insert(btree, right, *keyp, *ptrp,
822 path[level].bp_index);
824 *keyp = nilfs_btree_node_get_key(right, 0);
825 *ptrp = path[level].bp_newreq.bpr_ptr;
827 brelse(path[level].bp_bh);
828 path[level].bp_bh = path[level].bp_sib_bh;
829 path[level].bp_sib_bh = NULL;
831 nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
833 *keyp = nilfs_btree_node_get_key(right, 0);
834 *ptrp = path[level].bp_newreq.bpr_ptr;
836 brelse(path[level].bp_sib_bh);
837 path[level].bp_sib_bh = NULL;
/* the new sibling is inserted after this node in the parent */
840 path[level + 1].bp_index++;
/*
 * nilfs_btree_grow - grow the tree by one level
 *
 * All entries of the inline root move into a newly created child block
 * (bp_sib_bh); the root's level is bumped.  The pending insert then
 * goes into that child, and *keyp/*ptrp return the child's first key
 * and new block pointer for the caller to install into the root.
 */
843 static void nilfs_btree_grow(struct nilfs_bmap *btree,
844 struct nilfs_btree_path *path,
845 int level, __u64 *keyp, __u64 *ptrp)
847 struct nilfs_btree_node *root, *child;
850 root = nilfs_btree_get_root(btree);
851 child = nilfs_btree_get_sib_node(path, level);
853 n = nilfs_btree_node_get_nchildren(root);
855 nilfs_btree_node_move_right(btree, root, child, n);
856 nilfs_btree_node_set_level(root, level + 1);
858 if (!buffer_dirty(path[level].bp_sib_bh))
859 nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
861 path[level].bp_bh = path[level].bp_sib_bh;
862 path[level].bp_sib_bh = NULL;
864 nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
866 *keyp = nilfs_btree_node_get_key(child, 0);
867 *ptrp = path[level].bp_newreq.bpr_ptr;
/*
 * nilfs_btree_find_near - pick a disk-locality hint near the insert
 * point
 *
 * Prefers the pointer of the left neighbour at the bottom level; if
 * the insert point is the node's first slot, falls back to the parent
 * entry's pointer.  Returns NILFS_BMAP_INVALID_PTR when no hint is
 * available.
 * NOTE(review): the initial guard (path == NULL check) and the two
 * "/* left sibling *" -style comments are elided in this listing.
 */
870 static __u64 nilfs_btree_find_near(const struct nilfs_bmap *btree,
871 const struct nilfs_btree_path *path)
873 struct nilfs_btree_node *node;
877 return NILFS_BMAP_INVALID_PTR;
880 level = NILFS_BTREE_LEVEL_NODE_MIN;
881 if (path[level].bp_index > 0) {
882 node = nilfs_btree_get_node(btree, path, level);
883 return nilfs_btree_node_get_ptr(btree, node,
884 path[level].bp_index - 1);
/* parent entry pointer as a secondary locality hint */
888 level = NILFS_BTREE_LEVEL_NODE_MIN + 1;
889 if (level <= nilfs_btree_height(btree) - 1) {
890 node = nilfs_btree_get_node(btree, path, level);
891 return nilfs_btree_node_get_ptr(btree, node,
892 path[level].bp_index);
895 return NILFS_BMAP_INVALID_PTR;
/*
 * nilfs_btree_find_target_v - choose a target virtual block number for
 * a new allocation
 *
 * Tries, in order: the sequential-access target recorded in the bmap,
 * a pointer near the insert point, and finally the block-group based
 * default.
 * NOTE(review): the "return ptr;" lines after each successful probe
 * are elided in this listing.
 */
898 static __u64 nilfs_btree_find_target_v(const struct nilfs_bmap *btree,
899 const struct nilfs_btree_path *path,
904 ptr = nilfs_bmap_find_target_seq(btree, key);
905 if (ptr != NILFS_BMAP_INVALID_PTR)
906 /* sequential access */
909 ptr = nilfs_btree_find_near(btree, path);
910 if (ptr != NILFS_BMAP_INVALID_PTR)
915 return nilfs_bmap_find_target_in_group(btree);
/*
 * nilfs_btree_prepare_insert - reserve resources and pick a rebalance
 * strategy per level for an insert
 * @levelp: returns the topmost level whose bp_op must run at commit
 * @stats:  bs_nblocks accumulates how many blocks the insert will add
 *
 * Walks up from the bottom level.  For each level it chooses, in order
 * of preference: plain insert (node has room), carry into a left or
 * right sibling with room, or split (allocating a new node block).
 * If the root itself is full, a grow step is prepared.  Block pointers
 * are reserved via nilfs_bmap_prepare_alloc_ptr(); on any failure the
 * error path unwinds every reservation and created block.
 * NOTE(review): the loop increment, several branch/else lines, the
 * success exits and the error-label lines between the visible lines
 * are elided in this listing.
 */
918 static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree,
919 struct nilfs_btree_path *path,
920 int *levelp, __u64 key, __u64 ptr,
921 struct nilfs_bmap_stats *stats)
923 struct buffer_head *bh;
924 struct nilfs_btree_node *node, *parent, *sib;
926 int pindex, level, ncmax, ret;
927 struct inode *dat = NULL;
929 stats->bs_nblocks = 0;
930 level = NILFS_BTREE_LEVEL_DATA;
932 /* allocate a new ptr for data block */
933 if (NILFS_BMAP_USE_VBN(btree)) {
934 path[level].bp_newreq.bpr_ptr =
935 nilfs_btree_find_target_v(btree, path, key);
936 dat = nilfs_bmap_get_dat(btree);
939 ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat);
943 ncmax = NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree));
945 for (level = NILFS_BTREE_LEVEL_NODE_MIN;
946 level < nilfs_btree_height(btree) - 1;
948 node = nilfs_btree_get_nonroot_node(path, level);
/* room in this node: a plain insert suffices, stop climbing */
949 if (nilfs_btree_node_get_nchildren(node) < ncmax) {
950 path[level].bp_op = nilfs_btree_do_insert;
955 parent = nilfs_btree_get_node(btree, path, level + 1);
956 pindex = path[level + 1].bp_index;
/* try the left sibling first */
960 sibptr = nilfs_btree_node_get_ptr(btree, parent,
962 ret = nilfs_btree_get_block(btree, sibptr, &bh);
964 goto err_out_child_node;
965 sib = (struct nilfs_btree_node *)bh->b_data;
966 if (nilfs_btree_node_get_nchildren(sib) < ncmax) {
967 path[level].bp_sib_bh = bh;
968 path[level].bp_op = nilfs_btree_carry_left;
/* then the right sibling, if this is not the parent's last child */
977 nilfs_btree_node_get_nchildren(parent) - 1) {
978 sibptr = nilfs_btree_node_get_ptr(btree, parent,
980 ret = nilfs_btree_get_block(btree, sibptr, &bh);
982 goto err_out_child_node;
983 sib = (struct nilfs_btree_node *)bh->b_data;
984 if (nilfs_btree_node_get_nchildren(sib) < ncmax) {
985 path[level].bp_sib_bh = bh;
986 path[level].bp_op = nilfs_btree_carry_right;
/* no sibling has room: split, reserving a new node block */
994 path[level].bp_newreq.bpr_ptr =
995 path[level - 1].bp_newreq.bpr_ptr + 1;
996 ret = nilfs_bmap_prepare_alloc_ptr(btree,
997 &path[level].bp_newreq, dat);
999 goto err_out_child_node;
1000 ret = nilfs_btree_get_new_block(btree,
1001 path[level].bp_newreq.bpr_ptr,
1004 goto err_out_curr_node;
1006 stats->bs_nblocks++;
1008 nilfs_btree_node_init(btree,
1009 (struct nilfs_btree_node *)bh->b_data,
1010 0, level, 0, NULL, NULL);
1011 path[level].bp_sib_bh = bh;
1012 path[level].bp_op = nilfs_btree_split;
/* root level: insert directly if the root still has room */
1016 node = nilfs_btree_get_root(btree);
1017 if (nilfs_btree_node_get_nchildren(node) <
1018 NILFS_BTREE_ROOT_NCHILDREN_MAX) {
1019 path[level].bp_op = nilfs_btree_do_insert;
1020 stats->bs_nblocks++;
/* root full: grow the tree by one level into a new child block */
1025 path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
1026 ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat);
1028 goto err_out_child_node;
1029 ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr,
1032 goto err_out_curr_node;
1034 nilfs_btree_node_init(btree, (struct nilfs_btree_node *)bh->b_data,
1035 0, level, 0, NULL, NULL);
1036 path[level].bp_sib_bh = bh;
1037 path[level].bp_op = nilfs_btree_grow;
1040 path[level].bp_op = nilfs_btree_do_insert;
1042 /* a newly-created node block and a data block are added */
1043 stats->bs_nblocks += 2;
/* error unwind: abort reservations and delete created node blocks */
1052 nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
1054 for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
1055 nilfs_btnode_delete(path[level].bp_sib_bh);
1056 nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
1060 nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
1063 stats->bs_nblocks = 0;
/*
 * nilfs_btree_commit_insert - apply a prepared insert
 * @maxlevel: topmost level whose bp_op was set by prepare_insert
 *
 * Commits the data-block pointer reservation, then runs each level's
 * prepared operation bottom-up, committing that level's new-node
 * reservation first.  Finally marks the bmap dirty.
 * NOTE(review): @ptr arrives carrying the data buffer head cast to an
 * integer — the cast on line 1074 recovers it; confirm against the
 * caller in the full source.
 */
1067 static void nilfs_btree_commit_insert(struct nilfs_bmap *btree,
1068 struct nilfs_btree_path *path,
1069 int maxlevel, __u64 key, __u64 ptr)
1071 struct inode *dat = NULL;
1074 set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
1075 ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr;
1076 if (NILFS_BMAP_USE_VBN(btree)) {
1077 nilfs_bmap_set_target_v(btree, key, ptr);
1078 dat = nilfs_bmap_get_dat(btree);
1081 for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
1082 nilfs_bmap_commit_alloc_ptr(btree,
1083 &path[level - 1].bp_newreq, dat);
1084 path[level].bp_op(btree, path, level, &key, &ptr);
1087 if (!nilfs_bmap_dirty(btree))
1088 nilfs_bmap_set_dirty(btree);
/*
 * nilfs_btree_insert - bmap operation: insert (@key -> @ptr)
 *
 * Looks up @key first: an existing entry (lookup returning something
 * other than -ENOENT) is an error/early exit.  Then prepares, commits,
 * and accounts the added blocks.
 * NOTE(review): the path-allocation failure check, the -EEXIST
 * conversion in the "ret != -ENOENT" branch, and the prepare-failure
 * branch are elided in this listing.
 */
1091 static int nilfs_btree_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr)
1093 struct nilfs_btree_path *path;
1094 struct nilfs_bmap_stats stats;
1097 path = nilfs_btree_alloc_path();
1101 ret = nilfs_btree_do_lookup(btree, path, key, NULL,
1102 NILFS_BTREE_LEVEL_NODE_MIN);
1103 if (ret != -ENOENT) {
1109 ret = nilfs_btree_prepare_insert(btree, path, &level, key, ptr, &stats);
1112 nilfs_btree_commit_insert(btree, path, level, key, ptr);
1113 nilfs_bmap_add_blocks(btree, stats.bs_nblocks);
1116 nilfs_btree_free_path(path);
/*
 * nilfs_btree_do_delete - plain deletion at one level (no rebalance)
 *
 * Removes the entry at the recorded index; for non-root nodes the
 * block is dirtied and, when slot 0 was removed, the node's new first
 * key is promoted to the parent.
 */
1120 static void nilfs_btree_do_delete(struct nilfs_bmap *btree,
1121 struct nilfs_btree_path *path,
1122 int level, __u64 *keyp, __u64 *ptrp)
1124 struct nilfs_btree_node *node;
1126 if (level < nilfs_btree_height(btree) - 1) {
1127 node = nilfs_btree_get_nonroot_node(path, level);
1128 nilfs_btree_node_delete(btree, node, keyp, ptrp,
1129 path[level].bp_index);
1130 if (!buffer_dirty(path[level].bp_bh))
1131 nilfs_btnode_mark_dirty(path[level].bp_bh);
1132 if (path[level].bp_index == 0)
1133 nilfs_btree_promote_key(btree, path, level + 1,
1134 nilfs_btree_node_get_key(node, 0));
1136 node = nilfs_btree_get_root(btree);
1137 nilfs_btree_node_delete(btree, node, keyp, ptrp,
1138 path[level].bp_index);
/*
 * nilfs_btree_borrow_left - after a delete, borrow entries from the
 * left sibling to rebalance
 *
 * Deletes first, then moves n entries from the left sibling into this
 * node's front, promotes the node's new first key, and shifts the
 * local index by n to keep pointing at the same entry.
 */
1142 static void nilfs_btree_borrow_left(struct nilfs_bmap *btree,
1143 struct nilfs_btree_path *path,
1144 int level, __u64 *keyp, __u64 *ptrp)
1146 struct nilfs_btree_node *node, *left;
1147 int nchildren, lnchildren, n;
1149 nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
1151 node = nilfs_btree_get_nonroot_node(path, level);
1152 left = nilfs_btree_get_sib_node(path, level);
1153 nchildren = nilfs_btree_node_get_nchildren(node);
1154 lnchildren = nilfs_btree_node_get_nchildren(left);
/* take enough entries to even the two siblings out */
1156 n = (nchildren + lnchildren) / 2 - nchildren;
1158 nilfs_btree_node_move_right(btree, left, node, n);
1160 if (!buffer_dirty(path[level].bp_bh))
1161 nilfs_btnode_mark_dirty(path[level].bp_bh);
1162 if (!buffer_dirty(path[level].bp_sib_bh))
1163 nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
1165 nilfs_btree_promote_key(btree, path, level + 1,
1166 nilfs_btree_node_get_key(node, 0));
1168 brelse(path[level].bp_sib_bh);
1169 path[level].bp_sib_bh = NULL;
1170 path[level].bp_index += n;
/*
 * nilfs_btree_borrow_right - after a delete, borrow entries from the
 * right sibling to rebalance
 *
 * Mirror of borrow_left: entries move from the right sibling's front
 * onto this node's tail, and the sibling's new first key is promoted
 * through its parent slot (index temporarily bumped).  The local index
 * is unchanged because entries were appended, not prepended.
 */
1173 static void nilfs_btree_borrow_right(struct nilfs_bmap *btree,
1174 struct nilfs_btree_path *path,
1175 int level, __u64 *keyp, __u64 *ptrp)
1177 struct nilfs_btree_node *node, *right;
1178 int nchildren, rnchildren, n;
1180 nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
1182 node = nilfs_btree_get_nonroot_node(path, level);
1183 right = nilfs_btree_get_sib_node(path, level);
1184 nchildren = nilfs_btree_node_get_nchildren(node);
1185 rnchildren = nilfs_btree_node_get_nchildren(right);
1187 n = (nchildren + rnchildren) / 2 - nchildren;
1189 nilfs_btree_node_move_left(btree, node, right, n);
1191 if (!buffer_dirty(path[level].bp_bh))
1192 nilfs_btnode_mark_dirty(path[level].bp_bh);
1193 if (!buffer_dirty(path[level].bp_sib_bh))
1194 nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
1196 path[level + 1].bp_index++;
1197 nilfs_btree_promote_key(btree, path, level + 1,
1198 nilfs_btree_node_get_key(right, 0));
1199 path[level + 1].bp_index--;
1201 brelse(path[level].bp_sib_bh);
1202 path[level].bp_sib_bh = NULL;
/*
 * nilfs_btree_concat_left - after a delete, merge this node into its
 * left sibling
 *
 * All remaining entries move into the left sibling; this node's block
 * is deleted from the btnode cache and the path retargets the sibling,
 * offsetting the index by the sibling's pre-merge child count.
 */
1205 static void nilfs_btree_concat_left(struct nilfs_bmap *btree,
1206 struct nilfs_btree_path *path,
1207 int level, __u64 *keyp, __u64 *ptrp)
1209 struct nilfs_btree_node *node, *left;
1212 nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
1214 node = nilfs_btree_get_nonroot_node(path, level);
1215 left = nilfs_btree_get_sib_node(path, level);
1217 n = nilfs_btree_node_get_nchildren(node);
1219 nilfs_btree_node_move_left(btree, left, node, n);
1221 if (!buffer_dirty(path[level].bp_sib_bh))
1222 nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
1224 nilfs_btnode_delete(path[level].bp_bh);
1225 path[level].bp_bh = path[level].bp_sib_bh;
1226 path[level].bp_sib_bh = NULL;
1227 path[level].bp_index += nilfs_btree_node_get_nchildren(left);
/*
 * nilfs_btree_concat_right - after a delete, merge the right sibling
 * into this node
 *
 * All of the right sibling's entries move into this node; the
 * sibling's block is deleted and the parent index is advanced so the
 * caller's next delete removes the sibling's parent entry.
 */
1230 static void nilfs_btree_concat_right(struct nilfs_bmap *btree,
1231 struct nilfs_btree_path *path,
1232 int level, __u64 *keyp, __u64 *ptrp)
1234 struct nilfs_btree_node *node, *right;
1237 nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
1239 node = nilfs_btree_get_nonroot_node(path, level);
1240 right = nilfs_btree_get_sib_node(path, level);
1242 n = nilfs_btree_node_get_nchildren(right);
1244 nilfs_btree_node_move_left(btree, node, right, n);
1246 if (!buffer_dirty(path[level].bp_bh))
1247 nilfs_btnode_mark_dirty(path[level].bp_bh);
1249 nilfs_btnode_delete(path[level].bp_sib_bh);
1250 path[level].bp_sib_bh = NULL;
1251 path[level + 1].bp_index++;
/*
 * nilfs_btree_shrink - shrink the tree by one level
 *
 * Runs when the root has a single child: after the delete, the root's
 * lone entry is dropped, its level lowered, and the child's entries
 * are pulled up into the inline root; the child's block is then
 * deleted from the btnode cache.
 */
1254 static void nilfs_btree_shrink(struct nilfs_bmap *btree,
1255 struct nilfs_btree_path *path,
1256 int level, __u64 *keyp, __u64 *ptrp)
1258 struct nilfs_btree_node *root, *child;
1261 nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
1263 root = nilfs_btree_get_root(btree);
1264 child = nilfs_btree_get_nonroot_node(path, level);
1266 nilfs_btree_node_delete(btree, root, NULL, NULL, 0);
1267 nilfs_btree_node_set_level(root, level);
1268 n = nilfs_btree_node_get_nchildren(child);
1269 nilfs_btree_node_move_left(btree, root, child, n);
1271 nilfs_btnode_delete(path[level].bp_bh);
1272 path[level].bp_bh = NULL;
/*
 * nilfs_btree_prepare_delete - plan a delete of the entry located by @path.
 * Walks from the leaf level upward, choosing per-level operations
 * (do_delete / borrow_left|right / concat_left|right / shrink), preparing
 * the end of each old ptr via nilfs_bmap_prepare_end_ptr(), and counting
 * the blocks to be released in @stats.  On error, everything prepared so
 * far is aborted (err_out ladder at the bottom).
 * NOTE(review): extraction dropped short lines (braces, `if (ret < 0)`,
 * `else`, labels, `return`); annotated as-is — do not treat as compilable.
 */
1276 static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
1277 struct nilfs_btree_path *path,
1279 struct nilfs_bmap_stats *stats,
1282 struct buffer_head *bh;
1283 struct nilfs_btree_node *node, *parent, *sib;
1285 int pindex, level, ncmin, ret;
1288 stats->bs_nblocks = 0;
/* minimum fill for a non-root node; below this we borrow or concat */
1289 ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree));
1291 for (level = NILFS_BTREE_LEVEL_NODE_MIN;
1292 level < nilfs_btree_height(btree) - 1;
1294 node = nilfs_btree_get_nonroot_node(path, level);
1295 path[level].bp_oldreq.bpr_ptr =
1296 nilfs_btree_node_get_ptr(btree, node,
1297 path[level].bp_index);
1298 ret = nilfs_bmap_prepare_end_ptr(btree,
1299 &path[level].bp_oldreq, dat);
1301 goto err_out_child_node;
/* node stays above the minimum: a plain delete suffices, stop here */
1303 if (nilfs_btree_node_get_nchildren(node) > ncmin) {
1304 path[level].bp_op = nilfs_btree_do_delete;
1305 stats->bs_nblocks++;
1309 parent = nilfs_btree_get_node(btree, path, level + 1);
1310 pindex = path[level + 1].bp_index;
/* left sibling exists: borrow from it if rich, else concat into it */
1314 sibptr = nilfs_btree_node_get_ptr(btree, parent,
1316 ret = nilfs_btree_get_block(btree, sibptr, &bh);
1318 goto err_out_curr_node;
1319 sib = (struct nilfs_btree_node *)bh->b_data;
1320 if (nilfs_btree_node_get_nchildren(sib) > ncmin) {
1321 path[level].bp_sib_bh = bh;
1322 path[level].bp_op = nilfs_btree_borrow_left;
1323 stats->bs_nblocks++;
1326 path[level].bp_sib_bh = bh;
1327 path[level].bp_op = nilfs_btree_concat_left;
1328 stats->bs_nblocks++;
/* otherwise try the right sibling, same borrow-or-concat decision */
1332 nilfs_btree_node_get_nchildren(parent) - 1) {
1334 sibptr = nilfs_btree_node_get_ptr(btree, parent,
1336 ret = nilfs_btree_get_block(btree, sibptr, &bh);
1338 goto err_out_curr_node;
1339 sib = (struct nilfs_btree_node *)bh->b_data;
1340 if (nilfs_btree_node_get_nchildren(sib) > ncmin) {
1341 path[level].bp_sib_bh = bh;
1342 path[level].bp_op = nilfs_btree_borrow_right;
1343 stats->bs_nblocks++;
1346 path[level].bp_sib_bh = bh;
1347 path[level].bp_op = nilfs_btree_concat_right;
1348 stats->bs_nblocks++;
1353 /* the only child of the root node */
1354 WARN_ON(level != nilfs_btree_height(btree) - 2);
/* small enough to fold into the root: shrink drops a whole tree level */
1355 if (nilfs_btree_node_get_nchildren(node) - 1 <=
1356 NILFS_BTREE_ROOT_NCHILDREN_MAX) {
1357 path[level].bp_op = nilfs_btree_shrink;
1358 stats->bs_nblocks += 2;
1360 path[level].bp_op = nilfs_btree_do_delete;
1361 stats->bs_nblocks++;
/* top of the walk: prepare the delete in the root node itself */
1369 node = nilfs_btree_get_root(btree);
1370 path[level].bp_oldreq.bpr_ptr =
1371 nilfs_btree_node_get_ptr(btree, node, path[level].bp_index);
1373 ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat);
1375 goto err_out_child_node;
1377 /* child of the root node is deleted */
1378 path[level].bp_op = nilfs_btree_do_delete;
1379 stats->bs_nblocks++;
/* error unwind: abort prepared end-ptr requests and release siblings */
1388 nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat);
1390 for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) {
1391 brelse(path[level].bp_sib_bh);
1392 nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat);
1395 stats->bs_nblocks = 0;
/*
 * nilfs_btree_commit_delete - execute the delete plan built by
 * nilfs_btree_prepare_delete(): for each level up to @maxlevel, commit the
 * end of the old ptr and run the per-level operation stored in bp_op.
 */
1399 static void nilfs_btree_commit_delete(struct nilfs_bmap *btree,
1400 struct nilfs_btree_path *path,
1401 int maxlevel, struct inode *dat)
1405 for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
1406 nilfs_bmap_commit_end_ptr(btree, &path[level].bp_oldreq, dat);
/* bp_op is one of do_delete/borrow_*/concat_*/shrink chosen at prepare */
1407 path[level].bp_op(btree, path, level, NULL, NULL);
1410 if (!nilfs_bmap_dirty(btree))
1411 nilfs_bmap_set_dirty(btree);
/*
 * nilfs_btree_delete - bop_delete entry point: remove @key from the tree.
 * Looks up the key, prepares the multi-level delete, commits it, and
 * subtracts the released block count from the bmap.  Returns 0 or a
 * negative errno (e.g. lookup failure propagates -ENOENT).
 * NOTE(review): error-check lines were dropped by extraction.
 */
1414 static int nilfs_btree_delete(struct nilfs_bmap *btree, __u64 key)
1417 struct nilfs_btree_path *path;
1418 struct nilfs_bmap_stats stats;
1422 path = nilfs_btree_alloc_path();
1426 ret = nilfs_btree_do_lookup(btree, path, key, NULL,
1427 NILFS_BTREE_LEVEL_NODE_MIN);
/* DAT inode is only needed for virtual block numbers */
1432 dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
1434 ret = nilfs_btree_prepare_delete(btree, path, &level, &stats, dat);
1437 nilfs_btree_commit_delete(btree, path, level, dat);
1438 nilfs_bmap_sub_blocks(btree, stats.bs_nblocks);
1441 nilfs_btree_free_path(path);
/*
 * nilfs_btree_last_key - bop_last_key: store the largest key of the tree
 * in *@keyp via a rightmost-path lookup.  Returns 0 or a negative errno.
 */
1445 static int nilfs_btree_last_key(const struct nilfs_bmap *btree, __u64 *keyp)
1447 struct nilfs_btree_path *path;
1450 path = nilfs_btree_alloc_path();
1454 ret = nilfs_btree_do_lookup_last(btree, path, keyp, NULL);
1456 nilfs_btree_free_path(path);
/*
 * nilfs_btree_check_delete - bop_check_delete: report whether deleting
 * @key would make the bmap small enough to convert back to a direct map.
 * True when @key is the current maximum AND the next-largest key fits
 * below NILFS_BMAP_LARGE_LOW.
 * NOTE(review): the switch arms for each tree height were thinned out by
 * extraction; only the height-2/3 node fetch remains visible.
 */
1461 static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key)
1463 struct buffer_head *bh;
1464 struct nilfs_btree_node *root, *node;
1465 __u64 maxkey, nextmaxkey;
1469 root = nilfs_btree_get_root(btree);
1470 switch (nilfs_btree_height(btree)) {
1476 nchildren = nilfs_btree_node_get_nchildren(root);
/* descend to the rightmost child to find the two largest keys */
1479 ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1);
1480 ret = nilfs_btree_get_block(btree, ptr, &bh);
1483 node = (struct nilfs_btree_node *)bh->b_data;
1489 nchildren = nilfs_btree_node_get_nchildren(node);
1490 maxkey = nilfs_btree_node_get_key(node, nchildren - 1);
/* nextmaxkey falls back to 0 when only one child remains */
1491 nextmaxkey = (nchildren > 1) ?
1492 nilfs_btree_node_get_key(node, nchildren - 2) : 0;
1496 return (maxkey == key) && (nextmaxkey < NILFS_BMAP_LARGE_LOW);
/*
 * nilfs_btree_gather_data - bop_gather_data: copy up to @nitems key/ptr
 * pairs out of the (already small) tree into @keys/@ptrs, used when
 * converting the bmap back to direct mapping.  Returns the number of
 * items gathered or a negative errno.
 * NOTE(review): switch arms and error checks thinned by extraction.
 */
1499 static int nilfs_btree_gather_data(struct nilfs_bmap *btree,
1500 __u64 *keys, __u64 *ptrs, int nitems)
1502 struct buffer_head *bh;
1503 struct nilfs_btree_node *node, *root;
1507 int nchildren, i, ret;
1509 root = nilfs_btree_get_root(btree);
1510 switch (nilfs_btree_height(btree)) {
1516 nchildren = nilfs_btree_node_get_nchildren(root);
/* a gatherable tree should have collapsed to a single child */
1517 WARN_ON(nchildren > 1);
1518 ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1);
1519 ret = nilfs_btree_get_block(btree, ptr, &bh);
1522 node = (struct nilfs_btree_node *)bh->b_data;
1529 nchildren = nilfs_btree_node_get_nchildren(node);
1530 if (nchildren < nitems)
/* raw on-disk little-endian arrays; convert to host order on copy */
1532 dkeys = nilfs_btree_node_dkeys(node);
1533 dptrs = nilfs_btree_node_dptrs(node, btree);
1534 for (i = 0; i < nitems; i++) {
1535 keys[i] = le64_to_cpu(dkeys[i]);
1536 ptrs[i] = le64_to_cpu(dptrs[i]);
/*
 * nilfs_btree_prepare_convert_and_insert - allocate the ptrs (and, when
 * the data does not fit in the root, one new child block via @nreq/@bhp)
 * needed to convert a direct bmap into a btree and insert @key.
 * @stats counts blocks to be added; all allocations are rolled back on
 * failure.
 */
1546 nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key,
1547 union nilfs_bmap_ptr_req *dreq,
1548 union nilfs_bmap_ptr_req *nreq,
1549 struct buffer_head **bhp,
1550 struct nilfs_bmap_stats *stats)
1552 struct buffer_head *bh;
1553 struct inode *dat = NULL;
1556 stats->bs_nblocks = 0;
1559 /* cannot find near ptr */
1560 if (NILFS_BMAP_USE_VBN(btree)) {
1561 dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key);
1562 dat = nilfs_bmap_get_dat(btree);
1565 ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);
1570 stats->bs_nblocks++;
/* nreq non-NULL means a level-1 child node block is also required */
1572 nreq->bpr_ptr = dreq->bpr_ptr + 1;
1573 ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat);
1577 ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh);
1582 stats->bs_nblocks++;
/* error unwind: release both allocations and zero the stats */
1590 nilfs_bmap_abort_alloc_ptr(btree, nreq, dat);
1592 nilfs_bmap_abort_alloc_ptr(btree, dreq, dat);
1593 stats->bs_nblocks = 0;
/*
 * nilfs_btree_commit_convert_and_insert - finish the direct-to-btree
 * conversion prepared above: reinitialize the bmap as a btree, build the
 * root (and, if @nreq was used, a level-1 child node in @bh) from the
 * gathered @keys/@ptrs, and insert the new @key/@ptr pair.
 * NOTE(review): the if/else split between the two layouts lost its brace
 * lines in this extract.
 */
1599 nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
1600 __u64 key, __u64 ptr,
1601 const __u64 *keys, const __u64 *ptrs,
1603 union nilfs_bmap_ptr_req *dreq,
1604 union nilfs_bmap_ptr_req *nreq,
1605 struct buffer_head *bh)
1607 struct nilfs_btree_node *node;
1611 /* free resources */
1612 if (btree->b_ops->bop_clear != NULL)
1613 btree->b_ops->bop_clear(btree);
1615 /* ptr must be a pointer to a buffer head. */
1616 set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
1618 /* convert and insert */
1619 dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
1620 nilfs_btree_init(btree);
/* two-level layout: commit both ptr allocations */
1622 nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
1623 nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);
1625 /* create child node at level 1 */
1626 node = (struct nilfs_btree_node *)bh->b_data;
1627 nilfs_btree_node_init(btree, node, 0, 1, n, keys, ptrs);
1628 nilfs_btree_node_insert(btree, node, key, dreq->bpr_ptr, n);
1629 if (!buffer_dirty(bh))
1630 nilfs_btnode_mark_dirty(bh);
1631 if (!nilfs_bmap_dirty(btree))
1632 nilfs_bmap_set_dirty(btree);
1636 /* create root node at level 2 */
1637 node = nilfs_btree_get_root(btree);
1638 tmpptr = nreq->bpr_ptr;
1639 nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT,
1640 2, 1, &keys[0], &tmpptr);
/* single-level layout: everything fits directly in the root */
1642 nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
1644 /* create root node at level 1 */
1645 node = nilfs_btree_get_root(btree);
1646 nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT,
1648 nilfs_btree_node_insert(btree, node, key, dreq->bpr_ptr, n);
1649 if (!nilfs_bmap_dirty(btree))
1650 nilfs_bmap_set_dirty(btree);
/* remember the newly written ptr as the allocation target hint */
1653 if (NILFS_BMAP_USE_VBN(btree))
1654 nilfs_bmap_set_target_v(btree, key, dreq->bpr_ptr);
/**
 * nilfs_btree_convert_and_insert - convert a direct bmap to a btree and
 * insert a new key/ptr pair in one step.  The existing @n entries
 * (@keys/@ptrs) plus the new (@key, @ptr) are laid out either entirely in
 * the root (when n + 1 fits NILFS_BTREE_ROOT_NCHILDREN_MAX) or in a root
 * plus one level-1 child node.  Returns 0 or a negative errno.
 */
1666 int nilfs_btree_convert_and_insert(struct nilfs_bmap *btree,
1667 __u64 key, __u64 ptr,
1668 const __u64 *keys, const __u64 *ptrs, int n)
1670 struct buffer_head *bh;
1671 union nilfs_bmap_ptr_req dreq, nreq, *di, *ni;
1672 struct nilfs_bmap_stats stats;
/* choose layout: root-only vs root + one child node */
1675 if (n + 1 <= NILFS_BTREE_ROOT_NCHILDREN_MAX) {
1678 } else if ((n + 1) <= NILFS_BTREE_NODE_NCHILDREN_MAX(
1679 1 << btree->b_inode->i_blkbits)) {
1688 ret = nilfs_btree_prepare_convert_and_insert(btree, key, di, ni, &bh,
1692 nilfs_btree_commit_convert_and_insert(btree, key, ptr, keys, ptrs, n,
1694 nilfs_bmap_add_blocks(btree, stats.bs_nblocks);
/*
 * nilfs_btree_propagate_p - propagate dirtiness upward for physical-block
 * btrees: walk ancestors above @level and mark each clean node buffer
 * dirty, stopping at the first already-dirty one or below the root.
 */
1698 static int nilfs_btree_propagate_p(struct nilfs_bmap *btree,
1699 struct nilfs_btree_path *path,
1701 struct buffer_head *bh)
1703 while ((++level < nilfs_btree_height(btree) - 1) &&
1704 !buffer_dirty(path[level].bp_bh))
1705 nilfs_btnode_mark_dirty(path[level].bp_bh);
/*
 * nilfs_btree_prepare_update_v - prepare reassignment of the virtual
 * block number for the node at @level: reserve old->new in the DAT and,
 * when the buffer is a btree-node buffer, prepare the btnode-cache key
 * change (bp_ctxt).  The DAT update is aborted if the key-change
 * preparation fails.
 */
1710 static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree,
1711 struct nilfs_btree_path *path,
1712 int level, struct inode *dat)
1714 struct nilfs_btree_node *parent;
1717 parent = nilfs_btree_get_node(btree, path, level + 1);
1718 path[level].bp_oldreq.bpr_ptr =
1719 nilfs_btree_node_get_ptr(btree, parent,
1720 path[level + 1].bp_index);
/* new vblocknr is allocated adjacent to the old one */
1721 path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1;
1722 ret = nilfs_dat_prepare_update(dat, &path[level].bp_oldreq.bpr_req,
1723 &path[level].bp_newreq.bpr_req);
1727 if (buffer_nilfs_node(path[level].bp_bh)) {
1728 path[level].bp_ctxt.oldkey = path[level].bp_oldreq.bpr_ptr;
1729 path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
1730 path[level].bp_ctxt.bh = path[level].bp_bh;
1731 ret = nilfs_btnode_prepare_change_key(
1732 &NILFS_BMAP_I(btree)->i_btnode_cache,
1733 &path[level].bp_ctxt);
/* roll back the DAT reservation if the cache rekey cannot proceed */
1735 nilfs_dat_abort_update(dat,
1736 &path[level].bp_oldreq.bpr_req,
1737 &path[level].bp_newreq.bpr_req);
/*
 * nilfs_btree_commit_update_v - commit the vblocknr reassignment prepared
 * by nilfs_btree_prepare_update_v: finalize the DAT update, commit the
 * btnode-cache key change, mark the buffer volatile, and write the new
 * ptr into the parent node.
 */
1745 static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree,
1746 struct nilfs_btree_path *path,
1747 int level, struct inode *dat)
1749 struct nilfs_btree_node *parent;
/* "dead" flag depends on whether ptrs are VS (snapshot-tracked) */
1751 nilfs_dat_commit_update(dat, &path[level].bp_oldreq.bpr_req,
1752 &path[level].bp_newreq.bpr_req,
1753 btree->b_ptr_type == NILFS_BMAP_PTR_VS);
1755 if (buffer_nilfs_node(path[level].bp_bh)) {
1756 nilfs_btnode_commit_change_key(
1757 &NILFS_BMAP_I(btree)->i_btnode_cache,
1758 &path[level].bp_ctxt);
/* the rekey may have substituted a new buffer head */
1759 path[level].bp_bh = path[level].bp_ctxt.bh;
1761 set_buffer_nilfs_volatile(path[level].bp_bh);
1763 parent = nilfs_btree_get_node(btree, path, level + 1);
1764 nilfs_btree_node_set_ptr(btree, parent, path[level + 1].bp_index,
1765 path[level].bp_newreq.bpr_ptr);
/*
 * nilfs_btree_abort_update_v - undo a prepared vblocknr reassignment:
 * abort the DAT update and, for btree-node buffers, the pending
 * btnode-cache key change.
 */
1768 static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree,
1769 struct nilfs_btree_path *path,
1770 int level, struct inode *dat)
1772 nilfs_dat_abort_update(dat, &path[level].bp_oldreq.bpr_req,
1773 &path[level].bp_newreq.bpr_req);
1774 if (buffer_nilfs_node(path[level].bp_bh))
1775 nilfs_btnode_abort_change_key(
1776 &NILFS_BMAP_I(btree)->i_btnode_cache,
1777 &path[level].bp_ctxt);
/*
 * nilfs_btree_prepare_propagate_v - prepare vblocknr updates from
 * @minlevel upward until an already-dirty ancestor is reached; the
 * highest prepared level is returned in *@maxlevelp.  On failure all
 * levels prepared so far are aborted in reverse order.
 * NOTE(review): error checks and labels were dropped by extraction.
 */
1780 static int nilfs_btree_prepare_propagate_v(struct nilfs_bmap *btree,
1781 struct nilfs_btree_path *path,
1782 int minlevel, int *maxlevelp,
/* volatile buffers were already reassigned in this segment; skip them */
1788 if (!buffer_nilfs_volatile(path[level].bp_bh)) {
1789 ret = nilfs_btree_prepare_update_v(btree, path, level, dat);
1793 while ((++level < nilfs_btree_height(btree) - 1) &&
1794 !buffer_dirty(path[level].bp_bh)) {
1796 WARN_ON(buffer_nilfs_volatile(path[level].bp_bh));
1797 ret = nilfs_btree_prepare_update_v(btree, path, level, dat);
1803 *maxlevelp = level - 1;
/* error unwind: abort prepared levels top-down, minlevel conditionally */
1808 while (--level > minlevel)
1809 nilfs_btree_abort_update_v(btree, path, level, dat);
1810 if (!buffer_nilfs_volatile(path[level].bp_bh))
1811 nilfs_btree_abort_update_v(btree, path, level, dat);
/*
 * nilfs_btree_commit_propagate_v - commit the updates prepared by
 * nilfs_btree_prepare_propagate_v for levels @minlevel..@maxlevel
 * (minlevel only when its buffer was not already volatile).
 */
1815 static void nilfs_btree_commit_propagate_v(struct nilfs_bmap *btree,
1816 struct nilfs_btree_path *path,
1817 int minlevel, int maxlevel,
1818 struct buffer_head *bh,
1823 if (!buffer_nilfs_volatile(path[minlevel].bp_bh))
1824 nilfs_btree_commit_update_v(btree, path, minlevel, dat);
1826 for (level = minlevel + 1; level <= maxlevel; level++)
1827 nilfs_btree_commit_update_v(btree, path, level, dat);
/*
 * nilfs_btree_propagate_v - propagate a dirty buffer @bh upward for
 * virtual-block btrees: prepare/commit vblocknr reassignments along the
 * ancestor chain, or just mark the DAT entry dirty when the buffer was
 * already reassigned (volatile).  The borrowed bp_bh reference is dropped
 * before returning.
 */
1830 static int nilfs_btree_propagate_v(struct nilfs_bmap *btree,
1831 struct nilfs_btree_path *path,
1832 int level, struct buffer_head *bh)
1834 int maxlevel = 0, ret;
1835 struct nilfs_btree_node *parent;
1836 struct inode *dat = nilfs_bmap_get_dat(btree);
1840 path[level].bp_bh = bh;
1841 ret = nilfs_btree_prepare_propagate_v(btree, path, level, &maxlevel,
/* already-volatile buffer: only the DAT entry needs dirtying */
1846 if (buffer_nilfs_volatile(path[level].bp_bh)) {
1847 parent = nilfs_btree_get_node(btree, path, level + 1);
1848 ptr = nilfs_btree_node_get_ptr(btree, parent,
1849 path[level + 1].bp_index);
1850 ret = nilfs_dat_mark_dirty(dat, ptr);
1855 nilfs_btree_commit_propagate_v(btree, path, level, maxlevel, bh, dat);
1858 brelse(path[level].bp_bh);
1859 path[level].bp_bh = NULL;
/*
 * nilfs_btree_propagate - bop_propagate: locate @bh's position in the
 * tree (by its node's first key, or by data-block key for level-DATA
 * buffers) and dispatch to the _v or _p propagation path.
 */
1863 static int nilfs_btree_propagate(struct nilfs_bmap *btree,
1864 struct buffer_head *bh)
1866 struct nilfs_btree_path *path;
1867 struct nilfs_btree_node *node;
1871 WARN_ON(!buffer_dirty(bh));
1873 path = nilfs_btree_alloc_path();
/* derive lookup key and level from the buffer's own content */
1877 if (buffer_nilfs_node(bh)) {
1878 node = (struct nilfs_btree_node *)bh->b_data;
1879 key = nilfs_btree_node_get_key(node, 0);
1880 level = nilfs_btree_node_get_level(node);
1882 key = nilfs_bmap_data_get_key(btree, bh);
1883 level = NILFS_BTREE_LEVEL_DATA;
1886 ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1);
/* a dirty buffer that cannot be found indicates tree corruption */
1888 if (unlikely(ret == -ENOENT))
1889 printk(KERN_CRIT "%s: key = %llu, level == %d\n",
1890 __func__, (unsigned long long)key, level);
1894 ret = NILFS_BMAP_USE_VBN(btree) ?
1895 nilfs_btree_propagate_v(btree, path, level, bh) :
1896 nilfs_btree_propagate_p(btree, path, level, bh);
1899 nilfs_btree_free_path(path);
/*
 * nilfs_btree_propagate_gc - bop_propagate for GC inodes: b_blocknr holds
 * the virtual block number, so just mark the DAT entry dirty.
 */
1904 static int nilfs_btree_propagate_gc(struct nilfs_bmap *btree,
1905 struct buffer_head *bh)
1907 return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(btree), bh->b_blocknr);
/*
 * nilfs_btree_add_dirty_buffer - insert a dirty btree-node buffer into the
 * per-level list @lists[level], kept sorted by each node's first key so
 * writeout proceeds in key order.  Buffers with an invalid level are
 * logged and skipped.
 */
1910 static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
1911 struct list_head *lists,
1912 struct buffer_head *bh)
1914 struct list_head *head;
1915 struct buffer_head *cbh;
1916 struct nilfs_btree_node *node, *cnode;
1921 node = (struct nilfs_btree_node *)bh->b_data;
1922 key = nilfs_btree_node_get_key(node, 0);
1923 level = nilfs_btree_node_get_level(node);
1924 if (level < NILFS_BTREE_LEVEL_NODE_MIN ||
1925 level >= NILFS_BTREE_LEVEL_MAX) {
1928 "%s: invalid btree level: %d (key=%llu, ino=%lu, "
1930 __func__, level, (unsigned long long)key,
1931 NILFS_BMAP_I(btree)->vfs_inode.i_ino,
1932 (unsigned long long)bh->b_blocknr);
/* find the insertion point that keeps the level list key-sorted */
1936 list_for_each(head, &lists[level]) {
1937 cbh = list_entry(head, struct buffer_head, b_assoc_buffers);
1938 cnode = (struct nilfs_btree_node *)cbh->b_data;
1939 ckey = nilfs_btree_node_get_key(cnode, 0);
1943 list_add_tail(&bh->b_assoc_buffers, head);
/*
 * nilfs_btree_lookup_dirty_buffers - bop_lookup_dirty_buffers: scan the
 * btnode cache for dirty pages, bucket each dirty buffer by btree level
 * (sorted by key within a level), then splice the buckets onto @listp in
 * ascending level order so lower levels are written first.
 */
1946 static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
1947 struct list_head *listp)
1949 struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache;
1950 struct list_head lists[NILFS_BTREE_LEVEL_MAX];
1951 struct pagevec pvec;
1952 struct buffer_head *bh, *head;
1956 for (level = NILFS_BTREE_LEVEL_NODE_MIN;
1957 level < NILFS_BTREE_LEVEL_MAX;
1959 INIT_LIST_HEAD(&lists[level]);
1961 pagevec_init(&pvec, 0);
/* iterate the page cache tagged-dirty pages a pagevec at a time */
1963 while (pagevec_lookup_tag(&pvec, btcache, &index, PAGECACHE_TAG_DIRTY,
1965 for (i = 0; i < pagevec_count(&pvec); i++) {
1966 bh = head = page_buffers(pvec.pages[i]);
1968 if (buffer_dirty(bh))
1969 nilfs_btree_add_dirty_buffer(btree,
1971 } while ((bh = bh->b_this_page) != head);
1973 pagevec_release(&pvec);
1977 for (level = NILFS_BTREE_LEVEL_NODE_MIN;
1978 level < NILFS_BTREE_LEVEL_MAX;
1980 list_splice_tail(&lists[level], listp);
/*
 * nilfs_btree_assign_p - assign the on-disk location for physical-block
 * btrees: rekey the btnode-cache entry from the old ptr to @blocknr (for
 * node buffers), store @blocknr in the parent, and fill the DAT-less
 * binfo (blkoff + level) used by the segment constructor.
 */
1983 static int nilfs_btree_assign_p(struct nilfs_bmap *btree,
1984 struct nilfs_btree_path *path,
1986 struct buffer_head **bh,
1988 union nilfs_binfo *binfo)
1990 struct nilfs_btree_node *parent;
1995 parent = nilfs_btree_get_node(btree, path, level + 1);
1996 ptr = nilfs_btree_node_get_ptr(btree, parent,
1997 path[level + 1].bp_index);
1998 if (buffer_nilfs_node(*bh)) {
1999 path[level].bp_ctxt.oldkey = ptr;
2000 path[level].bp_ctxt.newkey = blocknr;
2001 path[level].bp_ctxt.bh = *bh;
2002 ret = nilfs_btnode_prepare_change_key(
2003 &NILFS_BMAP_I(btree)->i_btnode_cache,
2004 &path[level].bp_ctxt);
2007 nilfs_btnode_commit_change_key(
2008 &NILFS_BMAP_I(btree)->i_btnode_cache,
2009 &path[level].bp_ctxt);
/* the rekey may have substituted a new buffer head for the caller */
2010 *bh = path[level].bp_ctxt.bh;
2013 nilfs_btree_node_set_ptr(btree, parent,
2014 path[level + 1].bp_index, blocknr);
2016 key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index);
2017 /* on-disk format */
2018 binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
2019 binfo->bi_dat.bi_level = level;
/*
 * nilfs_btree_assign_v - assign the on-disk location for virtual-block
 * btrees: bind the node's vblocknr to @blocknr in the DAT and fill the
 * binfo (vblocknr + blkoff) for the segment constructor.
 */
2024 static int nilfs_btree_assign_v(struct nilfs_bmap *btree,
2025 struct nilfs_btree_path *path,
2027 struct buffer_head **bh,
2029 union nilfs_binfo *binfo)
2031 struct nilfs_btree_node *parent;
2032 struct inode *dat = nilfs_bmap_get_dat(btree);
2035 union nilfs_bmap_ptr_req req;
2038 parent = nilfs_btree_get_node(btree, path, level + 1);
2039 ptr = nilfs_btree_node_get_ptr(btree, parent, path[level + 1].bp_index);
/* record in the DAT that this vblocknr now starts at @blocknr */
2041 ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
2044 nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
2046 key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index);
2047 /* on-disk format */
2048 binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
2049 binfo->bi_v.bi_blkoff = cpu_to_le64(key);
/*
 * nilfs_btree_assign - bop_assign: locate the buffer's position in the
 * tree, then dispatch to assign_v (virtual block numbers) or assign_p
 * (physical) to record the final disk address and binfo.
 */
2054 static int nilfs_btree_assign(struct nilfs_bmap *btree,
2055 struct buffer_head **bh,
2057 union nilfs_binfo *binfo)
2059 struct nilfs_btree_path *path;
2060 struct nilfs_btree_node *node;
2064 path = nilfs_btree_alloc_path();
/* derive lookup key and level from the buffer's own content */
2068 if (buffer_nilfs_node(*bh)) {
2069 node = (struct nilfs_btree_node *)(*bh)->b_data;
2070 key = nilfs_btree_node_get_key(node, 0);
2071 level = nilfs_btree_node_get_level(node);
2073 key = nilfs_bmap_data_get_key(btree, *bh);
2074 level = NILFS_BTREE_LEVEL_DATA;
2077 ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1);
2079 WARN_ON(ret == -ENOENT);
2083 ret = NILFS_BMAP_USE_VBN(btree) ?
2084 nilfs_btree_assign_v(btree, path, level, bh, blocknr, binfo) :
2085 nilfs_btree_assign_p(btree, path, level, bh, blocknr, binfo);
2088 nilfs_btree_free_path(path);
/*
 * nilfs_btree_assign_gc - bop_assign for GC inodes: move the vblocknr
 * (held in b_blocknr) to the new location in the DAT and fill binfo from
 * the buffer's own key, without walking the tree.
 */
2093 static int nilfs_btree_assign_gc(struct nilfs_bmap *btree,
2094 struct buffer_head **bh,
2096 union nilfs_binfo *binfo)
2098 struct nilfs_btree_node *node;
2102 ret = nilfs_dat_move(nilfs_bmap_get_dat(btree), (*bh)->b_blocknr,
2107 if (buffer_nilfs_node(*bh)) {
2108 node = (struct nilfs_btree_node *)(*bh)->b_data;
2109 key = nilfs_btree_node_get_key(node, 0);
2111 key = nilfs_bmap_data_get_key(btree, *bh);
2113 /* on-disk format */
2114 binfo->bi_v.bi_vblocknr = cpu_to_le64((*bh)->b_blocknr);
2115 binfo->bi_v.bi_blkoff = cpu_to_le64(key);
/*
 * nilfs_btree_mark - bop_mark: look up the node holding @key at @level
 * and mark its block buffer (and the bmap) dirty so it gets written by
 * the next segment construction.
 */
2120 static int nilfs_btree_mark(struct nilfs_bmap *btree, __u64 key, int level)
2122 struct buffer_head *bh;
2123 struct nilfs_btree_path *path;
2127 path = nilfs_btree_alloc_path();
2131 ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level + 1);
2133 WARN_ON(ret == -ENOENT);
2136 ret = nilfs_btree_get_block(btree, ptr, &bh);
2138 WARN_ON(ret == -ENOENT);
2142 if (!buffer_dirty(bh))
2143 nilfs_btnode_mark_dirty(bh);
/* NOTE(review): extraction likely dropped a brelse(bh) near here */
2145 if (!nilfs_bmap_dirty(btree))
2146 nilfs_bmap_set_dirty(btree);
2149 nilfs_btree_free_path(path);
/* Operation table for a regular (non-GC) btree-mapped inode. */
2153 static const struct nilfs_bmap_operations nilfs_btree_ops = {
2154 .bop_lookup = nilfs_btree_lookup,
2155 .bop_lookup_contig = nilfs_btree_lookup_contig,
2156 .bop_insert = nilfs_btree_insert,
2157 .bop_delete = nilfs_btree_delete,
2160 .bop_propagate = nilfs_btree_propagate,
2162 .bop_lookup_dirty_buffers = nilfs_btree_lookup_dirty_buffers,
2164 .bop_assign = nilfs_btree_assign,
2165 .bop_mark = nilfs_btree_mark,
2167 .bop_last_key = nilfs_btree_last_key,
2168 .bop_check_insert = NULL,
2169 .bop_check_delete = nilfs_btree_check_delete,
2170 .bop_gather_data = nilfs_btree_gather_data,
/*
 * Reduced operation table used while an inode is under garbage
 * collection: only propagate/lookup-dirty/assign are meaningful.
 */
2173 static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
2175 .bop_lookup_contig = NULL,
2180 .bop_propagate = nilfs_btree_propagate_gc,
2182 .bop_lookup_dirty_buffers = nilfs_btree_lookup_dirty_buffers,
2184 .bop_assign = nilfs_btree_assign_gc,
2187 .bop_last_key = NULL,
2188 .bop_check_insert = NULL,
2189 .bop_check_delete = NULL,
2190 .bop_gather_data = NULL,
/*
 * nilfs_btree_init - install the regular btree operation table on @bmap.
 * NOTE(review): the trailing return statement was dropped by extraction.
 */
2193 int nilfs_btree_init(struct nilfs_bmap *bmap)
2195 bmap->b_ops = &nilfs_btree_ops;
/* nilfs_btree_init_gc - install the GC operation table on @bmap. */
2199 void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
2201 bmap->b_ops = &nilfs_btree_ops_gc;