/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"
/*
 * Routines to implement directories as Btrees of hashed names.
 */
/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/
/*
 * Routines used for growing the Btree.
 */
STATIC int	xfs_da3_root_split(xfs_da_state_t *state,
					    xfs_da_state_blk_t *existing_root,
					    xfs_da_state_blk_t *new_child);
STATIC int	xfs_da3_node_split(xfs_da_state_t *state,
					    xfs_da_state_blk_t *existing_blk,
					    xfs_da_state_blk_t *split_blk,
					    xfs_da_state_blk_t *blk_to_add,
					    int treelevel,
					    int *result);
STATIC void	xfs_da3_node_rebalance(xfs_da_state_t *state,
					   xfs_da_state_blk_t *node_blk_1,
					   xfs_da_state_blk_t *node_blk_2);
STATIC void	xfs_da3_node_add(xfs_da_state_t *state,
				   xfs_da_state_blk_t *old_node_blk,
				   xfs_da_state_blk_t *new_node_blk);
/*
 * Routines used for shrinking the Btree.
 */
STATIC int	xfs_da3_root_join(xfs_da_state_t *state,
					   xfs_da_state_blk_t *root_blk);
STATIC int	xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void	xfs_da3_node_remove(xfs_da_state_t *state,
					      xfs_da_state_blk_t *drop_blk);
STATIC void	xfs_da3_node_unbalance(xfs_da_state_t *state,
					 xfs_da_state_blk_t *src_node_blk,
					 xfs_da_state_blk_t *dst_node_blk);
/*
 * Utility routines.
 */
STATIC int	xfs_da3_blk_unlink(xfs_da_state_t *state,
				  xfs_da_state_blk_t *drop_blk,
				  xfs_da_state_blk_t *save_blk);
kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */
/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
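	/*
	 * Note: KM_NOFS keeps this allocation from recursing back into
	 * the filesystem via memory reclaim, since callers may already
	 * hold transaction context here.
	 */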
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}
/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}
/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}
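/*
 * On disk, a da btree node carries either the original v2 header
 * (xfs_da_node_hdr) or the larger v3 header (xfs_da3_node_hdr), which
 * adds CRC, blkno, LSN, UUID and owner fields.  The helpers below
 * convert between the endian-specific on-disk layouts and the single
 * in-core xfs_da3_icnode_hdr so the rest of this file does not need to
 * care which format a given node uses.
 */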
void
xfs_da3_node_hdr_from_disk(
	struct xfs_da3_icnode_hdr	*to,
	struct xfs_da_intnode		*from)
{
	ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
	       from->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));

	if (from->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
		struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)from;

		to->forw = be32_to_cpu(hdr3->info.hdr.forw);
		to->back = be32_to_cpu(hdr3->info.hdr.back);
		to->magic = be16_to_cpu(hdr3->info.hdr.magic);
		to->count = be16_to_cpu(hdr3->__count);
		to->level = be16_to_cpu(hdr3->__level);
	} else {
		to->forw = be32_to_cpu(from->hdr.info.forw);
		to->back = be32_to_cpu(from->hdr.info.back);
		to->magic = be16_to_cpu(from->hdr.info.magic);
		to->count = be16_to_cpu(from->hdr.__count);
		to->level = be16_to_cpu(from->hdr.__level);
	}
}
void
xfs_da3_node_hdr_to_disk(
	struct xfs_da_intnode		*to,
	struct xfs_da3_icnode_hdr	*from)
{
	ASSERT(from->magic == XFS_DA_NODE_MAGIC ||
	       from->magic == XFS_DA3_NODE_MAGIC);

	if (from->magic == XFS_DA3_NODE_MAGIC) {
		struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)to;

		hdr3->info.hdr.forw = cpu_to_be32(from->forw);
		hdr3->info.hdr.back = cpu_to_be32(from->back);
		hdr3->info.hdr.magic = cpu_to_be16(from->magic);
		hdr3->__count = cpu_to_be16(from->count);
		hdr3->__level = cpu_to_be16(from->level);
	} else {
		to->hdr.info.forw = cpu_to_be32(from->forw);
		to->hdr.info.back = cpu_to_be32(from->back);
		to->hdr.info.magic = cpu_to_be16(from->magic);
		to->hdr.__count = cpu_to_be16(from->count);
		to->hdr.__level = cpu_to_be16(from->level);
	}
}
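/*
 * Typical usage is a read-modify-write cycle on the in-core header,
 * as the node manipulation routines below do (sketch):
 *
 *	struct xfs_da3_icnode_hdr ichdr;
 *
 *	xfs_da3_node_hdr_from_disk(&ichdr, node);
 *	ichdr.count--;
 *	xfs_da3_node_hdr_to_disk(node, &ichdr);
 *	xfs_trans_log_buf(tp, bp, XFS_DA_LOGRANGE(node, &node->hdr,
 *					xfs_da3_node_hdr_size(node)));
 */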
static bool
xfs_da3_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_intnode	*hdr = bp->b_addr;
	struct xfs_da3_icnode_hdr ichdr;

	xfs_da3_node_hdr_from_disk(&ichdr, hdr);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		if (ichdr.magic != XFS_DA3_NODE_MAGIC)
			return false;
		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_uuid))
			return false;
		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
			return false;
	} else {
		if (ichdr.magic != XFS_DA_NODE_MAGIC)
			return false;
	}
	if (ichdr.level == 0)
		return false;
	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
		return false;
	if (ichdr.count == 0)
		return false;

	/*
	 * We don't know if the node is for an attribute or directory tree,
	 * so only fail if the count is outside both bounds.
	 */
	if (ichdr.count > mp->m_dir_node_ents &&
	    ichdr.count > mp->m_attr_node_ents)
		return false;

	/* XXX: hash order check? */

	return true;
}
static void
xfs_da3_node_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

	if (!xfs_da3_node_verify(bp)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length), XFS_DA3_NODE_CRC_OFF);
}
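/*
 * Note the ordering above: the block is verified first, the LSN of the
 * last modification is stamped into the v3 header, and only then is the
 * CRC recomputed, so the checksum covers the LSN as it goes to disk.
 */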
/*
 * leaf/node format detection on trees is sketchy, so a node read can be done on
 * leaf level blocks when detection identifies the tree as a node format tree
 * incorrectly. In this case, we need to swap the verifier to match the correct
 * format of the block being read.
 */
static void
xfs_da3_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
		if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				      XFS_DA3_NODE_CRC_OFF))
			break;
		/* fall through */
	case XFS_DA_NODE_MAGIC:
		if (!xfs_da3_node_verify(bp))
			break;
		return;
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	default:
		break;
	}

	/* corrupt block */
	XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
	xfs_buf_ioerror(bp, EFSCORRUPTED);
}
const struct xfs_buf_ops xfs_da3_node_buf_ops = {
	.verify_read = xfs_da3_node_read_verify,
	.verify_write = xfs_da3_node_write_verify,
};
int
xfs_da3_node_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			which_fork)
{
	int			err;

	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
					which_fork, &xfs_da3_node_buf_ops);
	if (!err && tp) {
		struct xfs_da_blkinfo	*info = (*bpp)->b_addr;
		int			type;

		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			type = XFS_BLFT_DA_NODE_BUF;
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			type = XFS_BLFT_ATTR_LEAF_BUF;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			type = XFS_BLFT_DIR_LEAFN_BUF;
			break;
		default:
			type = 0;
			ASSERT(0);
			break;
		}
		xfs_trans_buf_set_type(tp, *bpp, type);
	}
	return err;
}
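/*
 * Setting the buffer type here matters for log recovery: the type is
 * carried in the buffer log format item, which is what allows recovery
 * to pick a matching verifier for the block it is replaying.
 */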
/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/
/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da3_node_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	int			level,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_da_intnode	*node;
	struct xfs_trans	*tp = args->trans;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_da3_icnode_hdr ichdr = {0};
	struct xfs_buf		*bp;
	int			error;

	trace_xfs_da_node_create(args);
	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);

	error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
	if (error)
		return(error);
	bp->b_ops = &xfs_da3_node_buf_ops;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	node = bp->b_addr;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		ichdr.magic = XFS_DA3_NODE_MAGIC;
		hdr3->info.blkno = cpu_to_be64(bp->b_bn);
		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_uuid);
	} else {
		ichdr.magic = XFS_DA_NODE_MAGIC;
	}
	ichdr.level = level;

	xfs_da3_node_hdr_to_disk(node, &ichdr);
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, xfs_da3_node_hdr_size(node)));

	*bpp = bp;
	return(0);
}
/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da3_split(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*oldblk;
	struct xfs_da_state_blk	*newblk;
	struct xfs_da_state_blk	*addblk;
	struct xfs_da_intnode	*node;
	struct xfs_buf		*bp;
	int			max;
	int			action = 0;
	int			error;
	int			i;
	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];
		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != ENOSPC)) {
				return(error);	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr3_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr3_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return(error);	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da3_node_split(state, oldblk, newblk, addblk,
							 max - i, &action);
			addblk->bp = NULL;
			if (error)
				return(error);	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da3_fixhashpath(state, &state->path);
	}
	if (!addblk)
		return(0);

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da3_root_split(state, oldblk, addblk);
	if (error)
		return(error);	/* GROT: dir is inconsistent */
	/*
	 * Update pointers to the node which used to be block 0 and
	 * just got bumped because of the addition of a new root node.
	 * There might be three blocks involved if a double split occurred,
	 * and the original block 0 could be at any position in the list.
	 *
	 * Note: the magic numbers and sibling pointers are in the same
	 * physical place for both v2 and v3 headers (by design). Hence it
	 * doesn't matter which version of the xfs_da_intnode structure we use
	 * here as the result will be the same using either structure.
	 */
	node = oldblk->bp->b_addr;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
			XFS_DA_LOGRANGE(node, &node->hdr.info,
					sizeof(node->hdr.info)));
	}
	node = oldblk->bp->b_addr;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
			XFS_DA_LOGRANGE(node, &node->hdr.info,
					sizeof(node->hdr.info)));
	}
	return(0);
}
/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in the process.
 */
STATIC int						/* error */
xfs_da3_root_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_dir2_leaf	*leaf;
	xfs_dablk_t		blkno;
	int			level;
	int			error;
	int			size;

	trace_xfs_da_root_split(state->args);
	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return error;

	dp = args->dp;
	tp = args->trans;
	mp = state->mp;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return error;
	node = bp->b_addr;
	oldroot = blk1->bp->b_addr;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
		struct xfs_da3_icnode_hdr nodehdr;

		xfs_da3_node_hdr_from_disk(&nodehdr, oldroot);
		btree = xfs_da3_node_tree_p(oldroot);
		size = (int)((char *)&btree[nodehdr.count] - (char *)oldroot);
		level = nodehdr.level;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	} else {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		leaf = (xfs_dir2_leaf_t *)oldroot;
		xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
		ents = xfs_dir3_leaf_ents_p(leaf);

		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
		size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
		level = 0;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
	}
	/*
	 * we can copy most of the information in the node from one block to
	 * another, but for CRC enabled headers we have to make sure that the
	 * block specific identifiers are kept intact. We update the buffer
	 * directly here.
	 */
	memcpy(node, oldroot, size);
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;

		node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
	}
	xfs_trans_log_buf(tp, bp, 0, size - 1);

	bp->b_ops = blk1->bp->b_ops;
	blk1->bp = bp;
	blk1->blkno = blkno;
	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
		level + 1, &bp, args->whichfork);
	if (error)
		return error;

	node = bp->b_addr;
	xfs_da3_node_hdr_from_disk(&nodehdr, node);
	btree = xfs_da3_node_tree_p(node);
	btree[0].hashval = cpu_to_be32(blk1->hashval);
	btree[0].before = cpu_to_be32(blk1->blkno);
	btree[1].hashval = cpu_to_be32(blk2->hashval);
	btree[1].before = cpu_to_be32(blk2->blkno);
	nodehdr.count = 2;
	xfs_da3_node_hdr_to_disk(node, &nodehdr);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= mp->m_dirleafblk &&
		       blk1->blkno < mp->m_dirfreeblk);
		ASSERT(blk2->blkno >= mp->m_dirleafblk &&
		       blk2->blkno < mp->m_dirfreeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));

	return error;
}
/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da3_node_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk,
	struct xfs_da_state_blk	*addblk,
	int			treelevel,
	int			*result)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	xfs_dablk_t		blkno;
	int			newcount;
	int			error;
	int			useextra;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(&nodehdr, node);

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if (nodehdr.count + newcount > state->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return(error);	/* GROT: dir is inconsistent */

		error = xfs_da3_node_create(state->args, blkno, treelevel,
					   &newblk->bp, state->args->whichfork);
		if (error)
			return(error);	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da3_node_rebalance(state, oldblk, newblk);
		error = xfs_da3_blk_link(state, oldblk, newblk);
		if (error)
			return(error);
		*result = 1;
	} else {
		*result = 0;
	}
	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da3_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had a double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(&nodehdr, node);
	if (oldblk->index <= nodehdr.count) {
		oldblk->index++;
		xfs_da3_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da3_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da3_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da3_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return(0);
}
/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da3_node_rebalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_intnode	*tmpnode;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da_node_entry *btree_s;
	struct xfs_da_node_entry *btree_d;
	struct xfs_da3_icnode_hdr nodehdr1;
	struct xfs_da3_icnode_hdr nodehdr2;
	struct xfs_trans	*tp;
	int			count;
	int			tmp;
	int			swap = 0;

	trace_xfs_da_node_rebalance(state->args);
	node1 = blk1->bp->b_addr;
	node2 = blk2->bp->b_addr;
	xfs_da3_node_hdr_from_disk(&nodehdr1, node1);
	xfs_da3_node_hdr_from_disk(&nodehdr2, node2);
	btree1 = xfs_da3_node_tree_p(node1);
	btree2 = xfs_da3_node_tree_p(node2);

	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
			be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
		xfs_da3_node_hdr_from_disk(&nodehdr1, node1);
		xfs_da3_node_hdr_from_disk(&nodehdr2, node2);
		btree1 = xfs_da3_node_tree_p(node1);
		btree2 = xfs_da3_node_tree_p(node2);
		swap = 1;
	}
	count = (nodehdr1.count - nodehdr2.count) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		tmp = nodehdr2.count;
		if (tmp > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &btree2[0];
			btree_d = &btree2[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		nodehdr2.count += count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree1[nodehdr1.count - count];
		btree_d = &btree2[0];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count -= count;
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[0];
		btree_d = &btree1[nodehdr1.count];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count += count;

		xfs_trans_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp = nodehdr2.count - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[count];
		btree_d = &btree2[0];
		memmove(btree_d, btree_s, tmp);
		nodehdr2.count -= count;
	}
	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	xfs_da3_node_hdr_to_disk(node1, &nodehdr1);
	xfs_trans_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr,
				xfs_da3_node_hdr_size(node1)));

	xfs_da3_node_hdr_to_disk(node2, &nodehdr2);
	xfs_trans_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
				xfs_da3_node_hdr_size(node2) +
				(sizeof(btree2[0]) * nodehdr2.count)));
	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	if (swap) {
		node1 = blk1->bp->b_addr;
		node2 = blk2->bp->b_addr;
		xfs_da3_node_hdr_from_disk(&nodehdr1, node1);
		xfs_da3_node_hdr_from_disk(&nodehdr2, node2);
		btree1 = xfs_da3_node_tree_p(node1);
		btree2 = xfs_da3_node_tree_p(node2);
	}
	blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
	blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= nodehdr1.count) {
		blk2->index = blk1->index - nodehdr1.count;
		blk1->index = nodehdr1.count + 1;	/* make it invalid */
	}
}
/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da3_node_add(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			tmp;

	trace_xfs_da_node_add(state->args);
	node = oldblk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(&nodehdr, node);
	btree = xfs_da3_node_tree_p(node);

	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
		       newblk->blkno < state->mp->m_dirfreeblk);
	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	if (oldblk->index < nodehdr.count) {
		tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
		memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
	}
	btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
	btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &btree[oldblk->index],
				tmp + sizeof(*btree)));

	nodehdr.count += 1;
	xfs_da3_node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, xfs_da3_node_hdr_size(node)));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/
/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da3_join(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*drop_blk;
	struct xfs_da_state_blk	*save_blk;
	int			error;
	int			action = 0;

	trace_xfs_da_join(state->args);

	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return(0);
			xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da3_node_remove(state, drop_blk);
			xfs_da3_fixhashpath(state, &state->path);
			error = xfs_da3_node_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return 0;
			xfs_da3_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da3_fixhashpath(state, &state->altpath);
		error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return(error);
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return(error);
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da3_node_remove(state, drop_blk);
	xfs_da3_fixhashpath(state, &state->path);
	error = xfs_da3_root_join(state, &state->path.blk[0]);
	return(error);
}
#ifdef	DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
	} else {
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */
/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da3_root_join(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*root_blk)
{
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_args	*args;
	xfs_dablk_t		child;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr oldroothdr;
	struct xfs_da_node_entry *btree;
	int			error;

	trace_xfs_da_root_join(state->args);

	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);

	args = state->args;
	oldroot = root_blk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(&oldroothdr, oldroot);
	ASSERT(oldroothdr.forw == 0);
	ASSERT(oldroothdr.back == 0);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (oldroothdr.count > 1)
		return 0;

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	btree = xfs_da3_node_tree_p(oldroot);
	child = be32_to_cpu(btree[0].before);
	ASSERT(child != 0);
	error = xfs_da3_node_read(args->trans, args->dp, child, -1, &bp,
					     args->whichfork);
	if (error)
		return error;
	xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
	/*
	 * This could be copying a leaf back into the root block in the case of
	 * there only being a single leaf block left in the tree. Hence we have
	 * to update the b_ops pointer as well to match the buffer type change
	 * that could occur. For dir3 blocks we also need to update the block
	 * number in the buffer header.
	 */
	memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize);
	root_blk->bp->b_ops = bp->b_ops;
	xfs_trans_buf_copy_type(root_blk->bp, bp);
	if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
		struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;

		da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
	}
	xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return(error);
}
/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da3_node_toosmall(
	struct xfs_da_state	*state,
	int			*action)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	xfs_dablk_t		blkno;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr nodehdr;
	int			count;
	int			forward;
	int			error;
	int			retval;
	int			i;

	trace_xfs_da_node_toosmall(state->args);
	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->b_addr;
	node = (xfs_da_intnode_t *)info;
	xfs_da3_node_hdr_from_disk(&nodehdr, node);
	if (nodehdr.count > (state->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return(0);	/* blk over 50%, don't try to join */
	}
	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (nodehdr.count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return(0);
	}
	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	count  = state->node_ents;
	count -= state->node_ents >> 2;
	count -= nodehdr.count;
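	/*
	 * Worked example (assuming 128 entries fit in a node): count
	 * becomes 128 - 32 - ours, so a sibling only qualifies for a
	 * merge if the combined node still has 32 entries (25%) free.
	 */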
	/* start with smaller blk num */
	forward = nodehdr.forw < nodehdr.back;
	for (i = 0; i < 2; forward = !forward, i++) {
		if (forward)
			blkno = nodehdr.forw;
		else
			blkno = nodehdr.back;
		if (blkno == 0)
			continue;
		error = xfs_da3_node_read(state->args->trans, state->args->dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return(error);

		node = bp->b_addr;
		xfs_da3_node_hdr_from_disk(&nodehdr, node);
		xfs_trans_brelse(state->args->trans, bp);

		if (count - nodehdr.count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return(0);
	}
	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
	} else {
		error = xfs_da3_path_shift(state, &state->path, forward,
						 0, &retval);
	}
	if (error)
		return(error);
	if (retval) {
		*action = 0;
		return(0);
	}
	*action = 1;
	return(0);
}
/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da3_node_lasthash(
	struct xfs_buf		*bp,
	int			*count)
{
	struct xfs_da_intnode	 *node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;

	node = bp->b_addr;
	xfs_da3_node_hdr_from_disk(&nodehdr, node);
	if (count)
		*count = nodehdr.count;
	if (!nodehdr.count)
		return 0;
	btree = xfs_da3_node_tree_p(node);
	return be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da3_fixhashpath(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	xfs_dahash_t		lasthash = 0;
	int			level;
	int			count;

	trace_xfs_da_fixhashpath(state->args);
	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da3_node_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		struct xfs_da3_icnode_hdr nodehdr;

		node = blk->bp->b_addr;
		xfs_da3_node_hdr_from_disk(&nodehdr, node);
		btree = xfs_da3_node_tree_p(node);
		if (be32_to_cpu(btree->hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree[blk->index].hashval = cpu_to_be32(lasthash);
		xfs_trans_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, &btree[blk->index],
						  sizeof(*btree)));

		lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
	}
}
/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da3_node_remove(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			index;
	int			tmp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(&nodehdr, node);
	ASSERT(drop_blk->index < nodehdr.count);
	ASSERT(drop_blk->index >= 0);
	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	index = drop_blk->index;
	btree = xfs_da3_node_tree_p(node);
	if (index < nodehdr.count - 1) {
		tmp  = nodehdr.count - index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(&btree[index], &btree[index + 1], tmp);
		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, &btree[index], tmp));
		index = nodehdr.count - 1;
	}
	memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
	nodehdr.count -= 1;
	xfs_da3_node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, xfs_da3_node_hdr_size(node)));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
}
/*
 * Unbalance the elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da3_node_unbalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_intnode	*drop_node;
	struct xfs_da_intnode	*save_node;
	struct xfs_da_node_entry *drop_btree;
	struct xfs_da_node_entry *save_btree;
	struct xfs_da3_icnode_hdr drop_hdr;
	struct xfs_da3_icnode_hdr save_hdr;
	struct xfs_trans	*tp;
	int			sindex;
	int			tmp;

	trace_xfs_da_node_unbalance(state->args);

	drop_node = drop_blk->bp->b_addr;
	save_node = save_blk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(&drop_hdr, drop_node);
	xfs_da3_node_hdr_from_disk(&save_hdr, save_node);
	drop_btree = xfs_da3_node_tree_p(drop_node);
	save_btree = xfs_da3_node_tree_p(save_node);
	tp = state->args->trans;
	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_btree[0].hashval) <
			be32_to_cpu(save_btree[0].hashval)) ||
	    (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
			be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
		/* XXX: check this - is memmove dst correct? */
		tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
		memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);

		sindex = 0;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[0],
				(save_hdr.count + drop_hdr.count) *
						sizeof(xfs_da_node_entry_t)));
	} else {
		sindex = save_hdr.count;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
				drop_hdr.count * sizeof(xfs_da_node_entry_t)));
	}
	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(&save_btree[sindex], &drop_btree[0], tmp);
	save_hdr.count += drop_hdr.count;

	xfs_da3_node_hdr_to_disk(save_node, &save_hdr);
	xfs_trans_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
				xfs_da3_node_hdr_size(save_node)));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
}
/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/
/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 */
int							/* error */
xfs_da3_node_lookup_int(
	struct xfs_da_state	*state,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*curr;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	xfs_dablk_t		blkno;
	xfs_dahash_t		hashval;
	xfs_dahash_t		btreehashval;
	int			probe;
	int			span;
	int			max;
	int			error;
	int			retval;

	args = state->args;
	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
	for (blk = &state->path.blk[0], state->path.active = 1;
		 state->path.active <= XFS_DA_NODE_MAXDEPTH;
		 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da3_node_read(args->trans, args->dp, blkno,
					-1, &blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return(error);
		}
		curr = blk->bp->b_addr;
		blk->magic = be16_to_cpu(curr->magic);

		if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
		    blk->magic == XFS_ATTR3_LEAF_MAGIC) {
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		}

		if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
		    blk->magic == XFS_DIR3_LEAFN_MAGIC) {
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
			break;
		}

		blk->magic = XFS_DA_NODE_MAGIC;
		/*
		 * Search an intermediate node for a match.
		 */
		node = blk->bp->b_addr;
		xfs_da3_node_hdr_from_disk(&nodehdr, node);
		btree = xfs_da3_node_tree_p(node);

		max = nodehdr.count;
		blk->hashval = be32_to_cpu(btree[max - 1].hashval);

		/*
		 * Binary search.  (note: small blocks will skip loop)
		 */
		probe = span = max / 2;
		hashval = args->hashval;
		while (span > 4) {
			span /= 2;
			btreehashval = be32_to_cpu(btree[probe].hashval);
			if (btreehashval < hashval)
				probe += span;
			else if (btreehashval > hashval)
				probe -= span;
			else
				break;
		}
		ASSERT((probe >= 0) && (probe < max));
		ASSERT((span <= 4) ||
			(be32_to_cpu(btree[probe].hashval) == hashval));

		/*
		 * Since we may have duplicate hashval's, find the first
		 * matching hashval in the node.
		 */
		while (probe > 0 &&
		       be32_to_cpu(btree[probe].hashval) >= hashval) {
			probe--;
		}
		while (probe < max &&
		       be32_to_cpu(btree[probe].hashval) < hashval) {
			probe++;
		}

		/*
		 * Pick the right block to descend on.
		 */
		if (probe == max) {
			blk->index = max - 1;
			blkno = be32_to_cpu(btree[max - 1].before);
		} else {
			blk->index = probe;
			blkno = be32_to_cpu(btree[probe].before);
		}
	}
	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return XFS_ERROR(EFSCORRUPTED);
		}
		if (((retval == ENOENT) || (retval == ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da3_path_shift(state, &state->path, 1, 1,
							 &retval);
			if (error)
				return(error);
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = XFS_ERROR(ENOATTR);
			}
		}
		break;
	}
	*result = retval;
	return(0);
}
/*========================================================================
 * Utility routines.
 *========================================================================*/
/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da3_node_order(
	struct xfs_buf	*node1_bp,
	struct xfs_buf	*node2_bp)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da3_icnode_hdr node1hdr;
	struct xfs_da3_icnode_hdr node2hdr;

	node1 = node1_bp->b_addr;
	node2 = node2_bp->b_addr;
	xfs_da3_node_hdr_from_disk(&node1hdr, node1);
	xfs_da3_node_hdr_from_disk(&node2hdr, node2);
	btree1 = xfs_da3_node_tree_p(node1);
	btree2 = xfs_da3_node_tree_p(node2);

	if (node1hdr.count > 0 && node2hdr.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
	      be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
		return 1;
	}
	return 0;
}
/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
int							/* error */
xfs_da3_blk_link(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*old_blk,
	struct xfs_da_state_blk	*new_blk)
{
	struct xfs_da_blkinfo	*old_info;
	struct xfs_da_blkinfo	*new_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			before = 0;
	int			error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->b_addr;
	new_info = new_blk->bp->b_addr;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da3_node_order(old_blk->bp, new_blk->bp);
		break;
	}
	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		trace_xfs_da_link_before(args);
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(old_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		trace_xfs_da_link_after(args);
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(old_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return(0);
}
/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da3_blk_unlink(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_blkinfo	*drop_info;
	struct xfs_da_blkinfo	*save_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->b_addr;
	drop_info = drop_blk->bp->b_addr;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));
	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		trace_xfs_da_unlink_back(args);
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						sizeof(*tmp_info) - 1);
		}
	} else {
		trace_xfs_da_unlink_forward(args);
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						sizeof(*tmp_info) - 1);
		}
	}

	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return(0);
}
/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
int							/* error */
xfs_da3_path_shift(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path,
	int			forward,
	int			release,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	struct xfs_da_intnode	*node;
	struct xfs_da_args	*args;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	xfs_dablk_t		blkno = 0;
	int			level;
	int			error;

	trace_xfs_da_path_shift(state->args);
	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active-1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		node = blk->bp->b_addr;
		xfs_da3_node_hdr_from_disk(&nodehdr, node);
		btree = xfs_da3_node_tree_p(node);

		if (forward && (blk->index < nodehdr.count - 1)) {
			blk->index++;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = XFS_ERROR(ENOENT);	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return(0);
	}
	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Release the old block.
		 * (if it's dirty, trans won't actually let go)
		 */
		if (release)
			xfs_trans_brelse(args->trans, blk->bp);

		/*
		 * Read the next child block.
		 */
		blk->blkno = blkno;
		error = xfs_da3_node_read(args->trans, args->dp, blkno, -1,
					&blk->bp, args->whichfork);
		if (error)
			return(error);
		info = blk->bp->b_addr;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
		/*
		 * Note: we flatten the magic number to a single type so we
		 * don't have to compare against crc/non-crc types elsewhere.
		 */
		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			blk->magic = XFS_DA_NODE_MAGIC;
			node = (xfs_da_intnode_t *)info;
			xfs_da3_node_hdr_from_disk(&nodehdr, node);
			btree = xfs_da3_node_tree_p(node);
			blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = nodehdr.count - 1;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			ASSERT(level == path->active-1);
			blk->index = 0;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
							      NULL);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			ASSERT(level == path->active-1);
			blk->index = 0;
			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
							       NULL);
			break;
		default:
			ASSERT(0);
			break;
		}
	}
	*result = 0;
	return(0);
}
/*========================================================================
 * Utility routines.
 *========================================================================*/
/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
	xfs_dahash_t hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}
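/*
 * Worked example (by hand): hashing the 3-byte name "abc" skips the
 * four-at-a-time loop, so the result is
 *
 *	('a' << 14) ^ ('b' << 7) ^ ('c' << 0) ^ rol32(0, 21)
 *	= 0x184000 ^ 0x3100 ^ 0x63 = 0x187163
 *
 * Longer names fold each 4-byte group into the rotated running hash,
 * so every byte of the name affects the final 32-bit value.
 */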
enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}
static xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}
const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};
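/*
 * The xfs_nameops indirection exists so that variants can substitute
 * their own hash/compare pair; the case-insensitive ASCII directory
 * code, for instance, supplies nameops that fold case in both
 * operations, while this default does exact byte comparisons.
 */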
int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_drfsbno_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;
	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	ASSERT(args->firstblock != NULL);
	error = xfs_bmapi_write(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
			args->firstblock, args->total, &map, &nmap,
			args->flist);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t		b;
		int			c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi_write(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
					args->firstblock, args->total,
					&mapp[mapi], &nmap, args->flist);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}
	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = XFS_ERROR(ENOSPC);
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}
/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			count;
	int			error;

	trace_xfs_da_grow_inode(args);

	if (args->whichfork == XFS_DATA_FORK) {
		bno = args->dp->i_mount->m_dirleafblk;
		count = args->dp->i_mount->m_dirblkfsbs;
	} else {
		bno = 0;
		count = 1;
	}

	error = xfs_da_grow_inode_int(args, &bno, count);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}
/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed since removing it
 * can't cause a bmap btree split.
 */
STATIC int
xfs_da3_swap_lastblock(
	struct xfs_da_args	*args,
	xfs_dablk_t		*dead_blknop,
	struct xfs_buf		**dead_bufp)
{
	struct xfs_da_blkinfo	*dead_info;
	struct xfs_da_blkinfo	*sib_info;
	struct xfs_da_intnode	*par_node;
	struct xfs_da_intnode	*dead_node;
	struct xfs_dir2_leaf	*dead_leaf2;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr par_hdr;
	struct xfs_inode	*ip;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_buf		*dead_buf;
	struct xfs_buf		*last_buf;
	struct xfs_buf		*sib_buf;
	struct xfs_buf		*par_buf;
	xfs_dahash_t		dead_hash;
	xfs_fileoff_t		lastoff;
	xfs_dablk_t		dead_blkno;
	xfs_dablk_t		last_blkno;
	xfs_dablk_t		sib_blkno;
	xfs_dablk_t		par_blkno;
	int			error;
	int			w;
	int			entno;
	int			level;
	int			dead_level;

	trace_xfs_da_swap_lastblock(args);
	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	ip = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = ip->i_mount;
	lastoff = mp->m_dirfreeblk;
	error = xfs_bmap_last_before(tp, ip, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
	error = xfs_da3_node_read(tp, ip, last_blkno, -1, &last_buf, w);
	if (error)
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->b_addr, last_buf->b_addr, mp->m_dirblksize);
	xfs_trans_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
	dead_info = dead_buf->b_addr;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		xfs_dir3_leaf_hdr_from_disk(&leafhdr, dead_leaf2);
		ents = xfs_dir3_leaf_ents_p(dead_leaf2);
		dead_level = 0;
		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
	} else {
		struct xfs_da3_icnode_hdr deadhdr;

		dead_node = (xfs_da_intnode_t *)dead_info;
		xfs_da3_node_hdr_from_disk(&deadhdr, dead_node);
		btree = xfs_da3_node_tree_p(dead_node);
		dead_level = deadhdr.level;
		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da3_node_read(tp, ip, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da3_node_read(tp, ip, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = mp->m_dirleafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da3_node_read(tp, ip, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		xfs_da3_node_hdr_from_disk(&par_hdr, par_node);
		if (level >= 0 && level != par_hdr.level + 1) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		level = par_hdr.level;
		btree = xfs_da3_node_tree_p(par_node);
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (entno == par_hdr.count) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		par_blkno = be32_to_cpu(btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < par_hdr.count)
			break;
		par_blkno = par_hdr.forw;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		error = xfs_da3_node_read(tp, ip, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		xfs_da3_node_hdr_from_disk(&par_hdr, par_node);
		if (par_hdr.level != level) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		btree = xfs_da3_node_tree_p(par_node);
		entno = 0;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
				sizeof(btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}
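
/*
 * Editor's illustrative sketch (not part of this file): the sibling fixup
 * in xfs_da3_swap_lastblock() is the classic doubly linked list "replace
 * node" operation, shown below on a toy in-memory structure.  All names
 * here are hypothetical; in the on-disk code above the links are
 * big-endian block numbers, so every store goes through cpu_to_be32()
 * and each modified buffer must be logged with xfs_trans_log_buf().
 */
#include <stddef.h>

struct toy_blk {
	struct toy_blk	*forw;		/* right sibling */
	struct toy_blk	*back;		/* left sibling */
};

/* Make "dead" take over "last"'s position in the sibling chain. */
static void
toy_replace_sibling(struct toy_blk *last, struct toy_blk *dead)
{
	dead->forw = last->forw;	/* the memcpy() above does this part */
	dead->back = last->back;
	if (dead->back)
		dead->back->forw = dead;	/* left sibling -> dead */
	if (dead->forw)
		dead->forw->back = dead;	/* right sibling -> dead */
}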

/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	xfs_da_args_t	*args,
	xfs_dablk_t	dead_blkno,
	struct xfs_buf	*dead_buf)
{
	xfs_inode_t *dp;
	int done, error, w, count;
	xfs_trans_t *tp;
	xfs_mount_t *mp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	mp = dp->i_mount;
	if (w == XFS_DATA_FORK)
		count = mp->m_dirblkfsbs;
	else
		count = 1;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				    xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
				    0, args->firstblock, args->flist, &done);
		if (error == ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			error = xfs_da3_swap_lastblock(args, &dead_blkno,
						       &dead_buf);
			if (error)
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}
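
/*
 * Editor's illustrative sketch (not part of this file): the retry loop in
 * xfs_da_shrink_inode() reduces to "unmap; on ENOSPC swap the last block
 * into the dead slot and try again".  A standalone model of that control
 * flow, with hypothetical callbacks standing in for xfs_bunmapi() and
 * xfs_da3_swap_lastblock():
 */
#define TOY_ENOSPC	28	/* mirrors ENOSPC */

static int
toy_shrink(int (*unmap)(void), int (*swap_last)(void))
{
	int error;

	for (;;) {
		error = unmap();
		if (error != TOY_ENOSPC)
			break;			/* success or a hard error */
		error = swap_last();		/* retarget to the last block */
		if (error)
			break;
	}
	return error;
}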

/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	int		off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}
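
/*
 * Editor's illustrative sketch (not part of this file): a standalone,
 * userspace model of the check above.  The mapping is accepted only when
 * each extent starts exactly where the previous one ended, none is a
 * hole/delalloc extent, and the lengths add up to "count".
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_irec { long startoff, startblock, blockcount; };
#define TOY_HOLE	(-1L)	/* stand-in for HOLESTARTBLOCK/DELAYSTARTBLOCK */

static bool
toy_covers(const struct toy_irec *m, int nmap, long bno, long count)
{
	long off = bno;
	int i;

	for (i = 0; i < nmap; i++) {
		if (m[i].startblock == TOY_HOLE || m[i].startoff != off)
			return false;
		off += m[i].blockcount;
	}
	return off == bno + count;
}

int
main(void)
{
	/* contiguous: [100,+4) then [104,+4) covers bno=100, count=8 */
	struct toy_irec ok[] = { { 100, 555, 4 }, { 104, 777, 4 } };
	/* gap: second extent starts at 106, not 104, so it is rejected */
	struct toy_irec gap[] = { { 100, 555, 4 }, { 106, 777, 2 } };

	printf("%d %d\n", toy_covers(ok, 2, 100, 8),
			  toy_covers(gap, 2, 100, 8));	/* prints "1 0" */
	return 0;
}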

/*
 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
 *
 * For the single map case, it is assumed that the caller has provided a pointer
 * to a valid xfs_buf_map.  For the multiple map case, this function will
 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
 * map pointer with the allocated map.
 */
static int
xfs_buf_map_from_irec(
	struct xfs_mount	*mp,
	struct xfs_buf_map	**mapp,
	unsigned int		*nmaps,
	struct xfs_bmbt_irec	*irecs,
	unsigned int		nirecs)
{
	struct xfs_buf_map	*map;
	int			i;

	ASSERT(*nmaps == 1);
	ASSERT(nirecs >= 1);

	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
				  KM_SLEEP | KM_NOFS);
		if (!map)
			return ENOMEM;
		*mapp = map;
	}

	*nmaps = nirecs;
	map = *mapp;
	for (i = 0; i < *nmaps; i++) {
		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
		       irecs[i].br_startblock != HOLESTARTBLOCK);
		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
	}
	return 0;
}
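
/*
 * Editor's worked example (assumed geometry, not from this file): with
 * 4096-byte filesystem blocks, XFS_FSB_TO_BB() is a pure shift by
 * blocklog - BBSHIFT = 12 - 9 = 3, so a 2-block extent becomes a
 * 16-basic-block (512-byte sector) bm_len.  XFS_FSB_TO_DADDR() is not a
 * plain shift: it first splits the fsbno into an AG number and a block
 * offset within that AG before converting to a disk address.
 */
#include <stdio.h>

#define TOY_BBSHIFT	9		/* 512-byte basic blocks */

int
main(void)
{
	int blocklog = 12;		/* hypothetical 4k filesystem blocks */
	long fsbcount = 2;
	long bm_len = fsbcount << (blocklog - TOY_BBSHIFT);

	printf("bm_len = %ld basic blocks\n", bm_len);	/* prints 16 */
	return 0;
}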

/*
 * Map the block we are given ready for reading. There are three possible return
 * values:
 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
 *	     caller knows not to execute a subsequent read.
 *	 0 - if we mapped the block successfully
 *	>0 - positive error number if there was an error.
 */
static int
xfs_dabuf_map(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	struct xfs_buf_map	**map,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb;
	int			error = 0;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	*irecs = &irec;
	int			nirecs;

	ASSERT(map && *map);
	ASSERT(*nmaps == 1);

	nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;

	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb != 1)
			irecs = kmem_zalloc(sizeof(irec) * nfsb,
					    KM_SLEEP | KM_NOFS);

		nirecs = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
				       &nirecs, xfs_bmapi_aflag(whichfork));
		if (error)
			goto out;
	} else {
		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		irecs->br_startoff = (xfs_fileoff_t)bno;
		irecs->br_blockcount = nfsb;
		irecs->br_state = 0;
		nirecs = 1;
	}

	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
		error = mappedbno == -2 ? -1 : XFS_ERROR(EFSCORRUPTED);
		if (unlikely(error == EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				int i;

				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < *nmaps; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)irecs[i].br_startoff,
						(long long)irecs[i].br_startblock,
						(long long)irecs[i].br_blockcount,
						irecs[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto out;
	}
	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
out:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;
}
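
/*
 * Editor's usage sketch: every caller below consumes the three-way return
 * the same way.  -1 means "mappedbno == -2 and we landed in a hole": not
 * an error, but there is nothing to read, so the buffer is left NULL.
 *
 *	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
 *				&mapp, &nmap);
 *	if (error) {
 *		if (error == -1)	// hole, by request: skip the I/O
 *			error = 0;
 *		goto out_free;		// mapp may have been allocated
 *	}
 */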

/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
				   mapp, nmap, 0);
	error = bp ? bp->b_error : XFS_ERROR(EIO);
	if (error) {
		xfs_trans_brelse(trans, bp);
		goto out_free;
	}

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}

/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	error = xfs_trans_read_buf_map(dp->i_mount, trans,
					dp->i_mount->m_ddev_targp,
					mapp, nmap, 0, &bp, ops);
	if (error)
		goto out_free;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);

	/*
	 * This verification code will be moved to a CRC verification callback
	 * function so just leave it here unchanged until then.
	 */
	{
		xfs_dir2_data_hdr_t	*hdr = bp->b_addr;
		xfs_dir2_free_t		*free = bp->b_addr;
		xfs_da_blkinfo_t	*info = bp->b_addr;
		uint			magic, magic1;
		struct xfs_mount	*mp = dp->i_mount;

		magic = be16_to_cpu(info->magic);
		magic1 = be32_to_cpu(hdr->magic);
		if (unlikely(
		    XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
				   (magic != XFS_DA3_NODE_MAGIC) &&
				   (magic != XFS_ATTR_LEAF_MAGIC) &&
				   (magic != XFS_ATTR3_LEAF_MAGIC) &&
				   (magic != XFS_DIR2_LEAF1_MAGIC) &&
				   (magic != XFS_DIR3_LEAF1_MAGIC) &&
				   (magic != XFS_DIR2_LEAFN_MAGIC) &&
				   (magic != XFS_DIR3_LEAFN_MAGIC) &&
				   (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
				   (magic1 != XFS_DIR3_BLOCK_MAGIC) &&
				   (magic1 != XFS_DIR2_DATA_MAGIC) &&
				   (magic1 != XFS_DIR3_DATA_MAGIC) &&
				   (free->hdr.magic !=
					cpu_to_be32(XFS_DIR2_FREE_MAGIC)) &&
				   (free->hdr.magic !=
					cpu_to_be32(XFS_DIR3_FREE_MAGIC)),
				mp, XFS_ERRTAG_DA_READ_BUF,
				XFS_RANDOM_DA_READ_BUF))) {
			trace_xfs_da_btree_corrupt(bp, _RET_IP_);
			XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
					     XFS_ERRLEVEL_LOW, mp, info);
			error = XFS_ERROR(EFSCORRUPTED);
			xfs_trans_brelse(trans, bp);
			goto out_free;
		}
	}
	*bpp = bp;
out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
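
/*
 * Editor's illustrative sketch (not part of this file): once the check
 * above moves behind a struct xfs_buf_ops callback, it runs at I/O
 * completion instead of inside xfs_da_read_buf().  The shape below is
 * modeled on the verifiers used elsewhere in XFS; "xfs_da_magic_verify"
 * and the ops table name are hypothetical, and only two of the magic
 * numbers are shown:
 */
static void
xfs_da_magic_verify(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA_NODE_MAGIC:
	case XFS_DA3_NODE_MAGIC:
		break;			/* recognized block type */
	default:
		trace_xfs_da_btree_corrupt(bp, _RET_IP_);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
		break;
	}
}

static const struct xfs_buf_ops xfs_da_magic_buf_ops = {
	.verify_read = xfs_da_magic_verify,
	.verify_write = xfs_da_magic_verify,
};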

/*
 * Readahead the dir/attr block.
 */
xfs_daddr_t
xfs_da_reada_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	mappedbno = mapp[0].bm_bn;
	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	if (error)
		return -1;
	return mappedbno;
}