xfs: add CRCs to attr leaf blocks
[firefly-linux-kernel-4.4.55.git] / fs / xfs / xfs_attr_leaf.c
1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * Copyright (c) 2013 Red Hat, Inc.
4  * All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it would be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write the Free Software Foundation,
17  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
18  */
19 #include "xfs.h"
20 #include "xfs_fs.h"
21 #include "xfs_types.h"
22 #include "xfs_bit.h"
23 #include "xfs_log.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_mount.h"
28 #include "xfs_da_btree.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_alloc_btree.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_alloc.h"
33 #include "xfs_btree.h"
34 #include "xfs_attr_sf.h"
35 #include "xfs_dinode.h"
36 #include "xfs_inode.h"
37 #include "xfs_inode_item.h"
38 #include "xfs_bmap.h"
39 #include "xfs_attr.h"
40 #include "xfs_attr_leaf.h"
41 #include "xfs_error.h"
42 #include "xfs_trace.h"
43 #include "xfs_buf_item.h"
44 #include "xfs_cksum.h"
45
46
47 /*
48  * xfs_attr_leaf.c
49  *
50  * Routines to implement leaf blocks of attributes as Btrees of hashed names.
51  */
52
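/*
 * For orientation, a minimal self-contained sketch (plain C, no XFS types,
 * all names hypothetical) of the idea behind these leaf blocks: entries are
 * kept sorted by a 32-bit hash of the attribute name, so a lookup binary
 * searches on the hash and then walks forward over any entries sharing that
 * hash to compare the actual names.  The real lookup is
 * xfs_attr3_leaf_lookup_int(); this only illustrates the shape.
 */
#if 0	/* illustrative sketch, not built */
struct example_hashed_entry {
	unsigned int	hashval;	/* hash of the attribute name */
	const char	*name;		/* full name, compared on hash ties */
};

static int
example_hashed_lookup(const struct example_hashed_entry *e, int count,
		      unsigned int hash, const char *name)
{
	int	lo = 0, hi = count;

	/* binary search for the first entry with this hashval */
	while (lo < hi) {
		int	mid = (lo + hi) / 2;

		if (e[mid].hashval < hash)
			lo = mid + 1;
		else
			hi = mid;
	}
	/* duplicate hashvals are possible: scan them comparing names */
	for (; lo < count && e[lo].hashval == hash; lo++)
		if (strcmp(e[lo].name, name) == 0)
			return lo;		/* found */
	return -1;				/* not found (ENOATTR) */
}
#endif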
53 /*========================================================================
54  * Function prototypes for the kernel.
55  *========================================================================*/
56
57 /*
58  * Routines used for growing the Btree.
59  */
60 STATIC int xfs_attr3_leaf_create(struct xfs_da_args *args,
61                                  xfs_dablk_t which_block, struct xfs_buf **bpp);
62 STATIC int xfs_attr3_leaf_add_work(struct xfs_buf *leaf_buffer,
63                                    struct xfs_attr3_icleaf_hdr *ichdr,
64                                    struct xfs_da_args *args, int freemap_index);
65 STATIC void xfs_attr3_leaf_compact(struct xfs_da_args *args,
66                                    struct xfs_attr3_icleaf_hdr *ichdr,
67                                    struct xfs_buf *leaf_buffer);
68 STATIC void xfs_attr3_leaf_rebalance(xfs_da_state_t *state,
69                                                    xfs_da_state_blk_t *blk1,
70                                                    xfs_da_state_blk_t *blk2);
71 STATIC int xfs_attr3_leaf_figure_balance(xfs_da_state_t *state,
72                         xfs_da_state_blk_t *leaf_blk_1,
73                         struct xfs_attr3_icleaf_hdr *ichdr1,
74                         xfs_da_state_blk_t *leaf_blk_2,
75                         struct xfs_attr3_icleaf_hdr *ichdr2,
76                         int *number_entries_in_blk1,
77                         int *number_usedbytes_in_blk1);
78
79 /*
80  * Routines used for shrinking the Btree.
81  */
82 STATIC int xfs_attr3_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
83                                   struct xfs_buf *bp, int level);
84 STATIC int xfs_attr3_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
85                                   struct xfs_buf *bp);
86 STATIC int xfs_attr3_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
87                                    xfs_dablk_t blkno, int blkcnt);
88
89 /*
90  * Utility routines.
91  */
92 STATIC void xfs_attr3_leaf_moveents(struct xfs_attr_leafblock *src_leaf,
93                         struct xfs_attr3_icleaf_hdr *src_ichdr, int src_start,
94                         struct xfs_attr_leafblock *dst_leaf,
95                         struct xfs_attr3_icleaf_hdr *dst_ichdr, int dst_start,
96                         int move_count, struct xfs_mount *mp);
97 STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index);
98
99 void
100 xfs_attr3_leaf_hdr_from_disk(
101         struct xfs_attr3_icleaf_hdr     *to,
102         struct xfs_attr_leafblock       *from)
103 {
104         int     i;
105
106         ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
107                from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
108
109         if (from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)) {
110                 struct xfs_attr3_leaf_hdr *hdr3 = (struct xfs_attr3_leaf_hdr *)from;
111
112                 to->forw = be32_to_cpu(hdr3->info.hdr.forw);
113                 to->back = be32_to_cpu(hdr3->info.hdr.back);
114                 to->magic = be16_to_cpu(hdr3->info.hdr.magic);
115                 to->count = be16_to_cpu(hdr3->count);
116                 to->usedbytes = be16_to_cpu(hdr3->usedbytes);
117                 to->firstused = be16_to_cpu(hdr3->firstused);
118                 to->holes = hdr3->holes;
119
120                 for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
121                         to->freemap[i].base = be16_to_cpu(hdr3->freemap[i].base);
122                         to->freemap[i].size = be16_to_cpu(hdr3->freemap[i].size);
123                 }
124                 return;
125         }
126         to->forw = be32_to_cpu(from->hdr.info.forw);
127         to->back = be32_to_cpu(from->hdr.info.back);
128         to->magic = be16_to_cpu(from->hdr.info.magic);
129         to->count = be16_to_cpu(from->hdr.count);
130         to->usedbytes = be16_to_cpu(from->hdr.usedbytes);
131         to->firstused = be16_to_cpu(from->hdr.firstused);
132         to->holes = from->hdr.holes;
133
134         for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
135                 to->freemap[i].base = be16_to_cpu(from->hdr.freemap[i].base);
136                 to->freemap[i].size = be16_to_cpu(from->hdr.freemap[i].size);
137         }
138 }
139
140 void
141 xfs_attr3_leaf_hdr_to_disk(
142         struct xfs_attr_leafblock       *to,
143         struct xfs_attr3_icleaf_hdr     *from)
144 {
145         int     i;
146
147         ASSERT(from->magic == XFS_ATTR_LEAF_MAGIC ||
148                from->magic == XFS_ATTR3_LEAF_MAGIC);
149
150         if (from->magic == XFS_ATTR3_LEAF_MAGIC) {
151                 struct xfs_attr3_leaf_hdr *hdr3 = (struct xfs_attr3_leaf_hdr *)to;
152
153                 hdr3->info.hdr.forw = cpu_to_be32(from->forw);
154                 hdr3->info.hdr.back = cpu_to_be32(from->back);
155                 hdr3->info.hdr.magic = cpu_to_be16(from->magic);
156                 hdr3->count = cpu_to_be16(from->count);
157                 hdr3->usedbytes = cpu_to_be16(from->usedbytes);
158                 hdr3->firstused = cpu_to_be16(from->firstused);
159                 hdr3->holes = from->holes;
160                 hdr3->pad1 = 0;
161
162                 for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
163                         hdr3->freemap[i].base = cpu_to_be16(from->freemap[i].base);
164                         hdr3->freemap[i].size = cpu_to_be16(from->freemap[i].size);
165                 }
166                 return;
167         }
168         to->hdr.info.forw = cpu_to_be32(from->forw);
169         to->hdr.info.back = cpu_to_be32(from->back);
170         to->hdr.info.magic = cpu_to_be16(from->magic);
171         to->hdr.count = cpu_to_be16(from->count);
172         to->hdr.usedbytes = cpu_to_be16(from->usedbytes);
173         to->hdr.firstused = cpu_to_be16(from->firstused);
174         to->hdr.holes = from->holes;
175         to->hdr.pad1 = 0;
176
177         for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
178                 to->hdr.freemap[i].base = cpu_to_be16(from->freemap[i].base);
179                 to->hdr.freemap[i].size = cpu_to_be16(from->freemap[i].size);
180         }
181 }
182
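/*
 * A minimal usage sketch for the two conversion helpers above (hypothetical
 * wrapper, shown for illustration only): header fields are manipulated in
 * the CPU-endian in-core form, and xfs_attr3_leaf_hdr_to_disk() re-encodes
 * them into whichever on-disk layout (v2 or v3) the block already uses, so
 * callers never branch on the block format themselves.
 */
#if 0	/* illustrative sketch, not built */
static void
example_bump_leaf_count(struct xfs_attr_leafblock *leaf)
{
	struct xfs_attr3_icleaf_hdr ichdr;

	xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);	/* decode either format */
	ichdr.count++;					/* modify the in-core copy */
	xfs_attr3_leaf_hdr_to_disk(leaf, &ichdr);	/* re-encode, same format */
}
#endif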
183 static bool
184 xfs_attr3_leaf_verify(
185         struct xfs_buf          *bp)
186 {
187         struct xfs_mount        *mp = bp->b_target->bt_mount;
188         struct xfs_attr_leafblock *leaf = bp->b_addr;
189         struct xfs_attr3_icleaf_hdr ichdr;
190
191         xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
192
193         if (xfs_sb_version_hascrc(&mp->m_sb)) {
194                 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
195
196                 if (ichdr.magic != XFS_ATTR3_LEAF_MAGIC)
197                         return false;
198
199                 if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_uuid))
200                         return false;
201                 if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
202                         return false;
203         } else {
204                 if (ichdr.magic != XFS_ATTR_LEAF_MAGIC)
205                         return false;
206         }
207         if (ichdr.count == 0)
208                 return false;
209
210         /* XXX: need to range check rest of attr header values */
211         /* XXX: hash order check? */
212
213         return true;
214 }
215
216 static void
217 xfs_attr3_leaf_write_verify(
218         struct xfs_buf  *bp)
219 {
220         struct xfs_mount        *mp = bp->b_target->bt_mount;
221         struct xfs_buf_log_item *bip = bp->b_fspriv;
222         struct xfs_attr3_leaf_hdr *hdr3 = bp->b_addr;
223
224         if (!xfs_attr3_leaf_verify(bp)) {
225                 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
226                 xfs_buf_ioerror(bp, EFSCORRUPTED);
227                 return;
228         }
229
230         if (!xfs_sb_version_hascrc(&mp->m_sb))
231                 return;
232
233         if (bip)
234                 hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
235
236         xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length), XFS_ATTR3_LEAF_CRC_OFF);
237 }
238
239 /*
240  * leaf/node format detection on trees is sketchy, so a node read can be done on
241  * leaf level blocks when detection identifies the tree as a node format tree
242  * incorrectly. In this case, we need to swap the verifier to match the correct
243  * format of the block being read.
244  */
245 static void
246 xfs_attr3_leaf_read_verify(
247         struct xfs_buf          *bp)
248 {
249         struct xfs_mount        *mp = bp->b_target->bt_mount;
250
251         if ((xfs_sb_version_hascrc(&mp->m_sb) &&
252              !xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
253                                           XFS_ATTR3_LEAF_CRC_OFF)) ||
254             !xfs_attr3_leaf_verify(bp)) {
255                 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
256                 xfs_buf_ioerror(bp, EFSCORRUPTED);
257         }
258 }
259
260 const struct xfs_buf_ops xfs_attr3_leaf_buf_ops = {
261         .verify_read = xfs_attr3_leaf_read_verify,
262         .verify_write = xfs_attr3_leaf_write_verify,
263 };
264
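/*
 * The verify/update calls above assume XFS_ATTR3_LEAF_CRC_OFF names the byte
 * offset of the crc field embedded in the v3 block header, so the checksum
 * covers the whole block with that one field treated specially while it is
 * computed.  A sketch of what that offset is expected to look like (the
 * authoritative definition lives in xfs_attr_leaf.h):
 */
#if 0	/* illustrative sketch, not built */
#define EXAMPLE_ATTR3_LEAF_CRC_OFF \
	offsetof(struct xfs_attr3_leaf_hdr, info.crc)
#endif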
265 int
266 xfs_attr3_leaf_read(
267         struct xfs_trans        *tp,
268         struct xfs_inode        *dp,
269         xfs_dablk_t             bno,
270         xfs_daddr_t             mappedbno,
271         struct xfs_buf          **bpp)
272 {
273         return xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
274                                 XFS_ATTR_FORK, &xfs_attr3_leaf_buf_ops);
275 }
276
277 /*========================================================================
278  * Namespace helper routines
279  *========================================================================*/
280
281 /*
282  * If namespace bits don't match return 0.
283  * If all match then return 1.
284  */
285 STATIC int
286 xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
287 {
288         return XFS_ATTR_NSP_ONDISK(ondisk_flags) == XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags);
289 }
290
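/*
 * What the namespace match means in practice: the root/secure namespace bits
 * from the caller's ATTR_* flags must agree with the XFS_ATTR_* bits stored
 * in the on-disk entry, so for example a lookup issued with ATTR_ROOT never
 * matches an entry stored in the secure namespace.  Illustrative checks only.
 */
#if 0	/* illustrative sketch, not built */
static void
example_namesp_match(void)
{
	/* same namespace: a user attr looked up with no namespace flags */
	ASSERT(xfs_attr_namesp_match(0, 0));
	/* different namespaces: root lookup against a secure on-disk entry */
	ASSERT(!xfs_attr_namesp_match(ATTR_ROOT, XFS_ATTR_SECURE));
}
#endif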
291
292 /*========================================================================
293  * External routines when attribute fork size < XFS_LITINO(mp).
294  *========================================================================*/
295
296 /*
297  * Query whether the requested number of additional bytes of extended
298  * attribute space will be able to fit inline.
299  *
300  * Returns zero if not, else the di_forkoff fork offset to be used in the
301  * literal area for attribute data once the new bytes have been added.
302  *
303  * di_forkoff must be 8 byte aligned, hence is stored as a >>3 value;
304  * special case for dev/uuid inodes, they have fixed size data forks.
305  */
306 int
307 xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
308 {
309         int offset;
310         int minforkoff; /* lower limit on valid forkoff locations */
311         int maxforkoff; /* upper limit on valid forkoff locations */
312         int dsize;
313         xfs_mount_t *mp = dp->i_mount;
314
315         /* rounded down */
316         offset = (XFS_LITINO(mp, dp->i_d.di_version) - bytes) >> 3;
317
318         switch (dp->i_d.di_format) {
319         case XFS_DINODE_FMT_DEV:
320                 minforkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
321                 return (offset >= minforkoff) ? minforkoff : 0;
322         case XFS_DINODE_FMT_UUID:
323                 minforkoff = roundup(sizeof(uuid_t), 8) >> 3;
324                 return (offset >= minforkoff) ? minforkoff : 0;
325         }
326
327         /*
328          * If the requested number of bytes is smaller than or equal to the
329          * current attribute fork size we can always proceed.
330          *
331          * Note that if_bytes in the data fork might actually be larger than
332          * the current data fork size due to delalloc extents. In that
333          * case either the extent count will go down when they are converted
334          * to real extents, or the delalloc conversion will take care of the
335          * literal area rebalancing.
336          */
337         if (bytes <= XFS_IFORK_ASIZE(dp))
338                 return dp->i_d.di_forkoff;
339
340         /*
341          * For attr2 we can try to move the forkoff if there is space in the
342          * literal area, but for the old format we are done if there is no
343          * space in the fixed attribute fork.
344          */
345         if (!(mp->m_flags & XFS_MOUNT_ATTR2))
346                 return 0;
347
348         dsize = dp->i_df.if_bytes;
349
350         switch (dp->i_d.di_format) {
351         case XFS_DINODE_FMT_EXTENTS:
352                 /*
353                  * If there is no attr fork and the data fork is extents, 
354                  * determine if creating the default attr fork will result
355                  * in the extents form migrating to btree. If so, the
356                  * minimum offset only needs to be the space required for
357                  * the btree root.
358                  */
359                 if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
360                     xfs_default_attroffset(dp))
361                         dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
362                 break;
363         case XFS_DINODE_FMT_BTREE:
364                 /*
365                  * If the data fork is already in btree format, keep the existing
366                  * forkoff if there is one.  Otherwise we are adding a new attr,
367                  * so set minforkoff to just past where the btree root ends so we
368                  * have plenty of room for attrs.
369                  */
370                 if (dp->i_d.di_forkoff) {
371                         if (offset < dp->i_d.di_forkoff)
372                                 return 0;
373                         return dp->i_d.di_forkoff;
374                 }
375                 dsize = XFS_BMAP_BROOT_SPACE(mp, dp->i_df.if_broot);
376                 break;
377         }
378
379         /*
380          * A data fork btree root must have space for at least
381          * MINDBTPTRS key/ptr pairs if the data fork is small or empty.
382          */
383         minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
384         minforkoff = roundup(minforkoff, 8) >> 3;
385
386         /* attr fork btree root can have at least this many key/ptr pairs */
387         maxforkoff = XFS_LITINO(mp, dp->i_d.di_version) -
388                         XFS_BMDR_SPACE_CALC(MINABTPTRS);
389         maxforkoff = maxforkoff >> 3;   /* rounded down */
390
391         if (offset >= maxforkoff)
392                 return maxforkoff;
393         if (offset >= minforkoff)
394                 return offset;
395         return 0;
396 }
397
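/*
 * A worked example of the >>3 encoding used by xfs_attr_shortform_bytesfit()
 * above, with made-up numbers: if the inode literal area (XFS_LITINO) were
 * 160 bytes and the caller asked whether 40 bytes of attribute data fit, the
 * candidate offset would be
 *
 *	offset = (160 - 40) >> 3 = 15
 *
 * i.e. di_forkoff = 15 puts the attribute fork 15 * 8 = 120 bytes into the
 * literal area, leaving exactly 40 bytes for attributes.  The function then
 * clamps that candidate between minforkoff (room for the data fork contents
 * or a minimal bmap btree root) and maxforkoff (room left for a minimal
 * attr fork btree root).
 */
#if 0	/* illustrative sketch, not built; 160/40 are invented values */
	int offset = (160 - 40) >> 3;		/* == 15 */
	int attr_fork_start = offset << 3;	/* == 120 bytes into literal area */
#endif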
398 /*
399  * Switch on the ATTR2 superblock bit (implies also FEATURES2)
400  */
401 STATIC void
402 xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
403 {
404         if ((mp->m_flags & XFS_MOUNT_ATTR2) &&
405             !(xfs_sb_version_hasattr2(&mp->m_sb))) {
406                 spin_lock(&mp->m_sb_lock);
407                 if (!xfs_sb_version_hasattr2(&mp->m_sb)) {
408                         xfs_sb_version_addattr2(&mp->m_sb);
409                         spin_unlock(&mp->m_sb_lock);
410                         xfs_mod_sb(tp, XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
411                 } else
412                         spin_unlock(&mp->m_sb_lock);
413         }
414 }
415
416 /*
417  * Create the initial contents of a shortform attribute list.
418  */
419 void
420 xfs_attr_shortform_create(xfs_da_args_t *args)
421 {
422         xfs_attr_sf_hdr_t *hdr;
423         xfs_inode_t *dp;
424         xfs_ifork_t *ifp;
425
426         trace_xfs_attr_sf_create(args);
427
428         dp = args->dp;
429         ASSERT(dp != NULL);
430         ifp = dp->i_afp;
431         ASSERT(ifp != NULL);
432         ASSERT(ifp->if_bytes == 0);
433         if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) {
434                 ifp->if_flags &= ~XFS_IFEXTENTS;        /* just in case */
435                 dp->i_d.di_aformat = XFS_DINODE_FMT_LOCAL;
436                 ifp->if_flags |= XFS_IFINLINE;
437         } else {
438                 ASSERT(ifp->if_flags & XFS_IFINLINE);
439         }
440         xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK);
441         hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data;
442         hdr->count = 0;
443         hdr->totsize = cpu_to_be16(sizeof(*hdr));
444         xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
445 }
446
447 /*
448  * Add a name/value pair to the shortform attribute list.
449  * Overflow from the inode has already been checked for.
450  */
451 void
452 xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
453 {
454         xfs_attr_shortform_t *sf;
455         xfs_attr_sf_entry_t *sfe;
456         int i, offset, size;
457         xfs_mount_t *mp;
458         xfs_inode_t *dp;
459         xfs_ifork_t *ifp;
460
461         trace_xfs_attr_sf_add(args);
462
463         dp = args->dp;
464         mp = dp->i_mount;
465         dp->i_d.di_forkoff = forkoff;
466
467         ifp = dp->i_afp;
468         ASSERT(ifp->if_flags & XFS_IFINLINE);
469         sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
470         sfe = &sf->list[0];
471         for (i = 0; i < sf->hdr.count; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
472 #ifdef DEBUG
473                 if (sfe->namelen != args->namelen)
474                         continue;
475                 if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
476                         continue;
477                 if (!xfs_attr_namesp_match(args->flags, sfe->flags))
478                         continue;
479                 ASSERT(0);
480 #endif
481         }
482
483         offset = (char *)sfe - (char *)sf;
484         size = XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen);
485         xfs_idata_realloc(dp, size, XFS_ATTR_FORK);
486         sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
487         sfe = (xfs_attr_sf_entry_t *)((char *)sf + offset);
488
489         sfe->namelen = args->namelen;
490         sfe->valuelen = args->valuelen;
491         sfe->flags = XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
492         memcpy(sfe->nameval, args->name, args->namelen);
493         memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen);
494         sf->hdr.count++;
495         be16_add_cpu(&sf->hdr.totsize, size);
496         xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
497
498         xfs_sbversion_add_attr2(mp, args->trans);
499 }
500
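/*
 * Size accounting for the shortform add above, as a worked example with
 * invented lengths: each xfs_attr_sf_entry_t is a one-byte namelen, a
 * one-byte valuelen and a one-byte flags field, followed immediately by the
 * packed name and value bytes.  So an attribute with an 8-byte name and a
 * 20-byte value consumes 3 + 8 + 20 = 31 bytes of the literal area, which is
 * what XFS_ATTR_SF_ENTSIZE_BYNAME() computes for the xfs_idata_realloc()
 * call (assuming the usual 3-byte fixed header; see xfs_attr_sf.h).
 */
#if 0	/* illustrative sketch, not built */
	int entsize = XFS_ATTR_SF_ENTSIZE_BYNAME(8, 20);	/* == 31 */
#endif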
501 /*
502  * After the last attribute is removed, revert to the original inode format,
503  * making the whole literal area available to the data fork once more.
504  */
505 STATIC void
506 xfs_attr_fork_reset(
507         struct xfs_inode        *ip,
508         struct xfs_trans        *tp)
509 {
510         xfs_idestroy_fork(ip, XFS_ATTR_FORK);
511         ip->i_d.di_forkoff = 0;
512         ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
513
514         ASSERT(ip->i_d.di_anextents == 0);
515         ASSERT(ip->i_afp == NULL);
516
517         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
518 }
519
520 /*
521  * Remove an attribute from the shortform attribute list structure.
522  */
523 int
524 xfs_attr_shortform_remove(xfs_da_args_t *args)
525 {
526         xfs_attr_shortform_t *sf;
527         xfs_attr_sf_entry_t *sfe;
528         int base, size=0, end, totsize, i;
529         xfs_mount_t *mp;
530         xfs_inode_t *dp;
531
532         trace_xfs_attr_sf_remove(args);
533
534         dp = args->dp;
535         mp = dp->i_mount;
536         base = sizeof(xfs_attr_sf_hdr_t);
537         sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
538         sfe = &sf->list[0];
539         end = sf->hdr.count;
540         for (i = 0; i < end; sfe = XFS_ATTR_SF_NEXTENTRY(sfe),
541                                         base += size, i++) {
542                 size = XFS_ATTR_SF_ENTSIZE(sfe);
543                 if (sfe->namelen != args->namelen)
544                         continue;
545                 if (memcmp(sfe->nameval, args->name, args->namelen) != 0)
546                         continue;
547                 if (!xfs_attr_namesp_match(args->flags, sfe->flags))
548                         continue;
549                 break;
550         }
551         if (i == end)
552                 return(XFS_ERROR(ENOATTR));
553
554         /*
555          * Fix up the attribute fork data, covering the hole
556          */
557         end = base + size;
558         totsize = be16_to_cpu(sf->hdr.totsize);
559         if (end != totsize)
560                 memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end);
561         sf->hdr.count--;
562         be16_add_cpu(&sf->hdr.totsize, -size);
563
564         /*
565          * Fix up the start offset of the attribute fork
566          */
567         totsize -= size;
568         if (totsize == sizeof(xfs_attr_sf_hdr_t) &&
569             (mp->m_flags & XFS_MOUNT_ATTR2) &&
570             (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
571             !(args->op_flags & XFS_DA_OP_ADDNAME)) {
572                 xfs_attr_fork_reset(dp, args->trans);
573         } else {
574                 xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
575                 dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
576                 ASSERT(dp->i_d.di_forkoff);
577                 ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) ||
578                                 (args->op_flags & XFS_DA_OP_ADDNAME) ||
579                                 !(mp->m_flags & XFS_MOUNT_ATTR2) ||
580                                 dp->i_d.di_format == XFS_DINODE_FMT_BTREE);
581                 xfs_trans_log_inode(args->trans, dp,
582                                         XFS_ILOG_CORE | XFS_ILOG_ADATA);
583         }
584
585         xfs_sbversion_add_attr2(mp, args->trans);
586
587         return(0);
588 }
589
590 /*
591  * Look up a name in a shortform attribute list structure.
592  */
593 /*ARGSUSED*/
594 int
595 xfs_attr_shortform_lookup(xfs_da_args_t *args)
596 {
597         xfs_attr_shortform_t *sf;
598         xfs_attr_sf_entry_t *sfe;
599         int i;
600         xfs_ifork_t *ifp;
601
602         trace_xfs_attr_sf_lookup(args);
603
604         ifp = args->dp->i_afp;
605         ASSERT(ifp->if_flags & XFS_IFINLINE);
606         sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
607         sfe = &sf->list[0];
608         for (i = 0; i < sf->hdr.count;
609                                 sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
610                 if (sfe->namelen != args->namelen)
611                         continue;
612                 if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
613                         continue;
614                 if (!xfs_attr_namesp_match(args->flags, sfe->flags))
615                         continue;
616                 return(XFS_ERROR(EEXIST));
617         }
618         return(XFS_ERROR(ENOATTR));
619 }
620
621 /*
622  * Look up a name in a shortform attribute list structure and return its value.
623  */
624 /*ARGSUSED*/
625 int
626 xfs_attr_shortform_getvalue(xfs_da_args_t *args)
627 {
628         xfs_attr_shortform_t *sf;
629         xfs_attr_sf_entry_t *sfe;
630         int i;
631
632         ASSERT(args->dp->i_d.di_aformat == XFS_IFINLINE);
633         sf = (xfs_attr_shortform_t *)args->dp->i_afp->if_u1.if_data;
634         sfe = &sf->list[0];
635         for (i = 0; i < sf->hdr.count;
636                                 sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
637                 if (sfe->namelen != args->namelen)
638                         continue;
639                 if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
640                         continue;
641                 if (!xfs_attr_namesp_match(args->flags, sfe->flags))
642                         continue;
643                 if (args->flags & ATTR_KERNOVAL) {
644                         args->valuelen = sfe->valuelen;
645                         return(XFS_ERROR(EEXIST));
646                 }
647                 if (args->valuelen < sfe->valuelen) {
648                         args->valuelen = sfe->valuelen;
649                         return(XFS_ERROR(ERANGE));
650                 }
651                 args->valuelen = sfe->valuelen;
652                 memcpy(args->value, &sfe->nameval[args->namelen],
653                                                     args->valuelen);
654                 return(XFS_ERROR(EEXIST));
655         }
656         return(XFS_ERROR(ENOATTR));
657 }
658
659 /*
660  * Convert from using the shortform to the leaf.
661  */
662 int
663 xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
664 {
665         xfs_inode_t *dp;
666         xfs_attr_shortform_t *sf;
667         xfs_attr_sf_entry_t *sfe;
668         xfs_da_args_t nargs;
669         char *tmpbuffer;
670         int error, i, size;
671         xfs_dablk_t blkno;
672         struct xfs_buf *bp;
673         xfs_ifork_t *ifp;
674
675         trace_xfs_attr_sf_to_leaf(args);
676
677         dp = args->dp;
678         ifp = dp->i_afp;
679         sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
680         size = be16_to_cpu(sf->hdr.totsize);
681         tmpbuffer = kmem_alloc(size, KM_SLEEP);
682         ASSERT(tmpbuffer != NULL);
683         memcpy(tmpbuffer, ifp->if_u1.if_data, size);
684         sf = (xfs_attr_shortform_t *)tmpbuffer;
685
686         xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
687         bp = NULL;
688         error = xfs_da_grow_inode(args, &blkno);
689         if (error) {
690                 /*
691                  * If we hit an IO error in the middle of the transaction inside
692                  * grow_inode(), we may have inconsistent data. Bail out.
693                  */
694                 if (error == EIO)
695                         goto out;
696                 xfs_idata_realloc(dp, size, XFS_ATTR_FORK);     /* try to put */
697                 memcpy(ifp->if_u1.if_data, tmpbuffer, size);    /* it back */
698                 goto out;
699         }
700
701         ASSERT(blkno == 0);
702         error = xfs_attr3_leaf_create(args, blkno, &bp);
703         if (error) {
704                 error = xfs_da_shrink_inode(args, 0, bp);
705                 bp = NULL;
706                 if (error)
707                         goto out;
708                 xfs_idata_realloc(dp, size, XFS_ATTR_FORK);     /* try to put */
709                 memcpy(ifp->if_u1.if_data, tmpbuffer, size);    /* it back */
710                 goto out;
711         }
712
713         memset((char *)&nargs, 0, sizeof(nargs));
714         nargs.dp = dp;
715         nargs.firstblock = args->firstblock;
716         nargs.flist = args->flist;
717         nargs.total = args->total;
718         nargs.whichfork = XFS_ATTR_FORK;
719         nargs.trans = args->trans;
720         nargs.op_flags = XFS_DA_OP_OKNOENT;
721
722         sfe = &sf->list[0];
723         for (i = 0; i < sf->hdr.count; i++) {
724                 nargs.name = sfe->nameval;
725                 nargs.namelen = sfe->namelen;
726                 nargs.value = &sfe->nameval[nargs.namelen];
727                 nargs.valuelen = sfe->valuelen;
728                 nargs.hashval = xfs_da_hashname(sfe->nameval,
729                                                 sfe->namelen);
730                 nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags);
731                 error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */
732                 ASSERT(error == ENOATTR);
733                 error = xfs_attr3_leaf_add(bp, &nargs);
734                 ASSERT(error != ENOSPC);
735                 if (error)
736                         goto out;
737                 sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
738         }
739         error = 0;
740
741 out:
742         kmem_free(tmpbuffer);
743         return(error);
744 }
745
746 STATIC int
747 xfs_attr_shortform_compare(const void *a, const void *b)
748 {
749         xfs_attr_sf_sort_t *sa, *sb;
750
751         sa = (xfs_attr_sf_sort_t *)a;
752         sb = (xfs_attr_sf_sort_t *)b;
753         if (sa->hash < sb->hash) {
754                 return(-1);
755         } else if (sa->hash > sb->hash) {
756                 return(1);
757         } else {
758                 return(sa->entno - sb->entno);
759         }
760 }
761
762
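/*
 * The comparator above orders entries by name hash first and breaks ties on
 * the original entry number, e.g. with invented values
 *
 *	{hash 0x1000, entno 3} < {hash 0x2000, entno 0} < {hash 0x2000, entno 2}
 *
 * which gives attr_list() a stable, resumable ordering: the cursor below
 * records a hashval plus an offset counting duplicates of that hashval.
 */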
763 #define XFS_ISRESET_CURSOR(cursor) \
764         (!((cursor)->initted) && !((cursor)->hashval) && \
765          !((cursor)->blkno) && !((cursor)->offset))
766 /*
767  * Copy out entries of shortform attribute lists for attr_list().
768  * Shortform attribute lists are not stored in hashval sorted order.
769  * If the output buffer is not large enough to hold them all, then we
770  * have to calculate each entry's hashvalue and sort them before
771  * we can begin returning them to the user.
772  */
773 /*ARGSUSED*/
774 int
775 xfs_attr_shortform_list(xfs_attr_list_context_t *context)
776 {
777         attrlist_cursor_kern_t *cursor;
778         xfs_attr_sf_sort_t *sbuf, *sbp;
779         xfs_attr_shortform_t *sf;
780         xfs_attr_sf_entry_t *sfe;
781         xfs_inode_t *dp;
782         int sbsize, nsbuf, count, i;
783         int error;
784
785         ASSERT(context != NULL);
786         dp = context->dp;
787         ASSERT(dp != NULL);
788         ASSERT(dp->i_afp != NULL);
789         sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
790         ASSERT(sf != NULL);
791         if (!sf->hdr.count)
792                 return(0);
793         cursor = context->cursor;
794         ASSERT(cursor != NULL);
795
796         trace_xfs_attr_list_sf(context);
797
798         /*
799          * If the buffer is large enough and the cursor is at the start,
800          * do not bother with sorting since we will return everything in
801          * one buffer and another call using the cursor won't need to be
802          * made.
803          * Note the generous fudge factor of 16 overhead bytes per entry.
804          * If bufsize is zero then put_listent must be a search function
805          * and can just scan through what we have.
806          */
807         if (context->bufsize == 0 ||
808             (XFS_ISRESET_CURSOR(cursor) &&
809              (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
810                 for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
811                         error = context->put_listent(context,
812                                            sfe->flags,
813                                            sfe->nameval,
814                                            (int)sfe->namelen,
815                                            (int)sfe->valuelen,
816                                            &sfe->nameval[sfe->namelen]);
817
818                         /*
819                          * Either search callback finished early or
820                          * didn't fit it all in the buffer after all.
821                          */
822                         if (context->seen_enough)
823                                 break;
824
825                         if (error)
826                                 return error;
827                         sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
828                 }
829                 trace_xfs_attr_list_sf_all(context);
830                 return(0);
831         }
832
833         /* do no more for a search callback */
834         if (context->bufsize == 0)
835                 return 0;
836
837         /*
838          * It didn't all fit, so we have to sort everything on hashval.
839          */
840         sbsize = sf->hdr.count * sizeof(*sbuf);
841         sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
842
843         /*
844          * Scan the attribute list for the rest of the entries, storing
845          * the relevant info from only those that match into a buffer.
846          */
847         nsbuf = 0;
848         for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
849                 if (unlikely(
850                     ((char *)sfe < (char *)sf) ||
851                     ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
852                         XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
853                                              XFS_ERRLEVEL_LOW,
854                                              context->dp->i_mount, sfe);
855                         kmem_free(sbuf);
856                         return XFS_ERROR(EFSCORRUPTED);
857                 }
858
859                 sbp->entno = i;
860                 sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
861                 sbp->name = sfe->nameval;
862                 sbp->namelen = sfe->namelen;
863                 /* These are bytes, and both on-disk, don't endian-flip */
864                 sbp->valuelen = sfe->valuelen;
865                 sbp->flags = sfe->flags;
866                 sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
867                 sbp++;
868                 nsbuf++;
869         }
870
871         /*
872          * Sort the entries on hash then entno.
873          */
874         xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);
875
876         /*
877          * Re-find our place IN THE SORTED LIST.
878          */
879         count = 0;
880         cursor->initted = 1;
881         cursor->blkno = 0;
882         for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
883                 if (sbp->hash == cursor->hashval) {
884                         if (cursor->offset == count) {
885                                 break;
886                         }
887                         count++;
888                 } else if (sbp->hash > cursor->hashval) {
889                         break;
890                 }
891         }
892         if (i == nsbuf) {
893                 kmem_free(sbuf);
894                 return(0);
895         }
896
897         /*
898          * Loop putting entries into the user buffer.
899          */
900         for ( ; i < nsbuf; i++, sbp++) {
901                 if (cursor->hashval != sbp->hash) {
902                         cursor->hashval = sbp->hash;
903                         cursor->offset = 0;
904                 }
905                 error = context->put_listent(context,
906                                         sbp->flags,
907                                         sbp->name,
908                                         sbp->namelen,
909                                         sbp->valuelen,
910                                         &sbp->name[sbp->namelen]);
911                 if (error)
912                         return error;
913                 if (context->seen_enough)
914                         break;
915                 cursor->offset++;
916         }
917
918         kmem_free(sbuf);
919         return(0);
920 }
921
922 /*
923  * Check a leaf attribute block to see if all the entries would fit into
924  * a shortform attribute list.
925  */
926 int
927 xfs_attr_shortform_allfit(
928         struct xfs_buf  *bp,
929         struct xfs_inode *dp)
930 {
931         xfs_attr_leafblock_t *leaf;
932         xfs_attr_leaf_entry_t *entry;
933         xfs_attr_leaf_name_local_t *name_loc;
934         int bytes, i;
935
936         leaf = bp->b_addr;
937         ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
938
939         entry = &leaf->entries[0];
940         bytes = sizeof(struct xfs_attr_sf_hdr);
941         for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
942                 if (entry->flags & XFS_ATTR_INCOMPLETE)
943                         continue;               /* don't copy partial entries */
944                 if (!(entry->flags & XFS_ATTR_LOCAL))
945                         return(0);
946                 name_loc = xfs_attr3_leaf_name_local(leaf, i);
947                 if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX)
948                         return(0);
949                 if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX)
950                         return(0);
951                 bytes += sizeof(struct xfs_attr_sf_entry)-1
952                                 + name_loc->namelen
953                                 + be16_to_cpu(name_loc->valuelen);
954         }
955         if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
956             (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
957             (bytes == sizeof(struct xfs_attr_sf_hdr)))
958                 return(-1);
959         return(xfs_attr_shortform_bytesfit(dp, bytes));
960 }
961
962 /*
963  * Convert a leaf attribute list to a shortform attribute list
964  */
965 int
966 xfs_attr3_leaf_to_shortform(
967         struct xfs_buf          *bp,
968         struct xfs_da_args      *args,
969         int                     forkoff)
970 {
971         struct xfs_attr_leafblock *leaf;
972         struct xfs_attr3_icleaf_hdr ichdr;
973         struct xfs_attr_leaf_entry *entry;
974         struct xfs_attr_leaf_name_local *name_loc;
975         struct xfs_da_args      nargs;
976         struct xfs_inode        *dp = args->dp;
977         char                    *tmpbuffer;
978         int                     error;
979         int                     i;
980
981         trace_xfs_attr_leaf_to_sf(args);
982
983         tmpbuffer = kmem_alloc(XFS_LBSIZE(dp->i_mount), KM_SLEEP);
984         if (!tmpbuffer)
985                 return ENOMEM;
986
987         memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(dp->i_mount));
988
989         leaf = (xfs_attr_leafblock_t *)tmpbuffer;
990         xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
991         entry = xfs_attr3_leaf_entryp(leaf);
992
993         /* XXX (dgc): buffer is about to be marked stale - why zero it? */
994         memset(bp->b_addr, 0, XFS_LBSIZE(dp->i_mount));
995
996         /*
997          * Clean out the prior contents of the attribute list.
998          */
999         error = xfs_da_shrink_inode(args, 0, bp);
1000         if (error)
1001                 goto out;
1002
1003         if (forkoff == -1) {
1004                 ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
1005                 ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE);
1006                 xfs_attr_fork_reset(dp, args->trans);
1007                 goto out;
1008         }
1009
1010         xfs_attr_shortform_create(args);
1011
1012         /*
1013          * Copy the attributes
1014          */
1015         memset((char *)&nargs, 0, sizeof(nargs));
1016         nargs.dp = dp;
1017         nargs.firstblock = args->firstblock;
1018         nargs.flist = args->flist;
1019         nargs.total = args->total;
1020         nargs.whichfork = XFS_ATTR_FORK;
1021         nargs.trans = args->trans;
1022         nargs.op_flags = XFS_DA_OP_OKNOENT;
1023
1024         for (i = 0; i < ichdr.count; entry++, i++) {
1025                 if (entry->flags & XFS_ATTR_INCOMPLETE)
1026                         continue;       /* don't copy partial entries */
1027                 if (!entry->nameidx)
1028                         continue;
1029                 ASSERT(entry->flags & XFS_ATTR_LOCAL);
1030                 name_loc = xfs_attr3_leaf_name_local(leaf, i);
1031                 nargs.name = name_loc->nameval;
1032                 nargs.namelen = name_loc->namelen;
1033                 nargs.value = &name_loc->nameval[nargs.namelen];
1034                 nargs.valuelen = be16_to_cpu(name_loc->valuelen);
1035                 nargs.hashval = be32_to_cpu(entry->hashval);
1036                 nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(entry->flags);
1037                 xfs_attr_shortform_add(&nargs, forkoff);
1038         }
1039         error = 0;
1040
1041 out:
1042         kmem_free(tmpbuffer);
1043         return error;
1044 }
1045
1046 /*
1047  * Convert from using a single leaf to a root node and a leaf.
1048  */
1049 int
1050 xfs_attr3_leaf_to_node(
1051         struct xfs_da_args      *args)
1052 {
1053         struct xfs_attr_leafblock *leaf;
1054         struct xfs_attr3_icleaf_hdr icleafhdr;
1055         struct xfs_attr_leaf_entry *entries;
1056         struct xfs_da_node_entry *btree;
1057         struct xfs_da3_icnode_hdr icnodehdr;
1058         struct xfs_da_intnode   *node;
1059         struct xfs_inode        *dp = args->dp;
1060         struct xfs_mount        *mp = dp->i_mount;
1061         struct xfs_buf          *bp1 = NULL;
1062         struct xfs_buf          *bp2 = NULL;
1063         xfs_dablk_t             blkno;
1064         int                     error;
1065
1066         trace_xfs_attr_leaf_to_node(args);
1067
1068         error = xfs_da_grow_inode(args, &blkno);
1069         if (error)
1070                 goto out;
1071         error = xfs_attr3_leaf_read(args->trans, dp, 0, -1, &bp1);
1072         if (error)
1073                 goto out;
1074
1075         error = xfs_da_get_buf(args->trans, dp, blkno, -1, &bp2, XFS_ATTR_FORK);
1076         if (error)
1077                 goto out;
1078
1079         /* copy leaf to new buffer, update identifiers */
1080         bp2->b_ops = bp1->b_ops;
1081         memcpy(bp2->b_addr, bp1->b_addr, XFS_LBSIZE(mp));
1082         if (xfs_sb_version_hascrc(&mp->m_sb)) {
1083                 struct xfs_da3_blkinfo *hdr3 = bp2->b_addr;
1084                 hdr3->blkno = cpu_to_be64(bp2->b_bn);
1085         }
1086         xfs_trans_log_buf(args->trans, bp2, 0, XFS_LBSIZE(mp) - 1);
1087
1088         /*
1089          * Set up the new root node.
1090          */
1091         error = xfs_da3_node_create(args, 0, 1, &bp1, XFS_ATTR_FORK);
1092         if (error)
1093                 goto out;
1094         node = bp1->b_addr;
1095         xfs_da3_node_hdr_from_disk(&icnodehdr, node);
1096         btree = xfs_da3_node_tree_p(node);
1097
1098         leaf = bp2->b_addr;
1099         xfs_attr3_leaf_hdr_from_disk(&icleafhdr, leaf);
1100         entries = xfs_attr3_leaf_entryp(leaf);
1101
1102         /* both on-disk, don't endian-flip twice */
1103         btree[0].hashval = entries[icleafhdr.count - 1].hashval;
1104         btree[0].before = cpu_to_be32(blkno);
1105         icnodehdr.count = 1;
1106         xfs_da3_node_hdr_to_disk(node, &icnodehdr);
1107         xfs_trans_log_buf(args->trans, bp1, 0, XFS_LBSIZE(mp) - 1);
1108         error = 0;
1109 out:
1110         return error;
1111 }
1112
1113
1114 /*========================================================================
1115  * Routines used for growing the Btree.
1116  *========================================================================*/
1117
1118 /*
1119  * Create the initial contents of a leaf attribute list
1120  * or a leaf in a node attribute list.
1121  */
1122 STATIC int
1123 xfs_attr3_leaf_create(
1124         struct xfs_da_args      *args,
1125         xfs_dablk_t             blkno,
1126         struct xfs_buf          **bpp)
1127 {
1128         struct xfs_attr_leafblock *leaf;
1129         struct xfs_attr3_icleaf_hdr ichdr;
1130         struct xfs_inode        *dp = args->dp;
1131         struct xfs_mount        *mp = dp->i_mount;
1132         struct xfs_buf          *bp;
1133         int                     error;
1134
1135         trace_xfs_attr_leaf_create(args);
1136
1137         error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp,
1138                                             XFS_ATTR_FORK);
1139         if (error)
1140                 return error;
1141         bp->b_ops = &xfs_attr3_leaf_buf_ops;
1142         leaf = bp->b_addr;
1143         memset(leaf, 0, XFS_LBSIZE(mp));
1144
1145         memset(&ichdr, 0, sizeof(ichdr));
1146         ichdr.firstused = XFS_LBSIZE(mp);
1147
1148         if (xfs_sb_version_hascrc(&mp->m_sb)) {
1149                 struct xfs_da3_blkinfo *hdr3 = bp->b_addr;
1150
1151                 ichdr.magic = XFS_ATTR3_LEAF_MAGIC;
1152
1153                 hdr3->blkno = cpu_to_be64(bp->b_bn);
1154                 hdr3->owner = cpu_to_be64(dp->i_ino);
1155                 uuid_copy(&hdr3->uuid, &mp->m_sb.sb_uuid);
1156
1157                 ichdr.freemap[0].base = sizeof(struct xfs_attr3_leaf_hdr);
1158         } else {
1159                 ichdr.magic = XFS_ATTR_LEAF_MAGIC;
1160                 ichdr.freemap[0].base = sizeof(struct xfs_attr_leaf_hdr);
1161         }
1162         ichdr.freemap[0].size = ichdr.firstused - ichdr.freemap[0].base;
1163
1164         xfs_attr3_leaf_hdr_to_disk(leaf, &ichdr);
1165         xfs_trans_log_buf(args->trans, bp, 0, XFS_LBSIZE(mp) - 1);
1166
1167         *bpp = bp;
1168         return 0;
1169 }
1170
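/*
 * The block produced by xfs_attr3_leaf_create() above starts out looking
 * like this (schematically, with H standing for whichever header size was
 * chosen, v2 or v3):
 *
 *	[ header: H bytes ][ ................ free space ................ ]
 *	  firstused = block size
 *	  freemap[0] = { .base = H, .size = block size - H }
 *
 * As names are added, the entry table grows forward from the header while
 * name/value data is allocated backward from the end of the block (pulling
 * firstused down), so freemap[0] shrinks from both ends.
 */
#if 0	/* illustrative sketch, not built; 4096 and 80 are invented values */
	ichdr.firstused       = 4096;		/* whole block initially unused */
	ichdr.freemap[0].base = 80;		/* just past the header */
	ichdr.freemap[0].size = 4096 - 80;	/* everything else is free */
#endif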
1171 /*
1172  * Split the leaf node, rebalance, then add the new entry.
1173  */
1174 int
1175 xfs_attr3_leaf_split(
1176         struct xfs_da_state     *state,
1177         struct xfs_da_state_blk *oldblk,
1178         struct xfs_da_state_blk *newblk)
1179 {
1180         xfs_dablk_t blkno;
1181         int error;
1182
1183         trace_xfs_attr_leaf_split(state->args);
1184
1185         /*
1186          * Allocate space for a new leaf node.
1187          */
1188         ASSERT(oldblk->magic == XFS_ATTR_LEAF_MAGIC);
1189         error = xfs_da_grow_inode(state->args, &blkno);
1190         if (error)
1191                 return(error);
1192         error = xfs_attr3_leaf_create(state->args, blkno, &newblk->bp);
1193         if (error)
1194                 return(error);
1195         newblk->blkno = blkno;
1196         newblk->magic = XFS_ATTR_LEAF_MAGIC;
1197
1198         /*
1199          * Rebalance the entries across the two leaves.
1200          * NOTE: rebalance() currently depends on the 2nd block being empty.
1201          */
1202         xfs_attr3_leaf_rebalance(state, oldblk, newblk);
1203         error = xfs_da3_blk_link(state, oldblk, newblk);
1204         if (error)
1205                 return(error);
1206
1207         /*
1208          * Save info on "old" attribute for "atomic rename" ops; leaf_add()
1209          * modifies the index/blkno/rmtblk/rmtblkcnt fields to show the
1210          * "new" attrs info.  Will need the "old" info to remove it later.
1211          *
1212          * Insert the "new" entry in the correct block.
1213          */
1214         if (state->inleaf) {
1215                 trace_xfs_attr_leaf_add_old(state->args);
1216                 error = xfs_attr3_leaf_add(oldblk->bp, state->args);
1217         } else {
1218                 trace_xfs_attr_leaf_add_new(state->args);
1219                 error = xfs_attr3_leaf_add(newblk->bp, state->args);
1220         }
1221
1222         /*
1223          * Update last hashval in each block since we added the name.
1224          */
1225         oldblk->hashval = xfs_attr_leaf_lasthash(oldblk->bp, NULL);
1226         newblk->hashval = xfs_attr_leaf_lasthash(newblk->bp, NULL);
1227         return(error);
1228 }
1229
1230 /*
1231  * Add a name to the leaf attribute list structure.
1232  */
1233 int
1234 xfs_attr3_leaf_add(
1235         struct xfs_buf          *bp,
1236         struct xfs_da_args      *args)
1237 {
1238         struct xfs_attr_leafblock *leaf;
1239         struct xfs_attr3_icleaf_hdr ichdr;
1240         int                     tablesize;
1241         int                     entsize;
1242         int                     sum;
1243         int                     tmp;
1244         int                     i;
1245
1246         trace_xfs_attr_leaf_add(args);
1247
1248         leaf = bp->b_addr;
1249         xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
1250         ASSERT(args->index >= 0 && args->index <= ichdr.count);
1251         entsize = xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
1252                            args->trans->t_mountp->m_sb.sb_blocksize, NULL);
1253
1254         /*
1255          * Search through freemap for first-fit on new name length.
1256          * (may need to figure in size of entry struct too)
1257          */
1258         tablesize = (ichdr.count + 1) * sizeof(xfs_attr_leaf_entry_t)
1259                                         + xfs_attr3_leaf_hdr_size(leaf);
1260         for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE - 1; i >= 0; i--) {
1261                 if (tablesize > ichdr.firstused) {
1262                         sum += ichdr.freemap[i].size;
1263                         continue;
1264                 }
1265                 if (!ichdr.freemap[i].size)
1266                         continue;       /* no space in this map */
1267                 tmp = entsize;
1268                 if (ichdr.freemap[i].base < ichdr.firstused)
1269                         tmp += sizeof(xfs_attr_leaf_entry_t);
1270                 if (ichdr.freemap[i].size >= tmp) {
1271                         tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, i);
1272                         goto out_log_hdr;
1273                 }
1274                 sum += ichdr.freemap[i].size;
1275         }
1276
1277         /*
1278          * If there are no holes in the address space of the block,
1279          * and we don't have enough freespace, then compaction will do us
1280          * no good and we should just give up.
1281          */
1282         if (!ichdr.holes && sum < entsize)
1283                 return XFS_ERROR(ENOSPC);
1284
1285         /*
1286          * Compact the entries to coalesce free space.
1287          * This may change the hdr->count via dropping INCOMPLETE entries.
1288          */
1289         xfs_attr3_leaf_compact(args, &ichdr, bp);
1290
1291         /*
1292          * After compaction, the block is guaranteed to have only one
1293          * free region, in freemap[0].  If it is not big enough, give up.
1294          */
1295         if (ichdr.freemap[0].size < (entsize + sizeof(xfs_attr_leaf_entry_t))) {
1296                 tmp = ENOSPC;
1297                 goto out_log_hdr;
1298         }
1299
1300         tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, 0);
1301
1302 out_log_hdr:
1303         xfs_attr3_leaf_hdr_to_disk(leaf, &ichdr);
1304         xfs_trans_log_buf(args->trans, bp,
1305                 XFS_DA_LOGRANGE(leaf, &leaf->hdr,
1306                                 xfs_attr3_leaf_hdr_size(leaf)));
1307         return tmp;
1308 }
1309
1310 /*
1311  * Add a name to a leaf attribute list structure.
1312  */
1313 STATIC int
1314 xfs_attr3_leaf_add_work(
1315         struct xfs_buf          *bp,
1316         struct xfs_attr3_icleaf_hdr *ichdr,
1317         struct xfs_da_args      *args,
1318         int                     mapindex)
1319 {
1320         struct xfs_attr_leafblock *leaf;
1321         struct xfs_attr_leaf_entry *entry;
1322         struct xfs_attr_leaf_name_local *name_loc;
1323         struct xfs_attr_leaf_name_remote *name_rmt;
1324         struct xfs_mount        *mp;
1325         int                     tmp;
1326         int                     i;
1327
1328         trace_xfs_attr_leaf_add_work(args);
1329
1330         leaf = bp->b_addr;
1331         ASSERT(mapindex >= 0 && mapindex < XFS_ATTR_LEAF_MAPSIZE);
1332         ASSERT(args->index >= 0 && args->index <= ichdr->count);
1333
1334         /*
1335          * Force open some space in the entry array and fill it in.
1336          */
1337         entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
1338         if (args->index < ichdr->count) {
1339                 tmp  = ichdr->count - args->index;
1340                 tmp *= sizeof(xfs_attr_leaf_entry_t);
1341                 memmove(entry + 1, entry, tmp);
1342                 xfs_trans_log_buf(args->trans, bp,
1343                     XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
1344         }
1345         ichdr->count++;
1346
1347         /*
1348          * Allocate space for the new string (at the end of the run).
1349          */
1350         mp = args->trans->t_mountp;
1351         ASSERT(ichdr->freemap[mapindex].base < XFS_LBSIZE(mp));
1352         ASSERT((ichdr->freemap[mapindex].base & 0x3) == 0);
1353         ASSERT(ichdr->freemap[mapindex].size >=
1354                 xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
1355                                          mp->m_sb.sb_blocksize, NULL));
1356         ASSERT(ichdr->freemap[mapindex].size < XFS_LBSIZE(mp));
1357         ASSERT((ichdr->freemap[mapindex].size & 0x3) == 0);
1358
1359         ichdr->freemap[mapindex].size -=
1360                         xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
1361                                                  mp->m_sb.sb_blocksize, &tmp);
1362
1363         entry->nameidx = cpu_to_be16(ichdr->freemap[mapindex].base +
1364                                      ichdr->freemap[mapindex].size);
1365         entry->hashval = cpu_to_be32(args->hashval);
1366         entry->flags = tmp ? XFS_ATTR_LOCAL : 0;
1367         entry->flags |= XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
1368         if (args->op_flags & XFS_DA_OP_RENAME) {
1369                 entry->flags |= XFS_ATTR_INCOMPLETE;
1370                 if ((args->blkno2 == args->blkno) &&
1371                     (args->index2 <= args->index)) {
1372                         args->index2++;
1373                 }
1374         }
1375         xfs_trans_log_buf(args->trans, bp,
1376                           XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
1377         ASSERT((args->index == 0) ||
1378                (be32_to_cpu(entry->hashval) >= be32_to_cpu((entry-1)->hashval)));
1379         ASSERT((args->index == ichdr->count - 1) ||
1380                (be32_to_cpu(entry->hashval) <= be32_to_cpu((entry+1)->hashval)));
1381
1382         /*
1383          * For "remote" attribute values, simply note that we need to
1384          * allocate space for the "remote" value.  We can't actually
1385          * allocate the extents in this transaction, and we can't decide
1386          * which blocks they should be as we might allocate more blocks
1387          * as part of this transaction (a split operation for example).
1388          */
1389         if (entry->flags & XFS_ATTR_LOCAL) {
1390                 name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
1391                 name_loc->namelen = args->namelen;
1392                 name_loc->valuelen = cpu_to_be16(args->valuelen);
1393                 memcpy((char *)name_loc->nameval, args->name, args->namelen);
1394                 memcpy((char *)&name_loc->nameval[args->namelen], args->value,
1395                                    be16_to_cpu(name_loc->valuelen));
1396         } else {
1397                 name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
1398                 name_rmt->namelen = args->namelen;
1399                 memcpy((char *)name_rmt->name, args->name, args->namelen);
1400                 entry->flags |= XFS_ATTR_INCOMPLETE;
1401                 /* just in case */
1402                 name_rmt->valuelen = 0;
1403                 name_rmt->valueblk = 0;
1404                 args->rmtblkno = 1;
1405                 args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen);
1406         }
1407         xfs_trans_log_buf(args->trans, bp,
1408              XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
1409                                    xfs_attr_leaf_entsize(leaf, args->index)));
1410
1411         /*
1412          * Update the control info for this leaf node
1413          */
1414         if (be16_to_cpu(entry->nameidx) < ichdr->firstused)
1415                 ichdr->firstused = be16_to_cpu(entry->nameidx);
1416
1417         ASSERT(ichdr->firstused >= ichdr->count * sizeof(xfs_attr_leaf_entry_t)
1418                                         + xfs_attr3_leaf_hdr_size(leaf));
1419         tmp = (ichdr->count - 1) * sizeof(xfs_attr_leaf_entry_t)
1420                                         + xfs_attr3_leaf_hdr_size(leaf);
1421
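        /*
         * The entry table has grown by one entry.  Any freemap region that
         * started exactly at the old end of the table now starts one entry
         * later and is one entry smaller.
         */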
1422         for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
1423                 if (ichdr->freemap[i].base == tmp) {
1424                         ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t);
1425                         ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t);
1426                 }
1427         }
1428         ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index);
1429         return 0;
1430 }
1431
1432 /*
1433  * Garbage collect a leaf attribute list block by copying it to a new buffer.
1434  */
1435 STATIC void
1436 xfs_attr3_leaf_compact(
1437         struct xfs_da_args      *args,
1438         struct xfs_attr3_icleaf_hdr *ichdr_d,
1439         struct xfs_buf          *bp)
1440 {
1441         xfs_attr_leafblock_t    *leaf_s, *leaf_d;
1442         struct xfs_attr3_icleaf_hdr ichdr_s;
1443         struct xfs_trans        *trans = args->trans;
1444         struct xfs_mount        *mp = trans->t_mountp;
1445         char                    *tmpbuffer;
1446
1447         trace_xfs_attr_leaf_compact(args);
1448
1449         tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP);
1450         ASSERT(tmpbuffer != NULL);
1451         memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(mp));
1452         memset(bp->b_addr, 0, XFS_LBSIZE(mp));
1453
1454         /*
1455          * Copy basic information
1456          */
1457         leaf_s = (xfs_attr_leafblock_t *)tmpbuffer;
1458         leaf_d = bp->b_addr;
1459         ichdr_s = *ichdr_d;     /* struct copy */
1460         ichdr_d->firstused = XFS_LBSIZE(mp);
1461         ichdr_d->usedbytes = 0;
1462         ichdr_d->count = 0;
1463         ichdr_d->holes = 0;
1464         ichdr_d->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_s);
1465         ichdr_d->freemap[0].size = ichdr_d->firstused - ichdr_d->freemap[0].base;
1466
1467         /*
1468          * Copy all entries in the same (sorted) order,
1469          * but allocate name/value pairs packed and in sequence.
1470          */
1471         xfs_attr3_leaf_moveents(leaf_s, &ichdr_s, 0, leaf_d, ichdr_d, 0,
1472                                 ichdr_s.count, mp);
1473         /*
1474          * this logs the entire buffer, but the caller must write the header
1475          * back to the buffer when it is finished modifying it.
1476          */
1477         xfs_trans_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1);
1478
1479         kmem_free(tmpbuffer);
1480 }
1481
1482 /*
1483  * Compare the "order" of two leaf blocks.
1484  * Return 0 unless leaf2 should go before leaf1.
1485  */
1486 static int
1487 xfs_attr3_leaf_order(
1488         struct xfs_buf  *leaf1_bp,
1489         struct xfs_attr3_icleaf_hdr *leaf1hdr,
1490         struct xfs_buf  *leaf2_bp,
1491         struct xfs_attr3_icleaf_hdr *leaf2hdr)
1492 {
1493         struct xfs_attr_leaf_entry *entries1;
1494         struct xfs_attr_leaf_entry *entries2;
1495
1496         entries1 = xfs_attr3_leaf_entryp(leaf1_bp->b_addr);
1497         entries2 = xfs_attr3_leaf_entryp(leaf2_bp->b_addr);
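        /*
         * leaf2 sorts before leaf1 if either its first or its last hashval
         * is lower than the corresponding hashval in leaf1.
         */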
1498         if (leaf1hdr->count > 0 && leaf2hdr->count > 0 &&
1499             ((be32_to_cpu(entries2[0].hashval) <
1500               be32_to_cpu(entries1[0].hashval)) ||
1501              (be32_to_cpu(entries2[leaf2hdr->count - 1].hashval) <
1502               be32_to_cpu(entries1[leaf1hdr->count - 1].hashval)))) {
1503                 return 1;
1504         }
1505         return 0;
1506 }
1507
1508 int
1509 xfs_attr_leaf_order(
1510         struct xfs_buf  *leaf1_bp,
1511         struct xfs_buf  *leaf2_bp)
1512 {
1513         struct xfs_attr3_icleaf_hdr ichdr1;
1514         struct xfs_attr3_icleaf_hdr ichdr2;
1515
1516         xfs_attr3_leaf_hdr_from_disk(&ichdr1, leaf1_bp->b_addr);
1517         xfs_attr3_leaf_hdr_from_disk(&ichdr2, leaf2_bp->b_addr);
1518         return xfs_attr3_leaf_order(leaf1_bp, &ichdr1, leaf2_bp, &ichdr2);
1519 }
1520
1521 /*
1522  * Redistribute the attribute list entries between two leaf nodes,
1523  * taking into account the size of the new entry.
1524  *
1525  * NOTE: if the new block is empty, it will get the upper half of the
1526  * old block.  At present, the only caller passes in an empty second block.
1527  *
1528  * This code adjusts the args->index/blkno and args->index2/blkno2 fields
1529  * to match what it is doing in splitting the attribute leaf block.  Those
1530  * values are used in "atomic rename" operations on attributes.  Note that
1531  * the "new" and "old" values can end up in different blocks.
1532  */
1533 STATIC void
1534 xfs_attr3_leaf_rebalance(
1535         struct xfs_da_state     *state,
1536         struct xfs_da_state_blk *blk1,
1537         struct xfs_da_state_blk *blk2)
1538 {
1539         struct xfs_da_args      *args;
1540         struct xfs_attr_leafblock *leaf1;
1541         struct xfs_attr_leafblock *leaf2;
1542         struct xfs_attr3_icleaf_hdr ichdr1;
1543         struct xfs_attr3_icleaf_hdr ichdr2;
1544         struct xfs_attr_leaf_entry *entries1;
1545         struct xfs_attr_leaf_entry *entries2;
1546         int                     count;
1547         int                     totallen;
1548         int                     max;
1549         int                     space;
1550         int                     swap;
1551
1552         /*
1553          * Set up environment.
1554          */
1555         ASSERT(blk1->magic == XFS_ATTR_LEAF_MAGIC);
1556         ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC);
1557         leaf1 = blk1->bp->b_addr;
1558         leaf2 = blk2->bp->b_addr;
1559         xfs_attr3_leaf_hdr_from_disk(&ichdr1, leaf1);
1560         xfs_attr3_leaf_hdr_from_disk(&ichdr2, leaf2);
1561         ASSERT(ichdr2.count == 0);
1562         args = state->args;
1563
1564         trace_xfs_attr_leaf_rebalance(args);
1565
1566         /*
1567          * Check ordering of blocks, reverse if it makes things simpler.
1568          *
1569          * NOTE: Given that all (current) callers pass in an empty
1570          * second block, this code should never set "swap".
1571          */
1572         swap = 0;
1573         if (xfs_attr3_leaf_order(blk1->bp, &ichdr1, blk2->bp, &ichdr2)) {
1574                 struct xfs_da_state_blk *tmp_blk;
1575                 struct xfs_attr3_icleaf_hdr tmp_ichdr;
1576
1577                 tmp_blk = blk1;
1578                 blk1 = blk2;
1579                 blk2 = tmp_blk;
1580
1581                 /* struct copies to swap them rather than reconverting */
1582                 tmp_ichdr = ichdr1;
1583                 ichdr1 = ichdr2;
1584                 ichdr2 = tmp_ichdr;
1585
1586                 leaf1 = blk1->bp->b_addr;
1587                 leaf2 = blk2->bp->b_addr;
1588                 swap = 1;
1589         }
1590
1591         /*
1592          * Examine entries until we reduce the absolute difference in
1593          * byte usage between the two blocks to a minimum.  Then get
1594          * the direction to copy and the number of elements to move.
1595          *
1596          * "inleaf" is true if the new entry should be inserted into blk1.
1597          * If "swap" is also true, then reverse the sense of "inleaf".
1598          */
1599         state->inleaf = xfs_attr3_leaf_figure_balance(state, blk1, &ichdr1,
1600                                                       blk2, &ichdr2,
1601                                                       &count, &totallen);
1602         if (swap)
1603                 state->inleaf = !state->inleaf;
1604
1605         /*
1606          * Move any entries required from leaf to leaf:
1607          */
1608         if (count < ichdr1.count) {
1609                 /*
1610                  * Figure the total bytes to be added to the destination leaf.
1611                  */
1612                 /* number of entries being moved */
1613                 count = ichdr1.count - count;
1614                 space  = ichdr1.usedbytes - totallen;
1615                 space += count * sizeof(xfs_attr_leaf_entry_t);
1616
1617                 /*
1618                  * leaf2 is the destination, compact it if it looks tight.
1619                  */
1620                 max  = ichdr2.firstused - xfs_attr3_leaf_hdr_size(leaf1);
1621                 max -= ichdr2.count * sizeof(xfs_attr_leaf_entry_t);
1622                 if (space > max)
1623                         xfs_attr3_leaf_compact(args, &ichdr2, blk2->bp);
1624
1625                 /*
1626                  * Move high entries from leaf1 to low end of leaf2.
1627                  */
1628                 xfs_attr3_leaf_moveents(leaf1, &ichdr1, ichdr1.count - count,
1629                                 leaf2, &ichdr2, 0, count, state->mp);
1630
1631         } else if (count > ichdr1.count) {
1632                 /*
1633                  * I assert that since all callers pass in an empty
1634                  * second buffer, this code should never execute.
1635                  */
1636                 ASSERT(0);
1637
1638                 /*
1639                  * Figure the total bytes to be added to the destination leaf.
1640                  */
1641                 /* number of entries being moved */
1642                 count -= ichdr1.count;
1643                 space  = totallen - ichdr1.usedbytes;
1644                 space += count * sizeof(xfs_attr_leaf_entry_t);
1645
1646                 /*
1647                  * leaf1 is the destination, compact it if it looks tight.
1648                  */
1649                 max  = ichdr1.firstused - xfs_attr3_leaf_hdr_size(leaf1);
1650                 max -= ichdr1.count * sizeof(xfs_attr_leaf_entry_t);
1651                 if (space > max)
1652                         xfs_attr3_leaf_compact(args, &ichdr1, blk1->bp);
1653
1654                 /*
1655                  * Move low entries from leaf2 to high end of leaf1.
1656                  */
1657                 xfs_attr3_leaf_moveents(leaf2, &ichdr2, 0, leaf1, &ichdr1,
1658                                         ichdr1.count, count, state->mp);
1659         }
1660
1661         xfs_attr3_leaf_hdr_to_disk(leaf1, &ichdr1);
1662         xfs_attr3_leaf_hdr_to_disk(leaf2, &ichdr2);
1663         xfs_trans_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
1664         xfs_trans_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
1665
1666         /*
1667          * Copy out last hashval in each block for B-tree code.
1668          */
1669         entries1 = xfs_attr3_leaf_entryp(leaf1);
1670         entries2 = xfs_attr3_leaf_entryp(leaf2);
1671         blk1->hashval = be32_to_cpu(entries1[ichdr1.count - 1].hashval);
1672         blk2->hashval = be32_to_cpu(entries2[ichdr2.count - 1].hashval);
1673
1674         /*
1675          * Adjust the expected index for insertion.
1676          * NOTE: this code depends on the (current) situation that the
1677          * second block was originally empty.
1678          *
1679          * If the insertion point moved to the 2nd block, we must adjust
1680          * the index.  We must also track the entry just following the
1681          * new entry for use in an "atomic rename" operation, that entry
1682          * is always the "old" entry and the "new" entry is what we are
1683          * inserting.  The index/blkno fields refer to the "old" entry,
1684          * while the index2/blkno2 fields refer to the "new" entry.
1685          */
1686         if (blk1->index > ichdr1.count) {
1687                 ASSERT(state->inleaf == 0);
1688                 blk2->index = blk1->index - ichdr1.count;
1689                 args->index = args->index2 = blk2->index;
1690                 args->blkno = args->blkno2 = blk2->blkno;
1691         } else if (blk1->index == ichdr1.count) {
1692                 if (state->inleaf) {
1693                         args->index = blk1->index;
1694                         args->blkno = blk1->blkno;
1695                         args->index2 = 0;
1696                         args->blkno2 = blk2->blkno;
1697                 } else {
1698                         /*
1699                          * On a double leaf split, the original attr location
1700                          * is already stored in blkno2/index2, so don't
1701                          * overwrite it, otherwise we corrupt the tree.
1702                          */
1703                         blk2->index = blk1->index - ichdr1.count;
1704                         args->index = blk2->index;
1705                         args->blkno = blk2->blkno;
1706                         if (!state->extravalid) {
1707                                 /*
1708                                  * set the new attr location to match the old
1709                                  * one and let the higher level split code
1710                                  * decide where in the leaf to place it.
1711                                  */
1712                                 args->index2 = blk2->index;
1713                                 args->blkno2 = blk2->blkno;
1714                         }
1715                 }
1716         } else {
1717                 ASSERT(state->inleaf == 1);
1718                 args->index = args->index2 = blk1->index;
1719                 args->blkno = args->blkno2 = blk1->blkno;
1720         }
1721 }
1722
1723 /*
1724  * Examine entries until we reduce the absolute difference in
1725  * byte usage between the two blocks to a minimum.
1726  * GROT: Is this really necessary?  With other than a 512 byte blocksize,
1727  * GROT: there will always be enough room in either block for a new entry.
1728  * GROT: Do a double-split for this case?
1729  */
1730 STATIC int
1731 xfs_attr3_leaf_figure_balance(
1732         struct xfs_da_state             *state,
1733         struct xfs_da_state_blk         *blk1,
1734         struct xfs_attr3_icleaf_hdr     *ichdr1,
1735         struct xfs_da_state_blk         *blk2,
1736         struct xfs_attr3_icleaf_hdr     *ichdr2,
1737         int                             *countarg,
1738         int                             *usedbytesarg)
1739 {
1740         struct xfs_attr_leafblock       *leaf1 = blk1->bp->b_addr;
1741         struct xfs_attr_leafblock       *leaf2 = blk2->bp->b_addr;
1742         struct xfs_attr_leaf_entry      *entry;
1743         int                             count;
1744         int                             max;
1745         int                             index;
1746         int                             totallen = 0;
1747         int                             half;
1748         int                             lastdelta;
1749         int                             foundit = 0;
1750         int                             tmp;
1751
1752         /*
1753          * Examine entries until we reduce the absolute difference in
1754          * byte usage between the two blocks to a minimum.
1755          */
1756         max = ichdr1->count + ichdr2->count;
1757         half = (max + 1) * sizeof(*entry);
1758         half += ichdr1->usedbytes + ichdr2->usedbytes +
1759                         xfs_attr_leaf_newentsize(state->args->namelen,
1760                                                  state->args->valuelen,
1761                                                  state->blocksize, NULL);
1762         half /= 2;
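        /*
         * "half" is now the target byte count for the lower block: half of
         * the entry table plus name/value data of both blocks, including
         * the space the new entry will consume.
         */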
1763         lastdelta = state->blocksize;
1764         entry = xfs_attr3_leaf_entryp(leaf1);
1765         for (count = index = 0; count < max; entry++, index++, count++) {
1766
1767 #define XFS_ATTR_ABS(A) (((A) < 0) ? -(A) : (A))
1768                 /*
1769                  * The new entry is in the first block, account for it.
1770                  */
1771                 if (count == blk1->index) {
1772                         tmp = totallen + sizeof(*entry) +
1773                                 xfs_attr_leaf_newentsize(
1774                                                 state->args->namelen,
1775                                                 state->args->valuelen,
1776                                                 state->blocksize, NULL);
1777                         if (XFS_ATTR_ABS(half - tmp) > lastdelta)
1778                                 break;
1779                         lastdelta = XFS_ATTR_ABS(half - tmp);
1780                         totallen = tmp;
1781                         foundit = 1;
1782                 }
1783
1784                 /*
1785                  * Wrap around into the second block if necessary.
1786                  */
1787                 if (count == ichdr1->count) {
1788                         leaf1 = leaf2;
1789                         entry = xfs_attr3_leaf_entryp(leaf1);
1790                         index = 0;
1791                 }
1792
1793                 /*
1794                  * Figure out if next leaf entry would be too much.
1795                  */
1796                 tmp = totallen + sizeof(*entry) + xfs_attr_leaf_entsize(leaf1,
1797                                                                         index);
1798                 if (XFS_ATTR_ABS(half - tmp) > lastdelta)
1799                         break;
1800                 lastdelta = XFS_ATTR_ABS(half - tmp);
1801                 totallen = tmp;
1802 #undef XFS_ATTR_ABS
1803         }
1804
1805         /*
1806          * Calculate the number of usedbytes that will end up in the lower block.
1807          * If the new entry is not in the lower block, fix up the count.
1808          */
1809         totallen -= count * sizeof(*entry);
1810         if (foundit) {
1811                 totallen -= sizeof(*entry) +
1812                                 xfs_attr_leaf_newentsize(
1813                                                 state->args->namelen,
1814                                                 state->args->valuelen,
1815                                                 state->blocksize, NULL);
1816         }
1817
1818         *countarg = count;
1819         *usedbytesarg = totallen;
1820         return foundit;
1821 }
1822
1823 /*========================================================================
1824  * Routines used for shrinking the Btree.
1825  *========================================================================*/
1826
1827 /*
1828  * Check a leaf block and its neighbors to see if the block should be
1829  * collapsed into one or the other neighbor.  Always keep the block
1830  * with the smaller block number.
1831  * If the current block is over 50% full, don't try to join it, return 0.
1832  * If the block is empty, fill in the state structure and return 2.
1833  * If it can be collapsed, fill in the state structure and return 1.
1834  * If nothing can be done, return 0.
1835  *
1836  * GROT: allow for INCOMPLETE entries in calculation.
1837  */
1838 int
1839 xfs_attr3_leaf_toosmall(
1840         struct xfs_da_state     *state,
1841         int                     *action)
1842 {
1843         struct xfs_attr_leafblock *leaf;
1844         struct xfs_da_state_blk *blk;
1845         struct xfs_attr3_icleaf_hdr ichdr;
1846         struct xfs_buf          *bp;
1847         xfs_dablk_t             blkno;
1848         int                     bytes;
1849         int                     forward;
1850         int                     error;
1851         int                     retval;
1852         int                     i;
1853
1854         trace_xfs_attr_leaf_toosmall(state->args);
1855
1856         /*
1857          * Check for the degenerate case of the block being over 50% full.
1858          * If so, it's not worth even looking to see if we might be able
1859          * to coalesce with a sibling.
1860          */
1861         blk = &state->path.blk[ state->path.active-1 ];
1862         leaf = blk->bp->b_addr;
1863         xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
1864         bytes = xfs_attr3_leaf_hdr_size(leaf) +
1865                 ichdr.count * sizeof(xfs_attr_leaf_entry_t) +
1866                 ichdr.usedbytes;
1867         if (bytes > (state->blocksize >> 1)) {
1868                 *action = 0;    /* blk over 50%, don't try to join */
1869                 return(0);
1870         }
1871
1872         /*
1873          * Check for the degenerate case of the block being empty.
1874          * If the block is empty, we'll simply delete it, no need to
1875          * coalesce it with a sibling block.  We choose (arbitrarily)
1876          * to merge with the forward block unless it is NULL.
1877          */
1878         if (ichdr.count == 0) {
1879                 /*
1880                  * Make altpath point to the block we want to keep and
1881                  * path point to the block we want to drop (this one).
1882                  */
1883                 forward = (ichdr.forw != 0);
1884                 memcpy(&state->altpath, &state->path, sizeof(state->path));
1885                 error = xfs_da3_path_shift(state, &state->altpath, forward,
1886                                                  0, &retval);
1887                 if (error)
1888                         return(error);
1889                 if (retval) {
1890                         *action = 0;
1891                 } else {
1892                         *action = 2;
1893                 }
1894                 return 0;
1895         }
1896
1897         /*
1898          * Examine each sibling block to see if we can coalesce with
1899          * at least 25% free space to spare.  We need to figure out
1900          * whether to merge with the forward or the backward block.
1901          * We prefer coalescing with the lower numbered sibling so as
1902          * to shrink an attribute list over time.
1903          */
1904         /* start with smaller blk num */
1905         forward = ichdr.forw < ichdr.back;
1906         for (i = 0; i < 2; forward = !forward, i++) {
1907                 struct xfs_attr3_icleaf_hdr ichdr2;
1908                 if (forward)
1909                         blkno = ichdr.forw;
1910                 else
1911                         blkno = ichdr.back;
1912                 if (blkno == 0)
1913                         continue;
1914                 error = xfs_attr3_leaf_read(state->args->trans, state->args->dp,
1915                                         blkno, -1, &bp);
1916                 if (error)
1917                         return(error);
1918
1919                 xfs_attr3_leaf_hdr_from_disk(&ichdr2, bp->b_addr);
1920
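                /*
                 * "bytes" is the free space that would remain if this block
                 * were merged with the sibling, less a 25% reserve.  A
                 * negative result means the merge would leave too little
                 * slack, so try the other sibling.
                 */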
1921                 bytes = state->blocksize - (state->blocksize >> 2) -
1922                         ichdr.usedbytes - ichdr2.usedbytes -
1923                         ((ichdr.count + ichdr2.count) *
1924                                         sizeof(xfs_attr_leaf_entry_t)) -
1925                         xfs_attr3_leaf_hdr_size(leaf);
1926
1927                 xfs_trans_brelse(state->args->trans, bp);
1928                 if (bytes >= 0)
1929                         break;  /* fits with at least 25% to spare */
1930         }
1931         if (i >= 2) {
1932                 *action = 0;
1933                 return(0);
1934         }
1935
1936         /*
1937          * Make altpath point to the block we want to keep (the lower
1938          * numbered block) and path point to the block we want to drop.
1939          */
1940         memcpy(&state->altpath, &state->path, sizeof(state->path));
1941         if (blkno < blk->blkno) {
1942                 error = xfs_da3_path_shift(state, &state->altpath, forward,
1943                                                  0, &retval);
1944         } else {
1945                 error = xfs_da3_path_shift(state, &state->path, forward,
1946                                                  0, &retval);
1947         }
1948         if (error)
1949                 return(error);
1950         if (retval) {
1951                 *action = 0;
1952         } else {
1953                 *action = 1;
1954         }
1955         return(0);
1956 }
1957
1958 /*
1959  * Remove a name from the leaf attribute list structure.
1960  *
1961  * Return 1 if leaf is less than 37% full, 0 if >= 37% full.
1962  * If two leaves are 37% full, when combined they will leave 25% free.
1963  */
1964 int
1965 xfs_attr3_leaf_remove(
1966         struct xfs_buf          *bp,
1967         struct xfs_da_args      *args)
1968 {
1969         struct xfs_attr_leafblock *leaf;
1970         struct xfs_attr3_icleaf_hdr ichdr;
1971         struct xfs_attr_leaf_entry *entry;
1972         struct xfs_mount        *mp = args->trans->t_mountp;
1973         int                     before;
1974         int                     after;
1975         int                     smallest;
1976         int                     entsize;
1977         int                     tablesize;
1978         int                     tmp;
1979         int                     i;
1980
1981         trace_xfs_attr_leaf_remove(args);
1982
1983         leaf = bp->b_addr;
1984         xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
1985
1986         ASSERT(ichdr.count > 0 && ichdr.count < XFS_LBSIZE(mp) / 8);
1987         ASSERT(args->index >= 0 && args->index < ichdr.count);
1988         ASSERT(ichdr.firstused >= ichdr.count * sizeof(*entry) +
1989                                         xfs_attr3_leaf_hdr_size(leaf));
1990
1991         entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
1992
1993         ASSERT(be16_to_cpu(entry->nameidx) >= ichdr.firstused);
1994         ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp));
1995
1996         /*
1997          * Scan through the free region table:
1998          *    check for adjacency of the freed entry with an existing region,
1999          *    find the smallest free region in case we need to replace it,
2000          *    adjust any map that borders the entry table.
2001          */
2002         tablesize = ichdr.count * sizeof(xfs_attr_leaf_entry_t)
2003                                         + xfs_attr3_leaf_hdr_size(leaf);
2004         tmp = ichdr.freemap[0].size;
2005         before = after = -1;
2006         smallest = XFS_ATTR_LEAF_MAPSIZE - 1;
2007         entsize = xfs_attr_leaf_entsize(leaf, args->index);
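        /*
         * "tmp" tracks the size of the smallest freemap region seen so far,
         * while "before" and "after" record regions adjacent to the entry
         * being freed.
         */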
2008         for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
2009                 ASSERT(ichdr.freemap[i].base < XFS_LBSIZE(mp));
2010                 ASSERT(ichdr.freemap[i].size < XFS_LBSIZE(mp));
2011                 if (ichdr.freemap[i].base == tablesize) {
2012                         ichdr.freemap[i].base -= sizeof(xfs_attr_leaf_entry_t);
2013                         ichdr.freemap[i].size += sizeof(xfs_attr_leaf_entry_t);
2014                 }
2015
2016                 if (ichdr.freemap[i].base + ichdr.freemap[i].size ==
2017                                 be16_to_cpu(entry->nameidx)) {
2018                         before = i;
2019                 } else if (ichdr.freemap[i].base ==
2020                                 (be16_to_cpu(entry->nameidx) + entsize)) {
2021                         after = i;
2022                 } else if (ichdr.freemap[i].size < tmp) {
2023                         tmp = ichdr.freemap[i].size;
2024                         smallest = i;
2025                 }
2026         }
2027
2028         /*
2029          * Coalesce adjacent freemap regions,
2030          * or replace the smallest region.
2031          */
2032         if ((before >= 0) || (after >= 0)) {
2033                 if ((before >= 0) && (after >= 0)) {
2034                         ichdr.freemap[before].size += entsize;
2035                         ichdr.freemap[before].size += ichdr.freemap[after].size;
2036                         ichdr.freemap[after].base = 0;
2037                         ichdr.freemap[after].size = 0;
2038                 } else if (before >= 0) {
2039                         ichdr.freemap[before].size += entsize;
2040                 } else {
2041                         ichdr.freemap[after].base = be16_to_cpu(entry->nameidx);
2042                         ichdr.freemap[after].size += entsize;
2043                 }
2044         } else {
2045                 /*
2046                  * Replace smallest region (if it is smaller than free'd entry)
2047                  */
2048                 if (ichdr.freemap[smallest].size < entsize) {
2049                         ichdr.freemap[smallest].base = be16_to_cpu(entry->nameidx);
2050                         ichdr.freemap[smallest].size = entsize;
2051                 }
2052         }
2053
2054         /*
2055          * Did we remove the first entry?
2056          */
2057         if (be16_to_cpu(entry->nameidx) == ichdr.firstused)
2058                 smallest = 1;
2059         else
2060                 smallest = 0;
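        /* note: "smallest" is reused as a flag here, not as a freemap index */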
2061
2062         /*
2063          * Compress the remaining entries and zero out the removed stuff.
2064          */
2065         memset(xfs_attr3_leaf_name(leaf, args->index), 0, entsize);
2066         ichdr.usedbytes -= entsize;
2067         xfs_trans_log_buf(args->trans, bp,
2068              XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
2069                                    entsize));
2070
2071         tmp = (ichdr.count - args->index) * sizeof(xfs_attr_leaf_entry_t);
2072         memmove(entry, entry + 1, tmp);
2073         ichdr.count--;
2074         xfs_trans_log_buf(args->trans, bp,
2075             XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(xfs_attr_leaf_entry_t)));
2076
2077         entry = &xfs_attr3_leaf_entryp(leaf)[ichdr.count];
2078         memset(entry, 0, sizeof(xfs_attr_leaf_entry_t));
2079
2080         /*
2081          * If we removed the first entry, re-find the first used byte
2082          * in the name area.  Note that if the entry was the "firstused",
2083          * then we don't have a "hole" in our block resulting from
2084          * removing the name.
2085          */
2086         if (smallest) {
2087                 tmp = XFS_LBSIZE(mp);
2088                 entry = xfs_attr3_leaf_entryp(leaf);
2089                 for (i = ichdr.count - 1; i >= 0; entry++, i--) {
2090                         ASSERT(be16_to_cpu(entry->nameidx) >= ichdr.firstused);
2091                         ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp));
2092
2093                         if (be16_to_cpu(entry->nameidx) < tmp)
2094                                 tmp = be16_to_cpu(entry->nameidx);
2095                 }
2096                 ichdr.firstused = tmp;
2097                 if (!ichdr.firstused)
2098                         ichdr.firstused = tmp - XFS_ATTR_LEAF_NAME_ALIGN;
2099         } else {
2100                 ichdr.holes = 1;        /* mark as needing compaction */
2101         }
2102         xfs_attr3_leaf_hdr_to_disk(leaf, &ichdr);
2103         xfs_trans_log_buf(args->trans, bp,
2104                           XFS_DA_LOGRANGE(leaf, &leaf->hdr,
2105                                           xfs_attr3_leaf_hdr_size(leaf)));
2106
2107         /*
2108          * Check if the leaf is less than 37% full; if so the caller may
2109          * want to "join" the leaf with a sibling.
2110          */
2111         tmp = ichdr.usedbytes + xfs_attr3_leaf_hdr_size(leaf) +
2112               ichdr.count * sizeof(xfs_attr_leaf_entry_t);
2113
2114         return tmp < mp->m_attr_magicpct; /* leaf is < 37% full */
2115 }
2116
2117 /*
2118  * Move all the attribute list entries from drop_leaf into save_leaf.
2119  */
2120 void
2121 xfs_attr3_leaf_unbalance(
2122         struct xfs_da_state     *state,
2123         struct xfs_da_state_blk *drop_blk,
2124         struct xfs_da_state_blk *save_blk)
2125 {
2126         struct xfs_attr_leafblock *drop_leaf = drop_blk->bp->b_addr;
2127         struct xfs_attr_leafblock *save_leaf = save_blk->bp->b_addr;
2128         struct xfs_attr3_icleaf_hdr drophdr;
2129         struct xfs_attr3_icleaf_hdr savehdr;
2130         struct xfs_attr_leaf_entry *entry;
2131         struct xfs_mount        *mp = state->mp;
2132
2133         trace_xfs_attr_leaf_unbalance(state->args);
2134
2135         drop_leaf = drop_blk->bp->b_addr;
2136         save_leaf = save_blk->bp->b_addr;
2137         xfs_attr3_leaf_hdr_from_disk(&drophdr, drop_leaf);
2138         xfs_attr3_leaf_hdr_from_disk(&savehdr, save_leaf);
2139         entry = xfs_attr3_leaf_entryp(drop_leaf);
2140
2141         /*
2142          * Save last hashval from dying block for later Btree fixup.
2143          */
2144         drop_blk->hashval = be32_to_cpu(entry[drophdr.count - 1].hashval);
2145
2146         /*
2147          * Check if we need a temp buffer, or can we do it in place.
2148          * Note that we don't check "leaf" for holes because we will
2149          * always be dropping it; toosmall() decided that for us already.
2150          */
2151         if (savehdr.holes == 0) {
2152                 /*
2153                  * dest leaf has no holes, so we add there.  May need
2154                  * to make some room in the entry array.
2155                  */
2156                 if (xfs_attr3_leaf_order(save_blk->bp, &savehdr,
2157                                          drop_blk->bp, &drophdr)) {
2158                         xfs_attr3_leaf_moveents(drop_leaf, &drophdr, 0,
2159                                                 save_leaf, &savehdr, 0,
2160                                                 drophdr.count, mp);
2161                 } else {
2162                         xfs_attr3_leaf_moveents(drop_leaf, &drophdr, 0,
2163                                                 save_leaf, &savehdr,
2164                                                 savehdr.count, drophdr.count, mp);
2165                 }
2166         } else {
2167                 /*
2168                  * Destination has holes, so we make a temporary copy
2169                  * of the leaf and add them both to that.
2170                  */
2171                 struct xfs_attr_leafblock *tmp_leaf;
2172                 struct xfs_attr3_icleaf_hdr tmphdr;
2173
2174                 tmp_leaf = kmem_alloc(state->blocksize, KM_SLEEP);
2175                 memset(tmp_leaf, 0, state->blocksize);
2176                 memset(&tmphdr, 0, sizeof(tmphdr));
2177
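                /*
                 * Seed the temporary header with the save block's magic and
                 * sibling links; xfs_attr3_leaf_moveents() fills in the rest
                 * as the entries from both blocks are packed into tmp_leaf.
                 */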
2178                 tmphdr.magic = savehdr.magic;
2179                 tmphdr.forw = savehdr.forw;
2180                 tmphdr.back = savehdr.back;
2181                 tmphdr.firstused = state->blocksize;
2182                 if (xfs_attr3_leaf_order(save_blk->bp, &savehdr,
2183                                          drop_blk->bp, &drophdr)) {
2184                         xfs_attr3_leaf_moveents(drop_leaf, &drophdr, 0,
2185                                                 tmp_leaf, &tmphdr, 0,
2186                                                 drophdr.count, mp);
2187                         xfs_attr3_leaf_moveents(save_leaf, &savehdr, 0,
2188                                                 tmp_leaf, &tmphdr, tmphdr.count,
2189                                                 savehdr.count, mp);
2190                 } else {
2191                         xfs_attr3_leaf_moveents(save_leaf, &savehdr, 0,
2192                                                 tmp_leaf, &tmphdr, 0,
2193                                                 savehdr.count, mp);
2194                         xfs_attr3_leaf_moveents(drop_leaf, &drophdr, 0,
2195                                                 tmp_leaf, &tmphdr, tmphdr.count,
2196                                                 drophdr.count, mp);
2197                 }
2198                 memcpy(save_leaf, tmp_leaf, state->blocksize);
2199                 savehdr = tmphdr; /* struct copy */
2200                 kmem_free(tmp_leaf);
2201         }
2202
2203         xfs_attr3_leaf_hdr_to_disk(save_leaf, &savehdr);
2204         xfs_trans_log_buf(state->args->trans, save_blk->bp, 0,
2205                                            state->blocksize - 1);
2206
2207         /*
2208          * Copy out last hashval in each block for B-tree code.
2209          */
2210         entry = xfs_attr3_leaf_entryp(save_leaf);
2211         save_blk->hashval = be32_to_cpu(entry[savehdr.count - 1].hashval);
2212 }
2213
2214 /*========================================================================
2215  * Routines used for finding things in the Btree.
2216  *========================================================================*/
2217
2218 /*
2219  * Look up a name in a leaf attribute list structure.
2220  * This is the internal routine, it uses the caller's buffer.
2221  *
2222  * Note that duplicate keys are allowed, but we only check within the
2223  * current leaf node.  The Btree code must check in adjacent leaf nodes.
2224  *
2225  * Return in args->index the index into the entry[] array of either
2226  * the found entry, or where the entry should have been (insert before
2227  * that entry).
2228  *
2229  * Don't change the args->value unless we find the attribute.
2230  */
2231 int
2232 xfs_attr3_leaf_lookup_int(
2233         struct xfs_buf          *bp,
2234         struct xfs_da_args      *args)
2235 {
2236         struct xfs_attr_leafblock *leaf;
2237         struct xfs_attr3_icleaf_hdr ichdr;
2238         struct xfs_attr_leaf_entry *entry;
2239         struct xfs_attr_leaf_entry *entries;
2240         struct xfs_attr_leaf_name_local *name_loc;
2241         struct xfs_attr_leaf_name_remote *name_rmt;
2242         xfs_dahash_t            hashval;
2243         int                     probe;
2244         int                     span;
2245
2246         trace_xfs_attr_leaf_lookup(args);
2247
2248         leaf = bp->b_addr;
2249         xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
2250         entries = xfs_attr3_leaf_entryp(leaf);
2251         ASSERT(ichdr.count < XFS_LBSIZE(args->dp->i_mount) / 8);
2252
2253         /*
2254          * Binary search.  (note: small blocks will skip this loop)
2255          */
2256         hashval = args->hashval;
2257         probe = span = ichdr.count / 2;
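        /*
         * The binary search only narrows "probe" to within a few entries of
         * the target hashval; the linear scans below then find the first
         * entry with a matching hashval, since duplicates are allowed.
         */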
2258         for (entry = &entries[probe]; span > 4; entry = &entries[probe]) {
2259                 span /= 2;
2260                 if (be32_to_cpu(entry->hashval) < hashval)
2261                         probe += span;
2262                 else if (be32_to_cpu(entry->hashval) > hashval)
2263                         probe -= span;
2264                 else
2265                         break;
2266         }
2267         ASSERT(probe >= 0 && (!ichdr.count || probe < ichdr.count));
2268         ASSERT(span <= 4 || be32_to_cpu(entry->hashval) == hashval);
2269
2270         /*
2271          * Since we may have duplicate hashvals, find the first matching
2272          * hashval in the leaf.
2273          */
2274         while (probe > 0 && be32_to_cpu(entry->hashval) >= hashval) {
2275                 entry--;
2276                 probe--;
2277         }
2278         while (probe < ichdr.count &&
2279                be32_to_cpu(entry->hashval) < hashval) {
2280                 entry++;
2281                 probe++;
2282         }
2283         if (probe == ichdr.count || be32_to_cpu(entry->hashval) != hashval) {
2284                 args->index = probe;
2285                 return XFS_ERROR(ENOATTR);
2286         }
2287
2288         /*
2289          * Duplicate keys may be present, so search all of them for a match.
2290          */
2291         for (; probe < ichdr.count && (be32_to_cpu(entry->hashval) == hashval);
2292                         entry++, probe++) {
2293 /*
2294  * GROT: Add code to remove incomplete entries.
2295  */
2296                 /*
2297                  * If we are looking for INCOMPLETE entries, show only those.
2298                  * If we are looking for complete entries, show only those.
2299                  */
2300                 if ((args->flags & XFS_ATTR_INCOMPLETE) !=
2301                     (entry->flags & XFS_ATTR_INCOMPLETE)) {
2302                         continue;
2303                 }
2304                 if (entry->flags & XFS_ATTR_LOCAL) {
2305                         name_loc = xfs_attr3_leaf_name_local(leaf, probe);
2306                         if (name_loc->namelen != args->namelen)
2307                                 continue;
2308                         if (memcmp(args->name, name_loc->nameval,
2309                                                         args->namelen) != 0)
2310                                 continue;
2311                         if (!xfs_attr_namesp_match(args->flags, entry->flags))
2312                                 continue;
2313                         args->index = probe;
2314                         return XFS_ERROR(EEXIST);
2315                 } else {
2316                         name_rmt = xfs_attr3_leaf_name_remote(leaf, probe);
2317                         if (name_rmt->namelen != args->namelen)
2318                                 continue;
2319                         if (memcmp(args->name, name_rmt->name,
2320                                                         args->namelen) != 0)
2321                                 continue;
2322                         if (!xfs_attr_namesp_match(args->flags, entry->flags))
2323                                 continue;
2324                         args->index = probe;
2325                         args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
2326                         args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount,
2327                                                    be32_to_cpu(name_rmt->valuelen));
2328                         return XFS_ERROR(EEXIST);
2329                 }
2330         }
2331         args->index = probe;
2332         return XFS_ERROR(ENOATTR);
2333 }
2334
2335 /*
2336  * Get the value associated with an attribute name from a leaf attribute
2337  * list structure.
2338  */
2339 int
2340 xfs_attr3_leaf_getvalue(
2341         struct xfs_buf          *bp,
2342         struct xfs_da_args      *args)
2343 {
2344         struct xfs_attr_leafblock *leaf;
2345         struct xfs_attr3_icleaf_hdr ichdr;
2346         struct xfs_attr_leaf_entry *entry;
2347         struct xfs_attr_leaf_name_local *name_loc;
2348         struct xfs_attr_leaf_name_remote *name_rmt;
2349         int                     valuelen;
2350
2351         leaf = bp->b_addr;
2352         xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
2353         ASSERT(ichdr.count < XFS_LBSIZE(args->dp->i_mount) / 8);
2354         ASSERT(args->index < ichdr.count);
2355
2356         entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
2357         if (entry->flags & XFS_ATTR_LOCAL) {
2358                 name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
2359                 ASSERT(name_loc->namelen == args->namelen);
2360                 ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0);
2361                 valuelen = be16_to_cpu(name_loc->valuelen);
2362                 if (args->flags & ATTR_KERNOVAL) {
2363                         args->valuelen = valuelen;
2364                         return 0;
2365                 }
2366                 if (args->valuelen < valuelen) {
2367                         args->valuelen = valuelen;
2368                         return XFS_ERROR(ERANGE);
2369                 }
2370                 args->valuelen = valuelen;
2371                 memcpy(args->value, &name_loc->nameval[args->namelen], valuelen);
2372         } else {
2373                 name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
2374                 ASSERT(name_rmt->namelen == args->namelen);
2375                 ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
2376                 valuelen = be32_to_cpu(name_rmt->valuelen);
2377                 args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
2378                 args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen);
2379                 if (args->flags & ATTR_KERNOVAL) {
2380                         args->valuelen = valuelen;
2381                         return 0;
2382                 }
2383                 if (args->valuelen < valuelen) {
2384                         args->valuelen = valuelen;
2385                         return XFS_ERROR(ERANGE);
2386                 }
2387                 args->valuelen = valuelen;
2388         }
2389         return 0;
2390 }
2391
2392 /*========================================================================
2393  * Utility routines.
2394  *========================================================================*/
2395
2396 /*
2397  * Move the indicated entries from one leaf to another.
2398  * NOTE: this routine modifies both source and destination leaves.
2399  */
2400 /*ARGSUSED*/
2401 STATIC void
2402 xfs_attr3_leaf_moveents(
2403         struct xfs_attr_leafblock       *leaf_s,
2404         struct xfs_attr3_icleaf_hdr     *ichdr_s,
2405         int                             start_s,
2406         struct xfs_attr_leafblock       *leaf_d,
2407         struct xfs_attr3_icleaf_hdr     *ichdr_d,
2408         int                             start_d,
2409         int                             count,
2410         struct xfs_mount                *mp)
2411 {
2412         struct xfs_attr_leaf_entry      *entry_s;
2413         struct xfs_attr_leaf_entry      *entry_d;
2414         int                             desti;
2415         int                             tmp;
2416         int                             i;
2417
2418         /*
2419          * Check for nothing to do.
2420          */
2421         if (count == 0)
2422                 return;
2423
2424         /*
2425          * Set up environment.
2426          */
2427         ASSERT(ichdr_s->magic == XFS_ATTR_LEAF_MAGIC ||
2428                ichdr_s->magic == XFS_ATTR3_LEAF_MAGIC);
2429         ASSERT(ichdr_s->magic == ichdr_d->magic);
2430         ASSERT(ichdr_s->count > 0 && ichdr_s->count < XFS_LBSIZE(mp) / 8);
2431         ASSERT(ichdr_s->firstused >= (ichdr_s->count * sizeof(*entry_s))
2432                                         + xfs_attr3_leaf_hdr_size(leaf_s));
2433         ASSERT(ichdr_d->count < XFS_LBSIZE(mp) / 8);
2434         ASSERT(ichdr_d->firstused >= (ichdr_d->count * sizeof(*entry_d))
2435                                         + xfs_attr3_leaf_hdr_size(leaf_d));
2436
2437         ASSERT(start_s < ichdr_s->count);
2438         ASSERT(start_d <= ichdr_d->count);
2439         ASSERT(count <= ichdr_s->count);
2440
2441
2442         /*
2443          * Move the entries in the destination leaf up to make a hole?
2444          */
2445         if (start_d < ichdr_d->count) {
2446                 tmp  = ichdr_d->count - start_d;
2447                 tmp *= sizeof(xfs_attr_leaf_entry_t);
2448                 entry_s = &xfs_attr3_leaf_entryp(leaf_d)[start_d];
2449                 entry_d = &xfs_attr3_leaf_entryp(leaf_d)[start_d + count];
2450                 memmove(entry_d, entry_s, tmp);
2451         }
2452
2453         /*
2454          * Copy all entries in the same (sorted) order,
2455          * but allocate attribute info packed and in sequence.
2456          */
2457         entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
2458         entry_d = &xfs_attr3_leaf_entryp(leaf_d)[start_d];
2459         desti = start_d;
2460         for (i = 0; i < count; entry_s++, entry_d++, desti++, i++) {
2461                 ASSERT(be16_to_cpu(entry_s->nameidx) >= ichdr_s->firstused);
2462                 tmp = xfs_attr_leaf_entsize(leaf_s, start_s + i);
2463 #ifdef GROT
2464                 /*
2465                  * Code to drop INCOMPLETE entries.  Difficult to use as we
2466                  * may also need to change the insertion index.  Code turned
2467                  * off for 6.2, should be revisited later.
2468                  */
2469                 if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */
2470                         memset(xfs_attr3_leaf_name(leaf_s, start_s + i), 0, tmp);
2471                         ichdr_s->usedbytes -= tmp;
2472                         ichdr_s->count -= 1;
2473                         entry_d--;      /* to compensate for ++ in loop hdr */
2474                         desti--;
2475                         if ((start_s + i) < offset)
2476                                 result++;       /* insertion index adjustment */
2477                 } else {
2478 #endif /* GROT */
2479                         ichdr_d->firstused -= tmp;
2480                         /* both on-disk, don't endian flip twice */
2481                         entry_d->hashval = entry_s->hashval;
2482                         entry_d->nameidx = cpu_to_be16(ichdr_d->firstused);
2483                         entry_d->flags = entry_s->flags;
2484                         ASSERT(be16_to_cpu(entry_d->nameidx) + tmp
2485                                                         <= XFS_LBSIZE(mp));
2486                         memmove(xfs_attr3_leaf_name(leaf_d, desti),
2487                                 xfs_attr3_leaf_name(leaf_s, start_s + i), tmp);
2488                         ASSERT(be16_to_cpu(entry_s->nameidx) + tmp
2489                                                         <= XFS_LBSIZE(mp));
2490                         memset(xfs_attr3_leaf_name(leaf_s, start_s + i), 0, tmp);
2491                         ichdr_s->usedbytes -= tmp;
2492                         ichdr_d->usedbytes += tmp;
2493                         ichdr_s->count -= 1;
2494                         ichdr_d->count += 1;
2495                         tmp = ichdr_d->count * sizeof(xfs_attr_leaf_entry_t)
2496                                         + xfs_attr3_leaf_hdr_size(leaf_d);
2497                         ASSERT(ichdr_d->firstused >= tmp);
2498 #ifdef GROT
2499                 }
2500 #endif /* GROT */
2501         }
2502
2503         /*
2504          * Zero out the entries we just copied.
2505          */
2506         if (start_s == ichdr_s->count) {
2507                 tmp = count * sizeof(xfs_attr_leaf_entry_t);
2508                 entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
2509                 ASSERT(((char *)entry_s + tmp) <=
2510                        ((char *)leaf_s + XFS_LBSIZE(mp)));
2511                 memset(entry_s, 0, tmp);
2512         } else {
2513                 /*
2514                  * Move the remaining entries down to fill the hole,
2515                  * then zero the entries at the top.
2516                  */
2517                 tmp  = (ichdr_s->count - count) * sizeof(xfs_attr_leaf_entry_t);
2518                 entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s + count];
2519                 entry_d = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
2520                 memmove(entry_d, entry_s, tmp);
2521
2522                 tmp = count * sizeof(xfs_attr_leaf_entry_t);
2523                 entry_s = &xfs_attr3_leaf_entryp(leaf_s)[ichdr_s->count];
2524                 ASSERT(((char *)entry_s + tmp) <=
2525                        ((char *)leaf_s + XFS_LBSIZE(mp)));
2526                 memset(entry_s, 0, tmp);
2527         }
2528
2529         /*
2530          * Fill in the freemap information
2531          */
2532         ichdr_d->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_d);
2533         ichdr_d->freemap[0].base += ichdr_d->count * sizeof(xfs_attr_leaf_entry_t);
2534         ichdr_d->freemap[0].size = ichdr_d->firstused - ichdr_d->freemap[0].base;
2535         ichdr_d->freemap[1].base = 0;
2536         ichdr_d->freemap[2].base = 0;
2537         ichdr_d->freemap[1].size = 0;
2538         ichdr_d->freemap[2].size = 0;
2539         ichdr_s->holes = 1;     /* leaf may not be compact */
2540 }
2541
2542 /*
2543  * Pick up the last hashvalue from a leaf block.
2544  */
2545 xfs_dahash_t
2546 xfs_attr_leaf_lasthash(
2547         struct xfs_buf  *bp,
2548         int             *count)
2549 {
2550         struct xfs_attr3_icleaf_hdr ichdr;
2551         struct xfs_attr_leaf_entry *entries;
2552
2553         xfs_attr3_leaf_hdr_from_disk(&ichdr, bp->b_addr);
2554         entries = xfs_attr3_leaf_entryp(bp->b_addr);
2555         if (count)
2556                 *count = ichdr.count;
2557         if (!ichdr.count)
2558                 return 0;
2559         return be32_to_cpu(entries[ichdr.count - 1].hashval);
2560 }
2561
2562 /*
2563  * Calculate the number of bytes used to store the indicated attribute
2564  * (whether local or remote, only calculate bytes in this block).
2565  */
2566 STATIC int
2567 xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index)
2568 {
2569         struct xfs_attr_leaf_entry *entries;
2570         xfs_attr_leaf_name_local_t *name_loc;
2571         xfs_attr_leaf_name_remote_t *name_rmt;
2572         int size;
2573
2574         entries = xfs_attr3_leaf_entryp(leaf);
2575         if (entries[index].flags & XFS_ATTR_LOCAL) {
2576                 name_loc = xfs_attr3_leaf_name_local(leaf, index);
2577                 size = xfs_attr_leaf_entsize_local(name_loc->namelen,
2578                                                    be16_to_cpu(name_loc->valuelen));
2579         } else {
2580                 name_rmt = xfs_attr3_leaf_name_remote(leaf, index);
2581                 size = xfs_attr_leaf_entsize_remote(name_rmt->namelen);
2582         }
2583         return size;
2584 }
2585
2586 /*
2587  * Calculate the number of bytes that would be required to store the new
2588  * attribute (whether local or remote, only calculate bytes in this block).
2589  * This routine decides as a side effect whether the attribute will be
2590  * a "local" or a "remote" attribute.
2591  */
2592 int
2593 xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize, int *local)
2594 {
2595         int size;
2596
2597         size = xfs_attr_leaf_entsize_local(namelen, valuelen);
2598         if (size < xfs_attr_leaf_entsize_local_max(blocksize)) {
2599                 if (local) {
2600                         *local = 1;
2601                 }
2602         } else {
2603                 size = xfs_attr_leaf_entsize_remote(namelen);
2604                 if (local) {
2605                         *local = 0;
2606                 }
2607         }
2608         return size;
2609 }
2610
2611 /*
2612  * Copy out attribute list entries for attr_list(), for leaf attribute lists.
2613  */
2614 int
2615 xfs_attr3_leaf_list_int(
2616         struct xfs_buf                  *bp,
2617         struct xfs_attr_list_context    *context)
2618 {
2619         struct attrlist_cursor_kern     *cursor;
2620         struct xfs_attr_leafblock       *leaf;
2621         struct xfs_attr3_icleaf_hdr     ichdr;
2622         struct xfs_attr_leaf_entry      *entries;
2623         struct xfs_attr_leaf_entry      *entry;
2624         int                             retval;
2625         int                             i;
2626
2627         trace_xfs_attr_list_leaf(context);
2628
2629         leaf = bp->b_addr;
2630         xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
2631         entries = xfs_attr3_leaf_entryp(leaf);
2632
2633         cursor = context->cursor;
2634         cursor->initted = 1;
2635
2636         /*
2637          * Re-find our place in the leaf block if this is a new syscall.
2638          */
2639         if (context->resynch) {
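                /*
                 * Entries may have moved since the last call, so walk the
                 * block and re-find the cursor position by hashval, skipping
                 * over any duplicates that have already been returned.
                 */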
2640                 entry = &entries[0];
2641                 for (i = 0; i < ichdr.count; entry++, i++) {
2642                         if (be32_to_cpu(entry->hashval) == cursor->hashval) {
2643                                 if (cursor->offset == context->dupcnt) {
2644                                         context->dupcnt = 0;
2645                                         break;
2646                                 }
2647                                 context->dupcnt++;
2648                         } else if (be32_to_cpu(entry->hashval) >
2649                                         cursor->hashval) {
2650                                 context->dupcnt = 0;
2651                                 break;
2652                         }
2653                 }
2654                 if (i == ichdr.count) {
2655                         trace_xfs_attr_list_notfound(context);
2656                         return 0;
2657                 }
2658         } else {
2659                 entry = &entries[0];
2660                 i = 0;
2661         }
2662         context->resynch = 0;
2663
2664         /*
2665          * We have found our place; start copying out the new attributes.
2666          */
2667         retval = 0;
2668         for (; i < ichdr.count; entry++, i++) {
2669                 if (be32_to_cpu(entry->hashval) != cursor->hashval) {
2670                         cursor->hashval = be32_to_cpu(entry->hashval);
2671                         cursor->offset = 0;
2672                 }
2673
2674                 if (entry->flags & XFS_ATTR_INCOMPLETE)
2675                         continue;               /* skip incomplete entries */
2676
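                     /*
                      * Local attribute: the name and value both live in this
                      * leaf block, so hand them to the callback directly.
                      */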
2677                 if (entry->flags & XFS_ATTR_LOCAL) {
2678                         xfs_attr_leaf_name_local_t *name_loc =
2679                                 xfs_attr3_leaf_name_local(leaf, i);
2680
2681                         retval = context->put_listent(context,
2682                                                 entry->flags,
2683                                                 name_loc->nameval,
2684                                                 (int)name_loc->namelen,
2685                                                 be16_to_cpu(name_loc->valuelen),
2686                                                 &name_loc->nameval[name_loc->namelen]);
2687                         if (retval)
2688                                 return retval;
2689                 } else {
2690                         xfs_attr_leaf_name_remote_t *name_rmt =
2691                                 xfs_attr3_leaf_name_remote(leaf, i);
2692
2693                         int valuelen = be32_to_cpu(name_rmt->valuelen);
2694
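                             /*
                              * The caller wants the value too, so read the
                              * remote value blocks into a temporary buffer
                              * before invoking the callback.
                              */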
2695                         if (context->put_value) {
2696                                 xfs_da_args_t args;
2697
2698                                 memset(&args, 0, sizeof(args));
2699                                 args.dp = context->dp;
2700                                 args.whichfork = XFS_ATTR_FORK;
2701                                 args.valuelen = valuelen;
2702                                 args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
2703                                 args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
2704                                 args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen);
2705                                 retval = xfs_attr_rmtval_get(&args);
2706                                 if (retval)
2707                                         return retval;
2708                                 retval = context->put_listent(context,
2709                                                 entry->flags,
2710                                                 name_rmt->name,
2711                                                 (int)name_rmt->namelen,
2712                                                 valuelen,
2713                                                 args.value);
2714                                 kmem_free(args.value);
2715                         } else {
2716                                 retval = context->put_listent(context,
2717                                                 entry->flags,
2718                                                 name_rmt->name,
2719                                                 (int)name_rmt->namelen,
2720                                                 valuelen,
2721                                                 NULL);
2722                         }
2723                         if (retval)
2724                                 return retval;
2725                 }
2726                 if (context->seen_enough)
2727                         break;
2728                 cursor->offset++;
2729         }
2730         trace_xfs_attr_list_leaf_end(context);
2731         return retval;
2732 }
2733
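     /*
      * Illustrative sketch of a put_listent callback, matching the way it is
      * invoked from xfs_attr3_leaf_list_int() above: (context, flags, name,
      * namelen, valuelen, value).  The real callbacks live in the attr
      * listing code elsewhere; this hypothetical one only demonstrates that
      * setting context->seen_enough stops the walk after the current entry,
      * while returning non-zero would abort the walk with that error.
      */
     static inline int
     xfs_attr_example_put_listent(
             struct xfs_attr_list_context    *context,
             int                             flags,
             unsigned char                   *name,
             int                             namelen,
             int                             valuelen,
             unsigned char                   *value)
     {
             /* accept this entry, but ask the walker to stop after it */
             context->seen_enough = 1;
             return 0;
     }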
2734
2735 /*========================================================================
2736  * Manage the INCOMPLETE flag in a leaf entry
2737  *========================================================================*/
2738
2739 /*
2740  * Clear the INCOMPLETE flag on an entry in a leaf block.
2741  */
2742 int
2743 xfs_attr3_leaf_clearflag(
2744         struct xfs_da_args      *args)
2745 {
2746         struct xfs_attr_leafblock *leaf;
2747         struct xfs_attr_leaf_entry *entry;
2748         struct xfs_attr_leaf_name_remote *name_rmt;
2749         struct xfs_buf          *bp;
2750         int                     error;
2751 #ifdef DEBUG
2752         struct xfs_attr3_icleaf_hdr ichdr;
2753         xfs_attr_leaf_name_local_t *name_loc;
2754         int namelen;
2755         char *name;
2756 #endif /* DEBUG */
2757
2758         trace_xfs_attr_leaf_clearflag(args);
2759         /*
2760          * Set up the operation.
2761          */
2762         error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
2763         if (error)
2764                 return error;
2765
2766         leaf = bp->b_addr;
2767         entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
2768         ASSERT(entry->flags & XFS_ATTR_INCOMPLETE);
2769
2770 #ifdef DEBUG
2771         xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
2772         ASSERT(args->index < ichdr.count);
2773         ASSERT(args->index >= 0);
2774
2775         if (entry->flags & XFS_ATTR_LOCAL) {
2776                 name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
2777                 namelen = name_loc->namelen;
2778                 name = (char *)name_loc->nameval;
2779         } else {
2780                 name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
2781                 namelen = name_rmt->namelen;
2782                 name = (char *)name_rmt->name;
2783         }
2784         ASSERT(be32_to_cpu(entry->hashval) == args->hashval);
2785         ASSERT(namelen == args->namelen);
2786         ASSERT(memcmp(name, args->name, namelen) == 0);
2787 #endif /* DEBUG */
2788
2789         entry->flags &= ~XFS_ATTR_INCOMPLETE;
2790         xfs_trans_log_buf(args->trans, bp,
2791                          XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
2792
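             /*
              * For a remote attribute, also record the remote value block and
              * length now that the value itself has been written out.
              */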
2793         if (args->rmtblkno) {
2794                 ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0);
2795                 name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
2796                 name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
2797                 name_rmt->valuelen = cpu_to_be32(args->valuelen);
2798                 xfs_trans_log_buf(args->trans, bp,
2799                          XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
2800         }
2801
2802         /*
2803          * Commit the flag value change and start the next trans in series.
2804          */
2805         return xfs_trans_roll(&args->trans, args->dp);
2806 }
2807
2808 /*
2809  * Set the INCOMPLETE flag on an entry in a leaf block.
2810  */
2811 int
2812 xfs_attr3_leaf_setflag(
2813         struct xfs_da_args      *args)
2814 {
2815         struct xfs_attr_leafblock *leaf;
2816         struct xfs_attr_leaf_entry *entry;
2817         struct xfs_attr_leaf_name_remote *name_rmt;
2818         struct xfs_buf          *bp;
2819         int error;
2820 #ifdef DEBUG
2821         struct xfs_attr3_icleaf_hdr ichdr;
2822 #endif
2823
2824         trace_xfs_attr_leaf_setflag(args);
2825
2826         /*
2827          * Set up the operation.
2828          */
2829         error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
2830         if (error)
2831                 return error;
2832
2833         leaf = bp->b_addr;
2834 #ifdef DEBUG
2835         xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
2836         ASSERT(args->index < ichdr.count);
2837         ASSERT(args->index >= 0);
2838 #endif
2839         entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
2840
2841         ASSERT((entry->flags & XFS_ATTR_INCOMPLETE) == 0);
2842         entry->flags |= XFS_ATTR_INCOMPLETE;
2843         xfs_trans_log_buf(args->trans, bp,
2844                         XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
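             /*
              * For a remote attribute, zero the stale value block and length
              * while the entry is marked incomplete.
              */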
2845         if ((entry->flags & XFS_ATTR_LOCAL) == 0) {
2846                 name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
2847                 name_rmt->valueblk = 0;
2848                 name_rmt->valuelen = 0;
2849                 xfs_trans_log_buf(args->trans, bp,
2850                          XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
2851         }
2852
2853         /*
2854          * Commit the flag value change and start the next trans in series.
2855          */
2856         return xfs_trans_roll(&args->trans, args->dp);
2857 }
2858
2859 /*
2860  * In a single transaction, clear the INCOMPLETE flag on the leaf entry
2861  * given by args->blkno/index and set the INCOMPLETE flag on the leaf
2862  * entry given by args->blkno2/index2.
2863  *
2864  * Note that they could be in different blocks, or in the same block.
2865  */
2866 int
2867 xfs_attr3_leaf_flipflags(
2868         struct xfs_da_args      *args)
2869 {
2870         struct xfs_attr_leafblock *leaf1;
2871         struct xfs_attr_leafblock *leaf2;
2872         struct xfs_attr_leaf_entry *entry1;
2873         struct xfs_attr_leaf_entry *entry2;
2874         struct xfs_attr_leaf_name_remote *name_rmt;
2875         struct xfs_buf          *bp1;
2876         struct xfs_buf          *bp2;
2877         int error;
2878 #ifdef DEBUG
2879         struct xfs_attr3_icleaf_hdr ichdr1;
2880         struct xfs_attr3_icleaf_hdr ichdr2;
2881         xfs_attr_leaf_name_local_t *name_loc;
2882         int namelen1, namelen2;
2883         char *name1, *name2;
2884 #endif /* DEBUG */
2885
2886         trace_xfs_attr_leaf_flipflags(args);
2887
2888         /*
2889          * Read the block containing the "old" attr
2890          */
2891         error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp1);
2892         if (error)
2893                 return error;
2894
2895         /*
2896          * Read the block containing the "new" attr, if it is different
2897          */
2898         if (args->blkno2 != args->blkno) {
2899                 error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno2,
2900                                            -1, &bp2);
2901                 if (error)
2902                         return error;
2903         } else {
2904                 bp2 = bp1;
2905         }
2906
2907         leaf1 = bp1->b_addr;
2908         entry1 = &xfs_attr3_leaf_entryp(leaf1)[args->index];
2909
2910         leaf2 = bp2->b_addr;
2911         entry2 = &xfs_attr3_leaf_entryp(leaf2)[args->index2];
2912
2913 #ifdef DEBUG
2914         xfs_attr3_leaf_hdr_from_disk(&ichdr1, leaf1);
2915         ASSERT(args->index < ichdr1.count);
2916         ASSERT(args->index >= 0);
2917
2918         xfs_attr3_leaf_hdr_from_disk(&ichdr2, leaf2);
2919         ASSERT(args->index2 < ichdr2.count);
2920         ASSERT(args->index2 >= 0);
2921
2922         if (entry1->flags & XFS_ATTR_LOCAL) {
2923                 name_loc = xfs_attr3_leaf_name_local(leaf1, args->index);
2924                 namelen1 = name_loc->namelen;
2925                 name1 = (char *)name_loc->nameval;
2926         } else {
2927                 name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index);
2928                 namelen1 = name_rmt->namelen;
2929                 name1 = (char *)name_rmt->name;
2930         }
2931         if (entry2->flags & XFS_ATTR_LOCAL) {
2932                 name_loc = xfs_attr3_leaf_name_local(leaf2, args->index2);
2933                 namelen2 = name_loc->namelen;
2934                 name2 = (char *)name_loc->nameval;
2935         } else {
2936                 name_rmt = xfs_attr3_leaf_name_remote(leaf2, args->index2);
2937                 namelen2 = name_rmt->namelen;
2938                 name2 = (char *)name_rmt->name;
2939         }
2940         ASSERT(be32_to_cpu(entry1->hashval) == be32_to_cpu(entry2->hashval));
2941         ASSERT(namelen1 == namelen2);
2942         ASSERT(memcmp(name1, name2, namelen1) == 0);
2943 #endif /* DEBUG */
2944
2945         ASSERT(entry1->flags & XFS_ATTR_INCOMPLETE);
2946         ASSERT((entry2->flags & XFS_ATTR_INCOMPLETE) == 0);
2947
2948         entry1->flags &= ~XFS_ATTR_INCOMPLETE;
2949         xfs_trans_log_buf(args->trans, bp1,
2950                           XFS_DA_LOGRANGE(leaf1, entry1, sizeof(*entry1)));
2951         if (args->rmtblkno) {
2952                 ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
2953                 name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index);
2954                 name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
2955                 name_rmt->valuelen = cpu_to_be32(args->valuelen);
2956                 xfs_trans_log_buf(args->trans, bp1,
2957                          XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt)));
2958         }
2959
2960         entry2->flags |= XFS_ATTR_INCOMPLETE;
2961         xfs_trans_log_buf(args->trans, bp2,
2962                           XFS_DA_LOGRANGE(leaf2, entry2, sizeof(*entry2)));
2963         if ((entry2->flags & XFS_ATTR_LOCAL) == 0) {
2964                 name_rmt = xfs_attr3_leaf_name_remote(leaf2, args->index2);
2965                 name_rmt->valueblk = 0;
2966                 name_rmt->valuelen = 0;
2967                 xfs_trans_log_buf(args->trans, bp2,
2968                          XFS_DA_LOGRANGE(leaf2, name_rmt, sizeof(*name_rmt)));
2969         }
2970
2971         /*
2972          * Commit the flag value change and start the next trans in series.
2973          */
2974         error = xfs_trans_roll(&args->trans, args->dp);
2975
2976         return error;
2977 }
2978
2979 /*========================================================================
2980  * Indiscriminately delete the entire attribute fork
2981  *========================================================================*/
2982
2983 /*
2984  * Recurse (gasp!) through the attribute nodes until we find leaves.
2985  * We're doing a depth-first traversal in order to invalidate everything.
2986  */
2987 int
2988 xfs_attr3_root_inactive(
2989         struct xfs_trans        **trans,
2990         struct xfs_inode        *dp)
2991 {
2992         struct xfs_da_blkinfo   *info;
2993         struct xfs_buf          *bp;
2994         xfs_daddr_t             blkno;
2995         int                     error;
2996
2997         /*
2998          * Read block 0 to see what we have to work with.
2999          * We only get here if we have extents; since we remove
3000          * the extents in reverse order, the extent containing
3001          * block 0 must still be there.
3002          */
3003         error = xfs_da3_node_read(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK);
3004         if (error)
3005                 return error;
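             /* remember the root's disk address so we can invalidate it below */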
3006         blkno = bp->b_bn;
3007
3008         /*
3009          * Invalidate the tree, even if the "tree" is only a single leaf block.
3010          * This is a depth-first traversal!
3011          */
3012         info = bp->b_addr;
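             /*
              * The case labels are byte-swapped at compile time (cpu_to_be16
              * of a constant), so we can switch on the on-disk magic directly.
              */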
3013         switch (info->magic) {
3014         case cpu_to_be16(XFS_DA_NODE_MAGIC):
3015         case cpu_to_be16(XFS_DA3_NODE_MAGIC):
3016                 error = xfs_attr3_node_inactive(trans, dp, bp, 1);
3017                 break;
3018         case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
3019         case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
3020                 error = xfs_attr3_leaf_inactive(trans, dp, bp);
3021                 break;
3022         default:
3023                 error = XFS_ERROR(EIO);
3024                 xfs_trans_brelse(*trans, bp);
3025                 break;
3026         }
3027         if (error)
3028                 return error;
3029
3030         /*
3031          * Invalidate the incore copy of the root block.
3032          */
3033         error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK);
3034         if (error)
3035                 return error;
3036         xfs_trans_binval(*trans, bp);   /* remove from cache */
3037         /*
3038          * Commit the invalidate and start the next transaction.
3039          */
3040         error = xfs_trans_roll(trans, dp);
3041
3042         return error;
3043 }
3044
3045 /*
3046  * Recurse (gasp!) through the attribute nodes until we find leaves.
3047  * We're doing a depth-first traversal in order to invalidate everything.
3048  */
3049 STATIC int
3050 xfs_attr3_node_inactive(
3051         struct xfs_trans **trans,
3052         struct xfs_inode *dp,
3053         struct xfs_buf  *bp,
3054         int             level)
3055 {
3056         xfs_da_blkinfo_t *info;
3057         xfs_da_intnode_t *node;
3058         xfs_dablk_t child_fsb;
3059         xfs_daddr_t parent_blkno, child_blkno;
3060         int error, i;
3061         struct xfs_buf *child_bp;
3062         struct xfs_da_node_entry *btree;
3063         struct xfs_da3_icnode_hdr ichdr;
3064
3065         /*
3066          * Since this code is recursive (gasp!) we must protect ourselves.
3067          */
3068         if (level > XFS_DA_NODE_MAXDEPTH) {
3069                 xfs_trans_brelse(*trans, bp);   /* no locks for later trans */
3070                 return XFS_ERROR(EIO);
3071         }
3072
3073         node = bp->b_addr;
3074         xfs_da3_node_hdr_from_disk(&ichdr, node);
3075         parent_blkno = bp->b_bn;
3076         if (!ichdr.count) {
3077                 xfs_trans_brelse(*trans, bp);
3078                 return 0;
3079         }
3080         btree = xfs_da3_node_tree_p(node);
3081         child_fsb = be32_to_cpu(btree[0].before);
3082         xfs_trans_brelse(*trans, bp);   /* no locks for later trans */
3083
3084         /*
3085          * If this is the node level just above the leaves, simply loop
3086          * over the leaves removing all of them.  If this is higher up
3087          * in the tree, recurse downward.
3088          */
3089         for (i = 0; i < ichdr.count; i++) {
3090                 /*
3091                  * Read the subsidiary block to see what we have to work with.
3092                  * Don't do this in a transaction.  This is a depth-first
3093                  * traversal of the tree so we may deal with many blocks
3094                  * before we come back to this one.
3095                  */
3096                 error = xfs_da3_node_read(*trans, dp, child_fsb, -2, &child_bp,
3097                                                 XFS_ATTR_FORK);
3098                 if (error)
3099                         return error;
3100                 if (child_bp) {
3101                         /* save for re-read later */
3102                         child_blkno = XFS_BUF_ADDR(child_bp);
3103
3104                         /*
3105                          * Invalidate the subtree, whatever type of block it is.
3106                          */
3107                         info = child_bp->b_addr;
3108                         switch (info->magic) {
3109                         case cpu_to_be16(XFS_DA_NODE_MAGIC):
3110                         case cpu_to_be16(XFS_DA3_NODE_MAGIC):
3111                                 error = xfs_attr3_node_inactive(trans, dp,
3112                                                         child_bp, level + 1);
3113                                 break;
3114                         case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
3115                         case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
3116                                 error = xfs_attr3_leaf_inactive(trans, dp,
3117                                                         child_bp);
3118                                 break;
3119                         default:
3120                                 error = XFS_ERROR(EIO);
3121                                 xfs_trans_brelse(*trans, child_bp);
3122                                 break;
3123                         }
3124                         if (error)
3125                                 return error;
3126
3127                         /*
3128                          * Remove the subsidiary block from the cache
3129                          * and from the log.
3130                          */
3131                         error = xfs_da_get_buf(*trans, dp, 0, child_blkno,
3132                                 &child_bp, XFS_ATTR_FORK);
3133                         if (error)
3134                                 return error;
3135                         xfs_trans_binval(*trans, child_bp);
3136                 }
3137
3138                 /*
3139                  * If we're not done, re-read the parent to get the next
3140                  * child block number.
3141                  */
3142                 if (i + 1 < ichdr.count) {
3143                         error = xfs_da3_node_read(*trans, dp, 0, parent_blkno,
3144                                                  &bp, XFS_ATTR_FORK);
3145                         if (error)
3146                                 return error;
3147                         /*
                              * We gave up our hold on the parent buffer above,
                              * so re-derive the btree pointer from the freshly
                              * read buffer before using it.
                              */
                             node = bp->b_addr;
                             btree = xfs_da3_node_tree_p(node);
                             child_fsb = be32_to_cpu(btree[i + 1].before);
3148                         xfs_trans_brelse(*trans, bp);
3149                 }
3150                 /*
3151                  * Commit the invalidation of this subtree and move to the next transaction.
3152                  */
3153                 error = xfs_trans_roll(trans, dp);
3154                 if (error)
3155                         return error;
3156         }
3157
3158         return 0;
3159 }
3160
3161 /*
3162  * Invalidate all of the "remote" value regions pointed to by a particular
3163  * leaf block.
3164  * Note that we must release the lock on the buffer so that we are not
3165  * caught holding something that the logging code wants to flush to disk.
3166  */
3167 STATIC int
3168 xfs_attr3_leaf_inactive(
3169         struct xfs_trans        **trans,
3170         struct xfs_inode        *dp,
3171         struct xfs_buf          *bp)
3172 {
3173         struct xfs_attr_leafblock *leaf;
3174         struct xfs_attr3_icleaf_hdr ichdr;
3175         struct xfs_attr_leaf_entry *entry;
3176         struct xfs_attr_leaf_name_remote *name_rmt;
3177         struct xfs_attr_inactive_list *list;
3178         struct xfs_attr_inactive_list *lp;
3179         int                     error;
3180         int                     count;
3181         int                     size;
3182         int                     tmp;
3183         int                     i;
3184
3185         leaf = bp->b_addr;
3186         xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
3187
3188         /*
3189          * Count the number of "remote" value extents.
3190          */
3191         count = 0;
3192         entry = xfs_attr3_leaf_entryp(leaf);
3193         for (i = 0; i < ichdr.count; entry++, i++) {
3194                 if (be16_to_cpu(entry->nameidx) &&
3195                     ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
3196                         name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
3197                         if (name_rmt->valueblk)
3198                                 count++;
3199                 }
3200         }
3201
3202         /*
3203          * If there are no "remote" values, we're done.
3204          */
3205         if (count == 0) {
3206                 xfs_trans_brelse(*trans, bp);
3207                 return 0;
3208         }
3209
3210         /*
3211          * Allocate storage for a list of all the "remote" value extents.
3212          */
3213         size = count * sizeof(xfs_attr_inactive_list_t);
3214         list = kmem_alloc(size, KM_SLEEP);
3215
3216         /*
3217          * Identify each of the "remote" value extents.
3218          */
3219         lp = list;
3220         entry = xfs_attr3_leaf_entryp(leaf);
3221         for (i = 0; i < ichdr.count; entry++, i++) {
3222                 if (be16_to_cpu(entry->nameidx) &&
3223                     ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
3224                         name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
3225                         if (name_rmt->valueblk) {
3226                                 lp->valueblk = be32_to_cpu(name_rmt->valueblk);
3227                                 lp->valuelen = XFS_B_TO_FSB(dp->i_mount,
3228                                                     be32_to_cpu(name_rmt->valuelen));
3229                                 lp++;
3230                         }
3231                 }
3232         }
3233         xfs_trans_brelse(*trans, bp);   /* unlock for trans. in freextent() */
3234
3235         /*
3236          * Invalidate each of the "remote" value extents.
3237          */
3238         error = 0;
3239         for (lp = list, i = 0; i < count; i++, lp++) {
3240                 tmp = xfs_attr3_leaf_freextent(trans, dp,
3241                                 lp->valueblk, lp->valuelen);
3242
3243                 if (error == 0)
3244                         error = tmp;    /* save only the 1st errno */
3245         }
3246
3247         kmem_free(list);
3248         return error;
3249 }
3250
3251 /*
3252  * Look at all the extents for this logical region,
3253  * invalidate any buffers that are incore/in transactions.
3254  */
3255 STATIC int
3256 xfs_attr3_leaf_freextent(
3257         struct xfs_trans        **trans,
3258         struct xfs_inode        *dp,
3259         xfs_dablk_t             blkno,
3260         int                     blkcnt)
3261 {
3262         struct xfs_bmbt_irec    map;
3263         struct xfs_buf          *bp;
3264         xfs_dablk_t             tblkno;
3265         xfs_daddr_t             dblkno;
3266         int                     tblkcnt;
3267         int                     dblkcnt;
3268         int                     nmap;
3269         int                     error;
3270
3271         /*
3272          * Roll through the "value", invalidating the attribute value's
3273          * blocks.
3274          */
3275         tblkno = blkno;
3276         tblkcnt = blkcnt;
3277         while (tblkcnt > 0) {
3278                 /*
3279                  * Look up where this piece of the value is mapped on disk.
3280                  */
3281                 nmap = 1;
3282                 error = xfs_bmapi_read(dp, (xfs_fileoff_t)tblkno, tblkcnt,
3283                                        &map, &nmap, XFS_BMAPI_ATTRFORK);
3284                 if (error) {
3285                         return error;
3286                 }
3287                 ASSERT(nmap == 1);
3288                 ASSERT(map.br_startblock != DELAYSTARTBLOCK);
3289
3290                 /*
3291                  * If it's a hole, these are already unmapped
3292                  * so there's nothing to invalidate.
3293                  */
3294                 if (map.br_startblock != HOLESTARTBLOCK) {
3295
3296                         dblkno = XFS_FSB_TO_DADDR(dp->i_mount,
3297                                                   map.br_startblock);
3298                         dblkcnt = XFS_FSB_TO_BB(dp->i_mount,
3299                                                 map.br_blockcount);
3300                         bp = xfs_trans_get_buf(*trans,
3301                                         dp->i_mount->m_ddev_targp,
3302                                         dblkno, dblkcnt, 0);
3303                         if (!bp)
3304                                 return ENOMEM;
3305                         xfs_trans_binval(*trans, bp);
3306                         /*
3307                          * Roll to next transaction.
3308                          */
3309                         error = xfs_trans_roll(trans, dp);
3310                         if (error)
3311                                 return error;
3312                 }
3313
3314                 tblkno += map.br_blockcount;
3315                 tblkcnt -= map.br_blockcount;
3316         }
3317
3318         return 0;
3319 }