xfs: remove unused XFS_BMAPI_ flags
firefly-linux-kernel-4.4.55.git: fs/xfs/xfs_bmap.c
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_dir2.h"
28 #include "xfs_da_btree.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_alloc_btree.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_dir2_sf.h"
33 #include "xfs_dinode.h"
34 #include "xfs_inode.h"
35 #include "xfs_btree.h"
36 #include "xfs_mount.h"
37 #include "xfs_itable.h"
38 #include "xfs_dir2_data.h"
39 #include "xfs_dir2_leaf.h"
40 #include "xfs_dir2_block.h"
41 #include "xfs_inode_item.h"
42 #include "xfs_extfree_item.h"
43 #include "xfs_alloc.h"
44 #include "xfs_bmap.h"
45 #include "xfs_rtalloc.h"
46 #include "xfs_error.h"
47 #include "xfs_attr_leaf.h"
48 #include "xfs_rw.h"
49 #include "xfs_quota.h"
50 #include "xfs_trans_space.h"
51 #include "xfs_buf_item.h"
52 #include "xfs_filestream.h"
53 #include "xfs_vnodeops.h"
54 #include "xfs_trace.h"
55
56
57 #ifdef DEBUG
58 STATIC void
59 xfs_bmap_check_leaf_extents(xfs_btree_cur_t *cur, xfs_inode_t *ip, int whichfork);
60 #endif
61
62 kmem_zone_t             *xfs_bmap_free_item_zone;
63
64 /*
65  * Prototypes for internal bmap routines.
66  */
67
68
69 /*
70  * Called from xfs_bmap_add_attrfork to handle extents format files.
71  */
72 STATIC int                                      /* error */
73 xfs_bmap_add_attrfork_extents(
74         xfs_trans_t             *tp,            /* transaction pointer */
75         xfs_inode_t             *ip,            /* incore inode pointer */
76         xfs_fsblock_t           *firstblock,    /* first block allocated */
77         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
78         int                     *flags);        /* inode logging flags */
79
80 /*
81  * Called from xfs_bmap_add_attrfork to handle local format files.
82  */
83 STATIC int                                      /* error */
84 xfs_bmap_add_attrfork_local(
85         xfs_trans_t             *tp,            /* transaction pointer */
86         xfs_inode_t             *ip,            /* incore inode pointer */
87         xfs_fsblock_t           *firstblock,    /* first block allocated */
88         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
89         int                     *flags);        /* inode logging flags */
90
91 /*
92  * Called by xfs_bmapi to update file extent records and the btree
93  * after allocating space (or doing a delayed allocation).
94  */
95 STATIC int                              /* error */
96 xfs_bmap_add_extent(
97         xfs_inode_t             *ip,    /* incore inode pointer */
98         xfs_extnum_t            idx,    /* extent number to update/insert */
99         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
100         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
101         xfs_fsblock_t           *first, /* pointer to firstblock variable */
102         xfs_bmap_free_t         *flist, /* list of extents to be freed */
103         int                     *logflagsp, /* inode logging flags */
104         xfs_extdelta_t          *delta, /* Change made to incore extents */
105         int                     whichfork, /* data or attr fork */
106         int                     rsvd);  /* OK to allocate reserved blocks */
107
108 /*
109  * Called by xfs_bmap_add_extent to handle cases converting a delayed
110  * allocation to a real allocation.
111  */
112 STATIC int                              /* error */
113 xfs_bmap_add_extent_delay_real(
114         xfs_inode_t             *ip,    /* incore inode pointer */
115         xfs_extnum_t            idx,    /* extent number to update/insert */
116         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
117         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
118         xfs_filblks_t           *dnew,  /* new delayed-alloc indirect blocks */
119         xfs_fsblock_t           *first, /* pointer to firstblock variable */
120         xfs_bmap_free_t         *flist, /* list of extents to be freed */
121         int                     *logflagsp, /* inode logging flags */
122         xfs_extdelta_t          *delta, /* Change made to incore extents */
123         int                     rsvd);  /* OK to allocate reserved blocks */
124
125 /*
126  * Called by xfs_bmap_add_extent to handle cases converting a hole
127  * to a delayed allocation.
128  */
129 STATIC int                              /* error */
130 xfs_bmap_add_extent_hole_delay(
131         xfs_inode_t             *ip,    /* incore inode pointer */
132         xfs_extnum_t            idx,    /* extent number to update/insert */
133         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
134         int                     *logflagsp,/* inode logging flags */
135         xfs_extdelta_t          *delta, /* Change made to incore extents */
136         int                     rsvd);  /* OK to allocate reserved blocks */
137
138 /*
139  * Called by xfs_bmap_add_extent to handle cases converting a hole
140  * to a real allocation.
141  */
142 STATIC int                              /* error */
143 xfs_bmap_add_extent_hole_real(
144         xfs_inode_t             *ip,    /* incore inode pointer */
145         xfs_extnum_t            idx,    /* extent number to update/insert */
146         xfs_btree_cur_t         *cur,   /* if null, not a btree */
147         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
148         int                     *logflagsp, /* inode logging flags */
149         xfs_extdelta_t          *delta, /* Change made to incore extents */
150         int                     whichfork); /* data or attr fork */
151
152 /*
153  * Called by xfs_bmap_add_extent to handle cases converting an unwritten
154  * allocation to a real allocation or vice versa.
155  */
156 STATIC int                              /* error */
157 xfs_bmap_add_extent_unwritten_real(
158         xfs_inode_t             *ip,    /* incore inode pointer */
159         xfs_extnum_t            idx,    /* extent number to update/insert */
160         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
161         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
162         int                     *logflagsp, /* inode logging flags */
163         xfs_extdelta_t          *delta); /* Change made to incore extents */
164
165 /*
166  * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
167  * It figures out where to ask the underlying allocator to put the new extent.
168  */
169 STATIC int                              /* error */
170 xfs_bmap_alloc(
171         xfs_bmalloca_t          *ap);   /* bmap alloc argument struct */
172
173 /*
174  * Transform a btree format file with only one leaf node, where the
175  * extents list will fit in the inode, into an extents format file.
176  * Since the file extents are already in-core, all we have to do is
177  * give up the space for the btree root and pitch the leaf block.
178  */
179 STATIC int                              /* error */
180 xfs_bmap_btree_to_extents(
181         xfs_trans_t             *tp,    /* transaction pointer */
182         xfs_inode_t             *ip,    /* incore inode pointer */
183         xfs_btree_cur_t         *cur,   /* btree cursor */
184         int                     *logflagsp, /* inode logging flags */
185         int                     whichfork); /* data or attr fork */
186
187 /*
188  * Called by xfs_bmapi to update file extent records and the btree
189  * after removing space (or undoing a delayed allocation).
190  */
191 STATIC int                              /* error */
192 xfs_bmap_del_extent(
193         xfs_inode_t             *ip,    /* incore inode pointer */
194         xfs_trans_t             *tp,    /* current trans pointer */
195         xfs_extnum_t            idx,    /* extent number to update/insert */
196         xfs_bmap_free_t         *flist, /* list of extents to be freed */
197         xfs_btree_cur_t         *cur,   /* if null, not a btree */
198         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
199         int                     *logflagsp,/* inode logging flags */
200         xfs_extdelta_t          *delta, /* Change made to incore extents */
201         int                     whichfork, /* data or attr fork */
202         int                     rsvd);   /* OK to allocate reserved blocks */
203
204 /*
205  * Remove the entry "free" from the free item list.  Prev points to the
206  * previous entry, unless "free" is the head of the list.
207  */
208 STATIC void
209 xfs_bmap_del_free(
210         xfs_bmap_free_t         *flist, /* free item list header */
211         xfs_bmap_free_item_t    *prev,  /* previous item on list, if any */
212         xfs_bmap_free_item_t    *free); /* list item to be freed */
213
214 /*
215  * Convert an extents-format file into a btree-format file.
216  * The new file will have a root block (in the inode) and a single child block.
217  */
218 STATIC int                                      /* error */
219 xfs_bmap_extents_to_btree(
220         xfs_trans_t             *tp,            /* transaction pointer */
221         xfs_inode_t             *ip,            /* incore inode pointer */
222         xfs_fsblock_t           *firstblock,    /* first-block-allocated */
223         xfs_bmap_free_t         *flist,         /* blocks freed in xaction */
224         xfs_btree_cur_t         **curp,         /* cursor returned to caller */
225         int                     wasdel,         /* converting a delayed alloc */
226         int                     *logflagsp,     /* inode logging flags */
227         int                     whichfork);     /* data or attr fork */
228
229 /*
230  * Convert a local file to an extents file.
231  * This code is sort of bogus, since the file data needs to get
232  * logged so it won't be lost.  The bmap-level manipulations are ok, though.
233  */
234 STATIC int                              /* error */
235 xfs_bmap_local_to_extents(
236         xfs_trans_t     *tp,            /* transaction pointer */
237         xfs_inode_t     *ip,            /* incore inode pointer */
238         xfs_fsblock_t   *firstblock,    /* first block allocated in xaction */
239         xfs_extlen_t    total,          /* total blocks needed by transaction */
240         int             *logflagsp,     /* inode logging flags */
241         int             whichfork);     /* data or attr fork */
242
243 /*
244  * Search the extents list for the inode, for the extent containing bno.
245  * If bno lies in a hole, point to the next entry.  If bno lies past eof,
246  * *eofp will be set, and *prevp will contain the last entry (null if none).
247  * Else, *lastxp will be set to the index of the found
248  * entry; *gotp will contain the entry.
249  */
250 STATIC xfs_bmbt_rec_host_t *            /* pointer to found extent entry */
251 xfs_bmap_search_extents(
252         xfs_inode_t     *ip,            /* incore inode pointer */
253         xfs_fileoff_t   bno,            /* block number searched for */
254         int             whichfork,      /* data or attr fork */
255         int             *eofp,          /* out: end of file found */
256         xfs_extnum_t    *lastxp,        /* out: last extent index */
257         xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
258         xfs_bmbt_irec_t *prevp);        /* out: previous extent entry found */
259
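/*
 * Illustrative caller sketch (not taken verbatim from this file): how the
 * out-parameters of xfs_bmap_search_extents() are typically consumed.
 * "ip", "bno" and "whichfork" are assumed to be the caller's locals; the
 * hole test roughly mirrors the one used by xfs_bmapi().
 */
#if 0
	xfs_bmbt_rec_host_t	*ep;	/* extent containing (or following) bno */
	xfs_bmbt_irec_t		got;	/* extent record found */
	xfs_bmbt_irec_t		prev;	/* previous extent, if any */
	xfs_extnum_t		lastx;	/* index of the extent found */
	int			eof;	/* set if bno is past the last extent */

	ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx,
				     &got, &prev);
	if (eof || got.br_startoff > bno) {
		/* bno falls in a hole or past EOF: space must be allocated */
	} else {
		/* bno is covered by "got": map the existing blocks */
	}
#endif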
260 /*
261  * Check the last inode extent to determine whether this allocation will result
262  * in blocks being allocated at the end of the file. When we allocate new data
263  * blocks at the end of the file which do not start at the previous data block,
264  * we will try to align the new blocks at stripe unit boundaries.
265  */
266 STATIC int                              /* error */
267 xfs_bmap_isaeof(
268         xfs_inode_t     *ip,            /* incore inode pointer */
269         xfs_fileoff_t   off,            /* file offset in fsblocks */
270         int             whichfork,      /* data or attribute fork */
271         char            *aeof);         /* return value */
272
273 /*
274  * Compute the worst-case number of indirect blocks that will be used
275  * for ip's delayed extent of length "len".
276  */
277 STATIC xfs_filblks_t
278 xfs_bmap_worst_indlen(
279         xfs_inode_t             *ip,    /* incore inode pointer */
280         xfs_filblks_t           len);   /* delayed extent length */
281
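/*
 * Simplified sketch of the worst-case indirect-block estimate (the real
 * routine differs in detail): at each bmap btree level, "len" records or
 * pointers need roughly ceil(len / maxrecs) blocks, summed over the levels.
 * "maxrecs" and "maxlevels" stand in for the per-mount btree geometry.
 */
#if 0
	xfs_filblks_t	rval = 0;	/* worst-case indirect blocks */
	int		level;

	for (level = 0; level < maxlevels && len > 1; level++) {
		/* blocks needed to hold "len" entries at this level */
		len = (len + maxrecs - 1) / maxrecs;
		rval += len;
	}
#endif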
282 #ifdef DEBUG
283 /*
284  * Perform various validation checks on the values being returned
285  * from xfs_bmapi().
286  */
287 STATIC void
288 xfs_bmap_validate_ret(
289         xfs_fileoff_t           bno,
290         xfs_filblks_t           len,
291         int                     flags,
292         xfs_bmbt_irec_t         *mval,
293         int                     nmap,
294         int                     ret_nmap);
295 #else
296 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
297 #endif /* DEBUG */
298
299 STATIC int
300 xfs_bmap_count_tree(
301         xfs_mount_t     *mp,
302         xfs_trans_t     *tp,
303         xfs_ifork_t     *ifp,
304         xfs_fsblock_t   blockno,
305         int             levelin,
306         int             *count);
307
308 STATIC void
309 xfs_bmap_count_leaves(
310         xfs_ifork_t             *ifp,
311         xfs_extnum_t            idx,
312         int                     numrecs,
313         int                     *count);
314
315 STATIC void
316 xfs_bmap_disk_count_leaves(
317         struct xfs_mount        *mp,
318         struct xfs_btree_block  *block,
319         int                     numrecs,
320         int                     *count);
321
322 /*
323  * Bmap internal routines.
324  */
325
326 STATIC int                              /* error */
327 xfs_bmbt_lookup_eq(
328         struct xfs_btree_cur    *cur,
329         xfs_fileoff_t           off,
330         xfs_fsblock_t           bno,
331         xfs_filblks_t           len,
332         int                     *stat)  /* success/failure */
333 {
334         cur->bc_rec.b.br_startoff = off;
335         cur->bc_rec.b.br_startblock = bno;
336         cur->bc_rec.b.br_blockcount = len;
337         return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
338 }
339
340 STATIC int                              /* error */
341 xfs_bmbt_lookup_ge(
342         struct xfs_btree_cur    *cur,
343         xfs_fileoff_t           off,
344         xfs_fsblock_t           bno,
345         xfs_filblks_t           len,
346         int                     *stat)  /* success/failure */
347 {
348         cur->bc_rec.b.br_startoff = off;
349         cur->bc_rec.b.br_startblock = bno;
350         cur->bc_rec.b.br_blockcount = len;
351         return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
352 }
353
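/*
 * Usage sketch of the lookup helpers above (assumes caller-local "cur",
 * "off", "bno", "len" and the error/done convention used by the functions
 * below): xfs_btree_lookup() sets *stat to 1 when a matching record was
 * found and the cursor is positioned on it, 0 otherwise; callers in this
 * file verify that with XFS_WANT_CORRUPTED_GOTO().
 */
#if 0
	int	stat;	/* 1: record found, cursor on it; 0: not found */

	error = xfs_bmbt_lookup_eq(cur, off, bno, len, &stat);
	if (error)
		goto done;
	XFS_WANT_CORRUPTED_GOTO(stat == 1, done);	/* record must exist */
#endif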
354 /*
355  * Update the record referred to by cur to the value given
356  * by [off, bno, len, state].
357  * This either works (return 0) or gets an EFSCORRUPTED error.
358  */
359 STATIC int
360 xfs_bmbt_update(
361         struct xfs_btree_cur    *cur,
362         xfs_fileoff_t           off,
363         xfs_fsblock_t           bno,
364         xfs_filblks_t           len,
365         xfs_exntst_t            state)
366 {
367         union xfs_btree_rec     rec;
368
369         xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
370         return xfs_btree_update(cur, &rec);
371 }
372
373 /*
374  * Called from xfs_bmap_add_attrfork to handle btree format files.
375  */
376 STATIC int                                      /* error */
377 xfs_bmap_add_attrfork_btree(
378         xfs_trans_t             *tp,            /* transaction pointer */
379         xfs_inode_t             *ip,            /* incore inode pointer */
380         xfs_fsblock_t           *firstblock,    /* first block allocated */
381         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
382         int                     *flags)         /* inode logging flags */
383 {
384         xfs_btree_cur_t         *cur;           /* btree cursor */
385         int                     error;          /* error return value */
386         xfs_mount_t             *mp;            /* file system mount struct */
387         int                     stat;           /* newroot status */
388
389         mp = ip->i_mount;
390         if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
391                 *flags |= XFS_ILOG_DBROOT;
392         else {
393                 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
394                 cur->bc_private.b.flist = flist;
395                 cur->bc_private.b.firstblock = *firstblock;
396                 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
397                         goto error0;
398                 /* must be at least one entry */
399                 XFS_WANT_CORRUPTED_GOTO(stat == 1, error0);
400                 if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
401                         goto error0;
402                 if (stat == 0) {
403                         xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
404                         return XFS_ERROR(ENOSPC);
405                 }
406                 *firstblock = cur->bc_private.b.firstblock;
407                 cur->bc_private.b.allocated = 0;
408                 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
409         }
410         return 0;
411 error0:
412         xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
413         return error;
414 }
415
416 /*
417  * Called from xfs_bmap_add_attrfork to handle extents format files.
418  */
419 STATIC int                                      /* error */
420 xfs_bmap_add_attrfork_extents(
421         xfs_trans_t             *tp,            /* transaction pointer */
422         xfs_inode_t             *ip,            /* incore inode pointer */
423         xfs_fsblock_t           *firstblock,    /* first block allocated */
424         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
425         int                     *flags)         /* inode logging flags */
426 {
427         xfs_btree_cur_t         *cur;           /* bmap btree cursor */
428         int                     error;          /* error return value */
429
430         if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
431                 return 0;
432         cur = NULL;
433         error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
434                 flags, XFS_DATA_FORK);
435         if (cur) {
436                 cur->bc_private.b.allocated = 0;
437                 xfs_btree_del_cursor(cur,
438                         error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
439         }
440         return error;
441 }
442
443 /*
444  * Called from xfs_bmap_add_attrfork to handle local format files.
445  */
446 STATIC int                                      /* error */
447 xfs_bmap_add_attrfork_local(
448         xfs_trans_t             *tp,            /* transaction pointer */
449         xfs_inode_t             *ip,            /* incore inode pointer */
450         xfs_fsblock_t           *firstblock,    /* first block allocated */
451         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
452         int                     *flags)         /* inode logging flags */
453 {
454         xfs_da_args_t           dargs;          /* args for dir/attr code */
455         int                     error;          /* error return value */
456         xfs_mount_t             *mp;            /* mount structure pointer */
457
458         if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
459                 return 0;
460         if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
461                 mp = ip->i_mount;
462                 memset(&dargs, 0, sizeof(dargs));
463                 dargs.dp = ip;
464                 dargs.firstblock = firstblock;
465                 dargs.flist = flist;
466                 dargs.total = mp->m_dirblkfsbs;
467                 dargs.whichfork = XFS_DATA_FORK;
468                 dargs.trans = tp;
469                 error = xfs_dir2_sf_to_block(&dargs);
470         } else
471                 error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
472                         XFS_DATA_FORK);
473         return error;
474 }
475
476 /*
477  * Called by xfs_bmapi to update file extent records and the btree
478  * after allocating space (or doing a delayed allocation).
479  */
480 STATIC int                              /* error */
481 xfs_bmap_add_extent(
482         xfs_inode_t             *ip,    /* incore inode pointer */
483         xfs_extnum_t            idx,    /* extent number to update/insert */
484         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
485         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
486         xfs_fsblock_t           *first, /* pointer to firstblock variable */
487         xfs_bmap_free_t         *flist, /* list of extents to be freed */
488         int                     *logflagsp, /* inode logging flags */
489         xfs_extdelta_t          *delta, /* Change made to incore extents */
490         int                     whichfork, /* data or attr fork */
491         int                     rsvd)   /* OK to use reserved data blocks */
492 {
493         xfs_btree_cur_t         *cur;   /* btree cursor or null */
494         xfs_filblks_t           da_new; /* new count del alloc blocks used */
495         xfs_filblks_t           da_old; /* old count del alloc blocks used */
496         int                     error;  /* error return value */
497         xfs_ifork_t             *ifp;   /* inode fork ptr */
498         int                     logflags; /* returned value */
499         xfs_extnum_t            nextents; /* number of extents in file now */
500
501         XFS_STATS_INC(xs_add_exlist);
502         cur = *curp;
503         ifp = XFS_IFORK_PTR(ip, whichfork);
504         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
505         ASSERT(idx <= nextents);
506         da_old = da_new = 0;
507         error = 0;
508         /*
509          * This is the first extent added to a new/empty file.
510          * Special case this one, so other routines get to assume there are
511          * already extents in the list.
512          */
513         if (nextents == 0) {
514                 xfs_iext_insert(ip, 0, 1, new,
515                                 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
516
517                 ASSERT(cur == NULL);
518                 ifp->if_lastex = 0;
519                 if (!isnullstartblock(new->br_startblock)) {
520                         XFS_IFORK_NEXT_SET(ip, whichfork, 1);
521                         logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
522                 } else
523                         logflags = 0;
524                 /* DELTA: single new extent */
525                 if (delta) {
526                         if (delta->xed_startoff > new->br_startoff)
527                                 delta->xed_startoff = new->br_startoff;
528                         if (delta->xed_blockcount <
529                                         new->br_startoff + new->br_blockcount)
530                                 delta->xed_blockcount = new->br_startoff +
531                                                 new->br_blockcount;
532                 }
533         }
534         /*
535          * Any kind of new delayed allocation goes here.
536          */
537         else if (isnullstartblock(new->br_startblock)) {
538                 if (cur)
539                         ASSERT((cur->bc_private.b.flags &
540                                 XFS_BTCUR_BPRV_WASDEL) == 0);
541                 if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, new,
542                                 &logflags, delta, rsvd)))
543                         goto done;
544         }
545         /*
546          * Real allocation off the end of the file.
547          */
548         else if (idx == nextents) {
549                 if (cur)
550                         ASSERT((cur->bc_private.b.flags &
551                                 XFS_BTCUR_BPRV_WASDEL) == 0);
552                 if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
553                                 &logflags, delta, whichfork)))
554                         goto done;
555         } else {
556                 xfs_bmbt_irec_t prev;   /* old extent at offset idx */
557
558                 /*
559                  * Get the record referred to by idx.
560                  */
561                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &prev);
562                 /*
563                  * If it's a real allocation record, and the new allocation ends
564                  * after the start of the referred to record, then we're filling
565                  * in a delayed or unwritten allocation with a real one, or
566                  * converting real back to unwritten.
567                  */
568                 if (!isnullstartblock(new->br_startblock) &&
569                     new->br_startoff + new->br_blockcount > prev.br_startoff) {
570                         if (prev.br_state != XFS_EXT_UNWRITTEN &&
571                             isnullstartblock(prev.br_startblock)) {
572                                 da_old = startblockval(prev.br_startblock);
573                                 if (cur)
574                                         ASSERT(cur->bc_private.b.flags &
575                                                 XFS_BTCUR_BPRV_WASDEL);
576                                 if ((error = xfs_bmap_add_extent_delay_real(ip,
577                                         idx, &cur, new, &da_new, first, flist,
578                                         &logflags, delta, rsvd)))
579                                         goto done;
580                         } else if (new->br_state == XFS_EXT_NORM) {
581                                 ASSERT(new->br_state == XFS_EXT_NORM);
582                                 if ((error = xfs_bmap_add_extent_unwritten_real(
583                                         ip, idx, &cur, new, &logflags, delta)))
584                                         goto done;
585                         } else {
586                                 ASSERT(new->br_state == XFS_EXT_UNWRITTEN);
587                                 if ((error = xfs_bmap_add_extent_unwritten_real(
588                                         ip, idx, &cur, new, &logflags, delta)))
589                                         goto done;
590                         }
591                         ASSERT(*curp == cur || *curp == NULL);
592                 }
593                 /*
594                  * Otherwise we're filling in a hole with an allocation.
595                  */
596                 else {
597                         if (cur)
598                                 ASSERT((cur->bc_private.b.flags &
599                                         XFS_BTCUR_BPRV_WASDEL) == 0);
600                         if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
601                                         new, &logflags, delta, whichfork)))
602                                 goto done;
603                 }
604         }
605
606         ASSERT(*curp == cur || *curp == NULL);
607         /*
608          * Convert to a btree if necessary.
609          */
610         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
611             XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
612                 int     tmp_logflags;   /* partial log flag return val */
613
614                 ASSERT(cur == NULL);
615                 error = xfs_bmap_extents_to_btree(ip->i_transp, ip, first,
616                         flist, &cur, da_old > 0, &tmp_logflags, whichfork);
617                 logflags |= tmp_logflags;
618                 if (error)
619                         goto done;
620         }
621         /*
622          * Adjust for changes in reserved delayed indirect blocks.
623          * Nothing to do for disk quotas here.
624          */
625         if (da_old || da_new) {
626                 xfs_filblks_t   nblks;
627
628                 nblks = da_new;
629                 if (cur)
630                         nblks += cur->bc_private.b.allocated;
631                 ASSERT(nblks <= da_old);
632                 if (nblks < da_old)
633                         xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
634                                 (int64_t)(da_old - nblks), rsvd);
635         }
636         /*
637          * Clear out the allocated field, done with it now in any case.
638          */
639         if (cur) {
640                 cur->bc_private.b.allocated = 0;
641                 *curp = cur;
642         }
643 done:
644 #ifdef DEBUG
645         if (!error)
646                 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
647 #endif
648         *logflagsp = logflags;
649         return error;
650 }
651
652 /*
653  * Called by xfs_bmap_add_extent to handle cases converting a delayed
654  * allocation to a real allocation.
655  */
656 STATIC int                              /* error */
657 xfs_bmap_add_extent_delay_real(
658         xfs_inode_t             *ip,    /* incore inode pointer */
659         xfs_extnum_t            idx,    /* extent number to update/insert */
660         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
661         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
662         xfs_filblks_t           *dnew,  /* new delayed-alloc indirect blocks */
663         xfs_fsblock_t           *first, /* pointer to firstblock variable */
664         xfs_bmap_free_t         *flist, /* list of extents to be freed */
665         int                     *logflagsp, /* inode logging flags */
666         xfs_extdelta_t          *delta, /* Change made to incore extents */
667         int                     rsvd)   /* OK to use reserved data block allocation */
668 {
669         xfs_btree_cur_t         *cur;   /* btree cursor */
670         int                     diff;   /* temp value */
671         xfs_bmbt_rec_host_t     *ep;    /* extent entry for idx */
672         int                     error;  /* error return value */
673         int                     i;      /* temp state */
674         xfs_ifork_t             *ifp;   /* inode fork pointer */
675         xfs_fileoff_t           new_endoff;     /* end offset of new entry */
676         xfs_bmbt_irec_t         r[3];   /* neighbor extent entries */
677                                         /* left is 0, right is 1, prev is 2 */
678         int                     rval=0; /* return value (logging flags) */
679         int                     state = 0;/* state bits, accessed thru macros */
680         xfs_filblks_t           temp=0; /* value for dnew calculations */
681         xfs_filblks_t           temp2=0;/* value for dnew calculations */
682         int                     tmp_rval;       /* partial logging flags */
683
684 #define LEFT            r[0]
685 #define RIGHT           r[1]
686 #define PREV            r[2]
687
688         /*
689          * Set up a bunch of variables to make the tests simpler.
690          */
691         cur = *curp;
692         ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
693         ep = xfs_iext_get_ext(ifp, idx);
694         xfs_bmbt_get_all(ep, &PREV);
695         new_endoff = new->br_startoff + new->br_blockcount;
696         ASSERT(PREV.br_startoff <= new->br_startoff);
697         ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
698
699         /*
700          * Set flags determining what part of the previous delayed allocation
701          * extent is being replaced by a real allocation.
702          */
703         if (PREV.br_startoff == new->br_startoff)
704                 state |= BMAP_LEFT_FILLING;
705         if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
706                 state |= BMAP_RIGHT_FILLING;
707
708         /*
709          * Check and set flags if this segment has a left neighbor.
710          * Don't set contiguous if the combined extent would be too large.
711          */
712         if (idx > 0) {
713                 state |= BMAP_LEFT_VALID;
714                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
715
716                 if (isnullstartblock(LEFT.br_startblock))
717                         state |= BMAP_LEFT_DELAY;
718         }
719
720         if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
721             LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
722             LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
723             LEFT.br_state == new->br_state &&
724             LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
725                 state |= BMAP_LEFT_CONTIG;
726
727         /*
728          * Check and set flags if this segment has a right neighbor.
729          * Don't set contiguous if the combined extent would be too large.
730          * Also check for all-three-contiguous being too large.
731          */
732         if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
733                 state |= BMAP_RIGHT_VALID;
734                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
735
736                 if (isnullstartblock(RIGHT.br_startblock))
737                         state |= BMAP_RIGHT_DELAY;
738         }
739
740         if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
741             new_endoff == RIGHT.br_startoff &&
742             new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
743             new->br_state == RIGHT.br_state &&
744             new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
745             ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
746                        BMAP_RIGHT_FILLING)) !=
747                       (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
748                        BMAP_RIGHT_FILLING) ||
749              LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
750                         <= MAXEXTLEN))
751                 state |= BMAP_RIGHT_CONTIG;
752
753         error = 0;
754         /*
755          * Switch out based on the FILLING and CONTIG state bits.
756          */
757         switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
758                          BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
759         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
760              BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
761                 /*
762                  * Filling in all of a previously delayed allocation extent.
763                  * The left and right neighbors are both contiguous with new.
764                  */
765                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
766                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
767                         LEFT.br_blockcount + PREV.br_blockcount +
768                         RIGHT.br_blockcount);
769                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
770
771                 xfs_iext_remove(ip, idx, 2, state);
772                 ip->i_df.if_lastex = idx - 1;
773                 ip->i_d.di_nextents--;
774                 if (cur == NULL)
775                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
776                 else {
777                         rval = XFS_ILOG_CORE;
778                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
779                                         RIGHT.br_startblock,
780                                         RIGHT.br_blockcount, &i)))
781                                 goto done;
782                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
783                         if ((error = xfs_btree_delete(cur, &i)))
784                                 goto done;
785                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
786                         if ((error = xfs_btree_decrement(cur, 0, &i)))
787                                 goto done;
788                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
789                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
790                                         LEFT.br_startblock,
791                                         LEFT.br_blockcount +
792                                         PREV.br_blockcount +
793                                         RIGHT.br_blockcount, LEFT.br_state)))
794                                 goto done;
795                 }
796                 *dnew = 0;
797                 /* DELTA: Three in-core extents are replaced by one. */
798                 temp = LEFT.br_startoff;
799                 temp2 = LEFT.br_blockcount +
800                         PREV.br_blockcount +
801                         RIGHT.br_blockcount;
802                 break;
803
804         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
805                 /*
806                  * Filling in all of a previously delayed allocation extent.
807                  * The left neighbor is contiguous, the right is not.
808                  */
809                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
810                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
811                         LEFT.br_blockcount + PREV.br_blockcount);
812                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
813
814                 ip->i_df.if_lastex = idx - 1;
815                 xfs_iext_remove(ip, idx, 1, state);
816                 if (cur == NULL)
817                         rval = XFS_ILOG_DEXT;
818                 else {
819                         rval = 0;
820                         if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
821                                         LEFT.br_startblock, LEFT.br_blockcount,
822                                         &i)))
823                                 goto done;
824                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
825                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
826                                         LEFT.br_startblock,
827                                         LEFT.br_blockcount +
828                                         PREV.br_blockcount, LEFT.br_state)))
829                                 goto done;
830                 }
831                 *dnew = 0;
832                 /* DELTA: Two in-core extents are replaced by one. */
833                 temp = LEFT.br_startoff;
834                 temp2 = LEFT.br_blockcount +
835                         PREV.br_blockcount;
836                 break;
837
838         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
839                 /*
840                  * Filling in all of a previously delayed allocation extent.
841                  * The right neighbor is contiguous, the left is not.
842                  */
843                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
844                 xfs_bmbt_set_startblock(ep, new->br_startblock);
845                 xfs_bmbt_set_blockcount(ep,
846                         PREV.br_blockcount + RIGHT.br_blockcount);
847                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
848
849                 ip->i_df.if_lastex = idx;
850                 xfs_iext_remove(ip, idx + 1, 1, state);
851                 if (cur == NULL)
852                         rval = XFS_ILOG_DEXT;
853                 else {
854                         rval = 0;
855                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
856                                         RIGHT.br_startblock,
857                                         RIGHT.br_blockcount, &i)))
858                                 goto done;
859                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
860                         if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
861                                         new->br_startblock,
862                                         PREV.br_blockcount +
863                                         RIGHT.br_blockcount, PREV.br_state)))
864                                 goto done;
865                 }
866                 *dnew = 0;
867                 /* DELTA: Two in-core extents are replaced by one. */
868                 temp = PREV.br_startoff;
869                 temp2 = PREV.br_blockcount +
870                         RIGHT.br_blockcount;
871                 break;
872
873         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
874                 /*
875                  * Filling in all of a previously delayed allocation extent.
876                  * Neither the left nor right neighbors are contiguous with
877                  * the new one.
878                  */
879                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
880                 xfs_bmbt_set_startblock(ep, new->br_startblock);
881                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
882
883                 ip->i_df.if_lastex = idx;
884                 ip->i_d.di_nextents++;
885                 if (cur == NULL)
886                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
887                 else {
888                         rval = XFS_ILOG_CORE;
889                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
890                                         new->br_startblock, new->br_blockcount,
891                                         &i)))
892                                 goto done;
893                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
894                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
895                         if ((error = xfs_btree_insert(cur, &i)))
896                                 goto done;
897                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
898                 }
899                 *dnew = 0;
900                 /* DELTA: The in-core extent described by new changed type. */
901                 temp = new->br_startoff;
902                 temp2 = new->br_blockcount;
903                 break;
904
905         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
906                 /*
907                  * Filling in the first part of a previous delayed allocation.
908                  * The left neighbor is contiguous.
909                  */
910                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
911                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
912                         LEFT.br_blockcount + new->br_blockcount);
913                 xfs_bmbt_set_startoff(ep,
914                         PREV.br_startoff + new->br_blockcount);
915                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
916
917                 temp = PREV.br_blockcount - new->br_blockcount;
918                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
919                 xfs_bmbt_set_blockcount(ep, temp);
920                 ip->i_df.if_lastex = idx - 1;
921                 if (cur == NULL)
922                         rval = XFS_ILOG_DEXT;
923                 else {
924                         rval = 0;
925                         if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
926                                         LEFT.br_startblock, LEFT.br_blockcount,
927                                         &i)))
928                                 goto done;
929                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
930                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
931                                         LEFT.br_startblock,
932                                         LEFT.br_blockcount +
933                                         new->br_blockcount,
934                                         LEFT.br_state)))
935                                 goto done;
936                 }
937                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
938                         startblockval(PREV.br_startblock));
939                 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
940                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
941                 *dnew = temp;
942                 /* DELTA: The boundary between two in-core extents moved. */
943                 temp = LEFT.br_startoff;
944                 temp2 = LEFT.br_blockcount +
945                         PREV.br_blockcount;
946                 break;
947
948         case BMAP_LEFT_FILLING:
949                 /*
950                  * Filling in the first part of a previous delayed allocation.
951                  * The left neighbor is not contiguous.
952                  */
953                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
954                 xfs_bmbt_set_startoff(ep, new_endoff);
955                 temp = PREV.br_blockcount - new->br_blockcount;
956                 xfs_bmbt_set_blockcount(ep, temp);
957                 xfs_iext_insert(ip, idx, 1, new, state);
958                 ip->i_df.if_lastex = idx;
959                 ip->i_d.di_nextents++;
960                 if (cur == NULL)
961                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
962                 else {
963                         rval = XFS_ILOG_CORE;
964                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
965                                         new->br_startblock, new->br_blockcount,
966                                         &i)))
967                                 goto done;
968                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
969                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
970                         if ((error = xfs_btree_insert(cur, &i)))
971                                 goto done;
972                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
973                 }
974                 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
975                     ip->i_d.di_nextents > ip->i_df.if_ext_max) {
976                         error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
977                                         first, flist, &cur, 1, &tmp_rval,
978                                         XFS_DATA_FORK);
979                         rval |= tmp_rval;
980                         if (error)
981                                 goto done;
982                 }
983                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
984                         startblockval(PREV.br_startblock) -
985                         (cur ? cur->bc_private.b.allocated : 0));
986                 ep = xfs_iext_get_ext(ifp, idx + 1);
987                 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
988                 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
989                 *dnew = temp;
990                 /* DELTA: One in-core extent is split in two. */
991                 temp = PREV.br_startoff;
992                 temp2 = PREV.br_blockcount;
993                 break;
994
995         case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
996                 /*
997                  * Filling in the last part of a previous delayed allocation.
998                  * The right neighbor is contiguous with the new allocation.
999                  */
1000                 temp = PREV.br_blockcount - new->br_blockcount;
1001                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1002                 trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_);
1003                 xfs_bmbt_set_blockcount(ep, temp);
1004                 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
1005                         new->br_startoff, new->br_startblock,
1006                         new->br_blockcount + RIGHT.br_blockcount,
1007                         RIGHT.br_state);
1008                 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
1009                 ip->i_df.if_lastex = idx + 1;
1010                 if (cur == NULL)
1011                         rval = XFS_ILOG_DEXT;
1012                 else {
1013                         rval = 0;
1014                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1015                                         RIGHT.br_startblock,
1016                                         RIGHT.br_blockcount, &i)))
1017                                 goto done;
1018                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1019                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1020                                         new->br_startblock,
1021                                         new->br_blockcount +
1022                                         RIGHT.br_blockcount,
1023                                         RIGHT.br_state)))
1024                                 goto done;
1025                 }
1026                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1027                         startblockval(PREV.br_startblock));
1028                 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1029                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1030                 *dnew = temp;
1031                 /* DELTA: The boundary between two in-core extents moved. */
1032                 temp = PREV.br_startoff;
1033                 temp2 = PREV.br_blockcount +
1034                         RIGHT.br_blockcount;
1035                 break;
1036
1037         case BMAP_RIGHT_FILLING:
1038                 /*
1039                  * Filling in the last part of a previous delayed allocation.
1040                  * The right neighbor is not contiguous.
1041                  */
1042                 temp = PREV.br_blockcount - new->br_blockcount;
1043                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1044                 xfs_bmbt_set_blockcount(ep, temp);
1045                 xfs_iext_insert(ip, idx + 1, 1, new, state);
1046                 ip->i_df.if_lastex = idx + 1;
1047                 ip->i_d.di_nextents++;
1048                 if (cur == NULL)
1049                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1050                 else {
1051                         rval = XFS_ILOG_CORE;
1052                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1053                                         new->br_startblock, new->br_blockcount,
1054                                         &i)))
1055                                 goto done;
1056                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1057                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
1058                         if ((error = xfs_btree_insert(cur, &i)))
1059                                 goto done;
1060                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1061                 }
1062                 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1063                     ip->i_d.di_nextents > ip->i_df.if_ext_max) {
1064                         error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
1065                                 first, flist, &cur, 1, &tmp_rval,
1066                                 XFS_DATA_FORK);
1067                         rval |= tmp_rval;
1068                         if (error)
1069                                 goto done;
1070                 }
1071                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1072                         startblockval(PREV.br_startblock) -
1073                         (cur ? cur->bc_private.b.allocated : 0));
1074                 ep = xfs_iext_get_ext(ifp, idx);
1075                 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1076                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1077                 *dnew = temp;
1078                 /* DELTA: One in-core extent is split in two. */
1079                 temp = PREV.br_startoff;
1080                 temp2 = PREV.br_blockcount;
1081                 break;
1082
1083         case 0:
1084                 /*
1085                  * Filling in the middle part of a previous delayed allocation.
1086                  * Contiguity is impossible here.
1087                  * This case is avoided almost all the time.
1088                  */
1089                 temp = new->br_startoff - PREV.br_startoff;
1090                 trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_);
1091                 xfs_bmbt_set_blockcount(ep, temp);
1092                 r[0] = *new;
1093                 r[1].br_state = PREV.br_state;
1094                 r[1].br_startblock = 0;
1095                 r[1].br_startoff = new_endoff;
1096                 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
1097                 r[1].br_blockcount = temp2;
1098                 xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
1099                 ip->i_df.if_lastex = idx + 1;
1100                 ip->i_d.di_nextents++;
1101                 if (cur == NULL)
1102                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1103                 else {
1104                         rval = XFS_ILOG_CORE;
1105                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1106                                         new->br_startblock, new->br_blockcount,
1107                                         &i)))
1108                                 goto done;
1109                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1110                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
1111                         if ((error = xfs_btree_insert(cur, &i)))
1112                                 goto done;
1113                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1114                 }
1115                 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1116                     ip->i_d.di_nextents > ip->i_df.if_ext_max) {
1117                         error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
1118                                         first, flist, &cur, 1, &tmp_rval,
1119                                         XFS_DATA_FORK);
1120                         rval |= tmp_rval;
1121                         if (error)
1122                                 goto done;
1123                 }
1124                 temp = xfs_bmap_worst_indlen(ip, temp);
1125                 temp2 = xfs_bmap_worst_indlen(ip, temp2);
1126                 diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
1127                         (cur ? cur->bc_private.b.allocated : 0));
1128                 if (diff > 0 &&
1129                     xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) {
1130                         /*
1131                          * Ick gross gag me with a spoon.
1132                          */
1133                         ASSERT(0);      /* want to see if this ever happens! */
1134                         while (diff > 0) {
1135                                 if (temp) {
1136                                         temp--;
1137                                         diff--;
1138                                         if (!diff ||
1139                                             !xfs_mod_incore_sb(ip->i_mount,
1140                                                     XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd))
1141                                                 break;
1142                                 }
1143                                 if (temp2) {
1144                                         temp2--;
1145                                         diff--;
1146                                         if (!diff ||
1147                                             !xfs_mod_incore_sb(ip->i_mount,
1148                                                     XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd))
1149                                                 break;
1150                                 }
1151                         }
1152                 }
1153                 ep = xfs_iext_get_ext(ifp, idx);
1154                 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1155                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1156                 trace_xfs_bmap_pre_update(ip, idx + 2, state, _THIS_IP_);
1157                 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2),
1158                         nullstartblock((int)temp2));
1159                 trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_);
1160                 *dnew = temp + temp2;
1161                 /* DELTA: One in-core extent is split in three. */
1162                 temp = PREV.br_startoff;
1163                 temp2 = PREV.br_blockcount;
1164                 break;
1165
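        /*
         * The remaining flag combinations cannot occur: the new extent can
         * only touch the left neighbour if it starts at PREV's start
         * (LEFT_FILLING), and the right neighbour only if it ends at PREV's
         * end (RIGHT_FILLING), so a CONTIG bit never appears without the
         * matching FILLING bit.
         */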
1166         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1167         case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1168         case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1169         case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1170         case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1171         case BMAP_LEFT_CONTIG:
1172         case BMAP_RIGHT_CONTIG:
1173                 /*
1174                  * These cases are all impossible.
1175                  */
1176                 ASSERT(0);
1177         }
1178         *curp = cur;
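        /*
         * If the caller supplied a delta, widen it to cover the file range
         * touched by this update: temp and temp2 were set in each case above
         * to the starting offset and block count of the affected range.
         */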
1179         if (delta) {
1180                 temp2 += temp;
1181                 if (delta->xed_startoff > temp)
1182                         delta->xed_startoff = temp;
1183                 if (delta->xed_blockcount < temp2)
1184                         delta->xed_blockcount = temp2;
1185         }
1186 done:
1187         *logflagsp = rval;
1188         return error;
1189 #undef  LEFT
1190 #undef  RIGHT
1191 #undef  PREV
1192 }
1193
1194 /*
1195  * Called by xfs_bmap_add_extent to handle cases converting an unwritten
1196  * allocation to a real allocation or vice versa.
1197  */
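/*
 * The new extent must lie entirely within the existing extent PREV (asserted
 * below).  Depending on whether it reaches PREV's start and/or end, and on
 * whether the neighbouring records can be merged, PREV is rewritten as one,
 * two or three in-core records, and the change is mirrored in the bmap btree
 * when a cursor is supplied.
 */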
1198 STATIC int                              /* error */
1199 xfs_bmap_add_extent_unwritten_real(
1200         xfs_inode_t             *ip,    /* incore inode pointer */
1201         xfs_extnum_t            idx,    /* extent number to update/insert */
1202         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
1203         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
1204         int                     *logflagsp, /* inode logging flags */
1205         xfs_extdelta_t          *delta) /* Change made to incore extents */
1206 {
1207         xfs_btree_cur_t         *cur;   /* btree cursor */
1208         xfs_bmbt_rec_host_t     *ep;    /* extent entry for idx */
1209         int                     error;  /* error return value */
1210         int                     i;      /* temp state */
1211         xfs_ifork_t             *ifp;   /* inode fork pointer */
1212         xfs_fileoff_t           new_endoff;     /* end offset of new entry */
1213         xfs_exntst_t            newext; /* new extent state */
1214         xfs_exntst_t            oldext; /* old extent state */
1215         xfs_bmbt_irec_t         r[3];   /* neighbor extent entries */
1216                                         /* left is 0, right is 1, prev is 2 */
1217         int                     rval=0; /* return value (logging flags) */
1218         int                     state = 0;/* state bits, accessed thru macros */
1219         xfs_filblks_t           temp=0;
1220         xfs_filblks_t           temp2=0;
1221
1222 #define LEFT            r[0]
1223 #define RIGHT           r[1]
1224 #define PREV            r[2]
1225         /*
1226          * Set up a bunch of variables to make the tests simpler.
1227          */
1228         error = 0;
1229         cur = *curp;
1230         ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1231         ep = xfs_iext_get_ext(ifp, idx);
1232         xfs_bmbt_get_all(ep, &PREV);
1233         newext = new->br_state;
1234         oldext = (newext == XFS_EXT_UNWRITTEN) ?
1235                 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
1236         ASSERT(PREV.br_state == oldext);
1237         new_endoff = new->br_startoff + new->br_blockcount;
1238         ASSERT(PREV.br_startoff <= new->br_startoff);
1239         ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1240
1241         /*
1242          * Set flags determining what part of the previous oldext allocation
1243          * extent is being replaced by a newext allocation.
1244          */
1245         if (PREV.br_startoff == new->br_startoff)
1246                 state |= BMAP_LEFT_FILLING;
1247         if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1248                 state |= BMAP_RIGHT_FILLING;
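        /*
         * For example, if PREV covers file blocks [0, 20) and new covers
         * [0, 8), only BMAP_LEFT_FILLING is set; if new covers all of
         * [0, 20), both FILLING bits are set and PREV is replaced outright.
         */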
1249
1250         /*
1251          * Check and set flags if this segment has a left neighbor.
1252          * Don't set contiguous if the combined extent would be too large.
1253          */
1254         if (idx > 0) {
1255                 state |= BMAP_LEFT_VALID;
1256                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
1257
1258                 if (isnullstartblock(LEFT.br_startblock))
1259                         state |= BMAP_LEFT_DELAY;
1260         }
1261
1262         if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1263             LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1264             LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1265             LEFT.br_state == newext &&
1266             LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1267                 state |= BMAP_LEFT_CONTIG;
1268
1269         /*
1270          * Check and set flags if this segment has a right neighbor.
1271          * Don't set contiguous if the combined extent would be too large.
1272          * Also check for all-three-contiguous being too large.
1273          */
1274         if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
1275                 state |= BMAP_RIGHT_VALID;
1276                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
1277                 if (isnullstartblock(RIGHT.br_startblock))
1278                         state |= BMAP_RIGHT_DELAY;
1279         }
1280
1281         if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1282             new_endoff == RIGHT.br_startoff &&
1283             new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1284             newext == RIGHT.br_state &&
1285             new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1286             ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1287                        BMAP_RIGHT_FILLING)) !=
1288                       (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1289                        BMAP_RIGHT_FILLING) ||
1290              LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1291                         <= MAXEXTLEN))
1292                 state |= BMAP_RIGHT_CONTIG;
1293
1294         /*
1295          * Switch out based on the FILLING and CONTIG state bits.
1296          */
1297         switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1298                          BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1299         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1300              BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1301                 /*
1302                  * Setting all of a previous oldext extent to newext.
1303                  * The left and right neighbors are both contiguous with new.
1304                  */
1305                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1306                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1307                         LEFT.br_blockcount + PREV.br_blockcount +
1308                         RIGHT.br_blockcount);
1309                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1310
1311                 xfs_iext_remove(ip, idx, 2, state);
1312                 ip->i_df.if_lastex = idx - 1;
1313                 ip->i_d.di_nextents -= 2;
1314                 if (cur == NULL)
1315                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1316                 else {
1317                         rval = XFS_ILOG_CORE;
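                        /*
                         * Remove the RIGHT and PREV records from the btree,
                         * then widen what is left (the LEFT record) to cover
                         * all three.
                         */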
1318                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1319                                         RIGHT.br_startblock,
1320                                         RIGHT.br_blockcount, &i)))
1321                                 goto done;
1322                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1323                         if ((error = xfs_btree_delete(cur, &i)))
1324                                 goto done;
1325                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1326                         if ((error = xfs_btree_decrement(cur, 0, &i)))
1327                                 goto done;
1328                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1329                         if ((error = xfs_btree_delete(cur, &i)))
1330                                 goto done;
1331                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1332                         if ((error = xfs_btree_decrement(cur, 0, &i)))
1333                                 goto done;
1334                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1335                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1336                                 LEFT.br_startblock,
1337                                 LEFT.br_blockcount + PREV.br_blockcount +
1338                                 RIGHT.br_blockcount, LEFT.br_state)))
1339                                 goto done;
1340                 }
1341                 /* DELTA: Three in-core extents are replaced by one. */
1342                 temp = LEFT.br_startoff;
1343                 temp2 = LEFT.br_blockcount +
1344                         PREV.br_blockcount +
1345                         RIGHT.br_blockcount;
1346                 break;
1347
1348         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1349                 /*
1350                  * Setting all of a previous oldext extent to newext.
1351                  * The left neighbor is contiguous, the right is not.
1352                  */
1353                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1354                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1355                         LEFT.br_blockcount + PREV.br_blockcount);
1356                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1357
1358                 ip->i_df.if_lastex = idx - 1;
1359                 xfs_iext_remove(ip, idx, 1, state);
1360                 ip->i_d.di_nextents--;
1361                 if (cur == NULL)
1362                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1363                 else {
1364                         rval = XFS_ILOG_CORE;
1365                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1366                                         PREV.br_startblock, PREV.br_blockcount,
1367                                         &i)))
1368                                 goto done;
1369                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1370                         if ((error = xfs_btree_delete(cur, &i)))
1371                                 goto done;
1372                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1373                         if ((error = xfs_btree_decrement(cur, 0, &i)))
1374                                 goto done;
1375                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1376                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1377                                 LEFT.br_startblock,
1378                                 LEFT.br_blockcount + PREV.br_blockcount,
1379                                 LEFT.br_state)))
1380                                 goto done;
1381                 }
1382                 /* DELTA: Two in-core extents are replaced by one. */
1383                 temp = LEFT.br_startoff;
1384                 temp2 = LEFT.br_blockcount +
1385                         PREV.br_blockcount;
1386                 break;
1387
1388         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1389                 /*
1390                  * Setting all of a previous oldext extent to newext.
1391                  * The right neighbor is contiguous, the left is not.
1392                  */
1393                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1394                 xfs_bmbt_set_blockcount(ep,
1395                         PREV.br_blockcount + RIGHT.br_blockcount);
1396                 xfs_bmbt_set_state(ep, newext);
1397                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1398                 ip->i_df.if_lastex = idx;
1399                 xfs_iext_remove(ip, idx + 1, 1, state);
1400                 ip->i_d.di_nextents--;
1401                 if (cur == NULL)
1402                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1403                 else {
1404                         rval = XFS_ILOG_CORE;
1405                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1406                                         RIGHT.br_startblock,
1407                                         RIGHT.br_blockcount, &i)))
1408                                 goto done;
1409                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1410                         if ((error = xfs_btree_delete(cur, &i)))
1411                                 goto done;
1412                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1413                         if ((error = xfs_btree_decrement(cur, 0, &i)))
1414                                 goto done;
1415                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1416                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1417                                 new->br_startblock,
1418                                 new->br_blockcount + RIGHT.br_blockcount,
1419                                 newext)))
1420                                 goto done;
1421                 }
1422                 /* DELTA: Two in-core extents are replaced by one. */
1423                 temp = PREV.br_startoff;
1424                 temp2 = PREV.br_blockcount +
1425                         RIGHT.br_blockcount;
1426                 break;
1427
1428         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1429                 /*
1430                  * Setting all of a previous oldext extent to newext.
1431                  * Neither the left nor right neighbors are contiguous with
1432                  * the new one.
1433                  */
1434                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1435                 xfs_bmbt_set_state(ep, newext);
1436                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1437
1438                 ip->i_df.if_lastex = idx;
1439                 if (cur == NULL)
1440                         rval = XFS_ILOG_DEXT;
1441                 else {
1442                         rval = 0;
1443                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1444                                         new->br_startblock, new->br_blockcount,
1445                                         &i)))
1446                                 goto done;
1447                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1448                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1449                                 new->br_startblock, new->br_blockcount,
1450                                 newext)))
1451                                 goto done;
1452                 }
1453                 /* DELTA: The in-core extent described by new changed type. */
1454                 temp = new->br_startoff;
1455                 temp2 = new->br_blockcount;
1456                 break;
1457
1458         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1459                 /*
1460                  * Setting the first part of a previous oldext extent to newext.
1461                  * The left neighbor is contiguous.
1462                  */
1463                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1464                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1465                         LEFT.br_blockcount + new->br_blockcount);
1466                 xfs_bmbt_set_startoff(ep,
1467                         PREV.br_startoff + new->br_blockcount);
1468                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1469
1470                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1471                 xfs_bmbt_set_startblock(ep,
1472                         new->br_startblock + new->br_blockcount);
1473                 xfs_bmbt_set_blockcount(ep,
1474                         PREV.br_blockcount - new->br_blockcount);
1475                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1476
1477                 ip->i_df.if_lastex = idx - 1;
1478                 if (cur == NULL)
1479                         rval = XFS_ILOG_DEXT;
1480                 else {
1481                         rval = 0;
1482                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1483                                         PREV.br_startblock, PREV.br_blockcount,
1484                                         &i)))
1485                                 goto done;
1486                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1487                         if ((error = xfs_bmbt_update(cur,
1488                                 PREV.br_startoff + new->br_blockcount,
1489                                 PREV.br_startblock + new->br_blockcount,
1490                                 PREV.br_blockcount - new->br_blockcount,
1491                                 oldext)))
1492                                 goto done;
1493                         if ((error = xfs_btree_decrement(cur, 0, &i)))
1494                                 goto done;
1495                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1496                                 LEFT.br_startblock,
1497                                 LEFT.br_blockcount + new->br_blockcount,
1498                                 LEFT.br_state)))
1499                                 goto done;
1500                 }
1501                 /* DELTA: The boundary between two in-core extents moved. */
1502                 temp = LEFT.br_startoff;
1503                 temp2 = LEFT.br_blockcount +
1504                         PREV.br_blockcount;
1505                 break;
1506
1507         case BMAP_LEFT_FILLING:
1508                 /*
1509                  * Setting the first part of a previous oldext extent to newext.
1510                  * The left neighbor is not contiguous.
1511                  */
1512                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1513                 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
1514                 xfs_bmbt_set_startoff(ep, new_endoff);
1515                 xfs_bmbt_set_blockcount(ep,
1516                         PREV.br_blockcount - new->br_blockcount);
1517                 xfs_bmbt_set_startblock(ep,
1518                         new->br_startblock + new->br_blockcount);
1519                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1520
1521                 xfs_iext_insert(ip, idx, 1, new, state);
1522                 ip->i_df.if_lastex = idx;
1523                 ip->i_d.di_nextents++;
1524                 if (cur == NULL)
1525                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1526                 else {
1527                         rval = XFS_ILOG_CORE;
1528                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1529                                         PREV.br_startblock, PREV.br_blockcount,
1530                                         &i)))
1531                                 goto done;
1532                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1533                         if ((error = xfs_bmbt_update(cur,
1534                                 PREV.br_startoff + new->br_blockcount,
1535                                 PREV.br_startblock + new->br_blockcount,
1536                                 PREV.br_blockcount - new->br_blockcount,
1537                                 oldext)))
1538                                 goto done;
1539                         cur->bc_rec.b = *new;
1540                         if ((error = xfs_btree_insert(cur, &i)))
1541                                 goto done;
1542                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1543                 }
1544                 /* DELTA: One in-core extent is split in two. */
1545                 temp = PREV.br_startoff;
1546                 temp2 = PREV.br_blockcount;
1547                 break;
1548
1549         case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1550                 /*
1551                  * Setting the last part of a previous oldext extent to newext.
1552                  * The right neighbor is contiguous with the new allocation.
1553                  */
1554                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1555                 trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_);
1556                 xfs_bmbt_set_blockcount(ep,
1557                         PREV.br_blockcount - new->br_blockcount);
1558                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1559                 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
1560                         new->br_startoff, new->br_startblock,
1561                         new->br_blockcount + RIGHT.br_blockcount, newext);
1562                 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
1563
1564                 ip->i_df.if_lastex = idx + 1;
1565                 if (cur == NULL)
1566                         rval = XFS_ILOG_DEXT;
1567                 else {
1568                         rval = 0;
1569                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1570                                         PREV.br_startblock,
1571                                         PREV.br_blockcount, &i)))
1572                                 goto done;
1573                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1574                         if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1575                                 PREV.br_startblock,
1576                                 PREV.br_blockcount - new->br_blockcount,
1577                                 oldext)))
1578                                 goto done;
1579                         if ((error = xfs_btree_increment(cur, 0, &i)))
1580                                 goto done;
1581                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1582                                 new->br_startblock,
1583                                 new->br_blockcount + RIGHT.br_blockcount,
1584                                 newext)))
1585                                 goto done;
1586                 }
1587                 /* DELTA: The boundary between two in-core extents moved. */
1588                 temp = PREV.br_startoff;
1589                 temp2 = PREV.br_blockcount +
1590                         RIGHT.br_blockcount;
1591                 break;
1592
1593         case BMAP_RIGHT_FILLING:
1594                 /*
1595                  * Setting the last part of a previous oldext extent to newext.
1596                  * The right neighbor is not contiguous.
1597                  */
1598                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1599                 xfs_bmbt_set_blockcount(ep,
1600                         PREV.br_blockcount - new->br_blockcount);
1601                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1602
1603                 xfs_iext_insert(ip, idx + 1, 1, new, state);
1604                 ip->i_df.if_lastex = idx + 1;
1605                 ip->i_d.di_nextents++;
1606                 if (cur == NULL)
1607                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1608                 else {
1609                         rval = XFS_ILOG_CORE;
1610                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1611                                         PREV.br_startblock, PREV.br_blockcount,
1612                                         &i)))
1613                                 goto done;
1614                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1615                         if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1616                                 PREV.br_startblock,
1617                                 PREV.br_blockcount - new->br_blockcount,
1618                                 oldext)))
1619                                 goto done;
1620                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1621                                         new->br_startblock, new->br_blockcount,
1622                                         &i)))
1623                                 goto done;
1624                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1625                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
1626                         if ((error = xfs_btree_insert(cur, &i)))
1627                                 goto done;
1628                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1629                 }
1630                 /* DELTA: One in-core extent is split in two. */
1631                 temp = PREV.br_startoff;
1632                 temp2 = PREV.br_blockcount;
1633                 break;
1634
1635         case 0:
1636                 /*
1637                  * Setting the middle part of a previous oldext extent to
1638                  * newext.  Contiguity is impossible here.
1639                  * One extent becomes three extents.
1640                  */
1641                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1642                 xfs_bmbt_set_blockcount(ep,
1643                         new->br_startoff - PREV.br_startoff);
1644                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1645
1646                 r[0] = *new;
1647                 r[1].br_startoff = new_endoff;
1648                 r[1].br_blockcount =
1649                         PREV.br_startoff + PREV.br_blockcount - new_endoff;
1650                 r[1].br_startblock = new->br_startblock + new->br_blockcount;
1651                 r[1].br_state = oldext;
1652                 xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
1653                 ip->i_df.if_lastex = idx + 1;
1654                 ip->i_d.di_nextents += 2;
1655                 if (cur == NULL)
1656                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1657                 else {
1658                         rval = XFS_ILOG_CORE;
1659                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1660                                         PREV.br_startblock, PREV.br_blockcount,
1661                                         &i)))
1662                                 goto done;
1663                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1664                         /* new right extent - oldext */
1665                         if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
1666                                 r[1].br_startblock, r[1].br_blockcount,
1667                                 r[1].br_state)))
1668                                 goto done;
1669                         /* new left extent - oldext */
1670                         cur->bc_rec.b = PREV;
1671                         cur->bc_rec.b.br_blockcount =
1672                                 new->br_startoff - PREV.br_startoff;
1673                         if ((error = xfs_btree_insert(cur, &i)))
1674                                 goto done;
1675                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1676                         /*
1677                          * Reset the cursor to the position of the new extent
1678                          * we are about to insert as we can't trust it after
1679                          * the previous insert.
1680                          */
1681                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1682                                         new->br_startblock, new->br_blockcount,
1683                                         &i)))
1684                                 goto done;
1685                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1686                         /* new middle extent - newext */
1687                         cur->bc_rec.b.br_state = new->br_state;
1688                         if ((error = xfs_btree_insert(cur, &i)))
1689                                 goto done;
1690                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1691                 }
1692                 /* DELTA: One in-core extent is split in three. */
1693                 temp = PREV.br_startoff;
1694                 temp2 = PREV.br_blockcount;
1695                 break;
1696
1697         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1698         case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1699         case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1700         case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1701         case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1702         case BMAP_LEFT_CONTIG:
1703         case BMAP_RIGHT_CONTIG:
1704                 /*
1705                  * These cases are all impossible.
1706                  */
1707                 ASSERT(0);
1708         }
1709         *curp = cur;
1710         if (delta) {
1711                 temp2 += temp;
1712                 if (delta->xed_startoff > temp)
1713                         delta->xed_startoff = temp;
1714                 if (delta->xed_blockcount < temp2)
1715                         delta->xed_blockcount = temp2;
1716         }
1717 done:
1718         *logflagsp = rval;
1719         return error;
1720 #undef  LEFT
1721 #undef  RIGHT
1722 #undef  PREV
1723 }
1724
1725 /*
1726  * Called by xfs_bmap_add_extent to handle cases converting a hole
1727  * to a delayed allocation.
1728  */
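/*
 * A delayed allocation owns no real disk blocks yet; its extent record
 * encodes a worst-case reservation of indirect (bmap btree) blocks via
 * nullstartblock().  When the new delalloc extent merges with a neighbour,
 * that reservation is recomputed for the combined extent and any surplus is
 * returned to the in-core free block counter.
 */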
1729 /*ARGSUSED*/
1730 STATIC int                              /* error */
1731 xfs_bmap_add_extent_hole_delay(
1732         xfs_inode_t             *ip,    /* incore inode pointer */
1733         xfs_extnum_t            idx,    /* extent number to update/insert */
1734         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
1735         int                     *logflagsp, /* inode logging flags */
1736         xfs_extdelta_t          *delta, /* Change made to incore extents */
1737         int                     rsvd)           /* OK to allocate reserved blocks */
1738 {
1739         xfs_bmbt_rec_host_t     *ep;    /* extent record for idx */
1740         xfs_ifork_t             *ifp;   /* inode fork pointer */
1741         xfs_bmbt_irec_t         left;   /* left neighbor extent entry */
1742         xfs_filblks_t           newlen=0;       /* new indirect size */
1743         xfs_filblks_t           oldlen=0;       /* old indirect size */
1744         xfs_bmbt_irec_t         right;  /* right neighbor extent entry */
1745         int                     state;  /* state bits, accessed thru macros */
1746         xfs_filblks_t           temp=0; /* temp for indirect calculations */
1747         xfs_filblks_t           temp2=0;
1748
1749         ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1750         ep = xfs_iext_get_ext(ifp, idx);
1751         state = 0;
1752         ASSERT(isnullstartblock(new->br_startblock));
1753
1754         /*
1755          * Check and set flags if this segment has a left neighbor
1756          */
1757         if (idx > 0) {
1758                 state |= BMAP_LEFT_VALID;
1759                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
1760
1761                 if (isnullstartblock(left.br_startblock))
1762                         state |= BMAP_LEFT_DELAY;
1763         }
1764
1765         /*
1766          * Check and set flags if the current (right) segment exists.
1767          * If it doesn't exist, we're converting the hole at end-of-file.
1768          */
1769         if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
1770                 state |= BMAP_RIGHT_VALID;
1771                 xfs_bmbt_get_all(ep, &right);
1772
1773                 if (isnullstartblock(right.br_startblock))
1774                         state |= BMAP_RIGHT_DELAY;
1775         }
1776
1777         /*
1778          * Set contiguity flags on the left and right neighbors.
1779          * Don't let extents get too large, even if the pieces are contiguous.
1780          */
1781         if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
1782             left.br_startoff + left.br_blockcount == new->br_startoff &&
1783             left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1784                 state |= BMAP_LEFT_CONTIG;
1785
1786         if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
1787             new->br_startoff + new->br_blockcount == right.br_startoff &&
1788             new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
1789             (!(state & BMAP_LEFT_CONTIG) ||
1790              (left.br_blockcount + new->br_blockcount +
1791               right.br_blockcount <= MAXEXTLEN)))
1792                 state |= BMAP_RIGHT_CONTIG;
1793
1794         /*
1795          * Switch out based on the contiguity flags.
1796          */
1797         switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
1798         case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1799                 /*
1800                  * New allocation is contiguous with delayed allocations
1801                  * on the left and on the right.
1802                  * Merge all three into a single extent record.
1803                  */
1804                 temp = left.br_blockcount + new->br_blockcount +
1805                         right.br_blockcount;
1806
1807                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1808                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
1809                 oldlen = startblockval(left.br_startblock) +
1810                         startblockval(new->br_startblock) +
1811                         startblockval(right.br_startblock);
1812                 newlen = xfs_bmap_worst_indlen(ip, temp);
1813                 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
1814                         nullstartblock((int)newlen));
1815                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1816
1817                 xfs_iext_remove(ip, idx, 1, state);
1818                 ip->i_df.if_lastex = idx - 1;
1819                 /* DELTA: Two in-core extents were replaced by one. */
1820                 temp2 = temp;
1821                 temp = left.br_startoff;
1822                 break;
1823
1824         case BMAP_LEFT_CONTIG:
1825                 /*
1826                  * New allocation is contiguous with a delayed allocation
1827                  * on the left.
1828                  * Merge the new allocation with the left neighbor.
1829                  */
1830                 temp = left.br_blockcount + new->br_blockcount;
1831                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1832                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
1833                 oldlen = startblockval(left.br_startblock) +
1834                         startblockval(new->br_startblock);
1835                 newlen = xfs_bmap_worst_indlen(ip, temp);
1836                 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
1837                         nullstartblock((int)newlen));
1838                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1839
1840                 ip->i_df.if_lastex = idx - 1;
1841                 /* DELTA: One in-core extent grew into a hole. */
1842                 temp2 = temp;
1843                 temp = left.br_startoff;
1844                 break;
1845
1846         case BMAP_RIGHT_CONTIG:
1847                 /*
1848                  * New allocation is contiguous with a delayed allocation
1849                  * on the right.
1850                  * Merge the new allocation with the right neighbor.
1851                  */
1852                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1853                 temp = new->br_blockcount + right.br_blockcount;
1854                 oldlen = startblockval(new->br_startblock) +
1855                         startblockval(right.br_startblock);
1856                 newlen = xfs_bmap_worst_indlen(ip, temp);
1857                 xfs_bmbt_set_allf(ep, new->br_startoff,
1858                         nullstartblock((int)newlen), temp, right.br_state);
1859                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1860
1861                 ip->i_df.if_lastex = idx;
1862                 /* DELTA: One in-core extent grew into a hole. */
1863                 temp2 = temp;
1864                 temp = new->br_startoff;
1865                 break;
1866
1867         case 0:
1868                 /*
1869                  * New allocation is not contiguous with another
1870                  * delayed allocation.
1871                  * Insert a new entry.
1872                  */
1873                 oldlen = newlen = 0;
1874                 xfs_iext_insert(ip, idx, 1, new, state);
1875                 ip->i_df.if_lastex = idx;
1876                 /* DELTA: A new in-core extent was added in a hole. */
1877                 temp2 = new->br_blockcount;
1878                 temp = new->br_startoff;
1879                 break;
1880         }
1881         if (oldlen != newlen) {
1882                 ASSERT(oldlen > newlen);
1883                 xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
1884                         (int64_t)(oldlen - newlen), rsvd);
1885                 /*
1886                  * Nothing to do for disk quota accounting here.
1887                  */
1888         }
1889         if (delta) {
1890                 temp2 += temp;
1891                 if (delta->xed_startoff > temp)
1892                         delta->xed_startoff = temp;
1893                 if (delta->xed_blockcount < temp2)
1894                         delta->xed_blockcount = temp2;
1895         }
1896         *logflagsp = 0;
1897         return 0;
1898 }
1899
1900 /*
1901  * Called by xfs_bmap_add_extent to handle cases converting a hole
1902  * to a real allocation.
1903  */
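/*
 * Unlike the delayed-allocation variant above, this may update the on-disk
 * bmap btree through the supplied cursor and can operate on either the data
 * or the attribute fork, so it returns inode logging flags in *logflagsp.
 */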
1904 STATIC int                              /* error */
1905 xfs_bmap_add_extent_hole_real(
1906         xfs_inode_t             *ip,    /* incore inode pointer */
1907         xfs_extnum_t            idx,    /* extent number to update/insert */
1908         xfs_btree_cur_t         *cur,   /* if null, not a btree */
1909         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
1910         int                     *logflagsp, /* inode logging flags */
1911         xfs_extdelta_t          *delta, /* Change made to incore extents */
1912         int                     whichfork) /* data or attr fork */
1913 {
1914         xfs_bmbt_rec_host_t     *ep;    /* pointer to extent entry ins. point */
1915         int                     error;  /* error return value */
1916         int                     i;      /* temp state */
1917         xfs_ifork_t             *ifp;   /* inode fork pointer */
1918         xfs_bmbt_irec_t         left;   /* left neighbor extent entry */
1919         xfs_bmbt_irec_t         right;  /* right neighbor extent entry */
1920         int                     rval=0; /* return value (logging flags) */
1921         int                     state;  /* state bits, accessed thru macros */
1922         xfs_filblks_t           temp=0;
1923         xfs_filblks_t           temp2=0;
1924
1925         ifp = XFS_IFORK_PTR(ip, whichfork);
1926         ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
1927         ep = xfs_iext_get_ext(ifp, idx);
1928         state = 0;
1929
1930         if (whichfork == XFS_ATTR_FORK)
1931                 state |= BMAP_ATTRFORK;
1932
1933         /*
1934          * Check and set flags if this segment has a left neighbor.
1935          */
1936         if (idx > 0) {
1937                 state |= BMAP_LEFT_VALID;
1938                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
1939                 if (isnullstartblock(left.br_startblock))
1940                         state |= BMAP_LEFT_DELAY;
1941         }
1942
1943         /*
1944          * Check and set flags if this segment has a current value.
1945          * Not true if we're inserting into the "hole" at eof.
1946          */
1947         if (idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
1948                 state |= BMAP_RIGHT_VALID;
1949                 xfs_bmbt_get_all(ep, &right);
1950                 if (isnullstartblock(right.br_startblock))
1951                         state |= BMAP_RIGHT_DELAY;
1952         }
1953
1954         /*
1955          * We're inserting a real allocation between "left" and "right".
1956          * Set the contiguity flags.  Don't let extents get too large.
1957          */
1958         if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1959             left.br_startoff + left.br_blockcount == new->br_startoff &&
1960             left.br_startblock + left.br_blockcount == new->br_startblock &&
1961             left.br_state == new->br_state &&
1962             left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1963                 state |= BMAP_LEFT_CONTIG;
1964
1965         if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1966             new->br_startoff + new->br_blockcount == right.br_startoff &&
1967             new->br_startblock + new->br_blockcount == right.br_startblock &&
1968             new->br_state == right.br_state &&
1969             new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
1970             (!(state & BMAP_LEFT_CONTIG) ||
1971              left.br_blockcount + new->br_blockcount +
1972              right.br_blockcount <= MAXEXTLEN))
1973                 state |= BMAP_RIGHT_CONTIG;
1974
1975         error = 0;
1976         /*
1977          * Select which case we're in here, and implement it.
1978          */
1979         switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
1980         case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1981                 /*
1982                  * New allocation is contiguous with real allocations on the
1983                  * left and on the right.
1984                  * Merge all three into a single extent record.
1985                  */
1986                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1987                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1988                         left.br_blockcount + new->br_blockcount +
1989                         right.br_blockcount);
1990                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1991
1992                 xfs_iext_remove(ip, idx, 1, state);
1993                 ifp->if_lastex = idx - 1;
1994                 XFS_IFORK_NEXT_SET(ip, whichfork,
1995                         XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
1996                 if (cur == NULL) {
1997                         rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
1998                 } else {
1999                         rval = XFS_ILOG_CORE;
2000                         if ((error = xfs_bmbt_lookup_eq(cur,
2001                                         right.br_startoff,
2002                                         right.br_startblock,
2003                                         right.br_blockcount, &i)))
2004                                 goto done;
2005                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2006                         if ((error = xfs_btree_delete(cur, &i)))
2007                                 goto done;
2008                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2009                         if ((error = xfs_btree_decrement(cur, 0, &i)))
2010                                 goto done;
2011                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2012                         if ((error = xfs_bmbt_update(cur, left.br_startoff,
2013                                         left.br_startblock,
2014                                         left.br_blockcount +
2015                                                 new->br_blockcount +
2016                                                 right.br_blockcount,
2017                                         left.br_state)))
2018                                 goto done;
2019                 }
2020                 /* DELTA: Two in-core extents were replaced by one. */
2021                 temp = left.br_startoff;
2022                 temp2 = left.br_blockcount +
2023                         new->br_blockcount +
2024                         right.br_blockcount;
2025                 break;
2026
2027         case BMAP_LEFT_CONTIG:
2028                 /*
2029                  * New allocation is contiguous with a real allocation
2030                  * on the left.
2031                  * Merge the new allocation with the left neighbor.
2032                  */
2033                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
2034                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
2035                         left.br_blockcount + new->br_blockcount);
2036                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
2037
2038                 ifp->if_lastex = idx - 1;
2039                 if (cur == NULL) {
2040                         rval = xfs_ilog_fext(whichfork);
2041                 } else {
2042                         rval = 0;
2043                         if ((error = xfs_bmbt_lookup_eq(cur,
2044                                         left.br_startoff,
2045                                         left.br_startblock,
2046                                         left.br_blockcount, &i)))
2047                                 goto done;
2048                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2049                         if ((error = xfs_bmbt_update(cur, left.br_startoff,
2050                                         left.br_startblock,
2051                                         left.br_blockcount +
2052                                                 new->br_blockcount,
2053                                         left.br_state)))
2054                                 goto done;
2055                 }
2056                 /* DELTA: One in-core extent grew. */
2057                 temp = left.br_startoff;
2058                 temp2 = left.br_blockcount +
2059                         new->br_blockcount;
2060                 break;
2061
2062         case BMAP_RIGHT_CONTIG:
2063                 /*
2064                  * New allocation is contiguous with a real allocation
2065                  * on the right.
2066                  * Merge the new allocation with the right neighbor.
2067                  */
2068                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
2069                 xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock,
2070                         new->br_blockcount + right.br_blockcount,
2071                         right.br_state);
2072                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
2073
2074                 ifp->if_lastex = idx;
2075                 if (cur == NULL) {
2076                         rval = xfs_ilog_fext(whichfork);
2077                 } else {
2078                         rval = 0;
2079                         if ((error = xfs_bmbt_lookup_eq(cur,
2080                                         right.br_startoff,
2081                                         right.br_startblock,
2082                                         right.br_blockcount, &i)))
2083                                 goto done;
2084                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2085                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
2086                                         new->br_startblock,
2087                                         new->br_blockcount +
2088                                                 right.br_blockcount,
2089                                         right.br_state)))
2090                                 goto done;
2091                 }
2092                 /* DELTA: One in-core extent grew. */
2093                 temp = new->br_startoff;
2094                 temp2 = new->br_blockcount +
2095                         right.br_blockcount;
2096                 break;
2097
2098         case 0:
2099                 /*
2100                  * New allocation is not contiguous with another
2101                  * real allocation.
2102                  * Insert a new entry.
2103                  */
2104                 xfs_iext_insert(ip, idx, 1, new, state);
2105                 ifp->if_lastex = idx;
2106                 XFS_IFORK_NEXT_SET(ip, whichfork,
2107                         XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2108                 if (cur == NULL) {
2109                         rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2110                 } else {
2111                         rval = XFS_ILOG_CORE;
2112                         if ((error = xfs_bmbt_lookup_eq(cur,
2113                                         new->br_startoff,
2114                                         new->br_startblock,
2115                                         new->br_blockcount, &i)))
2116                                 goto done;
2117                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
2118                         cur->bc_rec.b.br_state = new->br_state;
2119                         if ((error = xfs_btree_insert(cur, &i)))
2120                                 goto done;
2121                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2122                 }
2123                 /* DELTA: A new extent was added in a hole. */
2124                 temp = new->br_startoff;
2125                 temp2 = new->br_blockcount;
2126                 break;
2127         }
2128         if (delta) {
2129                 temp2 += temp;
2130                 if (delta->xed_startoff > temp)
2131                         delta->xed_startoff = temp;
2132                 if (delta->xed_blockcount < temp2)
2133                         delta->xed_blockcount = temp2;
2134         }
2135 done:
2136         *logflagsp = rval;
2137         return error;
2138 }
2139
2140 /*
2141  * Adjust the size of the new extent based on di_extsize and rt extsize.
2142  */
2143 STATIC int
2144 xfs_bmap_extsize_align(
2145         xfs_mount_t     *mp,
2146         xfs_bmbt_irec_t *gotp,          /* next extent pointer */
2147         xfs_bmbt_irec_t *prevp,         /* previous extent pointer */
2148         xfs_extlen_t    extsz,          /* align to this extent size */
2149         int             rt,             /* is this a realtime inode? */
2150         int             eof,            /* is extent at end-of-file? */
2151         int             delay,          /* creating delalloc extent? */
2152         int             convert,        /* overwriting unwritten extent? */
2153         xfs_fileoff_t   *offp,          /* in/out: aligned offset */
2154         xfs_extlen_t    *lenp)          /* in/out: aligned length */
2155 {
2156         xfs_fileoff_t   orig_off;       /* original offset */
2157         xfs_extlen_t    orig_alen;      /* original length */
2158         xfs_fileoff_t   orig_end;       /* original off+len */
2159         xfs_fileoff_t   nexto;          /* next file offset */
2160         xfs_fileoff_t   prevo;          /* previous file offset */
2161         xfs_fileoff_t   align_off;      /* temp for offset */
2162         xfs_extlen_t    align_alen;     /* temp for length */
2163         xfs_extlen_t    temp;           /* temp for calculations */
2164
2165         if (convert)
2166                 return 0;
2167
2168         orig_off = align_off = *offp;
2169         orig_alen = align_alen = *lenp;
2170         orig_end = orig_off + orig_alen;
2171
2172         /*
2173          * If this request overlaps an existing extent, then don't
2174          * attempt to perform any additional alignment.
2175          */
2176         if (!delay && !eof &&
2177             (orig_off >= gotp->br_startoff) &&
2178             (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2179                 return 0;
2180         }
2181
2182         /*
2183          * If the file offset is unaligned vs. the extent size
2184          * we need to align it.  This will be possible unless
2185          * the file was previously written with a kernel that didn't
2186          * perform this alignment, or if a truncate shot us in the
2187          * foot.
2188          */
2189         temp = do_mod(orig_off, extsz);
2190         if (temp) {
2191                 align_alen += temp;
2192                 align_off -= temp;
2193         }
2194         /*
2195          * Same adjustment for the end of the requested area.
2196          */
2197         if ((temp = (align_alen % extsz))) {
2198                 align_alen += extsz - temp;
2199         }
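        /*
         * For example, with extsz = 4 a request for blocks [5, 10) is first
         * widened to [4, 10) by the start adjustment above and then padded
         * to [4, 12) so that both ends are extent-size aligned.
         */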
2200         /*
2201          * If the previous block overlaps with this proposed allocation
2202          * then move the start forward without adjusting the length.
2203          */
2204         if (prevp->br_startoff != NULLFILEOFF) {
2205                 if (prevp->br_startblock == HOLESTARTBLOCK)
2206                         prevo = prevp->br_startoff;
2207                 else
2208                         prevo = prevp->br_startoff + prevp->br_blockcount;
2209         } else
2210                 prevo = 0;
2211         if (align_off != orig_off && align_off < prevo)
2212                 align_off = prevo;
2213         /*
2214          * If the next block overlaps with this proposed allocation
2215          * then move the start back without adjusting the length,
2216          * but not before offset 0.
2217          * This may of course make the start overlap the previous block,
2218          * and if we hit the offset 0 limit then the next block
2219          * can still overlap too.
2220          */
2221         if (!eof && gotp->br_startoff != NULLFILEOFF) {
2222                 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2223                     (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2224                         nexto = gotp->br_startoff + gotp->br_blockcount;
2225                 else
2226                         nexto = gotp->br_startoff;
2227         } else
2228                 nexto = NULLFILEOFF;
2229         if (!eof &&
2230             align_off + align_alen != orig_end &&
2231             align_off + align_alen > nexto)
2232                 align_off = nexto > align_alen ? nexto - align_alen : 0;
2233         /*
2234          * If we're now overlapping the next or previous extent that
2235          * means we can't fit an extsz piece in this hole.  Just move
2236          * the start forward to the first valid spot and set
2237          * the length so we hit the end.
2238          */
2239         if (align_off != orig_off && align_off < prevo)
2240                 align_off = prevo;
2241         if (align_off + align_alen != orig_end &&
2242             align_off + align_alen > nexto &&
2243             nexto != NULLFILEOFF) {
2244                 ASSERT(nexto > prevo);
2245                 align_alen = nexto - align_off;
2246         }
2247
2248         /*
2249          * If realtime, and the result isn't a multiple of the realtime
2250          * extent size we need to remove blocks until it is.
2251          */
2252         if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
2253                 /*
2254                  * We're not covering the original request, or
2255                  * we won't be able to once we fix the length.
2256                  */
2257                 if (orig_off < align_off ||
2258                     orig_end > align_off + align_alen ||
2259                     align_alen - temp < orig_alen)
2260                         return XFS_ERROR(EINVAL);
2261                 /*
2262                  * Try to fix it by moving the start up.
2263                  */
2264                 if (align_off + temp <= orig_off) {
2265                         align_alen -= temp;
2266                         align_off += temp;
2267                 }
2268                 /*
2269                  * Try to fix it by moving the end in.
2270                  */
2271                 else if (align_off + align_alen - temp >= orig_end)
2272                         align_alen -= temp;
2273                 /*
2274                  * Set the start to the minimum then trim the length.
2275                  */
2276                 else {
2277                         align_alen -= orig_off - align_off;
2278                         align_off = orig_off;
2279                         align_alen -= align_alen % mp->m_sb.sb_rextsize;
2280                 }
2281                 /*
2282                  * Result doesn't cover the request, fail it.
2283                  */
2284                 if (orig_off < align_off || orig_end > align_off + align_alen)
2285                         return XFS_ERROR(EINVAL);
2286         } else {
2287                 ASSERT(orig_off >= align_off);
2288                 ASSERT(orig_end <= align_off + align_alen);
2289         }
2290
2291 #ifdef DEBUG
2292         if (!eof && gotp->br_startoff != NULLFILEOFF)
2293                 ASSERT(align_off + align_alen <= gotp->br_startoff);
2294         if (prevp->br_startoff != NULLFILEOFF)
2295                 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
2296 #endif
2297
2298         *lenp = align_alen;
2299         *offp = align_off;
2300         return 0;
2301 }
2302
2303 #define XFS_ALLOC_GAP_UNITS     4
2304
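     /*
      * Adjust ap->rval, the proposed starting block, so that the allocation
      * sits next to an existing neighbouring extent where possible.  At EOF
      * we try to continue from the end of the previous real extent;
      * otherwise we pick whichever neighbour (previous or next) gives the
      * smaller gap, provided the candidate block is valid and, once a
      * firstblock has been chosen, stays in the same AG.
      * XFS_ALLOC_GAP_UNITS limits how large a file-offset gap, relative to
      * the allocation length, we will mirror in the chosen block number.
      */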
2305 STATIC void
2306 xfs_bmap_adjacent(
2307         xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
2308 {
2309         xfs_fsblock_t   adjust;         /* adjustment to block numbers */
2310         xfs_agnumber_t  fb_agno;        /* ag number of ap->firstblock */
2311         xfs_mount_t     *mp;            /* mount point structure */
2312         int             nullfb;         /* true if ap->firstblock isn't set */
2313         int             rt;             /* true if inode is realtime */
2314
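     /*
      * A candidate block x is usable if it lies within the realtime device
      * (for realtime files), or if it is in the same AG as block y and
      * within the valid AG count and size limits (for data device files).
      */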
2315 #define ISVALID(x,y)    \
2316         (rt ? \
2317                 (x) < mp->m_sb.sb_rblocks : \
2318                 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
2319                 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
2320                 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
2321
2322         mp = ap->ip->i_mount;
2323         nullfb = ap->firstblock == NULLFSBLOCK;
2324         rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
2325         fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
2326         /*
2327          * If allocating at eof, and there's a previous real block,
2328          * try to use its last block as our starting point.
2329          */
2330         if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF &&
2331             !isnullstartblock(ap->prevp->br_startblock) &&
2332             ISVALID(ap->prevp->br_startblock + ap->prevp->br_blockcount,
2333                     ap->prevp->br_startblock)) {
2334                 ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount;
2335                 /*
2336                  * Adjust for the gap between prevp and us.
2337                  */
2338                 adjust = ap->off -
2339                         (ap->prevp->br_startoff + ap->prevp->br_blockcount);
2340                 if (adjust &&
2341                     ISVALID(ap->rval + adjust, ap->prevp->br_startblock))
2342                         ap->rval += adjust;
2343         }
2344         /*
2345          * If not at eof, then compare the two neighbor blocks.
2346          * Figure out whether either one gives us a good starting point,
2347          * and pick the better one.
2348          */
2349         else if (!ap->eof) {
2350                 xfs_fsblock_t   gotbno;         /* right side block number */
2351                 xfs_fsblock_t   gotdiff=0;      /* right side difference */
2352                 xfs_fsblock_t   prevbno;        /* left side block number */
2353                 xfs_fsblock_t   prevdiff=0;     /* left side difference */
2354
2355                 /*
2356                  * If there's a previous (left) block, select a requested
2357                  * start block based on it.
2358                  */
2359                 if (ap->prevp->br_startoff != NULLFILEOFF &&
2360                     !isnullstartblock(ap->prevp->br_startblock) &&
2361                     (prevbno = ap->prevp->br_startblock +
2362                                ap->prevp->br_blockcount) &&
2363                     ISVALID(prevbno, ap->prevp->br_startblock)) {
2364                         /*
2365                          * Calculate gap to end of previous block.
2366                          */
2367                         adjust = prevdiff = ap->off -
2368                                 (ap->prevp->br_startoff +
2369                                  ap->prevp->br_blockcount);
2370                         /*
2371                          * Figure the startblock based on the previous block's
2372                          * end and the gap size.
2373                          * Heuristic!
2374                          * If the gap is large relative to the piece we're
2375                          * allocating, or using it gives us an invalid block
2376                          * number, then just use the end of the previous block.
2377                          */
2378                         if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
2379                             ISVALID(prevbno + prevdiff,
2380                                     ap->prevp->br_startblock))
2381                                 prevbno += adjust;
2382                         else
2383                                 prevdiff += adjust;
2384                         /*
2385                          * If the firstblock forbids it, can't use it,
2386                          * must use default.
2387                          */
2388                         if (!rt && !nullfb &&
2389                             XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
2390                                 prevbno = NULLFSBLOCK;
2391                 }
2392                 /*
2393                  * No previous block or can't follow it, just default.
2394                  */
2395                 else
2396                         prevbno = NULLFSBLOCK;
2397                 /*
2398                  * If there's a following (right) block, select a requested
2399                  * start block based on it.
2400                  */
2401                 if (!isnullstartblock(ap->gotp->br_startblock)) {
2402                         /*
2403                          * Calculate gap to start of next block.
2404                          */
2405                         adjust = gotdiff = ap->gotp->br_startoff - ap->off;
2406                         /*
2407                          * Figure the startblock based on the next block's
2408                          * start and the gap size.
2409                          */
2410                         gotbno = ap->gotp->br_startblock;
2411                         /*
2412                          * Heuristic!
2413                          * If the gap is large relative to the piece we're
2414                          * allocating, or using it gives us an invalid block
2415                          * number, then just use the start of the next block
2416                          * offset by our length.
2417                          */
2418                         if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
2419                             ISVALID(gotbno - gotdiff, gotbno))
2420                                 gotbno -= adjust;
2421                         else if (ISVALID(gotbno - ap->alen, gotbno)) {
2422                                 gotbno -= ap->alen;
2423                                 gotdiff += adjust - ap->alen;
2424                         } else
2425                                 gotdiff += adjust;
2426                         /*
2427                          * If the firstblock forbids it, can't use it,
2428                          * must use default.
2429                          */
2430                         if (!rt && !nullfb &&
2431                             XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
2432                                 gotbno = NULLFSBLOCK;
2433                 }
2434                 /*
2435                  * No next block, just default.
2436                  */
2437                 else
2438                         gotbno = NULLFSBLOCK;
2439                 /*
2440                  * If both are valid, pick the better one; else use the only
2441                  * good one; else ap->rval is already set (to 0 or the inode block).
2442                  */
2443                 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
2444                         ap->rval = prevdiff <= gotdiff ? prevbno : gotbno;
2445                 else if (prevbno != NULLFSBLOCK)
2446                         ap->rval = prevbno;
2447                 else if (gotbno != NULLFSBLOCK)
2448                         ap->rval = gotbno;
2449         }
2450 #undef ISVALID
2451 }
2452
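     /*
      * Allocate space for the extent on the realtime subvolume: align the
      * request to the realtime extent size, pick a target block, and do the
      * allocation in realtime extent units via xfs_rtallocate_extent.
      */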
2453 STATIC int
2454 xfs_bmap_rtalloc(
2455         xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
2456 {
2457         xfs_alloctype_t atype = 0;      /* type for allocation routines */
2458         int             error;          /* error return value */
2459         xfs_mount_t     *mp;            /* mount point structure */
2460         xfs_extlen_t    prod = 0;       /* product factor for allocators */
2461         xfs_extlen_t    ralen = 0;      /* realtime allocation length */
2462         xfs_extlen_t    align;          /* minimum allocation alignment */
2463         xfs_rtblock_t   rtb;
2464
2465         mp = ap->ip->i_mount;
2466         align = xfs_get_extsz_hint(ap->ip);
2467         prod = align / mp->m_sb.sb_rextsize;
2468         error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2469                                         align, 1, ap->eof, 0,
2470                                         ap->conv, &ap->off, &ap->alen);
2471         if (error)
2472                 return error;
2473         ASSERT(ap->alen);
2474         ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
2475
2476         /*
2477          * If the offset & length are not perfectly aligned
2478          * then kill prod, it will just get us in trouble.
2479          */
2480         if (do_mod(ap->off, align) || ap->alen % align)
2481                 prod = 1;
2482         /*
2483          * Set ralen to be the actual requested length in rtextents.
2484          */
2485         ralen = ap->alen / mp->m_sb.sb_rextsize;
2486         /*
2487          * If the old value was close enough to MAXEXTLEN that
2488          * we rounded up to it, cut it back so it's valid again.
2489          * Note that if the original request was bigger than MAXEXTLEN,
2490          * we never see that length here and so can't adjust the
2491          * starting point to match it.
2492          */
2493         if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
2494                 ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
2495         /*
2496          * If it's an allocation to an empty file at offset 0,
2497          * pick an extent that will space things out in the rt area.
2498          */
2499         if (ap->eof && ap->off == 0) {
2500                 xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
2501
2502                 error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
2503                 if (error)
2504                         return error;
2505                 ap->rval = rtx * mp->m_sb.sb_rextsize;
2506         } else {
2507                 ap->rval = 0;
2508         }
2509
2510         xfs_bmap_adjacent(ap);
2511
2512         /*
2513          * Realtime allocation, done through xfs_rtallocate_extent.
2514          */
2515         atype = ap->rval == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
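     /*
      * xfs_rtallocate_extent works in units of realtime extents, so convert
      * the target block number from filesystem blocks; the result is
      * converted back below.
      */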
2516         do_div(ap->rval, mp->m_sb.sb_rextsize);
2517         rtb = ap->rval;
2518         ap->alen = ralen;
2519         if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen,
2520                                 &ralen, atype, ap->wasdel, prod, &rtb)))
2521                 return error;
2522         if (rtb == NULLFSBLOCK && prod > 1 &&
2523             (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1,
2524                                            ap->alen, &ralen, atype,
2525                                            ap->wasdel, 1, &rtb)))
2526                 return error;
2527         ap->rval = rtb;
2528         if (ap->rval != NULLFSBLOCK) {
2529                 ap->rval *= mp->m_sb.sb_rextsize;
2530                 ralen *= mp->m_sb.sb_rextsize;
2531                 ap->alen = ralen;
2532                 ap->ip->i_d.di_nblocks += ralen;
2533                 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2534                 if (ap->wasdel)
2535                         ap->ip->i_delayed_blks -= ralen;
2536                 /*
2537                  * Adjust the disk quota also. This was reserved
2538                  * earlier.
2539                  */
2540                 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
2541                         ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
2542                                         XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
2543         } else {
2544                 ap->alen = 0;
2545         }
2546         return 0;
2547 }
2548
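     /*
      * Called when no firstblock has been chosen yet for this transaction:
      * pick the allocation type and scan the AGs to work out a sensible
      * minimum allocation length (args->minlen), based on the longest free
      * extent seen.
      */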
2549 STATIC int
2550 xfs_bmap_btalloc_nullfb(
2551         struct xfs_bmalloca     *ap,
2552         struct xfs_alloc_arg    *args,
2553         xfs_extlen_t            *blen)
2554 {
2555         struct xfs_mount        *mp = ap->ip->i_mount;
2556         struct xfs_perag        *pag;
2557         xfs_agnumber_t          ag, startag;
2558         int                     notinit = 0;
2559         int                     error;
2560
2561         if (ap->userdata && xfs_inode_is_filestream(ap->ip))
2562                 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2563         else
2564                 args->type = XFS_ALLOCTYPE_START_BNO;
2565         args->total = ap->total;
2566
2567         /*
2568          * Search for an allocation group with a single extent large enough
2569          * for the request.  If one isn't found, then adjust the minimum
2570          * allocation size to the largest space found.
2571          */
2572         startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
2573         if (startag == NULLAGNUMBER)
2574                 startag = ag = 0;
2575
2576         pag = xfs_perag_get(mp, ag);
2577         while (*blen < ap->alen) {
2578                 if (!pag->pagf_init) {
2579                         error = xfs_alloc_pagf_init(mp, args->tp, ag,
2580                                                     XFS_ALLOC_FLAG_TRYLOCK);
2581                         if (error) {
2582                                 xfs_perag_put(pag);
2583                                 return error;
2584                         }
2585                 }
2586
2587                 /*
2588                  * See xfs_alloc_fix_freelist...
2589                  */
2590                 if (pag->pagf_init) {
2591                         xfs_extlen_t    longest;
2592                         longest = xfs_alloc_longest_free_extent(mp, pag);
2593                         if (*blen < longest)
2594                                 *blen = longest;
2595                 } else
2596                         notinit = 1;
2597
2598                 if (xfs_inode_is_filestream(ap->ip)) {
2599                         if (*blen >= ap->alen)
2600                                 break;
2601
2602                         if (ap->userdata) {
2603                                 /*
2604                                  * If startag is an invalid AG, we've
2605                                  * come here once before and
2606                                  * xfs_filestream_new_ag picked the
2607                                  * best currently available.
2608                                  *
2609                                  * Don't continue looping, since we
2610                                  * could loop forever.
2611                                  */
2612                                 if (startag == NULLAGNUMBER)
2613                                         break;
2614
2615                                 error = xfs_filestream_new_ag(ap, &ag);
2616                                 xfs_perag_put(pag);
2617                                 if (error)
2618                                         return error;
2619
2620                                 /* loop again to set 'blen' */
2621                                 startag = NULLAGNUMBER;
2622                                 pag = xfs_perag_get(mp, ag);
2623                                 continue;
2624                         }
2625                 }
2626                 if (++ag == mp->m_sb.sb_agcount)
2627                         ag = 0;
2628                 if (ag == startag)
2629                         break;
2630                 xfs_perag_put(pag);
2631                 pag = xfs_perag_get(mp, ag);
2632         }
2633         xfs_perag_put(pag);
2634
2635         /*
2636          * Since the above loop did a BUF_TRYLOCK, it may have skipped
2637          * uninitialized AGs, so there may still be space for this request.
2638          */
2639         if (notinit || *blen < ap->minlen)
2640                 args->minlen = ap->minlen;
2641         /*
2642          * If the best seen length is less than the request
2643          * length, use the best as the minimum.
2644          */
2645         else if (*blen < ap->alen)
2646                 args->minlen = *blen;
2647         /*
2648          * Otherwise we've seen an extent as big as alen,
2649          * use that as the minimum.
2650          */
2651         else
2652                 args->minlen = ap->alen;
2653
2654         /*
2655          * set the failure fallback case to look in the selected
2656          * AG as the stream may have moved.
2657          */
2658         if (xfs_inode_is_filestream(ap->ip))
2659                 ap->rval = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
2660
2661         return 0;
2662 }
2663
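     /*
      * Allocate space for the extent from the data device: apply any extent
      * size hint, choose a target block, and call xfs_alloc_vextent,
      * retrying with progressively relaxed constraints (alignment, minimum
      * length, allocation group) until space is found or we give up.
      */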
2664 STATIC int
2665 xfs_bmap_btalloc(
2666         xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
2667 {
2668         xfs_mount_t     *mp;            /* mount point structure */
2669         xfs_alloctype_t atype = 0;      /* type for allocation routines */
2670         xfs_extlen_t    align;          /* minimum allocation alignment */
2671         xfs_agnumber_t  fb_agno;        /* ag number of ap->firstblock */
2672         xfs_agnumber_t  ag;
2673         xfs_alloc_arg_t args;
2674         xfs_extlen_t    blen;
2675         xfs_extlen_t    nextminlen = 0;
2676         int             nullfb;         /* true if ap->firstblock isn't set */
2677         int             isaligned;
2678         int             tryagain;
2679         int             error;
2680
2681         mp = ap->ip->i_mount;
2682         align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
2683         if (unlikely(align)) {
2684                 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2685                                                 align, 0, ap->eof, 0, ap->conv,
2686                                                 &ap->off, &ap->alen);
2687                 ASSERT(!error);
2688                 ASSERT(ap->alen);
2689         }
2690         nullfb = ap->firstblock == NULLFSBLOCK;
2691         fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
2692         if (nullfb) {
2693                 if (ap->userdata && xfs_inode_is_filestream(ap->ip)) {
2694                         ag = xfs_filestream_lookup_ag(ap->ip);
2695                         ag = (ag != NULLAGNUMBER) ? ag : 0;
2696                         ap->rval = XFS_AGB_TO_FSB(mp, ag, 0);
2697                 } else {
2698                         ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
2699                 }
2700         } else
2701                 ap->rval = ap->firstblock;
2702
2703         xfs_bmap_adjacent(ap);
2704
2705         /*
2706          * If allowed, use ap->rval; otherwise must use firstblock since
2707          * it's in the right allocation group.
2708          */
2709         if (!nullfb && XFS_FSB_TO_AGNO(mp, ap->rval) != fb_agno)
2710                 ap->rval = ap->firstblock;
2713         /*
2714          * Normal allocation, done through xfs_alloc_vextent.
2715          */
2716         tryagain = isaligned = 0;
2717         args.tp = ap->tp;
2718         args.mp = mp;
2719         args.fsbno = ap->rval;
2720         args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks);
2721         args.firstblock = ap->firstblock;
2722         blen = 0;
2723         if (nullfb) {
2724                 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
2725                 if (error)
2726                         return error;
2727         } else if (ap->low) {
2728                 if (xfs_inode_is_filestream(ap->ip))
2729                         args.type = XFS_ALLOCTYPE_FIRST_AG;
2730                 else
2731                         args.type = XFS_ALLOCTYPE_START_BNO;
2732                 args.total = args.minlen = ap->minlen;
2733         } else {
2734                 args.type = XFS_ALLOCTYPE_NEAR_BNO;
2735                 args.total = ap->total;
2736                 args.minlen = ap->minlen;
2737         }
2738         /* apply extent size hints if obtained earlier */
2739         if (unlikely(align)) {
2740                 args.prod = align;
2741                 if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
2742                         args.mod = (xfs_extlen_t)(args.prod - args.mod);
2743         } else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) {
2744                 args.prod = 1;
2745                 args.mod = 0;
2746         } else {
2747                 args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog;
2748                 if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod))))
2749                         args.mod = (xfs_extlen_t)(args.prod - args.mod);
2750         }
2751         /*
2752          * If we are not low on available data blocks, and the
2753          * underlying logical volume manager is a stripe, and the
2754          * file offset is zero, then try to allocate data blocks on
2755          * a stripe unit boundary.
2756          * NOTE: ap->aeof is only set if the allocation length
2757          * is >= the stripe unit and the allocation offset is
2758          * at the end of file.
2759          */
2760         if (!ap->low && ap->aeof) {
2761                 if (!ap->off) {
2762                         args.alignment = mp->m_dalign;
2763                         atype = args.type;
2764                         isaligned = 1;
2765                         /*
2766                          * Adjust for alignment
2767                          */
2768                         if (blen > args.alignment && blen <= ap->alen)
2769                                 args.minlen = blen - args.alignment;
2770                         args.minalignslop = 0;
2771                 } else {
2772                         /*
2773                          * First try an exact bno allocation.
2774                          * If it fails then do a near or start bno
2775                          * allocation with alignment turned on.
2776                          */
2777                         atype = args.type;
2778                         tryagain = 1;
2779                         args.type = XFS_ALLOCTYPE_THIS_BNO;
2780                         args.alignment = 1;
2781                         /*
2782                          * Compute the minlen+alignment for the
2783                          * next case.  Set slop so that the value
2784                          * of minlen+alignment+slop doesn't go up
2785                          * between the calls.
2786                          */
2787                         if (blen > mp->m_dalign && blen <= ap->alen)
2788                                 nextminlen = blen - mp->m_dalign;
2789                         else
2790                                 nextminlen = args.minlen;
2791                         if (nextminlen + mp->m_dalign > args.minlen + 1)
2792                                 args.minalignslop =
2793                                         nextminlen + mp->m_dalign -
2794                                         args.minlen - 1;
2795                         else
2796                                 args.minalignslop = 0;
2797                 }
2798         } else {
2799                 args.alignment = 1;
2800                 args.minalignslop = 0;
2801         }
2802         args.minleft = ap->minleft;
2803         args.wasdel = ap->wasdel;
2804         args.isfl = 0;
2805         args.userdata = ap->userdata;
2806         if ((error = xfs_alloc_vextent(&args)))
2807                 return error;
2808         if (tryagain && args.fsbno == NULLFSBLOCK) {
2809                 /*
2810                  * Exact allocation failed. Now try with alignment
2811                  * turned on.
2812                  */
2813                 args.type = atype;
2814                 args.fsbno = ap->rval;
2815                 args.alignment = mp->m_dalign;
2816                 args.minlen = nextminlen;
2817                 args.minalignslop = 0;
2818                 isaligned = 1;
2819                 if ((error = xfs_alloc_vextent(&args)))
2820                         return error;
2821         }
2822         if (isaligned && args.fsbno == NULLFSBLOCK) {
2823                 /*
2824                  * Aligned allocation failed, so turn off alignment and
2825                  * try again.
2826                  */
2827                 args.type = atype;
2828                 args.fsbno = ap->rval;
2829                 args.alignment = 0;
2830                 if ((error = xfs_alloc_vextent(&args)))
2831                         return error;
2832         }
2833         if (args.fsbno == NULLFSBLOCK && nullfb &&
2834             args.minlen > ap->minlen) {
2835                 args.minlen = ap->minlen;
2836                 args.type = XFS_ALLOCTYPE_START_BNO;
2837                 args.fsbno = ap->rval;
2838                 if ((error = xfs_alloc_vextent(&args)))
2839                         return error;
2840         }
2841         if (args.fsbno == NULLFSBLOCK && nullfb) {
2842                 args.fsbno = 0;
2843                 args.type = XFS_ALLOCTYPE_FIRST_AG;
2844                 args.total = ap->minlen;
2845                 args.minleft = 0;
2846                 if ((error = xfs_alloc_vextent(&args)))
2847                         return error;
2848                 ap->low = 1;
2849         }
2850         if (args.fsbno != NULLFSBLOCK) {
2851                 ap->firstblock = ap->rval = args.fsbno;
2852                 ASSERT(nullfb || fb_agno == args.agno ||
2853                        (ap->low && fb_agno < args.agno));
2854                 ap->alen = args.len;
2855                 ap->ip->i_d.di_nblocks += args.len;
2856                 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2857                 if (ap->wasdel)
2858                         ap->ip->i_delayed_blks -= args.len;
2859                 /*
2860                  * Adjust the disk quota also. This was reserved
2861                  * earlier.
2862                  */
2863                 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
2864                         ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
2865                                         XFS_TRANS_DQ_BCOUNT,
2866                         (long) args.len);
2867         } else {
2868                 ap->rval = NULLFSBLOCK;
2869                 ap->alen = 0;
2870         }
2871         return 0;
2872 }
2873
2874 /*
2875  * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
2876  * It figures out where to ask the underlying allocator to put the new extent.
2877  */
2878 STATIC int
2879 xfs_bmap_alloc(
2880         xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
2881 {
2882         if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata)
2883                 return xfs_bmap_rtalloc(ap);
2884         return xfs_bmap_btalloc(ap);
2885 }
2886
2887 /*
2888  * Transform a btree format file with only one leaf node, where the
2889  * extents list will fit in the inode, into an extents format file.
2890  * Since the file extents are already in-core, all we have to do is
2891  * give up the space for the btree root and pitch the leaf block.
2892  */
2893 STATIC int                              /* error */
2894 xfs_bmap_btree_to_extents(
2895         xfs_trans_t             *tp,    /* transaction pointer */
2896         xfs_inode_t             *ip,    /* incore inode pointer */
2897         xfs_btree_cur_t         *cur,   /* btree cursor */
2898         int                     *logflagsp, /* inode logging flags */
2899         int                     whichfork)  /* data or attr fork */
2900 {
2901         /* REFERENCED */
2902         struct xfs_btree_block  *cblock;/* child btree block */
2903         xfs_fsblock_t           cbno;   /* child block number */
2904         xfs_buf_t               *cbp;   /* child block's buffer */
2905         int                     error;  /* error return value */
2906         xfs_ifork_t             *ifp;   /* inode fork data */
2907         xfs_mount_t             *mp;    /* mount point structure */
2908         __be64                  *pp;    /* ptr to block address */
2909         struct xfs_btree_block  *rblock;/* root btree block */
2910
2911         mp = ip->i_mount;
2912         ifp = XFS_IFORK_PTR(ip, whichfork);
2913         ASSERT(ifp->if_flags & XFS_IFEXTENTS);
2914         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
2915         rblock = ifp->if_broot;
2916         ASSERT(be16_to_cpu(rblock->bb_level) == 1);
2917         ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
2918         ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
2919         pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
2920         cbno = be64_to_cpu(*pp);
2921         *logflagsp = 0;
2922 #ifdef DEBUG
2923         if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
2924                 return error;
2925 #endif
2926         if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp,
2927                         XFS_BMAP_BTREE_REF)))
2928                 return error;
2929         cblock = XFS_BUF_TO_BLOCK(cbp);
2930         if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
2931                 return error;
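     /*
      * The former child block is no longer needed: queue it for freeing at
      * transaction commit, drop the inode block count and quota block
      * count, and invalidate the buffer.
      */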
2932         xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
2933         ip->i_d.di_nblocks--;
2934         xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
2935         xfs_trans_binval(tp, cbp);
2936         if (cur->bc_bufs[0] == cbp)
2937                 cur->bc_bufs[0] = NULL;
2938         xfs_iroot_realloc(ip, -1, whichfork);
2939         ASSERT(ifp->if_broot == NULL);
2940         ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
2941         XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
2942         *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2943         return 0;
2944 }
2945
2946 /*
2947  * Called by xfs_bmapi to update file extent records and the btree
2948  * after removing space (or undoing a delayed allocation).
2949  */
2950 STATIC int                              /* error */
2951 xfs_bmap_del_extent(
2952         xfs_inode_t             *ip,    /* incore inode pointer */
2953         xfs_trans_t             *tp,    /* current transaction pointer */
2954         xfs_extnum_t            idx,    /* extent number to update/delete */
2955         xfs_bmap_free_t         *flist, /* list of extents to be freed */
2956         xfs_btree_cur_t         *cur,   /* if null, not a btree */
2957         xfs_bmbt_irec_t         *del,   /* data to remove from extents */
2958         int                     *logflagsp, /* inode logging flags */
2959         xfs_extdelta_t          *delta, /* Change made to incore extents */
2960         int                     whichfork, /* data or attr fork */
2961         int                     rsvd)   /* OK to allocate reserved blocks */
2962 {
2963         xfs_filblks_t           da_new; /* new delay-alloc indirect blocks */
2964         xfs_filblks_t           da_old; /* old delay-alloc indirect blocks */
2965         xfs_fsblock_t           del_endblock=0; /* first block past del */
2966         xfs_fileoff_t           del_endoff;     /* first offset past del */
2967         int                     delay;  /* current block is delayed allocated */
2968         int                     do_fx;  /* free extent at end of routine */
2969         xfs_bmbt_rec_host_t     *ep;    /* current extent entry pointer */
2970         int                     error;  /* error return value */
2971         int                     flags;  /* inode logging flags */
2972         xfs_bmbt_irec_t         got;    /* current extent entry */
2973         xfs_fileoff_t           got_endoff;     /* first offset past got */
2974         int                     i;      /* temp state */
2975         xfs_ifork_t             *ifp;   /* inode fork pointer */
2976         xfs_mount_t             *mp;    /* mount structure */
2977         xfs_filblks_t           nblks;  /* quota/sb block count */
2978         xfs_bmbt_irec_t         new;    /* new record to be inserted */
2979         /* REFERENCED */
2980         uint                    qfield; /* quota field to update */
2981         xfs_filblks_t           temp;   /* for indirect length calculations */
2982         xfs_filblks_t           temp2;  /* for indirect length calculations */
2983         int                     state = 0;
2984
2985         XFS_STATS_INC(xs_del_exlist);
2986
2987         if (whichfork == XFS_ATTR_FORK)
2988                 state |= BMAP_ATTRFORK;
2989
2990         mp = ip->i_mount;
2991         ifp = XFS_IFORK_PTR(ip, whichfork);
2992         ASSERT((idx >= 0) && (idx < ifp->if_bytes /
2993                 (uint)sizeof(xfs_bmbt_rec_t)));
2994         ASSERT(del->br_blockcount > 0);
2995         ep = xfs_iext_get_ext(ifp, idx);
2996         xfs_bmbt_get_all(ep, &got);
2997         ASSERT(got.br_startoff <= del->br_startoff);
2998         del_endoff = del->br_startoff + del->br_blockcount;
2999         got_endoff = got.br_startoff + got.br_blockcount;
3000         ASSERT(got_endoff >= del_endoff);
3001         delay = isnullstartblock(got.br_startblock);
3002         ASSERT(isnullstartblock(del->br_startblock) == delay);
3003         flags = 0;
3004         qfield = 0;
3005         error = 0;
3006         /*
3007          * If deleting a real allocation, must free up the disk space.
3008          */
3009         if (!delay) {
3010                 flags = XFS_ILOG_CORE;
3011                 /*
3012                  * Realtime allocation.  Free it and record di_nblocks update.
3013                  */
3014                 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
3015                         xfs_fsblock_t   bno;
3016                         xfs_filblks_t   len;
3017
3018                         ASSERT(do_mod(del->br_blockcount,
3019                                       mp->m_sb.sb_rextsize) == 0);
3020                         ASSERT(do_mod(del->br_startblock,
3021                                       mp->m_sb.sb_rextsize) == 0);
3022                         bno = del->br_startblock;
3023                         len = del->br_blockcount;
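     /*
      * xfs_rtfree_extent takes realtime extent units, so convert the
      * block number and length from filesystem blocks.
      */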
3024                         do_div(bno, mp->m_sb.sb_rextsize);
3025                         do_div(len, mp->m_sb.sb_rextsize);
3026                         if ((error = xfs_rtfree_extent(ip->i_transp, bno,
3027                                         (xfs_extlen_t)len)))
3028                                 goto done;
3029                         do_fx = 0;
3030                         nblks = len * mp->m_sb.sb_rextsize;
3031                         qfield = XFS_TRANS_DQ_RTBCOUNT;
3032                 }
3033                 /*
3034                  * Ordinary allocation.
3035                  */
3036                 else {
3037                         do_fx = 1;
3038                         nblks = del->br_blockcount;
3039                         qfield = XFS_TRANS_DQ_BCOUNT;
3040                 }
3041                 /*
3042                  * Set up del_endblock and cur for later.
3043                  */
3044                 del_endblock = del->br_startblock + del->br_blockcount;
3045                 if (cur) {
3046                         if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
3047                                         got.br_startblock, got.br_blockcount,
3048                                         &i)))
3049                                 goto done;
3050                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
3051                 }
3052                 da_old = da_new = 0;
3053         } else {
3054                 da_old = startblockval(got.br_startblock);
3055                 da_new = 0;
3056                 nblks = 0;
3057                 do_fx = 0;
3058         }
3059         /*
3060          * Set flag value to use in switch statement: matching the extent's
3061          * start contributes 2, matching its end contributes 1.
3062          */
3063         switch (((got.br_startoff == del->br_startoff) << 1) |
3064                 (got_endoff == del_endoff)) {
3065         case 3:
3066                 /*
3067                  * Matches the whole extent.  Delete the entry.
3068                  */
3069                 xfs_iext_remove(ip, idx, 1,
3070                                 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
3071                 ifp->if_lastex = idx;
3072                 if (delay)
3073                         break;
3074                 XFS_IFORK_NEXT_SET(ip, whichfork,
3075                         XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
3076                 flags |= XFS_ILOG_CORE;
3077                 if (!cur) {
3078                         flags |= xfs_ilog_fext(whichfork);
3079                         break;
3080                 }
3081                 if ((error = xfs_btree_delete(cur, &i)))
3082                         goto done;
3083                 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
3084                 break;
3085
3086         case 2:
3087                 /*
3088                  * Deleting the first part of the extent.
3089                  */
3090                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
3091                 xfs_bmbt_set_startoff(ep, del_endoff);
3092                 temp = got.br_blockcount - del->br_blockcount;
3093                 xfs_bmbt_set_blockcount(ep, temp);
3094                 ifp->if_lastex = idx;
3095                 if (delay) {
3096                         temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3097                                 da_old);
3098                         xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
3099                         trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3100                         da_new = temp;
3101                         break;
3102                 }
3103                 xfs_bmbt_set_startblock(ep, del_endblock);
3104                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3105                 if (!cur) {
3106                         flags |= xfs_ilog_fext(whichfork);
3107                         break;
3108                 }
3109                 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
3110                                 got.br_blockcount - del->br_blockcount,
3111                                 got.br_state)))
3112                         goto done;
3113                 break;
3114
3115         case 1:
3116                 /*
3117                  * Deleting the last part of the extent.
3118                  */
3119                 temp = got.br_blockcount - del->br_blockcount;
3120                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
3121                 xfs_bmbt_set_blockcount(ep, temp);
3122                 ifp->if_lastex = idx;
3123                 if (delay) {
3124                         temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3125                                 da_old);
3126                         xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
3127                         trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3128                         da_new = temp;
3129                         break;
3130                 }
3131                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3132                 if (!cur) {
3133                         flags |= xfs_ilog_fext(whichfork);
3134                         break;
3135                 }
3136                 if ((error = xfs_bmbt_update(cur, got.br_startoff,
3137                                 got.br_startblock,
3138                                 got.br_blockcount - del->br_blockcount,
3139                                 got.br_state)))
3140                         goto done;
3141                 break;
3142
3143         case 0:
3144                 /*
3145                  * Deleting the middle of the extent.
3146                  */
3147                 temp = del->br_startoff - got.br_startoff;
3148                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
3149                 xfs_bmbt_set_blockcount(ep, temp);
3150                 new.br_startoff = del_endoff;
3151                 temp2 = got_endoff - del_endoff;
3152                 new.br_blockcount = temp2;
3153                 new.br_state = got.br_state;
3154                 if (!delay) {
3155                         new.br_startblock = del_endblock;
3156                         flags |= XFS_ILOG_CORE;
3157                         if (cur) {
3158                                 if ((error = xfs_bmbt_update(cur,
3159                                                 got.br_startoff,
3160                                                 got.br_startblock, temp,
3161                                                 got.br_state)))
3162                                         goto done;
3163                                 if ((error = xfs_btree_increment(cur, 0, &i)))
3164                                         goto done;
3165                                 cur->bc_rec.b = new;
3166                                 error = xfs_btree_insert(cur, &i);
3167                                 if (error && error != ENOSPC)
3168                                         goto done;
3169                                 /*
3170                                  * If we get ENOSPC back from the btree insert,
3171                                  * it tried a split, and we have a zero
3172                                  * block reservation.
3173                                  * Fix up our state and return the error.
3174                                  */
3175                                 if (error == ENOSPC) {
3176                                         /*
3177                                          * Reset the cursor, don't trust
3178                                          * it after any insert operation.
3179                                          */
3180                                         if ((error = xfs_bmbt_lookup_eq(cur,
3181                                                         got.br_startoff,
3182                                                         got.br_startblock,
3183                                                         temp, &i)))
3184                                                 goto done;
3185                                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
3186                                         /*
3187                                          * Update the btree record back
3188                                          * to the original value.
3189                                          */
3190                                         if ((error = xfs_bmbt_update(cur,
3191                                                         got.br_startoff,
3192                                                         got.br_startblock,
3193                                                         got.br_blockcount,
3194                                                         got.br_state)))
3195                                                 goto done;
3196                                         /*
3197                                          * Reset the extent record back
3198                                          * to the original value.
3199                                          */
3200                                         xfs_bmbt_set_blockcount(ep,
3201                                                 got.br_blockcount);
3202                                         flags = 0;
3203                                         error = XFS_ERROR(ENOSPC);
3204                                         goto done;
3205                                 }
3206                                 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
3207                         } else
3208                                 flags |= xfs_ilog_fext(whichfork);
3209                         XFS_IFORK_NEXT_SET(ip, whichfork,
3210                                 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
3211                 } else {
3212                         ASSERT(whichfork == XFS_DATA_FORK);
3213                         temp = xfs_bmap_worst_indlen(ip, temp);
3214                         xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
3215                         temp2 = xfs_bmap_worst_indlen(ip, temp2);
3216                         new.br_startblock = nullstartblock((int)temp2);
3217                         da_new = temp + temp2;
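     /*
      * If the combined worst-case indirect reservation for the two new
      * pieces exceeds what was reserved for the original extent, trim a
      * block at a time, alternating between the pieces, until it fits.
      */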
3218                         while (da_new > da_old) {
3219                                 if (temp) {
3220                                         temp--;
3221                                         da_new--;
3222                                         xfs_bmbt_set_startblock(ep,
3223                                                 nullstartblock((int)temp));
3224                                 }
3225                                 if (da_new == da_old)
3226                                         break;
3227                                 if (temp2) {
3228                                         temp2--;
3229                                         da_new--;
3230                                         new.br_startblock =
3231                                                 nullstartblock((int)temp2);
3232                                 }
3233                         }
3234                 }
3235                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3236                 xfs_iext_insert(ip, idx + 1, 1, &new, state);
3237                 ifp->if_lastex = idx + 1;
3238                 break;
3239         }
3240         /*
3241          * If we need to, add to list of extents to delete.
3242          */
3243         if (do_fx)
3244                 xfs_bmap_add_free(del->br_startblock, del->br_blockcount, flist,
3245                         mp);
3246         /*
3247          * Adjust inode # blocks in the file.
3248          */
3249         if (nblks)
3250                 ip->i_d.di_nblocks -= nblks;
3251         /*
3252          * Adjust quota data.
3253          */
3254         if (qfield)
3255                 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
3256
3257         /*
3258          * Account for change in delayed indirect blocks.
3259          * Nothing to do for disk quota accounting here.
3260          */
3261         ASSERT(da_old >= da_new);
3262         if (da_old > da_new)
3263                 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new),
3264                         rsvd);
3265         if (delta) {
3266                 /* DELTA: report the original extent. */
3267                 if (delta->xed_startoff > got.br_startoff)
3268                         delta->xed_startoff = got.br_startoff;
3269                 if (delta->xed_blockcount < got.br_startoff+got.br_blockcount)
3270                         delta->xed_blockcount = got.br_startoff +
3271                                                         got.br_blockcount;
3272         }
3273 done:
3274         *logflagsp = flags;
3275         return error;
3276 }
3277
3278 /*
3279  * Remove the entry "free" from the free item list.  Prev points to the
3280  * previous entry, unless "free" is the head of the list.
3281  */
3282 STATIC void
3283 xfs_bmap_del_free(
3284         xfs_bmap_free_t         *flist, /* free item list header */
3285         xfs_bmap_free_item_t    *prev,  /* previous item on list, if any */
3286         xfs_bmap_free_item_t    *free)  /* list item to be freed */
3287 {
3288         if (prev)
3289                 prev->xbfi_next = free->xbfi_next;
3290         else
3291                 flist->xbf_first = free->xbfi_next;
3292         flist->xbf_count--;
3293         kmem_zone_free(xfs_bmap_free_item_zone, free);
3294 }
3295
3296 /*
3297  * Convert an extents-format file into a btree-format file.
3298  * The new file will have a root block (in the inode) and a single child block.
3299  */
3300 STATIC int                                      /* error */
3301 xfs_bmap_extents_to_btree(
3302         xfs_trans_t             *tp,            /* transaction pointer */
3303         xfs_inode_t             *ip,            /* incore inode pointer */
3304         xfs_fsblock_t           *firstblock,    /* first-block-allocated */
3305         xfs_bmap_free_t         *flist,         /* blocks freed in xaction */
3306         xfs_btree_cur_t         **curp,         /* cursor returned to caller */
3307         int                     wasdel,         /* converting a delayed alloc */
3308         int                     *logflagsp,     /* inode logging flags */
3309         int                     whichfork)      /* data or attr fork */
3310 {
3311         struct xfs_btree_block  *ablock;        /* allocated (child) bt block */
3312         xfs_buf_t               *abp;           /* buffer for ablock */
3313         xfs_alloc_arg_t         args;           /* allocation arguments */
3314         xfs_bmbt_rec_t          *arp;           /* child record pointer */
3315         struct xfs_btree_block  *block;         /* btree root block */
3316         xfs_btree_cur_t         *cur;           /* bmap btree cursor */
3317         xfs_bmbt_rec_host_t     *ep;            /* extent record pointer */
3318         int                     error;          /* error return value */
3319         xfs_extnum_t            i, cnt;         /* extent record index */
3320         xfs_ifork_t             *ifp;           /* inode fork pointer */
3321         xfs_bmbt_key_t          *kp;            /* root block key pointer */
3322         xfs_mount_t             *mp;            /* mount structure */
3323         xfs_extnum_t            nextents;       /* number of file extents */
3324         xfs_bmbt_ptr_t          *pp;            /* root block address pointer */
3325
3326         ifp = XFS_IFORK_PTR(ip, whichfork);
3327         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
3328         ASSERT(ifp->if_ext_max ==
3329                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
3330         /*
3331          * Make space in the inode incore.
3332          */
3333         xfs_iroot_realloc(ip, 1, whichfork);
3334         ifp->if_flags |= XFS_IFBROOT;
3335
3336         /*
3337          * Fill in the root.
3338          */
3339         block = ifp->if_broot;
3340         block->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
3341         block->bb_level = cpu_to_be16(1);
3342         block->bb_numrecs = cpu_to_be16(1);
3343         block->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
3344         block->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
3345
3346         /*
3347          * Need a cursor.  Can't allocate until bb_level is filled in.
3348          */
3349         mp = ip->i_mount;
3350         cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
3351         cur->bc_private.b.firstblock = *firstblock;
3352         cur->bc_private.b.flist = flist;
3353         cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
3354         /*
3355          * Convert to a btree with two levels, one record in root.
3356          */
3357         XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
3358         args.tp = tp;
3359         args.mp = mp;
3360         args.firstblock = *firstblock;
3361         if (*firstblock == NULLFSBLOCK) {
3362                 args.type = XFS_ALLOCTYPE_START_BNO;
3363                 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
3364         } else if (flist->xbf_low) {
3365                 args.type = XFS_ALLOCTYPE_START_BNO;
3366                 args.fsbno = *firstblock;
3367         } else {
3368                 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3369                 args.fsbno = *firstblock;
3370         }
3371         args.minlen = args.maxlen = args.prod = 1;
3372         args.total = args.minleft = args.alignment = args.mod = args.isfl =
3373                 args.minalignslop = 0;
3374         args.wasdel = wasdel;
3375         *logflagsp = 0;
3376         if ((error = xfs_alloc_vextent(&args))) {
3377                 xfs_iroot_realloc(ip, -1, whichfork);
3378                 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
3379                 return error;
3380         }
3381         /*
3382          * Allocation can't fail, the space was reserved.
3383          */
3384         ASSERT(args.fsbno != NULLFSBLOCK);
3385         ASSERT(*firstblock == NULLFSBLOCK ||
3386                args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
3387                (flist->xbf_low &&
3388                 args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
3389         *firstblock = cur->bc_private.b.firstblock = args.fsbno;
3390         cur->bc_private.b.allocated++;
3391         ip->i_d.di_nblocks++;
3392         xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
3393         abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
3394         /*
3395          * Fill in the child block.
3396          */
3397         ablock = XFS_BUF_TO_BLOCK(abp);
3398         ablock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
3399         ablock->bb_level = 0;
3400         ablock->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
3401         ablock->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
3402         arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
3403         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
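     /*
      * Copy only the real extents into the child block; delayed
      * allocations (null start block) have no on-disk form yet and remain
      * in the incore extent list only.
      */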
3404         for (cnt = i = 0; i < nextents; i++) {
3405                 ep = xfs_iext_get_ext(ifp, i);
3406                 if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
3407                         arp->l0 = cpu_to_be64(ep->l0);
3408                         arp->l1 = cpu_to_be64(ep->l1);
3409                         arp++; cnt++;
3410                 }
3411         }
3412         ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
3413         xfs_btree_set_numrecs(ablock, cnt);
3414
3415         /*
3416          * Fill in the root key and pointer.
3417          */
3418         kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
3419         arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
3420         kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
3421         pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
3422                                                 be16_to_cpu(block->bb_level)));
3423         *pp = cpu_to_be64(args.fsbno);
3424
3425         /*
3426          * Do all this logging at the end so that
3427          * the root is at the right level.
3428          */
3429         xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
3430         xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
3431         ASSERT(*curp == NULL);
3432         *curp = cur;
3433         *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
3434         return 0;
3435 }
3436
3437 /*
3438  * Calculate the default attribute fork offset for newly created inodes.
3439  */
3440 uint
3441 xfs_default_attroffset(
3442         struct xfs_inode        *ip)
3443 {
3444         struct xfs_mount        *mp = ip->i_mount;
3445         uint                    offset;
3446
3447         if (mp->m_sb.sb_inodesize == 256) {
3448                 offset = XFS_LITINO(mp) -
3449                                 XFS_BMDR_SPACE_CALC(MINABTPTRS);
3450         } else {
3451                 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
3452         }
3453
3454         ASSERT(offset < XFS_LITINO(mp));
3455         return offset;
3456 }
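/*
 * Illustrative note (added, not from the upstream file): di_forkoff is
 * stored in units of 8 bytes, which is why callers shift the byte offset
 * returned here right by 3 before assigning it - see
 * xfs_bmap_forkoff_reset() and xfs_bmap_add_attrfork() below.
 */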
3457
3458 /*
3459  * Helper routine to reset inode di_forkoff field when switching
3460  * attribute fork from local to extent format - we reset it where
3461  * possible to make space available for inline data fork extents.
3462  */
3463 STATIC void
3464 xfs_bmap_forkoff_reset(
3465         xfs_mount_t     *mp,
3466         xfs_inode_t     *ip,
3467         int             whichfork)
3468 {
3469         if (whichfork == XFS_ATTR_FORK &&
3470             ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
3471             ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
3472             ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
3473                 uint    dfl_forkoff = xfs_default_attroffset(ip) >> 3;
3474
3475                 if (dfl_forkoff > ip->i_d.di_forkoff) {
3476                         ip->i_d.di_forkoff = dfl_forkoff;
3477                         ip->i_df.if_ext_max =
3478                                 XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t);
3479                         ip->i_afp->if_ext_max =
3480                                 XFS_IFORK_ASIZE(ip) / sizeof(xfs_bmbt_rec_t);
3481                 }
3482         }
3483 }
3484
3485 /*
3486  * Convert a local file to an extents file.
3487  * This code cannot be used for data forks of regular files,
3488  * since the file data needs to get logged so things will stay consistent.
3489  * (The bmap-level manipulations are ok, though).
3490  */
3491 STATIC int                              /* error */
3492 xfs_bmap_local_to_extents(
3493         xfs_trans_t     *tp,            /* transaction pointer */
3494         xfs_inode_t     *ip,            /* incore inode pointer */
3495         xfs_fsblock_t   *firstblock,    /* first block allocated in xaction */
3496         xfs_extlen_t    total,          /* total blocks needed by transaction */
3497         int             *logflagsp,     /* inode logging flags */
3498         int             whichfork)      /* data or attr fork */
3499 {
3500         int             error;          /* error return value */
3501         int             flags;          /* logging flags returned */
3502         xfs_ifork_t     *ifp;           /* inode fork pointer */
3503
3504         /*
3505          * We don't want to deal with the case of keeping inode data inline yet.
3506          * So passing in the data fork of a regular inode is invalid.
3507          */
3508         ASSERT(!((ip->i_d.di_mode & S_IFMT) == S_IFREG &&
3509                  whichfork == XFS_DATA_FORK));
3510         ifp = XFS_IFORK_PTR(ip, whichfork);
3511         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
3512         flags = 0;
3513         error = 0;
3514         if (ifp->if_bytes) {
3515                 xfs_alloc_arg_t args;   /* allocation arguments */
3516                 xfs_buf_t       *bp;    /* buffer for extent block */
3517                 xfs_bmbt_rec_host_t *ep;/* extent record pointer */
3518
3519                 args.tp = tp;
3520                 args.mp = ip->i_mount;
3521                 args.firstblock = *firstblock;
3522                 ASSERT((ifp->if_flags &
3523                         (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
3524                 /*
3525                  * Allocate a block.  We know we need only one, since the
3526                  * file currently fits in an inode.
3527                  */
3528                 if (*firstblock == NULLFSBLOCK) {
3529                         args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
3530                         args.type = XFS_ALLOCTYPE_START_BNO;
3531                 } else {
3532                         args.fsbno = *firstblock;
3533                         args.type = XFS_ALLOCTYPE_NEAR_BNO;
3534                 }
3535                 args.total = total;
3536                 args.mod = args.minleft = args.alignment = args.wasdel =
3537                         args.isfl = args.minalignslop = 0;
3538                 args.minlen = args.maxlen = args.prod = 1;
3539                 if ((error = xfs_alloc_vextent(&args)))
3540                         goto done;
3541                 /*
3542                  * Can't fail, the space was reserved.
3543                  */
3544                 ASSERT(args.fsbno != NULLFSBLOCK);
3545                 ASSERT(args.len == 1);
3546                 *firstblock = args.fsbno;
3547                 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
3548                 memcpy((char *)XFS_BUF_PTR(bp), ifp->if_u1.if_data,
3549                         ifp->if_bytes);
3550                 xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
3551                 xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
3552                 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
3553                 xfs_iext_add(ifp, 0, 1);
3554                 ep = xfs_iext_get_ext(ifp, 0);
3555                 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
3556                 trace_xfs_bmap_post_update(ip, 0,
3557                                 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
3558                                 _THIS_IP_);
3559                 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
3560                 ip->i_d.di_nblocks = 1;
3561                 xfs_trans_mod_dquot_byino(tp, ip,
3562                         XFS_TRANS_DQ_BCOUNT, 1L);
3563                 flags |= xfs_ilog_fext(whichfork);
3564         } else {
3565                 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
3566                 xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork);
3567         }
3568         ifp->if_flags &= ~XFS_IFINLINE;
3569         ifp->if_flags |= XFS_IFEXTENTS;
3570         XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
3571         flags |= XFS_ILOG_CORE;
3572 done:
3573         *logflagsp = flags;
3574         return error;
3575 }
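/*
 * Sketch of how this is reached (taken from xfs_bmapi() further below):
 * when a write maps into a fork that is still in local format, xfs_bmapi()
 * first converts it, roughly:
 *
 *	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
 *		error = xfs_bmap_local_to_extents(tp, ip, firstblock,
 *						  total, &logflags, whichfork);
 *
 * so the rest of the mapping loop only ever sees extents or btree format.
 */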
3576
3577 /*
3578  * Search the extent records for the entry containing block bno.
3579  * If bno lies in a hole, point to the next entry.  If bno lies
3580  * past eof, *eofp will be set, and *prevp will contain the last
3581  * entry (null if none).  Else, *lastxp will be set to the index
3582  * of the found entry; *gotp will contain the entry.
3583  */
3584 STATIC xfs_bmbt_rec_host_t *            /* pointer to found extent entry */
3585 xfs_bmap_search_multi_extents(
3586         xfs_ifork_t     *ifp,           /* inode fork pointer */
3587         xfs_fileoff_t   bno,            /* block number searched for */
3588         int             *eofp,          /* out: end of file found */
3589         xfs_extnum_t    *lastxp,        /* out: last extent index */
3590         xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
3591         xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
3592 {
3593         xfs_bmbt_rec_host_t *ep;                /* extent record pointer */
3594         xfs_extnum_t    lastx;          /* last extent index */
3595
3596         /*
3597          * Initialize the extent entry structure to catch access to
3598          * uninitialized br_startblock field.
3599          */
3600         gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
3601         gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
3602         gotp->br_state = XFS_EXT_INVALID;
3603 #if XFS_BIG_BLKNOS
3604         gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
3605 #else
3606         gotp->br_startblock = 0xffffa5a5;
3607 #endif
3608         prevp->br_startoff = NULLFILEOFF;
3609
3610         ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
3611         if (lastx > 0) {
3612                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
3613         }
3614         if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
3615                 xfs_bmbt_get_all(ep, gotp);
3616                 *eofp = 0;
3617         } else {
3618                 if (lastx > 0) {
3619                         *gotp = *prevp;
3620                 }
3621                 *eofp = 1;
3622                 ep = NULL;
3623         }
3624         *lastxp = lastx;
3625         return ep;
3626 }
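/*
 * Example of the search semantics (illustration only): with extents
 * covering file blocks [0..4] and [10..14], a lookup of bno == 7 lands in
 * the hole, so *gotp is the following [10..14] extent, *prevp is [0..4]
 * and *eofp == 0.  A lookup of bno == 20 is past eof, so *eofp is set,
 * *gotp is copied from *prevp (the last extent) and NULL is returned.
 */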
3627
3628 /*
3629  * Search the extents list for the inode, for the extent containing bno.
3630  * If bno lies in a hole, point to the next entry.  If bno lies past eof,
3631  * *eofp will be set, and *prevp will contain the last entry (null if none).
3632  * Else, *lastxp will be set to the index of the found
3633  * entry; *gotp will contain the entry.
3634  */
3635 STATIC xfs_bmbt_rec_host_t *                 /* pointer to found extent entry */
3636 xfs_bmap_search_extents(
3637         xfs_inode_t     *ip,            /* incore inode pointer */
3638         xfs_fileoff_t   bno,            /* block number searched for */
3639         int             fork,           /* data or attr fork */
3640         int             *eofp,          /* out: end of file found */
3641         xfs_extnum_t    *lastxp,        /* out: last extent index */
3642         xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
3643         xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
3644 {
3645         xfs_ifork_t     *ifp;           /* inode fork pointer */
3646         xfs_bmbt_rec_host_t  *ep;            /* extent record pointer */
3647
3648         XFS_STATS_INC(xs_look_exlist);
3649         ifp = XFS_IFORK_PTR(ip, fork);
3650
3651         ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);
3652
3653         if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
3654                      !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
3655                 xfs_cmn_err(XFS_PTAG_FSBLOCK_ZERO, CE_ALERT, ip->i_mount,
3656                                 "Access to block zero in inode %llu "
3657                                 "start_block: %llx start_off: %llx "
3658                                 "blkcnt: %llx extent-state: %x lastx: %x\n",
3659                         (unsigned long long)ip->i_ino,
3660                         (unsigned long long)gotp->br_startblock,
3661                         (unsigned long long)gotp->br_startoff,
3662                         (unsigned long long)gotp->br_blockcount,
3663                         gotp->br_state, *lastxp);
3664                 *lastxp = NULLEXTNUM;
3665                 *eofp = 1;
3666                 return NULL;
3667         }
3668         return ep;
3669 }
3670
3671 /*
3672  * Compute the worst-case number of indirect blocks that will be used
3673  * for ip's delayed extent of length "len".
3674  */
3675 STATIC xfs_filblks_t
3676 xfs_bmap_worst_indlen(
3677         xfs_inode_t     *ip,            /* incore inode pointer */
3678         xfs_filblks_t   len)            /* delayed extent length */
3679 {
3680         int             level;          /* btree level number */
3681         int             maxrecs;        /* maximum record count at this level */
3682         xfs_mount_t     *mp;            /* mount structure */
3683         xfs_filblks_t   rval;           /* return value */
3684
3685         mp = ip->i_mount;
3686         maxrecs = mp->m_bmap_dmxr[0];
3687         for (level = 0, rval = 0;
3688              level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
3689              level++) {
3690                 len += maxrecs - 1;
3691                 do_div(len, maxrecs);
3692                 rval += len;
3693                 if (len == 1)
3694                         return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
3695                                 level - 1;
3696                 if (level == 0)
3697                         maxrecs = mp->m_bmap_dmxr[1];
3698         }
3699         return rval;
3700 }
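/*
 * Worked example (record counts are made up for illustration): with
 * m_bmap_dmxr[0] == 250 and m_bmap_dmxr[1] == 500, a delayed extent of
 * len == 1000 needs ceil(1000/250) == 4 leaf blocks, then
 * ceil(4/500) == 1 block at the next level, at which point the loop stops
 * and charges one extra block for every btree level that could still
 * exist above it - hence "worst-case".
 */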
3701
3702 /*
3703  * Convert inode from non-attributed to attributed.
3704  * Must not be in a transaction, ip must not be locked.
3705  */
3706 int                                             /* error code */
3707 xfs_bmap_add_attrfork(
3708         xfs_inode_t             *ip,            /* incore inode pointer */
3709         int                     size,           /* space new attribute needs */
3710         int                     rsvd)           /* xact may use reserved blks */
3711 {
3712         xfs_fsblock_t           firstblock;     /* 1st block/ag allocated */
3713         xfs_bmap_free_t         flist;          /* freed extent records */
3714         xfs_mount_t             *mp;            /* mount structure */
3715         xfs_trans_t             *tp;            /* transaction pointer */
3716         int                     blks;           /* space reservation */
3717         int                     version = 1;    /* superblock attr version */
3718         int                     committed;      /* xaction was committed */
3719         int                     logflags;       /* logging flags */
3720         int                     error;          /* error return value */
3721
3722         ASSERT(XFS_IFORK_Q(ip) == 0);
3723         ASSERT(ip->i_df.if_ext_max ==
3724                XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
3725
3726         mp = ip->i_mount;
3727         ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
3728         tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
3729         blks = XFS_ADDAFORK_SPACE_RES(mp);
3730         if (rsvd)
3731                 tp->t_flags |= XFS_TRANS_RESERVE;
3732         if ((error = xfs_trans_reserve(tp, blks, XFS_ADDAFORK_LOG_RES(mp), 0,
3733                         XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT)))
3734                 goto error0;
3735         xfs_ilock(ip, XFS_ILOCK_EXCL);
3736         error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
3737                         XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
3738                         XFS_QMOPT_RES_REGBLKS);
3739         if (error) {
3740                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
3741                 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
3742                 return error;
3743         }
3744         if (XFS_IFORK_Q(ip))
3745                 goto error1;
3746         if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
3747                 /*
3748                  * For inodes coming from pre-6.2 filesystems.
3749                  */
3750                 ASSERT(ip->i_d.di_aformat == 0);
3751                 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
3752         }
3753         ASSERT(ip->i_d.di_anextents == 0);
3754
3755         xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
3756         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
3757
3758         switch (ip->i_d.di_format) {
3759         case XFS_DINODE_FMT_DEV:
3760                 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
3761                 break;
3762         case XFS_DINODE_FMT_UUID:
3763                 ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
3764                 break;
3765         case XFS_DINODE_FMT_LOCAL:
3766         case XFS_DINODE_FMT_EXTENTS:
3767         case XFS_DINODE_FMT_BTREE:
3768                 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
3769                 if (!ip->i_d.di_forkoff)
3770                         ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
3771                 else if (mp->m_flags & XFS_MOUNT_ATTR2)
3772                         version = 2;
3773                 break;
3774         default:
3775                 ASSERT(0);
3776                 error = XFS_ERROR(EINVAL);
3777                 goto error1;
3778         }
3779         ip->i_df.if_ext_max =
3780                 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
3781         ASSERT(ip->i_afp == NULL);
3782         ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
3783         ip->i_afp->if_ext_max =
3784                 XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
3785         ip->i_afp->if_flags = XFS_IFEXTENTS;
3786         logflags = 0;
3787         xfs_bmap_init(&flist, &firstblock);
3788         switch (ip->i_d.di_format) {
3789         case XFS_DINODE_FMT_LOCAL:
3790                 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
3791                         &logflags);
3792                 break;
3793         case XFS_DINODE_FMT_EXTENTS:
3794                 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
3795                         &flist, &logflags);
3796                 break;
3797         case XFS_DINODE_FMT_BTREE:
3798                 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
3799                         &logflags);
3800                 break;
3801         default:
3802                 error = 0;
3803                 break;
3804         }
3805         if (logflags)
3806                 xfs_trans_log_inode(tp, ip, logflags);
3807         if (error)
3808                 goto error2;
3809         if (!xfs_sb_version_hasattr(&mp->m_sb) ||
3810            (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
3811                 __int64_t sbfields = 0;
3812
3813                 spin_lock(&mp->m_sb_lock);
3814                 if (!xfs_sb_version_hasattr(&mp->m_sb)) {
3815                         xfs_sb_version_addattr(&mp->m_sb);
3816                         sbfields |= XFS_SB_VERSIONNUM;
3817                 }
3818                 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
3819                         xfs_sb_version_addattr2(&mp->m_sb);
3820                         sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
3821                 }
3822                 if (sbfields) {
3823                         spin_unlock(&mp->m_sb_lock);
3824                         xfs_mod_sb(tp, sbfields);
3825                 } else
3826                         spin_unlock(&mp->m_sb_lock);
3827         }
3828         if ((error = xfs_bmap_finish(&tp, &flist, &committed)))
3829                 goto error2;
3830         error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
3831         ASSERT(ip->i_df.if_ext_max ==
3832                XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
3833         return error;
3834 error2:
3835         xfs_bmap_cancel(&flist);
3836 error1:
3837         xfs_iunlock(ip, XFS_ILOCK_EXCL);
3838 error0:
3839         xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
3840         ASSERT(ip->i_df.if_ext_max ==
3841                XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
3842         return error;
3843 }
3844
3845 /*
3846  * Add the extent to the list of extents to be free at transaction end.
3847  * The list is maintained sorted (by block number).
3848  */
3849 /* ARGSUSED */
3850 void
3851 xfs_bmap_add_free(
3852         xfs_fsblock_t           bno,            /* fs block number of extent */
3853         xfs_filblks_t           len,            /* length of extent */
3854         xfs_bmap_free_t         *flist,         /* list of extents */
3855         xfs_mount_t             *mp)            /* mount point structure */
3856 {
3857         xfs_bmap_free_item_t    *cur;           /* current (next) element */
3858         xfs_bmap_free_item_t    *new;           /* new element */
3859         xfs_bmap_free_item_t    *prev;          /* previous element */
3860 #ifdef DEBUG
3861         xfs_agnumber_t          agno;
3862         xfs_agblock_t           agbno;
3863
3864         ASSERT(bno != NULLFSBLOCK);
3865         ASSERT(len > 0);
3866         ASSERT(len <= MAXEXTLEN);
3867         ASSERT(!isnullstartblock(bno));
3868         agno = XFS_FSB_TO_AGNO(mp, bno);
3869         agbno = XFS_FSB_TO_AGBNO(mp, bno);
3870         ASSERT(agno < mp->m_sb.sb_agcount);
3871         ASSERT(agbno < mp->m_sb.sb_agblocks);
3872         ASSERT(len < mp->m_sb.sb_agblocks);
3873         ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
3874 #endif
3875         ASSERT(xfs_bmap_free_item_zone != NULL);
3876         new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
3877         new->xbfi_startblock = bno;
3878         new->xbfi_blockcount = (xfs_extlen_t)len;
3879         for (prev = NULL, cur = flist->xbf_first;
3880              cur != NULL;
3881              prev = cur, cur = cur->xbfi_next) {
3882                 if (cur->xbfi_startblock >= bno)
3883                         break;
3884         }
3885         if (prev)
3886                 prev->xbfi_next = new;
3887         else
3888                 flist->xbf_first = new;
3889         new->xbfi_next = cur;
3890         flist->xbf_count++;
3891 }
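/*
 * Typical free-list lifecycle (sketch based on the callers in this file):
 *
 *	xfs_bmap_init(&flist, &firstblock);
 *	...
 *	xfs_bmap_add_free(bno, len, &flist, mp);	(possibly many times)
 *	...
 *	error = xfs_bmap_finish(&tp, &flist, &committed);
 *	if (error)
 *		xfs_bmap_cancel(&flist);
 *
 * as done in xfs_bmap_add_attrfork() above.
 */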
3892
3893 /*
3894  * Compute and fill in the value of the maximum depth of a bmap btree
3895  * in this filesystem.  Done once, during mount.
3896  */
3897 void
3898 xfs_bmap_compute_maxlevels(
3899         xfs_mount_t     *mp,            /* file system mount structure */
3900         int             whichfork)      /* data or attr fork */
3901 {
3902         int             level;          /* btree level */
3903         uint            maxblocks;      /* max blocks at this level */
3904         uint            maxleafents;    /* max leaf entries possible */
3905         int             maxrootrecs;    /* max records in root block */
3906         int             minleafrecs;    /* min records in leaf block */
3907         int             minnoderecs;    /* min records in node block */
3908         int             sz;             /* root block size */
3909
3910         /*
3911          * The maximum number of extents in a file, hence the maximum
3912          * number of leaf entries, is controlled by the type of di_nextents
3913          * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
3914          * (a signed 16-bit number, xfs_aextnum_t).
3915          *
3916          * Note that we can no longer assume that if we are in ATTR1 that
3917          * the fork offset of all the inodes will be
3918          * (xfs_default_attroffset(ip) >> 3) because we could have mounted
3919          * with ATTR2 and then mounted back with ATTR1, keeping the
3920          * di_forkoff's fixed but probably at various positions. Therefore,
3921          * for both ATTR1 and ATTR2 we have to assume the worst case scenario
3922          * of a minimum size available.
3923          */
3924         if (whichfork == XFS_DATA_FORK) {
3925                 maxleafents = MAXEXTNUM;
3926                 sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
3927         } else {
3928                 maxleafents = MAXAEXTNUM;
3929                 sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
3930         }
3931         maxrootrecs = xfs_bmdr_maxrecs(mp, sz, 0);
3932         minleafrecs = mp->m_bmap_dmnr[0];
3933         minnoderecs = mp->m_bmap_dmnr[1];
3934         maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
3935         for (level = 1; maxblocks > 1; level++) {
3936                 if (maxblocks <= maxrootrecs)
3937                         maxblocks = 1;
3938                 else
3939                         maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
3940         }
3941         mp->m_bm_maxlevels[whichfork] = level;
3942 }
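/*
 * Example with made-up geometry: minleafrecs == 4, minnoderecs == 4,
 * maxrootrecs == 4 and maxleafents == 1000 give
 * maxblocks = ceil(1000/4) = 250, then 63, 16, 4, and finally 1 once the
 * remaining four blocks fit under the in-inode root, so the loop exits
 * with level == 5 and m_bm_maxlevels[whichfork] is set to 5.
 */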
3943
3944 /*
3945  * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
3946  * caller.  Frees all the extents that need freeing, which must be done
3947  * last due to locking considerations.  We never free any extents in
3948  * the first transaction.  This is to allow the caller to make the first
3949  * transaction a synchronous one so that the pointers to the data being
3950  * broken in this transaction will be permanent before the data is actually
3951  * freed.  This is necessary to prevent blocks from being reallocated
3952  * and written to before the free and reallocation are actually permanent.
3953  * We do not just make the first transaction synchronous here, because
3954  * there are more efficient ways to gain the same protection in some cases
3955  * (see the file truncation code).
3956  *
3957  * Set *committed to 1 if the given transaction was committed and a new
3958  * one started, and to 0 otherwise.
3959  */
3960 /*ARGSUSED*/
3961 int                                             /* error */
3962 xfs_bmap_finish(
3963         xfs_trans_t             **tp,           /* transaction pointer addr */
3964         xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
3965         int                     *committed)     /* xact committed or not */
3966 {
3967         xfs_efd_log_item_t      *efd;           /* extent free data */
3968         xfs_efi_log_item_t      *efi;           /* extent free intention */
3969         int                     error;          /* error return value */
3970         xfs_bmap_free_item_t    *free;          /* free extent item */
3971         unsigned int            logres;         /* new log reservation */
3972         unsigned int            logcount;       /* new log count */
3973         xfs_mount_t             *mp;            /* filesystem mount structure */
3974         xfs_bmap_free_item_t    *next;          /* next item on free list */
3975         xfs_trans_t             *ntp;           /* new transaction pointer */
3976
3977         ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
3978         if (flist->xbf_count == 0) {
3979                 *committed = 0;
3980                 return 0;
3981         }
3982         ntp = *tp;
3983         efi = xfs_trans_get_efi(ntp, flist->xbf_count);
3984         for (free = flist->xbf_first; free; free = free->xbfi_next)
3985                 xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
3986                         free->xbfi_blockcount);
3987         logres = ntp->t_log_res;
3988         logcount = ntp->t_log_count;
3989         ntp = xfs_trans_dup(*tp);
3990         error = xfs_trans_commit(*tp, 0);
3991         *tp = ntp;
3992         *committed = 1;
3993         /*
3994          * We have a new transaction, so we should return committed=1,
3995          * even though we're returning an error.
3996          */
3997         if (error)
3998                 return error;
3999
4000         /*
4001          * transaction commit worked ok so we can drop the extra ticket
4002          * reference that we gained in xfs_trans_dup()
4003          */
4004         xfs_log_ticket_put(ntp->t_ticket);
4005
4006         if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES,
4007                         logcount)))
4008                 return error;
4009         efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
4010         for (free = flist->xbf_first; free != NULL; free = next) {
4011                 next = free->xbfi_next;
4012                 if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
4013                                 free->xbfi_blockcount))) {
4014                         /*
4015                          * The bmap free list will be cleaned up at a
4016                          * higher level.  The EFI will be canceled when
4017                          * this transaction is aborted.
4018                          * Need to force shutdown here to make sure it
4019                          * happens, since this transaction may not be
4020                          * dirty yet.
4021                          */
4022                         mp = ntp->t_mountp;
4023                         if (!XFS_FORCED_SHUTDOWN(mp))
4024                                 xfs_force_shutdown(mp,
4025                                                    (error == EFSCORRUPTED) ?
4026                                                    SHUTDOWN_CORRUPT_INCORE :
4027                                                    SHUTDOWN_META_IO_ERROR);
4028                         return error;
4029                 }
4030                 xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
4031                         free->xbfi_blockcount);
4032                 xfs_bmap_del_free(flist, NULL, free);
4033         }
4034         return 0;
4035 }
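/*
 * In outline (summary of the code above): the extents to free are logged
 * as an EFI in the caller's transaction, that transaction is duplicated
 * and committed, and the actual xfs_free_extent() calls plus a matching
 * EFD are logged in the new transaction handed back through *tp.  This is
 * why *committed is set to 1 even when the commit itself returns an error.
 */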
4036
4037 /*
4038  * Free up any items left in the list.
4039  */
4040 void
4041 xfs_bmap_cancel(
4042         xfs_bmap_free_t         *flist) /* list of bmap_free_items */
4043 {
4044         xfs_bmap_free_item_t    *free;  /* free list item */
4045         xfs_bmap_free_item_t    *next;
4046
4047         if (flist->xbf_count == 0)
4048                 return;
4049         ASSERT(flist->xbf_first != NULL);
4050         for (free = flist->xbf_first; free; free = next) {
4051                 next = free->xbfi_next;
4052                 xfs_bmap_del_free(flist, NULL, free);
4053         }
4054         ASSERT(flist->xbf_count == 0);
4055 }
4056
4057 /*
4058  * Returns the file-relative block number of the first unused block(s)
4059  * in the file with at least "len" logically contiguous blocks free.
4060  * This is the lowest-address hole if the file has holes, else the first block
4061  * past the end of file.
4062  * Return 0 if the file is currently local (in-inode).
4063  */
4064 int                                             /* error */
4065 xfs_bmap_first_unused(
4066         xfs_trans_t     *tp,                    /* transaction pointer */
4067         xfs_inode_t     *ip,                    /* incore inode */
4068         xfs_extlen_t    len,                    /* size of hole to find */
4069         xfs_fileoff_t   *first_unused,          /* unused block */
4070         int             whichfork)              /* data or attr fork */
4071 {
4072         int             error;                  /* error return value */
4073         int             idx;                    /* extent record index */
4074         xfs_ifork_t     *ifp;                   /* inode fork pointer */
4075         xfs_fileoff_t   lastaddr;               /* last block number seen */
4076         xfs_fileoff_t   lowest;                 /* lowest useful block */
4077         xfs_fileoff_t   max;                    /* starting useful block */
4078         xfs_fileoff_t   off;                    /* offset for this block */
4079         xfs_extnum_t    nextents;               /* number of extent entries */
4080
4081         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
4082                XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
4083                XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
4084         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4085                 *first_unused = 0;
4086                 return 0;
4087         }
4088         ifp = XFS_IFORK_PTR(ip, whichfork);
4089         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4090             (error = xfs_iread_extents(tp, ip, whichfork)))
4091                 return error;
4092         lowest = *first_unused;
4093         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4094         for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
4095                 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
4096                 off = xfs_bmbt_get_startoff(ep);
4097                 /*
4098                  * See if the hole before this extent will work.
4099                  */
4100                 if (off >= lowest + len && off - max >= len) {
4101                         *first_unused = max;
4102                         return 0;
4103                 }
4104                 lastaddr = off + xfs_bmbt_get_blockcount(ep);
4105                 max = XFS_FILEOFF_MAX(lastaddr, lowest);
4106         }
4107         *first_unused = max;
4108         return 0;
4109 }
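/*
 * Example: with extents covering file blocks [0..4] and [10..14] and
 * *first_unused passed in as 0, a request for len == 3 returns 5 in
 * *first_unused: the hole in front of the second extent starts at
 * max == 5 and is 10 - 5 == 5 blocks long, which is big enough.
 */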
4110
4111 /*
4112  * Returns the file-relative block number of the last block + 1 before
4113  * last_block (input value) in the file.
4114  * This is not based on i_size, it is based on the extent records.
4115  * Returns 0 for local files, as they do not have extent records.
4116  */
4117 int                                             /* error */
4118 xfs_bmap_last_before(
4119         xfs_trans_t     *tp,                    /* transaction pointer */
4120         xfs_inode_t     *ip,                    /* incore inode */
4121         xfs_fileoff_t   *last_block,            /* last block */
4122         int             whichfork)              /* data or attr fork */
4123 {
4124         xfs_fileoff_t   bno;                    /* input file offset */
4125         int             eof;                    /* hit end of file */
4126         xfs_bmbt_rec_host_t *ep;                /* pointer to last extent */
4127         int             error;                  /* error return value */
4128         xfs_bmbt_irec_t got;                    /* current extent value */
4129         xfs_ifork_t     *ifp;                   /* inode fork pointer */
4130         xfs_extnum_t    lastx;                  /* last extent used */
4131         xfs_bmbt_irec_t prev;                   /* previous extent value */
4132
4133         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4134             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4135             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
4136                return XFS_ERROR(EIO);
4137         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4138                 *last_block = 0;
4139                 return 0;
4140         }
4141         ifp = XFS_IFORK_PTR(ip, whichfork);
4142         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4143             (error = xfs_iread_extents(tp, ip, whichfork)))
4144                 return error;
4145         bno = *last_block - 1;
4146         ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
4147                 &prev);
4148         if (eof || xfs_bmbt_get_startoff(ep) > bno) {
4149                 if (prev.br_startoff == NULLFILEOFF)
4150                         *last_block = 0;
4151                 else
4152                         *last_block = prev.br_startoff + prev.br_blockcount;
4153         }
4154         /*
4155          * Otherwise *last_block is already the right answer.
4156          */
4157         return 0;
4158 }
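/*
 * Example: with extents [0..4] and [10..14], passing in *last_block == 8
 * means block 7 falls in the hole, so *last_block is pulled back to 5
 * (one past the preceding extent).  Passing in *last_block == 3 leaves the
 * value untouched because block 2 sits inside the first extent.
 */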
4159
4160 /*
4161  * Returns the file-relative block number of the first block past eof in
4162  * the file.  This is not based on i_size, it is based on the extent records.
4163  * Returns 0 for local files, as they do not have extent records.
4164  */
4165 int                                             /* error */
4166 xfs_bmap_last_offset(
4167         xfs_trans_t     *tp,                    /* transaction pointer */
4168         xfs_inode_t     *ip,                    /* incore inode */
4169         xfs_fileoff_t   *last_block,            /* last block */
4170         int             whichfork)              /* data or attr fork */
4171 {
4172         xfs_bmbt_rec_host_t *ep;                /* pointer to last extent */
4173         int             error;                  /* error return value */
4174         xfs_ifork_t     *ifp;                   /* inode fork pointer */
4175         xfs_extnum_t    nextents;               /* number of extent entries */
4176
4177         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4178             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4179             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
4180                return XFS_ERROR(EIO);
4181         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4182                 *last_block = 0;
4183                 return 0;
4184         }
4185         ifp = XFS_IFORK_PTR(ip, whichfork);
4186         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4187             (error = xfs_iread_extents(tp, ip, whichfork)))
4188                 return error;
4189         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4190         if (!nextents) {
4191                 *last_block = 0;
4192                 return 0;
4193         }
4194         ep = xfs_iext_get_ext(ifp, nextents - 1);
4195         *last_block = xfs_bmbt_get_startoff(ep) + xfs_bmbt_get_blockcount(ep);
4196         return 0;
4197 }
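/*
 * Example: if the last extent record covers file blocks [7..9],
 * *last_block is set to 10, i.e. startoff 7 plus blockcount 3,
 * regardless of what di_size says.
 */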
4198
4199 /*
4200  * Returns whether the selected fork of the inode has exactly one
4201  * block or not.  For the data fork we check this matches di_size,
4202  * implying the file's range is 0..bsize-1.
4203  */
4204 int                                     /* 1=>1 block, 0=>otherwise */
4205 xfs_bmap_one_block(
4206         xfs_inode_t     *ip,            /* incore inode */
4207         int             whichfork)      /* data or attr fork */
4208 {
4209         xfs_bmbt_rec_host_t *ep;        /* ptr to fork's extent */
4210         xfs_ifork_t     *ifp;           /* inode fork pointer */
4211         int             rval;           /* return value */
4212         xfs_bmbt_irec_t s;              /* internal version of extent */
4213
4214 #ifndef DEBUG
4215         if (whichfork == XFS_DATA_FORK) {
4216                 return ((ip->i_d.di_mode & S_IFMT) == S_IFREG) ?
4217                         (ip->i_size == ip->i_mount->m_sb.sb_blocksize) :
4218                         (ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize);
4219         }
4220 #endif  /* !DEBUG */
4221         if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
4222                 return 0;
4223         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4224                 return 0;
4225         ifp = XFS_IFORK_PTR(ip, whichfork);
4226         ASSERT(ifp->if_flags & XFS_IFEXTENTS);
4227         ep = xfs_iext_get_ext(ifp, 0);
4228         xfs_bmbt_get_all(ep, &s);
4229         rval = s.br_startoff == 0 && s.br_blockcount == 1;
4230         if (rval && whichfork == XFS_DATA_FORK)
4231                 ASSERT(ip->i_size == ip->i_mount->m_sb.sb_blocksize);
4232         return rval;
4233 }
4234
4235 STATIC int
4236 xfs_bmap_sanity_check(
4237         struct xfs_mount        *mp,
4238         struct xfs_buf          *bp,
4239         int                     level)
4240 {
4241         struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
4242
4243         if (be32_to_cpu(block->bb_magic) != XFS_BMAP_MAGIC ||
4244             be16_to_cpu(block->bb_level) != level ||
4245             be16_to_cpu(block->bb_numrecs) == 0 ||
4246             be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
4247                 return 0;
4248         return 1;
4249 }
4250
4251 /*
4252  * Read in the extents to if_extents.
4253  * All inode fields are set up by caller, we just traverse the btree
4254  * and copy the records in. If the file system cannot contain unwritten
4255  * extents, the records are checked to ensure none has the "state" flag set.
4256  */
4257 int                                     /* error */
4258 xfs_bmap_read_extents(
4259         xfs_trans_t             *tp,    /* transaction pointer */
4260         xfs_inode_t             *ip,    /* incore inode */
4261         int                     whichfork) /* data or attr fork */
4262 {
4263         struct xfs_btree_block  *block; /* current btree block */
4264         xfs_fsblock_t           bno;    /* block # of "block" */
4265         xfs_buf_t               *bp;    /* buffer for "block" */
4266         int                     error;  /* error return value */
4267         xfs_exntfmt_t           exntf;  /* XFS_EXTFMT_NOSTATE, if checking */
4268         xfs_extnum_t            i, j;   /* index into the extents list */
4269         xfs_ifork_t             *ifp;   /* fork structure */
4270         int                     level;  /* btree level, for checking */
4271         xfs_mount_t             *mp;    /* file system mount structure */
4272         __be64                  *pp;    /* pointer to block address */
4273         /* REFERENCED */
4274         xfs_extnum_t            room;   /* number of entries there's room for */
4275
4276         bno = NULLFSBLOCK;
4277         mp = ip->i_mount;
4278         ifp = XFS_IFORK_PTR(ip, whichfork);
4279         exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
4280                                         XFS_EXTFMT_INODE(ip);
4281         block = ifp->if_broot;
4282         /*
4283          * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
4284          */
4285         level = be16_to_cpu(block->bb_level);
4286         ASSERT(level > 0);
4287         pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
4288         bno = be64_to_cpu(*pp);
4289         ASSERT(bno != NULLDFSBNO);
4290         ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
4291         ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
4292         /*
4293          * Go down the tree until leaf level is reached, following the first
4294          * pointer (leftmost) at each level.
4295          */
4296         while (level-- > 0) {
4297                 if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
4298                                 XFS_BMAP_BTREE_REF)))
4299                         return error;
4300                 block = XFS_BUF_TO_BLOCK(bp);
4301                 XFS_WANT_CORRUPTED_GOTO(
4302                         xfs_bmap_sanity_check(mp, bp, level),
4303                         error0);
4304                 if (level == 0)
4305                         break;
4306                 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
4307                 bno = be64_to_cpu(*pp);
4308                 XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
4309                 xfs_trans_brelse(tp, bp);
4310         }
4311         /*
4312          * Here with bp and block set to the leftmost leaf node in the tree.
4313          */
4314         room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4315         i = 0;
4316         /*
4317          * Loop over all leaf nodes.  Copy information to the extent records.
4318          */
4319         for (;;) {
4320                 xfs_bmbt_rec_t  *frp;
4321                 xfs_fsblock_t   nextbno;
4322                 xfs_extnum_t    num_recs;
4323                 xfs_extnum_t    start;
4324
4325
4326                 num_recs = xfs_btree_get_numrecs(block);
4327                 if (unlikely(i + num_recs > room)) {
4328                         ASSERT(i + num_recs <= room);
4329                         xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
4330                                 "corrupt dinode %Lu, (btree extents).",
4331                                 (unsigned long long) ip->i_ino);
4332                         XFS_ERROR_REPORT("xfs_bmap_read_extents(1)",
4333                                          XFS_ERRLEVEL_LOW,
4334                                         ip->i_mount);
4335                         goto error0;
4336                 }
4337                 XFS_WANT_CORRUPTED_GOTO(
4338                         xfs_bmap_sanity_check(mp, bp, 0),
4339                         error0);
4340                 /*
4341                  * Read-ahead the next leaf block, if any.
4342                  */
4343                 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
4344                 if (nextbno != NULLFSBLOCK)
4345                         xfs_btree_reada_bufl(mp, nextbno, 1);
4346                 /*
4347                  * Copy records into the extent records.
4348                  */
4349                 frp = XFS_BMBT_REC_ADDR(mp, block, 1);
4350                 start = i;
4351                 for (j = 0; j < num_recs; j++, i++, frp++) {
4352                         xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
4353                         trp->l0 = be64_to_cpu(frp->l0);
4354                         trp->l1 = be64_to_cpu(frp->l1);
4355                 }
4356                 if (exntf == XFS_EXTFMT_NOSTATE) {
4357                         /*
4358                          * Check all attribute bmap btree records and
4359                          * any "older" data bmap btree records for a
4360                          * set bit in the "extent flag" position.
4361                          */
4362                         if (unlikely(xfs_check_nostate_extents(ifp,
4363                                         start, num_recs))) {
4364                                 XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
4365                                                  XFS_ERRLEVEL_LOW,
4366                                                  ip->i_mount);
4367                                 goto error0;
4368                         }
4369                 }
4370                 xfs_trans_brelse(tp, bp);
4371                 bno = nextbno;
4372                 /*
4373                  * If we've reached the end, stop.
4374                  */
4375                 if (bno == NULLFSBLOCK)
4376                         break;
4377                 if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
4378                                 XFS_BMAP_BTREE_REF)))
4379                         return error;
4380                 block = XFS_BUF_TO_BLOCK(bp);
4381         }
4382         ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
4383         ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
4384         XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
4385         return 0;
4386 error0:
4387         xfs_trans_brelse(tp, bp);
4388         return XFS_ERROR(EFSCORRUPTED);
4389 }
4390
4391 #ifdef DEBUG
4392 /*
4393  * Add bmap trace insert entries for all the contents of the extent records.
4394  */
4395 void
4396 xfs_bmap_trace_exlist(
4397         xfs_inode_t     *ip,            /* incore inode pointer */
4398         xfs_extnum_t    cnt,            /* count of entries in the list */
4399         int             whichfork,      /* data or attr fork */
4400         unsigned long   caller_ip)
4401 {
4402         xfs_extnum_t    idx;            /* extent record index */
4403         xfs_ifork_t     *ifp;           /* inode fork pointer */
4404         int             state = 0;
4405
4406         if (whichfork == XFS_ATTR_FORK)
4407                 state |= BMAP_ATTRFORK;
4408
4409         ifp = XFS_IFORK_PTR(ip, whichfork);
4410         ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
4411         for (idx = 0; idx < cnt; idx++)
4412                 trace_xfs_extlist(ip, idx, whichfork, caller_ip);
4413 }
4414
4415 /*
4416  * Validate that the bmbt_irecs being returned from bmapi are valid
4417  * given the caller's original parameters.  Specifically check the
4418  * ranges of the returned irecs to ensure that they only extend beyond
4419  * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
4420  */
4421 STATIC void
4422 xfs_bmap_validate_ret(
4423         xfs_fileoff_t           bno,
4424         xfs_filblks_t           len,
4425         int                     flags,
4426         xfs_bmbt_irec_t         *mval,
4427         int                     nmap,
4428         int                     ret_nmap)
4429 {
4430         int                     i;              /* index to map values */
4431
4432         ASSERT(ret_nmap <= nmap);
4433
4434         for (i = 0; i < ret_nmap; i++) {
4435                 ASSERT(mval[i].br_blockcount > 0);
4436                 if (!(flags & XFS_BMAPI_ENTIRE)) {
4437                         ASSERT(mval[i].br_startoff >= bno);
4438                         ASSERT(mval[i].br_blockcount <= len);
4439                         ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
4440                                bno + len);
4441                 } else {
4442                         ASSERT(mval[i].br_startoff < bno + len);
4443                         ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
4444                                bno);
4445                 }
4446                 ASSERT(i == 0 ||
4447                        mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
4448                        mval[i].br_startoff);
4449                 if ((flags & XFS_BMAPI_WRITE) && !(flags & XFS_BMAPI_DELAY))
4450                         ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
4451                                mval[i].br_startblock != HOLESTARTBLOCK);
4452                 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
4453                        mval[i].br_state == XFS_EXT_UNWRITTEN);
4454         }
4455 }
4456 #endif /* DEBUG */
4457
4458
4459 /*
4460  * Map file blocks to filesystem blocks.
4461  * File range is given by the bno/len pair.
4462  * Adds blocks to the file if writing ("flags & XFS_BMAPI_WRITE" set)
4463  * into a hole or past eof.
4464  * Only allocates blocks from a single allocation group,
4465  * to avoid locking problems.
4466  * The returned value in "firstblock" from the first call in a transaction
4467  * must be remembered and presented to subsequent calls in "firstblock".
4468  * An upper bound for the number of blocks to be allocated is supplied to
4469  * the first call in "total"; if no allocation group has that many free
4470  * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
4471  */
4472 int                                     /* error */
4473 xfs_bmapi(
4474         xfs_trans_t     *tp,            /* transaction pointer */
4475         xfs_inode_t     *ip,            /* incore inode */
4476         xfs_fileoff_t   bno,            /* starting file offs. mapped */
4477         xfs_filblks_t   len,            /* length to map in file */
4478         int             flags,          /* XFS_BMAPI_... */
4479         xfs_fsblock_t   *firstblock,    /* first allocated block
4480                                            controls a.g. for allocs */
4481         xfs_extlen_t    total,          /* total blocks needed */
4482         xfs_bmbt_irec_t *mval,          /* output: map values */
4483         int             *nmap,          /* i/o: mval size/count */
4484         xfs_bmap_free_t *flist,         /* i/o: list extents to free */
4485         xfs_extdelta_t  *delta)         /* o: change made to incore extents */
4486 {
4487         xfs_fsblock_t   abno;           /* allocated block number */
4488         xfs_extlen_t    alen;           /* allocated extent length */
4489         xfs_fileoff_t   aoff;           /* allocated file offset */
4490         xfs_bmalloca_t  bma = { 0 };    /* args for xfs_bmap_alloc */
4491         xfs_btree_cur_t *cur;           /* bmap btree cursor */
4492         xfs_fileoff_t   end;            /* end of mapped file region */
4493         int             eof;            /* we've hit the end of extents */
4494         xfs_bmbt_rec_host_t *ep;        /* extent record pointer */
4495         int             error;          /* error return */
4496         xfs_bmbt_irec_t got;            /* current file extent record */
4497         xfs_ifork_t     *ifp;           /* inode fork pointer */
4498         xfs_extlen_t    indlen;         /* indirect blocks length */
4499         xfs_extnum_t    lastx;          /* last useful extent number */
4500         int             logflags;       /* flags for transaction logging */
4501         xfs_extlen_t    minleft;        /* min blocks left after allocation */
4502         xfs_extlen_t    minlen;         /* min allocation size */
4503         xfs_mount_t     *mp;            /* xfs mount structure */
4504         int             n;              /* current extent index */
4505         int             nallocs;        /* number of extents alloc'd */
4506         xfs_extnum_t    nextents;       /* number of extents in file */
4507         xfs_fileoff_t   obno;           /* old block number (offset) */
4508         xfs_bmbt_irec_t prev;           /* previous file extent record */
4509         int             tmp_logflags;   /* temp flags holder */
4510         int             whichfork;      /* data or attr fork */
4511         char            inhole;         /* current location is hole in file */
4512         char            wasdelay;       /* old extent was delayed */
4513         char            wr;             /* this is a write request */
4514         char            rt;             /* this is a realtime file */
4515 #ifdef DEBUG
4516         xfs_fileoff_t   orig_bno;       /* original block number value */
4517         int             orig_flags;     /* original flags arg value */
4518         xfs_filblks_t   orig_len;       /* original value of len arg */
4519         xfs_bmbt_irec_t *orig_mval;     /* original value of mval */
4520         int             orig_nmap;      /* original value of *nmap */
4521
4522         orig_bno = bno;
4523         orig_len = len;
4524         orig_flags = flags;
4525         orig_mval = mval;
4526         orig_nmap = *nmap;
4527 #endif
4528         ASSERT(*nmap >= 1);
4529         ASSERT(*nmap <= XFS_BMAP_MAX_NMAP || !(flags & XFS_BMAPI_WRITE));
4530         whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4531                 XFS_ATTR_FORK : XFS_DATA_FORK;
4532         mp = ip->i_mount;
4533         if (unlikely(XFS_TEST_ERROR(
4534             (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4535              XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4536              XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL),
4537              mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4538                 XFS_ERROR_REPORT("xfs_bmapi", XFS_ERRLEVEL_LOW, mp);
4539                 return XFS_ERROR(EFSCORRUPTED);
4540         }
4541         if (XFS_FORCED_SHUTDOWN(mp))
4542                 return XFS_ERROR(EIO);
4543         rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4544         ifp = XFS_IFORK_PTR(ip, whichfork);
4545         ASSERT(ifp->if_ext_max ==
4546                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
4547         if ((wr = (flags & XFS_BMAPI_WRITE)) != 0)
4548                 XFS_STATS_INC(xs_blk_mapw);
4549         else
4550                 XFS_STATS_INC(xs_blk_mapr);
4551         /*
4552          * IGSTATE flag is used to combine extents which
4553          * differ only due to the state of the extents.
4554          * This technique is used from xfs_getbmap()
4555          * when the caller does not wish to see the
4556          * separation (which is the default).
4557          *
4558          * This technique is also used when writing a
4559          * buffer which has been partially written,
4560          * (usually by being flushed during a chunkread),
4561          * to ensure one write takes place. This also
4562          * prevents a change in the xfs inode extents at
4563          * this time, intentionally. This change occurs
4564          * on completion of the write operation, in
4565          * xfs_strat_comp(), where the xfs_bmapi() call
4566          * is transactioned, and the extents combined.
4567          */
4568         if ((flags & XFS_BMAPI_IGSTATE) && wr)  /* if writing unwritten space */
4569                 wr = 0;                         /* no allocations are allowed */
4570         ASSERT(wr || !(flags & XFS_BMAPI_DELAY));
4571         logflags = 0;
4572         nallocs = 0;
4573         cur = NULL;
4574         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4575                 ASSERT(wr && tp);
4576                 if ((error = xfs_bmap_local_to_extents(tp, ip,
4577                                 firstblock, total, &logflags, whichfork)))
4578                         goto error0;
4579         }
4580         if (wr && *firstblock == NULLFSBLOCK) {
4581                 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4582                         minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4583                 else
4584                         minleft = 1;
4585         } else
4586                 minleft = 0;
4587         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4588             (error = xfs_iread_extents(tp, ip, whichfork)))
4589                 goto error0;
4590         ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
4591                 &prev);
4592         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4593         n = 0;
4594         end = bno + len;
4595         obno = bno;
4596         bma.ip = NULL;
4597         if (delta) {
4598                 delta->xed_startoff = NULLFILEOFF;
4599                 delta->xed_blockcount = 0;
4600         }
4601         while (bno < end && n < *nmap) {
4602                 /*
4603                  * Reading past eof, act as though there's a hole
4604                  * up to end.
4605                  */
4606                 if (eof && !wr)
4607                         got.br_startoff = end;
4608                 inhole = eof || got.br_startoff > bno;
4609                 wasdelay = wr && !inhole && !(flags & XFS_BMAPI_DELAY) &&
4610                         isnullstartblock(got.br_startblock);
4611                 /*
4612                  * First, deal with the hole before the allocated space
4613                  * that we found, if any.
4614                  */
4615                 if (wr && (inhole || wasdelay)) {
4616                         /*
4617                          * For the wasdelay case we allocate the entire
4618                          * delayed extent rather than just the range asked
4619                          * for in this call; that gives a better result.
4620                          */
4621                         if (wasdelay) {
4622                                 alen = (xfs_extlen_t)got.br_blockcount;
4623                                 aoff = got.br_startoff;
4624                                 if (lastx != NULLEXTNUM && lastx) {
4625                                         ep = xfs_iext_get_ext(ifp, lastx - 1);
4626                                         xfs_bmbt_get_all(ep, &prev);
4627                                 }
4628                         } else {
4629                                 alen = (xfs_extlen_t)
4630                                         XFS_FILBLKS_MIN(len, MAXEXTLEN);
4631                                 if (!eof)
4632                                         alen = (xfs_extlen_t)
4633                                                 XFS_FILBLKS_MIN(alen,
4634                                                         got.br_startoff - bno);
4635                                 aoff = bno;
4636                         }
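                             /*
                              * A CONTIG request must be satisfied by a single
                              * extent, so the minimum acceptable allocation is
                              * the whole length; otherwise any length will do.
                              */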
4637                         minlen = (flags & XFS_BMAPI_CONTIG) ? alen : 1;
4638                         if (flags & XFS_BMAPI_DELAY) {
4639                                 xfs_extlen_t    extsz;
4640
4641                                 /* Figure out the extent size, adjust alen */
4642                                 extsz = xfs_get_extsz_hint(ip);
4643                                 if (extsz) {
4644                                         error = xfs_bmap_extsize_align(mp,
4645                                                         &got, &prev, extsz,
4646                                                         rt, eof,
4647                                                         flags&XFS_BMAPI_DELAY,
4648                                                         flags&XFS_BMAPI_CONVERT,
4649                                                         &aoff, &alen);
4650                                         ASSERT(!error);
4651                                 }
4652
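                                     /*
                                      * For realtime files, reuse extsz as the
                                      * number of realtime extents covered by
                                      * alen, for the sb accounting below.
                                      */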
4653                                 if (rt)
4654                                         extsz = alen / mp->m_sb.sb_rextsize;
4655
4656                                 /*
4657                                  * Make a transaction-less quota reservation for
4658                                  * delayed allocation blocks.  This number gets
4659                                  * adjusted later.  If it fails and we haven't
4660                                  * produced any mappings in this loop yet, return.
4661                                  */
4662                                 error = xfs_trans_reserve_quota_nblks(
4663                                                 NULL, ip, (long)alen, 0,
4664                                                 rt ? XFS_QMOPT_RES_RTBLKS :
4665                                                      XFS_QMOPT_RES_REGBLKS);
4666                                 if (error) {
4667                                         if (n == 0) {
4668                                                 *nmap = 0;
4669                                                 ASSERT(cur == NULL);
4670                                                 return error;
4671                                         }
4672                                         break;
4673                                 }
4674
4675                                 /*
4676                                  * Update the sb counters separately for alen and
4677                                  * indlen since they may come from different pools.
4678                                  */
4679                                 indlen = (xfs_extlen_t)
4680                                         xfs_bmap_worst_indlen(ip, alen);
4681                                 ASSERT(indlen > 0);
4682
4683                                 if (rt) {
4684                                         error = xfs_mod_incore_sb(mp,
4685                                                         XFS_SBS_FREXTENTS,
4686                                                         -((int64_t)extsz), (flags &
4687                                                         XFS_BMAPI_RSVBLOCKS));
4688                                 } else {
4689                                         error = xfs_mod_incore_sb(mp,
4690                                                         XFS_SBS_FDBLOCKS,
4691                                                         -((int64_t)alen), (flags &
4692                                                         XFS_BMAPI_RSVBLOCKS));
4693                                 }
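                                     /*
                                      * Now reserve the worst-case indirect
                                      * blocks from the free data block pool;
                                      * if that fails, give back the blocks
                                      * reserved above.
                                      */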
4694                                 if (!error) {
4695                                         error = xfs_mod_incore_sb(mp,
4696                                                         XFS_SBS_FDBLOCKS,
4697                                                         -((int64_t)indlen), (flags &
4698                                                         XFS_BMAPI_RSVBLOCKS));
4699                                         if (error && rt)
4700                                                 xfs_mod_incore_sb(mp,
4701                                                         XFS_SBS_FREXTENTS,
4702                                                         (int64_t)extsz, (flags &
4703                                                         XFS_BMAPI_RSVBLOCKS));
4704                                         else if (error)
4705                                                 xfs_mod_incore_sb(mp,
4706                                                         XFS_SBS_FDBLOCKS,
4707                                                         (int64_t)alen, (flags &
4708                                                         XFS_BMAPI_RSVBLOCKS));
4709                                 }
4710
4711                                 if (error) {
4712                                         if (XFS_IS_QUOTA_ON(mp))
4713                                                 /* unreserve the blocks now */
4714                                                 (void)
4715                                                 xfs_trans_unreserve_quota_nblks(
4716                                                         NULL, ip,
4717                                                         (long)alen, 0, rt ?
4718                                                         XFS_QMOPT_RES_RTBLKS :
4719                                                         XFS_QMOPT_RES_REGBLKS);
4720                                         break;
4721                                 }
4722
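                                     /*
                                      * Reservations succeeded: account the
                                      * delayed blocks against the inode and
                                      * encode the indirect reservation in the
                                      * null start block.
                                      */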
4723                                 ip->i_delayed_blks += alen;
4724                                 abno = nullstartblock(indlen);
4725                         } else {
4726                                 /*
4727                                  * The first time through, fill in the
4728                                  * once-only bma fields.
4729                                  */
4730                                 if (bma.ip == NULL) {
4731                                         bma.tp = tp;
4732                                         bma.ip = ip;
4733                                         bma.prevp = &prev;
4734                                         bma.gotp = &got;
4735                                         bma.total = total;
4736                                         bma.userdata = 0;
4737                                 }
4738                                 /* Indicate if this is the first user data
4739                                  * in the file, or just any user data.
4740                                  */
4741                                 if (!(flags & XFS_BMAPI_METADATA)) {
4742                                         bma.userdata = (aoff == 0) ?
4743                                                 XFS_ALLOC_INITIAL_USER_DATA :
4744                                                 XFS_ALLOC_USERDATA;
4745                                 }
4746                                 /*
4747                                  * Fill in changeable bma fields.
4748                                  */
4749                                 bma.eof = eof;
4750                                 bma.firstblock = *firstblock;
4751                                 bma.alen = alen;
4752                                 bma.off = aoff;
4753                                 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4754                                 bma.wasdel = wasdelay;
4755                                 bma.minlen = minlen;
4756                                 bma.low = flist->xbf_low;
4757                                 bma.minleft = minleft;
4758                                 /*
4759                                  * We only want to do the EOF alignment if this
4760                                  * is user data and the allocation length is at
4761                                  * least a stripe unit.
4762                                  */
4763                                 if (mp->m_dalign && alen >= mp->m_dalign &&
4764                                     (!(flags & XFS_BMAPI_METADATA)) &&
4765                                     (whichfork == XFS_DATA_FORK)) {
4766                                         if ((error = xfs_bmap_isaeof(ip, aoff,
4767                                                         whichfork, &bma.aeof)))
4768                                                 goto error0;
4769                                 } else
4770                                         bma.aeof = 0;
4771                                 /*
4772                                  * Call allocator.
4773                                  */
4774                                 if ((error = xfs_bmap_alloc(&bma)))
4775                                         goto error0;
4776                                 /*
4777                                  * Copy out result fields.
4778                                  */
4779                                 abno = bma.rval;
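                                     /*
                                      * If the allocator dropped into low-space
                                      * mode, record that in the free list and
                                      * stop requiring minleft free blocks in
                                      * the AG.
                                      */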
4780                                 if ((flist->xbf_low = bma.low))
4781                                         minleft = 0;
4782                                 alen = bma.alen;
4783                                 aoff = bma.off;
4784                                 ASSERT(*firstblock == NULLFSBLOCK ||
4785                                        XFS_FSB_TO_AGNO(mp, *firstblock) ==
4786                                        XFS_FSB_TO_AGNO(mp, bma.firstblock) ||
4787                                        (flist->xbf_low &&
4788                                         XFS_FSB_TO_AGNO(mp, *firstblock) <
4789                                         XFS_FSB_TO_AGNO(mp, bma.firstblock)));
4790                                 *firstblock = bma.firstblock;
4791                                 if (cur)
4792                                         cur->bc_private.b.firstblock =
4793                                                 *firstblock;
4794                                 if (abno == NULLFSBLOCK)
4795                                         break;
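                                     /*
                                      * The extents are in btree format and we
                                      * don't have a cursor yet; get one so the
                                      * extent insert below can update the
                                      * btree.
                                      */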
4796                                 if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
4797                                         cur = xfs_bmbt_init_cursor(mp, tp,
4798                                                 ip, whichfork);
4799                                         cur->bc_private.b.firstblock =
4800                                                 *firstblock;
4801                                         cur->bc_private.b.flist = flist;
4802                                 }
4803                                 /*
4804                                  * Bump the number of extents we've allocated
4805                                  * in this call.
4806                                  */
4807                                 nallocs++;
4808                         }
4809                         if (cur)
4810                                 cur->bc_private.b.flags =
4811                                         wasdelay ? XFS_BTCUR_BPRV_WASDEL : 0;
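                             /*
                              * Describe the new allocation in 'got' so it can
                              * be inserted into the extent list (and btree)
                              * below.
                              */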
4812                         got.br_startoff = aoff;
4813                         got.br_startblock = abno;
4814                         got.br_blockcount = alen;
4815                         got.br_state = XFS_EXT_NORM;    /* assume normal */
4816                         /*
4817                          * Determine the extent state; it depends on whether the
4818                          * filesystem supports unwritten extents.  A wasdelay
4819                          * extent has been initialized, so don't flag it unwritten.
4820                          */
4821                         if (wr && xfs_sb_version_hasextflgbit(&mp->m_sb)) {
4822                                 if (!wasdelay && (flags & XFS_BMAPI_PREALLOC))
4823                                         got.br_state = XFS_EXT_UNWRITTEN;
4824                         }
4825                         error = xfs_bmap_add_extent(ip, lastx, &cur, &got,
4826                                 firstblock, flist, &tmp_logflags, delta,
4827                                 whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
4828                         logflags |= tmp_logflags;
4829                         if (error)
4830                                 goto error0;
4831                         lastx = ifp->if_lastex;
4832                         ep = xfs_iext_get_ext(ifp, lastx);
4833                         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4834                         xfs_bmbt_get_all(ep, &got);
4835                         ASSERT(got.br_startoff <= aoff);
4836                         ASSERT(got.br_startoff + got.br_blockcount >=
4837                                 aoff + alen);
4838 #ifdef DEBUG
4839                         if (flags & XFS_BMAPI_DELAY) {
4840                                 ASSERT(isnullstartblock(got.br_startblock));
4841                                 ASSERT(startblockval(got.br_startblock) > 0);
4842                         }
4843                         ASSERT(got.br_state == XFS_EXT_NORM ||
4844                                got.br_state == XFS_EXT_UNWRITTEN);
4845 #endif
4846                         /*
4847                          * Fall down into the found allocated space case.
4848                          */
4849                 } else if (inhole) {
4850                         /*
4851                          * Reading in a hole.
4852                          */
4853                         mval->br_startoff = bno;
4854                         mval->br_startblock = HOLESTARTBLOCK;
4855                         mval->br_blockcount =
4856                                 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4857                         mval->br_state = XFS_EXT_NORM;
4858                         bno += mval->br_blockcount;
4859                         len -= mval->br_blockcount;
4860                         mval++;
4861                         n++;
4862                         continue;
4863                 }
4864                 /*
4865                  * Then deal with the allocated space we found.
4866                  */
4867                 ASSERT(ep != NULL);
4868                 if (!(flags & XFS_BMAPI_ENTIRE) &&
4869                     (got.br_startoff + got.br_blockcount > obno)) {
4870                         if (obno > bno)
4871                                 bno = obno;
4872                         ASSERT((bno >= obno) || (n == 0));
4873                         ASSERT(bno < end);
4874                         mval->br_startoff = bno;
4875                         if (isnullstartblock(got.br_startblock)) {
4876                                 ASSERT(!wr || (flags & XFS_BMAPI_DELAY));
4877                                 mval->br_startblock = DELAYSTARTBLOCK;
4878                         } else
4879                                 mval->br_startblock =
4880                                         got.br_startblock +
4881                                         (bno - got.br_startoff);
4882                         /*
4883                          * Return the minimum of what we got and what we
4884                          * asked for, for the length.  We can use the len
4885                          * variable here because it is modified below and
4886                          * we may already have gone around the loop once,
4887                          * if the first part of the allocation didn't
4888                          * overlap what was asked for.
4889                          */
4890                         mval->br_blockcount =
4891                                 XFS_FILBLKS_MIN(end - bno, got.br_blockcount -
4892                                         (bno - got.br_startoff));
4893                         mval->br_state = got.br_state;
4894                         ASSERT(mval->br_blockcount <= len);
4895                 } else {
4896                         *mval = got;
4897                         if (isnullstartblock(mval->br_startblock)) {
4898                                 ASSERT(!wr || (flags & XFS_BMAPI_DELAY));
4899                                 mval->br_startblock = DELAYSTARTBLOCK;
4900                         }
4901                 }
4902
4903                 /*
4904                  * Check if writing previously allocated but
4905                  * unwritten extents.
4906                  */
4907                 if (wr && mval->br_state == XFS_EXT_UNWRITTEN &&
4908                     ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) {
4909                         /*
4910                          * We're writing it, so convert the extent to written state.
4911                          */
4912                         ASSERT(mval->br_blockcount <= len);
4913                         if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
4914                                 cur = xfs_bmbt_init_cursor(mp,
4915                                         tp, ip, whichfork);
4916                                 cur->bc_private.b.firstblock =
4917                                         *firstblock;
4918                                 cur->bc_private.b.flist = flist;
4919                         }
4920                         mval->br_state = XFS_EXT_NORM;
4921                         error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
4922                                 firstblock, flist, &tmp_logflags, delta,
4923                                 whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
4924                         logflags |= tmp_logflags;
4925                         if (error)
4926                                 goto error0;
4927                         lastx = ifp->if_lastex;
4928                         ep = xfs_iext_get_ext(ifp, lastx);
4929                         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4930                         xfs_bmbt_get_all(ep, &got);
4931                         /*
4932                          * We may have combined previously unwritten
4933                          * space with written space, so generate
4934                          * another request.
4935                          */
4936                         if (mval->br_blockcount < len)
4937                                 continue;
4938                 }
4939
4940                 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
4941                        ((mval->br_startoff + mval->br_blockcount) <= end));
4942                 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
4943                        (mval->br_blockcount <= len) ||
4944                        (mval->br_startoff < obno));
4945                 bno = mval->br_startoff + mval->br_blockcount;
4946                 len = end - bno;
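                     /*
                      * Coalesce this mapping with the previous one where possible:
                      * when it extends the previous map from the same offset, when
                      * real extents are physically contiguous (and the states match
                      * or IGSTATE is set), or when delayed extents are adjacent.
                      * Otherwise move on to the next map entry, unless this is the
                      * first map and it lies entirely before the requested range.
                      */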
4947                 if (n > 0 && mval->br_startoff == mval[-1].br_startoff) {
4948                         ASSERT(mval->br_startblock == mval[-1].br_startblock);
4949                         ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
4950                         ASSERT(mval->br_state == mval[-1].br_state);
4951                         mval[-1].br_blockcount = mval->br_blockcount;
4952                         mval[-1].br_state = mval->br_state;
4953                 } else if (n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
4954                            mval[-1].br_startblock != DELAYSTARTBLOCK &&
4955                            mval[-1].br_startblock != HOLESTARTBLOCK &&
4956                            mval->br_startblock ==
4957                            mval[-1].br_startblock + mval[-1].br_blockcount &&
4958                            ((flags & XFS_BMAPI_IGSTATE) ||
4959                                 mval[-1].br_state == mval->br_state)) {
4960                         ASSERT(mval->br_startoff ==
4961                                mval[-1].br_startoff + mval[-1].br_blockcount);
4962                         mval[-1].br_blockcount += mval->br_blockcount;
4963                 } else if (n > 0 &&
4964                            mval->br_startblock == DELAYSTARTBLOCK &&
4965                            mval[-1].br_startblock == DELAYSTARTBLOCK &&
4966                            mval->br_startoff ==
4967                            mval[-1].br_startoff + mval[-1].br_blockcount) {
4968                         mval[-1].br_blockcount += mval->br_blockcount;
4969                         mval[-1].br_state = mval->br_state;
4970                 } else if (!((n == 0) &&
4971                              ((mval->br_startoff + mval->br_blockcount) <=
4972                               obno))) {
4973                         mval++;
4974                         n++;
4975                 }
4976                 /*
4977                  * If we're done, stop now.  Also stop once we've mapped or
4978                  * allocated *nmap extents, no matter what; otherwise the
4979                  * transaction may get too big.
4980                  */
4981                 if (bno >= end || n >= *nmap || nallocs >= *nmap)
4982                         break;
4983                 /*
4984                  * Else go on to the next record.
4985                  */
4986                 ep = xfs_iext_get_ext(ifp, ++lastx);
4987                 prev = got;
4988                 if (lastx >= nextents)
4989                         eof = 1;
4990                 else
4991                         xfs_bmbt_get_all(ep, &got);
4992         }
4993         ifp->if_lastex = lastx;
4994         *nmap = n;
4995         /*
4996          * Transform from btree to extents, give it cur.
4997          */
4998         if (tp && XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
4999             XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
5000                 ASSERT(wr && cur);
5001                 error = xfs_bmap_btree_to_extents(tp, ip, cur,
5002                         &tmp_logflags, whichfork);
5003                 logflags |= tmp_logflags;
5004                 if (error)
5005                         goto error0;
5006         }
5007         ASSERT(ifp->if_ext_max ==
5008                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5009         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
5010                XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
5011         error = 0;
5012         if (delta && delta->xed_startoff != NULLFILEOFF) {
5013                 /* A change was actually made.
5014                  * Note that delta->xed_blockcount is an offset at this
5015                  * point and needs to be converted to a block count.
5016                  */
5017                 ASSERT(delta->xed_blockcount > delta->xed_startoff);
5018                 delta->xed_blockcount -= delta->xed_startoff;
5019         }
5020 error0:
5021         /*
5022          * Log everything.  Do this after conversion, there's no point in
5023          * logging the extent records if we've converted to btree format.
5024          */
5025         if ((logflags & xfs_ilog_fext(whichfork)) &&
5026             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5027                 logflags &= ~xfs_ilog_fext(whichfork);
5028         else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5029                  XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5030                 logflags &= ~xfs_ilog_fbroot(whichfork);
5031         /*
5032          * Log whatever the flags say, even on error.  Otherwise we might miss
5033          * a case where the data was changed, an error occurred, and nothing was
5034          * logged, so we wouldn't shut down when we should.
5035          */
5036         if (logflags) {
5037                 ASSERT(tp && wr);
5038                 xfs_trans_log_inode(tp, ip, logflags);
5039         }
5040         if (cur) {
5041                 if (!error) {
5042                         ASSERT(*firstblock == NULLFSBLOCK ||
5043                                XFS_FSB_TO_AGNO(mp, *firstblock) ==
5044                                XFS_FSB_TO_AGNO(mp,
5045                                        cur->bc_private.b.firstblock) ||
5046                                (flist->xbf_low &&
5047                                 XFS_FSB_TO_AGNO(mp, *firstblock) <
5048                                 XFS_FSB_TO_AGNO(mp,
5049                                         cur->bc_private.b.firstblock)));
5050                         *firstblock = cur->bc_private.b.firstblock;
5051                 }
5052                 xfs_btree_del_cursor(cur,
5053                         error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5054         }
5055         if (!error)
5056                 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
5057                         orig_nmap, *nmap);
5058         return error;
5059 }
5060
5061 /*
5062  * Map file blocks to filesystem blocks, simple version.
5063  * One block (extent) only, read-only.
5064  * The fork is selected directly via the whichfork argument;
5065  * the effect is as if XFS_BMAPI_METADATA was set and all
5066  * other XFS_BMAPI_* flags were clear.
5067  */
5068 int                                             /* error */
5069 xfs_bmapi_single(
5070         xfs_trans_t     *tp,            /* transaction pointer */
5071         xfs_inode_t     *ip,            /* incore inode */
5072         int             whichfork,      /* data or attr fork */
5073         xfs_fsblock_t   *fsb,           /* output: mapped block */
5074         xfs_fileoff_t   bno)            /* starting file offs. mapped */
5075 {
5076         int             eof;            /* we've hit the end of extents */
5077         int             error;          /* error return */
5078         xfs_bmbt_irec_t got;            /* current file extent record */
5079         xfs_ifork_t     *ifp;           /* inode fork pointer */
5080         xfs_extnum_t    lastx;          /* last useful extent number */
5081         xfs_bmbt_irec_t prev;           /* previous file extent record */
5082
5083         ifp = XFS_IFORK_PTR(ip, whichfork);
5084         if (unlikely(
5085             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
5086             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)) {
5087                XFS_ERROR_REPORT("xfs_bmapi_single", XFS_ERRLEVEL_LOW,
5088                                 ip->i_mount);
5089                return XFS_ERROR(EFSCORRUPTED);
5090         }
5091         if (XFS_FORCED_SHUTDOWN(ip->i_mount))
5092                 return XFS_ERROR(EIO);
5093         XFS_STATS_INC(xs_blk_mapr);
5094         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5095             (error = xfs_iread_extents(tp, ip, whichfork)))
5096                 return error;
5097         (void)xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5098                 &prev);
5099         /*
5100          * Reading past eof, act as though there's a hole
5101          * up to end.
5102          */
5103         if (eof || got.br_startoff > bno) {
5104                 *fsb = NULLFSBLOCK;
5105                 return 0;
5106         }
5107         ASSERT(!isnullstartblock(got.br_startblock));
5108         ASSERT(bno < got.br_startoff + got.br_blockcount);
5109         *fsb = got.br_startblock + (bno - got.br_startoff);
5110         ifp->if_lastex = lastx;
5111         return 0;
5112 }
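
     /*
      * Illustrative use of xfs_bmapi_single().  This is a sketch only, not
      * taken from any caller in this file; the variable names are hypothetical
      * and the caller is assumed to already hold the inode lock:
      *
      *	xfs_fsblock_t	fsb;
      *	int		error;
      *
      *	error = xfs_bmapi_single(NULL, ip, XFS_DATA_FORK, &fsb, offset_fsb);
      *	if (!error && fsb != NULLFSBLOCK)
      *		use_mapping(fsb);	(use_mapping is a placeholder)
      *
      * A NULLFSBLOCK result means the offset falls in a hole or past EOF.
      */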
5113
5114 /*
5115  * Unmap (remove) blocks from a file.
5116  * If nexts is nonzero then the number of extents to remove is limited to
5117  * that value.  *done is set once the whole block range has been unmapped;
5118  * if the extent limit stops us before that, *done is left clear.
5119  */
5120 int                                             /* error */
5121 xfs_bunmapi(
5122         xfs_trans_t             *tp,            /* transaction pointer */
5123         struct xfs_inode        *ip,            /* incore inode */
5124         xfs_fileoff_t           bno,            /* starting offset to unmap */
5125         xfs_filblks_t           len,            /* length to unmap in file */
5126         int                     flags,          /* misc flags */
5127         xfs_extnum_t            nexts,          /* number of extents max */
5128         xfs_fsblock_t           *firstblock,    /* first allocated block
5129                                                    controls a.g. for allocs */
5130         xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
5131         xfs_extdelta_t          *delta,         /* o: change made to incore
5132                                                    extents */
5133         int                     *done)          /* set if nothing left to unmap */
5134 {
5135         xfs_btree_cur_t         *cur;           /* bmap btree cursor */
5136         xfs_bmbt_irec_t         del;            /* extent being deleted */
5137         int                     eof;            /* is deleting at eof */
5138         xfs_bmbt_rec_host_t     *ep;            /* extent record pointer */
5139         int                     error;          /* error return value */
5140         xfs_extnum_t            extno;          /* extent number in list */
5141         xfs_bmbt_irec_t         got;            /* current extent record */
5142         xfs_ifork_t             *ifp;           /* inode fork pointer */
5143         int                     isrt;           /* freeing in rt area */
5144         xfs_extnum_t            lastx;          /* last extent index used */
5145         int                     logflags;       /* transaction logging flags */
5146         xfs_extlen_t            mod;            /* rt extent offset */
5147         xfs_mount_t             *mp;            /* mount structure */
5148         xfs_extnum_t            nextents;       /* number of file extents */
5149         xfs_bmbt_irec_t         prev;           /* previous extent record */
5150         xfs_fileoff_t           start;          /* first file offset deleted */
5151         int                     tmp_logflags;   /* partial logging flags */
5152         int                     wasdel;         /* was a delayed alloc extent */
5153         int                     whichfork;      /* data or attribute fork */
5154         int                     rsvd;           /* OK to allocate reserved blocks */
5155         xfs_fsblock_t           sum;
5156
5157         trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5158
5159         whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
5160                 XFS_ATTR_FORK : XFS_DATA_FORK;
5161         ifp = XFS_IFORK_PTR(ip, whichfork);
5162         if (unlikely(
5163             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5164             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5165                 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5166                                  ip->i_mount);
5167                 return XFS_ERROR(EFSCORRUPTED);
5168         }
5169         mp = ip->i_mount;
5170         if (XFS_FORCED_SHUTDOWN(mp))
5171                 return XFS_ERROR(EIO);
5172         rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
5173         ASSERT(len > 0);
5174         ASSERT(nexts >= 0);
5175         ASSERT(ifp->if_ext_max ==
5176                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5177         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5178             (error = xfs_iread_extents(tp, ip, whichfork)))
5179                 return error;
5180         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5181         if (nextents == 0) {
5182                 *done = 1;
5183                 return 0;
5184         }
5185         XFS_STATS_INC(xs_blk_unmap);
5186         isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5187         start = bno;
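             /*
              * Unmapping works backwards: start from the last block of the
              * range and walk toward 'start'.
              */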
5188         bno = start + len - 1;
5189         ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5190                 &prev);
5191         if (delta) {
5192                 delta->xed_startoff = NULLFILEOFF;
5193                 delta->xed_blockcount = 0;
5194         }
5195         /*
5196          * Check to see if the given block number is past the end of the
5197          * file; if so, back up to the last block.
5198          */
5199         if (eof) {
5200                 ep = xfs_iext_get_ext(ifp, --lastx);
5201                 xfs_bmbt_get_all(ep, &got);
5202                 bno = got.br_startoff + got.br_blockcount - 1;
5203         }
5204         logflags = 0;
5205         if (ifp->if_flags & XFS_IFBROOT) {
5206                 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5207                 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5208                 cur->bc_private.b.firstblock = *firstblock;
5209                 cur->bc_private.b.flist = flist;
5210                 cur->bc_private.b.flags = 0;
5211         } else
5212                 cur = NULL;
5213         extno = 0;
5214         while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5215                (nexts == 0 || extno < nexts)) {
5216                 /*
5217                  * Is the found extent after a hole in which bno lives?
5218                  * Just back up to the previous extent, if so.
5219                  */
5220                 if (got.br_startoff > bno) {
5221                         if (--lastx < 0)
5222                                 break;
5223                         ep = xfs_iext_get_ext(ifp, lastx);
5224                         xfs_bmbt_get_all(ep, &got);
5225                 }
5226                 /*
5227                  * Is the last block of this extent before the range
5228                  * we're supposed to delete?  If so, we're done.
5229                  */
5230                 bno = XFS_FILEOFF_MIN(bno,
5231                         got.br_startoff + got.br_blockcount - 1);
5232                 if (bno < start)
5233                         break;
5234                 /*
5235                  * Then deal with the (possibly delayed) allocated space
5236                  * we found.
5237                  */
5238                 ASSERT(ep != NULL);
5239                 del = got;
5240                 wasdel = isnullstartblock(del.br_startblock);
5241                 if (got.br_startoff < start) {
5242                         del.br_startoff = start;
5243                         del.br_blockcount -= start - got.br_startoff;
5244                         if (!wasdel)
5245                                 del.br_startblock += start - got.br_startoff;
5246                 }
5247                 if (del.br_startoff + del.br_blockcount > bno + 1)
5248                         del.br_blockcount = bno + 1 - del.br_startoff;
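                     /*
                      * Realtime extents can only be freed whole, so check
                      * whether the end of the candidate range is
                      * realtime-extent aligned.
                      */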
5249                 sum = del.br_startblock + del.br_blockcount;
5250                 if (isrt &&
5251                     (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5252                         /*
5253                          * Realtime extent not lined up at the end.
5254                          * The extent could have been split into written
5255                          * and unwritten pieces, or we could just be
5256                          * unmapping part of it.  But we can't really
5257                          * get rid of part of a realtime extent.
5258                          */
5259                         if (del.br_state == XFS_EXT_UNWRITTEN ||
5260                             !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5261                                 /*
5262                                  * This piece is unwritten, or we're not
5263                                  * using unwritten extents.  Skip over it.
5264                                  */
5265                                 ASSERT(bno >= mod);
5266                                 bno -= mod > del.br_blockcount ?
5267                                         del.br_blockcount : mod;
5268                                 if (bno < got.br_startoff) {
5269                                         if (--lastx >= 0)
5270                                                 xfs_bmbt_get_all(xfs_iext_get_ext(
5271                                                         ifp, lastx), &got);
5272                                 }
5273                                 continue;
5274                         }
5275                         /*
5276                          * It's written, turn it unwritten.
5277                          * This is better than zeroing it.
5278                          */
5279                         ASSERT(del.br_state == XFS_EXT_NORM);
5280                         ASSERT(xfs_trans_get_block_res(tp) > 0);
5281                         /*
5282                          * If this spans a realtime extent boundary,
5283                          * chop it back to the start of the one we end at.
5284                          */
5285                         if (del.br_blockcount > mod) {
5286                                 del.br_startoff += del.br_blockcount - mod;
5287                                 del.br_startblock += del.br_blockcount - mod;
5288                                 del.br_blockcount = mod;
5289                         }
5290                         del.br_state = XFS_EXT_UNWRITTEN;
5291                         error = xfs_bmap_add_extent(ip, lastx, &cur, &del,
5292                                 firstblock, flist, &logflags, delta,
5293                                 XFS_DATA_FORK, 0);
5294                         if (error)
5295                                 goto error0;
5296                         goto nodelete;
5297                 }
5298                 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5299                         /*
5300                          * Realtime extent is lined up at the end but not
5301                          * at the front.  We'll get rid of full extents if
5302                          * we can.
5303                          */
5304                         mod = mp->m_sb.sb_rextsize - mod;
5305                         if (del.br_blockcount > mod) {
5306                                 del.br_blockcount -= mod;
5307                                 del.br_startoff += mod;
5308                                 del.br_startblock += mod;
5309                         } else if ((del.br_startoff == start &&
5310                                     (del.br_state == XFS_EXT_UNWRITTEN ||
5311                                      xfs_trans_get_block_res(tp) == 0)) ||
5312                                    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5313                                 /*
5314                                  * Can't make it unwritten.  There isn't
5315                                  * a full extent here so just skip it.
5316                                  */
5317                                 ASSERT(bno >= del.br_blockcount);
5318                                 bno -= del.br_blockcount;
5319                                 if (bno < got.br_startoff) {
5320                                         if (--lastx >= 0)
5321                                                 xfs_bmbt_get_all(--ep, &got);
5322                                 }
5323                                 continue;
5324                         } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5325                                 /*
5326                                  * This one is already unwritten.
5327                                  * It must have a written left neighbor.
5328                                  * Unwrite the killed part of that one and
5329                                  * try again.
5330                                  */
5331                                 ASSERT(lastx > 0);
5332                                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
5333                                                 lastx - 1), &prev);
5334                                 ASSERT(prev.br_state == XFS_EXT_NORM);
5335                                 ASSERT(!isnullstartblock(prev.br_startblock));
5336                                 ASSERT(del.br_startblock ==
5337                                        prev.br_startblock + prev.br_blockcount);
5338                                 if (prev.br_startoff < start) {
5339                                         mod = start - prev.br_startoff;
5340                                         prev.br_blockcount -= mod;
5341                                         prev.br_startblock += mod;
5342                                         prev.br_startoff = start;
5343                                 }
5344                                 prev.br_state = XFS_EXT_UNWRITTEN;
5345                                 error = xfs_bmap_add_extent(ip, lastx - 1, &cur,
5346                                         &prev, firstblock, flist, &logflags,
5347                                         delta, XFS_DATA_FORK, 0);
5348                                 if (error)
5349                                         goto error0;
5350                                 goto nodelete;
5351                         } else {
5352                                 ASSERT(del.br_state == XFS_EXT_NORM);
5353                                 del.br_state = XFS_EXT_UNWRITTEN;
5354                                 error = xfs_bmap_add_extent(ip, lastx, &cur,
5355                                         &del, firstblock, flist, &logflags,
5356                                         delta, XFS_DATA_FORK, 0);
5357                                 if (error)
5358                                         goto error0;
5359                                 goto nodelete;
5360                         }
5361                 }
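                     /*
                      * A delayed allocation has no blocks on disk; just return
                      * the reserved space and quota before deleting the extent
                      * record.
                      */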
5362                 if (wasdel) {
5363                         ASSERT(startblockval(del.br_startblock) > 0);
5364                         /* Update realtime/data freespace, unreserve quota */
5365                         if (isrt) {
5366                                 xfs_filblks_t rtexts;
5367
5368                                 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5369                                 do_div(rtexts, mp->m_sb.sb_rextsize);
5370                                 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
5371                                                 (int64_t)rtexts, rsvd);
5372                                 (void)xfs_trans_reserve_quota_nblks(NULL,
5373                                         ip, -((long)del.br_blockcount), 0,
5374                                         XFS_QMOPT_RES_RTBLKS);
5375                         } else {
5376                                 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
5377                                                 (int64_t)del.br_blockcount, rsvd);
5378                                 (void)xfs_trans_reserve_quota_nblks(NULL,
5379                                         ip, -((long)del.br_blockcount), 0,
5380                                         XFS_QMOPT_RES_REGBLKS);
5381                         }
5382                         ip->i_delayed_blks -= del.br_blockcount;
5383                         if (cur)
5384                                 cur->bc_private.b.flags |=
5385                                         XFS_BTCUR_BPRV_WASDEL;
5386                 } else if (cur)
5387                         cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5388                 /*
5389                  * If it's the case where the directory code is running
5390                  * with no block reservation, and the deleted block is in
5391                  * the middle of its extent, and the resulting insert
5392                  * of an extent would cause transformation to btree format,
5393                  * then reject it.  The calling code will then swap
5394                  * blocks around instead.
5395                  * We have to do this now, rather than waiting for the
5396                  * conversion to btree format, since the transaction
5397                  * will be dirty.
5398                  */
5399                 if (!wasdel && xfs_trans_get_block_res(tp) == 0 &&
5400                     XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5401                     XFS_IFORK_NEXTENTS(ip, whichfork) >= ifp->if_ext_max &&
5402                     del.br_startoff > got.br_startoff &&
5403                     del.br_startoff + del.br_blockcount <
5404                     got.br_startoff + got.br_blockcount) {
5405                         error = XFS_ERROR(ENOSPC);
5406                         goto error0;
5407                 }
5408                 error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del,
5409                                 &tmp_logflags, delta, whichfork, rsvd);
5410                 logflags |= tmp_logflags;
5411                 if (error)
5412                         goto error0;
5413                 bno = del.br_startoff - 1;
5414 nodelete:
5415                 lastx = ifp->if_lastex;
5416                 /*
5417                  * If not done go on to the next (previous) record.
5418                  * Reset ep in case the extents array was re-alloced.
5419                  */
5420                 ep = xfs_iext_get_ext(ifp, lastx);
5421                 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5422                         if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) ||
5423                             xfs_bmbt_get_startoff(ep) > bno) {
5424                                 if (--lastx >= 0)
5425                                         ep = xfs_iext_get_ext(ifp, lastx);
5426                         }
5427                         if (lastx >= 0)
5428                                 xfs_bmbt_get_all(ep, &got);
5429                         extno++;
5430                 }
5431         }
5432         ifp->if_lastex = lastx;
5433         *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
5434         ASSERT(ifp->if_ext_max ==
5435                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5436         /*
5437          * Convert to a btree if necessary.
5438          */
5439         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5440             XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
5441                 ASSERT(cur == NULL);
5442                 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
5443                         &cur, 0, &tmp_logflags, whichfork);
5444                 logflags |= tmp_logflags;
5445                 if (error)
5446                         goto error0;
5447         }
5448         /*
5449          * Transform from btree to extents, giving it the cursor.
5450          */
5451         else if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
5452                  XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
5453                 ASSERT(cur != NULL);
5454                 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5455                         whichfork);
5456                 logflags |= tmp_logflags;
5457                 if (error)
5458                         goto error0;
5459         }
5460         /*
5461          * Transform from extents to local?  (Not done here.)
5462          */
5463         ASSERT(ifp->if_ext_max ==
5464                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5465         error = 0;
5466         if (delta && delta->xed_startoff != NULLFILEOFF) {
5467                 /* A change was actually made.
5468                  * Note that delta->xed_blockcount is an offset at this
5469                  * point and needs to be converted to a block count.
5470                  */
5471                 ASSERT(delta->xed_blockcount > delta->xed_startoff);
5472                 delta->xed_blockcount -= delta->xed_startoff;
5473         }
5474 error0:
5475         /*
5476          * Log everything.  Do this after conversion, there's no point in
5477          * logging the extent records if we've converted to btree format.
5478          */
5479         if ((logflags & xfs_ilog_fext(whichfork)) &&
5480             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5481                 logflags &= ~xfs_ilog_fext(whichfork);
5482         else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5483                  XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5484                 logflags &= ~xfs_ilog_fbroot(whichfork);
5485         /*
5486          * Log the inode even in the error case; if the transaction
5487          * is dirty we'll need to shut down the filesystem.
5488          */
5489         if (logflags)
5490                 xfs_trans_log_inode(tp, ip, logflags);
5491         if (cur) {
5492                 if (!error) {
5493                         *firstblock = cur->bc_private.b.firstblock;
5494                         cur->bc_private.b.allocated = 0;
5495                 }
5496                 xfs_btree_del_cursor(cur,
5497                         error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5498         }
5499         return error;
5500 }
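
     /*
      * Illustrative caller pattern for xfs_bunmapi().  A sketch only, not code
      * from this file; the helper and variable names are assumptions.  The
      * usual loop frees the unmapped blocks after each pass and repeats until
      * *done is set:
      *
      *	xfs_bmap_free_t	free_list;
      *	xfs_fsblock_t	first_block;
      *	int		committed, done = 0;
      *
      *	do {
      *		xfs_bmap_init(&free_list, &first_block);
      *		error = xfs_bunmapi(tp, ip, start_fsb, len_fsb, 0, 2,
      *				    &first_block, &free_list, NULL, &done);
      *		if (!error)
      *			error = xfs_bmap_finish(&tp, &free_list, &committed);
      *		if (error)
      *			break;
      *	} while (!done);
      */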
5501
5502 /*
5503  * Returns 1 for success, 0 if we failed to map the extent.
5504  */
5505 STATIC int
5506 xfs_getbmapx_fix_eof_hole(
5507         xfs_inode_t             *ip,            /* xfs incore inode pointer */
5508         struct getbmapx         *out,           /* output structure */
5509         int                     prealloced,     /* this is a file with
5510                                                  * preallocated data space */
5511         __int64_t               end,            /* last block requested */
5512         xfs_fsblock_t           startblock)
5513 {
5514         __int64_t               fixlen;
5515         xfs_mount_t             *mp;            /* file system mount point */
5516         xfs_ifork_t             *ifp;           /* inode fork pointer */
5517         xfs_extnum_t            lastx;          /* last extent pointer */
5518         xfs_fileoff_t           fileblock;
5519
5520         if (startblock == HOLESTARTBLOCK) {
5521                 mp = ip->i_mount;
5522                 out->bmv_block = -1;
5523                 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, ip->i_size));
5524                 fixlen -= out->bmv_offset;
5525                 if (prealloced && out->bmv_offset + out->bmv_length == end) {
5526                         /* Came to hole at EOF. Trim it. */
5527                         if (fixlen <= 0)
5528                                 return 0;
5529                         out->bmv_length = fixlen;
5530                 }
5531         } else {
5532                 if (startblock == DELAYSTARTBLOCK)
5533                         out->bmv_block = -2;
5534                 else
5535                         out->bmv_block = xfs_fsb_to_db(ip, startblock);
5536                 fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
5537                 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
5538                 if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
5539                    (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
5540                         out->bmv_oflags |= BMV_OF_LAST;
5541         }
5542
5543         return 1;
5544 }
5545
5546 /*
5547  * Get inode's extents as described in bmv, and format for output.
5548  * Calls formatter to fill the user's buffer until all extents
5549  * are mapped, until the passed-in bmv->bmv_count slots have
5550  * been filled, or until the formatter short-circuits the loop,
5551  * if it is tracking filled-in extents on its own.
5552  */
5553 int                                             /* error code */
5554 xfs_getbmap(
5555         xfs_inode_t             *ip,
5556         struct getbmapx         *bmv,           /* user bmap structure */
5557         xfs_bmap_format_t       formatter,      /* format to user */
5558         void                    *arg)           /* formatter arg */
5559 {
5560         __int64_t               bmvend;         /* last block requested */
5561         int                     error = 0;      /* return value */
5562         __int64_t               fixlen;         /* length for -1 case */
5563         int                     i;              /* extent number */
5564         int                     lock;           /* lock state */
5565         xfs_bmbt_irec_t         *map;           /* buffer for user's data */
5566         xfs_mount_t             *mp;            /* file system mount point */
5567         int                     nex;            /* # of user extents can do */
5568         int                     nexleft;        /* # of user extents left */
5569         int                     subnex;         /* # of bmapi's can do */
5570         int                     nmap;           /* number of map entries */
5571         struct getbmapx         *out;           /* output structure */
5572         int                     whichfork;      /* data or attr fork */
5573         int                     prealloced;     /* this is a file with
5574                                                  * preallocated data space */
5575         int                     iflags;         /* interface flags */
5576         int                     bmapi_flags;    /* flags for xfs_bmapi */
5577         int                     cur_ext = 0;
5578
5579         mp = ip->i_mount;
5580         iflags = bmv->bmv_iflags;
5581         whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
5582
5583         if (whichfork == XFS_ATTR_FORK) {
5584                 if (XFS_IFORK_Q(ip)) {
5585                         if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
5586                             ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
5587                             ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
5588                                 return XFS_ERROR(EINVAL);
5589                 } else if (unlikely(
5590                            ip->i_d.di_aformat != 0 &&
5591                            ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
5592                         XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
5593                                          ip->i_mount);
5594                         return XFS_ERROR(EFSCORRUPTED);
5595                 }
5596
5597                 prealloced = 0;
5598                 fixlen = 1LL << 32;
5599         } else {
5600                 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
5601                     ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
5602                     ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
5603                         return XFS_ERROR(EINVAL);
5604
5605                 if (xfs_get_extsz_hint(ip) ||
5606                     ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
5607                         prealloced = 1;
5608                         fixlen = XFS_MAXIOFFSET(mp);
5609                 } else {
5610                         prealloced = 0;
5611                         fixlen = ip->i_size;
5612                 }
5613         }
5614
5615         if (bmv->bmv_length == -1) {
5616                 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
5617                 bmv->bmv_length =
5618                         max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
5619         } else if (bmv->bmv_length == 0) {
5620                 bmv->bmv_entries = 0;
5621                 return 0;
5622         } else if (bmv->bmv_length < 0) {
5623                 return XFS_ERROR(EINVAL);
5624         }
5625
5626         nex = bmv->bmv_count - 1;
5627         if (nex <= 0)
5628                 return XFS_ERROR(EINVAL);
5629         bmvend = bmv->bmv_offset + bmv->bmv_length;
5630
5631
5632         if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
5633                 return XFS_ERROR(ENOMEM);
5634         out = kmem_zalloc(bmv->bmv_count * sizeof(struct getbmapx), KM_MAYFAIL);
5635         if (!out)
5636                 return XFS_ERROR(ENOMEM);
5637
5638         xfs_ilock(ip, XFS_IOLOCK_SHARED);
5639         if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
5640                 if (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size) {
5641                         error = xfs_flush_pages(ip, 0, -1, 0, FI_REMAPF);
5642                         if (error)
5643                                 goto out_unlock_iolock;
5644                 }
5645
5646                 ASSERT(ip->i_delayed_blks == 0);
5647         }
5648
5649         lock = xfs_ilock_map_shared(ip);
5650
5651         /*
5652          * Don't let nex be bigger than the number of extents
5653          * we can have assuming alternating holes and real extents.
5654          */
5655         if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
5656                 nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
5657
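             /*
              * Unless the caller asked to see unwritten (preallocated)
              * extents as such, pass XFS_BMAPI_IGSTATE so that contiguous
              * written and unwritten extents are reported as one mapping.
              */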
5658         bmapi_flags = xfs_bmapi_aflag(whichfork);
5659         if (!(iflags & BMV_IF_PREALLOC))
5660                 bmapi_flags |= XFS_BMAPI_IGSTATE;
5661
5662         /*
5663          * Allocate enough space to handle "subnex" maps at a time.
5664          */
5665         error = ENOMEM;
5666         subnex = 16;
5667         map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
5668         if (!map)
5669                 goto out_unlock_ilock;
5670
5671         bmv->bmv_entries = 0;
5672
5673         if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
5674             (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
5675                 error = 0;
5676                 goto out_free_map;
5677         }
5678
5679         nexleft = nex;
5680
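             /*
              * Walk the requested range in chunks of at most "subnex"
              * mappings, converting each xfs_bmbt_irec returned by
              * xfs_bmapi() into a getbmapx entry for the caller.
              */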
5681         do {
5682                 nmap = (nexleft > subnex) ? subnex : nexleft;
5683                 error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
5684                                   XFS_BB_TO_FSB(mp, bmv->bmv_length),
5685                                   bmapi_flags, NULL, 0, map, &nmap,
5686                                   NULL, NULL);
5687                 if (error)
5688                         goto out_free_map;
5689                 ASSERT(nmap <= subnex);
5690
5691                 for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
5692                         out[cur_ext].bmv_oflags = 0;
5693                         if (map[i].br_state == XFS_EXT_UNWRITTEN)
5694                                 out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
5695                         else if (map[i].br_startblock == DELAYSTARTBLOCK)
5696                                 out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
5697                         out[cur_ext].bmv_offset =
5698                                 XFS_FSB_TO_BB(mp, map[i].br_startoff);
5699                         out[cur_ext].bmv_length =
5700                                 XFS_FSB_TO_BB(mp, map[i].br_blockcount);
5701                         out[cur_ext].bmv_unused1 = 0;
5702                         out[cur_ext].bmv_unused2 = 0;
5703                         ASSERT(((iflags & BMV_IF_DELALLOC) != 0) ||
5704                               (map[i].br_startblock != DELAYSTARTBLOCK));
5705                         if (map[i].br_startblock == HOLESTARTBLOCK &&
5706                             whichfork == XFS_ATTR_FORK) {
5707                                 /* came to the end of attribute fork */
5708                                 out[cur_ext].bmv_oflags |= BMV_OF_LAST;
5709                                 goto out_free_map;
5710                         }
5711
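                             /*
                              * Let xfs_getbmapx_fix_eof_hole() decide how a
                              * hole that runs out past EOF should be shown;
                              * a zero return means there is nothing further
                              * worth reporting, so stop the walk here.
                              */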
5712                         if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
5713                                         prealloced, bmvend,
5714                                         map[i].br_startblock))
5715                                 goto out_free_map;
5716
5717                         nexleft--;
5718                         bmv->bmv_offset =
5719                                 out[cur_ext].bmv_offset +
5720                                 out[cur_ext].bmv_length;
5721                         bmv->bmv_length =
5722                                 max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
5723                         bmv->bmv_entries++;
5724                         cur_ext++;
5725                 }
5726         } while (nmap && nexleft && bmv->bmv_length);
5727
5728  out_free_map:
5729         kmem_free(map);
5730  out_unlock_ilock:
5731         xfs_iunlock_map_shared(ip, lock);
5732  out_unlock_iolock:
5733         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
5734
5735         for (i = 0; i < cur_ext; i++) {
5736                 int full = 0;   /* user array is full */
5737
5738                 /* format results & advance arg */
5739                 error = formatter(&arg, &out[i], &full);
5740                 if (error || full)
5741                         break;
5742         }
5743
5744         kmem_free(out);
5745         return error;
5746 }
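     /*
      * Illustrative sketch only (not part of the original source): one way
      * a userspace caller might drive the path above through the
      * XFS_IOC_GETBMAPX ioctl.  It assumes the xfsprogs headers supply
      * struct getbmapx and the ioctl number, that "fd" is an open file
      * descriptor on an XFS file, and that "i" is an int.  Offsets and
      * lengths are in 512-byte basic blocks; a length of -1 means "to end
      * of file"; bmv_count includes the header slot; set BMV_IF_PREALLOC
      * in bmv_iflags to have unwritten extents reported as such.
      *
      *     struct getbmapx map[33];
      *
      *     memset(map, 0, sizeof(map));
      *     map[0].bmv_offset = 0;
      *     map[0].bmv_length = -1;
      *     map[0].bmv_count  = 33;
      *     map[0].bmv_iflags = 0;
      *
      *     if (ioctl(fd, XFS_IOC_GETBMAPX, map) == 0)
      *             for (i = 0; i < map[0].bmv_entries; i++)
      *                     printf("off %lld len %lld\n",
      *                            (long long)map[i + 1].bmv_offset,
      *                            (long long)map[i + 1].bmv_length);
      */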
5747
5748 /*
5749  * Check the last inode extent to determine whether this allocation will result
5750  * in blocks being allocated at the end of the file. When we allocate new data
5751  * blocks at the end of the file which do not start at the previous data block,
5752  * we will try to align the new blocks at stripe unit boundaries.
5753  */
5754 STATIC int                              /* error */
5755 xfs_bmap_isaeof(
5756         xfs_inode_t     *ip,            /* incore inode pointer */
5757         xfs_fileoff_t   off,            /* file offset in fsblocks */
5758         int             whichfork,      /* data or attribute fork */
5759         char            *aeof)          /* return value */
5760 {
5761         int             error;          /* error return value */
5762         xfs_ifork_t     *ifp;           /* inode fork pointer */
5763         xfs_bmbt_rec_host_t *lastrec;   /* extent record pointer */
5764         xfs_extnum_t    nextents;       /* number of file extents */
5765         xfs_bmbt_irec_t s;              /* expanded extent record */
5766
5767         ASSERT(whichfork == XFS_DATA_FORK);
5768         ifp = XFS_IFORK_PTR(ip, whichfork);
5769         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5770             (error = xfs_iread_extents(NULL, ip, whichfork)))
5771                 return error;
5772         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5773         if (nextents == 0) {
5774                 *aeof = 1;
5775                 return 0;
5776         }
5777         /*
5778          * Go to the last extent
5779          */
5780         lastrec = xfs_iext_get_ext(ifp, nextents - 1);
5781         xfs_bmbt_get_all(lastrec, &s);
5782         /*
5783          * Check we are allocating in the last extent (for delayed allocations)
5784          * or past the last extent for non-delayed allocations.
5785          */
5786         *aeof = (off >= s.br_startoff &&
5787                  off < s.br_startoff + s.br_blockcount &&
5788                  isnullstartblock(s.br_startblock)) ||
5789                 off >= s.br_startoff + s.br_blockcount;
5790         return 0;
5791 }
5792
5793 /*
5794  * Check if the endoff is outside the last extent. If so the caller will grow
5795  * the allocation to a stripe unit boundary.
5796  */
5797 int                                     /* error */
5798 xfs_bmap_eof(
5799         xfs_inode_t     *ip,            /* incore inode pointer */
5800         xfs_fileoff_t   endoff,         /* file offset in fsblocks */
5801         int             whichfork,      /* data or attribute fork */
5802         int             *eof)           /* result value */
5803 {
5804         xfs_fsblock_t   blockcount;     /* extent block count */
5805         int             error;          /* error return value */
5806         xfs_ifork_t     *ifp;           /* inode fork pointer */
5807         xfs_bmbt_rec_host_t *lastrec;   /* extent record pointer */
5808         xfs_extnum_t    nextents;       /* number of file extents */
5809         xfs_fileoff_t   startoff;       /* extent starting file offset */
5810
5811         ASSERT(whichfork == XFS_DATA_FORK);
5812         ifp = XFS_IFORK_PTR(ip, whichfork);
5813         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5814             (error = xfs_iread_extents(NULL, ip, whichfork)))
5815                 return error;
5816         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5817         if (nextents == 0) {
5818                 *eof = 1;
5819                 return 0;
5820         }
5821         /*
5822          * Go to the last extent
5823          */
5824         lastrec = xfs_iext_get_ext(ifp, nextents - 1);
5825         startoff = xfs_bmbt_get_startoff(lastrec);
5826         blockcount = xfs_bmbt_get_blockcount(lastrec);
5827         *eof = endoff >= startoff + blockcount;
5828         return 0;
5829 }
5830
5831 #ifdef DEBUG
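     /*
      * Search the cursor's buffers, and then the buf log items of the
      * cursor's transaction, for a buffer whose disk address matches
      * "bno".  Returns NULL if the block is not held in either place.
      */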
5832 STATIC
5833 xfs_buf_t *
5834 xfs_bmap_get_bp(
5835         xfs_btree_cur_t         *cur,
5836         xfs_fsblock_t           bno)
5837 {
5838         int i;
5839         xfs_buf_t *bp;
5840
5841         if (!cur)
5842                 return NULL;
5843
5844         bp = NULL;
5845         for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
5846                 bp = cur->bc_bufs[i];
5847                 if (!bp) break;
5848                 if (XFS_BUF_ADDR(bp) == bno)
5849                         break;  /* Found it */
5850         }
5851         if (i == XFS_BTREE_MAXLEVELS)
5852                 bp = NULL;
5853
5854         if (!bp) { /* Chase down all the log items to see if the bp is there */
5855                 struct xfs_log_item_desc *lidp;
5856                 struct xfs_buf_log_item *bip;
5857
5858                 list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
5859                         bip = (struct xfs_buf_log_item *)lidp->lid_item;
5860                         if (bip->bli_item.li_type == XFS_LI_BUF &&
5861                             XFS_BUF_ADDR(bip->bli_buf) == bno)
5862                                 return bip->bli_buf;
5863                 }
5864         }
5865
5866         return bp;
5867 }
5868
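     /*
      * Sanity-check one interior bmap btree block: keys must be in
      * ascending startoff order, and no two child pointers within the
      * block (or root) may reference the same block number.
      */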
5869 STATIC void
5870 xfs_check_block(
5871         struct xfs_btree_block  *block,
5872         xfs_mount_t             *mp,
5873         int                     root,
5874         short                   sz)
5875 {
5876         int                     i, j, dmxr;
5877         __be64                  *pp, *thispa;   /* pointer to block address */
5878         xfs_bmbt_key_t          *prevp, *keyp;
5879
5880         ASSERT(be16_to_cpu(block->bb_level) > 0);
5881
5882         prevp = NULL;
5883         for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
5884                 dmxr = mp->m_bmap_dmxr[0];
5885                 keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
5886
5887                 if (prevp) {
5888                         ASSERT(be64_to_cpu(prevp->br_startoff) <
5889                                be64_to_cpu(keyp->br_startoff));
5890                 }
5891                 prevp = keyp;
5892
5893                 /*
5894                  * Compare the block numbers to see if there are dups.
5895                  */
5896                 if (root)
5897                         pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
5898                 else
5899                         pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
5900
5901                 for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
5902                         if (root)
5903                                 thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
5904                         else
5905                                 thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
5906                         if (*thispa == *pp) {
5907                                 cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld",
5908                                         __func__, j, i,
5909                                         (unsigned long long)be64_to_cpu(*thispa));
5910                                 panic("%s: ptrs are equal in node\n",
5911                                         __func__);
5912                         }
5913                 }
5914         }
5915 }
5916
5917 /*
5918  * Check that the extents for the inode ip are in the right order in all
5919  * btree leaves.
5920  */
5921
5922 STATIC void
5923 xfs_bmap_check_leaf_extents(
5924         xfs_btree_cur_t         *cur,   /* btree cursor or null */
5925         xfs_inode_t             *ip,            /* incore inode pointer */
5926         int                     whichfork)      /* data or attr fork */
5927 {
5928         struct xfs_btree_block  *block; /* current btree block */
5929         xfs_fsblock_t           bno;    /* block # of "block" */
5930         xfs_buf_t               *bp;    /* buffer for "block" */
5931         int                     error;  /* error return value */
5932         xfs_extnum_t            i = 0, j; /* index into the extents list */
5933         xfs_ifork_t             *ifp;   /* fork structure */
5934         int                     level;  /* btree level, for checking */
5935         xfs_mount_t             *mp;    /* file system mount structure */
5936         __be64                  *pp;    /* pointer to block address */
5937         xfs_bmbt_rec_t          *ep;    /* pointer to current extent */
5938         xfs_bmbt_rec_t          last = {0, 0}; /* last extent in prev block */
5939         xfs_bmbt_rec_t          *nextp; /* pointer to next extent */
5940         int                     bp_release = 0;
5941
5942         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
5943                 return;
5944         }
5945
5946         bno = NULLFSBLOCK;
5947         mp = ip->i_mount;
5948         ifp = XFS_IFORK_PTR(ip, whichfork);
5949         block = ifp->if_broot;
5950         /*
5951          * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
5952          */
5953         level = be16_to_cpu(block->bb_level);
5954         ASSERT(level > 0);
5955         xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
5956         pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
5957         bno = be64_to_cpu(*pp);
5958
5959         ASSERT(bno != NULLDFSBNO);
5960         ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
5961         ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
5962
5963         /*
5964          * Go down the tree until leaf level is reached, following the first
5965          * pointer (leftmost) at each level.
5966          */
5967         while (level-- > 0) {
5968                 /* See if buf is in cur first */
5969                 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
5970                 if (bp) {
5971                         bp_release = 0;
5972                 } else {
5973                         bp_release = 1;
5974                 }
5975                 if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
5976                                 XFS_BMAP_BTREE_REF)))
5977                         goto error_norelse;
5978                 block = XFS_BUF_TO_BLOCK(bp);
5979                 XFS_WANT_CORRUPTED_GOTO(
5980                         xfs_bmap_sanity_check(mp, bp, level),
5981                         error0);
5982                 if (level == 0)
5983                         break;
5984
5985                 /*
5986                  * Check this block for basic sanity (increasing keys and
5987                  * no duplicate blocks).
5988                  */
5989
5990                 xfs_check_block(block, mp, 0, 0);
5991                 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
5992                 bno = be64_to_cpu(*pp);
5993                 XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
5994                 if (bp_release) {
5995                         bp_release = 0;
5996                         xfs_trans_brelse(NULL, bp);
5997                 }
5998         }
5999
6000         /*
6001          * Here with bp and block set to the leftmost leaf node in the tree.
6002          */
6003         i = 0;
6004
6005         /*
6006          * Loop over all leaf nodes checking that all extents are in the right order.
6007          */
6008         for (;;) {
6009                 xfs_fsblock_t   nextbno;
6010                 xfs_extnum_t    num_recs;
6011
6012
6013                 num_recs = xfs_btree_get_numrecs(block);
6014
6015                 /*
6016                  * Grab the address of the next leaf block, if any.
6017                  */
6018
6019                 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
6020
6021                 /*
6022                  * Check all the extents to make sure they are OK.
6023                  * If we had a previous block, the last entry should
6024                  * conform with the first entry in this one.
6025                  */
6026
6027                 ep = XFS_BMBT_REC_ADDR(mp, block, 1);
6028                 if (i) {
6029                         ASSERT(xfs_bmbt_disk_get_startoff(&last) +
6030                                xfs_bmbt_disk_get_blockcount(&last) <=
6031                                xfs_bmbt_disk_get_startoff(ep));
6032                 }
6033                 for (j = 1; j < num_recs; j++) {
6034                         nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
6035                         ASSERT(xfs_bmbt_disk_get_startoff(ep) +
6036                                xfs_bmbt_disk_get_blockcount(ep) <=
6037                                xfs_bmbt_disk_get_startoff(nextp));
6038                         ep = nextp;
6039                 }
6040
6041                 last = *ep;
6042                 i += num_recs;
6043                 if (bp_release) {
6044                         bp_release = 0;
6045                         xfs_trans_brelse(NULL, bp);
6046                 }
6047                 bno = nextbno;
6048                 /*
6049                  * If we've reached the end, stop.
6050                  */
6051                 if (bno == NULLFSBLOCK)
6052                         break;
6053
6054                 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
6055                 if (bp) {
6056                         bp_release = 0;
6057                 } else {
6058                         bp_release = 1;
6059                 }
6060                 if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
6061                                 XFS_BMAP_BTREE_REF)))
6062                         goto error_norelse;
6063                 block = XFS_BUF_TO_BLOCK(bp);
6064         }
6065         if (bp_release) {
6066                 bp_release = 0;
6067                 xfs_trans_brelse(NULL, bp);
6068         }
6069         return;
6070
6071 error0:
6072         cmn_err(CE_WARN, "%s: at error0", __func__);
6073         if (bp_release)
6074                 xfs_trans_brelse(NULL, bp);
6075 error_norelse:
6076         cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents",
6077                 __func__, i);
6078         panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
6079         return;
6080 }
6081 #endif
6082
6083 /*
6084  * Count fsblocks of the given fork.
6085  */
6086 int                                             /* error */
6087 xfs_bmap_count_blocks(
6088         xfs_trans_t             *tp,            /* transaction pointer */
6089         xfs_inode_t             *ip,            /* incore inode */
6090         int                     whichfork,      /* data or attr fork */
6091         int                     *count)         /* out: count of blocks */
6092 {
6093         struct xfs_btree_block  *block; /* current btree block */
6094         xfs_fsblock_t           bno;    /* block # of "block" */
6095         xfs_ifork_t             *ifp;   /* fork structure */
6096         int                     level;  /* btree level, for checking */
6097         xfs_mount_t             *mp;    /* file system mount structure */
6098         __be64                  *pp;    /* pointer to block address */
6099
6100         bno = NULLFSBLOCK;
6101         mp = ip->i_mount;
6102         ifp = XFS_IFORK_PTR(ip, whichfork);
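             /*
              * For an extent-format fork the records are all incore; just
              * sum the block counts of the in-core extent list.
              */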
6103         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
6104                 xfs_bmap_count_leaves(ifp, 0,
6105                         ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
6106                         count);
6107                 return 0;
6108         }
6109
6110         /*
6111          * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
6112          */
6113         block = ifp->if_broot;
6114         level = be16_to_cpu(block->bb_level);
6115         ASSERT(level > 0);
6116         pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
6117         bno = be64_to_cpu(*pp);
6118         ASSERT(bno != NULLDFSBNO);
6119         ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
6120         ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
6121
6122         if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count))) {
6123                 XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
6124                                  mp);
6125                 return XFS_ERROR(EFSCORRUPTED);
6126         }
6127
6128         return 0;
6129 }
6130
6131 /*
6132  * Recursively walks each level of a btree
6133  * to count the total fsblocks in use.
6134  */
6135 STATIC int                                     /* error */
6136 xfs_bmap_count_tree(
6137         xfs_mount_t     *mp,            /* file system mount point */
6138         xfs_trans_t     *tp,            /* transaction pointer */
6139         xfs_ifork_t     *ifp,           /* inode fork pointer */
6140         xfs_fsblock_t   blockno,        /* file system block number */
6141         int             levelin,        /* level in btree */
6142         int             *count)         /* Count of blocks */
6143 {
6144         int                     error;
6145         xfs_buf_t               *bp, *nbp;
6146         int                     level = levelin;
6147         __be64                  *pp;
6148         xfs_fsblock_t           bno = blockno;
6149         xfs_fsblock_t           nextbno;
6150         struct xfs_btree_block  *block, *nextblock;
6151         int                     numrecs;
6152
6153         if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF)))
6154                 return error;
6155         *count += 1;
6156         block = XFS_BUF_TO_BLOCK(bp);
6157
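             /*
              * "blockno" addresses a block one level below "levelin", so
              * after the decrement "level" holds that block's level:
              * non-zero means an interior node, zero means a leaf.
              */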
6158         if (--level) {
6159                 /* Interior node: count the nodes at this level, then descend */
6160                 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
6161                 while (nextbno != NULLFSBLOCK) {
6162                         if ((error = xfs_btree_read_bufl(mp, tp, nextbno,
6163                                 0, &nbp, XFS_BMAP_BTREE_REF)))
6164                                 return error;
6165                         *count += 1;
6166                         nextblock = XFS_BUF_TO_BLOCK(nbp);
6167                         nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
6168                         xfs_trans_brelse(tp, nbp);
6169                 }
6170
6171                 /* Dive to the next level */
6172                 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
6173                 bno = be64_to_cpu(*pp);
6174                 error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, count);
6175                 if (unlikely(error)) {
6176                         xfs_trans_brelse(tp, bp);
6177                         XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
6178                                          XFS_ERRLEVEL_LOW, mp);
6179                         return XFS_ERROR(EFSCORRUPTED);
6180                 }
6181                 xfs_trans_brelse(tp, bp);
6182         } else {
6183                 /* Leaf level: count each leaf block and the blocks its records map */
6184                 for (;;) {
6185                         nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
6186                         numrecs = be16_to_cpu(block->bb_numrecs);
6187                         xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
6188                         xfs_trans_brelse(tp, bp);
6189                         if (nextbno == NULLFSBLOCK)
6190                                 break;
6191                         bno = nextbno;
6192                         if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
6193                                 XFS_BMAP_BTREE_REF)))
6194                                 return error;
6195                         *count += 1;
6196                         block = XFS_BUF_TO_BLOCK(bp);
6197                 }
6198         }
6199         return 0;
6200 }
6201
6202 /*
6203  * Count the fsblocks mapped by a range of in-core extent records.
6204  */
6205 STATIC void
6206 xfs_bmap_count_leaves(
6207         xfs_ifork_t             *ifp,
6208         xfs_extnum_t            idx,
6209         int                     numrecs,
6210         int                     *count)
6211 {
6212         int             b;
6213
6214         for (b = 0; b < numrecs; b++) {
6215                 xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
6216                 *count += xfs_bmbt_get_blockcount(frp);
6217         }
6218 }
6219
6220 /*
6221  * Count leaf blocks given a range of extent records originally
6222  * Count the fsblocks mapped by a range of extent records held in
6223  * on-disk (btree leaf block) format.
6224 STATIC void
6225 xfs_bmap_disk_count_leaves(
6226         struct xfs_mount        *mp,
6227         struct xfs_btree_block  *block,
6228         int                     numrecs,
6229         int                     *count)
6230 {
6231         int             b;
6232         xfs_bmbt_rec_t  *frp;
6233
6234         for (b = 1; b <= numrecs; b++) {
6235                 frp = XFS_BMBT_REC_ADDR(mp, block, b);
6236                 *count += xfs_bmbt_disk_get_blockcount(frp);
6237         }
6238 }