/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */
#include <linux/module.h>
#include <linux/time.h>
#include <linux/ext4_jbd2.h>
#include <linux/jbd.h>
#include <linux/smp_lock.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/ext4_fs_extents.h>
#include <asm/uaccess.h>
/*
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
static inline ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ex->ee_start);
	block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
	return block;
}

/*
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
static inline ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ix->ei_leaf);
	block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
	return block;
}

/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
	ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
	ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
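/*
 * Editor's illustration (not part of the original file): how a 48-bit
 * physical block number round-trips through the lo/hi split above.
 * The value and the function name are made up; compiled out via #if 0.
 */
#if 0
static void ext4_ext_pblock_example(void)
{
	struct ext4_extent ex;
	ext4_fsblk_t pb = 0x123456789abULL;	/* arbitrary 48-bit block */

	ext4_ext_store_pblock(&ex, pb);
	/* ee_start holds the low 32 bits, ee_start_hi the high 16 bits */
	BUG_ON(ext_pblock(&ex) != pb);
}
#endif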
static int ext4_ext_check_header(const char *function, struct inode *inode,
				struct ext4_extent_header *eh)
{
	const char *error_msg = NULL;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error(inode->i_sb, function,
			"bad header in inode #%lu: %s - magic %x, "
			"entries %u, max %u, depth %u",
			inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			le16_to_cpu(eh->eh_depth));
	return -EIO;
}
static handle_t *ext4_ext_journal_restart(handle_t *handle, int needed)
{
	int err;

	if (handle->h_buffer_credits > needed)
		return handle;
	if (!ext4_journal_extend(handle, needed))
		return handle;
	err = ext4_journal_restart(handle, needed);

	return handle;
}
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh)
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;

	if (path->p_bh)
		/* path points to block */
		err = ext4_journal_dirty_metadata(handle, path->p_bh);
	else
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	return err;
}
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
					struct ext4_ext_path *path,
					ext4_fsblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_grpblk_t colour;
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		ex = path[depth].p_ext;
		if (ex)
			return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
		le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
	colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour + block;
}
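/*
 * Editor's worked example (made-up numbers): with 32768 blocks per group,
 * an inode in group 5 and s_first_data_block == 0, bg_start = 5 * 32768 =
 * 163840.  A process with pid % 16 == 3 gets colour = 3 * (32768 / 16) =
 * 6144, so the allocation goal for logical block 10 is
 * 163840 + 6144 + 10 = 169994.
 */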
static ext4_fsblk_t
ext4_ext_new_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_block(handle, inode, goal, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGRESSIVE_TEST
	if (size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGRESSIVE_TEST
	if (size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGRESSIVE_TEST
	if (size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGRESSIVE_TEST
	if (size > 4)
		size = 4;
#endif
	return size;
}
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(" %d:%d:%llu ",
				le32_to_cpu(path->p_ext->ee_block),
				le16_to_cpu(path->p_ext->ee_len),
				ext_pblock(path->p_ext));
		}
	}
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block),
			le16_to_cpu(ex->ee_len), ext_pblock(ex));
	}
}
#else
#define ext4_ext_show_path(inode,path)
#define ext4_ext_show_leaf(inode,path)
#endif

static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}
/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 */
static void
ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
	BUG_ON(le16_to_cpu(eh->eh_entries) <= 0);

	ext_debug("binsearch for %d(idx): ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_FIRST_INDEX(eh) + le16_to_cpu(eh->eh_entries) - 1;
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ei_block,
				m, m->ei_block, r, r->ei_block);
	}

	path->p_idx = l - 1;
	ext_debug(" -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
			idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk("k=%d, ix=0x%p, first=0x%p\n", k,
					ix, EXT_FIRST_INDEX(eh));
				printk("%u <= %u\n",
					le32_to_cpu(ix->ei_block),
					le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					<= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif
}
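/*
 * Editor's sketch (hypothetical helper, not in the original source): the
 * same binary-search convention on a plain sorted array.  l starts at the
 * second entry and the answer is l - 1, i.e. the rightmost entry <= key,
 * which is exactly what the index search above computes.  Compiled out.
 */
#if 0
static int binsearch_floor_example(const int *arr, int nr, int key)
{
	const int *l = arr + 1, *r = arr + nr - 1, *m;

	/* caller guarantees nr >= 1 and arr[0] <= key */
	while (l <= r) {
		m = l + (r - l) / 2;
		if (key < *m)
			r = m - 1;
		else
			l = m + 1;
	}
	return (int)(l - 1 - arr);	/* index of rightmost arr[i] <= key */
}
#endif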
/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 */
static void
ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %d: ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_FIRST_EXTENT(eh) + le16_to_cpu(eh->eh_entries) - 1;
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ee_block,
				m, m->ee_block, r, r->ee_block);
	}

	path->p_ext = l - 1;
	ext_debug(" -> %d:%llu:%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext_pblock(path->p_ext),
			le16_to_cpu(path->p_ext->ee_len));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					<= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif
}
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	if (ext4_ext_check_header(__FUNCTION__, inode, eh))
		return ERR_PTR(-EIO);

	i = depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
	path[0].p_hdr = eh;

	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_bread(inode->i_sb, path[ppos].p_block);
		if (!bh)
			goto err;

		eh = ext_block_hdr(bh);
		ppos++;
		BUG_ON(ppos > depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (ext4_ext_check_header(__FUNCTION__, inode, eh))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_hdr = eh;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	if (ext4_ext_check_header(__FUNCTION__, inode, eh))
		goto err;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *curp,
				int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %d. "
					"move %d from 0x%p to 0x%p\n",
					logical, le32_to_cpu(curp->p_idx->ei_block), len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %d. "
				"move %d from 0x%p to 0x%p\n",
				logical, le32_to_cpu(curp->p_idx->ei_block), len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);

	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
			> le16_to_cpu(curp->p_hdr->eh_max));
	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}
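/*
 * Editor's sketch (hypothetical helper, not in the original source): the
 * memmove pattern used above on a plain array -- open a one-slot hole at
 * position pos by shifting the tail right, then write the new entry into
 * the hole.  Compiled out.
 */
#if 0
static void array_insert_example(int *arr, int nr, int pos, int val)
{
	/* shift arr[pos..nr-1] one slot right; tail may be empty */
	memmove(arr + pos + 1, arr + pos, (nr - pos) * sizeof(int));
	arr[pos] = val;
}
#endif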
/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break processing and mark the filesystem
	 * read-only.  The index won't be inserted and the tree will stay
	 * in a consistent state.  The next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kmalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;
	memset(ablocks, 0, sizeof(ext4_fsblk_t) * depth);

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	BUG_ON(newblock == 0);
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext_pblock(path[depth].p_ext),
				le16_to_cpu(path[depth].p_ext->ee_len),
				newblock);
		/* memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent)); */
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_journal_dirty_metadata(handle, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		path[depth].p_hdr->eh_entries =
			cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;
	}

	/* create intermediate indexes */
	k = depth - at - 1;
	BUG_ON(k < 0);
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, (ext4_fsblk_t)newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %lu -> %llu\n", i,
				newblock, (unsigned long) le32_to_cpu(border),
				oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
				EXT_LAST_INDEX(path[i].p_hdr));
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%d in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					idx_pblock(path[i].p_idx),
					newblock);
			/* memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			BUG_ON(neh->eh_entries > neh->eh_max); */
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			neh->eh_entries =
				cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_journal_dirty_metadata(handle, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
			le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, ablocks[i], 1);
		}
	}
	kfree(ablocks);

	return err;
}
/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_journal_dirty_metadata(handle, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
	/* FIXME: it works, but actually path[0] can be index */
	curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
			le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
			le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}
/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				le32_to_cpu(newext->ee_block),
				path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				le32_to_cpu(newext->ee_block),
				path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}
/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static unsigned long
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static unsigned ext4_ext_next_leaf_block(struct inode *inode,
					struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCK;
}
/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	BUG_ON(ex == NULL);
	BUG_ON(eh == NULL);

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}
static int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	if (le32_to_cpu(ex1->ee_block) + le16_to_cpu(ex1->ee_len) !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (le16_to_cpu(ex1->ee_len) + le16_to_cpu(ex2->ee_len) > EXT_MAX_LEN)
		return 0;
#ifdef AGRESSIVE_TEST
	if (le16_to_cpu(ex1->ee_len) >= 4)
		return 0;
#endif

	if (ext_pblock(ex1) + le16_to_cpu(ex1->ee_len) == ext_pblock(ex2))
		return 1;
	return 0;
}
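/*
 * Editor's illustration (made-up numbers): extents [100, len 8] -> pblock
 * 500 and [108, len 4] -> pblock 508 are mergeable: 100 + 8 == 108
 * logically and 500 + 8 == 508 physically, so they collapse into
 * [100, len 12] -> pblock 500.  If either sum mismatched, or the combined
 * length exceeded EXT_MAX_LEN, the merge would be refused.
 */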
/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext)
{
	struct ext4_extent_header * eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err, next;

	BUG_ON(newext->ee_len == 0);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	BUG_ON(path[depth].p_hdr == NULL);

	/* try to insert block into found extent and return */
	if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append %d block to %d:%d (from %llu)\n",
				le16_to_cpu(newext->ee_len),
				le32_to_cpu(ex->ee_block),
				le16_to_cpu(ex->ee_len), ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;
		ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
				+ le16_to_cpu(newext->ee_len));
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
			&& next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
					le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
				le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:%d\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				le16_to_cpu(newext->ee_len));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
			> le32_to_cpu(nearex->ee_block)) {
		/* BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
					"move %d from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext_pblock(newext),
					le16_to_cpu(newext->ee_len),
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%llu:%d before: nearest 0x%p, "
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				le16_to_cpu(newext->ee_len),
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

	eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	nearex->ee_start = newext->ee_start;
	nearex->ee_start_hi = newext->ee_start_hi;
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
	while (nearex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, nearex, nearex + 1))
			break;
		/* merge with next extent! */
		nearex->ee_len = cpu_to_le16(le16_to_cpu(nearex->ee_len)
				+ le16_to_cpu(nearex[1].ee_len));
		if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - nearex - 1)
					* sizeof(struct ext4_extent);
			memmove(nearex + 1, nearex + 2, len);
		}
		eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
		BUG_ON(eh->eh_entries == 0);
	}

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_tree_changed(inode);
	ext4_ext_invalidate_cache(inode);
	return err;
}
int ext4_ext_walk_space(struct inode *inode, unsigned long block,
			unsigned long num, ext_prepare_callback func,
			void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	unsigned long next, start = 0, end = 0;
	unsigned long last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		num = last - block;
		/* find extent for this block */
		path = ext4_ext_find_extent(inode, block, path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		BUG_ON(path[depth].p_hdr == NULL);
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >=
				le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
			cbex.ec_type = EXT4_EXT_CACHE_GAP;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = le16_to_cpu(ex->ee_len);
			cbex.ec_start = ext_pblock(ex);
			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
		}

		BUG_ON(cbex.ec_len == 0);
		err = func(inode, path, &cbex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;
		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}
static void
ext4_ext_put_in_cache(struct inode *inode, __u32 block,
			__u32 len, __u32 start, int type)
{
	struct ext4_ext_cache *cex;

	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_type = type;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
}

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				unsigned long block)
{
	int depth = ext_depth(inode);
	unsigned long lblock, len;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %lu [%lu:%lu]",
				(unsigned long) block,
				(unsigned long) le32_to_cpu(ex->ee_block),
				(unsigned long) le16_to_cpu(ex->ee_len));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ le16_to_cpu(ex->ee_len)) {
		lblock = le32_to_cpu(ex->ee_block)
			+ le16_to_cpu(ex->ee_len);
		len = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%lu:%lu] %lu",
				(unsigned long) le32_to_cpu(ex->ee_block),
				(unsigned long) le16_to_cpu(ex->ee_len),
				(unsigned long) block);
		BUG_ON(len == lblock);
		len = len - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %lu:%lu\n", (unsigned long) lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
}
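/*
 * Editor's illustration (made-up numbers): with one extent [100, len 8]
 * and the next allocated block at 200, a lookup for block 150 falls in
 * the "after" branch: lblock = 100 + 8 = 108 and len = 200 - 108 = 92,
 * so the gap [108;199] is cached and later lookups within it avoid the
 * tree walk entirely.
 */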
static int
ext4_ext_in_cache(struct inode *inode, unsigned long block,
			struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;

	cex = &EXT4_I(inode)->i_cached_extent;

	/* has cache valid data? */
	if (cex->ec_type == EXT4_EXT_CACHE_NO)
		return EXT4_EXT_CACHE_NO;

	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%lu cached by %lu:%lu:%llu\n",
				(unsigned long) block,
				(unsigned long) cex->ec_block,
				(unsigned long) cex->ec_len,
				ext_pblock(ex));
		return cex->ec_type;
	}

	/* not in cache */
	return EXT4_EXT_CACHE_NO;
}
/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * last index in the block only.
 */
int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path)
{
	struct buffer_head *bh;
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	path--;
	leaf = idx_pblock(path->p_idx);
	BUG_ON(path->p_hdr->eh_entries == 0);
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;
	path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	bh = sb_find_get_block(inode->i_sb, leaf);
	ext4_forget(handle, 1, inode, bh, leaf);
	ext4_free_blocks(handle, inode, leaf, 1);
	return err;
}
/*
 * ext4_ext_calc_credits_for_insert:
 * This routine returns max. credits that the extent tree can consume.
 * It should be OK for low-performance paths like ->writepage()
 *
 * To allow many writing processes to fit into a single transaction,
 * the caller should calculate credits under truncate_mutex and
 * pass the actual path.
 */
int inline ext4_ext_calc_credits_for_insert(struct inode *inode,
						struct ext4_ext_path *path)
{
	int depth, needed;

	if (path) {
		/* probably there is space in leaf? */
		depth = ext_depth(inode);
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max))
			return 1;
	}

	/*
	 * given 32-bit logical block (4294967296 blocks), max. tree
	 * can be 4 levels in depth -- 4 * 340^4 == 53453440000;
	 * let's also add one more level for imbalance.
	 */
	depth = 5;

	/* allocation of new data block(s) */
	needed = 2;

	/*
	 * tree can be full, so it would need to grow in depth:
	 * we need one credit to modify old root, credits for
	 * new root will be added in split accounting
	 */
	needed += 1;

	/*
	 * Index split can happen, we would need:
	 *    allocate intermediate indexes (bitmap + group)
	 *  + change two blocks at each level, but root (already included)
	 */
	needed += (depth * 2) + (depth * 2);

	/* any allocation modifies superblock */
	needed += 1;

	return needed;
}
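/*
 * Editor's worked example: with 4k blocks a full index block holds
 * (4096 - 12) / 12 = 340 entries, which is where the 340^4 above comes
 * from.  With no path supplied the estimate uses depth = 5, so
 * needed = 2 + 1 + (5 * 2) + (5 * 2) + 1 = 24 credits for one insert.
 */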
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
				struct ext4_extent *ex,
				unsigned long from, unsigned long to)
{
	struct buffer_head *bh;
	int i;

#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		unsigned short ee_len = le16_to_cpu(ex->ee_len);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
		/* tail removal */
		unsigned long num;
		ext4_fsblk_t start;
		num = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - from;
		start = ext_pblock(ex) + le16_to_cpu(ex->ee_len) - num;
		ext_debug("free last %lu blocks starting %llu\n", num, start);
		for (i = 0; i < num; i++) {
			bh = sb_find_get_block(inode->i_sb, start + i);
			ext4_forget(handle, 0, inode, bh, start + i);
		}
		ext4_free_blocks(handle, inode, start, num);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
		printk("strange request: removal %lu-%lu from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
	} else {
		printk("strange request: removal(2) %lu-%lu from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
	}
	return 0;
}
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		struct ext4_ext_path *path, unsigned long start)
{
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	unsigned a, b, block, num;
	unsigned long ex_ee_block;
	unsigned short ex_ee_len;
	struct ext4_extent *ex;

	ext_debug("truncate since %lu in leaf\n", start);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	BUG_ON(eh == NULL);
	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);

	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = le16_to_cpu(ex->ee_len);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {
		ext_debug("remove ext %lu:%u\n", ex_ee_block, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;

		ext_debug("  border %u:%u\n", a, b);

		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
			block = 0;
			num = 0;
			BUG();
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
			num = a - block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			block = a;
			num = b - a;
			/* there is no "make a hole" API yet */
			BUG();
		} else {
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			num = 0;
			BUG_ON(a != ex_ee_block);
			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
		}

		/* at present, extent can't cross block group: */
		/* leaf + bitmap + group desc + sb + inode */
		credits = 5;
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
#ifdef CONFIG_QUOTA
		credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

		handle = ext4_ext_journal_restart(handle, credits);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto out;
		}

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, a, b);
		if (err)
			goto out;

		if (num == 0) {
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);
			eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
		}

		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:%llu\n", block, num,
				ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = le16_to_cpu(ex->ee_len);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}
/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}
int ext4_ext_remove_space(struct inode *inode, unsigned long start)
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path;
	handle_t *handle;
	int i = 0, err = 0;

	ext_debug("truncate since %lu\n", start);

	/* probably first extent we're gonna free will be last in block */
	handle = ext4_journal_start(inode, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ext4_ext_invalidate_cache(inode);

	/*
	 * We start scanning from right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
	if (path == NULL) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}
	memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
	path[0].p_hdr = ext_inode_hdr(inode);
	if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
		err = -EIO;
		goto out;
	}
	path[0].p_depth = depth;

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path, start);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
			if (ext4_ext_check_header(__FUNCTION__, inode,
							path[i].p_hdr)) {
				err = -EIO;
				goto out;
			}
		}

		BUG_ON(le16_to_cpu(path[i].p_hdr->eh_entries)
				> le16_to_cpu(path[i].p_hdr->eh_max));
		BUG_ON(path[i].p_hdr->eh_magic != EXT4_EXT_MAGIC);

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
					path[i].p_hdr,
					le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, see at next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
					i + 1, idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			path[i+1].p_bh =
				sb_bread(sb, idx_pblock(path[i].p_idx));
			if (!path[i+1].p_bh) {
				/* should we reset i_size? */
				err = -EIO;
				break;
			}

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must be already prepared by
				 * ext4_ext_rm_leaf() */
				err = ext4_ext_rm_idx(handle, inode, path + i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_tree_changed(inode);
	ext4_ext_drop_refs(path);
	kfree(path);
	ext4_journal_stop(handle);

	return err;
}
1879 ext4_ext_drop_refs(path);
1881 ext4_journal_stop(handle);
1887 * called at mount time
1889 void ext4_ext_init(struct super_block *sb)
1892 * possible initialization would be here
1895 if (test_opt(sb, EXTENTS)) {
1896 printk("EXT4-fs: file extents enabled");
1897 #ifdef AGRESSIVE_TEST
1898 printk(", agressive tests");
1900 #ifdef CHECK_BINSEARCH
1901 printk(", check binsearch");
1903 #ifdef EXTENTS_STATS
1907 #ifdef EXTENTS_STATS
1908 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
1909 EXT4_SB(sb)->s_ext_min = 1 << 30;
1910 EXT4_SB(sb)->s_ext_max = 0;
1916 * called at umount time
1918 void ext4_ext_release(struct super_block *sb)
1920 if (!test_opt(sb, EXTENTS))
1923 #ifdef EXTENTS_STATS
1924 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
1925 struct ext4_sb_info *sbi = EXT4_SB(sb);
1926 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
1927 sbi->s_ext_blocks, sbi->s_ext_extents,
1928 sbi->s_ext_blocks / sbi->s_ext_extents);
1929 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
1930 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
			ext4_fsblk_t iblock,
			unsigned long max_blocks, struct buffer_head *bh_result,
			int create, int extend_disksize)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent newex, *ex;
	ext4_fsblk_t goal, newblock;
	int err = 0, depth;
	unsigned long allocated = 0;

	__clear_bit(BH_New, &bh_result->b_state);
	ext_debug("blocks %d/%lu requested for inode %u\n", (int) iblock,
			max_blocks, (unsigned) inode->i_ino);
	mutex_lock(&EXT4_I(inode)->truncate_mutex);

	/* check in cache */
	goal = ext4_ext_in_cache(inode, iblock, &newex);
	if (goal) {
		if (goal == EXT4_EXT_CACHE_GAP) {
			if (!create) {
				/* block isn't allocated yet and
				 * user doesn't want to allocate it */
				goto out2;
			}
			/* we should allocate requested block */
		} else if (goal == EXT4_EXT_CACHE_EXTENT) {
			/* block is already allocated */
			newblock = iblock
				- le32_to_cpu(newex.ee_block)
				+ ext_pblock(&newex);
			/* number of remaining blocks in the extent */
			allocated = le16_to_cpu(newex.ee_len) -
					(iblock - le32_to_cpu(newex.ee_block));
			goto out;
		} else {
			BUG();
		}
	}

	/* find extent for this block */
	path = ext4_ext_find_extent(inode, iblock, NULL);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out2;
	}

	depth = ext_depth(inode);

	/*
	 * consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification;
	 * this is why assert can't be put in ext4_ext_find_extent()
	 */
	BUG_ON(path[depth].p_ext == NULL && depth != 0);

	ex = path[depth].p_ext;
	if (ex) {
		unsigned long ee_block = le32_to_cpu(ex->ee_block);
		ext4_fsblk_t ee_start = ext_pblock(ex);
		unsigned short ee_len = le16_to_cpu(ex->ee_len);

		/*
		 * Allow future support for preallocated extents to be added
		 * as an RO_COMPAT feature:
		 * Uninitialized extents are treated as holes, except that
		 * we avoid (fail) allocating new blocks during a write.
		 */
		if (ee_len > EXT_MAX_LEN)
			goto out2;
		/* if found extent covers block, simply return it */
		if (iblock >= ee_block && iblock < ee_block + ee_len) {
			newblock = iblock - ee_block + ee_start;
			/* number of remaining blocks in the extent */
			allocated = ee_len - (iblock - ee_block);
			ext_debug("%d fit into %lu:%d -> %llu\n", (int) iblock,
					ee_block, ee_len, newblock);
			ext4_ext_put_in_cache(inode, ee_block, ee_len,
						ee_start, EXT4_EXT_CACHE_EXTENT);
			goto out;
		}
	}

	/*
	 * requested block isn't allocated yet;
	 * we must not create the block if the create flag is zero
	 */
	if (!create) {
		/* put just found gap into cache to speed up
		 * subsequent requests */
		ext4_ext_put_gap_in_cache(inode, path, iblock);
		goto out2;
	}
	/*
	 * Okay, we need to do block allocation. Lazily initialize the block
	 * allocation info here if necessary.
	 */
	if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
		ext4_init_block_alloc_info(inode);

	/* allocate new block */
	goal = ext4_ext_find_goal(inode, path, iblock);
	allocated = max_blocks;
	newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
	if (!newblock)
		goto out2;
	ext_debug("allocate new block: goal %llu, found %llu/%lu\n",
			goal, newblock, allocated);

	/* try to insert new extent into found leaf and return */
	newex.ee_block = cpu_to_le32(iblock);
	ext4_ext_store_pblock(&newex, newblock);
	newex.ee_len = cpu_to_le16(allocated);
	err = ext4_ext_insert_extent(handle, inode, path, &newex);
	if (err)
		goto out2;

	if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
		EXT4_I(inode)->i_disksize = inode->i_size;

	/* previous routine could use block we allocated */
	newblock = ext_pblock(&newex);
	__set_bit(BH_New, &bh_result->b_state);

	ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
				EXT4_EXT_CACHE_EXTENT);
out:
	if (allocated > max_blocks)
		allocated = max_blocks;
	ext4_ext_show_leaf(inode, path);
	__set_bit(BH_Mapped, &bh_result->b_state);
	bh_result->b_bdev = inode->i_sb->s_bdev;
	bh_result->b_blocknr = newblock;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	mutex_unlock(&EXT4_I(inode)->truncate_mutex);

	return err ? err : allocated;
}
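/*
 * Editor's sketch (hypothetical caller, not in the original file): a
 * typical get_block-style use.  With create == 0 a lookup either maps bh
 * onto an existing extent or returns 0 for a hole without touching the
 * tree; with create == 1 it allocates and inserts a new extent.
 */
#if 0
static int example_lookup(handle_t *handle, struct inode *inode,
			ext4_fsblk_t iblock, struct buffer_head *bh)
{
	/* returns < 0 on error, 0 if hole, else number of mapped blocks */
	return ext4_ext_get_blocks(handle, inode, iblock, 1, bh, 0, 0);
}
#endif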
void ext4_ext_truncate(struct inode * inode, struct page *page)
{
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	unsigned long last_block;
	handle_t *handle;
	int err = 0;

	/*
	 * probably first extent we're gonna free will be last in block
	 */
	err = ext4_writepage_trans_blocks(inode) + 3;
	handle = ext4_journal_start(inode, err);
	if (IS_ERR(handle)) {
		if (page) {
			clear_highpage(page);
			flush_dcache_page(page);
			unlock_page(page);
			page_cache_release(page);
		}
		return;
	}

	if (page)
		ext4_block_truncate_page(handle, page, mapping, inode->i_size);

	mutex_lock(&EXT4_I(inode)->truncate_mutex);
	ext4_ext_invalidate_cache(inode);

	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */
	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	/* we have to know where to truncate from in crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	ext4_mark_inode_dirty(handle, inode);

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
	err = ext4_ext_remove_space(inode, last_block);

	/* In a multi-transaction truncate, we only make the final
	 * transaction synchronous. */
	if (IS_SYNC(inode))
		handle->h_sync = 1;

out_stop:
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	mutex_unlock(&EXT4_I(inode)->truncate_mutex);
	ext4_journal_stop(handle);
}
/*
 * ext4_ext_writepage_trans_blocks:
 * calculate max number of blocks we could modify
 * in order to allocate new block for an inode
 */
int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
{
	int needed;

	needed = ext4_ext_calc_credits_for_insert(inode, NULL);

	/* caller wants to allocate num blocks, but note it includes sb */
	needed = needed * num - (num - 1);
#ifdef CONFIG_QUOTA
	needed += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

	return needed;
}
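/*
 * Editor's worked example: with the no-path estimate of 24 credits per
 * insert (see ext4_ext_calc_credits_for_insert above), allocating
 * num = 4 blocks needs 24 * 4 - 3 = 93 credits before the quota blocks;
 * the superblock credit is shared, hence the "- (num - 1)".
 */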
EXPORT_SYMBOL(ext4_mark_inode_dirty);
EXPORT_SYMBOL(ext4_ext_invalidate_cache);
EXPORT_SYMBOL(ext4_ext_insert_extent);
EXPORT_SYMBOL(ext4_ext_walk_space);
EXPORT_SYMBOL(ext4_ext_find_goal);
EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);