2 * linux/fs/ext4/inode.c
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
11 * linux/fs/minix/inode.c
13 * Copyright (C) 1991, 1992 Linus Torvalds
15 * Goal-directed block allocation by Stephen Tweedie
16 * (sct@redhat.com), 1993, 1998
17 * Big-endian to little-endian byte-swapping/bitmaps by
18 * David S. Miller (davem@caip.rutgers.edu), 1995
19 * 64-bit file support on 64-bit platforms by Jakub Jelinek
20 * (jj@sunsite.ms.mff.cuni.cz)
22 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
25 #include <linux/module.h>
27 #include <linux/time.h>
28 #include <linux/jbd2.h>
29 #include <linux/highuid.h>
30 #include <linux/pagemap.h>
31 #include <linux/quotaops.h>
32 #include <linux/string.h>
33 #include <linux/buffer_head.h>
34 #include <linux/writeback.h>
35 #include <linux/mpage.h>
36 #include <linux/uio.h>
37 #include <linux/bio.h>
38 #include "ext4_jbd2.h"
42 static inline int ext4_begin_ordered_truncate(struct inode *inode,
45 return jbd2_journal_begin_ordered_truncate(&EXT4_I(inode)->jinode,
50 * Test whether an inode is a fast symlink.
52 static int ext4_inode_is_fast_symlink(struct inode *inode)
54 int ea_blocks = EXT4_I(inode)->i_file_acl ?
55 (inode->i_sb->s_blocksize >> 9) : 0;
57 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
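/*
 * Background for the check above: a fast symlink keeps its target string
 * inline in the inode's i_data area rather than in an allocated data
 * block, so i_blocks stays at zero apart from a possible external xattr
 * block (accounted for by ea_blocks, in 512-byte units). A symlink whose
 * target fits in i_data is therefore recognised here without any I/O.
 */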
61 * The ext4 forget function must perform a revoke if we are freeing data
62 * which has been journaled. Metadata (e.g. indirect blocks) must be
63 * revoked in all cases.
65 * "bh" may be NULL: a metadata block may have been freed from memory
66 * but there may still be a record of it in the journal, and that record
67 * still needs to be revoked.
69 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
70 struct buffer_head *bh, ext4_fsblk_t blocknr)
76 BUFFER_TRACE(bh, "enter");
78 jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
80 bh, is_metadata, inode->i_mode,
81 test_opt(inode->i_sb, DATA_FLAGS));
83 /* Never use the revoke function if we are doing full data
84 * journaling: there is no need to, and a V1 superblock won't
85 * support it. Otherwise, only skip the revoke on un-journaled
88 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
89 (!is_metadata && !ext4_should_journal_data(inode))) {
91 BUFFER_TRACE(bh, "call jbd2_journal_forget");
92 return ext4_journal_forget(handle, bh);
98 * data!=journal && (is_metadata || should_journal_data(inode))
100 BUFFER_TRACE(bh, "call ext4_journal_revoke");
101 err = ext4_journal_revoke(handle, blocknr, bh);
103 ext4_abort(inode->i_sb, __func__,
104 "error %d when attempting revoke", err);
105 BUFFER_TRACE(bh, "exit");
110 * Work out how many blocks we need to proceed with the next chunk of a
111 * truncate transaction.
113 static unsigned long blocks_for_truncate(struct inode *inode)
117 needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
119 /* Give ourselves just enough room to cope with inodes in which
120 * i_blocks is corrupt: we've seen disk corruptions in the past
121 * which resulted in random data in an inode which looked enough
122 * like a regular file for ext4 to try to delete it. Things
123 * will go a bit crazy if that happens, but at least we should
124 * try not to panic the whole kernel. */
128 /* But we need to bound the transaction so we don't overflow the
130 if (needed > EXT4_MAX_TRANS_DATA)
131 needed = EXT4_MAX_TRANS_DATA;
133 return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
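/*
 * Illustration of the estimate above, assuming a 4KB block size: a file
 * with i_blocks == 204800 (512-byte units, roughly 100MB of data) gives
 * needed = 204800 >> 3 = 25600, which is then clamped to
 * EXT4_MAX_TRANS_DATA before the fixed EXT4_DATA_TRANS_BLOCKS()
 * overhead is added.
 */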
137 * Truncate transactions can be complex and absolutely huge. So we need to
138 * be able to restart the transaction at a convenient checkpoint to make
139 * sure we don't overflow the journal.
141 * start_transaction gets us a new handle for a truncate transaction,
142 * and extend_transaction tries to extend the existing one a bit. If
143 * extend fails, we need to propagate the failure up and restart the
144 * transaction in the top-level truncate loop. --sct
146 static handle_t *start_transaction(struct inode *inode)
150 result = ext4_journal_start(inode, blocks_for_truncate(inode));
154 ext4_std_error(inode->i_sb, PTR_ERR(result));
159 * Try to extend this transaction for the purposes of truncation.
161 * Returns 0 if we managed to create more room. If we can't create more
162 * room, and the transaction must be restarted, we return 1.
164 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
166 if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
168 if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
174 * Restart the transaction associated with *handle. This does a commit,
175 * so before we call here everything must be consistently dirtied against
178 static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
180 jbd_debug(2, "restarting handle %p\n", handle);
181 return ext4_journal_restart(handle, blocks_for_truncate(inode));
185 * Called at the last iput() if i_nlink is zero.
187 void ext4_delete_inode(struct inode *inode)
191 if (ext4_should_order_data(inode))
192 ext4_begin_ordered_truncate(inode, 0);
193 truncate_inode_pages(&inode->i_data, 0);
195 if (is_bad_inode(inode))
198 handle = start_transaction(inode);
199 if (IS_ERR(handle)) {
201 * If we're going to skip the normal cleanup, we still need to
202 * make sure that the in-core orphan linked list is properly
205 ext4_orphan_del(NULL, inode);
213 ext4_truncate(inode);
215 * Kill off the orphan record which ext4_truncate created.
216 * AKPM: I think this can be inside the above `if'.
217 * Note that ext4_orphan_del() has to be able to cope with the
218 * deletion of a non-existent orphan - this is because we don't
219 * know if ext4_truncate() actually created an orphan record.
220 * (Well, we could do this if we need to, but heck - it works)
222 ext4_orphan_del(handle, inode);
223 EXT4_I(inode)->i_dtime = get_seconds();
226 * One subtle ordering requirement: if anything has gone wrong
227 * (transaction abort, IO errors, whatever), then we can still
228 * do these next steps (the fs will already have been marked as
229 * having errors), but we can't free the inode if the mark_dirty
232 if (ext4_mark_inode_dirty(handle, inode))
233 /* If that failed, just do the required in-core inode clear. */
236 ext4_free_inode(handle, inode);
237 ext4_journal_stop(handle);
240 clear_inode(inode); /* We must guarantee clearing of inode... */
246 struct buffer_head *bh;
249 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
251 p->key = *(p->p = v);
256 * ext4_block_to_path - parse the block number into array of offsets
257 * @inode: inode in question (we are only interested in its superblock)
258 * @i_block: block number to be parsed
259 * @offsets: array to store the offsets in
260 * @boundary: set this non-zero if the referred-to block is likely to be
261 * followed (on disk) by an indirect block.
263 * To store the locations of a file's data ext4 uses a data structure common
264 * for UNIX filesystems - tree of pointers anchored in the inode, with
265 * data blocks at leaves and indirect blocks in intermediate nodes.
266 * This function translates the block number into path in that tree -
267 * return value is the path length and @offsets[n] is the offset of
268 * pointer to the (n+1)th node in the nth one. If @block is out of range
269 * (negative or too large), a warning is printed and zero is returned.
271 * Note: function doesn't find node addresses, so no IO is needed. All
272 * we need to know is the capacity of indirect blocks (taken from the
277 * Portability note: the last comparison (check that we fit into triple
278 * indirect block) is spelled differently, because otherwise on an
279 * architecture with 32-bit longs and 8Kb pages we might get into trouble
280 * if our filesystem had 8Kb blocks. We might use long long, but that would
281 * kill us on x86. Oh, well, at least the sign propagation does not matter -
282 * i_block would have to be negative in the very beginning, so we would not
286 static int ext4_block_to_path(struct inode *inode,
288 ext4_lblk_t offsets[4], int *boundary)
290 int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
291 int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
292 const long direct_blocks = EXT4_NDIR_BLOCKS,
293 indirect_blocks = ptrs,
294 double_blocks = (1 << (ptrs_bits * 2));
299 ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
300 } else if (i_block < direct_blocks) {
301 offsets[n++] = i_block;
302 final = direct_blocks;
303 } else if ((i_block -= direct_blocks) < indirect_blocks) {
304 offsets[n++] = EXT4_IND_BLOCK;
305 offsets[n++] = i_block;
307 } else if ((i_block -= indirect_blocks) < double_blocks) {
308 offsets[n++] = EXT4_DIND_BLOCK;
309 offsets[n++] = i_block >> ptrs_bits;
310 offsets[n++] = i_block & (ptrs - 1);
312 } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
313 offsets[n++] = EXT4_TIND_BLOCK;
314 offsets[n++] = i_block >> (ptrs_bits * 2);
315 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
316 offsets[n++] = i_block & (ptrs - 1);
319 ext4_warning(inode->i_sb, "ext4_block_to_path",
321 i_block + direct_blocks +
322 indirect_blocks + double_blocks);
325 *boundary = final - 1 - (i_block & (ptrs - 1));
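/*
 * Example of the mapping above, assuming a 4KB block size (ptrs == 1024,
 * EXT4_NDIR_BLOCKS == 12): logical block 5 yields the single offset {5}
 * (a direct block), block 12 yields {EXT4_IND_BLOCK, 0}, and block 2000
 * yields {EXT4_DIND_BLOCK, 0, 964}, since 2000 - 12 - 1024 = 964,
 * 964 >> 10 = 0 and 964 & 1023 = 964.
 */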
330 * ext4_get_branch - read the chain of indirect blocks leading to data
331 * @inode: inode in question
332 * @depth: depth of the chain (1 - direct pointer, etc.)
333 * @offsets: offsets of pointers in inode/indirect blocks
334 * @chain: place to store the result
335 * @err: here we store the error value
337 * Function fills the array of triples <key, p, bh> and returns %NULL
338 * if everything went OK or the pointer to the last filled triple
339 * (incomplete one) otherwise. Upon the return chain[i].key contains
340 * the number of (i+1)-th block in the chain (as it is stored in memory,
341 * i.e. little-endian 32-bit), chain[i].p contains the address of that
342 * number (it points into struct inode for i==0 and into the bh->b_data
343 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
344 * block for i>0 and NULL for i==0. In other words, it holds the block
345 * numbers of the chain, addresses they were taken from (and where we can
346 * verify that chain did not change) and buffer_heads hosting these
349 * Function stops when it stumbles upon zero pointer (absent block)
350 * (pointer to last triple returned, *@err == 0)
351 * or when it gets an IO error reading an indirect block
352 * (ditto, *@err == -EIO)
353 * or when it reads all @depth-1 indirect blocks successfully and finds
354 * the whole chain, all way to the data (returns %NULL, *err == 0).
356 * Need to be called with
357 * down_read(&EXT4_I(inode)->i_data_sem)
359 static Indirect *ext4_get_branch(struct inode *inode, int depth,
360 ext4_lblk_t *offsets,
361 Indirect chain[4], int *err)
363 struct super_block *sb = inode->i_sb;
365 struct buffer_head *bh;
368 /* i_data is not going away, no lock needed */
369 add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
373 bh = sb_bread(sb, le32_to_cpu(p->key));
376 add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
390 * ext4_find_near - find a place for allocation with sufficient locality
392 * @ind: descriptor of indirect block.
394 * This function returns the preferred place for block allocation.
395 * It is used when heuristic for sequential allocation fails.
397 * + if there is a block to the left of our position - allocate near it.
398 * + if pointer will live in indirect block - allocate near that block.
399 * + if pointer will live in inode - allocate in the same
402 * In the latter case we colour the starting block by the caller's PID to
403 * prevent it from clashing with concurrent allocations for a different inode
404 * in the same block group. The PID is used here so that functionally related
405 * files will be close-by on-disk.
407 * Caller must make sure that @ind is valid and will stay that way.
409 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
411 struct ext4_inode_info *ei = EXT4_I(inode);
412 __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
414 ext4_fsblk_t bg_start;
415 ext4_fsblk_t last_block;
416 ext4_grpblk_t colour;
418 /* Try to find previous block */
419 for (p = ind->p - 1; p >= start; p--) {
421 return le32_to_cpu(*p);
424 /* No such thing, so let's try location of indirect block */
426 return ind->bh->b_blocknr;
429 * It is going to be referred to from the inode itself? OK, just put it
430 * into the same cylinder group then.
432 bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
433 last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
435 if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
436 colour = (current->pid % 16) *
437 (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
439 colour = (current->pid % 16) * ((last_block - bg_start) / 16);
440 return bg_start + colour;
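/*
 * For example, with 32768 blocks per group (the default for 4KB blocks)
 * a caller whose pid % 16 == 2 starts searching 2 * (32768 / 16) == 4096
 * blocks into the group, so unrelated writers in the same group rarely
 * contend for the same stretch of blocks.
 */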
444 * ext4_find_goal - find a preferred place for allocation.
446 * @block: block we want
447 * @partial: pointer to the last triple within a chain
449 * Normally this function finds the preferred place for block allocation,
452 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
455 struct ext4_block_alloc_info *block_i;
457 block_i = EXT4_I(inode)->i_block_alloc_info;
460 * try the heuristic for sequential allocation,
461 * failing that at least try to get decent locality.
463 if (block_i && (block == block_i->last_alloc_logical_block + 1)
464 && (block_i->last_alloc_physical_block != 0)) {
465 return block_i->last_alloc_physical_block + 1;
468 return ext4_find_near(inode, partial);
472 * ext4_blks_to_allocate: Look up the block map and count the number
473 * of direct blocks that need to be allocated for the given branch.
475 * @branch: chain of indirect blocks
476 * @k: number of blocks needed for indirect blocks
477 * @blks: number of data blocks to be mapped.
478 * @blocks_to_boundary: the offset in the indirect block
480 * return the total number of blocks to be allocated, including the
481 * direct and indirect blocks.
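 * For example, with @blks == 10 and @blocks_to_boundary == 5, an
 * unallocated indirect block (@k > 0) limits the result to
 * 5 + 1 == 6 so the allocation does not cross the indirect block
 * boundary; only when the branch already exists can the loop below
 * extend the count further.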
483 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
484 int blocks_to_boundary)
486 unsigned long count = 0;
489 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
490 * so it is clear the blocks on that path have not been allocated
493 /* right now we don't handle cross boundary allocation */
494 if (blks < blocks_to_boundary + 1)
497 count += blocks_to_boundary + 1;
502 while (count < blks && count <= blocks_to_boundary &&
503 le32_to_cpu(*(branch[0].p + count)) == 0) {
510 * ext4_alloc_blocks: allocate the multiple blocks needed for a branch
511 * @indirect_blks: the number of blocks we need to allocate for indirect
514 * @new_blocks: on return it will store the new block numbers for
515 * the indirect blocks (if needed) and the first direct block,
516 * @blks: on return it will store the total number of allocated
519 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
520 ext4_lblk_t iblock, ext4_fsblk_t goal,
521 int indirect_blks, int blks,
522 ext4_fsblk_t new_blocks[4], int *err)
525 unsigned long count = 0, blk_allocated = 0;
527 ext4_fsblk_t current_block = 0;
531 * Here we try to allocate the requested multiple blocks at once,
532 * on a best-effort basis.
533 * To build a branch, we should allocate blocks for
534 * the indirect blocks (if not allocated yet), and at least
535 * the first direct block of this branch. That's the
536 * minimum number of blocks we need to allocate (required)
538 /* first we try to allocate the indirect blocks */
539 target = indirect_blks;
542 /* allocating blocks for indirect blocks and direct blocks */
543 current_block = ext4_new_meta_blocks(handle, inode,
549 /* allocate blocks for indirect blocks */
550 while (index < indirect_blks && count) {
551 new_blocks[index++] = current_block++;
556 * save the new block number
557 * for the first direct block
559 new_blocks[index] = current_block;
560 printk(KERN_INFO "%s returned more blocks than "
561 "requested\n", __func__);
567 target = blks - count;
568 blk_allocated = count;
571 /* Now allocate data blocks */
573 /* allocating blocks for data blocks */
574 current_block = ext4_new_blocks(handle, inode, iblock,
576 if (*err && (target == blks)) {
578 * if the allocation failed and we didn't allocate
584 if (target == blks) {
586 * save the new block number
587 * for the first direct block
589 new_blocks[index] = current_block;
591 blk_allocated += count;
594 /* total number of blocks allocated for direct blocks */
599 for (i = 0; i < index; i++)
600 ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
605 * ext4_alloc_branch - allocate and set up a chain of blocks.
607 * @indirect_blks: number of allocated indirect blocks
608 * @blks: number of allocated direct blocks
609 * @offsets: offsets (in the blocks) to store the pointers to next.
610 * @branch: place to store the chain in.
612 * This function allocates blocks, zeroes out all but the last one,
613 * links them into chain and (if we are synchronous) writes them to disk.
614 * In other words, it prepares a branch that can be spliced onto the
615 * inode. It stores the information about that chain in the branch[], in
616 * the same format as ext4_get_branch() would do. We are calling it after
617 * we had read the existing part of chain and partial points to the last
618 * triple of that (one with zero ->key). Upon the exit we have the same
619 * picture as after the successful ext4_get_block(), except that in one
620 * place chain is disconnected - *branch->p is still zero (we did not
621 * set the last link), but branch->key contains the number that should
622 * be placed into *branch->p to fill that gap.
624 * If allocation fails we free all blocks we've allocated (and forget
625 * their buffer_heads) and return the error value from the failed
626 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
627 * as described above and return 0.
629 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
630 ext4_lblk_t iblock, int indirect_blks,
631 int *blks, ext4_fsblk_t goal,
632 ext4_lblk_t *offsets, Indirect *branch)
634 int blocksize = inode->i_sb->s_blocksize;
637 struct buffer_head *bh;
639 ext4_fsblk_t new_blocks[4];
640 ext4_fsblk_t current_block;
642 num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
643 *blks, new_blocks, &err);
647 branch[0].key = cpu_to_le32(new_blocks[0]);
649 * metadata blocks and data blocks are allocated.
651 for (n = 1; n <= indirect_blks; n++) {
653 * Get buffer_head for parent block, zero it out
654 * and set the pointer to new one, then send
657 bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
660 BUFFER_TRACE(bh, "call get_create_access");
661 err = ext4_journal_get_create_access(handle, bh);
668 memset(bh->b_data, 0, blocksize);
669 branch[n].p = (__le32 *) bh->b_data + offsets[n];
670 branch[n].key = cpu_to_le32(new_blocks[n]);
671 *branch[n].p = branch[n].key;
672 if (n == indirect_blks) {
673 current_block = new_blocks[n];
675 * End of chain, update the last new metablock of
676 * the chain to point to the newly allocated
677 * data block numbers
679 for (i = 1; i < num; i++)
680 *(branch[n].p + i) = cpu_to_le32(++current_block);
682 BUFFER_TRACE(bh, "marking uptodate");
683 set_buffer_uptodate(bh);
686 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
687 err = ext4_journal_dirty_metadata(handle, bh);
694 /* Allocation failed, free what we already allocated */
695 for (i = 1; i <= n; i++) {
696 BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
697 ext4_journal_forget(handle, branch[i].bh);
699 for (i = 0; i < indirect_blks; i++)
700 ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
702 ext4_free_blocks(handle, inode, new_blocks[i], num, 0);
708 * ext4_splice_branch - splice the allocated branch onto inode.
710 * @block: (logical) number of block we are adding
711 * @chain: chain of indirect blocks (with a missing link - see
713 * @where: location of missing link
714 * @num: number of indirect blocks we are adding
715 * @blks: number of direct blocks we are adding
717 * This function fills the missing link and does all housekeeping needed in
718 * inode (->i_blocks, etc.). In case of success we end up with the full
719 * chain to new block and return 0.
721 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
722 ext4_lblk_t block, Indirect *where, int num, int blks)
726 struct ext4_block_alloc_info *block_i;
727 ext4_fsblk_t current_block;
729 block_i = EXT4_I(inode)->i_block_alloc_info;
731 * If we're splicing into a [td]indirect block (as opposed to the
732 * inode) then we need to get write access to the [td]indirect block
736 BUFFER_TRACE(where->bh, "get_write_access");
737 err = ext4_journal_get_write_access(handle, where->bh);
743 *where->p = where->key;
746 * Update the host buffer_head or inode to point to the newly
747 * allocated direct blocks
749 if (num == 0 && blks > 1) {
750 current_block = le32_to_cpu(where->key) + 1;
751 for (i = 1; i < blks; i++)
752 *(where->p + i) = cpu_to_le32(current_block++);
756 * update the most recently allocated logical & physical block
757 * in i_block_alloc_info, to help find the proper goal block for the next
761 block_i->last_alloc_logical_block = block + blks - 1;
762 block_i->last_alloc_physical_block =
763 le32_to_cpu(where[num].key) + blks - 1;
766 /* We are done with atomic stuff, now do the rest of housekeeping */
768 inode->i_ctime = ext4_current_time(inode);
769 ext4_mark_inode_dirty(handle, inode);
771 /* had we spliced it onto an indirect block? */
774 * If we spliced it onto an indirect block, we haven't
775 * altered the inode. Note however that if it is being spliced
776 * onto an indirect block at the very end of the file (the
777 * file is growing) then we *will* alter the inode to reflect
778 * the new i_size. But that is not done here - it is done in
779 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
781 jbd_debug(5, "splicing indirect only\n");
782 BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
783 err = ext4_journal_dirty_metadata(handle, where->bh);
788 * OK, we spliced it into the inode itself on a direct block.
789 * Inode was dirtied above.
791 jbd_debug(5, "splicing direct\n");
796 for (i = 1; i <= num; i++) {
797 BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
798 ext4_journal_forget(handle, where[i].bh);
799 ext4_free_blocks(handle, inode,
800 le32_to_cpu(where[i-1].key), 1, 0);
802 ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);
808 * Allocation strategy is simple: if we have to allocate something, we will
809 * have to go the whole way to leaf. So let's do it before attaching anything
810 * to tree, set linkage between the newborn blocks, write them if sync is
811 * required, recheck the path, free and repeat if check fails, otherwise
812 * set the last missing link (that will protect us from any truncate-generated
813 * removals - all blocks on the path are immune now) and possibly force the
814 * write on the parent block.
815 * That has a nice additional property: no special recovery from the failed
816 * allocations is needed - we simply release blocks and do not touch anything
817 * reachable from inode.
819 * `handle' can be NULL if create == 0.
821 * return > 0, # of blocks mapped or allocated.
822 * return = 0, if plain lookup failed.
823 * return < 0, error case.
826 * Need to be called with
827 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
828 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
830 int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
831 ext4_lblk_t iblock, unsigned long maxblocks,
832 struct buffer_head *bh_result,
833 int create, int extend_disksize)
836 ext4_lblk_t offsets[4];
841 int blocks_to_boundary = 0;
843 struct ext4_inode_info *ei = EXT4_I(inode);
845 ext4_fsblk_t first_block = 0;
848 J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
849 J_ASSERT(handle != NULL || create == 0);
850 depth = ext4_block_to_path(inode, iblock, offsets,
851 &blocks_to_boundary);
856 partial = ext4_get_branch(inode, depth, offsets, chain, &err);
858 /* Simplest case - block found, no allocation needed */
860 first_block = le32_to_cpu(chain[depth - 1].key);
861 clear_buffer_new(bh_result);
864 while (count < maxblocks && count <= blocks_to_boundary) {
867 blk = le32_to_cpu(*(chain[depth-1].p + count));
869 if (blk == first_block + count)
877 /* Next simple case - plain lookup or failed read of indirect block */
878 if (!create || err == -EIO)
882 * Okay, we need to do block allocation. Lazily initialize the block
883 * allocation info here if necessary
885 if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
886 ext4_init_block_alloc_info(inode);
888 goal = ext4_find_goal(inode, iblock, partial);
890 /* the number of blocks needed to allocate for [d,t]indirect blocks */
891 indirect_blks = (chain + depth) - partial - 1;
894 * Next look up the indirect map to count the total number of
895 * direct blocks to allocate for this branch.
897 count = ext4_blks_to_allocate(partial, indirect_blks,
898 maxblocks, blocks_to_boundary);
900 * Block out ext4_truncate while we alter the tree
902 err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
904 offsets + (partial - chain), partial);
907 * The ext4_splice_branch call will free and forget any buffers
908 * on the new chain if there is a failure, but that risks using
909 * up transaction credits, especially for bitmaps where the
910 * credits cannot be returned. Can we handle this somehow? We
911 * may need to return -EAGAIN upwards in the worst case. --sct
914 err = ext4_splice_branch(handle, inode, iblock,
915 partial, indirect_blks, count);
917 * i_disksize growing is protected by i_data_sem. Don't forget to
918 * protect it if you're about to implement concurrent
919 * ext4_get_block() -bzzz
921 if (!err && extend_disksize && inode->i_size > ei->i_disksize)
922 ei->i_disksize = inode->i_size;
926 set_buffer_new(bh_result);
928 map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
929 if (count > blocks_to_boundary)
930 set_buffer_boundary(bh_result);
932 /* Clean up and exit */
933 partial = chain + depth - 1; /* the whole chain */
935 while (partial > chain) {
936 BUFFER_TRACE(partial->bh, "call brelse");
940 BUFFER_TRACE(bh_result, "returned");
945 /* Maximum number of blocks we map for direct IO at once. */
946 #define DIO_MAX_BLOCKS 4096
948 * Number of credits we need for writing DIO_MAX_BLOCKS:
949 * We need sb + group descriptor + bitmap + inode -> 4
950 * For B blocks with A block pointers per block we need:
951 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
952 * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
954 #define DIO_CREDITS 25
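/*
 * Worked out with the numbers above: B == 4096 and A == 256 give
 * 4 + 1 + (4096/256/256 + 2) + (4096/256 + 2) = 4 + 1 + 2 + 18 = 25.
 */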
960 * The ext4 get_block() wrapper function.
961 * It will do a lookup first, and return if the blocks are already mapped.
962 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
963 * stores the allocated blocks in the result buffer head and marks it
966 * If the file is extent-based, it will call ext4_ext_get_blocks().
967 * Otherwise, it calls ext4_get_blocks_handle() to handle the indirect mapping
970 * On success, it returns the number of blocks mapped or allocated.
971 * If create == 0 and the blocks are pre-allocated and uninitialized,
972 * the result buffer head is unmapped. If create == 1, it will make sure
973 * the buffer head is mapped.
975 * It returns 0 if the plain lookup failed (blocks have not been allocated); in
976 * that case, the buffer head is unmapped.
978 * It returns the error in case of allocation failure.
980 int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
981 unsigned long max_blocks, struct buffer_head *bh,
982 int create, int extend_disksize)
986 clear_buffer_mapped(bh);
989 * Try to see if we can get the block without requesting
990 * a new file system block.
992 down_read((&EXT4_I(inode)->i_data_sem));
993 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
994 retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
997 retval = ext4_get_blocks_handle(handle,
998 inode, block, max_blocks, bh, 0, 0);
1000 up_read((&EXT4_I(inode)->i_data_sem));
1002 /* If it is only a block(s) lookup */
1007 * Return if the blocks have already been allocated.
1009 * Note that if blocks have been preallocated,
1010 * ext4_ext_get_block() returns with create = 0
1011 * and the buffer head unmapped.
1013 if (retval > 0 && buffer_mapped(bh))
1017 * New block allocation and/or writing to an uninitialized extent
1018 * will possibly result in updating i_data, so we take
1019 * the write lock of i_data_sem, and call get_blocks()
1020 * with create == 1 flag.
1022 down_write((&EXT4_I(inode)->i_data_sem));
1024 * We need to check the extents flag again here because migrate
1025 * could have changed the inode type in between
1027 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
1028 retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
1029 bh, create, extend_disksize);
1031 retval = ext4_get_blocks_handle(handle, inode, block,
1032 max_blocks, bh, create, extend_disksize);
1034 if (retval > 0 && buffer_new(bh)) {
1036 * We allocated new blocks which will result in
1037 * i_data's format changing. Force the migrate
1038 * to fail by clearing migrate flags
1040 EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
1044 up_write((&EXT4_I(inode)->i_data_sem));
1048 static int ext4_get_block(struct inode *inode, sector_t iblock,
1049 struct buffer_head *bh_result, int create)
1051 handle_t *handle = ext4_journal_current_handle();
1052 int ret = 0, started = 0;
1053 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1055 if (create && !handle) {
1056 /* Direct IO write... */
1057 if (max_blocks > DIO_MAX_BLOCKS)
1058 max_blocks = DIO_MAX_BLOCKS;
1059 handle = ext4_journal_start(inode, DIO_CREDITS +
1060 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb));
1061 if (IS_ERR(handle)) {
1062 ret = PTR_ERR(handle);
1068 ret = ext4_get_blocks_wrap(handle, inode, iblock,
1069 max_blocks, bh_result, create, 0);
1071 bh_result->b_size = (ret << inode->i_blkbits);
1075 ext4_journal_stop(handle);
1081 * `handle' can be NULL if create is zero
1083 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
1084 ext4_lblk_t block, int create, int *errp)
1086 struct buffer_head dummy;
1089 J_ASSERT(handle != NULL || create == 0);
1092 dummy.b_blocknr = -1000;
1093 buffer_trace_init(&dummy.b_history);
1094 err = ext4_get_blocks_wrap(handle, inode, block, 1,
1097 * ext4_get_blocks_handle() returns number of blocks
1098 * mapped. 0 in case of a HOLE.
1106 if (!err && buffer_mapped(&dummy)) {
1107 struct buffer_head *bh;
1108 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1113 if (buffer_new(&dummy)) {
1114 J_ASSERT(create != 0);
1115 J_ASSERT(handle != NULL);
1118 * Now that we do not always journal data, we should
1119 * keep in mind whether this should always journal the
1120 * new buffer as metadata. For now, regular file
1121 * writes use ext4_get_block instead, so it's not a
1125 BUFFER_TRACE(bh, "call get_create_access");
1126 fatal = ext4_journal_get_create_access(handle, bh);
1127 if (!fatal && !buffer_uptodate(bh)) {
1128 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1129 set_buffer_uptodate(bh);
1132 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
1133 err = ext4_journal_dirty_metadata(handle, bh);
1137 BUFFER_TRACE(bh, "not a new buffer");
1150 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1151 ext4_lblk_t block, int create, int *err)
1153 struct buffer_head *bh;
1155 bh = ext4_getblk(handle, inode, block, create, err);
1158 if (buffer_uptodate(bh))
1160 ll_rw_block(READ_META, 1, &bh);
1162 if (buffer_uptodate(bh))
1169 static int walk_page_buffers(handle_t *handle,
1170 struct buffer_head *head,
1174 int (*fn)(handle_t *handle,
1175 struct buffer_head *bh))
1177 struct buffer_head *bh;
1178 unsigned block_start, block_end;
1179 unsigned blocksize = head->b_size;
1181 struct buffer_head *next;
1183 for (bh = head, block_start = 0;
1184 ret == 0 && (bh != head || !block_start);
1185 block_start = block_end, bh = next)
1187 next = bh->b_this_page;
1188 block_end = block_start + blocksize;
1189 if (block_end <= from || block_start >= to) {
1190 if (partial && !buffer_uptodate(bh))
1194 err = (*fn)(handle, bh);
1202 * To preserve ordering, it is essential that the hole instantiation and
1203 * the data write be encapsulated in a single transaction. We cannot
1204 * close off a transaction and start a new one between the ext4_get_block()
1205 * and the commit_write(). So doing the jbd2_journal_start at the start of
1206 * prepare_write() is the right place.
1208 * Also, this function can nest inside ext4_writepage() ->
1209 * block_write_full_page(). In that case, we *know* that ext4_writepage()
1210 * has generated enough buffer credits to do the whole page. So we won't
1211 * block on the journal in that case, which is good, because the caller may
1214 * By accident, ext4 can be reentered when a transaction is open via
1215 * quota file writes. If we were to commit the transaction while thus
1216 * reentered, there can be a deadlock - we would be holding a quota
1217 * lock, and the commit would never complete if another thread had a
1218 * transaction open and was blocking on the quota lock - a ranking
1221 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1222 * will _not_ run commit under these circumstances because handle->h_ref
1223 * is elevated. We'll still have enough credits for the tiny quotafile
1226 static int do_journal_get_write_access(handle_t *handle,
1227 struct buffer_head *bh)
1229 if (!buffer_mapped(bh) || buffer_freed(bh))
1231 return ext4_journal_get_write_access(handle, bh);
1234 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1235 loff_t pos, unsigned len, unsigned flags,
1236 struct page **pagep, void **fsdata)
1238 struct inode *inode = mapping->host;
1239 int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
1246 index = pos >> PAGE_CACHE_SHIFT;
1247 from = pos & (PAGE_CACHE_SIZE - 1);
1251 handle = ext4_journal_start(inode, needed_blocks);
1252 if (IS_ERR(handle)) {
1253 ret = PTR_ERR(handle);
1257 page = __grab_cache_page(mapping, index);
1259 ext4_journal_stop(handle);
1265 ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1268 if (!ret && ext4_should_journal_data(inode)) {
1269 ret = walk_page_buffers(handle, page_buffers(page),
1270 from, to, NULL, do_journal_get_write_access);
1275 ext4_journal_stop(handle);
1276 page_cache_release(page);
1279 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1285 /* For write_end() in data=journal mode */
1286 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1288 if (!buffer_mapped(bh) || buffer_freed(bh))
1290 set_buffer_uptodate(bh);
1291 return ext4_journal_dirty_metadata(handle, bh);
1295 * We need to pick up the new inode size which generic_commit_write gave us
1296 * `file' can be NULL - eg, when called from page_symlink().
1298 * ext4 never places buffers on inode->i_mapping->private_list. Metadata
1299 * buffers are managed internally.
1301 static int ext4_ordered_write_end(struct file *file,
1302 struct address_space *mapping,
1303 loff_t pos, unsigned len, unsigned copied,
1304 struct page *page, void *fsdata)
1306 handle_t *handle = ext4_journal_current_handle();
1307 struct inode *inode = mapping->host;
1311 from = pos & (PAGE_CACHE_SIZE - 1);
1314 ret = ext4_jbd2_file_inode(handle, inode);
1318 * generic_write_end() will run mark_inode_dirty() if i_size
1319 * changes. So let's piggyback the i_disksize mark_inode_dirty
1324 new_i_size = pos + copied;
1325 if (new_i_size > EXT4_I(inode)->i_disksize)
1326 EXT4_I(inode)->i_disksize = new_i_size;
1327 ret2 = generic_write_end(file, mapping, pos, len, copied,
1333 ret2 = ext4_journal_stop(handle);
1337 return ret ? ret : copied;
1340 static int ext4_writeback_write_end(struct file *file,
1341 struct address_space *mapping,
1342 loff_t pos, unsigned len, unsigned copied,
1343 struct page *page, void *fsdata)
1345 handle_t *handle = ext4_journal_current_handle();
1346 struct inode *inode = mapping->host;
1350 new_i_size = pos + copied;
1351 if (new_i_size > EXT4_I(inode)->i_disksize)
1352 EXT4_I(inode)->i_disksize = new_i_size;
1354 ret2 = generic_write_end(file, mapping, pos, len, copied,
1360 ret2 = ext4_journal_stop(handle);
1364 return ret ? ret : copied;
1367 static int ext4_journalled_write_end(struct file *file,
1368 struct address_space *mapping,
1369 loff_t pos, unsigned len, unsigned copied,
1370 struct page *page, void *fsdata)
1372 handle_t *handle = ext4_journal_current_handle();
1373 struct inode *inode = mapping->host;
1378 from = pos & (PAGE_CACHE_SIZE - 1);
1382 if (!PageUptodate(page))
1384 page_zero_new_buffers(page, from+copied, to);
1387 ret = walk_page_buffers(handle, page_buffers(page), from,
1388 to, &partial, write_end_fn);
1390 SetPageUptodate(page);
1391 if (pos + copied > inode->i_size)
1392 i_size_write(inode, pos + copied);
1393 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1394 if (inode->i_size > EXT4_I(inode)->i_disksize) {
1395 EXT4_I(inode)->i_disksize = inode->i_size;
1396 ret2 = ext4_mark_inode_dirty(handle, inode);
1402 ret2 = ext4_journal_stop(handle);
1405 page_cache_release(page);
1407 return ret ? ret : copied;
1411 * bmap() is special. It gets used by applications such as lilo and by
1412 * the swapper to find the on-disk block of a specific piece of data.
1414 * Naturally, this is dangerous if the block concerned is still in the
1415 * journal. If somebody makes a swapfile on an ext4 data-journaling
1416 * filesystem and enables swap, then they may get a nasty shock when the
1417 * data getting swapped to that swapfile suddenly gets overwritten by
1418 * the original zeros written out previously to the journal and
1419 * awaiting writeback in the kernel's buffer cache.
1421 * So, if we see any bmap calls here on a modified, data-journaled file,
1422 * take extra steps to flush any blocks which might be in the cache.
1424 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
1426 struct inode *inode = mapping->host;
1430 if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
1432 * This is a REALLY heavyweight approach, but the use of
1433 * bmap on dirty files is expected to be extremely rare:
1434 * only if we run lilo or swapon on a freshly made file
1435 * do we expect this to happen.
1437 * (bmap requires CAP_SYS_RAWIO so this does not
1438 * represent an unprivileged user DOS attack --- we'd be
1439 * in trouble if mortal users could trigger this path at
1442 * NB. EXT4_STATE_JDATA is not set on files other than
1443 * regular files. If somebody wants to bmap a directory
1444 * or symlink and gets confused because the buffer
1445 * hasn't yet been flushed to disk, they deserve
1446 * everything they get.
1449 EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
1450 journal = EXT4_JOURNAL(inode);
1451 jbd2_journal_lock_updates(journal);
1452 err = jbd2_journal_flush(journal);
1453 jbd2_journal_unlock_updates(journal);
1459 return generic_block_bmap(mapping, block, ext4_get_block);
1462 static int bget_one(handle_t *handle, struct buffer_head *bh)
1468 static int bput_one(handle_t *handle, struct buffer_head *bh)
1474 static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
1476 return !buffer_mapped(bh) || buffer_delay(bh);
1480 * Note that we don't need to start a transaction unless we're journaling data
1481 * because we should have holes filled from ext4_page_mkwrite(). We don't even
1482 * need to add the inode to the transaction's list in ordered mode because if
1483 * we are writing back data added by write(), the inode is already there, and if
1484 * we are writing back data modified via mmap(), no one guarantees in which
1485 * transaction the data will hit the disk. In case we are journaling data, we
1486 * cannot start transaction directly because transaction start ranks above page
1487 * lock so we have to do some magic.
1489 * In all journaling modes block_write_full_page() will start the I/O.
1493 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1498 * ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1500 * Same applies to ext4_get_block(). We will deadlock on various things like
1501 * lock_journal and i_data_sem
1503 * Setting PF_MEMALLOC here doesn't work - too many internal memory
1506 * 16May01: If we're reentered then journal_current_handle() will be
1507 * non-zero. We simply *return*.
1509 * 1 July 2001: @@@ FIXME:
1510 * In journalled data mode, a data buffer may be metadata against the
1511 * current transaction. But the same file is part of a shared mapping
1512 * and someone does a writepage() on it.
1514 * We will move the buffer onto the async_data list, but *after* it has
1515 * been dirtied. So there's a small window where we have dirty data on
1518 * Note that this only applies to the last partial page in the file. The
1519 * bit which block_write_full_page() uses prepare/commit for. (That's
1520 * broken code anyway: it's wrong for msync()).
1522 * It's a rare case: affects the final partial page, for journalled data
1523 * where the file is subject to both write() and writepage() in the same
1524 * transaction. To fix it we'll need a custom block_write_full_page().
1525 * We'll probably need that anyway for journalling writepage() output.
1527 * We don't honour synchronous mounts for writepage(). That would be
1528 * disastrous. Any write() or metadata operation will sync the fs for
1532 static int __ext4_normal_writepage(struct page *page,
1533 struct writeback_control *wbc)
1535 struct inode *inode = page->mapping->host;
1537 if (test_opt(inode->i_sb, NOBH))
1538 return nobh_writepage(page, ext4_get_block, wbc);
1540 return block_write_full_page(page, ext4_get_block, wbc);
1544 static int ext4_normal_writepage(struct page *page,
1545 struct writeback_control *wbc)
1547 struct inode *inode = page->mapping->host;
1548 loff_t size = i_size_read(inode);
1551 J_ASSERT(PageLocked(page));
1552 J_ASSERT(page_has_buffers(page));
1553 if (page->index == size >> PAGE_CACHE_SHIFT)
1554 len = size & ~PAGE_CACHE_MASK;
1556 len = PAGE_CACHE_SIZE;
1557 BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
1558 ext4_bh_unmapped_or_delay));
1560 if (!ext4_journal_current_handle())
1561 return __ext4_normal_writepage(page, wbc);
1563 redirty_page_for_writepage(wbc, page);
1568 static int __ext4_journalled_writepage(struct page *page,
1569 struct writeback_control *wbc)
1571 struct address_space *mapping = page->mapping;
1572 struct inode *inode = mapping->host;
1573 struct buffer_head *page_bufs;
1574 handle_t *handle = NULL;
1578 ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE, ext4_get_block);
1582 page_bufs = page_buffers(page);
1583 walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL,
1585 /* As soon as we unlock the page, it can go away, but we have
1586 * references to buffers so we are safe */
1589 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1590 if (IS_ERR(handle)) {
1591 ret = PTR_ERR(handle);
1595 ret = walk_page_buffers(handle, page_bufs, 0,
1596 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1598 err = walk_page_buffers(handle, page_bufs, 0,
1599 PAGE_CACHE_SIZE, NULL, write_end_fn);
1602 err = ext4_journal_stop(handle);
1606 walk_page_buffers(handle, page_bufs, 0,
1607 PAGE_CACHE_SIZE, NULL, bput_one);
1608 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1617 static int ext4_journalled_writepage(struct page *page,
1618 struct writeback_control *wbc)
1620 struct inode *inode = page->mapping->host;
1621 loff_t size = i_size_read(inode);
1624 J_ASSERT(PageLocked(page));
1625 J_ASSERT(page_has_buffers(page));
1626 if (page->index == size >> PAGE_CACHE_SHIFT)
1627 len = size & ~PAGE_CACHE_MASK;
1629 len = PAGE_CACHE_SIZE;
1630 BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
1631 ext4_bh_unmapped_or_delay));
1633 if (ext4_journal_current_handle())
1636 if (PageChecked(page)) {
1638 * It's mmapped pagecache. Add buffers and journal it. There
1639 * doesn't seem much point in redirtying the page here.
1641 ClearPageChecked(page);
1642 return __ext4_journalled_writepage(page, wbc);
1645 * It may be a page full of checkpoint-mode buffers. We don't
1646 * really know unless we go poke around in the buffer_heads.
1647 * But block_write_full_page will do the right thing.
1649 return block_write_full_page(page, ext4_get_block, wbc);
1652 redirty_page_for_writepage(wbc, page);
1657 static int ext4_readpage(struct file *file, struct page *page)
1659 return mpage_readpage(page, ext4_get_block);
1663 ext4_readpages(struct file *file, struct address_space *mapping,
1664 struct list_head *pages, unsigned nr_pages)
1666 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
1669 static void ext4_invalidatepage(struct page *page, unsigned long offset)
1671 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
1674 * If it's a full truncate we just forget about the pending dirtying
1677 ClearPageChecked(page);
1679 jbd2_journal_invalidatepage(journal, page, offset);
1682 static int ext4_releasepage(struct page *page, gfp_t wait)
1684 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
1686 WARN_ON(PageChecked(page));
1687 if (!page_has_buffers(page))
1689 return jbd2_journal_try_to_free_buffers(journal, page, wait);
1693 * If the O_DIRECT write will extend the file then add this inode to the
1694 * orphan list. So recovery will truncate it back to the original size
1695 * if the machine crashes during the write.
1697 * If the O_DIRECT write is instantiating holes inside i_size and the machine
1698 * crashes then stale disk data _may_ be exposed inside the file. But current
1699 * VFS code falls back into buffered path in that case so we are safe.
1701 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
1702 const struct iovec *iov, loff_t offset,
1703 unsigned long nr_segs)
1705 struct file *file = iocb->ki_filp;
1706 struct inode *inode = file->f_mapping->host;
1707 struct ext4_inode_info *ei = EXT4_I(inode);
1711 size_t count = iov_length(iov, nr_segs);
1714 loff_t final_size = offset + count;
1716 if (final_size > inode->i_size) {
1717 /* Credits for sb + inode write */
1718 handle = ext4_journal_start(inode, 2);
1719 if (IS_ERR(handle)) {
1720 ret = PTR_ERR(handle);
1723 ret = ext4_orphan_add(handle, inode);
1725 ext4_journal_stop(handle);
1729 ei->i_disksize = inode->i_size;
1730 ext4_journal_stop(handle);
1734 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
1736 ext4_get_block, NULL);
1741 /* Credits for sb + inode write */
1742 handle = ext4_journal_start(inode, 2);
1743 if (IS_ERR(handle)) {
1744 /* This is really bad luck. We've written the data
1745 * but cannot extend i_size. Bail out and pretend
1746 * the write failed... */
1747 ret = PTR_ERR(handle);
1751 ext4_orphan_del(handle, inode);
1753 loff_t end = offset + ret;
1754 if (end > inode->i_size) {
1755 ei->i_disksize = end;
1756 i_size_write(inode, end);
1758 * We're going to return a positive `ret'
1759 * here due to non-zero-length I/O, so there's
1760 * no way of reporting error returns from
1761 * ext4_mark_inode_dirty() to userspace. So
1764 ext4_mark_inode_dirty(handle, inode);
1767 err = ext4_journal_stop(handle);
1776 * Pages can be marked dirty completely asynchronously from ext4's journalling
1777 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
1778 * much here because ->set_page_dirty is called under VFS locks. The page is
1779 * not necessarily locked.
1781 * We cannot just dirty the page and leave attached buffers clean, because the
1782 * buffers' dirty state is "definitive". We cannot just set the buffers dirty
1783 * or jbddirty because all the journalling code will explode.
1785 * So what we do is to mark the page "pending dirty" and next time writepage
1786 * is called, propagate that into the buffers appropriately.
1788 static int ext4_journalled_set_page_dirty(struct page *page)
1790 SetPageChecked(page);
1791 return __set_page_dirty_nobuffers(page);
1794 static const struct address_space_operations ext4_ordered_aops = {
1795 .readpage = ext4_readpage,
1796 .readpages = ext4_readpages,
1797 .writepage = ext4_normal_writepage,
1798 .sync_page = block_sync_page,
1799 .write_begin = ext4_write_begin,
1800 .write_end = ext4_ordered_write_end,
1802 .invalidatepage = ext4_invalidatepage,
1803 .releasepage = ext4_releasepage,
1804 .direct_IO = ext4_direct_IO,
1805 .migratepage = buffer_migrate_page,
1808 static const struct address_space_operations ext4_writeback_aops = {
1809 .readpage = ext4_readpage,
1810 .readpages = ext4_readpages,
1811 .writepage = ext4_normal_writepage,
1812 .sync_page = block_sync_page,
1813 .write_begin = ext4_write_begin,
1814 .write_end = ext4_writeback_write_end,
1816 .invalidatepage = ext4_invalidatepage,
1817 .releasepage = ext4_releasepage,
1818 .direct_IO = ext4_direct_IO,
1819 .migratepage = buffer_migrate_page,
1822 static const struct address_space_operations ext4_journalled_aops = {
1823 .readpage = ext4_readpage,
1824 .readpages = ext4_readpages,
1825 .writepage = ext4_journalled_writepage,
1826 .sync_page = block_sync_page,
1827 .write_begin = ext4_write_begin,
1828 .write_end = ext4_journalled_write_end,
1829 .set_page_dirty = ext4_journalled_set_page_dirty,
1831 .invalidatepage = ext4_invalidatepage,
1832 .releasepage = ext4_releasepage,
1835 void ext4_set_aops(struct inode *inode)
1837 if (ext4_should_order_data(inode))
1838 inode->i_mapping->a_ops = &ext4_ordered_aops;
1839 else if (ext4_should_writeback_data(inode))
1840 inode->i_mapping->a_ops = &ext4_writeback_aops;
1842 inode->i_mapping->a_ops = &ext4_journalled_aops;
1846 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
1847 * up to the end of the block which corresponds to `from'.
1848 * This is required during truncate. We need to physically zero the tail end
1849 * of that block so it doesn't yield old data if the file is later grown.
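 * For example, with 4KB blocks and 4KB pages, from == 5000 zeroes bytes
 * 904..4095 of page index 1 (file offsets 5000..8191), i.e. the tail of
 * the block that now contains EOF.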
1851 int ext4_block_truncate_page(handle_t *handle,
1852 struct address_space *mapping, loff_t from)
1854 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
1855 unsigned offset = from & (PAGE_CACHE_SIZE-1);
1856 unsigned blocksize, length, pos;
1858 struct inode *inode = mapping->host;
1859 struct buffer_head *bh;
1863 page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT);
1867 blocksize = inode->i_sb->s_blocksize;
1868 length = blocksize - (offset & (blocksize - 1));
1869 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
1872 * For "nobh" option, we can only work if we don't need to
1873 * read-in the page - otherwise we create buffers to do the IO.
1875 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1876 ext4_should_writeback_data(inode) && PageUptodate(page)) {
1877 zero_user(page, offset, length);
1878 set_page_dirty(page);
1882 if (!page_has_buffers(page))
1883 create_empty_buffers(page, blocksize, 0);
1885 /* Find the buffer that contains "offset" */
1886 bh = page_buffers(page);
1888 while (offset >= pos) {
1889 bh = bh->b_this_page;
1895 if (buffer_freed(bh)) {
1896 BUFFER_TRACE(bh, "freed: skip");
1900 if (!buffer_mapped(bh)) {
1901 BUFFER_TRACE(bh, "unmapped");
1902 ext4_get_block(inode, iblock, bh, 0);
1903 /* unmapped? It's a hole - nothing to do */
1904 if (!buffer_mapped(bh)) {
1905 BUFFER_TRACE(bh, "still unmapped");
1910 /* Ok, it's mapped. Make sure it's up-to-date */
1911 if (PageUptodate(page))
1912 set_buffer_uptodate(bh);
1914 if (!buffer_uptodate(bh)) {
1916 ll_rw_block(READ, 1, &bh);
1918 /* Uhhuh. Read error. Complain and punt. */
1919 if (!buffer_uptodate(bh))
1923 if (ext4_should_journal_data(inode)) {
1924 BUFFER_TRACE(bh, "get write access");
1925 err = ext4_journal_get_write_access(handle, bh);
1930 zero_user(page, offset, length);
1932 BUFFER_TRACE(bh, "zeroed end of block");
1935 if (ext4_should_journal_data(inode)) {
1936 err = ext4_journal_dirty_metadata(handle, bh);
1938 if (ext4_should_order_data(inode))
1939 err = ext4_jbd2_file_inode(handle, inode);
1940 mark_buffer_dirty(bh);
1945 page_cache_release(page);
1950 * Probably it should be a library function... search for first non-zero word
1951 * or memcmp with zero_page, whatever is better for a particular architecture.
1954 static inline int all_zeroes(__le32 *p, __le32 *q)
1963 * ext4_find_shared - find the indirect blocks for partial truncation.
1964 * @inode: inode in question
1965 * @depth: depth of the affected branch
1966 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
1967 * @chain: place to store the pointers to partial indirect blocks
1968 * @top: place to the (detached) top of branch
1970 * This is a helper function used by ext4_truncate().
1972 * When we do truncate() we may have to clean the ends of several
1973 * indirect blocks but leave the blocks themselves alive. Block is
1974 * partially truncated if some data below the new i_size is referred to
1975 * from it (and it is on the path to the first completely truncated
1976 * data block, indeed). We have to free the top of that path along
1977 * with everything to the right of the path. Since no allocation
1978 * past the truncation point is possible until ext4_truncate()
1979 * finishes, we may safely do the latter, but top of branch may
1980 * require special attention - pageout below the truncation point
1981 * might try to populate it.
1983 * We atomically detach the top of branch from the tree, store the
1984 * block number of its root in *@top, pointers to buffer_heads of
1985 * partially truncated blocks - in @chain[].bh and pointers to
1986 * their last elements that should not be removed - in
1987 * @chain[].p. Return value is the pointer to last filled element
1990 * The work of actually freeing the subtrees is left to the caller:
1991 * a) free the subtree starting from *@top
1992 * b) free the subtrees whose roots are stored in
1993 * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
1994 * c) free the subtrees growing from the inode past the @chain[0].
1995 * (no partially truncated stuff there). */
1997 static Indirect *ext4_find_shared(struct inode *inode, int depth,
1998 ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top)
2000 Indirect *partial, *p;
2004 /* Make k index the deepest non-null offset + 1 */
2005 for (k = depth; k > 1 && !offsets[k-1]; k--)
2007 partial = ext4_get_branch(inode, k, offsets, chain, &err);
2008 /* Writer: pointers */
2010 partial = chain + k-1;
2012 * If the branch acquired continuation since we've looked at it -
2013 * fine, it should all survive and (new) top doesn't belong to us.
2015 if (!partial->key && *partial->p)
2018 for (p = partial; p > chain && all_zeroes((__le32 *)p->bh->b_data, p->p); p--)
2021 * OK, we've found the last block that must survive. The rest of our
2022 * branch should be detached before unlocking. However, if that rest
2023 * of branch is all ours and does not grow immediately from the inode
2024 * it's easier to cheat and just decrement partial->p.
2026 if (p == chain + k - 1 && p > chain) {
2030 /* Nope, don't do this in ext4. Must leave the tree intact */
2037 while (partial > p) {
2038 brelse(partial->bh);
2046 * Zero a number of block pointers in either an inode or an indirect block.
2047 * If we restart the transaction we must again get write access to the
2048 * indirect block for further modification.
2050 * We release `count' blocks on disk, but (last - first) may be greater
2051 * than `count' because there can be holes in there.
2053 static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
2054 struct buffer_head *bh, ext4_fsblk_t block_to_free,
2055 unsigned long count, __le32 *first, __le32 *last)
2058 if (try_to_extend_transaction(handle, inode)) {
2060 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
2061 ext4_journal_dirty_metadata(handle, bh);
2063 ext4_mark_inode_dirty(handle, inode);
2064 ext4_journal_test_restart(handle, inode);
2066 BUFFER_TRACE(bh, "retaking write access");
2067 ext4_journal_get_write_access(handle, bh);
2072 * Any buffers which are on the journal will be in memory. We find
2073 * them on the hash table so jbd2_journal_revoke() will run jbd2_journal_forget()
2074 * on them. We've already detached each block from the file, so
2075 * bforget() in jbd2_journal_forget() should be safe.
2077 * AKPM: turn on bforget in jbd2_journal_forget()!!!
2079 for (p = first; p < last; p++) {
2080 u32 nr = le32_to_cpu(*p);
2082 struct buffer_head *tbh;
2085 tbh = sb_find_get_block(inode->i_sb, nr);
2086 ext4_forget(handle, 0, inode, tbh, nr);
2090 ext4_free_blocks(handle, inode, block_to_free, count, 0);
2094 * ext4_free_data - free a list of data blocks
2095 * @handle: handle for this transaction
2096 * @inode: inode we are dealing with
2097 * @this_bh: indirect buffer_head which contains *@first and *@last
2098 * @first: array of block numbers
2099 * @last: points immediately past the end of array
2101 * We are freeing all blocks referred to from that array (numbers are stored as
2102 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
2104 * We accumulate contiguous runs of blocks to free. Conveniently, if these
2105 * blocks are contiguous then releasing them at one time will only affect one
2106 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
2107 * actually use a lot of journal space.
2109 * @this_bh will be %NULL if @first and @last point into the inode's direct
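 *
 * For illustration, the direct-block case in ext4_truncate() below invokes
 * this with @this_bh == NULL, roughly:
 *
 *	ext4_free_data(handle, inode, NULL, i_data + offsets[0],
 *		       i_data + EXT4_NDIR_BLOCKS);
 *
 * whereas the indirect cases pass the indirect block's buffer_head.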
2112 static void ext4_free_data(handle_t *handle, struct inode *inode,
2113 struct buffer_head *this_bh,
2114 __le32 *first, __le32 *last)
2116 ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */
2117 unsigned long count = 0; /* Number of blocks in the run */
2118 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
2121 ext4_fsblk_t nr; /* Current block # */
2122 __le32 *p; /* Pointer into inode/ind
2123 for current block */
2126 if (this_bh) { /* For indirect block */
2127 BUFFER_TRACE(this_bh, "get_write_access");
2128 err = ext4_journal_get_write_access(handle, this_bh);
2129 /* Important: if we can't update the indirect pointers
2130 * to the blocks, we can't free them. */
2135 for (p = first; p < last; p++) {
2136 nr = le32_to_cpu(*p);
2138 /* accumulate blocks to free if they're contiguous */
2141 block_to_free_p = p;
2143 } else if (nr == block_to_free + count) {
2146 ext4_clear_blocks(handle, inode, this_bh,
2148 count, block_to_free_p, p);
2150 block_to_free_p = p;
2157 ext4_clear_blocks(handle, inode, this_bh, block_to_free,
2158 count, block_to_free_p, p);
2161 BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata");
2164 * The buffer head should have an attached journal head at this
2165 * point. However, if the data is corrupted and an indirect
2166 * block pointed to itself, it would have been detached when
2167 * the block was cleared. Check for this instead of OOPSing.
2170 ext4_journal_dirty_metadata(handle, this_bh);
2172 ext4_error(inode->i_sb, __func__,
2173 "circular indirect block detected, "
2174 "inode=%lu, block=%llu",
2176 (unsigned long long) this_bh->b_blocknr);
2181 * ext4_free_branches - free an array of branches
2182 * @handle: JBD handle for this transaction
2183 * @inode: inode we are dealing with
2184 * @parent_bh: the buffer_head which contains *@first and *@last
2185 * @first: array of block numbers
2186 * @last: pointer immediately past the end of array
2187 * @depth: depth of the branches to free
2189 * We are freeing all blocks referred to from these branches (numbers are
2190 * stored as little-endian 32-bit) and updating @inode->i_blocks
2193 static void ext4_free_branches(handle_t *handle, struct inode *inode,
2194 struct buffer_head *parent_bh,
2195 __le32 *first, __le32 *last, int depth)
2200 if (is_handle_aborted(handle))
2204 struct buffer_head *bh;
2205 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
2207 while (--p >= first) {
2208 nr = le32_to_cpu(*p);
2210 continue; /* A hole */
2212 /* Go read the buffer for the next level down */
2213 bh = sb_bread(inode->i_sb, nr);
2216 * A read failure? Report error and clear slot
2220 ext4_error(inode->i_sb, "ext4_free_branches",
2221 "Read failure, inode=%lu, block=%llu",
2226 /* This zaps the entire block. Bottom up. */
2227 BUFFER_TRACE(bh, "free child branches");
2228 ext4_free_branches(handle, inode, bh,
2229 (__le32*)bh->b_data,
2230 (__le32*)bh->b_data + addr_per_block,
2234 * We've probably journalled the indirect block several
2235 * times during the truncate. But it's no longer
2236 * needed and we now drop it from the transaction via
2237 * jbd2_journal_revoke().
2239 * That's easy if it's exclusively part of this
2240 * transaction. But if it's part of the committing
2241 * transaction then jbd2_journal_forget() will simply
2242 * brelse() it. That means that if the underlying
2243 * block is reallocated in ext4_get_block(),
2244 * unmap_underlying_metadata() will find this block
2245 * and will try to get rid of it. damn, damn.
2247 * If this block has already been committed to the
2248 * journal, a revoke record will be written. And
2249 * revoke records must be emitted *before* clearing
2250 * this block's bit in the bitmaps.
2252 ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
2255 * Everything below this pointer has been
2256 * released. Now let this top-of-subtree go.
2258 * We want the freeing of this indirect block to be
2259 * atomic in the journal with the updating of the
2260 * bitmap block which owns it. So make some room in
2263 * We zero the parent pointer *after* freeing its
2264 * pointee in the bitmaps, so if extend_transaction()
2265 * for some reason fails to put the bitmap changes and
2266 * the release into the same transaction, recovery
2267 * will merely complain about releasing a free block,
2268 * rather than leaking blocks.
2270 if (is_handle_aborted(handle))
2272 if (try_to_extend_transaction(handle, inode)) {
2273 ext4_mark_inode_dirty(handle, inode);
2274 ext4_journal_test_restart(handle, inode);
2277 ext4_free_blocks(handle, inode, nr, 1, 1);
2281 * The block which we have just freed is
2282 * pointed to by an indirect block: journal it
2284 BUFFER_TRACE(parent_bh, "get_write_access");
2285 if (!ext4_journal_get_write_access(handle,
2288 BUFFER_TRACE(parent_bh,
2289 "call ext4_journal_dirty_metadata");
2290 ext4_journal_dirty_metadata(handle,
2296 /* We have reached the bottom of the tree. */
2297 BUFFER_TRACE(parent_bh, "free data blocks");
2298 ext4_free_data(handle, inode, parent_bh, first, last);
2302 int ext4_can_truncate(struct inode *inode)
2304 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2306 if (S_ISREG(inode->i_mode))
2308 if (S_ISDIR(inode->i_mode))
2310 if (S_ISLNK(inode->i_mode))
2311 return !ext4_inode_is_fast_symlink(inode);
2318 * We block out ext4_get_block() block instantiations across the entire
2319 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
2320 * simultaneously on behalf of the same inode.
2322 * As we work through the truncate and commit bits of it to the journal there
2323 * is one core, guiding principle: the file's tree must always be consistent on
2324 * disk. We must be able to restart the truncate after a crash.
2326 * The file's tree may be transiently inconsistent in memory (although it
2327 * probably isn't), but whenever we close off and commit a journal transaction,
2328 * the contents of (the filesystem + the journal) must be consistent and
2329 * restartable. It's pretty simple, really: bottom up, right to left (although
2330 * left-to-right works OK too).
2332 * Note that at recovery time, journal replay occurs *before* the restart of
2333 * truncate against the orphan inode list.
2335 * The committed inode has the new, desired i_size (which is the same as
2336 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
2337 * that this inode's truncate did not complete and it will again call
2338 * ext4_truncate() to have another go. So there will be instantiated blocks
2339 * to the right of the truncation point in a crashed ext4 filesystem. But
2340 * that's fine - as long as they are linked from the inode, the post-crash
2341 * ext4_truncate() run will find them and release them.
2343 void ext4_truncate(struct inode *inode)
2346 struct ext4_inode_info *ei = EXT4_I(inode);
2347 __le32 *i_data = ei->i_data;
2348 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
2349 struct address_space *mapping = inode->i_mapping;
2350 ext4_lblk_t offsets[4];
2355 ext4_lblk_t last_block;
2356 unsigned blocksize = inode->i_sb->s_blocksize;
2358 if (!ext4_can_truncate(inode))
2361 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
2362 ext4_ext_truncate(inode);
2366 handle = start_transaction(inode);
2368 return; /* AKPM: return what? */
2370 last_block = (inode->i_size + blocksize-1)
2371 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
2373 if (inode->i_size & (blocksize - 1))
2374 if (ext4_block_truncate_page(handle, mapping, inode->i_size))
2377 n = ext4_block_to_path(inode, last_block, offsets, NULL);
2379 goto out_stop; /* error */
2382 * OK. This truncate is going to happen. We add the inode to the
2383 * orphan list, so that if this truncate spans multiple transactions,
2384 * and we crash, we will resume the truncate when the filesystem
2385 * recovers. It also marks the inode dirty, to catch the new size.
2387 * Implication: the file must always be in a sane, consistent
2388 * truncatable state while each transaction commits.
2390 if (ext4_orphan_add(handle, inode))
2394 * The orphan list entry will now protect us from any crash which
2395 * occurs before the truncate completes, so it is now safe to propagate
2396 * the new, shorter inode size (held for now in i_size) into the
2397 * on-disk inode. We do this via i_disksize, which is the value which
2398 * ext4 *really* writes onto the disk inode.
2400 ei->i_disksize = inode->i_size;
2403 * From here we block out all ext4_get_block() callers who want to
2404 * modify the block allocation tree.
2406 down_write(&ei->i_data_sem);
2408 if (n == 1) { /* direct blocks */
2409 ext4_free_data(handle, inode, NULL, i_data+offsets[0],
2410 i_data + EXT4_NDIR_BLOCKS);
2414 partial = ext4_find_shared(inode, n, offsets, chain, &nr);
2415 /* Kill the top of shared branch (not detached) */
2417 if (partial == chain) {
2418 /* Shared branch grows from the inode */
2419 ext4_free_branches(handle, inode, NULL,
2420 &nr, &nr+1, (chain+n-1) - partial);
2423 * We mark the inode dirty prior to restart,
2424 * and prior to stop. No need for it here.
2427 /* Shared branch grows from an indirect block */
2428 BUFFER_TRACE(partial->bh, "get_write_access");
2429 ext4_free_branches(handle, inode, partial->bh,
2431 partial->p+1, (chain+n-1) - partial);
2434 /* Clear the ends of indirect blocks on the shared branch */
2435 while (partial > chain) {
2436 ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
2437 (__le32*)partial->bh->b_data+addr_per_block,
2438 (chain+n-1) - partial);
2439 BUFFER_TRACE(partial->bh, "call brelse");
2440 brelse(partial->bh);
2444 /* Kill the remaining (whole) subtrees */
2445 switch (offsets[0]) {
2447 nr = i_data[EXT4_IND_BLOCK];
2449 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2450 i_data[EXT4_IND_BLOCK] = 0;
2452 case EXT4_IND_BLOCK:
2453 nr = i_data[EXT4_DIND_BLOCK];
2455 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2456 i_data[EXT4_DIND_BLOCK] = 0;
2458 case EXT4_DIND_BLOCK:
2459 nr = i_data[EXT4_TIND_BLOCK];
2461 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2462 i_data[EXT4_TIND_BLOCK] = 0;
2464 case EXT4_TIND_BLOCK:
2468 ext4_discard_reservation(inode);
2470 up_write(&ei->i_data_sem);
2471 inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
2472 ext4_mark_inode_dirty(handle, inode);
2475 * In a multi-transaction truncate, we only make the final transaction
2482 * If this was a simple ftruncate(), and the file will remain alive
2483 * then we need to clear up the orphan record which we created above.
2484 * However, if this was a real unlink then we were called by
2485 * ext4_delete_inode(), and we allow that function to clean up the
2486 * orphan info for us.
2489 ext4_orphan_del(handle, inode);
2491 ext4_journal_stop(handle);
2494 static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
2495 unsigned long ino, struct ext4_iloc *iloc)
2497 ext4_group_t block_group;
2498 unsigned long offset;
2500 struct ext4_group_desc *gdp;
2502 if (!ext4_valid_inum(sb, ino)) {
2504 * This error is already checked for in namei.c unless we are
2505 * looking at an NFS filehandle, in which case no error
2511 block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
2512 gdp = ext4_get_group_desc(sb, block_group, NULL);
2517 * Figure out the offset within the block group inode table
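 * For example (illustrative numbers): with 4KiB blocks, 256-byte inodes
 * and 8192 inodes per group, ino 34 lands in block_group 0 at index 33,
 * so offset == 33 * 256 == 8448; the inode then lives in the third block
 * of the inode table (8448 >> 12 == 2), at byte 8448 & 4095 == 256 within
 * that block.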
2519 offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)) *
2520 EXT4_INODE_SIZE(sb);
2521 block = ext4_inode_table(sb, gdp) +
2522 (offset >> EXT4_BLOCK_SIZE_BITS(sb));
2524 iloc->block_group = block_group;
2525 iloc->offset = offset & (EXT4_BLOCK_SIZE(sb) - 1);
2530 * ext4_get_inode_loc returns with an extra refcount against the inode's
2531 * underlying buffer_head on success. If 'in_mem' is true, we have all
2532 * data in memory that is needed to recreate the on-disk version of this
2535 static int __ext4_get_inode_loc(struct inode *inode,
2536 struct ext4_iloc *iloc, int in_mem)
2539 struct buffer_head *bh;
2541 block = ext4_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2545 bh = sb_getblk(inode->i_sb, block);
2547 ext4_error (inode->i_sb, "ext4_get_inode_loc",
2548 "unable to read inode block - "
2549 "inode=%lu, block=%llu",
2550 inode->i_ino, block);
2553 if (!buffer_uptodate(bh)) {
2555 if (buffer_uptodate(bh)) {
2556 /* someone brought it uptodate while we waited */
2562 * If we have all the inode's information in memory and this
2563 * is the only valid inode in the block, we need not read the
2567 struct buffer_head *bitmap_bh;
2568 struct ext4_group_desc *desc;
2569 int inodes_per_buffer;
2570 int inode_offset, i;
2571 ext4_group_t block_group;
2574 block_group = (inode->i_ino - 1) /
2575 EXT4_INODES_PER_GROUP(inode->i_sb);
2576 inodes_per_buffer = bh->b_size /
2577 EXT4_INODE_SIZE(inode->i_sb);
2578 inode_offset = ((inode->i_ino - 1) %
2579 EXT4_INODES_PER_GROUP(inode->i_sb));
2580 start = inode_offset & ~(inodes_per_buffer - 1);
2582 /* Is the inode bitmap in cache? */
2583 desc = ext4_get_group_desc(inode->i_sb,
2588 bitmap_bh = sb_getblk(inode->i_sb,
2589 ext4_inode_bitmap(inode->i_sb, desc));
2594 * If the inode bitmap isn't in cache then the
2595 * optimisation may end up performing two reads instead
2596 * of one, so skip it.
2598 if (!buffer_uptodate(bitmap_bh)) {
2602 for (i = start; i < start + inodes_per_buffer; i++) {
2603 if (i == inode_offset)
2605 if (ext4_test_bit(i, bitmap_bh->b_data))
2609 if (i == start + inodes_per_buffer) {
2610 /* all other inodes are free, so skip I/O */
2611 memset(bh->b_data, 0, bh->b_size);
2612 set_buffer_uptodate(bh);
2620 * There are other valid inodes in the buffer, this inode
2621 * has in-inode xattrs, or we don't have this inode in memory.
2622 * Read the block from disk.
2625 bh->b_end_io = end_buffer_read_sync;
2626 submit_bh(READ_META, bh);
2628 if (!buffer_uptodate(bh)) {
2629 ext4_error(inode->i_sb, "ext4_get_inode_loc",
2630 "unable to read inode block - "
2631 "inode=%lu, block=%llu",
2632 inode->i_ino, block);
2642 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
2644 /* We have all inode data except xattrs in memory here. */
2645 return __ext4_get_inode_loc(inode, iloc,
2646 !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
2649 void ext4_set_inode_flags(struct inode *inode)
2651 unsigned int flags = EXT4_I(inode)->i_flags;
2653 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2654 if (flags & EXT4_SYNC_FL)
2655 inode->i_flags |= S_SYNC;
2656 if (flags & EXT4_APPEND_FL)
2657 inode->i_flags |= S_APPEND;
2658 if (flags & EXT4_IMMUTABLE_FL)
2659 inode->i_flags |= S_IMMUTABLE;
2660 if (flags & EXT4_NOATIME_FL)
2661 inode->i_flags |= S_NOATIME;
2662 if (flags & EXT4_DIRSYNC_FL)
2663 inode->i_flags |= S_DIRSYNC;
2666 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
2667 void ext4_get_inode_flags(struct ext4_inode_info *ei)
2669 unsigned int flags = ei->vfs_inode.i_flags;
2671 ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
2672 EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
2674 ei->i_flags |= EXT4_SYNC_FL;
2675 if (flags & S_APPEND)
2676 ei->i_flags |= EXT4_APPEND_FL;
2677 if (flags & S_IMMUTABLE)
2678 ei->i_flags |= EXT4_IMMUTABLE_FL;
2679 if (flags & S_NOATIME)
2680 ei->i_flags |= EXT4_NOATIME_FL;
2681 if (flags & S_DIRSYNC)
2682 ei->i_flags |= EXT4_DIRSYNC_FL;
2684 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
2685 struct ext4_inode_info *ei)
2688 struct inode *inode = &(ei->vfs_inode);
2689 struct super_block *sb = inode->i_sb;
2691 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
2692 EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
2693 /* we are using combined 48 bit field */
2694 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
2695 le32_to_cpu(raw_inode->i_blocks_lo);
2696 if (ei->i_flags & EXT4_HUGE_FILE_FL) {
2697 /* i_blocks is in units of the file system block size */
2698 return i_blocks << (inode->i_blkbits - 9);
2703 return le32_to_cpu(raw_inode->i_blocks_lo);
2707 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
2709 struct ext4_iloc iloc;
2710 struct ext4_inode *raw_inode;
2711 struct ext4_inode_info *ei;
2712 struct buffer_head *bh;
2713 struct inode *inode;
2717 inode = iget_locked(sb, ino);
2719 return ERR_PTR(-ENOMEM);
2720 if (!(inode->i_state & I_NEW))
2724 #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
2725 ei->i_acl = EXT4_ACL_NOT_CACHED;
2726 ei->i_default_acl = EXT4_ACL_NOT_CACHED;
2728 ei->i_block_alloc_info = NULL;
2730 ret = __ext4_get_inode_loc(inode, &iloc, 0);
2734 raw_inode = ext4_raw_inode(&iloc);
2735 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2736 inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2737 inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2738 if (!(test_opt(inode->i_sb, NO_UID32))) {
2739 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2740 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2742 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2745 ei->i_dir_start_lookup = 0;
2746 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2747 /* We now have enough fields to check if the inode was active or not.
2748 * This is needed because nfsd might try to access dead inodes;
2749 * the test is the same one that e2fsck uses.
2750 * NeilBrown 1999oct15
2752 if (inode->i_nlink == 0) {
2753 if (inode->i_mode == 0 ||
2754 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
2755 /* this inode is deleted */
2760 /* The only unlinked inodes we let through here have
2761 * valid i_mode and are being read by the orphan
2762 * recovery code: that's fine, we're about to complete
2763 * the process of deleting those. */
2765 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2766 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
2767 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
2768 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
2769 cpu_to_le32(EXT4_OS_HURD)) {
2771 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
2773 inode->i_size = ext4_isize(raw_inode);
2774 ei->i_disksize = inode->i_size;
2775 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2776 ei->i_block_group = iloc.block_group;
2778 * NOTE! The in-memory inode i_data array is in little-endian order
2779 * even on big-endian machines: we do NOT byteswap the block numbers!
2781 for (block = 0; block < EXT4_N_BLOCKS; block++)
2782 ei->i_data[block] = raw_inode->i_block[block];
2783 INIT_LIST_HEAD(&ei->i_orphan);
2785 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
2786 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2787 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2788 EXT4_INODE_SIZE(inode->i_sb)) {
2793 if (ei->i_extra_isize == 0) {
2794 /* The extra space is currently unused. Use it. */
2795 ei->i_extra_isize = sizeof(struct ext4_inode) -
2796 EXT4_GOOD_OLD_INODE_SIZE;
2798 __le32 *magic = (void *)raw_inode +
2799 EXT4_GOOD_OLD_INODE_SIZE +
2801 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
2802 ei->i_state |= EXT4_STATE_XATTR;
2805 ei->i_extra_isize = 0;
2807 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
2808 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
2809 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
2810 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
2812 inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
2813 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
2814 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
2816 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
2819 if (S_ISREG(inode->i_mode)) {
2820 inode->i_op = &ext4_file_inode_operations;
2821 inode->i_fop = &ext4_file_operations;
2822 ext4_set_aops(inode);
2823 } else if (S_ISDIR(inode->i_mode)) {
2824 inode->i_op = &ext4_dir_inode_operations;
2825 inode->i_fop = &ext4_dir_operations;
2826 } else if (S_ISLNK(inode->i_mode)) {
2827 if (ext4_inode_is_fast_symlink(inode))
2828 inode->i_op = &ext4_fast_symlink_inode_operations;
2830 inode->i_op = &ext4_symlink_inode_operations;
2831 ext4_set_aops(inode);
2834 inode->i_op = &ext4_special_inode_operations;
2835 if (raw_inode->i_block[0])
2836 init_special_inode(inode, inode->i_mode,
2837 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
2839 init_special_inode(inode, inode->i_mode,
2840 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
2843 ext4_set_inode_flags(inode);
2844 unlock_new_inode(inode);
2849 return ERR_PTR(ret);
2852 static int ext4_inode_blocks_set(handle_t *handle,
2853 struct ext4_inode *raw_inode,
2854 struct ext4_inode_info *ei)
2856 struct inode *inode = &(ei->vfs_inode);
2857 u64 i_blocks = inode->i_blocks;
2858 struct super_block *sb = inode->i_sb;
2861 if (i_blocks <= ~0U) {
2863 * i_blocks can be represented in a 32 bit variable
2864 * as a multiple of 512 bytes
2866 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
2867 raw_inode->i_blocks_high = 0;
2868 ei->i_flags &= ~EXT4_HUGE_FILE_FL;
2869 } else if (i_blocks <= 0xffffffffffffULL) {
2871 * i_blocks can be represented in a 48 bit variable
2872 * as a multiple of 512 bytes
2874 err = ext4_update_rocompat_feature(handle, sb,
2875 EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
2878 /* i_blocks is stored in the split 48 bit fields */
2879 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
2880 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
2881 ei->i_flags &= ~EXT4_HUGE_FILE_FL;
2884 * i_blocks should be represented in a 48 bit variable
2885 * as a multiple of the file system block size
2887 err = ext4_update_rocompat_feature(handle, sb,
2888 EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
2891 ei->i_flags |= EXT4_HUGE_FILE_FL;
2892 /* i_blocks is stored in units of the file system block size */
2893 i_blocks = i_blocks >> (inode->i_blkbits - 9);
2894 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
2895 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
2902 * Post the struct inode info into an on-disk inode location in the
2903 * buffer-cache. This gobbles the caller's reference to the
2904 * buffer_head in the inode location struct.
2906 * The caller must have write access to iloc->bh.
2908 static int ext4_do_update_inode(handle_t *handle,
2909 struct inode *inode,
2910 struct ext4_iloc *iloc)
2912 struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
2913 struct ext4_inode_info *ei = EXT4_I(inode);
2914 struct buffer_head *bh = iloc->bh;
2915 int err = 0, rc, block;
2917 /* For fields not tracked in the in-memory inode,
2918 * initialise them to zero for new inodes. */
2919 if (ei->i_state & EXT4_STATE_NEW)
2920 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
2922 ext4_get_inode_flags(ei);
2923 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
2924 if (!(test_opt(inode->i_sb, NO_UID32))) {
2925 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
2926 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
2928 * Fix up interoperability with old kernels. Otherwise, old inodes get
2929 * re-used with the upper 16 bits of the uid/gid intact
2932 raw_inode->i_uid_high =
2933 cpu_to_le16(high_16_bits(inode->i_uid));
2934 raw_inode->i_gid_high =
2935 cpu_to_le16(high_16_bits(inode->i_gid));
2937 raw_inode->i_uid_high = 0;
2938 raw_inode->i_gid_high = 0;
2941 raw_inode->i_uid_low =
2942 cpu_to_le16(fs_high2lowuid(inode->i_uid));
2943 raw_inode->i_gid_low =
2944 cpu_to_le16(fs_high2lowgid(inode->i_gid));
2945 raw_inode->i_uid_high = 0;
2946 raw_inode->i_gid_high = 0;
2948 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
2950 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
2951 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
2952 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
2953 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
2955 if (ext4_inode_blocks_set(handle, raw_inode, ei))
2957 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
2958 /* clear the migrate flag in the raw_inode */
2959 raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE);
2960 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
2961 cpu_to_le32(EXT4_OS_HURD))
2962 raw_inode->i_file_acl_high =
2963 cpu_to_le16(ei->i_file_acl >> 32);
2964 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
2965 ext4_isize_set(raw_inode, ei->i_disksize);
2966 if (ei->i_disksize > 0x7fffffffULL) {
2967 struct super_block *sb = inode->i_sb;
2968 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
2969 EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
2970 EXT4_SB(sb)->s_es->s_rev_level ==
2971 cpu_to_le32(EXT4_GOOD_OLD_REV)) {
2972 /* If this is the first large file
2973 * created, add a flag to the superblock.
2975 err = ext4_journal_get_write_access(handle,
2976 EXT4_SB(sb)->s_sbh);
2979 ext4_update_dynamic_rev(sb);
2980 EXT4_SET_RO_COMPAT_FEATURE(sb,
2981 EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
2984 err = ext4_journal_dirty_metadata(handle,
2985 EXT4_SB(sb)->s_sbh);
2988 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
2989 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
2990 if (old_valid_dev(inode->i_rdev)) {
2991 raw_inode->i_block[0] =
2992 cpu_to_le32(old_encode_dev(inode->i_rdev));
2993 raw_inode->i_block[1] = 0;
2995 raw_inode->i_block[0] = 0;
2996 raw_inode->i_block[1] =
2997 cpu_to_le32(new_encode_dev(inode->i_rdev));
2998 raw_inode->i_block[2] = 0;
3000 } else for (block = 0; block < EXT4_N_BLOCKS; block++)
3001 raw_inode->i_block[block] = ei->i_data[block];
3003 raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
3004 if (ei->i_extra_isize) {
3005 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
3006 raw_inode->i_version_hi =
3007 cpu_to_le32(inode->i_version >> 32);
3008 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
3012 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
3013 rc = ext4_journal_dirty_metadata(handle, bh);
3016 ei->i_state &= ~EXT4_STATE_NEW;
3020 ext4_std_error(inode->i_sb, err);
3025 * ext4_write_inode()
3027 * We are called from a few places:
3029 * - Within generic_file_write() for O_SYNC files.
3030 * Here, there will be no transaction running. We wait for any running
3031 * transaction to commit.
3033 * - Within sys_sync(), kupdate and such.
3034 * We wait on commit, if told to.
3036 * - Within prune_icache() (PF_MEMALLOC == true)
3037 * Here we simply return. We can't afford to block kswapd on the
3040 * In all cases it is actually safe for us to return without doing anything,
3041 * because the inode has been copied into a raw inode buffer in
3042 * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
3045 * Note that we are absolutely dependent upon all inode dirtiers doing the
3046 * right thing: they *must* call mark_inode_dirty() after dirtying info in
3047 * which we are interested.
3049 * It would be a bug for them to not do this. The code:
3051 * mark_inode_dirty(inode)
3053 * inode->i_size = expr;
3055 * is in error because a kswapd-driven write_inode() could occur while
3056 * `stuff()' is running, and the new i_size will be lost. Plus the inode
3057 * will no longer be on the superblock's dirty inode list.
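 *
 * The safe ordering, in other words, is to update the fields first and to
 * call mark_inode_dirty() last:
 *
 *	inode->i_size = expr;
 *	stuff();
 *	mark_inode_dirty(inode);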
3059 int ext4_write_inode(struct inode *inode, int wait)
3061 if (current->flags & PF_MEMALLOC)
3064 if (ext4_journal_current_handle()) {
3065 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
3073 return ext4_force_commit(inode->i_sb);
3079 * Called from notify_change.
3081 * We want to trap VFS attempts to truncate the file as soon as
3082 * possible. In particular, we want to make sure that when the VFS
3083 * shrinks i_size, we put the inode on the orphan list and modify
3084 * i_disksize immediately, so that during the subsequent flushing of
3085 * dirty pages and freeing of disk blocks, we can guarantee that any
3086 * commit will leave the blocks being flushed in an unused state on
3087 * disk. (On recovery, the inode will get truncated and the blocks will
3088 * be freed, so we have a strong guarantee that no future commit will
3089 * leave these blocks visible to the user.)
3091 * Another thing we have to ensure is that if we are in ordered mode
3092 * and the inode is still attached to the committing transaction, we must
3093 * start writeout of all the dirty pages which are being truncated.
3094 * This way we are sure that all the data written in the previous
3095 * transaction are already on disk (truncate waits for pages under
3098 * Called with inode->i_mutex down.
3100 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
3102 struct inode *inode = dentry->d_inode;
3104 const unsigned int ia_valid = attr->ia_valid;
3106 error = inode_change_ok(inode, attr);
3110 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
3111 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
3114 /* (user+group)*(old+new) structure, inode write (sb,
3115 * inode block, ? - but truncate inode update has it) */
3116 handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
3117 EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
3118 if (IS_ERR(handle)) {
3119 error = PTR_ERR(handle);
3122 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
3124 ext4_journal_stop(handle);
3127 /* Update corresponding info in inode so that everything is in
3128 * one transaction */
3129 if (attr->ia_valid & ATTR_UID)
3130 inode->i_uid = attr->ia_uid;
3131 if (attr->ia_valid & ATTR_GID)
3132 inode->i_gid = attr->ia_gid;
3133 error = ext4_mark_inode_dirty(handle, inode);
3134 ext4_journal_stop(handle);
3137 if (attr->ia_valid & ATTR_SIZE) {
3138 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
3139 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3141 if (attr->ia_size > sbi->s_bitmap_maxbytes) {
3148 if (S_ISREG(inode->i_mode) &&
3149 attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
3152 handle = ext4_journal_start(inode, 3);
3153 if (IS_ERR(handle)) {
3154 error = PTR_ERR(handle);
3158 error = ext4_orphan_add(handle, inode);
3159 EXT4_I(inode)->i_disksize = attr->ia_size;
3160 rc = ext4_mark_inode_dirty(handle, inode);
3163 ext4_journal_stop(handle);
3165 if (ext4_should_order_data(inode)) {
3166 error = ext4_begin_ordered_truncate(inode,
3169 /* Do as much error cleanup as possible */
3170 handle = ext4_journal_start(inode, 3);
3171 if (IS_ERR(handle)) {
3172 ext4_orphan_del(NULL, inode);
3175 ext4_orphan_del(handle, inode);
3176 ext4_journal_stop(handle);
3182 rc = inode_setattr(inode, attr);
3184 /* If inode_setattr's call to ext4_truncate failed to get a
3185 * transaction handle at all, we need to clean up the in-core
3186 * orphan list manually. */
3188 ext4_orphan_del(NULL, inode);
3190 if (!rc && (ia_valid & ATTR_MODE))
3191 rc = ext4_acl_chmod(inode);
3194 ext4_std_error(inode->i_sb, error);
3202 * How many blocks doth make a writepage()?
3204 * With N blocks per page, it may be:
3209 * N+5 bitmap blocks (from the above)
3210 * N+5 group descriptor summary blocks
3213 * 2 * EXT4_SINGLEDATA_TRANS_BLOCKS for the quota files
3215 * 3 * (N + 5) + 2 + 2 * EXT4_SINGLEDATA_TRANS_BLOCKS
3217 * With ordered or writeback data it's the same, less the N data blocks.
3219 * If the inode's direct blocks can hold an integral number of pages then a
3220 * page cannot straddle two indirect blocks, and we can only touch one indirect
3221 * and dindirect block, and the "5" above becomes "3".
3223 * This still overestimates under most circumstances. If we were to pass the
3224 * start and end offsets in here as well we could do block_to_path() on each
3225 * block and work out the exact number of indirects which are touched. Pah.
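 *
 * As a worked example (assuming 4KiB blocks and 4KiB pages, i.e. one block
 * per page): bpp == 1 and EXT4_NDIR_BLOCKS % bpp == 0, so "indirects" below
 * is 3.  data=journal then needs 3 * (1 + 3) + 2 == 14 credits, ordered or
 * writeback need 2 * (1 + 3) + 2 == 10, plus 2 * EXT4_QUOTA_TRANS_BLOCKS
 * when quotas are enabled.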
3228 int ext4_writepage_trans_blocks(struct inode *inode)
3230 int bpp = ext4_journal_blocks_per_page(inode);
3231 int indirects = (EXT4_NDIR_BLOCKS % bpp) ? 5 : 3;
3234 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
3235 return ext4_ext_writepage_trans_blocks(inode, bpp);
3237 if (ext4_should_journal_data(inode))
3238 ret = 3 * (bpp + indirects) + 2;
3240 ret = 2 * (bpp + indirects) + 2;
3243 /* We know the quota structure was already allocated during DQUOT_INIT so
3244 * we will be updating only the data blocks + inodes */
3245 ret += 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
3252 * The caller must have previously called ext4_reserve_inode_write().
3253 * Given this, we know that the caller already has write access to iloc->bh.
3255 int ext4_mark_iloc_dirty(handle_t *handle,
3256 struct inode *inode, struct ext4_iloc *iloc)
3260 if (test_opt(inode->i_sb, I_VERSION))
3261 inode_inc_iversion(inode);
3263 /* the do_update_inode consumes one bh->b_count */
3266 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
3267 err = ext4_do_update_inode(handle, inode, iloc);
3273 * On success, we end up with an outstanding reference count against
3274 * iloc->bh. This _must_ be cleaned up later.
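 *
 * A minimal sketch of the usual pairing (cf. ext4_mark_inode_dirty() below);
 * note that ext4_mark_iloc_dirty() consumes the bh reference for us:
 *
 *	struct ext4_iloc iloc;
 *	int err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (!err) {
 *		... update the in-core inode ...
 *		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *	}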
3278 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
3279 struct ext4_iloc *iloc)
3283 err = ext4_get_inode_loc(inode, iloc);
3285 BUFFER_TRACE(iloc->bh, "get_write_access");
3286 err = ext4_journal_get_write_access(handle, iloc->bh);
3293 ext4_std_error(inode->i_sb, err);
3298 * Expand an inode by new_extra_isize bytes.
3299 * Returns 0 on success or negative error number on failure.
3301 static int ext4_expand_extra_isize(struct inode *inode,
3302 unsigned int new_extra_isize,
3303 struct ext4_iloc iloc,
3306 struct ext4_inode *raw_inode;
3307 struct ext4_xattr_ibody_header *header;
3308 struct ext4_xattr_entry *entry;
3310 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
3313 raw_inode = ext4_raw_inode(&iloc);
3315 header = IHDR(inode, raw_inode);
3316 entry = IFIRST(header);
3318 /* No extended attributes present */
3319 if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
3320 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
3321 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
3323 EXT4_I(inode)->i_extra_isize = new_extra_isize;
3327 /* try to expand with EAs present */
3328 return ext4_expand_extra_isize_ea(inode, new_extra_isize,
3333 * What we do here is to mark the in-core inode as clean with respect to inode
3334 * dirtiness (it may still be data-dirty).
3335 * This means that the in-core inode may be reaped by prune_icache
3336 * without having to perform any I/O. This is a very good thing,
3337 * because *any* task may call prune_icache - even ones which
3338 * have a transaction open against a different journal.
3340 * Is this cheating? Not really. Sure, we haven't written the
3341 * inode out, but prune_icache isn't a user-visible syncing function.
3342 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3343 * we start and wait on commits.
3345 * Is this efficient/effective? Well, we're being nice to the system
3346 * by cleaning up our inodes proactively so they can be reaped
3347 * without I/O. But we are potentially leaving up to five seconds'
3348 * worth of inodes floating about which prune_icache wants us to
3349 * write out. One way to fix that would be to get prune_icache()
3350 * to do a write_super() to free up some memory. It has the desired
3353 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
3355 struct ext4_iloc iloc;
3356 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3357 static unsigned int mnt_count;
3361 err = ext4_reserve_inode_write(handle, inode, &iloc);
3362 if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
3363 !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
3365 * We need extra buffer credits since we may write into EA block
3366 * with this same handle. If journal_extend fails, then it will
3367 * only result in a minor loss of functionality for that inode.
3368 * If this is felt to be critical, then e2fsck should be run to
3369 * force a large enough s_min_extra_isize.
3371 if ((jbd2_journal_extend(handle,
3372 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
3373 ret = ext4_expand_extra_isize(inode,
3374 sbi->s_want_extra_isize,
3377 EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
3379 le16_to_cpu(sbi->s_es->s_mnt_count)) {
3380 ext4_warning(inode->i_sb, __func__,
3381 "Unable to expand inode %lu. Delete"
3382 " some EAs or run e2fsck.",
3385 le16_to_cpu(sbi->s_es->s_mnt_count);
3391 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
3396 * ext4_dirty_inode() is called from __mark_inode_dirty()
3398 * We're really interested in the case where a file is being extended.
3399 * i_size has been changed by generic_commit_write() and we thus need
3400 * to include the updated inode in the current transaction.
3402 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
3403 * are allocated to the file.
3405 * If the inode is marked synchronous, we don't honour that here - doing
3406 * so would cause a commit on atime updates, which we don't bother doing.
3407 * We handle synchronous inodes at the highest possible level.
3409 void ext4_dirty_inode(struct inode *inode)
3411 handle_t *current_handle = ext4_journal_current_handle();
3414 handle = ext4_journal_start(inode, 2);
3417 if (current_handle &&
3418 current_handle->h_transaction != handle->h_transaction) {
3419 /* This task has a transaction open against a different fs */
3420 printk(KERN_EMERG "%s: transactions do not match!\n",
3423 jbd_debug(5, "marking dirty. outer handle=%p\n",
3425 ext4_mark_inode_dirty(handle, inode);
3427 ext4_journal_stop(handle);
3434 * Bind an inode's backing buffer_head into this transaction, to prevent
3435 * it from being flushed to disk early. Unlike
3436 * ext4_reserve_inode_write, this leaves behind no bh reference and
3437 * returns no iloc structure, so the caller needs to repeat the iloc
3438 * lookup to mark the inode dirty later.
3440 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
3442 struct ext4_iloc iloc;
3446 err = ext4_get_inode_loc(inode, &iloc);
3448 BUFFER_TRACE(iloc.bh, "get_write_access");
3449 err = jbd2_journal_get_write_access(handle, iloc.bh);
3451 err = ext4_journal_dirty_metadata(handle,
3456 ext4_std_error(inode->i_sb, err);
3461 int ext4_change_inode_journal_flag(struct inode *inode, int val)
3468 * We have to be very careful here: changing a data block's
3469 * journaling status dynamically is dangerous. If we write a
3470 * data block to the journal, change the status and then delete
3471 * that block, we risk forgetting to revoke the old log record
3472 * from the journal and so a subsequent replay can corrupt data.
3473 * So, first we make sure that the journal is empty and that
3474 * nobody is changing anything.
3477 journal = EXT4_JOURNAL(inode);
3478 if (is_journal_aborted(journal))
3481 jbd2_journal_lock_updates(journal);
3482 jbd2_journal_flush(journal);
3485 * OK, there are no updates running now, and all cached data is
3486 * synced to disk. We are now in a completely consistent state
3487 * which doesn't have anything in the journal, and we know that
3488 * no filesystem updates are running, so it is safe to modify
3489 * the inode's in-core data-journaling state flag now.
3493 EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
3495 EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
3496 ext4_set_aops(inode);
3498 jbd2_journal_unlock_updates(journal);
3500 /* Finally we can mark the inode as dirty. */
3502 handle = ext4_journal_start(inode, 1);
3504 return PTR_ERR(handle);
3506 err = ext4_mark_inode_dirty(handle, inode);
3508 ext4_journal_stop(handle);
3509 ext4_std_error(inode->i_sb, err);
3514 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
3516 return !buffer_mapped(bh);
3519 int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
3524 struct file *file = vma->vm_file;
3525 struct inode *inode = file->f_path.dentry->d_inode;
3526 struct address_space *mapping = inode->i_mapping;
3529 * Get i_alloc_sem to stop truncates messing with the inode. We cannot
3530 * get i_mutex because we are already holding mmap_sem.
3532 down_read(&inode->i_alloc_sem);
3533 size = i_size_read(inode);
3534 if (page->mapping != mapping || size <= page_offset(page)
3535 || !PageUptodate(page)) {
3536 /* page got truncated from under us? */
3540 if (PageMappedToDisk(page))
3543 if (page->index == size >> PAGE_CACHE_SHIFT)
3544 len = size & ~PAGE_CACHE_MASK;
3546 len = PAGE_CACHE_SIZE;
3548 if (page_has_buffers(page)) {
3549 /* return if we have all the buffers mapped */
3550 if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
3555 * OK, we need to fill the hole... Do write_begin/write_end
3556 * to do the block allocation/reservation. We are not holding
3557 * inode->i_mutex here; that allows parallel write_begin and
3558 * write_end calls. lock_page prevents this from happening
3559 * on the same page, though.
3561 ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
3562 len, AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
3565 ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
3566 len, len, page, NULL);
3571 up_read(&inode->i_alloc_sem);