/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
#include "volumes.h"
static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * when auto defrag is enabled we
 * queue up these defrag structs to remember which
 * inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};
static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}
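
/*
 * For illustration (example values are hypothetical): records sort by
 * root first, then ino, so the rbtree groups all inodes of one subvolume
 * together.  Given A = {root 5, ino 300} and B = {root 5, ino 257},
 * __compare_inode_defrag(&A, &B) returns 1 (A sorts after B), the
 * reverse call returns -1, and matching root and ino returns 0.
 */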
/* insert a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static void __btrfs_add_inode_defrag(struct inode *inode,
				     struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			goto exists;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return;

exists:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return;
}
/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;

	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;
	if (btrfs_fs_closing(root->fs_info))
		return 0;
	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		__btrfs_add_inode_defrag(inode, defrag);
	else
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}
/*
 * must be called with the defrag_inodes lock held
 */
struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
					     u64 root, u64 ino,
					     struct rb_node **next)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	p = info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			return entry;
	}

	if (next) {
		while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
			parent = rb_next(parent);
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		}
		*next = parent;
	}
	return NULL;
}
/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct rb_node *n;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	u64 first_ino = 0;
	u64 root_objectid = 0;
	int num_defrag;
	int defrag_batch = 1024;

	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;

	atomic_inc(&fs_info->defrag_running);
	spin_lock(&fs_info->defrag_inodes_lock);
	while (1) {
		n = NULL;

		/* find an inode to defrag */
		defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
						 first_ino, &n);
		if (!defrag) {
			if (n) {
				defrag = rb_entry(n, struct inode_defrag,
						  rb_node);
			} else if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		/* remove it from the rbtree */
		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;
		rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);

		if (btrfs_fs_closing(fs_info))
			goto next_free;

		spin_unlock(&fs_info->defrag_inodes_lock);

		/* get the inode */
		key.objectid = defrag->root;
		btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
		key.offset = (u64)-1;
		inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(inode_root))
			goto next;

		key.objectid = defrag->ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;

		inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
		if (IS_ERR(inode))
			goto next;

		/* do a chunk of defrag */
		clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
		range.start = defrag->last_offset;
		num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
					       defrag_batch);
		/*
		 * if we filled the whole defrag batch, there
		 * must be more work to do.  Queue this defrag
		 * again
		 */
		if (num_defrag == defrag_batch) {
			defrag->last_offset = range.start;
			__btrfs_add_inode_defrag(inode, defrag);
			/*
			 * we don't want to kfree defrag, we added it back to
			 * the rbtree
			 */
			defrag = NULL;
		} else if (defrag->last_offset && !defrag->cycled) {
			/*
			 * we didn't fill our defrag batch, but
			 * we didn't start at zero.  Make sure we loop
			 * around to the start of the file.
			 */
			defrag->last_offset = 0;
			defrag->cycled = 1;
			__btrfs_add_inode_defrag(inode, defrag);
			defrag = NULL;
		}

		iput(inode);
next:
		spin_lock(&fs_info->defrag_inodes_lock);
next_free:
		if (defrag)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);

	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}
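
/*
 * For illustration (example values are hypothetical): the pair
 * (root_objectid, first_ino) acts as a resume cursor.  After the record
 * {root 5, ino 257} is erased above, first_ino becomes 258, so the next
 * btrfs_find_defrag_inode() call resumes strictly after the inode we
 * just processed, even though the lock was dropped in between and new
 * records may have been queued.
 */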
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
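
/*
 * For illustration, assuming a 4096 byte PAGE_CACHE_SIZE (example
 * values are hypothetical): with pos = 5000, offset = 5000 & 4095 = 904,
 * so the first pass copies at most 4096 - 904 = 3192 bytes into
 * prepared_pages[0]; every later pass restarts at offset 0 and can copy
 * a full page.
 */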
/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clean them here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}
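
/*
 * For illustration, assuming a 4096 byte sectorsize (example values are
 * hypothetical): with pos = 5000 and write_bytes = 3000,
 * start_pos = 5000 & ~4095 = 4096 and
 * num_bytes = (3000 + 5000 - 4096 + 4095) & ~4095 = 4096, so delalloc
 * is set on the single sector [4096, 8191] that fully covers the
 * written byte range [5000, 7999].
 */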
/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);
		if (no_splits)
			goto next;

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;
			split->generation = gen;
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			list_move(&split->list, &em_tree->modified_extents);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			list_move(&split->list, &em_tree->modified_extents);
			free_extent_map(split);
			split = NULL;
		}
next:
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
	int found = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	if (start >= BTRFS_I(inode)->disk_i_size)
		modify_tree = 0;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (!ret && del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}

	if (drop_end)
		*drop_end = found ? min(end, extent_end) : end;
	btrfs_release_path(path);
	return ret;
}
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
				   drop_cache);
	btrfs_free_path(path);
	return ret;
}
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
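
/*
 * For illustration (example values are hypothetical): two file extent
 * items are mergeable only when they point into the same on-disk extent
 * at the same logical alignment, i.e. key.offset - extent_offset yields
 * the same orig_offset for both.  An item at file offset 8192 with
 * extent offset 4096 and one at file offset 4096 with extent offset 0
 * both give orig_offset 4096 and pass the check above.
 */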
/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of extent is marked as written, the extent will be split into
 * two or three extents.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}
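
/*
 * For illustration (example values are hypothetical): with a prealloc
 * extent item at key.offset 0 covering [0, 16384) and a write to
 * [4096, 8192), neither fast path above applies, so the split loop runs
 * twice: once at split = 4096 (start > key.offset) and once at
 * split = 8192 (end < extent_end).  That leaves three items sharing the
 * same bytenr and orig_offset, and the middle one is flipped to
 * BTRFS_FILE_EXTENT_REG.
 */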
/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}
/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 size_t write_bytes, bool force_uptodate)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
	err = 0;
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
				 0, 0, &cached_state, GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		if (clear_page_dirty_for_io(pages[i]))
			account_page_redirty(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}
static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	unsigned long first_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool force_page_uptodate = false;

	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			break;

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			break;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess delalloc
		 * bytes we reserved.  We need to increment outstanding_extents
		 * because btrfs_delalloc_release_space will decrement it, but
		 * we still have an outstanding extent for the chunk we actually
		 * copied into.
		 */
		if (num_pages > dirty_pages) {
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
				spin_unlock(&BTRFS_I(inode)->lock);
			}
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
			if (ret) {
				btrfs_delalloc_release_space(inode,
					dirty_pages << PAGE_CACHE_SHIFT);
				btrfs_drop_pages(pages, num_pages);
				break;
			}
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited_nr(inode->i_mapping,
						   dirty_pages);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	return num_written ? num_written : ret;
}
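
/*
 * For illustration, assuming 4096 byte pages on a 64-bit build (example
 * values are hypothetical): a 1 MiB write gives
 * nrptrs = min(256, 4096 / sizeof(struct page *)) = min(256, 512) = 256,
 * further clamped by the dirty throttle and floored at 8.  If a copy
 * faults after 3000 of 8192 requested bytes at offset 0, dirty_pages
 * rounds up to 1 while num_pages was 2, and the reservation for the
 * untouched second page is released.
 */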
static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	u64 start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;

	sb_start_write(inode->i_sb);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	err = file_update_time(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	if (num_written > 0 || num_written == -EIOCBQUEUED) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}
out:
	sb_end_write(inode->i_sb);
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &BTRFS_I(inode)->runtime_flags)) {
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}
/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	trace_btrfs_sync_file(file, datasync);

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * outside of the ->i_mutex.  That way multiple tasks can flush dirty
	 * pages concurrently, which improves performance.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	/*
	 * We flush the dirty pages again to avoid some dirty pages in the
	 * range being left behind.
	 */
	atomic_inc(&root->log_batch);
	btrfs_wait_ordered_range(inode, start, end - start + 1);
	atomic_inc(&root->log_batch);

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	smp_mb();
	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
	    BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;

		/*
		 * We've had everything committed since the last time we were
		 * modified so clear this flag in case it was set for whatever
		 * reason, it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			  &BTRFS_I(inode)->runtime_flags);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * ok we haven't committed the transaction yet, let's do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
out:
	return ret > 0 ? -EIO : ret;
}
static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;

	return 0;
}
static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
			  int slot, u64 start, u64 end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_disk_bytenr(leaf, fi))
		return 0;

	if (key.offset == end)
		return 1;
	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
		return 1;
	return 0;
}
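
/*
 * For illustration (example values are hypothetical): a hole is a
 * regular file extent item whose disk_bytenr is 0.  When punching the
 * range [8192, 12288) next to an existing hole item at key.offset 4096
 * with num_bytes 4096, key.offset + num_bytes == start, so the two
 * holes merge; an existing hole item starting exactly at end would
 * merge from the other side.
 */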
static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
		      struct btrfs_path *path, u64 offset, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct extent_map *hole_em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key key;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		return ret;
	BUG_ON(!ret);

	leaf = path->nodes[0];
	if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
		u64 num_bytes;

		path->slots[0]--;
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
			end - offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}

	if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
		u64 num_bytes;

		path->slots[0]++;
		key.offset = offset;
		btrfs_set_item_key_safe(trans, root, path, &key);
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
			offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}
	btrfs_release_path(path);

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
				       0, 0, end - offset, 0, end - offset,
				       0, 0, 0);
	if (ret)
		return ret;

out:
	btrfs_release_path(path);

	hole_em = alloc_extent_map();
	if (!hole_em) {
		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);
	} else {
		hole_em->start = offset;
		hole_em->len = end - offset;
		hole_em->orig_start = offset;

		hole_em->block_start = EXTENT_MAP_HOLE;
		hole_em->block_len = 0;
		hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
		hole_em->compress_type = BTRFS_COMPRESS_NONE;
		hole_em->generation = trans->transid;

		do {
			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, hole_em);
			if (!ret)
				list_move(&hole_em->list,
					  &em_tree->modified_extents);
			write_unlock(&em_tree->lock);
		} while (ret == -EEXIST);
		free_extent_map(hole_em);
		if (ret)
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);
	}

	return 0;
}
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_block_rsv *rsv;
	struct btrfs_trans_handle *trans;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	u64 lockstart = (offset + mask) & ~mask;
	u64 lockend = ((offset + len) & ~mask) - 1;
	u64 cur_offset = lockstart;
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
	u64 drop_end;
	int ret = 0;
	int err = 0;
	bool same_page = (offset >> PAGE_CACHE_SHIFT) ==
		((offset + len) >> PAGE_CACHE_SHIFT);

	btrfs_wait_ordered_range(inode, offset, len);

	mutex_lock(&inode->i_mutex);
	if (offset >= inode->i_size) {
		mutex_unlock(&inode->i_mutex);
		return 0;
	}

	/*
	 * Only do this if we are in the same page and we aren't doing the
	 * entire page.
	 */
	if (same_page && len < PAGE_CACHE_SIZE) {
		ret = btrfs_truncate_page(inode, offset, len, 0);
		mutex_unlock(&inode->i_mutex);
		return ret;
	}

	/* zero back part of the first page */
	ret = btrfs_truncate_page(inode, offset, 0, 0);
	if (ret) {
		mutex_unlock(&inode->i_mutex);
		return ret;
	}

	/* zero the front end of the last page */
	ret = btrfs_truncate_page(inode, offset + len, 0, 1);
	if (ret) {
		mutex_unlock(&inode->i_mutex);
		return ret;
	}

	if (lockend < lockstart) {
		mutex_unlock(&inode->i_mutex);
		return 0;
	}

	while (1) {
		struct btrfs_ordered_extent *ordered;

		truncate_pagecache_range(inode, lockstart, lockend);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);

		/*
		 * We need to make sure we have no ordered extents in this range
		 * and nobody raced in and read a page in this range, if we did
		 * we need to try again.
		 */
		if ((!ordered ||
		    (ordered->file_offset + ordered->len < lockstart ||
		     ordered->file_offset > lockend)) &&
		     !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, EXTENT_UPTODATE, 0,
				     cached_state)) {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, &cached_state, GFP_NOFS);
		btrfs_wait_ordered_range(inode, lockstart,
					 lockend - lockstart + 1);
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv) {
		ret = -ENOMEM;
		goto out_free;
	}
	rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
	rsv->failfast = 1;

	/*
	 * 1 - update the inode
	 * 1 - removing the extents in the range
	 * 1 - adding the hole extent
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
				      min_size);
	BUG_ON(ret);
	trans->block_rsv = rsv;

	while (cur_offset < lockend) {
		ret = __btrfs_drop_extents(trans, root, inode, path,
					   cur_offset, lockend + 1,
					   &drop_end, 1);
		if (ret != -ENOSPC)
			break;

		trans->block_rsv = &root->fs_info->trans_block_rsv;

		ret = fill_holes(trans, inode, path, cur_offset, drop_end);
		if (ret) {
			err = ret;
			break;
		}

		cur_offset = drop_end;

		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			err = ret;
			break;
		}

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(root);

		trans = btrfs_start_transaction(root, 3);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
					      rsv, min_size);
		BUG_ON(ret);	/* shouldn't happen */
		trans->block_rsv = rsv;
	}

	if (ret) {
		err = ret;
		goto out_trans;
	}

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	ret = fill_holes(trans, inode, path, cur_offset, drop_end);
	if (ret) {
		err = ret;
		goto out_trans;
	}

out_trans:
	if (!trans)
		goto out_free;

	inode_inc_iversion(inode);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	ret = btrfs_update_inode(trans, root, inode);
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);
out_free:
	btrfs_free_path(path);
	btrfs_free_block_rsv(root, rsv);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	mutex_unlock(&inode->i_mutex);
	if (ret && !err)
		err = ret;
	return err;
}
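
/*
 * For illustration, assuming a 4096 byte sectorsize (example values are
 * hypothetical): punching offset = 1000, len = 10000 gives
 * lockstart = (1000 + 4095) & ~4095 = 4096 and
 * lockend = (11000 & ~4095) - 1 = 8191, so only the fully covered
 * sector range [4096, 8191] has its extents dropped; the partial
 * sectors at either end are zeroed by btrfs_truncate_page() instead.
 */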
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_state *cached_state = NULL;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/* Make sure we aren't being given some crap mode */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return btrfs_punch_hole(inode, offset, len);

	/*
	 * Make sure we have enough space before we do the
	 * actual allocations.
	 */
	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start + 1);
	if (ret)
		return ret;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	}

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		u64 actual_end;

		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		if (IS_ERR_OR_NULL(em)) {
			if (!em)
				ret = -ENOMEM;
			else
				ret = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = (last_byte + mask) & ~mask;

		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);

			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		} else if (actual_end > inode->i_size &&
			   !(mode & FALLOC_FL_KEEP_SIZE)) {
			/*
			 * We didn't need to allocate any more space, but we
			 * still extended the size of the file so we need to
			 * update i_size.
			 */
			inode->i_ctime = CURRENT_TIME;
			i_size_write(inode, actual_end);
			btrfs_ordered_update_i_size(inode, actual_end, NULL);
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	/* Let go of our reservation. */
	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start + 1);
	return ret;
}
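
/*
 * For illustration, assuming a 4096 byte sectorsize (example values are
 * hypothetical): fallocate(offset = 1000, len = 3000) rounds to
 * alloc_start = 1000 & ~4095 = 0 and
 * alloc_end = (1000 + 3000 + 4095) & ~4095 = 4096, so a full sector is
 * reserved and preallocated even though the caller's range covers only
 * part of it.
 */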
static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	u64 lockstart = *offset;
	u64 lockend = i_size_read(inode);
	u64 start = *offset;
	u64 orig_start = *offset;
	u64 len = i_size_read(inode);
	u64 last_end = 0;
	int ret = 0;

	lockend = max_t(u64, root->sectorsize, lockend);
	if (lockend <= lockstart)
		lockend = lockstart + root->sectorsize;

	len = lockend - lockstart + 1;

	len = max_t(u64, len, root->sectorsize);
	if (inode->i_size == 0)
		return -ENXIO;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
			 &cached_state);

	/*
	 * Delalloc is such a pain.  If we have a hole and we have pending
	 * delalloc for a portion of the hole we will get back a hole that
	 * exists for the entire range since it hasn't been actually written
	 * yet.  So to take care of this case we need to look for an extent just
	 * before the position we want in case there is outstanding delalloc
	 * going on here.
	 */
	if (origin == SEEK_HOLE && start != 0) {
		if (start <= root->sectorsize)
			em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
						     root->sectorsize, 0);
		else
			em = btrfs_get_extent_fiemap(inode, NULL, 0,
						     start - root->sectorsize,
						     root->sectorsize, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		last_end = em->start + em->len;
		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);
		free_extent_map(em);
	}

	while (1) {
		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			break;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
				if (last_end <= orig_start) {
					free_extent_map(em);
					ret = -ENXIO;
					break;
				}
			}

			if (origin == SEEK_HOLE) {
				*offset = start;
				free_extent_map(em);
				break;
			}
		} else {
			if (origin == SEEK_DATA) {
				if (em->block_start == EXTENT_MAP_DELALLOC) {
					if (start >= inode->i_size) {
						free_extent_map(em);
						ret = -ENXIO;
						break;
					}
				}

				*offset = start;
				free_extent_map(em);
				break;
			}
		}

		start = em->start + em->len;
		last_end = em->start + em->len;

		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);

		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
			free_extent_map(em);
			ret = -ENXIO;
			break;
		}
		free_extent_map(em);
		cond_resched();
	}
	if (!ret)
		*offset = min(*offset, inode->i_size);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	return ret;
}
static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
	case SEEK_CUR:
		offset = generic_file_llseek(file, offset, origin);
		goto out;
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset >= i_size_read(inode)) {
			mutex_unlock(&inode->i_mutex);
			return -ENXIO;
		}

		ret = find_desired_extent(inode, &offset, origin);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
		offset = -EINVAL;
		goto out;
	}
	if (offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}
const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};
void btrfs_auto_defrag_exit(void)
{
	if (btrfs_inode_defrag_cachep)
		kmem_cache_destroy(btrfs_inode_defrag_cachep);
}

int btrfs_auto_defrag_init(void)
{
	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
					sizeof(struct inode_defrag), 0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_inode_defrag_cachep)
		return -ENOMEM;

	return 0;
}