/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "volumes.h"
#include "ordered-data.h"
#include "xattr.h"
#include "compression.h"
#include "locking.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;

#define S_SHIFT 12

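/*
 * translate the S_IFMT bits of an inode mode into the BTRFS_FT_* type
 * that gets stored in directory entries
 */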
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};

static void btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);

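/*
 * set up the security xattrs and ACLs for a newly created inode,
 * inheriting from the parent directory
 */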
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir);
	return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;
	int use_compress = 0;

	if (compressed_size && compressed_pages) {
		use_compress = 1;
		cur_size = compressed_size;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (use_compress) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage, KM_USER0);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr, KM_USER0);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  BTRFS_COMPRESS_ZLIB);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page, KM_USER0);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr, KM_USER0);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	btrfs_update_inode(trans, root, inode);

	return 0;
fail:
	btrfs_free_path(path);
	return err;
}

/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
				 &hint_byte, 1);
	BUG_ON(ret);

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compressed_pages);
	BUG_ON(ret);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
	return 0;
}

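/*
 * one unit of async work: a contiguous range that compress_file_range
 * has (possibly) compressed, waiting for submit_compressed_extents to
 * allocate space on disk and write it out
 */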
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 orig_start;
	u64 disk_num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;

	orig_start = start;

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    btrfs_test_opt(root, COMPRESS)) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);

		ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
						total_compressed, pages,
						nr_pages, &nr_pages_ret,
						&total_in,
						&total_compressed,
						max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr, KM_USER0);
			}
			will_compress = 1;
		}
	}
	if (start == 0) {
		trans = btrfs_join_transaction(root, 1);
		BUG_ON(!trans);
		btrfs_set_trans_block_group(trans, inode);

		/* let's try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed, pages);
		}
		if (ret == 0) {
			/*
			 * inline extent creation worked, we don't need
			 * to create any more async work items.  Unlock
			 * and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_ACCOUNTING |
			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);

			btrfs_end_transaction(trans, root);
			goto free_pages_out;
		}
		btrfs_end_transaction(trans, root);
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, so round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			disk_num_bytes = total_compressed;
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret);

		if (start + num_bytes < end && start + num_bytes < actual_end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
		*num_added += 1;
	}

out:
	return 0;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;

	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1, GFP_NOFS);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1,
			    GFP_NOFS);

		trans = btrfs_join_transaction(root, 1);
		ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint,
					   (u64)-1, &ins, 1);
		btrfs_end_transaction(trans, root);

		if (ret) {
			int i;
			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;
			unlock_extent(io_tree, async_extent->start,
				      async_extent->start +
				      async_extent->ram_size - 1, GFP_NOFS);
			goto retry;
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map(GFP_NOFS);
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent(inode, async_extent->start,
					       ins.objectid,
					       async_extent->ram_size,
					       ins.offset,
					       BTRFS_ORDERED_COMPRESSED);
		BUG_ON(ret);

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
				EXTENT_CLEAR_DELALLOC |
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);

		BUG_ON(ret);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}

	return 0;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);
	btrfs_set_trans_block_group(trans, inode);

	actual_end = min_t(u64, isize, end + 1);

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	if (start == 0) {
		/* let's try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_ACCOUNTING |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			ret = 0;
			goto out;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(&root->fs_info->super_copy));

	read_lock(&BTRFS_I(inode)->extent_tree.lock);
	em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
				   start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&BTRFS_I(inode)->extent_tree.lock);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);

		em = alloc_extent_map(GFP_NOFS);
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret);

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			BUG_ON(ret);
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	ret = 0;
	btrfs_end_transaction(trans, root);

	return ret;
}

/*
 * work queue callback to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0)
		async_cow->inode = NULL;
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	kfree(async_cow);
}
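
/*
 * kick off async compression of a delalloc range: carve [start, end]
 * into 512k chunks, queue each as an async_cow work item, and throttle
 * the caller once too many async delalloc pages are in flight
 */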
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
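
/*
 * returns 1 if csums exist for any block in the range, 0 if none do.
 * used below to force COW when a nocow write would otherwise leave an
 * extent with a mix of valid and missing checksums.
 */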
static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * when the nocow writeback path is taken, this checks for snapshots or
 * COW copies of the extents that exist in the file, and COWs the file
 * as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret;
	int type;
	int nocow;
	int check_prev = 1;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       cur_offset, 0);
		BUG_ON(ret < 0);
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == inode->i_ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				BUG_ON(1);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > inode->i_ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(root, path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					found_key.offset - 1, page_started,
					nr_written, 1);
			BUG_ON(ret);
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map(GFP_NOFS);
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret);

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(root, path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		BUG_ON(ret);
	}

	ret = btrfs_end_transaction(trans, root);
	BUG_ON(ret);
	btrfs_free_path(path);
	return 0;
}

/*
 * extent_io.c callback to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	else if (!btrfs_test_opt(root, COMPRESS))
		ret = cow_file_range(inode, locked_page, start, end,
				     page_started, nr_written, 1);
	else
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	return ret;
}
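
/*
 * extent_io.c split_extent_hook: splitting a delalloc extent can leave
 * the pieces needing more max_extent sized allocations than the original,
 * so bump outstanding_extents when that happens
 */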
static int btrfs_split_extent_hook(struct inode *inode,
				   struct extent_state *orig, u64 split)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 size;

	if (!(orig->state & EXTENT_DELALLOC))
		return 0;

	size = orig->end - orig->start + 1;
	if (size > root->fs_info->max_extent) {
		u64 num_extents;
		u64 new_size;

		new_size = orig->end - split + 1;
		num_extents = div64_u64(size + root->fs_info->max_extent - 1,
					root->fs_info->max_extent);

		/*
		 * if we break a large extent up then leave outstanding_extents
		 * be, since we've already accounted for the large extent.
		 */
		if (div64_u64(new_size + root->fs_info->max_extent - 1,
			      root->fs_info->max_extent) < num_extents)
			return 0;
	}

	spin_lock(&BTRFS_I(inode)->accounting_lock);
	BTRFS_I(inode)->outstanding_extents++;
	spin_unlock(&BTRFS_I(inode)->accounting_lock);

	return 0;
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static int btrfs_merge_extent_hook(struct inode *inode,
				   struct extent_state *new,
				   struct extent_state *other)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 new_size, old_size;
	u64 num_extents;

	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return 0;

	old_size = other->end - other->start + 1;
	if (new->start < other->start)
		new_size = other->end - new->start + 1;
	else
		new_size = new->end - other->start + 1;

	/* we're not bigger than the max, unreserve the space and go */
	if (new_size <= root->fs_info->max_extent) {
		spin_lock(&BTRFS_I(inode)->accounting_lock);
		BTRFS_I(inode)->outstanding_extents--;
		spin_unlock(&BTRFS_I(inode)->accounting_lock);
		return 0;
	}

	/*
	 * If we grew by another max_extent, just return, we want to keep that
	 * reserved amount.
	 */
	num_extents = div64_u64(old_size + root->fs_info->max_extent - 1,
				root->fs_info->max_extent);
	if (div64_u64(new_size + root->fs_info->max_extent - 1,
		      root->fs_info->max_extent) > num_extents)
		return 0;

	spin_lock(&BTRFS_I(inode)->accounting_lock);
	BTRFS_I(inode)->outstanding_extents--;
	spin_unlock(&BTRFS_I(inode)->accounting_lock);

	return 0;
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
			      unsigned long old, unsigned long bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;

		spin_lock(&BTRFS_I(inode)->accounting_lock);
		BTRFS_I(inode)->outstanding_extents++;
		spin_unlock(&BTRFS_I(inode)->accounting_lock);
		btrfs_delalloc_reserve_space(root, inode, end - start + 1);
		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += end - start + 1;
		root->fs_info->delalloc_bytes += end - start + 1;
		if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static int btrfs_clear_bit_hook(struct inode *inode,
				struct extent_state *state, unsigned long bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;

		if (bits & EXTENT_DO_ACCOUNTING) {
			spin_lock(&BTRFS_I(inode)->accounting_lock);
			BTRFS_I(inode)->outstanding_extents--;
			spin_unlock(&BTRFS_I(inode)->accounting_lock);
			btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
		}

		spin_lock(&root->fs_info->delalloc_lock);
		if (state->end - state->start + 1 >
		    root->fs_info->delalloc_bytes) {
			printk(KERN_INFO "btrfs warning: delalloc account "
			       "%llu %llu\n",
			       (unsigned long long)
			       state->end - state->start + 1,
			       (unsigned long long)
			       root->fs_info->delalloc_bytes);
			btrfs_delalloc_free_space(root, inode, (u64)-1);
			root->fs_info->delalloc_bytes = 0;
			BTRFS_I(inode)->delalloc_bytes = 0;
		} else {
			btrfs_delalloc_free_space(root, inode,
						  state->end -
						  state->start + 1);
			root->fs_info->delalloc_bytes -= state->end -
				state->start + 1;
			BTRFS_I(inode)->delalloc_bytes -= state->end -
				state->start + 1;
		}
		if (BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);

	if (map_length < length + size)
		return 1;
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret);
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}

/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	if (!(rw & (1 << BIO_RW))) {
		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum)
			btrfs_lookup_bio_sums(root, inode, bio, NULL);
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}

/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct inode *inode, u64 file_offset,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	btrfs_set_trans_block_group(trans, inode);

	list_for_each_entry(sum, list, list) {
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
	}
	return 0;
}
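
/*
 * mark a range in the inode's io_tree as delalloc so that writeback
 * runs one of the delalloc paths above on it
 */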
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
{
	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
		WARN_ON(1);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   GFP_NOFS);
}

/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
			      page_end, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		goto again;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end);
	ClearPageChecked(page);
out:
	unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
	kfree(fixup);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* this page is properly in the ordered list */
	if (TestClearPagePrivate2(page))
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EAGAIN;
}
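
/*
 * insert the on-disk file extent item for an extent that has finished IO
 * and take a reference on the reserved extent it points to
 */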
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	path->leave_spinning = 1;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
				 &hint, 0);
	BUG_ON(ret);

	ins.objectid = inode->i_ino;
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	BUG_ON(ret);
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_unlock_up_safe(path, 1);
	btrfs_set_lock_blocking(leaf);

	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_file_extent(trans, root,
					root->root_key.objectid,
					inode->i_ino, file_pos, &ins);
	BUG_ON(ret);
	btrfs_free_path(path);

	return 0;
}

/*
 * helper function for btrfs_finish_ordered_io, this
 * just reads in some of the csum leaves to prime them into ram
 * before we start the transaction.  It limits the amount of btree
 * reads required while inside the transaction.
 */
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered_extent = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int compressed = 0;
	int ret;

	ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
	if (!ret)
		return 0;

	ordered_extent = btrfs_lookup_ordered_extent(inode, start);
	BUG_ON(!ordered_extent);

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list));
		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
		if (!ret) {
			trans = btrfs_join_transaction(root, 1);
			ret = btrfs_update_inode(trans, root, inode);
			BUG_ON(ret);
			btrfs_end_transaction(trans, root);
		}
		goto out;
	}

	lock_extent(io_tree, ordered_extent->file_offset,
		    ordered_extent->file_offset + ordered_extent->len - 1,
		    GFP_NOFS);

	trans = btrfs_join_transaction(root, 1);

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compressed = 1;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compressed);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						ordered_extent->len);
		BUG_ON(ret);
	} else {
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						ordered_extent->len,
						ordered_extent->len,
						compressed, 0, 0,
						BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered_extent->file_offset,
				   ordered_extent->len);
		BUG_ON(ret);
	}
	unlock_extent(io_tree, ordered_extent->file_offset,
		      ordered_extent->file_offset + ordered_extent->len - 1,
		      GFP_NOFS);
	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	/* this also removes the ordered extent from the tree */
	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
	ret = btrfs_update_inode(trans, root, inode);
	BUG_ON(ret);
	btrfs_end_transaction(trans, root);
out:
	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return 0;
}

static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				struct extent_state *state, int uptodate)
{
	ClearPagePrivate2(page);
	return btrfs_finish_ordered_io(page->mapping->host, start, end);
}

/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int last_mirror;
};

static int btrfs_io_failed_hook(struct bio *failed_bio,
			 struct page *page, u64 start, u64 end,
			 struct extent_state *state)
{
	struct io_failure_record *failrec = NULL;
	u64 private;
	struct extent_map *em;
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct bio *bio;
	int num_copies;
	int ret;
	int rw;
	u64 logical;

	ret = get_state_private(failure_tree, start, &private);
	if (ret) {
		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
		if (!failrec)
			return -ENOMEM;
		failrec->start = start;
		failrec->len = end - start + 1;
		failrec->last_mirror = 0;
		failrec->bio_flags = 0;

		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, failrec->len);
		if (em->start > start || em->start + em->len < start) {
			free_extent_map(em);
			em = NULL;
		}
		read_unlock(&em_tree->lock);

		if (!em || IS_ERR(em)) {
			kfree(failrec);
			return -EIO;
		}
		logical = start - em->start;
		logical = em->block_start + logical;
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			logical = em->block_start;
			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
		}
		failrec->logical = logical;
		free_extent_map(em);
		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
				EXTENT_DIRTY, GFP_NOFS);
		set_state_private(failure_tree, start,
				  (u64)(unsigned long)failrec);
	} else {
		failrec = (struct io_failure_record *)(unsigned long)private;
	}
	num_copies = btrfs_num_copies(
			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
			      failrec->logical, failrec->len);
	failrec->last_mirror++;
	if (!state) {
		spin_lock(&BTRFS_I(inode)->io_tree.lock);
		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
						    failrec->start,
						    EXTENT_LOCKED);
		if (state && state->start != failrec->start)
			state = NULL;
		spin_unlock(&BTRFS_I(inode)->io_tree.lock);
	}
	if (!state || failrec->last_mirror > num_copies) {
		set_state_private(failure_tree, failrec->start, 0);
		clear_extent_bits(failure_tree, failrec->start,
				  failrec->start + failrec->len - 1,
				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
		kfree(failrec);
		return -EIO;
	}
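	/* build a new bio aimed at the next mirror and resubmit the page */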
	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_private = state;
	bio->bi_end_io = failed_bio->bi_end_io;
	bio->bi_sector = failrec->logical >> 9;
	bio->bi_bdev = failed_bio->bi_bdev;
	bio->bi_size = 0;

	bio_add_page(bio, page, failrec->len, start - page_offset(page));
	if (failed_bio->bi_rw & (1 << BIO_RW))
		rw = WRITE;
	else
		rw = READ;

	BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
						     failrec->last_mirror,
						     failrec->bio_flags);
	return 0;
}

/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
static int btrfs_clean_io_failures(struct inode *inode, u64 start)
{
	u64 private;
	u64 private_failure;
	struct io_failure_record *failure;
	int ret;

	private = 0;
	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
			     (u64)-1, 1, EXTENT_DIRTY)) {
		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
					start, &private_failure);
		if (ret == 0) {
			failure = (struct io_failure_record *)(unsigned long)
				   private_failure;
			set_state_private(&BTRFS_I(inode)->io_failure_tree,
					  failure->start, 0);
			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
					  failure->start,
					  failure->start + failure->len - 1,
					  EXTENT_DIRTY | EXTENT_LOCKED,
					  GFP_NOFS);
			kfree(failure);
		}
	}
	return 0;
}

/*
 * when reads are done, we need to check csums to verify the data is correct
 * if there's a match, we allow the bio to finish.  If not, we go through
 * the io_failure_record routines to find good copies
 */
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	char *kaddr;
	u64 private = ~(u32)0;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum = ~(u32)0;

	if (PageChecked(page)) {
		ClearPageChecked(page);
		goto good;
	}

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
				  GFP_NOFS);
		return 0;
	}

	if (state && state->start == start) {
		private = state->private;
		ret = 0;
	} else {
		ret = get_state_private(io_tree, start, &private);
	}
	kaddr = kmap_atomic(page, KM_USER0);
	if (ret)
		goto zeroit;

	csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != private)
		goto zeroit;

	kunmap_atomic(kaddr, KM_USER0);
good:
	/* if the io failure tree for this inode is non-empty,
	 * check to see if we've recovered from a failed IO
	 */
	btrfs_clean_io_failures(inode, start);
	return 0;

zeroit:
	if (printk_ratelimit()) {
		printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
		       "private %llu\n", page->mapping->host->i_ino,
		       (unsigned long long)start, csum,
		       (unsigned long long)private);
	}
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	if (private == 0)
		return 0;
	return -EIO;
}
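
/*
 * the final iput on an inode can end up deleting it, which isn't safe
 * from every calling context.  btrfs_add_delayed_iput drops the reference
 * right away when it isn't the last one, and otherwise queues the final
 * iput to run later via btrfs_run_delayed_iputs.
 */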
struct delayed_iput {
	struct list_head list;
	struct inode *inode;
};

void btrfs_add_delayed_iput(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct delayed_iput *delayed;

	if (atomic_add_unless(&inode->i_count, -1, 1))
		return;

	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
	delayed->inode = inode;

	spin_lock(&fs_info->delayed_iput_lock);
	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
}

void btrfs_run_delayed_iputs(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct delayed_iput *delayed;
	int empty;

	spin_lock(&fs_info->delayed_iput_lock);
	empty = list_empty(&fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
	if (empty)
		return;

	down_read(&root->fs_info->cleanup_work_sem);
	spin_lock(&fs_info->delayed_iput_lock);
	list_splice_init(&fs_info->delayed_iputs, &list);
	spin_unlock(&fs_info->delayed_iput_lock);

	while (!list_empty(&list)) {
		delayed = list_entry(list.next, struct delayed_iput, list);
		list_del(&delayed->list);
		iput(delayed->inode);
		kfree(delayed);
	}
	up_read(&root->fs_info->cleanup_work_sem);
}

/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink/truncate.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	spin_lock(&root->list_lock);

	/* already on the orphan list, we're good */
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);

	spin_unlock(&root->list_lock);

	/*
	 * insert an orphan item to track this unlinked/truncated file
	 */
	ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);

	return ret;
}

/*
 * We have done the truncate/delete so we can go ahead and remove the orphan
 * item for this particular inode.
 */
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	spin_lock(&root->list_lock);

	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	list_del_init(&BTRFS_I(inode)->i_orphan);
	if (!trans) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	spin_unlock(&root->list_lock);

	ret = btrfs_del_orphan_item(trans, root, inode->i_ino);

	return ret;
}

/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
void btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	int ret = 0, nr_unlink = 0, nr_truncate = 0;

	if (!xchg(&root->clean_orphans, 0))
		return;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	path->reada = -1;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			printk(KERN_ERR "Error searching slot for orphan: %d"
			       "\n", ret);
			break;
		}

		/*
		 * if ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		item = btrfs_item_nr(leaf, path->slots[0]);
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(root, path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing.  we store the inode number in the
		 * offset of the orphan item.
		 */
		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
		if (IS_ERR(inode))
			break;

		/*
		 * add this inode to the orphan list so btrfs_orphan_del does
		 * the proper thing when we hit it
		 */
		spin_lock(&root->list_lock);
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
		spin_unlock(&root->list_lock);

		/*
		 * if this is a bad inode, means we actually succeeded in
		 * removing the inode, but not the orphan record, which means
		 * we need to manually delete the orphan since iput will just
		 * do a destroy_inode
		 */
		if (is_bad_inode(inode)) {
			trans = btrfs_start_transaction(root, 1);
			btrfs_orphan_del(trans, inode);
			btrfs_end_transaction(trans, root);
			iput(inode);
			continue;
		}

		/* if we have links, this was a truncate, let's do that */
		if (inode->i_nlink) {
			nr_truncate++;
			btrfs_truncate(inode);
		} else {
			nr_unlink++;
		}

		/* this will do delete_inode and everything for us */
		iput(inode);
	}

	if (nr_unlink)
		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
	if (nr_truncate)
		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);

	btrfs_free_path(path);
}
2203 * very simple check to peek ahead in the leaf looking for xattrs. If we
2204 * don't find any xattrs, we know there can't be any acls.
2206 * slot is the slot the inode is in, objectid is the objectid of the inode
2208 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2209 int slot, u64 objectid)
2211 u32 nritems = btrfs_header_nritems(leaf);
2212 struct btrfs_key found_key;
2216 while (slot < nritems) {
2217 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2219 /* we found a different objectid, there must not be acls */
2220 if (found_key.objectid != objectid)
2223 /* we found an xattr, assume we've got an acl */
2224 if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2228 * we found a key greater than an xattr key, there can't
2229 * be any acls later on
2231 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2238 * it goes inode, inode backrefs, xattrs, extents,
2239 * so if there are a ton of hard links to an inode there can
2240 * be a lot of backrefs. Don't waste time searching too hard,
2241 * this is just an optimization
2246 /* we hit the end of the leaf before we found an xattr or
2247 * something larger than an xattr. We have to assume the inode
2248 * has acls
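/*
 * Illustration only: the peek-ahead above works because items in a leaf
 * are sorted by (objectid, type).  A self-contained sketch of the same
 * decision over a plain array; the toy_key type and the key constant are
 * assumptions, not btrfs definitions.
 */
#if 0
#include <stdint.h>

struct toy_key { uint64_t objectid; uint8_t type; };

#define TOY_XATTR_ITEM_KEY 24	/* assumed: sorts after inode item keys */

/* returns 0 if no xattr (hence no acl) can exist, 1 if one might */
static int may_have_acls(const struct toy_key *keys, int nritems,
			 int slot, uint64_t objectid)
{
	while (++slot < nritems) {
		if (keys[slot].objectid != objectid)
			return 0;	/* left this inode's key range */
		if (keys[slot].type == TOY_XATTR_ITEM_KEY)
			return 1;	/* found an xattr, assume an acl */
		if (keys[slot].type > TOY_XATTR_ITEM_KEY)
			return 0;	/* sorted: no xattr can follow */
	}
	return 1;	/* ran off the leaf: must assume acls exist */
}
#endif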
2254 * read an inode from the btree into the in-memory inode
2256 static void btrfs_read_locked_inode(struct inode *inode)
2258 struct btrfs_path *path;
2259 struct extent_buffer *leaf;
2260 struct btrfs_inode_item *inode_item;
2261 struct btrfs_timespec *tspec;
2262 struct btrfs_root *root = BTRFS_I(inode)->root;
2263 struct btrfs_key location;
2265 u64 alloc_group_block;
2269 path = btrfs_alloc_path();
2271 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2273 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2277 leaf = path->nodes[0];
2278 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2279 struct btrfs_inode_item);
2281 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2282 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2283 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2284 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2285 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2287 tspec = btrfs_inode_atime(inode_item);
2288 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2289 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2291 tspec = btrfs_inode_mtime(inode_item);
2292 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2293 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2295 tspec = btrfs_inode_ctime(inode_item);
2296 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2297 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2299 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2300 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2301 BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2302 inode->i_generation = BTRFS_I(inode)->generation;
2304 rdev = btrfs_inode_rdev(leaf, inode_item);
2306 BTRFS_I(inode)->index_cnt = (u64)-1;
2307 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2309 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2312 * try to precache a NULL acl entry for files that don't have
2313 * any xattrs or acls
2315 maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
2317 cache_no_acl(inode);
2319 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2320 alloc_group_block, 0);
2321 btrfs_free_path(path);
2324 switch (inode->i_mode & S_IFMT) {
2326 inode->i_mapping->a_ops = &btrfs_aops;
2327 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2328 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2329 inode->i_fop = &btrfs_file_operations;
2330 inode->i_op = &btrfs_file_inode_operations;
2333 inode->i_fop = &btrfs_dir_file_operations;
2334 if (root == root->fs_info->tree_root)
2335 inode->i_op = &btrfs_dir_ro_inode_operations;
2337 inode->i_op = &btrfs_dir_inode_operations;
2340 inode->i_op = &btrfs_symlink_inode_operations;
2341 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2342 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2345 inode->i_op = &btrfs_special_inode_operations;
2346 init_special_inode(inode, inode->i_mode, rdev);
2350 btrfs_update_iflags(inode);
2354 btrfs_free_path(path);
2355 make_bad_inode(inode);
2359 * given a leaf and an inode, copy the inode fields into the leaf
2361 static void fill_inode_item(struct btrfs_trans_handle *trans,
2362 struct extent_buffer *leaf,
2363 struct btrfs_inode_item *item,
2364 struct inode *inode)
2366 btrfs_set_inode_uid(leaf, item, inode->i_uid);
2367 btrfs_set_inode_gid(leaf, item, inode->i_gid);
2368 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2369 btrfs_set_inode_mode(leaf, item, inode->i_mode);
2370 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2372 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2373 inode->i_atime.tv_sec);
2374 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2375 inode->i_atime.tv_nsec);
2377 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2378 inode->i_mtime.tv_sec);
2379 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2380 inode->i_mtime.tv_nsec);
2382 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2383 inode->i_ctime.tv_sec);
2384 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2385 inode->i_ctime.tv_nsec);
2387 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2388 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2389 btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2390 btrfs_set_inode_transid(leaf, item, trans->transid);
2391 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2392 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2393 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2397 * copy everything in the in-memory inode into the btree.
2399 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2400 struct btrfs_root *root, struct inode *inode)
2402 struct btrfs_inode_item *inode_item;
2403 struct btrfs_path *path;
2404 struct extent_buffer *leaf;
2407 path = btrfs_alloc_path();
2409 path->leave_spinning = 1;
2410 ret = btrfs_lookup_inode(trans, root, path,
2411 &BTRFS_I(inode)->location, 1);
2418 btrfs_unlock_up_safe(path, 1);
2419 leaf = path->nodes[0];
2420 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2421 struct btrfs_inode_item);
2423 fill_inode_item(trans, leaf, inode_item, inode);
2424 btrfs_mark_buffer_dirty(leaf);
2425 btrfs_set_inode_last_trans(trans, inode);
2428 btrfs_free_path(path);
2434 * unlink helper that gets used here in inode.c and in the tree logging
2435 * recovery code. It removes a link in a directory with a given name, and
2436 * also drops the back refs in the inode to the directory
2438 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2439 struct btrfs_root *root,
2440 struct inode *dir, struct inode *inode,
2441 const char *name, int name_len)
2443 struct btrfs_path *path;
2445 struct extent_buffer *leaf;
2446 struct btrfs_dir_item *di;
2447 struct btrfs_key key;
2450 path = btrfs_alloc_path();
2456 path->leave_spinning = 1;
2457 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2458 name, name_len, -1);
2467 leaf = path->nodes[0];
2468 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2469 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2472 btrfs_release_path(root, path);
2474 ret = btrfs_del_inode_ref(trans, root, name, name_len,
2476 dir->i_ino, &index);
2478 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2479 "inode %lu parent %lu\n", name_len, name,
2480 inode->i_ino, dir->i_ino);
2484 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2485 index, name, name_len, -1);
2494 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2495 btrfs_release_path(root, path);
2497 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2499 BUG_ON(ret != 0 && ret != -ENOENT);
2501 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2505 btrfs_free_path(path);
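/*
 * note: each name is stored twice, once in the dir item and once in the
 * dir index item, which is why directory i_size accounts name_len twice
 * below
 */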
2509 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2510 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2511 btrfs_update_inode(trans, root, dir);
2512 btrfs_drop_nlink(inode);
2513 ret = btrfs_update_inode(trans, root, inode);
2518 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2520 struct btrfs_root *root;
2521 struct btrfs_trans_handle *trans;
2522 struct inode *inode = dentry->d_inode;
2524 unsigned long nr = 0;
2526 root = BTRFS_I(dir)->root;
2529 * 5 items for unlink inode
2530 * 1 for orphan
2532 ret = btrfs_reserve_metadata_space(root, 6);
2536 trans = btrfs_start_transaction(root, 1);
2537 if (IS_ERR(trans)) {
2538 btrfs_unreserve_metadata_space(root, 6);
2539 return PTR_ERR(trans);
2542 btrfs_set_trans_block_group(trans, dir);
2544 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2546 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2547 dentry->d_name.name, dentry->d_name.len);
2549 if (inode->i_nlink == 0)
2550 ret = btrfs_orphan_add(trans, inode);
2552 nr = trans->blocks_used;
2554 btrfs_end_transaction_throttle(trans, root);
2555 btrfs_unreserve_metadata_space(root, 6);
2556 btrfs_btree_balance_dirty(root, nr);
2560 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2561 struct btrfs_root *root,
2562 struct inode *dir, u64 objectid,
2563 const char *name, int name_len)
2565 struct btrfs_path *path;
2566 struct extent_buffer *leaf;
2567 struct btrfs_dir_item *di;
2568 struct btrfs_key key;
2572 path = btrfs_alloc_path();
2576 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2577 name, name_len, -1);
2578 BUG_ON(!di || IS_ERR(di));
2580 leaf = path->nodes[0];
2581 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2582 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2583 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2585 btrfs_release_path(root, path);
2587 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
2588 objectid, root->root_key.objectid,
2589 dir->i_ino, &index, name, name_len);
2591 BUG_ON(ret != -ENOENT);
2592 di = btrfs_search_dir_index_item(root, path, dir->i_ino,
2594 BUG_ON(!di || IS_ERR(di));
2596 leaf = path->nodes[0];
2597 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2598 btrfs_release_path(root, path);
2602 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2603 index, name, name_len, -1);
2604 BUG_ON(!di || IS_ERR(di));
2606 leaf = path->nodes[0];
2607 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2608 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2609 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2611 btrfs_release_path(root, path);
2613 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2614 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2615 ret = btrfs_update_inode(trans, root, dir);
2617 dir->i_sb->s_dirt = 1;
2619 btrfs_free_path(path);
2623 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2625 struct inode *inode = dentry->d_inode;
2628 struct btrfs_root *root = BTRFS_I(dir)->root;
2629 struct btrfs_trans_handle *trans;
2630 unsigned long nr = 0;
2632 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2633 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
2636 ret = btrfs_reserve_metadata_space(root, 5);
2640 trans = btrfs_start_transaction(root, 1);
2641 if (IS_ERR(trans)) {
2642 btrfs_unreserve_metadata_space(root, 5);
2643 return PTR_ERR(trans);
2646 btrfs_set_trans_block_group(trans, dir);
2648 if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
2649 err = btrfs_unlink_subvol(trans, root, dir,
2650 BTRFS_I(inode)->location.objectid,
2651 dentry->d_name.name,
2652 dentry->d_name.len);
2656 err = btrfs_orphan_add(trans, inode);
2660 /* now the directory is empty */
2661 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2662 dentry->d_name.name, dentry->d_name.len);
2664 btrfs_i_size_write(inode, 0);
2666 nr = trans->blocks_used;
2667 ret = btrfs_end_transaction_throttle(trans, root);
2668 btrfs_unreserve_metadata_space(root, 5);
2669 btrfs_btree_balance_dirty(root, nr);
2678 * when truncating bytes in a file, it is possible to avoid reading
2679 * the leaves that contain only checksum items. This can be the
2680 * majority of the IO required to delete a large file, but it must
2681 * be done carefully.
2683 * The keys in the level just above the leaves are checked to make sure
2684 * the lowest key in a given leaf is a csum key, and starts at an offset
2685 * after the new size.
2687 * Then the key for the next leaf is checked to make sure it also has
2688 * a checksum item for the same file. If it does, we know our target leaf
2689 * contains only checksum items, and it can be safely freed without reading
2692 * This is just an optimization targeted at large files. It may do
2693 * nothing. It will return 0 unless things went badly.
2695 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2696 struct btrfs_root *root,
2697 struct btrfs_path *path,
2698 struct inode *inode, u64 new_size)
2700 struct btrfs_key key;
2703 struct btrfs_key found_key;
2704 struct btrfs_key other_key;
2705 struct btrfs_leaf_ref *ref;
2709 path->lowest_level = 1;
2710 key.objectid = inode->i_ino;
2711 key.type = BTRFS_CSUM_ITEM_KEY;
2712 key.offset = new_size;
2714 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2718 if (path->nodes[1] == NULL) {
2723 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2724 nritems = btrfs_header_nritems(path->nodes[1]);
2729 if (path->slots[1] >= nritems)
2732 /* did we find a key greater than anything we want to delete? */
2733 if (found_key.objectid > inode->i_ino ||
2734 (found_key.objectid == inode->i_ino && found_key.type > key.type))
2737 /* we check the next key in the node to make sure the leaf contains
2738 * only checksum items. This comparison doesn't work if our
2739 * leaf is the last one in the node
2741 if (path->slots[1] + 1 >= nritems) {
2743 /* search forward from the last key in the node, this
2744 * will bring us into the next node in the tree
2746 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2748 /* unlikely, but we inc below, so check to be safe */
2749 if (found_key.offset == (u64)-1)
2752 /* search_forward needs a path with locks held, do the
2753 * search again for the original key. It is possible
2754 * this will race with a balance and return a path that
2755 * we could modify, but this drop is just an optimization
2756 * and is allowed to miss some leaves.
2758 btrfs_release_path(root, path);
2761 /* setup a max key for search_forward */
2762 other_key.offset = (u64)-1;
2763 other_key.type = key.type;
2764 other_key.objectid = key.objectid;
2766 path->keep_locks = 1;
2767 ret = btrfs_search_forward(root, &found_key, &other_key,
2769 path->keep_locks = 0;
2770 if (ret || found_key.objectid != key.objectid ||
2771 found_key.type != key.type) {
2776 key.offset = found_key.offset;
2777 btrfs_release_path(root, path);
2782 /* we know there's one more slot after us in the tree,
2783 * read that key so we can verify it is also a checksum item
2785 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2787 if (found_key.objectid < inode->i_ino)
2790 if (found_key.type != key.type || found_key.offset < new_size)
2794 * if the key for the next leaf isn't a csum key from this objectid,
2795 * we can't be sure there aren't good items inside this leaf.
2798 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2801 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2802 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2804 * it is safe to delete this leaf, it contains only
2805 * csum items from this inode at an offset >= new_size
2807 ret = btrfs_del_leaf(trans, root, path, leaf_start);
2810 if (root->ref_cows && leaf_gen < trans->transid) {
2811 ref = btrfs_alloc_leaf_ref(root, 0);
2813 ref->root_gen = root->root_key.offset;
2814 ref->bytenr = leaf_start;
2816 ref->generation = leaf_gen;
2819 btrfs_sort_leaf_ref(ref);
2821 ret = btrfs_add_leaf_ref(root, ref, 0);
2823 btrfs_free_leaf_ref(root, ref);
2829 btrfs_release_path(root, path);
2831 if (other_key.objectid == inode->i_ino &&
2832 other_key.type == key.type && other_key.offset > key.offset) {
2833 key.offset = other_key.offset;
2839 /* fixup any changes we've made to the path */
2840 path->lowest_level = 0;
2841 path->keep_locks = 0;
2842 btrfs_release_path(root, path);
2849 * this can truncate away extent items, csum items and directory items.
2850 * It starts at a high offset and removes keys until it can't find
2851 * any higher than new_size
2853 * csum items that cross the new i_size are truncated to the new size
2856 * min_type is the minimum key type to truncate down to. If set to 0, this
2857 * will kill all the items on this inode, including the INODE_ITEM_KEY.
2859 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2860 struct btrfs_root *root,
2861 struct inode *inode,
2862 u64 new_size, u32 min_type)
2864 struct btrfs_path *path;
2865 struct extent_buffer *leaf;
2866 struct btrfs_file_extent_item *fi;
2867 struct btrfs_key key;
2868 struct btrfs_key found_key;
2869 u64 extent_start = 0;
2870 u64 extent_num_bytes = 0;
2871 u64 extent_offset = 0;
2873 u64 mask = root->sectorsize - 1;
2874 u32 found_type = (u8)-1;
2877 int pending_del_nr = 0;
2878 int pending_del_slot = 0;
2879 int extent_type = -1;
2884 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
2887 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2889 path = btrfs_alloc_path();
2893 key.objectid = inode->i_ino;
2894 key.offset = (u64)-1;
2898 path->leave_spinning = 1;
2899 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2906 /* there are no items in the tree for us to truncate, we're
2909 if (path->slots[0] == 0)
2916 leaf = path->nodes[0];
2917 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2918 found_type = btrfs_key_type(&found_key);
2921 if (found_key.objectid != inode->i_ino)
2924 if (found_type < min_type)
2927 item_end = found_key.offset;
2928 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2929 fi = btrfs_item_ptr(leaf, path->slots[0],
2930 struct btrfs_file_extent_item);
2931 extent_type = btrfs_file_extent_type(leaf, fi);
2932 encoding = btrfs_file_extent_compression(leaf, fi);
2933 encoding |= btrfs_file_extent_encryption(leaf, fi);
2934 encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2936 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2937 item_end +=
2938 btrfs_file_extent_num_bytes(leaf, fi);
2939 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2940 item_end += btrfs_file_extent_inline_len(leaf,
2945 if (found_type > min_type) {
2948 if (item_end < new_size)
2950 if (found_key.offset >= new_size)
2956 /* FIXME, shrink the extent if the ref count is only 1 */
2957 if (found_type != BTRFS_EXTENT_DATA_KEY)
2960 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2962 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2963 if (!del_item && !encoding) {
2964 u64 orig_num_bytes =
2965 btrfs_file_extent_num_bytes(leaf, fi);
2966 extent_num_bytes = new_size -
2967 found_key.offset + root->sectorsize - 1;
2968 extent_num_bytes = extent_num_bytes &
2969 ~((u64)root->sectorsize - 1);
2970 btrfs_set_file_extent_num_bytes(leaf, fi,
2972 num_dec = (orig_num_bytes -
2974 if (root->ref_cows && extent_start != 0)
2975 inode_sub_bytes(inode, num_dec);
2976 btrfs_mark_buffer_dirty(leaf);
2979 btrfs_file_extent_disk_num_bytes(leaf,
2981 extent_offset = found_key.offset -
2982 btrfs_file_extent_offset(leaf, fi);
2984 /* FIXME blocksize != 4096 */
2985 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2986 if (extent_start != 0) {
2989 inode_sub_bytes(inode, num_dec);
2992 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2994 * we can't truncate inline items that have had
2995 * special encodings
2997 if (!del_item &&
2998 btrfs_file_extent_compression(leaf, fi) == 0 &&
2999 btrfs_file_extent_encryption(leaf, fi) == 0 &&
3000 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
3001 u32 size = new_size - found_key.offset;
3003 if (root->ref_cows) {
3004 inode_sub_bytes(inode, item_end + 1 -
3008 btrfs_file_extent_calc_inline_size(size);
3009 ret = btrfs_truncate_item(trans, root, path,
3012 } else if (root->ref_cows) {
3013 inode_sub_bytes(inode, item_end + 1 -
3019 if (!pending_del_nr) {
3020 /* no pending yet, add ourselves */
3021 pending_del_slot = path->slots[0];
3023 } else if (pending_del_nr &&
3024 path->slots[0] + 1 == pending_del_slot) {
3025 /* hop on the pending chunk */
3027 pending_del_slot = path->slots[0];
3034 if (found_extent && root->ref_cows) {
3035 btrfs_set_path_blocking(path);
3036 ret = btrfs_free_extent(trans, root, extent_start,
3037 extent_num_bytes, 0,
3038 btrfs_header_owner(leaf),
3039 inode->i_ino, extent_offset);
3043 if (found_type == BTRFS_INODE_ITEM_KEY)
3046 if (path->slots[0] == 0 ||
3047 path->slots[0] != pending_del_slot) {
3048 if (root->ref_cows) {
3052 if (pending_del_nr) {
3053 ret = btrfs_del_items(trans, root, path,
3059 btrfs_release_path(root, path);
3066 if (pending_del_nr) {
3067 ret = btrfs_del_items(trans, root, path, pending_del_slot,
3070 btrfs_free_path(path);
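/*
 * Illustration only: the pending_del_{slot,nr} bookkeeping above batches
 * slots that are visited in descending, contiguous order into a single
 * delete call.  Userspace sketch; delete_range() is a hypothetical
 * stand-in for btrfs_del_items().
 */
#if 0
static void batch_delete(const int *slots, int n,
			 void (*delete_range)(int first, int count))
{
	int pending_slot = 0, pending_nr = 0;

	for (int i = 0; i < n; i++) {
		if (!pending_nr) {
			/* no pending yet, add ourselves */
			pending_slot = slots[i];
			pending_nr = 1;
		} else if (slots[i] + 1 == pending_slot) {
			/* hop on the pending chunk */
			pending_slot = slots[i];
			pending_nr++;
		} else {
			/* not contiguous: flush, then start a new chunk */
			delete_range(pending_slot, pending_nr);
			pending_slot = slots[i];
			pending_nr = 1;
		}
	}
	if (pending_nr)
		delete_range(pending_slot, pending_nr);
}
#endif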
3075 * taken from block_truncate_page, but does cow as it zeros out
3076 * any bytes left in the last page in the file.
3078 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3080 struct inode *inode = mapping->host;
3081 struct btrfs_root *root = BTRFS_I(inode)->root;
3082 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3083 struct btrfs_ordered_extent *ordered;
3085 u32 blocksize = root->sectorsize;
3086 pgoff_t index = from >> PAGE_CACHE_SHIFT;
3087 unsigned offset = from & (PAGE_CACHE_SIZE-1);
3093 if ((offset & (blocksize - 1)) == 0)
3095 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
3099 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
3105 page = grab_cache_page(mapping, index);
3107 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3108 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3112 page_start = page_offset(page);
3113 page_end = page_start + PAGE_CACHE_SIZE - 1;
3115 if (!PageUptodate(page)) {
3116 ret = btrfs_readpage(NULL, page);
3118 if (page->mapping != mapping) {
3120 page_cache_release(page);
3123 if (!PageUptodate(page)) {
3128 wait_on_page_writeback(page);
3130 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3131 set_page_extent_mapped(page);
3133 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3135 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3137 page_cache_release(page);
3138 btrfs_start_ordered_extent(inode, ordered, 1);
3139 btrfs_put_ordered_extent(ordered);
3143 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
3144 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
3147 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
3149 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3154 if (offset != PAGE_CACHE_SIZE) {
3156 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
3157 flush_dcache_page(page);
3160 ClearPageChecked(page);
3161 set_page_dirty(page);
3162 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3166 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3167 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3169 page_cache_release(page);
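/*
 * Illustration only: the index/offset split above is plain shift-and-mask
 * arithmetic.  Sketch assuming a 4096-byte page, zeroing everything from
 * the offset to the end of the page the way btrfs_truncate_page does.
 */
#if 0
#include <stdint.h>
#include <string.h>

#define TOY_PAGE_SHIFT 12
#define TOY_PAGE_SIZE  (1UL << TOY_PAGE_SHIFT)

static uint64_t page_index(uint64_t from)
{
	return from >> TOY_PAGE_SHIFT;		/* which page holds 'from' */
}

static void zero_tail(unsigned char *page, uint64_t from)
{
	size_t offset = from & (TOY_PAGE_SIZE - 1);	/* where inside it */

	if (offset)	/* an aligned 'from' leaves nothing to zero */
		memset(page + offset, 0, TOY_PAGE_SIZE - offset);
}
#endif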
3174 int btrfs_cont_expand(struct inode *inode, loff_t size)
3176 struct btrfs_trans_handle *trans;
3177 struct btrfs_root *root = BTRFS_I(inode)->root;
3178 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3179 struct extent_map *em;
3180 u64 mask = root->sectorsize - 1;
3181 u64 hole_start = (inode->i_size + mask) & ~mask;
3182 u64 block_end = (size + mask) & ~mask;
3188 if (size <= hole_start)
3192 struct btrfs_ordered_extent *ordered;
3193 btrfs_wait_ordered_range(inode, hole_start,
3194 block_end - hole_start);
3195 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3196 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3199 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3200 btrfs_put_ordered_extent(ordered);
3203 cur_offset = hole_start;
3205 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3206 block_end - cur_offset, 0);
3207 BUG_ON(IS_ERR(em) || !em);
3208 last_byte = min(extent_map_end(em), block_end);
3209 last_byte = (last_byte + mask) & ~mask;
3210 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3212 hole_size = last_byte - cur_offset;
3214 err = btrfs_reserve_metadata_space(root, 2);
3218 trans = btrfs_start_transaction(root, 1);
3219 btrfs_set_trans_block_group(trans, inode);
3221 err = btrfs_drop_extents(trans, inode, cur_offset,
3222 cur_offset + hole_size,
3226 err = btrfs_insert_file_extent(trans, root,
3227 inode->i_ino, cur_offset, 0,
3228 0, hole_size, 0, hole_size,
3232 btrfs_drop_extent_cache(inode, hole_start,
3235 btrfs_end_transaction(trans, root);
3236 btrfs_unreserve_metadata_space(root, 2);
3238 free_extent_map(em);
3239 cur_offset = last_byte;
3240 if (cur_offset >= block_end)
3244 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
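/*
 * Illustration only: hole_start and block_end above use the standard
 * power-of-two rounding idiom.  Self-contained sketch; btrfs requires
 * sectorsize to be a power of two, which the assert makes explicit.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static uint64_t round_up_pow2(uint64_t x, uint64_t size)
{
	uint64_t mask = size - 1;

	assert(size && (size & mask) == 0);	/* power of two only */
	return (x + mask) & ~mask;
}

/* round_up_pow2(1, 4096) == 4096; round_up_pow2(8192, 4096) == 8192 */
#endif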
3248 static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
3250 struct btrfs_root *root = BTRFS_I(inode)->root;
3251 struct btrfs_trans_handle *trans;
3255 if (attr->ia_size == inode->i_size)
3258 if (attr->ia_size > inode->i_size) {
3259 unsigned long limit;
3260 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
3261 if (attr->ia_size > inode->i_sb->s_maxbytes)
3263 if (limit != RLIM_INFINITY && attr->ia_size > limit) {
3264 send_sig(SIGXFSZ, current, 0);
3269 ret = btrfs_reserve_metadata_space(root, 1);
3273 trans = btrfs_start_transaction(root, 1);
3274 btrfs_set_trans_block_group(trans, inode);
3276 ret = btrfs_orphan_add(trans, inode);
3279 nr = trans->blocks_used;
3280 btrfs_end_transaction(trans, root);
3281 btrfs_unreserve_metadata_space(root, 1);
3282 btrfs_btree_balance_dirty(root, nr);
3284 if (attr->ia_size > inode->i_size) {
3285 ret = btrfs_cont_expand(inode, attr->ia_size);
3287 btrfs_truncate(inode);
3291 i_size_write(inode, attr->ia_size);
3292 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
3294 trans = btrfs_start_transaction(root, 1);
3295 btrfs_set_trans_block_group(trans, inode);
3297 ret = btrfs_update_inode(trans, root, inode);
3299 if (inode->i_nlink > 0) {
3300 ret = btrfs_orphan_del(trans, inode);
3303 nr = trans->blocks_used;
3304 btrfs_end_transaction(trans, root);
3305 btrfs_btree_balance_dirty(root, nr);
3310 * We're truncating a file that used to have good data down to
3311 * zero. Make sure it gets into the ordered flush list so that
3312 * any new writes get down to disk quickly.
3314 if (attr->ia_size == 0)
3315 BTRFS_I(inode)->ordered_data_close = 1;
3317 /* we don't support swapfiles, so vmtruncate shouldn't fail */
3318 ret = vmtruncate(inode, attr->ia_size);
3324 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3326 struct inode *inode = dentry->d_inode;
3329 err = inode_change_ok(inode, attr);
3333 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3334 err = btrfs_setattr_size(inode, attr);
3338 attr->ia_valid &= ~ATTR_SIZE;
3341 err = inode_setattr(inode, attr);
3343 if (!err && ((attr->ia_valid & ATTR_MODE)))
3344 err = btrfs_acl_chmod(inode);
3348 void btrfs_delete_inode(struct inode *inode)
3350 struct btrfs_trans_handle *trans;
3351 struct btrfs_root *root = BTRFS_I(inode)->root;
3355 truncate_inode_pages(&inode->i_data, 0);
3356 if (is_bad_inode(inode)) {
3357 btrfs_orphan_del(NULL, inode);
3360 btrfs_wait_ordered_range(inode, 0, (u64)-1);
3362 if (root->fs_info->log_root_recovering) {
3363 BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
3367 if (inode->i_nlink > 0) {
3368 BUG_ON(btrfs_root_refs(&root->root_item) != 0);
3372 btrfs_i_size_write(inode, 0);
3375 trans = btrfs_start_transaction(root, 1);
3376 btrfs_set_trans_block_group(trans, inode);
3377 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
3382 nr = trans->blocks_used;
3383 btrfs_end_transaction(trans, root);
3385 btrfs_btree_balance_dirty(root, nr);
3389 ret = btrfs_orphan_del(trans, inode);
3393 nr = trans->blocks_used;
3394 btrfs_end_transaction(trans, root);
3395 btrfs_btree_balance_dirty(root, nr);
3402 * this returns the key found in the dir entry in the location pointer.
3403 * If no dir entries were found, location->objectid is 0.
3405 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3406 struct btrfs_key *location)
3408 const char *name = dentry->d_name.name;
3409 int namelen = dentry->d_name.len;
3410 struct btrfs_dir_item *di;
3411 struct btrfs_path *path;
3412 struct btrfs_root *root = BTRFS_I(dir)->root;
3415 path = btrfs_alloc_path();
3418 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
3423 if (!di || IS_ERR(di))
3426 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3428 btrfs_free_path(path);
3431 location->objectid = 0;
3436 * when we hit a tree root in a directory, the btrfs part of the inode
3437 * needs to be changed to reflect the root directory of the tree root. This
3438 * is kind of like crossing a mount point.
3440 static int fixup_tree_root_location(struct btrfs_root *root,
3442 struct dentry *dentry,
3443 struct btrfs_key *location,
3444 struct btrfs_root **sub_root)
3446 struct btrfs_path *path;
3447 struct btrfs_root *new_root;
3448 struct btrfs_root_ref *ref;
3449 struct extent_buffer *leaf;
3453 path = btrfs_alloc_path();
3460 ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
3461 BTRFS_I(dir)->root->root_key.objectid,
3462 location->objectid);
3469 leaf = path->nodes[0];
3470 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3471 if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
3472 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3475 ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
3476 (unsigned long)(ref + 1),
3477 dentry->d_name.len);
3481 btrfs_release_path(root->fs_info->tree_root, path);
3483 new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
3484 if (IS_ERR(new_root)) {
3485 err = PTR_ERR(new_root);
3489 if (btrfs_root_refs(&new_root->root_item) == 0) {
3494 *sub_root = new_root;
3495 location->objectid = btrfs_root_dirid(&new_root->root_item);
3496 location->type = BTRFS_INODE_ITEM_KEY;
3497 location->offset = 0;
3500 btrfs_free_path(path);
3504 static void inode_tree_add(struct inode *inode)
3506 struct btrfs_root *root = BTRFS_I(inode)->root;
3507 struct btrfs_inode *entry;
3509 struct rb_node *parent;
3511 p = &root->inode_tree.rb_node;
3514 if (hlist_unhashed(&inode->i_hash))
3517 spin_lock(&root->inode_lock);
3520 entry = rb_entry(parent, struct btrfs_inode, rb_node);
3522 if (inode->i_ino < entry->vfs_inode.i_ino)
3523 p = &parent->rb_left;
3524 else if (inode->i_ino > entry->vfs_inode.i_ino)
3525 p = &parent->rb_right;
3527 WARN_ON(!(entry->vfs_inode.i_state &
3528 (I_WILL_FREE | I_FREEING | I_CLEAR)));
3529 rb_erase(parent, &root->inode_tree);
3530 RB_CLEAR_NODE(parent);
3531 spin_unlock(&root->inode_lock);
3535 rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
3536 rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3537 spin_unlock(&root->inode_lock);
3540 static void inode_tree_del(struct inode *inode)
3542 struct btrfs_root *root = BTRFS_I(inode)->root;
3545 spin_lock(&root->inode_lock);
3546 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3547 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3548 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3549 empty = RB_EMPTY_ROOT(&root->inode_tree);
3551 spin_unlock(&root->inode_lock);
3553 if (empty && btrfs_root_refs(&root->root_item) == 0) {
3554 synchronize_srcu(&root->fs_info->subvol_srcu);
3555 spin_lock(&root->inode_lock);
3556 empty = RB_EMPTY_ROOT(&root->inode_tree);
3557 spin_unlock(&root->inode_lock);
3559 btrfs_add_dead_root(root);
3563 int btrfs_invalidate_inodes(struct btrfs_root *root)
3565 struct rb_node *node;
3566 struct rb_node *prev;
3567 struct btrfs_inode *entry;
3568 struct inode *inode;
3571 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
3573 spin_lock(&root->inode_lock);
3575 node = root->inode_tree.rb_node;
3579 entry = rb_entry(node, struct btrfs_inode, rb_node);
3581 if (objectid < entry->vfs_inode.i_ino)
3582 node = node->rb_left;
3583 else if (objectid > entry->vfs_inode.i_ino)
3584 node = node->rb_right;
3590 entry = rb_entry(prev, struct btrfs_inode, rb_node);
3591 if (objectid <= entry->vfs_inode.i_ino) {
3595 prev = rb_next(prev);
3599 entry = rb_entry(node, struct btrfs_inode, rb_node);
3600 objectid = entry->vfs_inode.i_ino + 1;
3601 inode = igrab(&entry->vfs_inode);
3603 spin_unlock(&root->inode_lock);
3604 if (atomic_read(&inode->i_count) > 1)
3605 d_prune_aliases(inode);
3607 * btrfs_drop_inode will remove it from
3608 * the inode cache when its usage count
3609 * hits zero.
3613 spin_lock(&root->inode_lock);
3617 if (cond_resched_lock(&root->inode_lock))
3620 node = rb_next(node);
3622 spin_unlock(&root->inode_lock);
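/*
 * Illustration only: the scan above never holds the lock across iput();
 * it remembers the last objectid and re-searches from objectid + 1.  A
 * sketch of that resumable-iteration pattern over a sorted array (the
 * kernel re-descends an rb-tree instead of re-scanning linearly).
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static void resumable_scan(const uint64_t *sorted, size_t n,
			   void (*visit)(uint64_t))
{
	uint64_t next = 0;

	for (;;) {
		size_t i = 0;

		/* "lock held": find the first entry >= next */
		while (i < n && sorted[i] < next)
			i++;
		if (i == n)
			break;
		next = sorted[i] + 1;	/* resume point for the next pass */
		/* "lock dropped" while the callback runs */
		visit(sorted[i]);
	}
}
#endif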
3626 static noinline void init_btrfs_i(struct inode *inode)
3628 struct btrfs_inode *bi = BTRFS_I(inode);
3633 bi->last_sub_trans = 0;
3634 bi->logged_trans = 0;
3635 bi->delalloc_bytes = 0;
3636 bi->reserved_bytes = 0;
3637 bi->disk_i_size = 0;
3639 bi->index_cnt = (u64)-1;
3640 bi->last_unlink_trans = 0;
3641 bi->ordered_data_close = 0;
3642 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3643 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3644 inode->i_mapping, GFP_NOFS);
3645 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3646 inode->i_mapping, GFP_NOFS);
3647 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3648 INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3649 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3650 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3651 mutex_init(&BTRFS_I(inode)->log_mutex);
3654 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3656 struct btrfs_iget_args *args = p;
3657 inode->i_ino = args->ino;
3658 init_btrfs_i(inode);
3659 BTRFS_I(inode)->root = args->root;
3660 btrfs_set_inode_space_info(args->root, inode);
3664 static int btrfs_find_actor(struct inode *inode, void *opaque)
3666 struct btrfs_iget_args *args = opaque;
3667 return args->ino == inode->i_ino &&
3668 args->root == BTRFS_I(inode)->root;
3671 static struct inode *btrfs_iget_locked(struct super_block *s,
3673 struct btrfs_root *root)
3675 struct inode *inode;
3676 struct btrfs_iget_args args;
3677 args.ino = objectid;
3680 inode = iget5_locked(s, objectid, btrfs_find_actor,
3681 btrfs_init_locked_inode,
3686 /* Get an inode object given its location and corresponding root.
3687 * Returns in *new if the inode was read from disk
3689 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3690 struct btrfs_root *root, int *new)
3692 struct inode *inode;
3694 inode = btrfs_iget_locked(s, location->objectid, root);
3696 return ERR_PTR(-ENOMEM);
3698 if (inode->i_state & I_NEW) {
3699 BTRFS_I(inode)->root = root;
3700 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3701 btrfs_read_locked_inode(inode);
3703 inode_tree_add(inode);
3704 unlock_new_inode(inode);
3712 static struct inode *new_simple_dir(struct super_block *s,
3713 struct btrfs_key *key,
3714 struct btrfs_root *root)
3716 struct inode *inode = new_inode(s);
3719 return ERR_PTR(-ENOMEM);
3721 init_btrfs_i(inode);
3723 BTRFS_I(inode)->root = root;
3724 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
3725 BTRFS_I(inode)->dummy_inode = 1;
3727 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
3728 inode->i_op = &simple_dir_inode_operations;
3729 inode->i_fop = &simple_dir_operations;
3730 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
3731 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3736 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3738 struct inode *inode;
3739 struct btrfs_root *root = BTRFS_I(dir)->root;
3740 struct btrfs_root *sub_root = root;
3741 struct btrfs_key location;
3745 dentry->d_op = &btrfs_dentry_operations;
3747 if (dentry->d_name.len > BTRFS_NAME_LEN)
3748 return ERR_PTR(-ENAMETOOLONG);
3750 ret = btrfs_inode_by_name(dir, dentry, &location);
3753 return ERR_PTR(ret);
3755 if (location.objectid == 0)
3758 if (location.type == BTRFS_INODE_ITEM_KEY) {
3759 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
3763 BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
3765 index = srcu_read_lock(&root->fs_info->subvol_srcu);
3766 ret = fixup_tree_root_location(root, dir, dentry,
3767 &location, &sub_root);
3770 inode = ERR_PTR(ret);
3772 inode = new_simple_dir(dir->i_sb, &location, sub_root);
3774 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
3776 srcu_read_unlock(&root->fs_info->subvol_srcu, index);
3778 if (root != sub_root) {
3779 down_read(&root->fs_info->cleanup_work_sem);
3780 if (!(inode->i_sb->s_flags & MS_RDONLY))
3781 btrfs_orphan_cleanup(sub_root);
3782 up_read(&root->fs_info->cleanup_work_sem);
3788 static int btrfs_dentry_delete(struct dentry *dentry)
3790 struct btrfs_root *root;
3792 if (!dentry->d_inode && !IS_ROOT(dentry))
3793 dentry = dentry->d_parent;
3795 if (dentry->d_inode) {
3796 root = BTRFS_I(dentry->d_inode)->root;
3797 if (btrfs_root_refs(&root->root_item) == 0)
3803 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3804 struct nameidata *nd)
3806 struct inode *inode;
3808 inode = btrfs_lookup_dentry(dir, dentry);
3810 return ERR_CAST(inode);
3812 return d_splice_alias(inode, dentry);
3815 static unsigned char btrfs_filetype_table[] = {
3816 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
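/* indexed by the on-disk BTRFS_FT_* values; yields the DT_* codes filldir expects */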
3819 static int btrfs_real_readdir(struct file *filp, void *dirent,
3822 struct inode *inode = filp->f_dentry->d_inode;
3823 struct btrfs_root *root = BTRFS_I(inode)->root;
3824 struct btrfs_item *item;
3825 struct btrfs_dir_item *di;
3826 struct btrfs_key key;
3827 struct btrfs_key found_key;
3828 struct btrfs_path *path;
3831 struct extent_buffer *leaf;
3834 unsigned char d_type;
3839 int key_type = BTRFS_DIR_INDEX_KEY;
3844 /* FIXME, use a real flag for deciding about the key type */
3845 if (root->fs_info->tree_root == root)
3846 key_type = BTRFS_DIR_ITEM_KEY;
3848 /* special case for "." */
3849 if (filp->f_pos == 0) {
3850 over = filldir(dirent, ".", 1,
3857 /* special case for .., just use the back ref */
3858 if (filp->f_pos == 1) {
3859 u64 pino = parent_ino(filp->f_path.dentry);
3860 over = filldir(dirent, "..", 2,
3866 path = btrfs_alloc_path();
3869 btrfs_set_key_type(&key, key_type);
3870 key.offset = filp->f_pos;
3871 key.objectid = inode->i_ino;
3873 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3879 leaf = path->nodes[0];
3880 nritems = btrfs_header_nritems(leaf);
3881 slot = path->slots[0];
3882 if (advance || slot >= nritems) {
3883 if (slot >= nritems - 1) {
3884 ret = btrfs_next_leaf(root, path);
3887 leaf = path->nodes[0];
3888 nritems = btrfs_header_nritems(leaf);
3889 slot = path->slots[0];
3897 item = btrfs_item_nr(leaf, slot);
3898 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3900 if (found_key.objectid != key.objectid)
3902 if (btrfs_key_type(&found_key) != key_type)
3904 if (found_key.offset < filp->f_pos)
3907 filp->f_pos = found_key.offset;
3909 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3911 di_total = btrfs_item_size(leaf, item);
3913 while (di_cur < di_total) {
3914 struct btrfs_key location;
3916 name_len = btrfs_dir_name_len(leaf, di);
3917 if (name_len <= sizeof(tmp_name)) {
3918 name_ptr = tmp_name;
3920 name_ptr = kmalloc(name_len, GFP_NOFS);
3926 read_extent_buffer(leaf, name_ptr,
3927 (unsigned long)(di + 1), name_len);
3929 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3930 btrfs_dir_item_key_to_cpu(leaf, di, &location);
3932 /* is this a reference to our own snapshot? If so
3933 * skip it
3935 if (location.type == BTRFS_ROOT_ITEM_KEY &&
3936 location.objectid == root->root_key.objectid) {
3940 over = filldir(dirent, name_ptr, name_len,
3941 found_key.offset, location.objectid,
3945 if (name_ptr != tmp_name)
3950 di_len = btrfs_dir_name_len(leaf, di) +
3951 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3953 di = (struct btrfs_dir_item *)((char *)di + di_len);
3957 /* Reached end of directory/root. Bump pos past the last item. */
3958 if (key_type == BTRFS_DIR_INDEX_KEY)
3960 * 32-bit glibc will use getdents64, but then parse the offset
3961 * with strtol, so the last position we can serve is 0x7fffffff.
3963 filp->f_pos = 0x7fffffff;
3969 btrfs_free_path(path);
3973 int btrfs_write_inode(struct inode *inode, int wait)
3975 struct btrfs_root *root = BTRFS_I(inode)->root;
3976 struct btrfs_trans_handle *trans;
3979 if (root->fs_info->btree_inode == inode)
3983 trans = btrfs_join_transaction(root, 1);
3984 btrfs_set_trans_block_group(trans, inode);
3985 ret = btrfs_commit_transaction(trans, root);
3991 * This is somewhat expensive, updating the tree every time the
3992 * inode changes. But, it is most likely to find the inode in cache.
3993 * FIXME: needs more benchmarking; there are no reasons other than performance
3994 * to keep or drop this code.
3996 void btrfs_dirty_inode(struct inode *inode)
3998 struct btrfs_root *root = BTRFS_I(inode)->root;
3999 struct btrfs_trans_handle *trans;
4001 trans = btrfs_join_transaction(root, 1);
4002 btrfs_set_trans_block_group(trans, inode);
4003 btrfs_update_inode(trans, root, inode);
4004 btrfs_end_transaction(trans, root);
4008 * find the highest existing sequence number in a directory
4009 * and then set the in-memory index_cnt variable to reflect
4010 * free sequence numbers
4012 static int btrfs_set_inode_index_count(struct inode *inode)
4014 struct btrfs_root *root = BTRFS_I(inode)->root;
4015 struct btrfs_key key, found_key;
4016 struct btrfs_path *path;
4017 struct extent_buffer *leaf;
4020 key.objectid = inode->i_ino;
4021 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
4022 key.offset = (u64)-1;
4024 path = btrfs_alloc_path();
4028 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4031 /* FIXME: we should be able to handle this */
4037 * MAGIC NUMBER EXPLANATION:
4038 * since we search a directory based on f_pos, and '.' and '..' have
4039 * f_pos of 0 and 1 respectively, everybody
4040 * else has to start at 2
4042 if (path->slots[0] == 0) {
4043 BTRFS_I(inode)->index_cnt = 2;
4049 leaf = path->nodes[0];
4050 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4052 if (found_key.objectid != inode->i_ino ||
4053 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
4054 BTRFS_I(inode)->index_cnt = 2;
4058 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
4060 btrfs_free_path(path);
4065 * helper to find a free sequence number in a given directory. The current
4066 * code is very simple; later versions will do smarter things in the btree
4068 int btrfs_set_inode_index(struct inode *dir, u64 *index)
4072 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
4073 ret = btrfs_set_inode_index_count(dir);
4078 *index = BTRFS_I(dir)->index_cnt;
4079 BTRFS_I(dir)->index_cnt++;
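/*
 * Illustration only: a userspace sketch of the lazy, monotonic index
 * allocator above.  2 is the first free index because '.' and '..' own
 * f_pos 0 and 1 (see the MAGIC NUMBER comment); highest_existing_index()
 * is a stub standing in for the DIR_INDEX btree search.
 */
#if 0
#include <stdint.h>

struct toy_dir { uint64_t index_cnt; };	/* (uint64_t)-1 == not loaded yet */

static uint64_t highest_existing_index(struct toy_dir *dir)
{
	(void)dir;
	return 0;	/* stand-in: the real code reads the last DIR_INDEX key */
}

static uint64_t toy_next_index(struct toy_dir *dir)
{
	if (dir->index_cnt == (uint64_t)-1) {
		uint64_t hi = highest_existing_index(dir);

		dir->index_cnt = hi ? hi + 1 : 2;
	}
	return dir->index_cnt++;
}
#endif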
4084 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4085 struct btrfs_root *root,
4087 const char *name, int name_len,
4088 u64 ref_objectid, u64 objectid,
4089 u64 alloc_hint, int mode, u64 *index)
4091 struct inode *inode;
4092 struct btrfs_inode_item *inode_item;
4093 struct btrfs_key *location;
4094 struct btrfs_path *path;
4095 struct btrfs_inode_ref *ref;
4096 struct btrfs_key key[2];
4102 path = btrfs_alloc_path();
4105 inode = new_inode(root->fs_info->sb);
4107 return ERR_PTR(-ENOMEM);
4110 ret = btrfs_set_inode_index(dir, index);
4113 return ERR_PTR(ret);
4117 * index_cnt is ignored for everything but a dir,
4118 * btrfs_get_inode_index_count has an explanation for the magic
4121 init_btrfs_i(inode);
4122 BTRFS_I(inode)->index_cnt = 2;
4123 BTRFS_I(inode)->root = root;
4124 BTRFS_I(inode)->generation = trans->transid;
4125 btrfs_set_inode_space_info(root, inode);
4131 BTRFS_I(inode)->block_group =
4132 btrfs_find_block_group(root, 0, alloc_hint, owner);
4134 key[0].objectid = objectid;
4135 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
4138 key[1].objectid = objectid;
4139 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
4140 key[1].offset = ref_objectid;
4142 sizes[0] = sizeof(struct btrfs_inode_item);
4143 sizes[1] = name_len + sizeof(*ref);
4145 path->leave_spinning = 1;
4146 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
4150 inode->i_uid = current_fsuid();
4152 if (dir && (dir->i_mode & S_ISGID)) {
4153 inode->i_gid = dir->i_gid;
4157 inode->i_gid = current_fsgid();
4159 inode->i_mode = mode;
4160 inode->i_ino = objectid;
4161 inode_set_bytes(inode, 0);
4162 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4163 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4164 struct btrfs_inode_item);
4165 fill_inode_item(trans, path->nodes[0], inode_item, inode);
4167 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
4168 struct btrfs_inode_ref);
4169 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
4170 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
4171 ptr = (unsigned long)(ref + 1);
4172 write_extent_buffer(path->nodes[0], name, ptr, name_len);
4174 btrfs_mark_buffer_dirty(path->nodes[0]);
4175 btrfs_free_path(path);
4177 location = &BTRFS_I(inode)->location;
4178 location->objectid = objectid;
4179 location->offset = 0;
4180 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
4182 btrfs_inherit_iflags(inode, dir);
4184 if ((mode & S_IFREG)) {
4185 if (btrfs_test_opt(root, NODATASUM))
4186 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4187 if (btrfs_test_opt(root, NODATACOW))
4188 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
4191 insert_inode_hash(inode);
4192 inode_tree_add(inode);
4196 BTRFS_I(dir)->index_cnt--;
4197 btrfs_free_path(path);
4199 return ERR_PTR(ret);
4202 static inline u8 btrfs_inode_type(struct inode *inode)
4204 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
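/*
 * Illustration only: the lookup above turns the S_IFMT bits of i_mode into
 * a table index with one shift.  Self-contained userspace sketch; the
 * S_SHIFT value of 12 is an assumption matching the S_IFMT bit position.
 */
#if 0
#include <stdio.h>
#include <sys/stat.h>

#define TOY_S_SHIFT 12

static const char *toy_type_by_mode[S_IFMT >> TOY_S_SHIFT] = {
	[S_IFREG >> TOY_S_SHIFT] = "regular file",
	[S_IFDIR >> TOY_S_SHIFT] = "directory",
	[S_IFLNK >> TOY_S_SHIFT] = "symlink",
};

int main(void)
{
	mode_t mode = S_IFDIR | 0755;

	printf("%s\n", toy_type_by_mode[(mode & S_IFMT) >> TOY_S_SHIFT]);
	return 0;
}
#endif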
4208 * utility function to add 'inode' into 'parent_inode' with
4209 * a given name and a given sequence number.
4210 * if 'add_backref' is true, also insert a backref from the
4211 * inode to the parent directory.
4213 int btrfs_add_link(struct btrfs_trans_handle *trans,
4214 struct inode *parent_inode, struct inode *inode,
4215 const char *name, int name_len, int add_backref, u64 index)
4218 struct btrfs_key key;
4219 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4221 if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4222 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4224 key.objectid = inode->i_ino;
4225 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4229 if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4230 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4231 key.objectid, root->root_key.objectid,
4232 parent_inode->i_ino,
4233 index, name, name_len);
4234 } else if (add_backref) {
4235 ret = btrfs_insert_inode_ref(trans, root,
4236 name, name_len, inode->i_ino,
4237 parent_inode->i_ino, index);
4241 ret = btrfs_insert_dir_item(trans, root, name, name_len,
4242 parent_inode->i_ino, &key,
4243 btrfs_inode_type(inode), index);
4246 btrfs_i_size_write(parent_inode, parent_inode->i_size +
4248 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4249 ret = btrfs_update_inode(trans, root, parent_inode);
4254 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4255 struct dentry *dentry, struct inode *inode,
4256 int backref, u64 index)
4258 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4259 inode, dentry->d_name.name,
4260 dentry->d_name.len, backref, index);
4262 d_instantiate(dentry, inode);
4270 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4271 int mode, dev_t rdev)
4273 struct btrfs_trans_handle *trans;
4274 struct btrfs_root *root = BTRFS_I(dir)->root;
4275 struct inode *inode = NULL;
4279 unsigned long nr = 0;
4282 if (!new_valid_dev(rdev))
4286 * 2 for inode item and ref
4287 * 2 for dir items
4288 * 1 for xattr if selinux is on
4290 err = btrfs_reserve_metadata_space(root, 5);
4294 trans = btrfs_start_transaction(root, 1);
4297 btrfs_set_trans_block_group(trans, dir);
4299 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4305 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4307 dentry->d_parent->d_inode->i_ino, objectid,
4308 BTRFS_I(dir)->block_group, mode, &index);
4309 err = PTR_ERR(inode);
4313 err = btrfs_init_inode_security(trans, inode, dir);
4319 btrfs_set_trans_block_group(trans, inode);
4320 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4324 inode->i_op = &btrfs_special_inode_operations;
4325 init_special_inode(inode, inode->i_mode, rdev);
4326 btrfs_update_inode(trans, root, inode);
4328 btrfs_update_inode_block_group(trans, inode);
4329 btrfs_update_inode_block_group(trans, dir);
4331 nr = trans->blocks_used;
4332 btrfs_end_transaction_throttle(trans, root);
4334 btrfs_unreserve_metadata_space(root, 5);
4336 inode_dec_link_count(inode);
4339 btrfs_btree_balance_dirty(root, nr);
4343 static int btrfs_create(struct inode *dir, struct dentry *dentry,
4344 int mode, struct nameidata *nd)
4346 struct btrfs_trans_handle *trans;
4347 struct btrfs_root *root = BTRFS_I(dir)->root;
4348 struct inode *inode = NULL;
4351 unsigned long nr = 0;
4356 * 2 for inode item and ref
4357 * 2 for dir items
4358 * 1 for xattr if selinux is on
4360 err = btrfs_reserve_metadata_space(root, 5);
4364 trans = btrfs_start_transaction(root, 1);
4367 btrfs_set_trans_block_group(trans, dir);
4369 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4375 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4377 dentry->d_parent->d_inode->i_ino,
4378 objectid, BTRFS_I(dir)->block_group, mode,
4380 err = PTR_ERR(inode);
4384 err = btrfs_init_inode_security(trans, inode, dir);
4390 btrfs_set_trans_block_group(trans, inode);
4391 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4395 inode->i_mapping->a_ops = &btrfs_aops;
4396 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4397 inode->i_fop = &btrfs_file_operations;
4398 inode->i_op = &btrfs_file_inode_operations;
4399 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4401 btrfs_update_inode_block_group(trans, inode);
4402 btrfs_update_inode_block_group(trans, dir);
4404 nr = trans->blocks_used;
4405 btrfs_end_transaction_throttle(trans, root);
4407 btrfs_unreserve_metadata_space(root, 5);
4409 inode_dec_link_count(inode);
4412 btrfs_btree_balance_dirty(root, nr);
4416 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4417 struct dentry *dentry)
4419 struct btrfs_trans_handle *trans;
4420 struct btrfs_root *root = BTRFS_I(dir)->root;
4421 struct inode *inode = old_dentry->d_inode;
4423 unsigned long nr = 0;
4427 if (inode->i_nlink == 0)
4430 /* do not allow cross-subvolume sys_link on the same device */
4431 if (root->objectid != BTRFS_I(inode)->root->objectid)
4435 * 1 item for inode ref
4436 * 2 items for dir items
4438 err = btrfs_reserve_metadata_space(root, 3);
4442 btrfs_inc_nlink(inode);
4444 err = btrfs_set_inode_index(dir, &index);
4448 trans = btrfs_start_transaction(root, 1);
4450 btrfs_set_trans_block_group(trans, dir);
4451 atomic_inc(&inode->i_count);
4453 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
4458 btrfs_update_inode_block_group(trans, dir);
4459 err = btrfs_update_inode(trans, root, inode);
4461 btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
4464 nr = trans->blocks_used;
4465 btrfs_end_transaction_throttle(trans, root);
4467 btrfs_unreserve_metadata_space(root, 3);
4469 inode_dec_link_count(inode);
4472 btrfs_btree_balance_dirty(root, nr);
4476 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4478 struct inode *inode = NULL;
4479 struct btrfs_trans_handle *trans;
4480 struct btrfs_root *root = BTRFS_I(dir)->root;
4482 int drop_on_err = 0;
4485 unsigned long nr = 1;
4488 * 2 items for inode and ref
4489 * 2 items for dir items
4490 * 1 for xattr if selinux is on
4492 err = btrfs_reserve_metadata_space(root, 5);
4496 trans = btrfs_start_transaction(root, 1);
4501 btrfs_set_trans_block_group(trans, dir);
4503 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4509 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4511 dentry->d_parent->d_inode->i_ino, objectid,
4512 BTRFS_I(dir)->block_group, S_IFDIR | mode,
4514 if (IS_ERR(inode)) {
4515 err = PTR_ERR(inode);
4521 err = btrfs_init_inode_security(trans, inode, dir);
4525 inode->i_op = &btrfs_dir_inode_operations;
4526 inode->i_fop = &btrfs_dir_file_operations;
4527 btrfs_set_trans_block_group(trans, inode);
4529 btrfs_i_size_write(inode, 0);
4530 err = btrfs_update_inode(trans, root, inode);
4534 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4535 inode, dentry->d_name.name,
4536 dentry->d_name.len, 0, index);
4540 d_instantiate(dentry, inode);
4542 btrfs_update_inode_block_group(trans, inode);
4543 btrfs_update_inode_block_group(trans, dir);
4546 nr = trans->blocks_used;
4547 btrfs_end_transaction_throttle(trans, root);
4550 btrfs_unreserve_metadata_space(root, 5);
4553 btrfs_btree_balance_dirty(root, nr);
4557 /* helper for btrfs_get_extent. Given an existing extent in the tree,
4558 * and an extent that you want to insert, deal with overlap and insert
4559 * the new extent into the tree.
4561 static int merge_extent_mapping(struct extent_map_tree *em_tree,
4562 struct extent_map *existing,
4563 struct extent_map *em,
4564 u64 map_start, u64 map_len)
4568 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
4569 start_diff = map_start - em->start;
4570 em->start = map_start;
4572 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
4573 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
4574 em->block_start += start_diff;
4575 em->block_len -= start_diff;
4577 return add_extent_mapping(em_tree, em);
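/*
 * Illustration only: the start_diff adjustment above, reduced to its core.
 * The em is trimmed so it begins at map_start and covers the requested
 * map_len, and the disk offset moves forward by the same amount (valid
 * only for uncompressed extents, which is exactly the case the code above
 * guards with EXTENT_FLAG_COMPRESSED).
 */
#if 0
#include <stdint.h>

struct toy_em { uint64_t start, len, block_start; };

static void trim_to_request(struct toy_em *em, uint64_t map_start,
			    uint64_t map_len)
{
	uint64_t diff = map_start - em->start;	/* caller checks map_start is inside em */

	em->start = map_start;
	em->len = map_len;
	em->block_start += diff;	/* keep file<->disk offsets in step */
}
#endif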
4580 static noinline int uncompress_inline(struct btrfs_path *path,
4581 struct inode *inode, struct page *page,
4582 size_t pg_offset, u64 extent_offset,
4583 struct btrfs_file_extent_item *item)
4586 struct extent_buffer *leaf = path->nodes[0];
4589 unsigned long inline_size;
4592 WARN_ON(pg_offset != 0);
4593 max_size = btrfs_file_extent_ram_bytes(leaf, item);
4594 inline_size = btrfs_file_extent_inline_item_len(leaf,
4595 btrfs_item_nr(leaf, path->slots[0]));
4596 tmp = kmalloc(inline_size, GFP_NOFS);
4597 ptr = btrfs_file_extent_inline_start(item);
4599 read_extent_buffer(leaf, tmp, ptr, inline_size);
4601 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
4602 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
4603 inline_size, max_size);
4605 char *kaddr = kmap_atomic(page, KM_USER0);
4606 unsigned long copy_size = min_t(u64,
4607 PAGE_CACHE_SIZE - pg_offset,
4608 max_size - extent_offset);
4609 memset(kaddr + pg_offset, 0, copy_size);
4610 kunmap_atomic(kaddr, KM_USER0);
4617 * a bit scary, this does extent mapping from logical file offset to the disk.
4618 * the ugly parts come from merging extents from the disk with the in-ram
4619 * representation. This gets more complex because of the data=ordered code,
4620 * where the in-ram extents might be locked pending data=ordered completion.
4622 * This also copies inline extents directly into the page.
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
				    size_t pg_offset, u64 start, u64 len,
				    int create)
{
	int ret;
	int err = 0;
	u64 bytenr;
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = inode->i_ino;
	u32 found_type;
	struct btrfs_path *path = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *item;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_trans_handle *trans = NULL;
	int compressed;

again:
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em)
		em->bdev = root->fs_info->fs_devices->latest_bdev;
	read_unlock(&em_tree->lock);

	if (em) {
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
			free_extent_map(em);
		else
			goto out;
	}
	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		err = -ENOMEM;
		goto out;
	}
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	em->start = EXTENT_MAP_HOLE;
	em->orig_start = EXTENT_MAP_HOLE;
	em->len = (u64)-1;
	em->block_len = (u64)-1;

	if (!path) {
		path = btrfs_alloc_path();
		BUG_ON(!path);
	}

	ret = btrfs_lookup_file_extent(trans, root, path,
				       objectid, start, trans != NULL);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret != 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	/* are we inside the extent that was found? */
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	found_type = btrfs_key_type(&found_key);
	if (found_key.objectid != objectid ||
	    found_type != BTRFS_EXTENT_DATA_KEY) {
		goto not_found;
	}

	found_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	compressed = btrfs_file_extent_compression(leaf, item);
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
		       btrfs_file_extent_num_bytes(leaf, item);
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, item);
		extent_end = (extent_start + size + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	}

	if (start >= extent_end) {
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
			}
			if (ret > 0)
				goto not_found;
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		em->start = start;
		em->len = found_key.offset - start;
		goto not_found_em;
	}

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
				 btrfs_file_extent_offset(leaf, item);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			goto insert;
		}
		if (compressed) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->block_start = bytenr;
			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
									 item);
		} else {
			bytenr += btrfs_file_extent_offset(leaf, item);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
		goto insert;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		unsigned long ptr;
		char *map;
		size_t size;
		size_t extent_offset;
		size_t copy_size;

		em->block_start = EXTENT_MAP_INLINE;
		if (!page || create) {
			em->start = extent_start;
			em->len = extent_end - extent_start;
			goto out;
		}

		size = btrfs_file_extent_inline_len(leaf, item);
		extent_offset = page_offset(page) + pg_offset - extent_start;
		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
				size - extent_offset);
		em->start = extent_start + extent_offset;
		em->len = (copy_size + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
		em->orig_start = EXTENT_MAP_INLINE;
		if (compressed)
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
		if (create == 0 && !PageUptodate(page)) {
			if (btrfs_file_extent_compression(leaf, item) ==
			    BTRFS_COMPRESS_ZLIB) {
				ret = uncompress_inline(path, inode, page,
							pg_offset,
							extent_offset, item);
				BUG_ON(ret);
			} else {
				map = kmap(page);
				read_extent_buffer(leaf, map + pg_offset, ptr,
						   copy_size);
				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
					memset(map + pg_offset + copy_size, 0,
					       PAGE_CACHE_SIZE - pg_offset -
					       copy_size);
				}
				kunmap(page);
			}
			flush_dcache_page(page);
		} else if (create && PageUptodate(page)) {
			WARN_ON(1);
			if (!trans) {
				free_extent_map(em);
				em = NULL;
				btrfs_release_path(root, path);
				trans = btrfs_join_transaction(root, 1);
				goto again;
			}
			map = kmap(page);
			write_extent_buffer(leaf, map + pg_offset, ptr,
					    copy_size);
			kunmap(page);
			btrfs_mark_buffer_dirty(leaf);
		}
		set_extent_uptodate(io_tree, em->start,
				    extent_map_end(em) - 1, GFP_NOFS);
		goto insert;
	} else {
		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
		WARN_ON(1);
	}
not_found:
	em->start = start;
	em->len = len;
not_found_em:
	em->block_start = EXTENT_MAP_HOLE;
	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
	btrfs_release_path(root, path);
	if (em->start > start || extent_map_end(em) <= start) {
		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
		       "[%llu %llu]\n", (unsigned long long)em->start,
		       (unsigned long long)em->len,
		       (unsigned long long)start,
		       (unsigned long long)len);
		err = -EIO;
		goto out;
	}

	err = 0;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	/* it is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = lookup_extent_mapping(em_tree, start, len);
		if (existing && (existing->start > start ||
		    existing->start + existing->len <= start)) {
			free_extent_map(existing);
			existing = NULL;
		}
		if (!existing) {
			existing = lookup_extent_mapping(em_tree, em->start,
							 em->len);
			if (existing) {
				err = merge_extent_mapping(em_tree, existing,
							   em, start,
							   root->sectorsize);
				free_extent_map(existing);
				if (err) {
					free_extent_map(em);
					em = NULL;
				}
			} else {
				err = -EIO;
				free_extent_map(em);
				em = NULL;
			}
		} else {
			free_extent_map(em);
			em = existing;
			err = 0;
		}
	}
	write_unlock(&em_tree->lock);
out:
	if (path)
		btrfs_free_path(path);
	if (trans) {
		ret = btrfs_end_transaction(trans, root);
		if (!err)
			err = ret;
	}
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}
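
/*
 * btrfs_get_extent() above is also the ->get_extent callback handed to the
 * generic extent_io code: readpage/readpages, writepage(s) and fiemap below
 * all funnel their offset-to-extent lookups through it, which is why it has
 * to cope with cached, on-disk and inline state in one place.
 */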
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	/* direct IO is not supported yet */
	return -EINVAL;
}
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
}
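
/*
 * illustrative call chain (not from the original source):
 * ioctl(fd, FS_IOC_FIEMAP, &fiemap) -> the VFS fiemap helper ->
 * btrfs_fiemap() -> extent_fiemap(), with btrfs_get_extent as the callback
 * used to walk every hole and extent in the requested range.
 */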
int btrfs_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btrfs_get_extent);
}
static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	/* called from direct reclaim: doing btree work here risks
	 * deadlock, so just redirty the page and let kswapd retry
	 */
	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
}
int btrfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
}
static int
btrfs_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_readpages(tree, mapping, pages, nr_pages,
				btrfs_get_extent);
}
static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;
	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
	if (ret == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
	return ret;
}
static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;
	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
}
static void btrfs_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	struct btrfs_ordered_extent *ordered;
	u64 page_start = page_offset(page);
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;

	/*
	 * we have the page locked, so new writeback can't start,
	 * and the dirty bit won't be cleared while we are here.
	 *
	 * Wait for IO on this page so that we can safely clear
	 * the PagePrivate2 bit and do ordered accounting
	 */
	wait_on_page_writeback(page);

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (offset) {
		btrfs_releasepage(page, GFP_NOFS);
		return;
	}
	lock_extent(tree, page_start, page_end, GFP_NOFS);
	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
					   page_offset(page));
	if (ordered) {
		/*
		 * IO on this page will never be started, so we need
		 * to account for any ordered extents now
		 */
		clear_extent_bit(tree, page_start, page_end,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
				 NULL, GFP_NOFS);
		/*
		 * whoever cleared the private bit is responsible
		 * for the finish_ordered_io
		 */
		if (TestClearPagePrivate2(page)) {
			btrfs_finish_ordered_io(page->mapping->host,
						page_start, page_end);
		}
		btrfs_put_ordered_extent(ordered);
		lock_extent(tree, page_start, page_end, GFP_NOFS);
	}
	clear_extent_bit(tree, page_start, page_end,
		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
		 EXTENT_DO_ACCOUNTING, 1, 1, NULL, GFP_NOFS);
	__btrfs_releasepage(page, GFP_NOFS);

	ClearPageChecked(page);
	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}
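
/*
 * note on the PagePrivate2 dance above: the bit marks a page that an
 * ordered extent is still accounting for, and whichever path clears it
 * (ordered IO completion or the invalidate above) owns the
 * btrfs_finish_ordered_io() call, so the accounting can never run twice.
 */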
/*
 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * vmtruncate() writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = fdentry(vma->vm_file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	char *kaddr;
	unsigned long zero_start;
	loff_t size;
	int ret;
	u64 page_start;
	u64 page_end;

	ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
	if (ret) {
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else /* -ENOSPC, -EIO, etc */
			ret = VM_FAULT_SIGBUS;
		goto out;
	}

	ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
	if (ret) {
		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
	lock_page(page);
	size = i_size_read(inode);
	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if ((page->mapping != inode->i_mapping) ||
	    (page_start >= size)) {
		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
		/* page got truncated out from underneath us */
		goto out_unlock;
	}
	wait_on_page_writeback(page);

	lock_extent(io_tree, page_start, page_end, GFP_NOFS);
	set_page_extent_mapped(page);

	/*
	 * we can't set the delalloc bits if there are pending ordered
	 * extents.  Drop our locks and wait for them to finish
	 */
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	/*
	 * XXX - page_mkwrite gets called every time the page is dirtied, even
	 * if it was already dirty, so for space accounting reasons we need to
	 * clear any delalloc bits for the range we are fixing to save.  There
	 * is probably a better way to do this, but for now keep consistent
	 * with prepare_pages in the normal write path.
	 */
	clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
			  GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
	if (ret) {
		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
		ret = VM_FAULT_SIGBUS;
		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
		goto out_unlock;
	}
	ret = 0;

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_CACHE_SIZE > size)
		zero_start = size & ~PAGE_CACHE_MASK;
	else
		zero_start = PAGE_CACHE_SIZE;

	if (zero_start != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	SetPageUptodate(page);

	BTRFS_I(inode)->last_trans = root->fs_info->generation;
	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;

	unlock_extent(io_tree, page_start, page_end, GFP_NOFS);

out_unlock:
	btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
	if (!ret)
		return VM_FAULT_LOCKED;
	unlock_page(page);
out:
	return ret;
}
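
/*
 * worked example for the zeroing above (illustrative numbers): with 4k
 * pages and i_size = 5000, the file's second page is only partially inside
 * EOF, so zero_start = 5000 & ~PAGE_CACHE_MASK = 904 and bytes 904..4095
 * of that page are zeroed before it is dirtied, keeping stale data past
 * EOF from ever reaching disk.
 */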
static void btrfs_truncate(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;
	struct btrfs_trans_handle *trans;
	unsigned long nr;
	u64 mask = root->sectorsize - 1;

	if (!S_ISREG(inode->i_mode)) {
		WARN_ON(1);
		return;
	}

	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
	if (ret)
		return;

	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);

	trans = btrfs_start_transaction(root, 1);
	btrfs_set_trans_block_group(trans, inode);

	/*
	 * setattr is responsible for setting the ordered_data_close flag,
	 * but that is only tested during the last file release.  That
	 * could happen well after the next commit, leaving a great big
	 * window where new writes may get lost if someone chooses to write
	 * to this file after truncating to zero
	 *
	 * The inode doesn't have any dirty data here, and so if we commit
	 * this is a noop.  If someone immediately starts writing to the inode
	 * it is very likely we'll catch some of their writes in this
	 * transaction, and the commit will find this file on the ordered
	 * data list with good things to send down.
	 *
	 * This is a best effort solution, there is still a window where
	 * using truncate to replace the contents of the file will
	 * end up with a zero length file after a crash.
	 */
	if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
		btrfs_add_ordered_operation(trans, root, inode);

	while (1) {
		ret = btrfs_truncate_inode_items(trans, root, inode,
						 inode->i_size,
						 BTRFS_EXTENT_DATA_KEY);
		if (ret != -EAGAIN)
			break;

		ret = btrfs_update_inode(trans, root, inode);
		BUG_ON(ret);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(root, nr);

		trans = btrfs_start_transaction(root, 1);
		btrfs_set_trans_block_group(trans, inode);
	}

	if (ret == 0 && inode->i_nlink > 0) {
		ret = btrfs_orphan_del(trans, inode);
		BUG_ON(ret);
	}

	ret = btrfs_update_inode(trans, root, inode);
	BUG_ON(ret);

	nr = trans->blocks_used;
	ret = btrfs_end_transaction_throttle(trans, root);
	BUG_ON(ret);
	btrfs_btree_balance_dirty(root, nr);
}
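
/*
 * rough sketch of the loop above: btrfs_truncate_inode_items() returns
 * -EAGAIN whenever it wants the caller to end the current transaction and
 * open a fresh one, so truncating a very large file is split across many
 * small transactions instead of pinning one open for the whole run.
 */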
/*
 * create a new subvolume directory/inode (helper for the ioctl).
 */
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
			     struct btrfs_root *new_root,
			     u64 new_dirid, u64 alloc_hint)
{
	struct inode *inode;
	int err;
	u64 index = 0;

	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
				new_dirid, alloc_hint, S_IFDIR | 0700, &index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	inode->i_nlink = 1;
	btrfs_i_size_write(inode, 0);

	err = btrfs_update_inode(trans, new_root, inode);
	BUG_ON(err);

	iput(inode);
	return 0;
}
/* helper function for file defrag and space balancing.  This
 * forces readahead on a given range of bytes in an inode
 */
unsigned long btrfs_force_ra(struct address_space *mapping,
			      struct file_ra_state *ra, struct file *file,
			      pgoff_t offset, pgoff_t last_index)
{
	pgoff_t req_size = last_index - offset + 1;

	page_cache_sync_readahead(mapping, ra, file, offset, req_size);
	return offset + req_size;
}
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_inode *ei;

	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->outstanding_extents = 0;
	ei->reserved_extents = 0;
	ei->root = NULL;
	spin_lock_init(&ei->accounting_lock);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->i_orphan);
	INIT_LIST_HEAD(&ei->ordered_operations);
	return &ei->vfs_inode;
}
void btrfs_destroy_inode(struct inode *inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	WARN_ON(!list_empty(&inode->i_dentry));
	WARN_ON(inode->i_data.nrpages);

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		goto free;

	/*
	 * Make sure we're properly removed from the ordered operation
	 * lists.
	 */
	smp_mb();
	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
		spin_lock(&root->fs_info->ordered_extent_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_extent_lock);
	}

	spin_lock(&root->list_lock);
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
		       inode->i_ino);
		list_del_init(&BTRFS_I(inode)->i_orphan);
	}
	spin_unlock(&root->list_lock);

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		else {
			printk(KERN_ERR "btrfs found ordered "
			       "extent %llu %llu on inode cleanup\n",
			       (unsigned long long)ordered->file_offset,
			       (unsigned long long)ordered->len);
			btrfs_remove_ordered_extent(inode, ordered);
			/* one put for the lookup above, one for the ref
			 * the ordered extent would normally drop at IO
			 * completion
			 */
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
	inode_tree_del(inode);
	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
void btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (inode->i_nlink > 0 && btrfs_root_refs(&root->root_item) == 0)
		generic_delete_inode(inode);
	else
		generic_drop_inode(inode);
}
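
/*
 * roughly: root_refs == 0 means the subvolume this inode lives in has been
 * deleted, so there is no point caching the inode; it is evicted right away
 * even though its link count is still positive.
 */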
static void init_once(void *foo)
{
	struct btrfs_inode *ei = (struct btrfs_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}
void btrfs_destroy_cachep(void)
{
	if (btrfs_inode_cachep)
		kmem_cache_destroy(btrfs_inode_cachep);
	if (btrfs_trans_handle_cachep)
		kmem_cache_destroy(btrfs_trans_handle_cachep);
	if (btrfs_transaction_cachep)
		kmem_cache_destroy(btrfs_transaction_cachep);
	if (btrfs_path_cachep)
		kmem_cache_destroy(btrfs_path_cachep);
}
int btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
			sizeof(struct btrfs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
			sizeof(struct btrfs_trans_handle), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_trans_handle_cachep)
		goto fail;

	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
			sizeof(struct btrfs_transaction), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_transaction_cachep)
		goto fail;

	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
			sizeof(struct btrfs_path), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_path_cachep)
		goto fail;

	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}
static int btrfs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
	stat->blksize = PAGE_CACHE_SIZE;
	stat->blocks = (inode_get_bytes(inode) +
			BTRFS_I(inode)->delalloc_bytes) >> 9;
	return 0;
}
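
/*
 * note on the math above: st_blocks is reported in 512-byte units, and
 * delalloc_bytes is added so dirty-but-unallocated data shows up in
 * stat(2) right away. For example, 1MiB of freshly buffered writes adds
 * 1048576 >> 9 = 2048 blocks before any extent is allocated on disk.
 */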
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			   struct inode *new_dir, struct dentry *new_dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec ctime = CURRENT_TIME;
	u64 index = 0;
	u64 root_objectid;
	int ret;

	if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;

	/*
	 * We want to reserve the absolute worst case amount of items.  So if
	 * both inodes are subvols and we need to unlink them then that would
	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume their normal
	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
	 * should cover the worst case number of items we'll modify.
	 */
	ret = btrfs_reserve_metadata_space(root, 11);
	if (ret)
		return ret;

	/*
	 * we're using rename to replace one file with another.
	 * and the replacement file is large.  Start IO on it now so
	 * we don't add too much work to the end of the transaction
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
		filemap_flush(old_inode->i_mapping);

	/* close the racy window with snapshot create/destroy ioctl */
	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&root->fs_info->subvol_sem);

	trans = btrfs_start_transaction(root, 1);
	btrfs_set_trans_block_group(trans, new_dir);

	if (dest != root)
		btrfs_record_root_in_trans(trans, dest);

	ret = btrfs_set_inode_index(new_dir, &index);
	if (ret)
		goto out_fail;

	if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved. */
		root->fs_info->last_trans_log_full_commit = trans->transid;
	} else {
		ret = btrfs_insert_inode_ref(trans, dest,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len,
					     old_inode->i_ino,
					     new_dir->i_ino, index);
		if (ret)
			goto out_fail;
		/*
		 * this is an ugly little race, but the rename is required
		 * to make sure that if we crash, the inode is either at the
		 * old name or the new one.  pinning the log transaction lets
		 * us make sure we don't allow a log commit to come in after
		 * we unlink the name but before we add the new name back in.
		 */
		btrfs_pin_log_trans(root);
	}
	/*
	 * make sure the inode gets flushed if it is replacing
	 * something.
	 */
	if (new_inode && new_inode->i_size &&
	    old_inode && S_ISREG(old_inode->i_mode)) {
		btrfs_add_ordered_operation(trans, root, old_inode);
	}

	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	old_inode->i_ctime = ctime;

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);

	if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
					old_dentry->d_name.name,
					old_dentry->d_name.len);
	} else {
		btrfs_inc_nlink(old_dentry->d_inode);
		ret = btrfs_unlink_inode(trans, root, old_dir,
					 old_dentry->d_inode,
					 old_dentry->d_name.name,
					 old_dentry->d_name.len);
	}
	BUG_ON(ret);

	if (new_inode) {
		new_inode->i_ctime = CURRENT_TIME;
		if (unlikely(new_inode->i_ino ==
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			root_objectid = BTRFS_I(new_inode)->location.objectid;
			ret = btrfs_unlink_subvol(trans, dest, new_dir,
						root_objectid,
						new_dentry->d_name.name,
						new_dentry->d_name.len);
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, dest, new_dir,
						 new_dentry->d_inode,
						 new_dentry->d_name.name,
						 new_dentry->d_name.len);
		}
		BUG_ON(ret);
		if (new_inode->i_nlink == 0) {
			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
			BUG_ON(ret);
		}
	}

	ret = btrfs_add_link(trans, new_dir, old_inode,
			     new_dentry->d_name.name,
			     new_dentry->d_name.len, 0, index);
	BUG_ON(ret);

	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_log_new_name(trans, old_inode, old_dir,
				   new_dentry->d_parent);
		btrfs_end_log_trans(root);
	}
out_fail:
	btrfs_end_transaction_throttle(trans, root);

	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&root->fs_info->subvol_sem);

	btrfs_unreserve_metadata_space(root, 11);
	return ret;
}
/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
	struct list_head *head = &root->fs_info->delalloc_inodes;
	struct btrfs_inode *binode;
	struct inode *inode;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	spin_lock(&root->fs_info->delalloc_lock);
	while (!list_empty(head)) {
		binode = list_entry(head->next, struct btrfs_inode,
				    delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (!inode)
			list_del_init(&binode->delalloc_inodes);
		spin_unlock(&root->fs_info->delalloc_lock);
		if (inode) {
			filemap_flush(inode->i_mapping);
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
		}
		cond_resched();
		spin_lock(&root->fs_info->delalloc_lock);
	}
	spin_unlock(&root->fs_info->delalloc_lock);

	/* the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&root->fs_info->async_submit_draining);
	while (atomic_read(&root->fs_info->nr_async_submits) ||
	      atomic_read(&root->fs_info->async_delalloc_pages)) {
		wait_event(root->fs_info->async_submit_wait,
		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&root->fs_info->async_submit_draining);
	return 0;
}
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;
	unsigned long nr = 0;

	name_len = strlen(symname) + 1;
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;

	/*
	 * 2 items for inode item and ref
	 * 2 items for dir items
	 * 1 item for xattr if selinux is on
	 */
	err = btrfs_reserve_metadata_space(root, 5);
	if (err)
		return err;

	trans = btrfs_start_transaction(root, 1);
	if (!trans)
		goto out_fail;
	btrfs_set_trans_block_group(trans, dir);

	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
	if (err) {
		err = -ENOSPC;
		goto out_unlock;
	}

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len,
				dentry->d_parent->d_inode->i_ino, objectid,
				BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
				&index);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_unlock;

	err = btrfs_init_inode_security(trans, inode, dir);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	btrfs_set_trans_block_group(trans, inode);
	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);
	if (drop_inode)
		goto out_unlock;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = inode->i_ino;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(inode, name_len - 1);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		drop_inode = 1;

out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
out_fail:
	btrfs_unreserve_metadata_space(root, 5);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}
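
/*
 * illustrative note: the symlink target is stored as an inline file extent,
 * including its trailing NUL.  "ln -s /tmp foo" stores the five bytes
 * "/tmp\0" (ram_bytes = 5) while i_size is set to name_len - 1 = 4, the
 * length readlink(2) reports.
 */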
static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
			u64 alloc_hint, int mode, loff_t actual_len)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 alloc_size;
	u64 cur_offset = start;
	u64 num_bytes = end - start;
	int ret = 0;
	u64 i_size;

	while (num_bytes > 0) {
		alloc_size = min(num_bytes, root->fs_info->max_extent);

		trans = btrfs_start_transaction(root, 1);

		ret = btrfs_reserve_extent(trans, root, alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		if (ret) {
			WARN_ON(1);
			goto stop_trans;
		}

		ret = btrfs_reserve_metadata_space(root, 3);
		if (ret) {
			btrfs_free_reserved_extent(root, ins.objectid,
						   ins.offset);
			goto stop_trans;
		}

		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, 0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		BUG_ON(ret);
		btrfs_drop_extent_cache(inode, cur_offset,
					cur_offset + ins.offset - 1, 0);

		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		alloc_hint = ins.objectid + ins.offset;

		inode->i_ctime = CURRENT_TIME;
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_ordered_update_i_size(inode, i_size, NULL);
		}

		ret = btrfs_update_inode(trans, root, inode);
		BUG_ON(ret);

		btrfs_end_transaction(trans, root);
		btrfs_unreserve_metadata_space(root, 3);
	}
	return ret;

stop_trans:
	btrfs_end_transaction(trans, root);
	return ret;
}
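
/*
 * rough sketch of why the loop above restarts the transaction on every
 * chunk: each pass allocates at most fs_info->max_extent bytes and reserves
 * only the three metadata items one file extent insertion can dirty, so
 * preallocating a huge range never holds a single transaction (or a giant
 * reservation) open for the whole range.
 */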
static long btrfs_fallocate(struct inode *inode, int mode,
			    loff_t offset, loff_t len)
{
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, alloc_start);
		if (ret)
			goto out;
	}

	ret = btrfs_check_data_free_space(BTRFS_I(inode)->root, inode,
					  alloc_end - alloc_start);
	if (ret)
		goto out;

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			    GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      alloc_start, locked_end, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), alloc_end);
		last_byte = (last_byte + mask) & ~mask;
		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = prealloc_file_range(inode,
						  cur_offset, last_byte,
						  alloc_hint, mode, offset+len);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		}
		if (em->block_start <= EXTENT_MAP_LAST_BYTE)
			alloc_hint = em->block_start;
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
		      GFP_NOFS);

	btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode,
				       alloc_end - alloc_start);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
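
/*
 * the fallocate loop above leans on btrfs_get_extent(): every mapping that
 * comes back as a hole (or as a non-prealloc extent entirely past EOF) is
 * handed to prealloc_file_range(), while already-allocated ranges are
 * simply skipped, making the operation idempotent over them.
 */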
static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}
static int btrfs_permission(struct inode *inode, int mask)
{
	if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
		return -EACCES;
	return generic_permission(inode, mask, btrfs_check_acl);
}
static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
};
static const struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= btrfs_real_readdir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.readpage_io_failed_hook = btrfs_io_failed_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
	.merge_extent_hook = btrfs_merge_extent_hook,
	.split_extent_hook = btrfs_split_extent_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.sync_page	= block_sync_page,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.truncate	= btrfs_truncate,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.fallocate	= btrfs_fallocate,
	.fiemap		= btrfs_fiemap,
};
static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};