/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include "print-tree.h"
/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  Without the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find,
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_DIR_INDEX 2
#define LOG_WALK_REPLAY_ALL 3
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree, and once
 * to do all the other items.
 */
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		if (btrfs_need_log_full_commit(root->fs_info, trans)) {
		if (!root->log_start_pid) {
			root->log_start_pid = current->pid;
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);

		atomic_inc(&root->log_batch);
		atomic_inc(&root->log_writers);
		index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
		mutex_unlock(&root->log_mutex);

	mutex_lock(&root->fs_info->tree_log_mutex);
	if (!root->fs_info->log_root_tree)
		ret = btrfs_init_log_root_tree(trans, root->fs_info);
	mutex_unlock(&root->fs_info->tree_log_mutex);

	if (!root->log_root) {
		ret = btrfs_add_log_tree(trans, root);
	clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
	root->log_start_pid = current->pid;
	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	index = root->log_transid % 2;
	list_add_tail(&ctx->list, &root->log_ctxs[index]);
	ctx->log_transid = root->log_transid;
	mutex_unlock(&root->log_mutex);
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if no transaction was running
 */
static int join_running_log_trans(struct btrfs_root *root)
	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
	if (atomic_dec_and_test(&root->log_writers)) {
		if (waitqueue_active(&root->log_writer_wait))
			wake_up(&root->log_writer_wait);
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log tree root
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen);
};
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen)
	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen);

	ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(log, eb);
		btrfs_write_tree_block(eb);
		btrfs_wait_tree_block_writeback(eb);
/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
		if (dst_size != item_size)

		if (item_size == 0) {
			btrfs_release_path(path);
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
		ret = memcmp(dst_copy, src_copy, item_size);

		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
			btrfs_release_path(path);

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
			struct btrfs_inode_item *item;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
				btrfs_set_inode_size(eb, item, 0);
	} else if (inode_item) {
		struct btrfs_inode_item *item;

		/*
		 * New inode, set nbytes to 0 so that nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
			btrfs_set_inode_size(eb, item, 0);

	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	ret = btrfs_insert_empty_item(trans, root, path,

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST) {
		found_size = btrfs_item_size_nr(path->nodes[0],
		if (found_size > item_size)
			btrfs_truncate_item(root, path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(root, path,
					  item_size - found_size);
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0)

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			saved_i_size = btrfs_inode_size(path->nodes[0],

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	} else if (is_bad_inode(inode)) {
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
	u64 start = key->offset;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inode's nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_inline_len(eb, slot, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size, root->sectorsize);

	inode = read_one_inode(root, key->objectid);

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		unsigned long dest_offset;
		struct btrfs_key ins;

		ret = btrfs_insert_empty_item(trans, root, path, key,
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		if (ins.objectid > 0) {
			LIST_HEAD(ordered_sums);

			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_extent(root, ins.objectid,
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset, 0);
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root, root->root_key.objectid,
						key->objectid, offset, &ins);
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);

			ret = btrfs_lookup_csums_range(root->log_root,
						       csum_start, csum_end - 1,
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						  struct btrfs_ordered_sum,
				ret = btrfs_csum_file_blocks(trans,
						root->fs_info->csum_root,
				list_del(&sums->list);

		btrfs_release_path(path);
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);

	inode_add_bytes(inode, nbytes);
	ret = btrfs_update_inode(trans, root, inode);
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * entry
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct inode *dir,
				      struct btrfs_dir_item *di)
	struct extent_buffer *leaf;
	struct btrfs_key location;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);

	ret = link_to_fixup_dir(trans, root, path, location.objectid);

	ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);

	ret = btrfs_run_delayed_items(trans, root);
797 * helper function to see if a given name and sequence number found
798 * in an inode back reference are already in a directory and correctly
799 * point to this inode
801 static noinline int inode_in_dir(struct btrfs_root *root,
802 struct btrfs_path *path,
803 u64 dirid, u64 objectid, u64 index,
804 const char *name, int name_len)
806 struct btrfs_dir_item *di;
807 struct btrfs_key location;
810 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
811 index, name, name_len, 0);
812 if (di && !IS_ERR(di)) {
813 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
814 if (location.objectid != objectid)
818 btrfs_release_path(path);
820 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
821 if (di && !IS_ERR(di)) {
822 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
823 if (location.objectid != objectid)
829 btrfs_release_path(path);
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   char *name, int namelen)
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr_end;
	unsigned long name_ptr;

	path = btrfs_alloc_path();

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path, ref_objectid,
						   name, namelen, NULL))

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
		ptr = (unsigned long)(ref + 1) + found_name_len;

	btrfs_free_path(path);
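
/*
 * helper for add_inode_ref below: any name for this inode found in the
 * subvolume that is not also present in the log is unlinked as a
 * conflict.  Both old style inode refs and extended refs are checked,
 * and directory entries that reuse the same index or name are dropped.
 */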
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct inode *dir, struct inode *inode,
				  struct extent_buffer *eb,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory?
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay;
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log_root, &search_key,
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir,
				ret = btrfs_run_delayed_items(trans, root);
			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;

		/*
		 * NOTE: we have searched the root tree and checked the
		 * corresponding ref, so it does not need to be checked again.
		 */
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
	if (!IS_ERR_OR_NULL(extref)) {
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
				victim_parent = read_one_inode(root,
				if (victim_parent) {
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
					ret = btrfs_run_delayed_items(
				iput(victim_parent);
			cur_offset += victim_name_len + sizeof(*extref);

	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
	btrfs_release_path(path);
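
/*
 * pull the name, index and (optionally) parent objectid out of an
 * extended inode ref item.  The name buffer is allocated here with
 * GFP_NOFS and must be freed by the caller.
 */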
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,

	*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);
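
/*
 * same as extref_get_fields, but for an old style inode ref: the parent
 * objectid comes from the item key instead, so only the name and index
 * are pulled out here.
 */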
static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	*index = btrfs_inode_ref_index(eb, ref);
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);

	inode = read_one_inode(root, inode_objectid);

	while (ref_ptr < ref_end) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			dir = read_one_inode(root, parent_objectid);
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
				  ref_index, name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */
			ret = __add_inode_ref(trans, root, path, log,
					      ref_index, name, namelen,

			/* insert our name */
			ret = btrfs_add_link(trans, dir, inode, name, namelen,

			btrfs_update_inode(trans, root, inode);

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);

	btrfs_release_path(path);
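
/*
 * insert an orphan item for the given inode number unless one is
 * already present in the tree.
 */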
static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 offset)
	ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID,
			      offset, BTRFS_ORPHAN_ITEM_KEY, NULL);
		ret = btrfs_insert_orphan_item(trans, root, offset);
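
/*
 * count how many names the extended inode refs of this inode hold.
 * Used by the link count fixup code below to compute the real nlink.
 */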
static int count_inode_extrefs(struct btrfs_root *root,
			       struct inode *inode, struct btrfs_path *path)
	unsigned int nlink = 0;
	u64 inode_objectid = btrfs_ino(inode);
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	ret = btrfs_find_one_extref(root, inode_objectid, offset, path,

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);

	while (cur_offset < item_size) {
		extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
		name_len = btrfs_inode_extref_name_len(leaf, extref);

		cur_offset += name_len + sizeof(*extref);

	btrfs_release_path(path);

	btrfs_release_path(path);
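
/*
 * walk all the old style INODE_REF items for this inode and count the
 * names they contain.
 */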
static int count_inode_refs(struct btrfs_root *root,
			    struct inode *inode, struct btrfs_path *path)
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr_end;
	u64 ino = btrfs_ino(inode);

	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (path->slots[0] == 0)

	btrfs_item_key_to_cpu(path->nodes[0], &key,
	if (key.objectid != ino ||
	    key.type != BTRFS_INODE_REF_KEY)
	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
	ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
	while (ptr < ptr_end) {
		struct btrfs_inode_ref *ref;

		ref = (struct btrfs_inode_ref *)ptr;
		name_len = btrfs_inode_ref_name_len(path->nodes[0],
		ptr = (unsigned long)(ref + 1) + name_len;

	if (key.offset == 0)
	if (path->slots[0] > 0) {
	btrfs_release_path(path);

	btrfs_release_path(path);
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
	struct btrfs_path *path;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();

	ret = count_inode_refs(root, inode, path);

	ret = count_inode_extrefs(root, inode, path);

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
		ret = insert_orphan_item(trans, root, ino);

	btrfs_free_path(path);
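
/*
 * scan the fixup dir (BTRFS_TREE_LOG_FIXUP_OBJECTID) for all the inodes
 * recorded during replay, fix each one's link count and delete the
 * fixup item once it has been processed.
 */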
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (path->slots[0] == 0)

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
	    key.type != BTRFS_ORPHAN_ITEM_KEY)

	ret = btrfs_del_item(trans, root, path);

	btrfs_release_path(path);
	inode = read_one_inode(root, key.offset);

	ret = fixup_inode_link_count(trans, root, inode);

	/*
	 * fixup on a directory may create new entries,
	 * make sure we always look for the highest possible
	 * offset
	 */
	key.offset = (u64)-1;

	btrfs_release_path(path);
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
	struct btrfs_key key;
	struct inode *inode;

	inode = read_one_inode(root, objectid);

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG(); /* Logic Error */
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    u64 dirid, u64 index,
				    char *name, int name_len, u8 type,
				    struct btrfs_key *location)
	struct inode *inode;

	inode = read_one_inode(root, location->objectid);

	dir = read_one_inode(root, dirid);

	ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);

	/* FIXME, put inode into FIXUP list */
/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fixup tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);

	dir = read_one_inode(root, key->objectid);

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	ret = drop_one_dir_item(trans, root, path, dir, dst_di);

	if (key->type == BTRFS_DIR_INDEX_KEY)

	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(dir, dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, dir);

	btrfs_release_path(path);
	ret = insert_one_name(trans, root, path, key->objectid, key->offset,
			      name, name_len, log_type, &log_key);
	if (ret && ret != -ENOENT)
	update_size = false;
/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory item and directory index keys
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	unsigned long ptr_end;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di))
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		ptr = (unsigned long)(di + 1);
/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
	struct btrfs_key key;
	struct btrfs_dir_log_item *item;

	if (*start_ret == (u64)-1)

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (path->slots[0] == 0)

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		*start_ret = key.offset;
		*end_ret = found_end;

	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;

	btrfs_release_path(path);
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
	struct extent_buffer *eb;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	unsigned long ptr_end;
	struct inode *inode;
	struct btrfs_key location;

	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di)) {
		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		read_extent_buffer(eb, name, (unsigned long)(di + 1),

		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
		if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);

			ret = btrfs_unlink_inode(trans, root, dir, inode,

			ret = btrfs_run_delayed_items(trans, root);

			/* there might still be more names under this key;
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
		} else if (IS_ERR(log_di)) {
			return PTR_ERR(log_di);
		btrfs_release_path(log_path);

		ptr = (unsigned long)(di + 1);

	btrfs_release_path(path);
	btrfs_release_path(log_path);
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that the log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
		btrfs_free_path(log_path);

	range_end = (u64)-1;
	ret = find_dir_range(log, path, dirid, key_type,
			     &range_start, &range_end);

	dir_key.offset = range_start;
	ret = btrfs_search_slot(NULL, root, &dir_key, path,

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
	btrfs_item_key_to_cpu(path->nodes[0], &found_key,
	if (found_key.objectid != dirid ||
	    found_key.type != dir_key.type)

	if (found_key.offset > range_end)

	ret = check_item_in_log(trans, root, log, path,

	if (found_key.offset == (u64)-1)
	dir_key.offset = found_key.offset + 1;

	btrfs_release_path(path);
	if (range_end == (u64)-1)
	range_start = range_end + 1;

	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);

	btrfs_release_path(path);
	btrfs_free_path(log_path);
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen)
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;

	ret = btrfs_read_buffer(eb, gen);

	level = btrfs_header_level(eb);

	path = btrfs_alloc_path();

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;

			inode_item = btrfs_item_ptr(eb, i,
					struct btrfs_inode_item);
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
			ret = overwrite_item(wc->trans, root, path,

			/* for regular files, make sure the corresponding
			 * orphan item exists.  extents past the new EOF
			 * will be truncated later by orphan cleanup.
			 */
			if (S_ISREG(mode)) {
				ret = insert_orphan_item(wc->trans, root,

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);

		if (key.type == BTRFS_DIR_INDEX_KEY &&
		    wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
			ret = replay_one_dir_item(wc->trans, root, path,

		if (wc->stage < LOG_WALK_REPLAY_ALL)

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
		} else if (key.type == BTRFS_INODE_REF_KEY ||
			   key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
			if (ret && ret != -ENOENT)
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
		} else if (key.type == BTRFS_DIR_ITEM_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,

	btrfs_free_path(path);
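
/*
 * walk down the log tree from the current path position, handing each
 * node found to wc->process_func.  When wc->free is set, the blocks are
 * also cleaned and their reserved extents freed and pinned.
 */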
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path, int *level,
				       struct walk_control *wc)
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	while (*level > 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		WARN_ON(btrfs_header_level(cur) != *level);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = btrfs_level_size(root, *level - 1);

		parent = path->nodes[*level];
		root_owner = btrfs_header_owner(parent);

		next = btrfs_find_create_tree_block(root, bytenr, blocksize);

		ret = wc->process_func(root, next, wc, ptr_gen);
			free_extent_buffer(next);

		path->slots[*level]++;
		ret = btrfs_read_buffer(next, ptr_gen);
			free_extent_buffer(next);

		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);
		clean_tree_block(trans, root, next);
		btrfs_wait_tree_block_writeback(next);
		btrfs_tree_unlock(next);

		WARN_ON(root_owner !=
			BTRFS_TREE_LOG_OBJECTID);
		ret = btrfs_free_and_pin_reserved_extent(root,
			free_extent_buffer(next);

		free_extent_buffer(next);

		ret = btrfs_read_buffer(next, ptr_gen);
			free_extent_buffer(next);

		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
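
/*
 * walk back up the log tree once the current node is exhausted,
 * processing (and, when wc->free is set, freeing) the nodes on the
 * way up.
 */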
static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int *level,
				     struct walk_control *wc)
	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
			WARN_ON(*level == 0);
		} else {
			struct extent_buffer *parent;

			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
			else
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			ret = wc->process_func(root, path->nodes[*level], wc,
				btrfs_header_generation(path->nodes[*level]));

			struct extent_buffer *next;

			next = path->nodes[*level];

			btrfs_tree_lock(next);
			btrfs_set_lock_blocking(next);
			clean_tree_block(trans, root, next);
			btrfs_wait_tree_block_writeback(next);
			btrfs_tree_unlock(next);

			WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
			ret = btrfs_free_and_pin_reserved_extent(root,
					path->nodes[*level]->start,
					path->nodes[*level]->len);

			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log, struct walk_control *wc)
	struct btrfs_path *path;

	path = btrfs_alloc_path();

	level = btrfs_header_level(log->node);

	path->nodes[level] = log->node;
	extent_buffer_get(log->node);
	path->slots[level] = 0;

	wret = walk_down_log_tree(trans, log, path, &level, wc);

	wret = walk_up_log_tree(trans, log, path, &level, wc);

	/* was the root node processed? if not, catch it here */
	if (path->nodes[orig_level]) {
		ret = wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]));

		struct extent_buffer *next;

		next = path->nodes[orig_level];

		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);
		clean_tree_block(trans, log, next);
		btrfs_wait_tree_block_writeback(next);
		btrfs_tree_unlock(next);

		WARN_ON(log->root_key.objectid !=
			BTRFS_TREE_LOG_OBJECTID);
		ret = btrfs_free_and_pin_reserved_extent(log, next->start,

	btrfs_free_path(path);
/*
 * helper function to update the item for a given subvolume's log root
 * in the tree of log roots
 */
static int update_log_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *log)
	if (log->log_transid == 1) {
		/* insert root item on the first sync */
		ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
					&log->root_key, &log->root_item);
	} else {
		ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
					&log->root_key, &log->root_item);
	}
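
/*
 * wait for the log commit of the given transid to finish, dropping
 * log_mutex while we sleep.
 */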
static void wait_log_commit(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, int transid)
	int index = transid % 2;

	/*
	 * we only allow two pending log transactions at a time,
	 * so we know that if ours is more than 2 older than the
	 * current transaction, we're done
	 */
	do {
		prepare_to_wait(&root->log_commit_wait[index],
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);

		if (root->log_transid_committed < transid &&
		    atomic_read(&root->log_commit[index]))

		finish_wait(&root->log_commit_wait[index], &wait);
		mutex_lock(&root->log_mutex);
	} while (root->log_transid_committed < transid &&
		 atomic_read(&root->log_commit[index]));
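
/*
 * wait until all the tasks currently writing to the log tree have
 * dropped their writer references.
 */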
static void wait_for_writer(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
	while (atomic_read(&root->log_writers)) {
		prepare_to_wait(&root->log_writer_wait,
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);
		if (atomic_read(&root->log_writers))
		mutex_lock(&root->log_mutex);
		finish_wait(&root->log_writer_wait, &wait);
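
/*
 * remove a log ctx from the list of waiters on this log transaction.
 */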
static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
					struct btrfs_log_ctx *ctx)
	mutex_lock(&root->log_mutex);
	list_del_init(&ctx->list);
	mutex_unlock(&root->log_mutex);

/*
 * Invoked in log mutex context, or the caller must ensure that no other
 * task can access the list.
 */
static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
					     int index, int error)
	struct btrfs_log_ctx *ctx;

	if (!error) {
		INIT_LIST_HEAD(&root->log_ctxs[index]);
		return;
	}

	list_for_each_entry(ctx, &root->log_ctxs[index], list)
		ctx->log_ret = error;

	INIT_LIST_HEAD(&root->log_ctxs[index]);
/*
 * btrfs_sync_log sends a given tree log down to the disk and
 * updates the super blocks to record it.  When this call is done,
 * you know that any inodes previously logged are safely on disk only
 * if it returns 0.
 *
 * Any other return value means you need to call btrfs_commit_transaction.
 * Some of the edge cases for fsyncing directories that have had unlinks
 * or renames done in the past mean that sometimes the only safe
 * fsync is to commit the whole FS.  When btrfs_sync_log returns -EAGAIN,
 * that has happened.
 */
int btrfs_sync_log(struct btrfs_trans_handle *trans,
		   struct btrfs_root *root, struct btrfs_log_ctx *ctx)
2472 struct btrfs_root *log = root->log_root;
2473 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
2474 int log_transid = 0;
2475 struct btrfs_log_ctx root_log_ctx;
2476 struct blk_plug plug;
2478 mutex_lock(&root->log_mutex);
2479 log_transid = ctx->log_transid;
2480 if (root->log_transid_committed >= log_transid) {
2481 mutex_unlock(&root->log_mutex);
2482 return ctx->log_ret;
2485 index1 = log_transid % 2;
2486 if (atomic_read(&root->log_commit[index1])) {
2487 wait_log_commit(trans, root, log_transid);
2488 mutex_unlock(&root->log_mutex);
2489 return ctx->log_ret;
2491 ASSERT(log_transid == root->log_transid);
2492 atomic_set(&root->log_commit[index1], 1);
2494 /* wait for previous tree log sync to complete */
2495 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
2496 wait_log_commit(trans, root, log_transid - 1);
2499 int batch = atomic_read(&root->log_batch);
2500 /* when we're on an ssd, just kick the log commit out */
2501 if (!btrfs_test_opt(root, SSD) &&
2502 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
2503 mutex_unlock(&root->log_mutex);
2504 schedule_timeout_uninterruptible(1);
2505 mutex_lock(&root->log_mutex);
2507 wait_for_writer(trans, root);
2508 if (batch == atomic_read(&root->log_batch))
2512 /* bail out if we need to do a full commit */
2513 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
2515 btrfs_free_logged_extents(log, log_transid);
2516 mutex_unlock(&root->log_mutex);
2520 if (log_transid % 2 == 0)
2521 mark = EXTENT_DIRTY;
2525 /* we start IO on all the marked extents here, but we don't actually
2526 * wait for them until later.
2528 blk_start_plug(&plug);
2529 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
2531 blk_finish_plug(&plug);
2532 btrfs_abort_transaction(trans, root, ret);
2533 btrfs_free_logged_extents(log, log_transid);
2534 btrfs_set_log_full_commit(root->fs_info, trans);
2535 mutex_unlock(&root->log_mutex);
2539 btrfs_set_root_node(&log->root_item, log->node);
2541 root->log_transid++;
2542 log->log_transid = root->log_transid;
2543 root->log_start_pid = 0;
2545 * IO has been started, blocks of the log tree have WRITTEN flag set
2546 * in their headers. new modifications of the log will be written to
2547 * new positions. so it's safe to allow log writers to go in.
2549 mutex_unlock(&root->log_mutex);
2551 btrfs_init_log_ctx(&root_log_ctx);
2553 mutex_lock(&log_root_tree->log_mutex);
2554 atomic_inc(&log_root_tree->log_batch);
2555 atomic_inc(&log_root_tree->log_writers);
2557 index2 = log_root_tree->log_transid % 2;
2558 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
2559 root_log_ctx.log_transid = log_root_tree->log_transid;
2561 mutex_unlock(&log_root_tree->log_mutex);
2563 ret = update_log_root(trans, log);
2565 mutex_lock(&log_root_tree->log_mutex);
2566 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
2568 if (waitqueue_active(&log_root_tree->log_writer_wait))
2569 wake_up(&log_root_tree->log_writer_wait);
2573 if (!list_empty(&root_log_ctx.list))
2574 list_del_init(&root_log_ctx.list);
2576 blk_finish_plug(&plug);
2577 btrfs_set_log_full_commit(root->fs_info, trans);
2579 if (ret != -ENOSPC) {
2580 btrfs_abort_transaction(trans, root, ret);
2581 mutex_unlock(&log_root_tree->log_mutex);
2584 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2585 btrfs_free_logged_extents(log, log_transid);
2586 mutex_unlock(&log_root_tree->log_mutex);
2591 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
2592 mutex_unlock(&log_root_tree->log_mutex);
2593 ret = root_log_ctx.log_ret;
2597 index2 = root_log_ctx.log_transid % 2;
2598 if (atomic_read(&log_root_tree->log_commit[index2])) {
2599 blk_finish_plug(&plug);
2600 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2601 wait_log_commit(trans, log_root_tree,
2602 root_log_ctx.log_transid);
2603 btrfs_free_logged_extents(log, log_transid);
2604 mutex_unlock(&log_root_tree->log_mutex);
2605 ret = root_log_ctx.log_ret;
2608 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
2609 atomic_set(&log_root_tree->log_commit[index2], 1);
2611 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
2612 wait_log_commit(trans, log_root_tree,
2613 root_log_ctx.log_transid - 1);
2616 wait_for_writer(trans, log_root_tree);
2619 * now that we've moved on to the tree of log tree roots,
2620 * check the full commit flag again
2622 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
2623 blk_finish_plug(&plug);
2624 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2625 btrfs_free_logged_extents(log, log_transid);
2626 mutex_unlock(&log_root_tree->log_mutex);
2628 goto out_wake_log_root;
2631 ret = btrfs_write_marked_extents(log_root_tree,
2632 &log_root_tree->dirty_log_pages,
2633 EXTENT_DIRTY | EXTENT_NEW);
2634 blk_finish_plug(&plug);
2636 btrfs_set_log_full_commit(root->fs_info, trans);
2637 btrfs_abort_transaction(trans, root, ret);
2638 btrfs_free_logged_extents(log, log_transid);
2639 mutex_unlock(&log_root_tree->log_mutex);
2640 goto out_wake_log_root;
2642 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2643 btrfs_wait_marked_extents(log_root_tree,
2644 &log_root_tree->dirty_log_pages,
2645 EXTENT_NEW | EXTENT_DIRTY);
2646 btrfs_wait_logged_extents(log, log_transid);
2648 btrfs_set_super_log_root(root->fs_info->super_for_commit,
2649 log_root_tree->node->start);
2650 btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
2651 btrfs_header_level(log_root_tree->node));
2653 log_root_tree->log_transid++;
2654 mutex_unlock(&log_root_tree->log_mutex);
2657 * nobody else is going to jump in and write the ctree
2658 * super here because the log_commit atomic below is protecting
2659 * us. We must be called with a transaction handle pinning
2660 * the running transaction open, so a full commit can't hop
2661 * in and cause problems either.
2663 ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
2665 btrfs_set_log_full_commit(root->fs_info, trans);
2666 btrfs_abort_transaction(trans, root, ret);
2667 goto out_wake_log_root;
2670 mutex_lock(&root->log_mutex);
2671 if (root->last_log_commit < log_transid)
2672 root->last_log_commit = log_transid;
2673 mutex_unlock(&root->log_mutex);
2677 * We don't need to take the log_mutex here because we are sure all
2678 * the other tasks are blocked.
2680 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
2682 mutex_lock(&log_root_tree->log_mutex);
2683 log_root_tree->log_transid_committed++;
2684 atomic_set(&log_root_tree->log_commit[index2], 0);
2685 mutex_unlock(&log_root_tree->log_mutex);
2687 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
2688 wake_up(&log_root_tree->log_commit_wait[index2]);
2691 btrfs_remove_all_log_ctxs(root, index1, ret);
2693 mutex_lock(&root->log_mutex);
2694 root->log_transid_committed++;
2695 atomic_set(&root->log_commit[index1], 0);
2696 mutex_unlock(&root->log_mutex);
2698 if (waitqueue_active(&root->log_commit_wait[index1]))
2699 wake_up(&root->log_commit_wait[index1]);
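
/*
 * The return-value contract documented above is easiest to see from the
 * caller's side. A minimal sketch of the intended calling pattern;
 * do_fsync_tail() is a hypothetical name, not a function in this file,
 * and only functions that appear elsewhere in this file are assumed:
 */
static inline int do_fsync_tail(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_log_ctx *ctx)
{
	int ret = btrfs_sync_log(trans, root, ctx);

	if (ret == 0)
		/* log is on disk, the logged inodes are durable */
		return btrfs_end_transaction(trans, root);
	/*
	 * -EAGAIN (or any error): the log cannot make this fsync safe,
	 * e.g. because of past unlinks/renames, so fall back to a full
	 * transaction commit.
	 */
	return btrfs_commit_transaction(trans, root);
}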
2703 static void free_log_tree(struct btrfs_trans_handle *trans,
2704 struct btrfs_root *log)
2709 struct walk_control wc = {
2710 .free = 1,
2711 .process_func = process_one_buffer
2714 ret = walk_log_tree(trans, log, &wc);
2715 /* I don't think this can happen but just in case */
2717 btrfs_abort_transaction(trans, log, ret);
2720 ret = find_first_extent_bit(&log->dirty_log_pages,
2721 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
2726 clear_extent_bits(&log->dirty_log_pages, start, end,
2727 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
2731 * We may have short-circuited the log tree with the full commit logic
2732 * and left ordered extents on our list, so clear these out to keep us
2733 * from leaking inodes and memory.
2735 btrfs_free_logged_extents(log, 0);
2736 btrfs_free_logged_extents(log, 1);
2738 free_extent_buffer(log->node);
2743 * free all the extents used by the tree log. This should be called
2744 * at commit time of the full transaction
2746 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2748 if (root->log_root) {
2749 free_log_tree(trans, root->log_root);
2750 root->log_root = NULL;
2755 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
2756 struct btrfs_fs_info *fs_info)
2758 if (fs_info->log_root_tree) {
2759 free_log_tree(trans, fs_info->log_root_tree);
2760 fs_info->log_root_tree = NULL;
2766 * If both a file and directory are logged, and unlinks or renames are
2767 * mixed in, we have a few interesting corners:
2769 * create file X in dir Y
2770 * link file X to X.link in dir Y
2771 * fsync file X
2772 * unlink file X but leave X.link
2773 * fsync X.link
2775 * After a crash we would expect only X.link to exist. But file X
2776 * didn't get fsync'd again so the log has back refs for X and X.link.
2778 * We solve this by removing directory entries and inode backrefs from the
2779 * log when a file that was logged in the current transaction is
2780 * unlinked. Any later fsync will include the updated log entries, and
2781 * we'll be able to reconstruct the proper directory items from backrefs.
2783 * This optimization allows us to avoid relogging the entire inode
2784 * or the entire directory.
2786 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2787 struct btrfs_root *root,
2788 const char *name, int name_len,
2789 struct inode *dir, u64 index)
2791 struct btrfs_root *log;
2792 struct btrfs_dir_item *di;
2793 struct btrfs_path *path;
2797 u64 dir_ino = btrfs_ino(dir);
2799 if (BTRFS_I(dir)->logged_trans < trans->transid)
2802 ret = join_running_log_trans(root);
2806 mutex_lock(&BTRFS_I(dir)->log_mutex);
2808 log = root->log_root;
2809 path = btrfs_alloc_path();
2815 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
2816 name, name_len, -1);
2822 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2823 bytes_del += name_len;
2829 btrfs_release_path(path);
2830 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
2831 index, name, name_len, -1);
2837 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2838 bytes_del += name_len;
2845 /* update the directory size in the log to reflect the names
2846 * we have removed
2847 */
2849 struct btrfs_key key;
2851 key.objectid = dir_ino;
2853 key.type = BTRFS_INODE_ITEM_KEY;
2854 btrfs_release_path(path);
2856 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
2862 struct btrfs_inode_item *item;
2865 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2866 struct btrfs_inode_item);
2867 i_size = btrfs_inode_size(path->nodes[0], item);
2868 if (i_size > bytes_del)
2869 i_size -= bytes_del;
2872 btrfs_set_inode_size(path->nodes[0], item, i_size);
2873 btrfs_mark_buffer_dirty(path->nodes[0]);
2876 btrfs_release_path(path);
2879 btrfs_free_path(path);
2881 mutex_unlock(&BTRFS_I(dir)->log_mutex);
2882 if (ret == -ENOSPC) {
2883 btrfs_set_log_full_commit(root->fs_info, trans);
2886 btrfs_abort_transaction(trans, root, ret);
2888 btrfs_end_log_trans(root);
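
/*
 * The X / X.link corner described in the comment before this function can
 * be reproduced from user space. A minimal sketch of the sequence, with
 * made-up paths and no error handling (userspace calls, compiled out):
 */
#if 0
	int fd = open("Y/X", O_CREAT | O_WRONLY, 0644);

	link("Y/X", "Y/X.link");
	fsync(fd);			/* log gets backrefs for X and X.link */
	unlink("Y/X");			/* must also prune X from the log */
	fsync(open("Y/X.link", O_WRONLY));
	/* after a crash and log replay, only X.link should exist */
#endif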
2893 /* see comments for btrfs_del_dir_entries_in_log */
2894 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
2895 struct btrfs_root *root,
2896 const char *name, int name_len,
2897 struct inode *inode, u64 dirid)
2899 struct btrfs_root *log;
2903 if (BTRFS_I(inode)->logged_trans < trans->transid)
2906 ret = join_running_log_trans(root);
2909 log = root->log_root;
2910 mutex_lock(&BTRFS_I(inode)->log_mutex);
2912 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
2914 mutex_unlock(&BTRFS_I(inode)->log_mutex);
2915 if (ret == -ENOSPC) {
2916 btrfs_set_log_full_commit(root->fs_info, trans);
2918 } else if (ret < 0 && ret != -ENOENT)
2919 btrfs_abort_transaction(trans, root, ret);
2920 btrfs_end_log_trans(root);
2926 * creates a range item in the log for 'dirid'. first_offset and
2927 * last_offset tell us which parts of the key space the log should
2928 * be considered authoritative for.
2930 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
2931 struct btrfs_root *log,
2932 struct btrfs_path *path,
2933 int key_type, u64 dirid,
2934 u64 first_offset, u64 last_offset)
2937 struct btrfs_key key;
2938 struct btrfs_dir_log_item *item;
2940 key.objectid = dirid;
2941 key.offset = first_offset;
2942 if (key_type == BTRFS_DIR_ITEM_KEY)
2943 key.type = BTRFS_DIR_LOG_ITEM_KEY;
2944 else
2945 key.type = BTRFS_DIR_LOG_INDEX_KEY;
2946 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
2950 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2951 struct btrfs_dir_log_item);
2952 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
2953 btrfs_mark_buffer_dirty(path->nodes[0]);
2954 btrfs_release_path(path);
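
/*
 * A concrete example of the range item built by insert_dir_log_key()
 * above: if directory 257 has logged dir index items for offsets 2
 * through 9, the range item's key is (257, BTRFS_DIR_LOG_INDEX_KEY, 2)
 * and the item body records the end of the range. Values here are
 * illustrative only:
 */
static inline void example_dir_log_range_key(struct btrfs_key *key)
{
	key->objectid = 257;			/* dirid */
	key->type = BTRFS_DIR_LOG_INDEX_KEY;
	key->offset = 2;			/* first_offset */
	/* the payload, struct btrfs_dir_log_item, holds end = 9 */
}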
2959 * log all the items included in the current transaction for a given
2960 * directory. This also creates the range items in the log tree required
2961 * to replay anything deleted before the fsync
2963 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2964 struct btrfs_root *root, struct inode *inode,
2965 struct btrfs_path *path,
2966 struct btrfs_path *dst_path, int key_type,
2967 u64 min_offset, u64 *last_offset_ret)
2969 struct btrfs_key min_key;
2970 struct btrfs_root *log = root->log_root;
2971 struct extent_buffer *src;
2976 u64 first_offset = min_offset;
2977 u64 last_offset = (u64)-1;
2978 u64 ino = btrfs_ino(inode);
2980 log = root->log_root;
2982 min_key.objectid = ino;
2983 min_key.type = key_type;
2984 min_key.offset = min_offset;
2986 path->keep_locks = 1;
2988 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
2991 * we didn't find anything from this transaction, see if there
2992 * is anything at all
2994 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
2995 min_key.objectid = ino;
2996 min_key.type = key_type;
2997 min_key.offset = (u64)-1;
2998 btrfs_release_path(path);
2999 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3001 btrfs_release_path(path);
3004 ret = btrfs_previous_item(root, path, ino, key_type);
3006 /* if ret == 0 there are items for this type,
3007 * create a range to tell us the last key of this type.
3008 * otherwise, there are no items in this directory after
3009 * *min_offset, and we create a range to indicate that.
3012 struct btrfs_key tmp;
3013 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3015 if (key_type == tmp.type)
3016 first_offset = max(min_offset, tmp.offset) + 1;
3021 /* go backward to find any previous key */
3022 ret = btrfs_previous_item(root, path, ino, key_type);
3024 struct btrfs_key tmp;
3025 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3026 if (key_type == tmp.type) {
3027 first_offset = tmp.offset;
3028 ret = overwrite_item(trans, log, dst_path,
3029 path->nodes[0], path->slots[0],
3037 btrfs_release_path(path);
3039 /* find the first key from this transaction again */
3040 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3041 if (WARN_ON(ret != 0))
3045 * we have a block from this transaction, log every item in it
3046 * from our directory
3049 struct btrfs_key tmp;
3050 src = path->nodes[0];
3051 nritems = btrfs_header_nritems(src);
3052 for (i = path->slots[0]; i < nritems; i++) {
3053 btrfs_item_key_to_cpu(src, &min_key, i);
3055 if (min_key.objectid != ino || min_key.type != key_type)
3057 ret = overwrite_item(trans, log, dst_path, src, i,
3064 path->slots[0] = nritems;
3067 * look ahead to the next item and see if it is also
3068 * from this directory and from this transaction
3070 ret = btrfs_next_leaf(root, path);
3072 last_offset = (u64)-1;
3075 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3076 if (tmp.objectid != ino || tmp.type != key_type) {
3077 last_offset = (u64)-1;
3080 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3081 ret = overwrite_item(trans, log, dst_path,
3082 path->nodes[0], path->slots[0],
3087 last_offset = tmp.offset;
3092 btrfs_release_path(path);
3093 btrfs_release_path(dst_path);
3096 *last_offset_ret = last_offset;
3098 * insert the log range keys to indicate where the log
3099 * is valid
3101 ret = insert_dir_log_key(trans, log, path, key_type,
3102 ino, first_offset, last_offset);
3110 * logging directories is very similar to logging inodes; we find all the items
3111 * from the current transaction and write them to the log.
3113 * The recovery code scans the directory in the subvolume, and if it finds a
3114 * key in the range logged that is not present in the log tree, then it means
3115 * that dir entry was unlinked during the transaction.
3117 * In order for that scan to work, we must include one key smaller than
3118 * the smallest logged by this transaction and one key larger than the largest
3119 * key logged by this transaction.
3121 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3122 struct btrfs_root *root, struct inode *inode,
3123 struct btrfs_path *path,
3124 struct btrfs_path *dst_path)
3129 int key_type = BTRFS_DIR_ITEM_KEY;
3135 ret = log_dir_items(trans, root, inode, path,
3136 dst_path, key_type, min_key,
3140 if (max_key == (u64)-1)
3142 min_key = max_key + 1;
3145 if (key_type == BTRFS_DIR_ITEM_KEY) {
3146 key_type = BTRFS_DIR_INDEX_KEY;
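
/*
 * Replay-side meaning of the ranges logged above: a dir entry found in
 * the subvolume is deleted iff its offset is covered by a logged range
 * and the log has no matching entry. A sketch of just that predicate,
 * with stand-in arguments (the real logic lives in replay_dir_deletes()):
 */
static inline int dir_entry_was_unlinked(u64 offset, u64 range_start,
					 u64 range_end, int in_log)
{
	if (offset < range_start || offset > range_end)
		return 0;	/* the log is not authoritative here */
	return !in_log;		/* covered but missing -> was unlinked */
}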
3153 * a helper function to drop items from the log before we relog an
3154 * inode. max_key_type indicates the highest item type to remove.
3155 * This cannot be run for file data extents because it does not
3156 * free the extents they point to.
3158 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3159 struct btrfs_root *log,
3160 struct btrfs_path *path,
3161 u64 objectid, int max_key_type)
3164 struct btrfs_key key;
3165 struct btrfs_key found_key;
3168 key.objectid = objectid;
3169 key.type = max_key_type;
3170 key.offset = (u64)-1;
3173 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3174 BUG_ON(ret == 0); /* Logic error */
3178 if (path->slots[0] == 0)
3182 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3185 if (found_key.objectid != objectid)
3188 found_key.offset = 0;
3190 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3193 ret = btrfs_del_items(trans, log, path, start_slot,
3194 path->slots[0] - start_slot + 1);
3196 * If start slot isn't 0 then we don't need to re-search, we've
3197 * found the last guy with the objectid in this tree.
3199 if (ret || start_slot != 0)
3201 btrfs_release_path(path);
3203 btrfs_release_path(path);
3209 static void fill_inode_item(struct btrfs_trans_handle *trans,
3210 struct extent_buffer *leaf,
3211 struct btrfs_inode_item *item,
3212 struct inode *inode, int log_inode_only)
3214 struct btrfs_map_token token;
3216 btrfs_init_map_token(&token);
3218 if (log_inode_only) {
3219 /* set the generation to zero so the recovery code
3220 * can tell the difference between a log entry made
3221 * just to say 'this inode exists' and one made
3222 * to say 'update this inode with these values'
3224 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3225 btrfs_set_token_inode_size(leaf, item, 0, &token);
3227 btrfs_set_token_inode_generation(leaf, item,
3228 BTRFS_I(inode)->generation,
3230 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3233 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3234 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3235 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3236 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3238 btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
3239 inode->i_atime.tv_sec, &token);
3240 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
3241 inode->i_atime.tv_nsec, &token);
3243 btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
3244 inode->i_mtime.tv_sec, &token);
3245 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
3246 inode->i_mtime.tv_nsec, &token);
3248 btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
3249 inode->i_ctime.tv_sec, &token);
3250 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
3251 inode->i_ctime.tv_nsec, &token);
3253 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3256 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3257 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3258 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3259 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3260 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
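
/*
 * The generation == 0 convention set above is what the replay code keys
 * off of. A sketch of the consumer side; the helper name is made up,
 * while btrfs_inode_generation() is the real on-disk accessor:
 */
static inline int logged_inode_exists_only(struct extent_buffer *leaf,
					   struct btrfs_inode_item *item)
{
	/* 0 means "this inode exists"; anything else means "apply these values" */
	return btrfs_inode_generation(leaf, item) == 0;
}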
3263 static int log_inode_item(struct btrfs_trans_handle *trans,
3264 struct btrfs_root *log, struct btrfs_path *path,
3265 struct inode *inode)
3267 struct btrfs_inode_item *inode_item;
3270 ret = btrfs_insert_empty_item(trans, log, path,
3271 &BTRFS_I(inode)->location,
3272 sizeof(*inode_item));
3273 if (ret && ret != -EEXIST)
3275 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3276 struct btrfs_inode_item);
3277 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
3278 btrfs_release_path(path);
3282 static noinline int copy_items(struct btrfs_trans_handle *trans,
3283 struct inode *inode,
3284 struct btrfs_path *dst_path,
3285 struct btrfs_path *src_path, u64 *last_extent,
3286 int start_slot, int nr, int inode_only)
3288 unsigned long src_offset;
3289 unsigned long dst_offset;
3290 struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
3291 struct btrfs_file_extent_item *extent;
3292 struct btrfs_inode_item *inode_item;
3293 struct extent_buffer *src = src_path->nodes[0];
3294 struct btrfs_key first_key, last_key, key;
3296 struct btrfs_key *ins_keys;
3300 struct list_head ordered_sums;
3301 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3302 bool has_extents = false;
3303 bool need_find_last_extent = true;
3306 INIT_LIST_HEAD(&ordered_sums);
3308 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3309 nr * sizeof(u32), GFP_NOFS);
3313 first_key.objectid = (u64)-1;
3315 ins_sizes = (u32 *)ins_data;
3316 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3318 for (i = 0; i < nr; i++) {
3319 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3320 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3322 ret = btrfs_insert_empty_items(trans, log, dst_path,
3323 ins_keys, ins_sizes, nr);
3329 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3330 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3331 dst_path->slots[0]);
3333 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3335 if (i == (nr - 1))
3336 last_key = ins_keys[i];
3338 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3339 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3341 struct btrfs_inode_item);
3342 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3343 inode, inode_only == LOG_INODE_EXISTS);
3345 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3346 src_offset, ins_sizes[i]);
3350 * We set need_find_last_extent here in case we know we were
3351 * processing other items and then walk into the first extent in
3352 * the inode. If we don't hit an extent then nothing changes,
3353 * we'll do the last search the next time around.
3355 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
3357 if (first_key.objectid == (u64)-1)
3358 first_key = ins_keys[i];
3360 need_find_last_extent = false;
3363 /* take a reference on file data extents so that truncates
3364 * or deletes of this inode don't have to relog the inode
3367 if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY &&
3370 extent = btrfs_item_ptr(src, start_slot + i,
3371 struct btrfs_file_extent_item);
3373 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3376 found_type = btrfs_file_extent_type(src, extent);
3377 if (found_type == BTRFS_FILE_EXTENT_REG) {
3379 ds = btrfs_file_extent_disk_bytenr(src,
3381 /* ds == 0 is a hole */
3385 dl = btrfs_file_extent_disk_num_bytes(src,
3387 cs = btrfs_file_extent_offset(src, extent);
3388 cl = btrfs_file_extent_num_bytes(src,
3390 if (btrfs_file_extent_compression(src,
3396 ret = btrfs_lookup_csums_range(
3397 log->fs_info->csum_root,
3398 ds + cs, ds + cs + cl - 1,
3401 btrfs_release_path(dst_path);
3409 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3410 btrfs_release_path(dst_path);
3414 * we have to do this after the loop above to avoid changing the
3415 * log tree while trying to change the log tree.
3418 while (!list_empty(&ordered_sums)) {
3419 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3420 struct btrfs_ordered_sum,
3423 ret = btrfs_csum_file_blocks(trans, log, sums);
3424 list_del(&sums->list);
3431 if (need_find_last_extent && *last_extent == first_key.offset) {
3433 * We don't have any leaves between our current one and the one
3434 * we processed before that can have file extent items for our
3435 * inode (and have a generation number smaller than our current
3436 * transaction id).
3438 need_find_last_extent = false;
3442 * Because we use btrfs_search_forward we could skip leaves that were
3443 * not modified and then assume *last_extent is valid when it really
3444 * isn't. So back up to the previous leaf and read the end of the last
3445 * extent before we go and fill in holes.
3447 if (need_find_last_extent) {
3450 ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path);
3455 if (src_path->slots[0])
3456 src_path->slots[0]--;
3457 src = src_path->nodes[0];
3458 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
3459 if (key.objectid != btrfs_ino(inode) ||
3460 key.type != BTRFS_EXTENT_DATA_KEY)
3462 extent = btrfs_item_ptr(src, src_path->slots[0],
3463 struct btrfs_file_extent_item);
3464 if (btrfs_file_extent_type(src, extent) ==
3465 BTRFS_FILE_EXTENT_INLINE) {
3466 len = btrfs_file_extent_inline_len(src,
3469 *last_extent = ALIGN(key.offset + len,
3472 len = btrfs_file_extent_num_bytes(src, extent);
3473 *last_extent = key.offset + len;
3477 /* So we did prev_leaf, now we need to move to the next leaf, but a few
3478 * things could have happened
3480 * 1) A merge could have happened, so we could currently be on a leaf
3481 * that holds what we were copying in the first place.
3482 * 2) A split could have happened, and now not all of the items we want
3483 * are on the same leaf.
3485 * So we need to adjust how we search for holes, we need to drop the
3486 * path and re-search for the first extent key we found, and then walk
3487 * forward until we hit the last one we copied.
3489 if (need_find_last_extent) {
3490 /* btrfs_prev_leaf could return 1 without releasing the path */
3491 btrfs_release_path(src_path);
3492 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key,
3497 src = src_path->nodes[0];
3498 i = src_path->slots[0];
3504 * Now go through and fill in any holes we may have, to make
3505 * sure that holes are punched for those areas that previously
3506 * had extents.
3512 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
3513 ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path);
3517 src = src_path->nodes[0];
3521 btrfs_item_key_to_cpu(src, &key, i);
3522 if (!btrfs_comp_cpu_keys(&key, &last_key))
3524 if (key.objectid != btrfs_ino(inode) ||
3525 key.type != BTRFS_EXTENT_DATA_KEY) {
3529 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
3530 if (btrfs_file_extent_type(src, extent) ==
3531 BTRFS_FILE_EXTENT_INLINE) {
3532 len = btrfs_file_extent_inline_len(src, i, extent);
3533 extent_end = ALIGN(key.offset + len, log->sectorsize);
3535 len = btrfs_file_extent_num_bytes(src, extent);
3536 extent_end = key.offset + len;
3540 if (*last_extent == key.offset) {
3541 *last_extent = extent_end;
3544 offset = *last_extent;
3545 len = key.offset - *last_extent;
3546 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
3547 offset, 0, 0, len, 0, len, 0,
3551 *last_extent = extent_end;
3554 * Need to let the callers know we dropped the path so they should
3555 * re-search.
3557 if (!ret && need_find_last_extent)
3558 ret = 1;
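
/*
 * The hole filling above boils down to interval arithmetic: whenever the
 * next extent starts past *last_extent, the gap must be logged as a hole
 * (a file extent item with disk_bytenr 0). A compact model, with made-up
 * names and assuming offsets are already block aligned:
 */
static inline u64 hole_to_log(u64 last_extent, u64 next_offset, u64 *len)
{
	if (next_offset <= last_extent) {
		*len = 0;		/* contiguous, nothing to punch */
		return 0;
	}
	*len = next_offset - last_extent;
	return last_extent;		/* offset of the hole to insert */
}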
3562 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
3564 struct extent_map *em1, *em2;
3566 em1 = list_entry(a, struct extent_map, list);
3567 em2 = list_entry(b, struct extent_map, list);
3569 if (em1->start < em2->start)
3571 else if (em1->start > em2->start)
3576 static int log_one_extent(struct btrfs_trans_handle *trans,
3577 struct inode *inode, struct btrfs_root *root,
3578 struct extent_map *em, struct btrfs_path *path,
3579 struct list_head *logged_list)
3581 struct btrfs_root *log = root->log_root;
3582 struct btrfs_file_extent_item *fi;
3583 struct extent_buffer *leaf;
3584 struct btrfs_ordered_extent *ordered;
3585 struct list_head ordered_sums;
3586 struct btrfs_map_token token;
3587 struct btrfs_key key;
3588 u64 mod_start = em->mod_start;
3589 u64 mod_len = em->mod_len;
3592 u64 extent_offset = em->start - em->orig_start;
3595 bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3596 int extent_inserted = 0;
3598 INIT_LIST_HEAD(&ordered_sums);
3599 btrfs_init_map_token(&token);
3601 ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
3602 em->start + em->len, NULL, 0, 1,
3603 sizeof(*fi), &extent_inserted);
3607 if (!extent_inserted) {
3608 key.objectid = btrfs_ino(inode);
3609 key.type = BTRFS_EXTENT_DATA_KEY;
3610 key.offset = em->start;
3612 ret = btrfs_insert_empty_item(trans, log, path, &key,
3617 leaf = path->nodes[0];
3618 fi = btrfs_item_ptr(leaf, path->slots[0],
3619 struct btrfs_file_extent_item);
3621 btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
3623 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3625 btrfs_set_token_file_extent_type(leaf, fi,
3626 BTRFS_FILE_EXTENT_PREALLOC,
3629 btrfs_set_token_file_extent_type(leaf, fi,
3630 BTRFS_FILE_EXTENT_REG,
3632 if (em->block_start == EXTENT_MAP_HOLE)
3636 block_len = max(em->block_len, em->orig_block_len);
3637 if (em->compress_type != BTRFS_COMPRESS_NONE) {
3638 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
3641 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
3643 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
3644 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
3646 extent_offset, &token);
3647 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
3650 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
3651 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
3655 btrfs_set_token_file_extent_offset(leaf, fi,
3656 em->start - em->orig_start,
3658 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
3659 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
3660 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
3662 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
3663 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
3664 btrfs_mark_buffer_dirty(leaf);
3666 btrfs_release_path(path);
3675 * First check and see if our csums are on our outstanding ordered
3676 * extents.
3678 list_for_each_entry(ordered, logged_list, log_list) {
3679 struct btrfs_ordered_sum *sum;
3684 if (ordered->file_offset + ordered->len <= mod_start ||
3685 mod_start + mod_len <= ordered->file_offset)
3689 * We are going to copy all the csums on this ordered extent, so
3690 * go ahead and adjust mod_start and mod_len in case this
3691 * ordered extent has already been logged.
3693 if (ordered->file_offset > mod_start) {
3694 if (ordered->file_offset + ordered->len >=
3695 mod_start + mod_len)
3696 mod_len = ordered->file_offset - mod_start;
3698 * If we have this case
3700 * |--------- logged extent ---------|
3701 * |----- ordered extent ----|
3703 * Just don't mess with mod_start and mod_len, we'll
3704 * just end up logging more csums than we need and it
3705 * will be ok.
3708 if (ordered->file_offset + ordered->len <
3709 mod_start + mod_len) {
3710 mod_len = (mod_start + mod_len) -
3711 (ordered->file_offset + ordered->len);
3712 mod_start = ordered->file_offset +
3720 * To keep us from looping for the above case of an ordered
3721 * extent that falls inside of the logged extent.
3723 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
3727 if (ordered->csum_bytes_left) {
3728 btrfs_start_ordered_extent(inode, ordered, 0);
3729 wait_event(ordered->wait,
3730 ordered->csum_bytes_left == 0);
3733 list_for_each_entry(sum, &ordered->list, list) {
3734 ret = btrfs_csum_file_blocks(trans, log, sum);
3742 if (!mod_len || ret)
3745 if (em->compress_type) {
3746 csum_offset = 0;
3747 csum_len = block_len;
3748 } else {
3749 csum_offset = mod_start - em->start;
3750 csum_len = mod_len;
3753 /* block start is already adjusted for the file extent offset. */
3754 ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
3755 em->block_start + csum_offset,
3756 em->block_start + csum_offset +
3757 csum_len - 1, &ordered_sums, 0);
3761 while (!list_empty(&ordered_sums)) {
3762 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3763 struct btrfs_ordered_sum,
3766 ret = btrfs_csum_file_blocks(trans, log, sums);
3767 list_del(&sums->list);
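
/*
 * The mod_start/mod_len juggling above is interval trimming: ranges whose
 * csums are already covered by an ordered extent get cut out of
 * [mod_start, mod_start + mod_len). A standalone model of the cases, with
 * illustrative names:
 */
static inline void trim_csum_range(u64 *mod_start, u64 *mod_len,
				   u64 ord_start, u64 ord_len)
{
	u64 mod_end = *mod_start + *mod_len;
	u64 ord_end = ord_start + ord_len;

	if (ord_end <= *mod_start || mod_end <= ord_start)
		return;					/* no overlap */
	if (ord_start > *mod_start) {
		if (ord_end >= mod_end)			/* tail covered */
			*mod_len = ord_start - *mod_start;
		/* else fully inside: logging a few extra csums is ok */
	} else if (ord_end < mod_end) {			/* head covered */
		*mod_len = mod_end - ord_end;
		*mod_start = ord_end;
	} else {
		*mod_len = 0;				/* fully covered */
	}
}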
3774 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
3775 struct btrfs_root *root,
3776 struct inode *inode,
3777 struct btrfs_path *path,
3778 struct list_head *logged_list)
3780 struct extent_map *em, *n;
3781 struct list_head extents;
3782 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
3787 INIT_LIST_HEAD(&extents);
3789 write_lock(&tree->lock);
3790 test_gen = root->fs_info->last_trans_committed;
3792 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
3793 list_del_init(&em->list);
3796 * Just an arbitrary number; this can be really CPU intensive
3797 * once we start getting a lot of extents, and really once we
3798 * have a bunch of extents we just want to commit since it will
3799 * be faster.
3801 if (++num > 32768) {
3802 list_del_init(&tree->modified_extents);
3807 if (em->generation <= test_gen)
3809 /* Need a ref to keep it from getting evicted from cache */
3810 atomic_inc(&em->refs);
3811 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
3812 list_add_tail(&em->list, &extents);
3816 list_sort(NULL, &extents, extent_cmp);
3819 while (!list_empty(&extents)) {
3820 em = list_entry(extents.next, struct extent_map, list);
3822 list_del_init(&em->list);
3825 * If we had an error we just need to delete everybody from our
3826 * private list.
3829 clear_em_logging(tree, em);
3830 free_extent_map(em);
3834 write_unlock(&tree->lock);
3836 ret = log_one_extent(trans, inode, root, em, path, logged_list);
3837 write_lock(&tree->lock);
3838 clear_em_logging(tree, em);
3839 free_extent_map(em);
3841 WARN_ON(!list_empty(&extents));
3842 write_unlock(&tree->lock);
3844 btrfs_release_path(path);
3848 /* log a single inode in the tree log.
3849 * At least one parent directory for this inode must exist in the tree
3850 * or be logged already.
3852 * Any items from this inode changed by the current transaction are copied
3853 * to the log tree. An extra reference is taken on any extents in this
3854 * file, allowing us to avoid a whole pile of corner cases around logging
3855 * blocks that have been removed from the tree.
3857 * See LOG_INODE_ALL and related defines for a description of what inode_only
3858 * should be set to.
3860 * This handles both files and directories.
3862 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3863 struct btrfs_root *root, struct inode *inode,
3868 struct btrfs_path *path;
3869 struct btrfs_path *dst_path;
3870 struct btrfs_key min_key;
3871 struct btrfs_key max_key;
3872 struct btrfs_root *log = root->log_root;
3873 struct extent_buffer *src = NULL;
3874 LIST_HEAD(logged_list);
3875 u64 last_extent = 0;
3879 int ins_start_slot = 0;
3881 bool fast_search = false;
3882 u64 ino = btrfs_ino(inode);
3883 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3885 path = btrfs_alloc_path();
3888 dst_path = btrfs_alloc_path();
3890 btrfs_free_path(path);
3894 min_key.objectid = ino;
3895 min_key.type = BTRFS_INODE_ITEM_KEY;
3898 max_key.objectid = ino;
3901 /* today the code can only do partial logging of directories */
3902 if (S_ISDIR(inode->i_mode) ||
3903 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3904 &BTRFS_I(inode)->runtime_flags) &&
3905 inode_only == LOG_INODE_EXISTS))
3906 max_key.type = BTRFS_XATTR_ITEM_KEY;
3907 else
3908 max_key.type = (u8)-1;
3909 max_key.offset = (u64)-1;
3911 /* Only run delayed items if we are a dir or a new file */
3912 if (S_ISDIR(inode->i_mode) ||
3913 BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) {
3914 ret = btrfs_commit_inode_delayed_items(trans, inode);
3916 btrfs_free_path(path);
3917 btrfs_free_path(dst_path);
3922 mutex_lock(&BTRFS_I(inode)->log_mutex);
3924 btrfs_get_logged_extents(inode, &logged_list);
3927 * a brute force approach to making sure we get the most up-to-date
3928 * copies of everything.
3930 if (S_ISDIR(inode->i_mode)) {
3931 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
3933 if (inode_only == LOG_INODE_EXISTS)
3934 max_key_type = BTRFS_XATTR_ITEM_KEY;
3935 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
3937 if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3938 &BTRFS_I(inode)->runtime_flags)) {
3939 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
3940 &BTRFS_I(inode)->runtime_flags);
3941 ret = btrfs_truncate_inode_items(trans, log,
3943 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
3944 &BTRFS_I(inode)->runtime_flags) ||
3945 inode_only == LOG_INODE_EXISTS) {
3946 if (inode_only == LOG_INODE_ALL)
3948 max_key.type = BTRFS_XATTR_ITEM_KEY;
3949 ret = drop_objectid_items(trans, log, path, ino,
3952 if (inode_only == LOG_INODE_ALL)
3954 ret = log_inode_item(trans, log, dst_path, inode);
3967 path->keep_locks = 1;
3971 ret = btrfs_search_forward(root, &min_key,
3972 path, trans->transid);
3976 /* note, ins_nr might be > 0 here, cleanup outside the loop */
3977 if (min_key.objectid != ino)
3979 if (min_key.type > max_key.type)
3982 src = path->nodes[0];
3983 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
3986 } else if (!ins_nr) {
3987 ins_start_slot = path->slots[0];
3992 ret = copy_items(trans, inode, dst_path, path, &last_extent,
3993 ins_start_slot, ins_nr, inode_only);
3999 btrfs_release_path(path);
4003 ins_start_slot = path->slots[0];
4006 nritems = btrfs_header_nritems(path->nodes[0]);
4008 if (path->slots[0] < nritems) {
4009 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
4014 ret = copy_items(trans, inode, dst_path, path,
4015 &last_extent, ins_start_slot,
4016 ins_nr, inode_only);
4024 btrfs_release_path(path);
4026 if (min_key.offset < (u64)-1) {
4028 } else if (min_key.type < max_key.type) {
4036 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4037 ins_start_slot, ins_nr, inode_only);
4047 btrfs_release_path(path);
4048 btrfs_release_path(dst_path);
4050 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
4056 } else if (inode_only == LOG_INODE_ALL) {
4057 struct extent_map *em, *n;
4059 write_lock(&em_tree->lock);
4061 * We can't just remove every em if we're called for a ranged
4062 * fsync - that is, one that doesn't cover the whole possible
4063 * file range (0 to LLONG_MAX). This is because we can have
4064 * em's that fall outside the range we're logging and therefore
4065 * their ordered operations haven't completed yet
4066 * (btrfs_finish_ordered_io() not invoked yet). This means we
4067 * didn't get their respective file extent item in the fs/subvol
4068 * tree yet, and need to let the next fast fsync (one which
4069 * consults the list of modified extent maps) find the em so
4070 * that it logs a matching file extent item and waits for the
4071 * respective ordered operation to complete (if it's still
4074 * Removing every em outside the range we're logging would make
4075 * the next fast fsync not log their matching file extent items,
4076 * therefore making us lose data after a log replay.
4078 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
4080 const u64 mod_end = em->mod_start + em->mod_len - 1;
4082 if (em->mod_start >= start && mod_end <= end)
4083 list_del_init(&em->list);
4085 write_unlock(&em_tree->lock);
4088 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
4089 ret = log_directory_changes(trans, root, inode, path, dst_path);
4096 BTRFS_I(inode)->logged_trans = trans->transid;
4097 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
4100 btrfs_put_logged_extents(&logged_list);
4102 btrfs_submit_logged_extents(&logged_list, log);
4103 mutex_unlock(&BTRFS_I(inode)->log_mutex);
4105 btrfs_free_path(path);
4106 btrfs_free_path(dst_path);
4111 * follow the dentry parent pointers up the chain and see if any
4112 * of the directories in it require a full commit before they can
4113 * be logged. Returns zero if nothing special needs to be done or 1 if
4114 * a full commit is required.
4116 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
4117 struct inode *inode,
4118 struct dentry *parent,
4119 struct super_block *sb,
4123 struct btrfs_root *root;
4124 struct dentry *old_parent = NULL;
4125 struct inode *orig_inode = inode;
4128 * for a regular file, if its inode is already on disk, we don't
4129 * have to worry about the parents at all. This is because
4130 * we can use the last_unlink_trans field to record renames
4131 * and other fun in this file.
4133 if (S_ISREG(inode->i_mode) &&
4134 BTRFS_I(inode)->generation <= last_committed &&
4135 BTRFS_I(inode)->last_unlink_trans <= last_committed)
4138 if (!S_ISDIR(inode->i_mode)) {
4139 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
4141 inode = parent->d_inode;
4146 * If we are logging a directory then we start with our inode,
4147 * not our parent's inode, so we need to skip setting the
4148 * logged_trans so that further down in the log code we don't
4149 * think this inode has already been logged.
4151 if (inode != orig_inode)
4152 BTRFS_I(inode)->logged_trans = trans->transid;
4155 if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
4156 root = BTRFS_I(inode)->root;
4159 * make sure any commits to the log are forced
4160 * to be full commits
4162 btrfs_set_log_full_commit(root->fs_info, trans);
4167 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
4170 if (IS_ROOT(parent))
4173 parent = dget_parent(parent);
4175 old_parent = parent;
4176 inode = parent->d_inode;
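
/*
 * Both this walker and btrfs_log_inode_parent() below use the same
 * dget_parent() idiom: take a reference on the new parent before
 * dropping the one held on the previous parent, so the dentry chain
 * can't be freed under us. A minimal sketch of just that pattern
 * (dput(NULL) is a no-op):
 */
static inline void walk_dentry_chain(struct dentry *dentry)
{
	struct dentry *parent, *old_parent = NULL;

	while (!IS_ROOT(dentry)) {
		parent = dget_parent(dentry);	/* takes a reference */
		dput(old_parent);		/* drop the previous one */
		old_parent = parent;
		dentry = parent;
	}
	dput(old_parent);
}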
4185 * helper function around btrfs_log_inode to make sure newly created
4186 * parent directories also end up in the log. Only minimal inode and
4187 * backref logging is done for any parent directories that are older than
4188 * the last committed transaction
4190 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
4191 struct btrfs_root *root, struct inode *inode,
4192 struct dentry *parent,
4196 struct btrfs_log_ctx *ctx)
4198 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
4199 struct super_block *sb;
4200 struct dentry *old_parent = NULL;
4202 u64 last_committed = root->fs_info->last_trans_committed;
4206 if (btrfs_test_opt(root, NOTREELOG)) {
4212 * If the previous transaction commit didn't complete, we have to do a
4213 * full commit ourselves.
4215 if (root->fs_info->last_trans_log_full_commit >
4216 root->fs_info->last_trans_committed) {
4221 if (root != BTRFS_I(inode)->root ||
4222 btrfs_root_refs(&root->root_item) == 0) {
4227 ret = check_parent_dirs_for_sync(trans, inode, parent,
4228 sb, last_committed);
4232 if (btrfs_inode_in_log(inode, trans->transid)) {
4233 ret = BTRFS_NO_LOG_SYNC;
4237 ret = start_log_trans(trans, root, ctx);
4241 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end);
4246 * for a regular file, if its inode is already on disk, we don't
4247 * have to worry about the parents at all. This is because
4248 * we can use the last_unlink_trans field to record renames
4249 * and other fun in this file.
4251 if (S_ISREG(inode->i_mode) &&
4252 BTRFS_I(inode)->generation <= last_committed &&
4253 BTRFS_I(inode)->last_unlink_trans <= last_committed) {
4258 inode_only = LOG_INODE_EXISTS;
4260 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
4263 inode = parent->d_inode;
4264 if (root != BTRFS_I(inode)->root)
4267 if (BTRFS_I(inode)->generation >
4268 root->fs_info->last_trans_committed) {
4269 ret = btrfs_log_inode(trans, root, inode, inode_only,
4274 if (IS_ROOT(parent))
4277 parent = dget_parent(parent);
4279 old_parent = parent;
4285 btrfs_set_log_full_commit(root->fs_info, trans);
4290 btrfs_remove_log_ctx(root, ctx);
4291 btrfs_end_log_trans(root);
4297 * it is not safe to log dentry if the chunk root has added new
4298 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
4299 * If this returns 1, you must commit the transaction to safely get your
4300 * data on disk.
4302 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
4303 struct btrfs_root *root, struct dentry *dentry,
4306 struct btrfs_log_ctx *ctx)
4308 struct dentry *parent = dget_parent(dentry);
4311 ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent,
4312 start, end, 0, ctx);
4319 * should be called during mount to recover and replay any log trees
4320 * from the FS
4322 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
4325 struct btrfs_path *path;
4326 struct btrfs_trans_handle *trans;
4327 struct btrfs_key key;
4328 struct btrfs_key found_key;
4329 struct btrfs_key tmp_key;
4330 struct btrfs_root *log;
4331 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
4332 struct walk_control wc = {
4333 .process_func = process_one_buffer,
4334 .stage = 0,
4337 path = btrfs_alloc_path();
4341 fs_info->log_root_recovering = 1;
4343 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4344 if (IS_ERR(trans)) {
4345 ret = PTR_ERR(trans);
4352 ret = walk_log_tree(trans, log_root_tree, &wc);
4354 btrfs_error(fs_info, ret, "Failed to pin buffers while "
4355 "recovering log root tree.");
4360 key.objectid = BTRFS_TREE_LOG_OBJECTID;
4361 key.offset = (u64)-1;
4362 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
4365 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
4368 btrfs_error(fs_info, ret,
4369 "Couldn't find tree log root.");
4373 if (path->slots[0] == 0)
4377 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
4379 btrfs_release_path(path);
4380 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4383 log = btrfs_read_fs_root(log_root_tree, &found_key);
4386 btrfs_error(fs_info, ret,
4387 "Couldn't read tree log root.");
4391 tmp_key.objectid = found_key.offset;
4392 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
4393 tmp_key.offset = (u64)-1;
4395 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
4396 if (IS_ERR(wc.replay_dest)) {
4397 ret = PTR_ERR(wc.replay_dest);
4398 free_extent_buffer(log->node);
4399 free_extent_buffer(log->commit_root);
4401 btrfs_error(fs_info, ret, "Couldn't read target root "
4402 "for tree log recovery.");
4406 wc.replay_dest->log_root = log;
4407 btrfs_record_root_in_trans(trans, wc.replay_dest);
4408 ret = walk_log_tree(trans, log, &wc);
4410 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
4411 ret = fixup_inode_link_counts(trans, wc.replay_dest,
4415 key.offset = found_key.offset - 1;
4416 wc.replay_dest->log_root = NULL;
4417 free_extent_buffer(log->node);
4418 free_extent_buffer(log->commit_root);
4424 if (found_key.offset == 0)
4427 btrfs_release_path(path);
4429 /* step one is to pin it all, step two is to replay just inodes */
4432 wc.process_func = replay_one_buffer;
4433 wc.stage = LOG_WALK_REPLAY_INODES;
4436 /* step three is to replay everything */
4437 if (wc.stage < LOG_WALK_REPLAY_ALL) {
4442 btrfs_free_path(path);
4444 /* step four: commit the transaction, which also unpins the blocks */
4445 ret = btrfs_commit_transaction(trans, fs_info->tree_root);
4449 free_extent_buffer(log_root_tree->node);
4450 log_root_tree->log_root = NULL;
4451 fs_info->log_root_recovering = 0;
4452 kfree(log_root_tree);
4457 btrfs_end_transaction(wc.trans, fs_info->tree_root);
4458 btrfs_free_path(path);
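
/*
 * The staged walk above, written out as a plain loop for clarity. This is
 * only a sketch: the real function pins blocks via the log root tree and
 * re-reads every per-subvolume log root on each pass instead of calling a
 * helper like this.
 */
static inline int replay_log_stages(struct btrfs_trans_handle *trans,
				    struct btrfs_root *log,
				    struct walk_control *wc)
{
	int ret;

	for (wc->stage = LOG_WALK_PIN_ONLY;
	     wc->stage <= LOG_WALK_REPLAY_ALL; wc->stage++) {
		/* stage 0 only pins blocks; later stages replay items */
		wc->process_func = wc->stage == LOG_WALK_PIN_ONLY ?
				   process_one_buffer : replay_one_buffer;
		ret = walk_log_tree(trans, log, wc);
		if (ret)
			return ret;
	}
	return 0;
}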
4463 * there are some corner cases where we want to force a full
4464 * commit instead of allowing a directory to be logged.
4466 * They revolve around files that were unlinked from the directory, and
4467 * this function updates the parent directory so that a full commit is
4468 * properly done if it is fsync'd later after the unlinks are done.
4470 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
4471 struct inode *dir, struct inode *inode,
4475 * when we're logging a file, if it hasn't been renamed
4476 * or unlinked, and its inode is fully committed on disk,
4477 * we don't have to worry about walking up the directory chain
4478 * to log its parents.
4480 * So, we use the last_unlink_trans field to put this transid
4481 * into the file. When the file is logged we check it and
4482 * don't log the parents if the file is fully on disk.
4484 if (S_ISREG(inode->i_mode))
4485 BTRFS_I(inode)->last_unlink_trans = trans->transid;
4488 * if this directory was already logged any new
4489 * names for this file/dir will get recorded
4492 if (BTRFS_I(dir)->logged_trans == trans->transid)
4496 * if the inode we're about to unlink was logged,
4497 * the log will be properly updated for any new names
4499 if (BTRFS_I(inode)->logged_trans == trans->transid)
4503 * when renaming files across directories, if the directory
4504 * we're unlinking from gets fsync'd later on, there's
4505 * no way to find the destination directory later and fsync it
4506 * properly. So, we have to be conservative and force commits
4507 * so the new name gets discovered.
4512 /* we can safely do the unlink without any special recording */
4516 BTRFS_I(dir)->last_unlink_trans = trans->transid;
4520 * Call this after adding a new name for a file and it will properly
4521 * update the log to reflect the new name.
4523 * It will return zero if all goes well, and it will return 1 if a
4524 * full transaction commit is required.
4526 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
4527 struct inode *inode, struct inode *old_dir,
4528 struct dentry *parent)
4530 struct btrfs_root * root = BTRFS_I(inode)->root;
4533 * this will force the logging code to walk the dentry chain
4534 * up to the root
4536 if (S_ISREG(inode->i_mode))
4537 BTRFS_I(inode)->last_unlink_trans = trans->transid;
4540 * if this inode hasn't been logged and directory we're renaming it
4541 * from hasn't been logged, we don't need to log it
4543 if (BTRFS_I(inode)->logged_trans <=
4544 root->fs_info->last_trans_committed &&
4545 (!old_dir || BTRFS_I(old_dir)->logged_trans <=
4546 root->fs_info->last_trans_committed))
4549 return btrfs_log_inode_parent(trans, root, inode, parent, 0,
4550 LLONG_MAX, 1, NULL);
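
/*
 * How the two rename-time hooks in this file pair up, sketched; the real
 * caller is the rename path in inode.c, and this helper name is made up:
 */
static inline int rename_log_hooks(struct btrfs_trans_handle *trans,
				   struct inode *old_dir, struct inode *inode,
				   struct dentry *new_parent)
{
	/* before the entries move: may force a later full commit */
	btrfs_record_unlink_dir(trans, old_dir, inode, 1 /* for_rename */);

	/* ... directory items are moved between directories here ... */

	/* afterwards: make the new name discoverable from the log */
	return btrfs_log_new_name(trans, inode, old_dir, new_parent);
}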