2 * Copyright (C) 2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/list_sort.h>
23 #include "transaction.h"
26 #include "print-tree.h"
32 /* magic values for the inode_only field in btrfs_log_inode:
34 * LOG_INODE_ALL means to log everything
35 * LOG_INODE_EXISTS means to log just enough to recreate the inode
38 #define LOG_INODE_ALL 0
39 #define LOG_INODE_EXISTS 1
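/*
 * For illustration (a hedged example, not a quote from the code below): a
 * plain fsync of a regular file logs that inode with LOG_INODE_ALL, while
 * parent directories may only be logged with LOG_INODE_EXISTS, i.e. just
 * enough for replay to recreate the directory inode without copying all of
 * its items.
 */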
42 * directory trouble cases
44 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
45 * log, we must force a full commit before doing an fsync of the directory
46 * where the unlink was done.
47 * ---> record transid of last unlink/rename per directory
51 * rename foo/some_dir foo2/some_dir
53 * fsync foo/some_dir/some_file
55 * The fsync above will unlink the original some_dir without recording
56 * it in its new location (foo2). After a crash, some_dir will be gone
57 * unless the fsync of some_file forces a full commit
59 * 2) we must log any new names for any file or dir that is in the fsync
60 * log. ---> check inode while renaming/linking.
62 * 2a) we must log any new names for any file or dir during rename
63 * when the directory they are being removed from was logged.
64 * ---> check inode and old parent dir during rename
66 * 2a is actually the more important variant. With the extra logging
67 * a crash might unlink the old name without recreating the new one
69 * 3) after a crash, we must go through any directories with a link count
70 * of zero and redo the rm -rf
77 * The directory f1 was fully removed from the FS, but fsync was never
78 * called on f1, only its parent dir. After a crash the rm -rf must
79 * be replayed. This must be able to recurse down the entire
80 * directory tree. The inode link count fixup code takes care of the
85 * stages for the tree walking. The first
86 * stage (0) is to only pin down the blocks we find;
87 * the second stage (1) is to make sure that all the inodes
88 * we find in the log are created in the subvolume.
90 * The last stage is to deal with directories and links and extents
91 * and all the other fun semantics
93 #define LOG_WALK_PIN_ONLY 0
94 #define LOG_WALK_REPLAY_INODES 1
95 #define LOG_WALK_REPLAY_ALL 2
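/*
 * A rough sketch of how these stages are used at mount time during log
 * recovery (the real sequencing lives in btrfs_recover_log_trees()):
 *
 *	wc.stage = LOG_WALK_PIN_ONLY;		walk_log_tree(trans, log, &wc);
 *	wc.stage = LOG_WALK_REPLAY_INODES;	walk_log_tree(trans, log, &wc);
 *	wc.stage = LOG_WALK_REPLAY_ALL;		walk_log_tree(trans, log, &wc);
 */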
97 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
98 struct btrfs_root *root, struct inode *inode,
100 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
101 struct btrfs_root *root,
102 struct btrfs_path *path, u64 objectid);
103 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
104 struct btrfs_root *root,
105 struct btrfs_root *log,
106 struct btrfs_path *path,
107 u64 dirid, int del_all);
110 * tree logging is a special write ahead log used to make sure that
111 * fsyncs and O_SYNCs can happen without doing full tree commits.
113 * Full tree commits are expensive because they require commonly
114 * modified blocks to be recowed, creating many dirty pages in the
115 * extent tree and a 4x-6x higher write load than ext3.
117 * Instead of doing a tree commit on every fsync, we use the
118 * key ranges and transaction ids to find items for a given file or directory
119 * that have changed in this transaction. Those items are copied into
120 * a special tree (one per subvolume root), that tree is written to disk
121 * and then the fsync is considered complete.
123 * After a crash, items are copied out of the log-tree back into the
124 * subvolume tree. Any file data extents found are recorded in the extent
125 * allocation tree, and the log-tree freed.
127 * The log tree is read three times: once to pin down all the extents it is
128 * using in ram, once to create all the inodes logged in the tree,
129 * and once to do all the other items.
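/*
 * As a rough example of the fast path described above:
 *
 *	write(fd, buf, len);
 *	fsync(fd);
 *
 * copies the inode item, the new file extent items and their checksums into
 * the per-subvolume log tree and writes that tree out, instead of committing
 * the whole transaction.
 */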
133 * start a sub transaction and set up the log tree.
134 * This increments the log tree writer count to make the people
135 * syncing the tree wait for us to finish.
137 static int start_log_trans(struct btrfs_trans_handle *trans,
138 struct btrfs_root *root)
143 mutex_lock(&root->log_mutex);
144 if (root->log_root) {
145 if (!root->log_start_pid) {
146 root->log_start_pid = current->pid;
147 root->log_multiple_pids = false;
148 } else if (root->log_start_pid != current->pid) {
149 root->log_multiple_pids = true;
152 atomic_inc(&root->log_batch);
153 atomic_inc(&root->log_writers);
154 mutex_unlock(&root->log_mutex);
157 root->log_multiple_pids = false;
158 root->log_start_pid = current->pid;
159 mutex_lock(&root->fs_info->tree_log_mutex);
160 if (!root->fs_info->log_root_tree) {
161 ret = btrfs_init_log_root_tree(trans, root->fs_info);
165 if (err == 0 && !root->log_root) {
166 ret = btrfs_add_log_tree(trans, root);
170 mutex_unlock(&root->fs_info->tree_log_mutex);
171 atomic_inc(&root->log_batch);
172 atomic_inc(&root->log_writers);
173 mutex_unlock(&root->log_mutex);
178 * returns 0 if there was a log transaction running and we were able
179 * to join, or returns -ENOENT if there were no transactions
182 static int join_running_log_trans(struct btrfs_root *root)
190 mutex_lock(&root->log_mutex);
191 if (root->log_root) {
193 atomic_inc(&root->log_writers);
195 mutex_unlock(&root->log_mutex);
200 * This either makes the current running log transaction wait
201 * until you call btrfs_end_log_trans() or it makes any future
202 * log transactions wait until you call btrfs_end_log_trans()
204 int btrfs_pin_log_trans(struct btrfs_root *root)
208 mutex_lock(&root->log_mutex);
209 atomic_inc(&root->log_writers);
210 mutex_unlock(&root->log_mutex);
215 * indicate we're done making changes to the log tree
216 * and wake up anyone waiting to do a sync
218 void btrfs_end_log_trans(struct btrfs_root *root)
220 if (atomic_dec_and_test(&root->log_writers)) {
222 if (waitqueue_active(&root->log_writer_wait))
223 wake_up(&root->log_writer_wait);
229 * the walk control struct is used to pass state down the chain when
230 * processing the log tree. The stage field tells us which part
231 * of the log tree processing we are currently doing. The others
232 * are state fields used for that specific part
234 struct walk_control {
235 /* should we free the extent on disk when done? This is used
236 * at transaction commit time while freeing a log tree
240 /* should we write out the extent buffer? This is used
241 * while flushing the log tree to disk during a sync
245 /* should we wait for the extent buffer io to finish? Also used
246 * while flushing the log tree to disk for a sync
250 /* pin only walk, we record which extents on disk belong to the
255 /* what stage of the replay code we're currently in */
258 /* the root we are currently replaying */
259 struct btrfs_root *replay_dest;
261 /* the trans handle for the current replay */
262 struct btrfs_trans_handle *trans;
264 /* the function that gets used to process blocks we find in the
265 * tree. Note the extent_buffer might not be up to date when it is
266 * passed in, and it must be checked or read if you need the data
269 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
270 struct walk_control *wc, u64 gen);
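/*
 * A minimal sketch of how a walk_control is typically set up; see
 * free_log_tree() below for a real caller:
 *
 *	struct walk_control wc = {
 *		.free = 1,
 *		.process_func = process_one_buffer
 *	};
 *	ret = walk_log_tree(trans, log, &wc);
 */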
274 * process_func used to pin down extents, write them or wait on them
276 static int process_one_buffer(struct btrfs_root *log,
277 struct extent_buffer *eb,
278 struct walk_control *wc, u64 gen)
281 btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
284 if (btrfs_buffer_uptodate(eb, gen, 0)) {
286 btrfs_write_tree_block(eb);
288 btrfs_wait_tree_block_writeback(eb);
294 * Item overwrite used by replay and tree logging. eb, slot and key all refer
295 * to the src data we are copying out.
297 * root is the tree we are copying into, and path is a scratch
298 * path for use in this function (it should be released on entry and
299 * will be released on exit).
301 * If the key is already in the destination tree the existing item is
302 * overwritten. If the existing item isn't big enough, it is extended.
303 * If it is too large, it is truncated.
305 * If the key isn't in the destination yet, a new item is inserted.
307 static noinline int overwrite_item(struct btrfs_trans_handle *trans,
308 struct btrfs_root *root,
309 struct btrfs_path *path,
310 struct extent_buffer *eb, int slot,
311 struct btrfs_key *key)
315 u64 saved_i_size = 0;
316 int save_old_i_size = 0;
317 unsigned long src_ptr;
318 unsigned long dst_ptr;
319 int overwrite_root = 0;
320 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
322 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
325 item_size = btrfs_item_size_nr(eb, slot);
326 src_ptr = btrfs_item_ptr_offset(eb, slot);
328 /* look for the key in the destination tree */
329 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
336 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
338 if (dst_size != item_size)
341 if (item_size == 0) {
342 btrfs_release_path(path);
345 dst_copy = kmalloc(item_size, GFP_NOFS);
346 src_copy = kmalloc(item_size, GFP_NOFS);
347 if (!dst_copy || !src_copy) {
348 btrfs_release_path(path);
354 read_extent_buffer(eb, src_copy, src_ptr, item_size);
356 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
357 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
359 ret = memcmp(dst_copy, src_copy, item_size);
364 * they have the same contents, just return, this saves
365 * us from cowing blocks in the destination tree and doing
366 * extra writes that may not have been done by a previous
370 btrfs_release_path(path);
375 * We need to load the old nbytes into the inode so when we
376 * replay the extents we've logged we get the right nbytes.
379 struct btrfs_inode_item *item;
382 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
383 struct btrfs_inode_item);
384 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
385 item = btrfs_item_ptr(eb, slot,
386 struct btrfs_inode_item);
387 btrfs_set_inode_nbytes(eb, item, nbytes);
389 } else if (inode_item) {
390 struct btrfs_inode_item *item;
393 * New inode, set nbytes to 0 so that the nbytes comes out
394 * properly when we replay the extents.
396 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
397 btrfs_set_inode_nbytes(eb, item, 0);
400 btrfs_release_path(path);
401 /* try to insert the key into the destination tree */
402 ret = btrfs_insert_empty_item(trans, root, path,
405 /* make sure any existing item is the correct size */
406 if (ret == -EEXIST) {
408 found_size = btrfs_item_size_nr(path->nodes[0],
410 if (found_size > item_size)
411 btrfs_truncate_item(trans, root, path, item_size, 1);
412 else if (found_size < item_size)
413 btrfs_extend_item(trans, root, path,
414 item_size - found_size);
418 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
421 /* don't overwrite an existing inode if the generation number
422 * was logged as zero. This is done when the tree logging code
423 * is just logging an inode to make sure it exists after recovery.
425 * Also, don't overwrite i_size on directories during replay.
426 * log replay inserts and removes directory items based on the
427 * state of the tree found in the subvolume, and i_size is modified
430 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
431 struct btrfs_inode_item *src_item;
432 struct btrfs_inode_item *dst_item;
434 src_item = (struct btrfs_inode_item *)src_ptr;
435 dst_item = (struct btrfs_inode_item *)dst_ptr;
437 if (btrfs_inode_generation(eb, src_item) == 0)
440 if (overwrite_root &&
441 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
442 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
444 saved_i_size = btrfs_inode_size(path->nodes[0],
449 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
452 if (save_old_i_size) {
453 struct btrfs_inode_item *dst_item;
454 dst_item = (struct btrfs_inode_item *)dst_ptr;
455 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
458 /* make sure the generation is filled in */
459 if (key->type == BTRFS_INODE_ITEM_KEY) {
460 struct btrfs_inode_item *dst_item;
461 dst_item = (struct btrfs_inode_item *)dst_ptr;
462 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
463 btrfs_set_inode_generation(path->nodes[0], dst_item,
468 btrfs_mark_buffer_dirty(path->nodes[0]);
469 btrfs_release_path(path);
474 * simple helper to read an inode off the disk from a given root.
475 * This can only be called for subvolume roots and not for the log.
477 static noinline struct inode *read_one_inode(struct btrfs_root *root,
480 struct btrfs_key key;
483 key.objectid = objectid;
484 key.type = BTRFS_INODE_ITEM_KEY;
486 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
489 } else if (is_bad_inode(inode)) {
496 /* replays a single extent in 'eb' at 'slot' with 'key' into the
497 * subvolume 'root'. path is released on entry and should be released
500 * extents in the log tree have not been allocated out of the extent
501 * tree yet. So, this completes the allocation, taking a reference
502 * as required if the extent already exists or creating a new extent
503 * if it isn't in the extent allocation tree yet.
505 * The extent is inserted into the file, dropping any existing extents
506 * from the file that overlap the new one.
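/*
 * A hedged example of what that means: replaying a logged REG extent that
 * covers file range [4k, 16k) when the subvolume already has an extent item
 * covering [0, 8k) first drops the overlapping part via btrfs_drop_extents()
 * and then inserts the logged file extent item, adding a reference to the
 * disk extent if it already exists in the extent allocation tree.
 */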
508 static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
509 struct btrfs_root *root,
510 struct btrfs_path *path,
511 struct extent_buffer *eb, int slot,
512 struct btrfs_key *key)
516 u64 start = key->offset;
518 struct btrfs_file_extent_item *item;
519 struct inode *inode = NULL;
523 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
524 found_type = btrfs_file_extent_type(eb, item);
526 if (found_type == BTRFS_FILE_EXTENT_REG ||
527 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
528 nbytes = btrfs_file_extent_num_bytes(eb, item);
529 extent_end = start + nbytes;
532 * We don't add to the inode's nbytes if we are prealloc or a
535 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
537 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
538 size = btrfs_file_extent_inline_len(eb, item);
539 nbytes = btrfs_file_extent_ram_bytes(eb, item);
540 extent_end = ALIGN(start + size, root->sectorsize);
546 inode = read_one_inode(root, key->objectid);
553 * first check to see if we already have this extent in the
554 * file. This must be done before the btrfs_drop_extents run
555 * so we don't try to drop this extent.
557 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
561 (found_type == BTRFS_FILE_EXTENT_REG ||
562 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
563 struct btrfs_file_extent_item cmp1;
564 struct btrfs_file_extent_item cmp2;
565 struct btrfs_file_extent_item *existing;
566 struct extent_buffer *leaf;
568 leaf = path->nodes[0];
569 existing = btrfs_item_ptr(leaf, path->slots[0],
570 struct btrfs_file_extent_item);
572 read_extent_buffer(eb, &cmp1, (unsigned long)item,
574 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
578 * we already have a pointer to this exact extent,
579 * we don't have to do anything
581 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
582 btrfs_release_path(path);
586 btrfs_release_path(path);
588 /* drop any overlapping extents */
589 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
592 if (found_type == BTRFS_FILE_EXTENT_REG ||
593 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
595 unsigned long dest_offset;
596 struct btrfs_key ins;
598 ret = btrfs_insert_empty_item(trans, root, path, key,
601 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
603 copy_extent_buffer(path->nodes[0], eb, dest_offset,
604 (unsigned long)item, sizeof(*item));
606 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
607 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
608 ins.type = BTRFS_EXTENT_ITEM_KEY;
609 offset = key->offset - btrfs_file_extent_offset(eb, item);
611 if (ins.objectid > 0) {
614 LIST_HEAD(ordered_sums);
616 * is this extent already allocated in the extent
617 * allocation tree? If so, just add a reference
619 ret = btrfs_lookup_extent(root, ins.objectid,
622 ret = btrfs_inc_extent_ref(trans, root,
623 ins.objectid, ins.offset,
624 0, root->root_key.objectid,
625 key->objectid, offset, 0);
629 * insert the extent pointer in the extent
632 ret = btrfs_alloc_logged_file_extent(trans,
633 root, root->root_key.objectid,
634 key->objectid, offset, &ins);
637 btrfs_release_path(path);
639 if (btrfs_file_extent_compression(eb, item)) {
640 csum_start = ins.objectid;
641 csum_end = csum_start + ins.offset;
643 csum_start = ins.objectid +
644 btrfs_file_extent_offset(eb, item);
645 csum_end = csum_start +
646 btrfs_file_extent_num_bytes(eb, item);
649 ret = btrfs_lookup_csums_range(root->log_root,
650 csum_start, csum_end - 1,
653 while (!list_empty(&ordered_sums)) {
654 struct btrfs_ordered_sum *sums;
655 sums = list_entry(ordered_sums.next,
656 struct btrfs_ordered_sum,
658 ret = btrfs_csum_file_blocks(trans,
659 root->fs_info->csum_root,
662 list_del(&sums->list);
666 btrfs_release_path(path);
668 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
669 /* inline extents are easy, we just overwrite them */
670 ret = overwrite_item(trans, root, path, eb, slot, key);
674 inode_add_bytes(inode, nbytes);
675 ret = btrfs_update_inode(trans, root, inode);
683 * when cleaning up conflicts between the directory names in the
684 * subvolume, directory names in the log and directory names in the
685 * inode back references, we may have to unlink inodes from directories.
687 * This is a helper function to do the unlink of a specific directory
690 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
691 struct btrfs_root *root,
692 struct btrfs_path *path,
694 struct btrfs_dir_item *di)
699 struct extent_buffer *leaf;
700 struct btrfs_key location;
703 leaf = path->nodes[0];
705 btrfs_dir_item_key_to_cpu(leaf, di, &location);
706 name_len = btrfs_dir_name_len(leaf, di);
707 name = kmalloc(name_len, GFP_NOFS);
711 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
712 btrfs_release_path(path);
714 inode = read_one_inode(root, location.objectid);
720 ret = link_to_fixup_dir(trans, root, path, location.objectid);
723 ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
729 btrfs_run_delayed_items(trans, root);
734 * helper function to see if a given name and sequence number found
735 * in an inode back reference are already in a directory and correctly
736 * point to this inode
738 static noinline int inode_in_dir(struct btrfs_root *root,
739 struct btrfs_path *path,
740 u64 dirid, u64 objectid, u64 index,
741 const char *name, int name_len)
743 struct btrfs_dir_item *di;
744 struct btrfs_key location;
747 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
748 index, name, name_len, 0);
749 if (di && !IS_ERR(di)) {
750 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
751 if (location.objectid != objectid)
755 btrfs_release_path(path);
757 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
758 if (di && !IS_ERR(di)) {
759 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
760 if (location.objectid != objectid)
766 btrfs_release_path(path);
771 * helper function to check a log tree for a named back reference in
772 * an inode. This is used to decide if a back reference that is
773 * found in the subvolume conflicts with what we find in the log.
775 * inode backreferences may have multiple refs in a single item;
776 * during replay we process one reference at a time, and we don't
777 * want to delete valid links to a file from the subvolume if that
778 * link is also in the log.
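/*
 * For reference, the refs scanned below are packed back to back inside a
 * single item, roughly:
 *
 *	[btrfs_inode_ref][name][btrfs_inode_ref][name]...
 *
 * which is why the loop advances by sizeof(*ref) plus the name length on
 * every step.
 */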
780 static noinline int backref_in_log(struct btrfs_root *log,
781 struct btrfs_key *key,
783 char *name, int namelen)
785 struct btrfs_path *path;
786 struct btrfs_inode_ref *ref;
788 unsigned long ptr_end;
789 unsigned long name_ptr;
795 path = btrfs_alloc_path();
799 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
803 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
805 if (key->type == BTRFS_INODE_EXTREF_KEY) {
806 if (btrfs_find_name_in_ext_backref(path, ref_objectid,
807 name, namelen, NULL))
813 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
814 ptr_end = ptr + item_size;
815 while (ptr < ptr_end) {
816 ref = (struct btrfs_inode_ref *)ptr;
817 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
818 if (found_name_len == namelen) {
819 name_ptr = (unsigned long)(ref + 1);
820 ret = memcmp_extent_buffer(path->nodes[0], name,
827 ptr = (unsigned long)(ref + 1) + found_name_len;
830 btrfs_free_path(path);
834 static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
835 struct btrfs_root *root,
836 struct btrfs_path *path,
837 struct btrfs_root *log_root,
838 struct inode *dir, struct inode *inode,
839 struct extent_buffer *eb,
840 u64 inode_objectid, u64 parent_objectid,
841 u64 ref_index, char *name, int namelen,
847 struct extent_buffer *leaf;
848 struct btrfs_dir_item *di;
849 struct btrfs_key search_key;
850 struct btrfs_inode_extref *extref;
853 /* Search old style refs */
854 search_key.objectid = inode_objectid;
855 search_key.type = BTRFS_INODE_REF_KEY;
856 search_key.offset = parent_objectid;
857 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
859 struct btrfs_inode_ref *victim_ref;
861 unsigned long ptr_end;
863 leaf = path->nodes[0];
865 /* are we trying to overwrite a back ref for the root directory?
866 * If so, just jump out, we're done
868 if (search_key.objectid == search_key.offset)
871 /* check all the names in this back reference to see
872 * if they are in the log. If so, we allow them to stay;
873 * otherwise they must be unlinked as a conflict
875 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
876 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
877 while (ptr < ptr_end) {
878 victim_ref = (struct btrfs_inode_ref *)ptr;
879 victim_name_len = btrfs_inode_ref_name_len(leaf,
881 victim_name = kmalloc(victim_name_len, GFP_NOFS);
882 BUG_ON(!victim_name);
884 read_extent_buffer(leaf, victim_name,
885 (unsigned long)(victim_ref + 1),
888 if (!backref_in_log(log_root, &search_key,
892 btrfs_inc_nlink(inode);
893 btrfs_release_path(path);
895 ret = btrfs_unlink_inode(trans, root, dir,
899 btrfs_run_delayed_items(trans, root);
906 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
911 * NOTE: we have searched the root tree and checked the
912 * corresponding ref; it does not need to be checked again.
916 btrfs_release_path(path);
918 /* Same search but for extended refs */
919 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
920 inode_objectid, parent_objectid, 0,
922 if (!IS_ERR_OR_NULL(extref)) {
926 struct inode *victim_parent;
928 leaf = path->nodes[0];
930 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
931 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
933 while (cur_offset < item_size) {
934 extref = (struct btrfs_inode_extref *)base + cur_offset;
936 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
938 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
941 victim_name = kmalloc(victim_name_len, GFP_NOFS);
942 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
945 search_key.objectid = inode_objectid;
946 search_key.type = BTRFS_INODE_EXTREF_KEY;
947 search_key.offset = btrfs_extref_hash(parent_objectid,
951 if (!backref_in_log(log_root, &search_key,
952 parent_objectid, victim_name,
955 victim_parent = read_one_inode(root,
958 btrfs_inc_nlink(inode);
959 btrfs_release_path(path);
961 ret = btrfs_unlink_inode(trans, root,
966 btrfs_run_delayed_items(trans, root);
977 cur_offset += victim_name_len + sizeof(*extref);
981 btrfs_release_path(path);
983 /* look for a conflicting sequence number */
984 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
985 ref_index, name, namelen, 0);
986 if (di && !IS_ERR(di)) {
987 ret = drop_one_dir_item(trans, root, path, dir, di);
990 btrfs_release_path(path);
992 /* look for a conflicting name */
993 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
995 if (di && !IS_ERR(di)) {
996 ret = drop_one_dir_item(trans, root, path, dir, di);
999 btrfs_release_path(path);
1004 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1005 u32 *namelen, char **name, u64 *index,
1006 u64 *parent_objectid)
1008 struct btrfs_inode_extref *extref;
1010 extref = (struct btrfs_inode_extref *)ref_ptr;
1012 *namelen = btrfs_inode_extref_name_len(eb, extref);
1013 *name = kmalloc(*namelen, GFP_NOFS);
1017 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1020 *index = btrfs_inode_extref_index(eb, extref);
1021 if (parent_objectid)
1022 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1027 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1028 u32 *namelen, char **name, u64 *index)
1030 struct btrfs_inode_ref *ref;
1032 ref = (struct btrfs_inode_ref *)ref_ptr;
1034 *namelen = btrfs_inode_ref_name_len(eb, ref);
1035 *name = kmalloc(*namelen, GFP_NOFS);
1039 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1041 *index = btrfs_inode_ref_index(eb, ref);
1047 * replay one inode back reference item found in the log tree.
1048 * eb, slot and key refer to the buffer and key found in the log tree.
1049 * root is the destination we are replaying into, and path is for temp
1050 * use by this function. (it should be released on return).
1052 static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1053 struct btrfs_root *root,
1054 struct btrfs_root *log,
1055 struct btrfs_path *path,
1056 struct extent_buffer *eb, int slot,
1057 struct btrfs_key *key)
1060 struct inode *inode;
1061 unsigned long ref_ptr;
1062 unsigned long ref_end;
1066 int search_done = 0;
1067 int log_ref_ver = 0;
1068 u64 parent_objectid;
1071 int ref_struct_size;
1073 ref_ptr = btrfs_item_ptr_offset(eb, slot);
1074 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
1076 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1077 struct btrfs_inode_extref *r;
1079 ref_struct_size = sizeof(struct btrfs_inode_extref);
1081 r = (struct btrfs_inode_extref *)ref_ptr;
1082 parent_objectid = btrfs_inode_extref_parent(eb, r);
1084 ref_struct_size = sizeof(struct btrfs_inode_ref);
1085 parent_objectid = key->offset;
1087 inode_objectid = key->objectid;
1090 * it is possible that we didn't log all the parent directories
1091 * for a given inode. If we don't find the dir, just don't
1092 * copy the back ref in. The link count fixup code will take
1095 dir = read_one_inode(root, parent_objectid);
1099 inode = read_one_inode(root, inode_objectid);
1105 while (ref_ptr < ref_end) {
1107 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1108 &ref_index, &parent_objectid);
1110 * parent object can change from one array
1114 dir = read_one_inode(root, parent_objectid);
1118 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1124 /* if we already have a perfect match, we're done */
1125 if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
1126 ref_index, name, namelen)) {
1128 * look for a conflicting back reference in the
1129 * metadata. if we find one we have to unlink that name
1130 * of the file before we add our new link. Later on, we
1131 * overwrite any existing back reference, and we don't
1132 * want to create dangling pointers in the directory.
1136 ret = __add_inode_ref(trans, root, path, log,
1140 ref_index, name, namelen,
1147 /* insert our name */
1148 ret = btrfs_add_link(trans, dir, inode, name, namelen,
1152 btrfs_update_inode(trans, root, inode);
1155 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1163 /* finally write the back reference in the inode */
1164 ret = overwrite_item(trans, root, path, eb, slot, key);
1168 btrfs_release_path(path);
1174 static int insert_orphan_item(struct btrfs_trans_handle *trans,
1175 struct btrfs_root *root, u64 offset)
1178 ret = btrfs_find_orphan_item(root, offset);
1180 ret = btrfs_insert_orphan_item(trans, root, offset);
1184 static int count_inode_extrefs(struct btrfs_root *root,
1185 struct inode *inode, struct btrfs_path *path)
1189 unsigned int nlink = 0;
1192 u64 inode_objectid = btrfs_ino(inode);
1195 struct btrfs_inode_extref *extref;
1196 struct extent_buffer *leaf;
1199 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1204 leaf = path->nodes[0];
1205 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1206 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1208 while (cur_offset < item_size) {
1209 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1210 name_len = btrfs_inode_extref_name_len(leaf, extref);
1214 cur_offset += name_len + sizeof(*extref);
1218 btrfs_release_path(path);
1220 btrfs_release_path(path);
1227 static int count_inode_refs(struct btrfs_root *root,
1228 struct inode *inode, struct btrfs_path *path)
1231 struct btrfs_key key;
1232 unsigned int nlink = 0;
1234 unsigned long ptr_end;
1236 u64 ino = btrfs_ino(inode);
1239 key.type = BTRFS_INODE_REF_KEY;
1240 key.offset = (u64)-1;
1243 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1247 if (path->slots[0] == 0)
1251 btrfs_item_key_to_cpu(path->nodes[0], &key,
1253 if (key.objectid != ino ||
1254 key.type != BTRFS_INODE_REF_KEY)
1256 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1257 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1259 while (ptr < ptr_end) {
1260 struct btrfs_inode_ref *ref;
1262 ref = (struct btrfs_inode_ref *)ptr;
1263 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1265 ptr = (unsigned long)(ref + 1) + name_len;
1269 if (key.offset == 0)
1272 btrfs_release_path(path);
1274 btrfs_release_path(path);
1280 * There are a few corners where the link count of the file can't
1281 * be properly maintained during replay. So, instead of adding
1282 * lots of complexity to the log code, we just scan the backrefs
1283 * for any file that has been through replay.
1285 * The scan will update the link count on the inode to reflect the
1286 * number of back refs found. If it goes down to zero, the iput
1287 * will free the inode.
1289 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1290 struct btrfs_root *root,
1291 struct inode *inode)
1293 struct btrfs_path *path;
1296 u64 ino = btrfs_ino(inode);
1298 path = btrfs_alloc_path();
1302 ret = count_inode_refs(root, inode, path);
1308 ret = count_inode_extrefs(root, inode, path);
1319 if (nlink != inode->i_nlink) {
1320 set_nlink(inode, nlink);
1321 btrfs_update_inode(trans, root, inode);
1323 BTRFS_I(inode)->index_cnt = (u64)-1;
1325 if (inode->i_nlink == 0) {
1326 if (S_ISDIR(inode->i_mode)) {
1327 ret = replay_dir_deletes(trans, root, NULL, path,
1331 ret = insert_orphan_item(trans, root, ino);
1336 btrfs_free_path(path);
1340 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1341 struct btrfs_root *root,
1342 struct btrfs_path *path)
1345 struct btrfs_key key;
1346 struct inode *inode;
1348 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1349 key.type = BTRFS_ORPHAN_ITEM_KEY;
1350 key.offset = (u64)-1;
1352 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1357 if (path->slots[0] == 0)
1362 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1363 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1364 key.type != BTRFS_ORPHAN_ITEM_KEY)
1367 ret = btrfs_del_item(trans, root, path);
1371 btrfs_release_path(path);
1372 inode = read_one_inode(root, key.offset);
1376 ret = fixup_inode_link_count(trans, root, inode);
1382 * fixup on a directory may create new entries,
1383 * make sure we always look for the highest possible
1386 key.offset = (u64)-1;
1390 btrfs_release_path(path);
1396 * record a given inode in the fixup dir so we can check its link
1397 * count when replay is done. The link count is incremented here
1398 * so the inode won't go away until we check it
1400 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1401 struct btrfs_root *root,
1402 struct btrfs_path *path,
1405 struct btrfs_key key;
1407 struct inode *inode;
1409 inode = read_one_inode(root, objectid);
1413 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1414 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1415 key.offset = objectid;
1417 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1419 btrfs_release_path(path);
1421 if (!inode->i_nlink)
1422 set_nlink(inode, 1);
1424 btrfs_inc_nlink(inode);
1425 ret = btrfs_update_inode(trans, root, inode);
1426 } else if (ret == -EEXIST) {
1437 * when replaying the log for a directory, we only insert names
1438 * for inodes that actually exist. This means an fsync on a directory
1439 * does not implicitly fsync all the new files in it
1441 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1442 struct btrfs_root *root,
1443 struct btrfs_path *path,
1444 u64 dirid, u64 index,
1445 char *name, int name_len, u8 type,
1446 struct btrfs_key *location)
1448 struct inode *inode;
1452 inode = read_one_inode(root, location->objectid);
1456 dir = read_one_inode(root, dirid);
1461 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
1463 /* FIXME, put inode into FIXUP list */
1471 * take a single entry in a log directory item and replay it into
1474 * if a conflicting item exists in the subdirectory already,
1475 * the inode it points to is unlinked and put into the link count
1478 * If a name from the log points to a file or directory that does
1479 * not exist in the FS, it is skipped. fsyncs on directories
1480 * do not force down inodes inside that directory, just changes to the
1481 * names or unlinks in a directory.
1483 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1484 struct btrfs_root *root,
1485 struct btrfs_path *path,
1486 struct extent_buffer *eb,
1487 struct btrfs_dir_item *di,
1488 struct btrfs_key *key)
1492 struct btrfs_dir_item *dst_di;
1493 struct btrfs_key found_key;
1494 struct btrfs_key log_key;
1500 dir = read_one_inode(root, key->objectid);
1504 name_len = btrfs_dir_name_len(eb, di);
1505 name = kmalloc(name_len, GFP_NOFS);
1509 log_type = btrfs_dir_type(eb, di);
1510 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1513 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1514 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1519 btrfs_release_path(path);
1521 if (key->type == BTRFS_DIR_ITEM_KEY) {
1522 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1524 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1525 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1532 if (IS_ERR_OR_NULL(dst_di)) {
1533 /* we need a sequence number to insert, so we only
1534 * do inserts for the BTRFS_DIR_INDEX_KEY types
1536 if (key->type != BTRFS_DIR_INDEX_KEY)
1541 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1542 /* the existing item matches the logged item */
1543 if (found_key.objectid == log_key.objectid &&
1544 found_key.type == log_key.type &&
1545 found_key.offset == log_key.offset &&
1546 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1551 * don't drop the conflicting directory entry if the inode
1552 * for the new entry doesn't exist
1557 ret = drop_one_dir_item(trans, root, path, dir, dst_di);
1560 if (key->type == BTRFS_DIR_INDEX_KEY)
1563 btrfs_release_path(path);
1569 btrfs_release_path(path);
1570 ret = insert_one_name(trans, root, path, key->objectid, key->offset,
1571 name, name_len, log_type, &log_key);
1573 BUG_ON(ret && ret != -ENOENT);
1578 * find all the names in a directory item and reconcile them into
1579 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
1580 * one name in a directory item, but the same code gets used for
1581 * both directory index types
1583 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
1584 struct btrfs_root *root,
1585 struct btrfs_path *path,
1586 struct extent_buffer *eb, int slot,
1587 struct btrfs_key *key)
1590 u32 item_size = btrfs_item_size_nr(eb, slot);
1591 struct btrfs_dir_item *di;
1594 unsigned long ptr_end;
1596 ptr = btrfs_item_ptr_offset(eb, slot);
1597 ptr_end = ptr + item_size;
1598 while (ptr < ptr_end) {
1599 di = (struct btrfs_dir_item *)ptr;
1600 if (verify_dir_item(root, eb, di))
1602 name_len = btrfs_dir_name_len(eb, di);
1603 ret = replay_one_name(trans, root, path, eb, di, key);
1605 ptr = (unsigned long)(di + 1);
1612 * directory replay has two parts. There are the standard directory
1613 * items in the log copied from the subvolume, and range items
1614 * created in the log while the subvolume was logged.
1616 * The range items tell us which parts of the key space the log
1617 * is authoritative for. During replay, if a key in the subvolume
1618 * directory is in a logged range item, but not actually in the log,
1619 * that means it was deleted from the directory before the fsync
1620 * and should be removed.
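/*
 * As a worked example: a dir log item with key
 * (dirid, BTRFS_DIR_LOG_INDEX_KEY, 100) and dir_log_end == 200 says the log
 * is authoritative for index keys 100..200 of that directory, so an index
 * found in the subvolume in that range but missing from the log was unlinked
 * before the fsync and must be deleted during replay.
 */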
1622 static noinline int find_dir_range(struct btrfs_root *root,
1623 struct btrfs_path *path,
1624 u64 dirid, int key_type,
1625 u64 *start_ret, u64 *end_ret)
1627 struct btrfs_key key;
1629 struct btrfs_dir_log_item *item;
1633 if (*start_ret == (u64)-1)
1636 key.objectid = dirid;
1637 key.type = key_type;
1638 key.offset = *start_ret;
1640 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1644 if (path->slots[0] == 0)
1649 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1651 if (key.type != key_type || key.objectid != dirid) {
1655 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1656 struct btrfs_dir_log_item);
1657 found_end = btrfs_dir_log_end(path->nodes[0], item);
1659 if (*start_ret >= key.offset && *start_ret <= found_end) {
1661 *start_ret = key.offset;
1662 *end_ret = found_end;
1667 /* check the next slot in the tree to see if it is a valid item */
1668 nritems = btrfs_header_nritems(path->nodes[0]);
1669 if (path->slots[0] >= nritems) {
1670 ret = btrfs_next_leaf(root, path);
1677 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1679 if (key.type != key_type || key.objectid != dirid) {
1683 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1684 struct btrfs_dir_log_item);
1685 found_end = btrfs_dir_log_end(path->nodes[0], item);
1686 *start_ret = key.offset;
1687 *end_ret = found_end;
1690 btrfs_release_path(path);
1695 * this looks for a given directory item in the log. If the directory
1696 * item is not in the log, the item is removed and the inode it points
1699 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
1700 struct btrfs_root *root,
1701 struct btrfs_root *log,
1702 struct btrfs_path *path,
1703 struct btrfs_path *log_path,
1705 struct btrfs_key *dir_key)
1708 struct extent_buffer *eb;
1711 struct btrfs_dir_item *di;
1712 struct btrfs_dir_item *log_di;
1715 unsigned long ptr_end;
1717 struct inode *inode;
1718 struct btrfs_key location;
1721 eb = path->nodes[0];
1722 slot = path->slots[0];
1723 item_size = btrfs_item_size_nr(eb, slot);
1724 ptr = btrfs_item_ptr_offset(eb, slot);
1725 ptr_end = ptr + item_size;
1726 while (ptr < ptr_end) {
1727 di = (struct btrfs_dir_item *)ptr;
1728 if (verify_dir_item(root, eb, di)) {
1733 name_len = btrfs_dir_name_len(eb, di);
1734 name = kmalloc(name_len, GFP_NOFS);
1739 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1742 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
1743 log_di = btrfs_lookup_dir_item(trans, log, log_path,
1746 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
1747 log_di = btrfs_lookup_dir_index_item(trans, log,
1753 if (IS_ERR_OR_NULL(log_di)) {
1754 btrfs_dir_item_key_to_cpu(eb, di, &location);
1755 btrfs_release_path(path);
1756 btrfs_release_path(log_path);
1757 inode = read_one_inode(root, location.objectid);
1763 ret = link_to_fixup_dir(trans, root,
1764 path, location.objectid);
1766 btrfs_inc_nlink(inode);
1767 ret = btrfs_unlink_inode(trans, root, dir, inode,
1771 btrfs_run_delayed_items(trans, root);
1776 /* there might still be more names under this key
1777 * check and repeat if required
1779 ret = btrfs_search_slot(NULL, root, dir_key, path,
1786 btrfs_release_path(log_path);
1789 ptr = (unsigned long)(di + 1);
1794 btrfs_release_path(path);
1795 btrfs_release_path(log_path);
1800 * deletion replay happens before we copy any new directory items
1801 * out of the log or out of backreferences from inodes. It
1802 * scans the log to find ranges of keys that the log is authoritative for,
1803 * and then scans the directory to find items in those ranges that are
1804 * not present in the log.
1806 * Anything we don't find in the log is unlinked and removed from the
1809 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
1810 struct btrfs_root *root,
1811 struct btrfs_root *log,
1812 struct btrfs_path *path,
1813 u64 dirid, int del_all)
1817 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
1819 struct btrfs_key dir_key;
1820 struct btrfs_key found_key;
1821 struct btrfs_path *log_path;
1824 dir_key.objectid = dirid;
1825 dir_key.type = BTRFS_DIR_ITEM_KEY;
1826 log_path = btrfs_alloc_path();
1830 dir = read_one_inode(root, dirid);
1831 /* it isn't an error if the inode isn't there, that can happen
1832 * because we replay the deletes before we copy in the inode item
1836 btrfs_free_path(log_path);
1844 range_end = (u64)-1;
1846 ret = find_dir_range(log, path, dirid, key_type,
1847 &range_start, &range_end);
1852 dir_key.offset = range_start;
1855 ret = btrfs_search_slot(NULL, root, &dir_key, path,
1860 nritems = btrfs_header_nritems(path->nodes[0]);
1861 if (path->slots[0] >= nritems) {
1862 ret = btrfs_next_leaf(root, path);
1866 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1868 if (found_key.objectid != dirid ||
1869 found_key.type != dir_key.type)
1872 if (found_key.offset > range_end)
1875 ret = check_item_in_log(trans, root, log, path,
1879 if (found_key.offset == (u64)-1)
1881 dir_key.offset = found_key.offset + 1;
1883 btrfs_release_path(path);
1884 if (range_end == (u64)-1)
1886 range_start = range_end + 1;
1891 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
1892 key_type = BTRFS_DIR_LOG_INDEX_KEY;
1893 dir_key.type = BTRFS_DIR_INDEX_KEY;
1894 btrfs_release_path(path);
1898 btrfs_release_path(path);
1899 btrfs_free_path(log_path);
1905 * the process_func used to replay items from the log tree. This
1906 * gets called in two different stages. The first stage just looks
1907 * for inodes and makes sure they are all copied into the subvolume.
1909 * The second stage copies all the other item types from the log into
1910 * the subvolume. The two stage approach is slower, but gets rid of
1911 * lots of complexity around inodes referencing other inodes that exist
1912 * only in the log (references come from either directory items or inode
1915 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
1916 struct walk_control *wc, u64 gen)
1919 struct btrfs_path *path;
1920 struct btrfs_root *root = wc->replay_dest;
1921 struct btrfs_key key;
1926 ret = btrfs_read_buffer(eb, gen);
1930 level = btrfs_header_level(eb);
1935 path = btrfs_alloc_path();
1939 nritems = btrfs_header_nritems(eb);
1940 for (i = 0; i < nritems; i++) {
1941 btrfs_item_key_to_cpu(eb, &key, i);
1943 /* inode keys are done during the first stage */
1944 if (key.type == BTRFS_INODE_ITEM_KEY &&
1945 wc->stage == LOG_WALK_REPLAY_INODES) {
1946 struct btrfs_inode_item *inode_item;
1949 inode_item = btrfs_item_ptr(eb, i,
1950 struct btrfs_inode_item);
1951 mode = btrfs_inode_mode(eb, inode_item);
1952 if (S_ISDIR(mode)) {
1953 ret = replay_dir_deletes(wc->trans,
1954 root, log, path, key.objectid, 0);
1957 ret = overwrite_item(wc->trans, root, path,
1961 /* for regular files, make sure the corresponding
1962 * orphan item exists. Extents past the new EOF
1963 * will be truncated later by orphan cleanup.
1965 if (S_ISREG(mode)) {
1966 ret = insert_orphan_item(wc->trans, root,
1971 ret = link_to_fixup_dir(wc->trans, root,
1972 path, key.objectid);
1975 if (wc->stage < LOG_WALK_REPLAY_ALL)
1978 /* these keys are simply copied */
1979 if (key.type == BTRFS_XATTR_ITEM_KEY) {
1980 ret = overwrite_item(wc->trans, root, path,
1983 } else if (key.type == BTRFS_INODE_REF_KEY) {
1984 ret = add_inode_ref(wc->trans, root, log, path,
1986 BUG_ON(ret && ret != -ENOENT);
1987 } else if (key.type == BTRFS_INODE_EXTREF_KEY) {
1988 ret = add_inode_ref(wc->trans, root, log, path,
1990 BUG_ON(ret && ret != -ENOENT);
1991 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
1992 ret = replay_one_extent(wc->trans, root, path,
1995 } else if (key.type == BTRFS_DIR_ITEM_KEY ||
1996 key.type == BTRFS_DIR_INDEX_KEY) {
1997 ret = replay_one_dir_item(wc->trans, root, path,
2002 btrfs_free_path(path);
2006 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2007 struct btrfs_root *root,
2008 struct btrfs_path *path, int *level,
2009 struct walk_control *wc)
2014 struct extent_buffer *next;
2015 struct extent_buffer *cur;
2016 struct extent_buffer *parent;
2020 WARN_ON(*level < 0);
2021 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2023 while (*level > 0) {
2024 WARN_ON(*level < 0);
2025 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2026 cur = path->nodes[*level];
2028 if (btrfs_header_level(cur) != *level)
2031 if (path->slots[*level] >=
2032 btrfs_header_nritems(cur))
2035 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2036 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2037 blocksize = btrfs_level_size(root, *level - 1);
2039 parent = path->nodes[*level];
2040 root_owner = btrfs_header_owner(parent);
2042 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
2047 ret = wc->process_func(root, next, wc, ptr_gen);
2051 path->slots[*level]++;
2053 ret = btrfs_read_buffer(next, ptr_gen);
2055 free_extent_buffer(next);
2059 btrfs_tree_lock(next);
2060 btrfs_set_lock_blocking(next);
2061 clean_tree_block(trans, root, next);
2062 btrfs_wait_tree_block_writeback(next);
2063 btrfs_tree_unlock(next);
2065 WARN_ON(root_owner !=
2066 BTRFS_TREE_LOG_OBJECTID);
2067 ret = btrfs_free_and_pin_reserved_extent(root,
2069 BUG_ON(ret); /* -ENOMEM or logic errors */
2071 free_extent_buffer(next);
2074 ret = btrfs_read_buffer(next, ptr_gen);
2076 free_extent_buffer(next);
2080 WARN_ON(*level <= 0);
2081 if (path->nodes[*level-1])
2082 free_extent_buffer(path->nodes[*level-1]);
2083 path->nodes[*level-1] = next;
2084 *level = btrfs_header_level(next);
2085 path->slots[*level] = 0;
2088 WARN_ON(*level < 0);
2089 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2091 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2097 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2098 struct btrfs_root *root,
2099 struct btrfs_path *path, int *level,
2100 struct walk_control *wc)
2107 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2108 slot = path->slots[i];
2109 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2112 WARN_ON(*level == 0);
2115 struct extent_buffer *parent;
2116 if (path->nodes[*level] == root->node)
2117 parent = path->nodes[*level];
2119 parent = path->nodes[*level + 1];
2121 root_owner = btrfs_header_owner(parent);
2122 ret = wc->process_func(root, path->nodes[*level], wc,
2123 btrfs_header_generation(path->nodes[*level]));
2128 struct extent_buffer *next;
2130 next = path->nodes[*level];
2132 btrfs_tree_lock(next);
2133 btrfs_set_lock_blocking(next);
2134 clean_tree_block(trans, root, next);
2135 btrfs_wait_tree_block_writeback(next);
2136 btrfs_tree_unlock(next);
2138 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2139 ret = btrfs_free_and_pin_reserved_extent(root,
2140 path->nodes[*level]->start,
2141 path->nodes[*level]->len);
2144 free_extent_buffer(path->nodes[*level]);
2145 path->nodes[*level] = NULL;
2153 * drop the reference count on the tree rooted at 'snap'. This traverses
2154 * the tree freeing any blocks that have a ref count of zero after being
2157 static int walk_log_tree(struct btrfs_trans_handle *trans,
2158 struct btrfs_root *log, struct walk_control *wc)
2163 struct btrfs_path *path;
2167 path = btrfs_alloc_path();
2171 level = btrfs_header_level(log->node);
2173 path->nodes[level] = log->node;
2174 extent_buffer_get(log->node);
2175 path->slots[level] = 0;
2178 wret = walk_down_log_tree(trans, log, path, &level, wc);
2186 wret = walk_up_log_tree(trans, log, path, &level, wc);
2195 /* was the root node processed? if not, catch it here */
2196 if (path->nodes[orig_level]) {
2197 ret = wc->process_func(log, path->nodes[orig_level], wc,
2198 btrfs_header_generation(path->nodes[orig_level]));
2202 struct extent_buffer *next;
2204 next = path->nodes[orig_level];
2206 btrfs_tree_lock(next);
2207 btrfs_set_lock_blocking(next);
2208 clean_tree_block(trans, log, next);
2209 btrfs_wait_tree_block_writeback(next);
2210 btrfs_tree_unlock(next);
2212 WARN_ON(log->root_key.objectid !=
2213 BTRFS_TREE_LOG_OBJECTID);
2214 ret = btrfs_free_and_pin_reserved_extent(log, next->start,
2216 BUG_ON(ret); /* -ENOMEM or logic errors */
2221 for (i = 0; i <= orig_level; i++) {
2222 if (path->nodes[i]) {
2223 free_extent_buffer(path->nodes[i]);
2224 path->nodes[i] = NULL;
2227 btrfs_free_path(path);
2232 * helper function to update the item for a given subvolume's log root
2233 * in the tree of log roots
2235 static int update_log_root(struct btrfs_trans_handle *trans,
2236 struct btrfs_root *log)
2240 if (log->log_transid == 1) {
2241 /* insert root item on the first sync */
2242 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
2243 &log->root_key, &log->root_item);
2245 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
2246 &log->root_key, &log->root_item);
2251 static int wait_log_commit(struct btrfs_trans_handle *trans,
2252 struct btrfs_root *root, unsigned long transid)
2255 int index = transid % 2;
2258 * we only allow two pending log transactions at a time,
2259 * so we know that if ours is more than 2 older than the
2260 * current transaction, we're done
2263 prepare_to_wait(&root->log_commit_wait[index],
2264 &wait, TASK_UNINTERRUPTIBLE);
2265 mutex_unlock(&root->log_mutex);
2267 if (root->fs_info->last_trans_log_full_commit !=
2268 trans->transid && root->log_transid < transid + 2 &&
2269 atomic_read(&root->log_commit[index]))
2272 finish_wait(&root->log_commit_wait[index], &wait);
2273 mutex_lock(&root->log_mutex);
2274 } while (root->fs_info->last_trans_log_full_commit !=
2275 trans->transid && root->log_transid < transid + 2 &&
2276 atomic_read(&root->log_commit[index]));
2280 static void wait_for_writer(struct btrfs_trans_handle *trans,
2281 struct btrfs_root *root)
2284 while (root->fs_info->last_trans_log_full_commit !=
2285 trans->transid && atomic_read(&root->log_writers)) {
2286 prepare_to_wait(&root->log_writer_wait,
2287 &wait, TASK_UNINTERRUPTIBLE);
2288 mutex_unlock(&root->log_mutex);
2289 if (root->fs_info->last_trans_log_full_commit !=
2290 trans->transid && atomic_read(&root->log_writers))
2292 mutex_lock(&root->log_mutex);
2293 finish_wait(&root->log_writer_wait, &wait);
2298 * btrfs_sync_log sends a given tree log down to the disk and
2299 * updates the super blocks to record it. When this call is done,
2300 * you know that any inodes previously logged are safely on disk only
2303 * Any other return value means you need to call btrfs_commit_transaction.
2304 * Some of the edge cases for fsyncing directories that have had unlinks
2305 * or renames done in the past mean that sometimes the only safe
2306 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
2307 * that has happened.
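/*
 * A sketch of how an fsync caller is expected to react to the return value
 * (btrfs_sync_file() in file.c does roughly this):
 *
 *	ret = btrfs_sync_log(trans, root);
 *	if (ret == 0)
 *		ret = btrfs_end_transaction(trans, root);
 *	else
 *		ret = btrfs_commit_transaction(trans, root);
 */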
2309 int btrfs_sync_log(struct btrfs_trans_handle *trans,
2310 struct btrfs_root *root)
2316 struct btrfs_root *log = root->log_root;
2317 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
2318 unsigned long log_transid = 0;
2320 mutex_lock(&root->log_mutex);
2321 log_transid = root->log_transid;
2322 index1 = root->log_transid % 2;
2323 if (atomic_read(&root->log_commit[index1])) {
2324 wait_log_commit(trans, root, root->log_transid);
2325 mutex_unlock(&root->log_mutex);
2328 atomic_set(&root->log_commit[index1], 1);
2330 /* wait for previous tree log sync to complete */
2331 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
2332 wait_log_commit(trans, root, root->log_transid - 1);
2334 int batch = atomic_read(&root->log_batch);
2335 /* when we're on an ssd, just kick the log commit out */
2336 if (!btrfs_test_opt(root, SSD) && root->log_multiple_pids) {
2337 mutex_unlock(&root->log_mutex);
2338 schedule_timeout_uninterruptible(1);
2339 mutex_lock(&root->log_mutex);
2341 wait_for_writer(trans, root);
2342 if (batch == atomic_read(&root->log_batch))
2346 /* bail out if we need to do a full commit */
2347 if (root->fs_info->last_trans_log_full_commit == trans->transid) {
2349 btrfs_free_logged_extents(log, log_transid);
2350 mutex_unlock(&root->log_mutex);
2354 if (log_transid % 2 == 0)
2355 mark = EXTENT_DIRTY;
2359 /* we start IO on all the marked extents here, but we don't actually
2360 * wait for them until later.
2362 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
2364 btrfs_abort_transaction(trans, root, ret);
2365 btrfs_free_logged_extents(log, log_transid);
2366 mutex_unlock(&root->log_mutex);
2370 btrfs_set_root_node(&log->root_item, log->node);
2372 root->log_transid++;
2373 log->log_transid = root->log_transid;
2374 root->log_start_pid = 0;
2377 * IO has been started, blocks of the log tree have WRITTEN flag set
2378 * in their headers. new modifications of the log will be written to
2379 * new positions. so it's safe to allow log writers to go in.
2381 mutex_unlock(&root->log_mutex);
2383 mutex_lock(&log_root_tree->log_mutex);
2384 atomic_inc(&log_root_tree->log_batch);
2385 atomic_inc(&log_root_tree->log_writers);
2386 mutex_unlock(&log_root_tree->log_mutex);
2388 ret = update_log_root(trans, log);
2390 mutex_lock(&log_root_tree->log_mutex);
2391 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
2393 if (waitqueue_active(&log_root_tree->log_writer_wait))
2394 wake_up(&log_root_tree->log_writer_wait);
2398 if (ret != -ENOSPC) {
2399 btrfs_abort_transaction(trans, root, ret);
2400 mutex_unlock(&log_root_tree->log_mutex);
2403 root->fs_info->last_trans_log_full_commit = trans->transid;
2404 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2405 btrfs_free_logged_extents(log, log_transid);
2406 mutex_unlock(&log_root_tree->log_mutex);
2411 index2 = log_root_tree->log_transid % 2;
2412 if (atomic_read(&log_root_tree->log_commit[index2])) {
2413 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2414 wait_log_commit(trans, log_root_tree,
2415 log_root_tree->log_transid);
2416 btrfs_free_logged_extents(log, log_transid);
2417 mutex_unlock(&log_root_tree->log_mutex);
2421 atomic_set(&log_root_tree->log_commit[index2], 1);
2423 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
2424 wait_log_commit(trans, log_root_tree,
2425 log_root_tree->log_transid - 1);
2428 wait_for_writer(trans, log_root_tree);
2431 * now that we've moved on to the tree of log tree roots,
2432 * check the full commit flag again
2434 if (root->fs_info->last_trans_log_full_commit == trans->transid) {
2435 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2436 btrfs_free_logged_extents(log, log_transid);
2437 mutex_unlock(&log_root_tree->log_mutex);
2439 goto out_wake_log_root;
2442 ret = btrfs_write_and_wait_marked_extents(log_root_tree,
2443 &log_root_tree->dirty_log_pages,
2444 EXTENT_DIRTY | EXTENT_NEW);
2446 btrfs_abort_transaction(trans, root, ret);
2447 btrfs_free_logged_extents(log, log_transid);
2448 mutex_unlock(&log_root_tree->log_mutex);
2449 goto out_wake_log_root;
2451 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2452 btrfs_wait_logged_extents(log, log_transid);
2454 btrfs_set_super_log_root(root->fs_info->super_for_commit,
2455 log_root_tree->node->start);
2456 btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
2457 btrfs_header_level(log_root_tree->node));
2459 log_root_tree->log_transid++;
2462 mutex_unlock(&log_root_tree->log_mutex);
2465 * nobody else is going to jump in and write the ctree
2466 * super here because the log_commit atomic below is protecting
2467 * us. We must be called with a transaction handle pinning
2468 * the running transaction open, so a full commit can't hop
2469 * in and cause problems either.
2471 btrfs_scrub_pause_super(root);
2472 ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
2473 btrfs_scrub_continue_super(root);
2475 btrfs_abort_transaction(trans, root, ret);
2476 goto out_wake_log_root;
2479 mutex_lock(&root->log_mutex);
2480 if (root->last_log_commit < log_transid)
2481 root->last_log_commit = log_transid;
2482 mutex_unlock(&root->log_mutex);
2485 atomic_set(&log_root_tree->log_commit[index2], 0);
2487 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
2488 wake_up(&log_root_tree->log_commit_wait[index2]);
2490 atomic_set(&root->log_commit[index1], 0);
2492 if (waitqueue_active(&root->log_commit_wait[index1]))
2493 wake_up(&root->log_commit_wait[index1]);
2497 static void free_log_tree(struct btrfs_trans_handle *trans,
2498 struct btrfs_root *log)
2503 struct walk_control wc = {
2505 .process_func = process_one_buffer
2509 ret = walk_log_tree(trans, log, &wc);
2514 ret = find_first_extent_bit(&log->dirty_log_pages,
2515 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
2520 clear_extent_bits(&log->dirty_log_pages, start, end,
2521 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
2525 * We may have short-circuited the log tree with the full commit logic
2526 * and left ordered extents on our list, so clear these out to keep us
2527 * from leaking inodes and memory.
2529 btrfs_free_logged_extents(log, 0);
2530 btrfs_free_logged_extents(log, 1);
2532 free_extent_buffer(log->node);
2537 * free all the extents used by the tree log. This should be called
2538 * at commit time of the full transaction
2540 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2542 if (root->log_root) {
2543 free_log_tree(trans, root->log_root);
2544 root->log_root = NULL;
2549 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
2550 struct btrfs_fs_info *fs_info)
2552 if (fs_info->log_root_tree) {
2553 free_log_tree(trans, fs_info->log_root_tree);
2554 fs_info->log_root_tree = NULL;
2560 * If both a file and directory are logged, and unlinks or renames are
2561 * mixed in, we have a few interesting corners:
2563 * create file X in dir Y
2564 * link file X to X.link in dir Y
2566 * unlink file X but leave X.link
2569 * After a crash we would expect only X.link to exist. But file X
2570 * didn't get fsync'd again so the log has back refs for X and X.link.
2572 * We solve this by removing directory entries and inode backrefs from the
2573 * log when a file that was logged in the current transaction is
2574 * unlinked. Any later fsync will include the updated log entries, and
2575 * we'll be able to reconstruct the proper directory items from backrefs.
2577 * This optimization allows us to avoid relogging the entire inode
2578 * or the entire directory.
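 *
 * A rough sketch of the caller side (the real call sites live in the
 * unlink/rename paths of inode.c, e.g. __btrfs_unlink_inode()): once the
 * dir item, dir index item and inode ref have been removed from the
 * subvolume tree, the callers do roughly
 *
 *   btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode, dir_ino);
 *   btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir, index);
 *
 * so the log stops claiming the old name still exists.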
2580 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2581 struct btrfs_root *root,
2582 const char *name, int name_len,
2583 struct inode *dir, u64 index)
2585 struct btrfs_root *log;
2586 struct btrfs_dir_item *di;
2587 struct btrfs_path *path;
2591 u64 dir_ino = btrfs_ino(dir);
2593 if (BTRFS_I(dir)->logged_trans < trans->transid)
2596 ret = join_running_log_trans(root);
2600 mutex_lock(&BTRFS_I(dir)->log_mutex);
2602 log = root->log_root;
2603 path = btrfs_alloc_path();
2609 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
2610 name, name_len, -1);
2616 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2617 bytes_del += name_len;
2620 btrfs_release_path(path);
2621 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
2622 index, name, name_len, -1);
2628 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2629 bytes_del += name_len;
2633 /* update the directory size in the log to reflect the names
2637 struct btrfs_key key;
2639 key.objectid = dir_ino;
2641 key.type = BTRFS_INODE_ITEM_KEY;
2642 btrfs_release_path(path);
2644 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
2650 struct btrfs_inode_item *item;
2653 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2654 struct btrfs_inode_item);
2655 i_size = btrfs_inode_size(path->nodes[0], item);
2656 if (i_size > bytes_del)
2657 i_size -= bytes_del;
2660 btrfs_set_inode_size(path->nodes[0], item, i_size);
2661 btrfs_mark_buffer_dirty(path->nodes[0]);
2664 btrfs_release_path(path);
2667 btrfs_free_path(path);
2669 mutex_unlock(&BTRFS_I(dir)->log_mutex);
2670 if (ret == -ENOSPC) {
2671 root->fs_info->last_trans_log_full_commit = trans->transid;
2674 btrfs_abort_transaction(trans, root, ret);
2676 btrfs_end_log_trans(root);
2681 /* see comments for btrfs_del_dir_entries_in_log */
2682 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
2683 struct btrfs_root *root,
2684 const char *name, int name_len,
2685 struct inode *inode, u64 dirid)
2687 struct btrfs_root *log;
2691 if (BTRFS_I(inode)->logged_trans < trans->transid)
2694 ret = join_running_log_trans(root);
2697 log = root->log_root;
2698 mutex_lock(&BTRFS_I(inode)->log_mutex);
2700 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
2702 mutex_unlock(&BTRFS_I(inode)->log_mutex);
2703 if (ret == -ENOSPC) {
2704 root->fs_info->last_trans_log_full_commit = trans->transid;
2706 } else if (ret < 0 && ret != -ENOENT)
2707 btrfs_abort_transaction(trans, root, ret);
2708 btrfs_end_log_trans(root);
2714 * creates a range item in the log for 'dirid'. first_offset and
2715 * last_offset tell us which parts of the key space the log should
2716 * be considered authoritative for.
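 *
 * For example, making the log authoritative for dir items in
 * [first_offset, last_offset] stores a single item keyed
 *   (dirid, BTRFS_DIR_LOG_ITEM_KEY, first_offset)
 * with its dir_log_end field set to last_offset; the DIR_INDEX key
 * space gets the same treatment under BTRFS_DIR_LOG_INDEX_KEY.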
2718 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
2719 struct btrfs_root *log,
2720 struct btrfs_path *path,
2721 int key_type, u64 dirid,
2722 u64 first_offset, u64 last_offset)
2725 struct btrfs_key key;
2726 struct btrfs_dir_log_item *item;
2728 key.objectid = dirid;
2729 key.offset = first_offset;
2730 if (key_type == BTRFS_DIR_ITEM_KEY)
2731 key.type = BTRFS_DIR_LOG_ITEM_KEY;
2733 key.type = BTRFS_DIR_LOG_INDEX_KEY;
2734 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
2738 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2739 struct btrfs_dir_log_item);
2740 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
2741 btrfs_mark_buffer_dirty(path->nodes[0]);
2742 btrfs_release_path(path);
2747 * log all the items included in the current transaction for a given
2748 * directory. This also creates the range items in the log tree required
2749 * to replay anything deleted before the fsync
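 *
 * min_offset is where the walk starts; on return *last_offset_ret holds
 * the end of the range this call made authoritative (and covered with a
 * dir log range item), so the caller can resume at last_offset + 1.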
2751 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2752 struct btrfs_root *root, struct inode *inode,
2753 struct btrfs_path *path,
2754 struct btrfs_path *dst_path, int key_type,
2755 u64 min_offset, u64 *last_offset_ret)
2757 struct btrfs_key min_key;
2758 struct btrfs_key max_key;
2759 struct btrfs_root *log = root->log_root;
2760 struct extent_buffer *src;
2765 u64 first_offset = min_offset;
2766 u64 last_offset = (u64)-1;
2767 u64 ino = btrfs_ino(inode);
2769 log = root->log_root;
2770 max_key.objectid = ino;
2771 max_key.offset = (u64)-1;
2772 max_key.type = key_type;
2774 min_key.objectid = ino;
2775 min_key.type = key_type;
2776 min_key.offset = min_offset;
2778 path->keep_locks = 1;
2780 ret = btrfs_search_forward(root, &min_key, &max_key,
2781 path, trans->transid);
2784 * we didn't find anything from this transaction, see if there
2785 * is anything at all
2787 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
2788 min_key.objectid = ino;
2789 min_key.type = key_type;
2790 min_key.offset = (u64)-1;
2791 btrfs_release_path(path);
2792 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
2794 btrfs_release_path(path);
2797 ret = btrfs_previous_item(root, path, ino, key_type);
2799 /* if ret == 0 there are items for this type,
2800 * create a range to tell us the last key of this type.
2801 * otherwise, there are no items in this directory after
2802 * *min_offset, and we create a range to indicate that.
2805 struct btrfs_key tmp;
2806 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
2808 if (key_type == tmp.type)
2809 first_offset = max(min_offset, tmp.offset) + 1;
2814 /* go backward to find any previous key */
2815 ret = btrfs_previous_item(root, path, ino, key_type);
2817 struct btrfs_key tmp;
2818 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
2819 if (key_type == tmp.type) {
2820 first_offset = tmp.offset;
2821 ret = overwrite_item(trans, log, dst_path,
2822 path->nodes[0], path->slots[0],
2830 btrfs_release_path(path);
2832 /* find the first key from this transaction again */
2833 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
2840 * we have a block from this transaction, log every item in it
2841 * from our directory
2844 struct btrfs_key tmp;
2845 src = path->nodes[0];
2846 nritems = btrfs_header_nritems(src);
2847 for (i = path->slots[0]; i < nritems; i++) {
2848 btrfs_item_key_to_cpu(src, &min_key, i);
2850 if (min_key.objectid != ino || min_key.type != key_type)
2852 ret = overwrite_item(trans, log, dst_path, src, i,
2859 path->slots[0] = nritems;
2862 * look ahead to the next item and see if it is also
2863 * from this directory and from this transaction
2865 ret = btrfs_next_leaf(root, path);
2867 last_offset = (u64)-1;
2870 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
2871 if (tmp.objectid != ino || tmp.type != key_type) {
2872 last_offset = (u64)-1;
2875 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
2876 ret = overwrite_item(trans, log, dst_path,
2877 path->nodes[0], path->slots[0],
2882 last_offset = tmp.offset;
2887 btrfs_release_path(path);
2888 btrfs_release_path(dst_path);
2891 *last_offset_ret = last_offset;
2893 * insert the log range keys to indicate where the log
2896 ret = insert_dir_log_key(trans, log, path, key_type,
2897 ino, first_offset, last_offset);
2905 * logging directories is very similar to logging inodes: we find all the items
2906 * from the current transaction and write them to the log.
2908 * The recovery code scans the directory in the subvolume, and if it finds a
2909 * key in the range logged that is not present in the log tree, then it means
2910 * that dir entry was unlinked during the transaction.
2912 * In order for that scan to work, we must include one key smaller than
2913 * the smallest logged by this transaction and one key larger than the largest
2914 * key logged by this transaction.
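 *
 * The loop below runs once over the BTRFS_DIR_ITEM_KEY key space and
 * then repeats for BTRFS_DIR_INDEX_KEY, advancing min_key past each
 * last_offset that log_dir_items() hands back.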
2916 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
2917 struct btrfs_root *root, struct inode *inode,
2918 struct btrfs_path *path,
2919 struct btrfs_path *dst_path)
2924 int key_type = BTRFS_DIR_ITEM_KEY;
2930 ret = log_dir_items(trans, root, inode, path,
2931 dst_path, key_type, min_key,
2935 if (max_key == (u64)-1)
2937 min_key = max_key + 1;
2940 if (key_type == BTRFS_DIR_ITEM_KEY) {
2941 key_type = BTRFS_DIR_INDEX_KEY;
2948 * a helper function to drop items from the log before we relog an
2949 * inode. max_key_type indicates the highest item type to remove.
2950 * This cannot be run for file data extents because it does not
2951 * free the extents they point to.
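 *
 * The loop below deletes whole runs of items at a time: search to the
 * last key with this objectid, binary search back to the first slot in
 * that leaf holding the objectid, delete everything in between, and
 * repeat until no items with the objectid are left.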
2953 static int drop_objectid_items(struct btrfs_trans_handle *trans,
2954 struct btrfs_root *log,
2955 struct btrfs_path *path,
2956 u64 objectid, int max_key_type)
2959 struct btrfs_key key;
2960 struct btrfs_key found_key;
2963 key.objectid = objectid;
2964 key.type = max_key_type;
2965 key.offset = (u64)-1;
2968 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
2973 if (path->slots[0] == 0)
2977 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2980 if (found_key.objectid != objectid)
2983 found_key.offset = 0;
2985 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
2988 ret = btrfs_del_items(trans, log, path, start_slot,
2989 path->slots[0] - start_slot + 1);
2991 * If start slot isn't 0 then we don't need to re-search, we've
2992 * found the last guy with the objectid in this tree.
2994 if (ret || start_slot != 0)
2996 btrfs_release_path(path);
2998 btrfs_release_path(path);
3004 static void fill_inode_item(struct btrfs_trans_handle *trans,
3005 struct extent_buffer *leaf,
3006 struct btrfs_inode_item *item,
3007 struct inode *inode, int log_inode_only)
3009 struct btrfs_map_token token;
3011 btrfs_init_map_token(&token);
3013 if (log_inode_only) {
3014 /* set the generation to zero so the recovery code
3015 * can tell the difference between logging
3016 * just to say 'this inode exists' and logging
3017 * to say 'update this inode with these values'
3019 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3020 btrfs_set_token_inode_size(leaf, item, 0, &token);
3022 btrfs_set_token_inode_generation(leaf, item,
3023 BTRFS_I(inode)->generation,
3025 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3028 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3029 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3030 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3031 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3033 btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
3034 inode->i_atime.tv_sec, &token);
3035 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
3036 inode->i_atime.tv_nsec, &token);
3038 btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
3039 inode->i_mtime.tv_sec, &token);
3040 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
3041 inode->i_mtime.tv_nsec, &token);
3043 btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
3044 inode->i_ctime.tv_sec, &token);
3045 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
3046 inode->i_ctime.tv_nsec, &token);
3048 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3051 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3052 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3053 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3054 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3055 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3058 static int log_inode_item(struct btrfs_trans_handle *trans,
3059 struct btrfs_root *log, struct btrfs_path *path,
3060 struct inode *inode)
3062 struct btrfs_inode_item *inode_item;
3063 struct btrfs_key key;
3066 memcpy(&key, &BTRFS_I(inode)->location, sizeof(key));
3067 ret = btrfs_insert_empty_item(trans, log, path, &key,
3068 sizeof(*inode_item));
3069 if (ret && ret != -EEXIST)
3071 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3072 struct btrfs_inode_item);
3073 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
3074 btrfs_release_path(path);
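/*
 * copy 'nr' items starting at 'start_slot' from the subvolume leaf 'src'
 * into the log tree. Inode items are rewritten with fill_inode_item() so
 * their logged form matches inode_only; for regular file extents written
 * in this transaction the matching csums are looked up and copied into
 * the log as well (unless the inode is NODATASUM).
 */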
3078 static noinline int copy_items(struct btrfs_trans_handle *trans,
3079 struct inode *inode,
3080 struct btrfs_path *dst_path,
3081 struct extent_buffer *src,
3082 int start_slot, int nr, int inode_only)
3084 unsigned long src_offset;
3085 unsigned long dst_offset;
3086 struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
3087 struct btrfs_file_extent_item *extent;
3088 struct btrfs_inode_item *inode_item;
3090 struct btrfs_key *ins_keys;
3094 struct list_head ordered_sums;
3095 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3097 INIT_LIST_HEAD(&ordered_sums);
3099 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3100 nr * sizeof(u32), GFP_NOFS);
3104 ins_sizes = (u32 *)ins_data;
3105 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3107 for (i = 0; i < nr; i++) {
3108 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3109 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3111 ret = btrfs_insert_empty_items(trans, log, dst_path,
3112 ins_keys, ins_sizes, nr);
3118 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3119 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3120 dst_path->slots[0]);
3122 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3124 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3125 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3127 struct btrfs_inode_item);
3128 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3129 inode, inode_only == LOG_INODE_EXISTS);
3131 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3132 src_offset, ins_sizes[i]);
3135 /* take a reference on file data extents so that truncates
3136 * or deletes of this inode don't have to relog the inode
3139 if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY &&
3142 extent = btrfs_item_ptr(src, start_slot + i,
3143 struct btrfs_file_extent_item);
3145 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3148 found_type = btrfs_file_extent_type(src, extent);
3149 if (found_type == BTRFS_FILE_EXTENT_REG) {
3151 ds = btrfs_file_extent_disk_bytenr(src,
3153 /* ds == 0 is a hole */
3157 dl = btrfs_file_extent_disk_num_bytes(src,
3159 cs = btrfs_file_extent_offset(src, extent);
3160 cl = btrfs_file_extent_num_bytes(src,
3162 if (btrfs_file_extent_compression(src,
3168 ret = btrfs_lookup_csums_range(
3169 log->fs_info->csum_root,
3170 ds + cs, ds + cs + cl - 1,
3177 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3178 btrfs_release_path(dst_path);
3182 * we have to do this after the loop above to avoid changing the
3183 * log tree while we are still copying items into it.
3186 while (!list_empty(&ordered_sums)) {
3187 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3188 struct btrfs_ordered_sum,
3191 ret = btrfs_csum_file_blocks(trans, log, sums);
3192 list_del(&sums->list);
3198 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
3200 struct extent_map *em1, *em2;
3202 em1 = list_entry(a, struct extent_map, list);
3203 em2 = list_entry(b, struct extent_map, list);
3205 if (em1->start < em2->start)
3207 else if (em1->start > em2->start)
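/*
 * Drop or trim file extent items already in the log that overlap the
 * range covered by 'em': items that fall entirely inside the range are
 * deleted, and an item extending past em->start + em->len is re-keyed to
 * start at the end of the range, so the extent being logged never
 * overlaps stale items.
 */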
3212 static int drop_adjacent_extents(struct btrfs_trans_handle *trans,
3213 struct btrfs_root *root, struct inode *inode,
3214 struct extent_map *em,
3215 struct btrfs_path *path)
3217 struct btrfs_file_extent_item *fi;
3218 struct extent_buffer *leaf;
3219 struct btrfs_key key, new_key;
3220 struct btrfs_map_token token;
3222 u64 extent_offset = 0;
3229 btrfs_init_map_token(&token);
3230 leaf = path->nodes[0];
3232 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3234 ret = btrfs_del_items(trans, root, path,
3241 ret = btrfs_next_leaf_write(trans, root, path, 1);
3246 leaf = path->nodes[0];
3249 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3250 if (key.objectid != btrfs_ino(inode) ||
3251 key.type != BTRFS_EXTENT_DATA_KEY ||
3252 key.offset >= em->start + em->len)
3255 fi = btrfs_item_ptr(leaf, path->slots[0],
3256 struct btrfs_file_extent_item);
3257 extent_type = btrfs_token_file_extent_type(leaf, fi, &token);
3258 if (extent_type == BTRFS_FILE_EXTENT_REG ||
3259 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
3260 extent_offset = btrfs_token_file_extent_offset(leaf,
3262 extent_end = key.offset +
3263 btrfs_token_file_extent_num_bytes(leaf, fi,
3265 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3266 extent_end = key.offset +
3267 btrfs_file_extent_inline_len(leaf, fi);
3272 if (extent_end <= em->len + em->start) {
3274 del_slot = path->slots[0];
3281 * Ok so we'll ignore previous items if we log a new extent,
3282 * which can lead to overlapping extents. So if we have an
3283 * existing extent we want to adjust, we _have_ to check the next
3284 * item to make sure we even need this extent anymore; this keeps
3285 * us from panicking in set_item_key_safe.
3287 if (path->slots[0] < btrfs_header_nritems(leaf) - 1) {
3288 struct btrfs_key tmp_key;
3290 btrfs_item_key_to_cpu(leaf, &tmp_key,
3291 path->slots[0] + 1);
3292 if (tmp_key.objectid == btrfs_ino(inode) &&
3293 tmp_key.type == BTRFS_EXTENT_DATA_KEY &&
3294 tmp_key.offset <= em->start + em->len) {
3296 del_slot = path->slots[0];
3302 BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
3303 memcpy(&new_key, &key, sizeof(new_key));
3304 new_key.offset = em->start + em->len;
3305 btrfs_set_item_key_safe(trans, root, path, &new_key);
3306 extent_offset += em->start + em->len - key.offset;
3307 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset,
3309 btrfs_set_token_file_extent_num_bytes(leaf, fi, extent_end -
3310 (em->start + em->len),
3312 btrfs_mark_buffer_dirty(leaf);
3316 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
3321 static int log_one_extent(struct btrfs_trans_handle *trans,
3322 struct inode *inode, struct btrfs_root *root,
3323 struct extent_map *em, struct btrfs_path *path)
3325 struct btrfs_root *log = root->log_root;
3326 struct btrfs_file_extent_item *fi;
3327 struct extent_buffer *leaf;
3328 struct btrfs_ordered_extent *ordered;
3329 struct list_head ordered_sums;
3330 struct btrfs_map_token token;
3331 struct btrfs_key key;
3332 u64 mod_start = em->mod_start;
3333 u64 mod_len = em->mod_len;
3336 u64 extent_offset = em->start - em->orig_start;
3339 int index = log->log_transid % 2;
3340 bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3343 INIT_LIST_HEAD(&ordered_sums);
3344 btrfs_init_map_token(&token);
3345 key.objectid = btrfs_ino(inode);
3346 key.type = BTRFS_EXTENT_DATA_KEY;
3347 key.offset = em->start;
3348 path->really_keep_locks = 1;
3350 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*fi));
3351 if (ret && ret != -EEXIST) {
3352 path->really_keep_locks = 0;
3355 leaf = path->nodes[0];
3356 fi = btrfs_item_ptr(leaf, path->slots[0],
3357 struct btrfs_file_extent_item);
3360 * If we are overwriting an inline extent with a real one then we need
3361 * to just delete the inline extent as it may not be large enough to
3362 * have the entire file_extent_item.
3364 if (ret && btrfs_token_file_extent_type(leaf, fi, &token) ==
3365 BTRFS_FILE_EXTENT_INLINE) {
3366 ret = btrfs_del_item(trans, log, path);
3367 btrfs_release_path(path);
3369 path->really_keep_locks = 0;
3375 btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
3377 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3379 btrfs_set_token_file_extent_type(leaf, fi,
3380 BTRFS_FILE_EXTENT_PREALLOC,
3383 btrfs_set_token_file_extent_type(leaf, fi,
3384 BTRFS_FILE_EXTENT_REG,
3386 if (em->block_start == 0)
3390 block_len = max(em->block_len, em->orig_block_len);
3391 if (em->compress_type != BTRFS_COMPRESS_NONE) {
3392 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
3395 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
3397 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
3398 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
3400 extent_offset, &token);
3401 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
3404 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
3405 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
3409 btrfs_set_token_file_extent_offset(leaf, fi,
3410 em->start - em->orig_start,
3412 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
3413 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->len, &token);
3414 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
3416 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
3417 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
3418 btrfs_mark_buffer_dirty(leaf);
3421 * Have to check the extent to the right of us to make sure it doesn't
3422 * fall in our current range. We're ok if the previous extent is in our
3423 * range since the recovery stuff will run us in key order and thus just
3424 * drop the part we overwrote.
3426 ret = drop_adjacent_extents(trans, log, inode, em, path);
3427 btrfs_release_path(path);
3428 path->really_keep_locks = 0;
3436 if (em->compress_type) {
3438 csum_len = block_len;
3442 * First check and see if our csums are on our outstanding ordered
3446 spin_lock_irq(&log->log_extents_lock[index]);
3447 list_for_each_entry(ordered, &log->logged_list[index], log_list) {
3448 struct btrfs_ordered_sum *sum;
3453 if (ordered->inode != inode)
3456 if (ordered->file_offset + ordered->len <= mod_start ||
3457 mod_start + mod_len <= ordered->file_offset)
3461 * We are going to copy all the csums on this ordered extent, so
3462 * go ahead and adjust mod_start and mod_len in case this
3463 * ordered extent has already been logged.
3465 if (ordered->file_offset > mod_start) {
3466 if (ordered->file_offset + ordered->len >=
3467 mod_start + mod_len)
3468 mod_len = ordered->file_offset - mod_start;
3470 * If we have this case
3472 * |--------- logged extent ---------|
3473 * |----- ordered extent ----|
3475 * Just don't mess with mod_start and mod_len, we'll
3476 * just end up logging more csums than we need and it
3480 if (ordered->file_offset + ordered->len <
3481 mod_start + mod_len) {
3482 mod_len = (mod_start + mod_len) -
3483 (ordered->file_offset + ordered->len);
3484 mod_start = ordered->file_offset +
3492 * To keep us from looping for the above case of an ordered
3493 * extent that falls inside of the logged extent.
3495 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
3498 atomic_inc(&ordered->refs);
3499 spin_unlock_irq(&log->log_extents_lock[index]);
3501 * we've dropped the lock, we must either break or
3502 * start over after this.
3505 wait_event(ordered->wait, ordered->csum_bytes_left == 0);
3507 list_for_each_entry(sum, &ordered->list, list) {
3508 ret = btrfs_csum_file_blocks(trans, log, sum);
3510 btrfs_put_ordered_extent(ordered);
3514 btrfs_put_ordered_extent(ordered);
3518 spin_unlock_irq(&log->log_extents_lock[index]);
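/*
 * whatever part of [mod_start, mod_start + mod_len) was not covered by
 * an outstanding ordered extent has already completed, so its csums can
 * be read straight out of the csum tree below.
 */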
3521 if (!mod_len || ret)
3524 csum_offset = mod_start - em->start;
3527 /* block start is already adjusted for the file extent offset. */
3528 ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
3529 em->block_start + csum_offset,
3530 em->block_start + csum_offset +
3531 csum_len - 1, &ordered_sums, 0);
3535 while (!list_empty(&ordered_sums)) {
3536 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3537 struct btrfs_ordered_sum,
3540 ret = btrfs_csum_file_blocks(trans, log, sums);
3541 list_del(&sums->list);
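/*
 * Collect every extent map modified since the last committed transaction,
 * sort them by file offset and log each one with log_one_extent(). If the
 * list grows unreasonably long we bail out so the caller falls back to a
 * full transaction commit instead.
 */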
3548 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
3549 struct btrfs_root *root,
3550 struct inode *inode,
3551 struct btrfs_path *path)
3553 struct extent_map *em, *n;
3554 struct list_head extents;
3555 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
3560 INIT_LIST_HEAD(&extents);
3562 write_lock(&tree->lock);
3563 test_gen = root->fs_info->last_trans_committed;
3565 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
3566 list_del_init(&em->list);
3569 * Just an arbitrary number; this can be really CPU intensive
3570 * once we start getting a lot of extents, and really once we
3571 * have a bunch of extents we just want to commit since it will
3574 if (++num > 32768) {
3575 list_del_init(&tree->modified_extents);
3580 if (em->generation <= test_gen)
3582 /* Need a ref to keep it from getting evicted from cache */
3583 atomic_inc(&em->refs);
3584 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
3585 list_add_tail(&em->list, &extents);
3589 list_sort(NULL, &extents, extent_cmp);
3592 while (!list_empty(&extents)) {
3593 em = list_entry(extents.next, struct extent_map, list);
3595 list_del_init(&em->list);
3598 * If we had an error we just need to delete everybody from our
3602 clear_em_logging(tree, em);
3603 free_extent_map(em);
3607 write_unlock(&tree->lock);
3609 ret = log_one_extent(trans, inode, root, em, path);
3610 write_lock(&tree->lock);
3611 clear_em_logging(tree, em);
3612 free_extent_map(em);
3614 WARN_ON(!list_empty(&extents));
3615 write_unlock(&tree->lock);
3617 btrfs_release_path(path);
3621 /* log a single inode in the tree log.
3622 * At least one parent directory for this inode must exist in the tree
3623 * or be logged already.
3625 * Any items from this inode changed by the current transaction are copied
3626 * to the log tree. An extra reference is taken on any extents in this
3627 * file, allowing us to avoid a whole pile of corner cases around logging
3628 * blocks that have been removed from the tree.
3630 * See LOG_INODE_ALL and related defines for a description of what inode_only
3633 * This handles both files and directories.
3635 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3636 struct btrfs_root *root, struct inode *inode,
3639 struct btrfs_path *path;
3640 struct btrfs_path *dst_path;
3641 struct btrfs_key min_key;
3642 struct btrfs_key max_key;
3643 struct btrfs_root *log = root->log_root;
3644 struct extent_buffer *src = NULL;
3648 int ins_start_slot = 0;
3650 bool fast_search = false;
3651 u64 ino = btrfs_ino(inode);
3653 log = root->log_root;
3655 path = btrfs_alloc_path();
3658 dst_path = btrfs_alloc_path();
3660 btrfs_free_path(path);
3664 min_key.objectid = ino;
3665 min_key.type = BTRFS_INODE_ITEM_KEY;
3668 max_key.objectid = ino;
3671 /* today the code can only do partial logging of directories */
3672 if (S_ISDIR(inode->i_mode) ||
3673 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3674 &BTRFS_I(inode)->runtime_flags) &&
3675 inode_only == LOG_INODE_EXISTS))
3676 max_key.type = BTRFS_XATTR_ITEM_KEY;
3678 max_key.type = (u8)-1;
3679 max_key.offset = (u64)-1;
3681 /* Only run delayed items if we are a dir or a new file */
3682 if (S_ISDIR(inode->i_mode) ||
3683 BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) {
3684 ret = btrfs_commit_inode_delayed_items(trans, inode);
3686 btrfs_free_path(path);
3687 btrfs_free_path(dst_path);
3692 mutex_lock(&BTRFS_I(inode)->log_mutex);
3694 btrfs_get_logged_extents(log, inode);
3697 * a brute force approach to making sure we get the most uptodate
3698 * copies of everything.
3700 if (S_ISDIR(inode->i_mode)) {
3701 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
3703 if (inode_only == LOG_INODE_EXISTS)
3704 max_key_type = BTRFS_XATTR_ITEM_KEY;
3705 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
3707 if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3708 &BTRFS_I(inode)->runtime_flags)) {
3709 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
3710 &BTRFS_I(inode)->runtime_flags);
3711 ret = btrfs_truncate_inode_items(trans, log,
3713 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
3714 &BTRFS_I(inode)->runtime_flags)) {
3715 if (inode_only == LOG_INODE_ALL)
3717 max_key.type = BTRFS_XATTR_ITEM_KEY;
3718 ret = drop_objectid_items(trans, log, path, ino,
3721 if (inode_only == LOG_INODE_ALL)
3723 ret = log_inode_item(trans, log, dst_path, inode);
3736 path->keep_locks = 1;
3740 ret = btrfs_search_forward(root, &min_key, &max_key,
3741 path, trans->transid);
3745 /* note, ins_nr might be > 0 here, cleanup outside the loop */
3746 if (min_key.objectid != ino)
3748 if (min_key.type > max_key.type)
3751 src = path->nodes[0];
3752 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
3755 } else if (!ins_nr) {
3756 ins_start_slot = path->slots[0];
3761 ret = copy_items(trans, inode, dst_path, src, ins_start_slot,
3762 ins_nr, inode_only);
3768 ins_start_slot = path->slots[0];
3771 nritems = btrfs_header_nritems(path->nodes[0]);
3773 if (path->slots[0] < nritems) {
3774 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
3779 ret = copy_items(trans, inode, dst_path, src,
3781 ins_nr, inode_only);
3788 btrfs_release_path(path);
3790 if (min_key.offset < (u64)-1)
3792 else if (min_key.type < (u8)-1)
3794 else if (min_key.objectid < (u64)-1)
3800 ret = copy_items(trans, inode, dst_path, src, ins_start_slot,
3801 ins_nr, inode_only);
3811 btrfs_release_path(dst_path);
3812 ret = btrfs_log_changed_extents(trans, root, inode, dst_path);
3818 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
3819 struct extent_map *em, *n;
3821 write_lock(&tree->lock);
3822 list_for_each_entry_safe(em, n, &tree->modified_extents, list)
3823 list_del_init(&em->list);
3824 write_unlock(&tree->lock);
3827 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
3828 btrfs_release_path(path);
3829 btrfs_release_path(dst_path);
3830 ret = log_directory_changes(trans, root, inode, path, dst_path);
3836 BTRFS_I(inode)->logged_trans = trans->transid;
3837 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
3840 btrfs_free_logged_extents(log, log->log_transid);
3841 mutex_unlock(&BTRFS_I(inode)->log_mutex);
3843 btrfs_free_path(path);
3844 btrfs_free_path(dst_path);
3849 * follow the dentry parent pointers up the chain and see if any
3850 * of the directories in it require a full commit before they can
3851 * be logged. Returns zero if nothing special needs to be done or 1 if
3852 * a full commit is required.
3854 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
3855 struct inode *inode,
3856 struct dentry *parent,
3857 struct super_block *sb,
3861 struct btrfs_root *root;
3862 struct dentry *old_parent = NULL;
3865 * for a regular file, if its inode is already on disk, we don't
3866 * have to worry about the parents at all. This is because
3867 * we can use the last_unlink_trans field to record renames
3868 * and other fun in this file.
3870 if (S_ISREG(inode->i_mode) &&
3871 BTRFS_I(inode)->generation <= last_committed &&
3872 BTRFS_I(inode)->last_unlink_trans <= last_committed)
3875 if (!S_ISDIR(inode->i_mode)) {
3876 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
3878 inode = parent->d_inode;
3882 BTRFS_I(inode)->logged_trans = trans->transid;
3885 if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
3886 root = BTRFS_I(inode)->root;
3889 * make sure any commits to the log are forced
3890 * to be full commits
3892 root->fs_info->last_trans_log_full_commit =
3898 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
3901 if (IS_ROOT(parent))
3904 parent = dget_parent(parent);
3906 old_parent = parent;
3907 inode = parent->d_inode;
3916 * helper function around btrfs_log_inode to make sure newly created
3917 * parent directories also end up in the log. A minimal, inode-and-backref
3918 * only logging is done for any parent directories that are newer than
3919 * the last committed transaction
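 *
 * After logging the inode itself we walk up via dget_parent() and log
 * every parent directory created since the last transaction commit with
 * LOG_INODE_EXISTS, so replay can recreate the full path to the file.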
3921 int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
3922 struct btrfs_root *root, struct inode *inode,
3923 struct dentry *parent, int exists_only)
3925 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
3926 struct super_block *sb;
3927 struct dentry *old_parent = NULL;
3929 u64 last_committed = root->fs_info->last_trans_committed;
3933 if (btrfs_test_opt(root, NOTREELOG)) {
3938 if (root->fs_info->last_trans_log_full_commit >
3939 root->fs_info->last_trans_committed) {
3944 if (root != BTRFS_I(inode)->root ||
3945 btrfs_root_refs(&root->root_item) == 0) {
3950 ret = check_parent_dirs_for_sync(trans, inode, parent,
3951 sb, last_committed);
3955 if (btrfs_inode_in_log(inode, trans->transid)) {
3956 ret = BTRFS_NO_LOG_SYNC;
3960 ret = start_log_trans(trans, root);
3964 ret = btrfs_log_inode(trans, root, inode, inode_only);
3969 * for a regular file, if its inode is already on disk, we don't
3970 * have to worry about the parents at all. This is because
3971 * we can use the last_unlink_trans field to record renames
3972 * and other fun in this file.
3974 if (S_ISREG(inode->i_mode) &&
3975 BTRFS_I(inode)->generation <= last_committed &&
3976 BTRFS_I(inode)->last_unlink_trans <= last_committed) {
3981 inode_only = LOG_INODE_EXISTS;
3983 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
3986 inode = parent->d_inode;
3987 if (root != BTRFS_I(inode)->root)
3990 if (BTRFS_I(inode)->generation >
3991 root->fs_info->last_trans_committed) {
3992 ret = btrfs_log_inode(trans, root, inode, inode_only);
3996 if (IS_ROOT(parent))
3999 parent = dget_parent(parent);
4001 old_parent = parent;
4007 root->fs_info->last_trans_log_full_commit = trans->transid;
4010 btrfs_end_log_trans(root);
4016 * it is not safe to log the dentry if the chunk root has added new
4017 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
4018 * If this returns 1, you must commit the transaction to safely get your
4021 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
4022 struct btrfs_root *root, struct dentry *dentry)
4024 struct dentry *parent = dget_parent(dentry);
4027 ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, 0);
4034 * should be called during mount to recover and replay any log trees
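 *
 * Recovery happens in stages (see the LOG_WALK_* defines): first all log
 * blocks are pinned, then the inodes found in each log are created in
 * the matching subvolume, and finally dir items, refs and extents are
 * replayed. The transaction commit at the end unpins the log blocks.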
4037 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
4040 struct btrfs_path *path;
4041 struct btrfs_trans_handle *trans;
4042 struct btrfs_key key;
4043 struct btrfs_key found_key;
4044 struct btrfs_key tmp_key;
4045 struct btrfs_root *log;
4046 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
4047 struct walk_control wc = {
4048 .process_func = process_one_buffer,
4052 path = btrfs_alloc_path();
4056 fs_info->log_root_recovering = 1;
4058 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4059 if (IS_ERR(trans)) {
4060 ret = PTR_ERR(trans);
4067 ret = walk_log_tree(trans, log_root_tree, &wc);
4069 btrfs_error(fs_info, ret, "Failed to pin buffers while "
4070 "recovering log root tree.");
4075 key.objectid = BTRFS_TREE_LOG_OBJECTID;
4076 key.offset = (u64)-1;
4077 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
4080 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
4083 btrfs_error(fs_info, ret,
4084 "Couldn't find tree log root.");
4088 if (path->slots[0] == 0)
4092 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
4094 btrfs_release_path(path);
4095 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4098 log = btrfs_read_fs_root_no_radix(log_root_tree,
4102 btrfs_error(fs_info, ret,
4103 "Couldn't read tree log root.");
4107 tmp_key.objectid = found_key.offset;
4108 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
4109 tmp_key.offset = (u64)-1;
4111 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
4112 if (IS_ERR(wc.replay_dest)) {
4113 ret = PTR_ERR(wc.replay_dest);
4114 btrfs_error(fs_info, ret, "Couldn't read target root "
4115 "for tree log recovery.");
4119 wc.replay_dest->log_root = log;
4120 btrfs_record_root_in_trans(trans, wc.replay_dest);
4121 ret = walk_log_tree(trans, log, &wc);
4124 if (wc.stage == LOG_WALK_REPLAY_ALL) {
4125 ret = fixup_inode_link_counts(trans, wc.replay_dest,
4130 key.offset = found_key.offset - 1;
4131 wc.replay_dest->log_root = NULL;
4132 free_extent_buffer(log->node);
4133 free_extent_buffer(log->commit_root);
4136 if (found_key.offset == 0)
4139 btrfs_release_path(path);
4141 /* step one is to pin it all, step two is to replay just inodes */
4144 wc.process_func = replay_one_buffer;
4145 wc.stage = LOG_WALK_REPLAY_INODES;
4148 /* step three is to replay everything */
4149 if (wc.stage < LOG_WALK_REPLAY_ALL) {
4154 btrfs_free_path(path);
4156 free_extent_buffer(log_root_tree->node);
4157 log_root_tree->log_root = NULL;
4158 fs_info->log_root_recovering = 0;
4160 /* step 4: commit the transaction, which also unpins the blocks */
4161 btrfs_commit_transaction(trans, fs_info->tree_root);
4163 kfree(log_root_tree);
4167 btrfs_free_path(path);
4172 * there are some corner cases where we want to force a full
4173 * commit instead of allowing a directory to be logged.
4175 * They revolve around files that were unlinked from the directory, and
4176 * this function updates the parent directory so that a full commit is
4177 * properly done if it is fsync'd later after the unlinks are done.
4179 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
4180 struct inode *dir, struct inode *inode,
4184 * when we're logging a file, if it hasn't been renamed
4185 * or unlinked, and its inode is fully committed on disk,
4186 * we don't have to worry about walking up the directory chain
4187 * to log its parents.
4189 * So, we use the last_unlink_trans field to put this transid
4190 * into the file. When the file is logged we check it and
4191 * don't log the parents if the file is fully on disk.
4193 if (S_ISREG(inode->i_mode))
4194 BTRFS_I(inode)->last_unlink_trans = trans->transid;
4197 * if this directory was already logged, any new
4198 * names for this file/dir will get recorded
4201 if (BTRFS_I(dir)->logged_trans == trans->transid)
4205 * if the inode we're about to unlink was logged,
4206 * the log will be properly updated for any new names
4208 if (BTRFS_I(inode)->logged_trans == trans->transid)
4212 * when renaming files across directories, if the directory
4213 * we're unlinking from gets fsync'd later on, there's
4214 * no way to find the destination directory later and fsync it
4215 * properly. So, we have to be conservative and force commits
4216 * so the new name gets discovered.
4221 /* we can safely do the unlink without any special recording */
4225 BTRFS_I(dir)->last_unlink_trans = trans->transid;
4229 * Call this after adding a new name for a file and it will properly
4230 * update the log to reflect the new name.
4232 * It will return zero if all goes well, and it will return 1 if a
4233 * full transaction commit is required.
4235 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
4236 struct inode *inode, struct inode *old_dir,
4237 struct dentry *parent)
4239 struct btrfs_root *root = BTRFS_I(inode)->root;
4242 * this will force the logging code to walk the dentry chain
4245 if (S_ISREG(inode->i_mode))
4246 BTRFS_I(inode)->last_unlink_trans = trans->transid;
4249 * if this inode hasn't been logged and the directory we're renaming it
4250 * from hasn't been logged, we don't need to log it
4252 if (BTRFS_I(inode)->logged_trans <=
4253 root->fs_info->last_trans_committed &&
4254 (!old_dir || BTRFS_I(old_dir)->logged_trans <=
4255 root->fs_info->last_trans_committed))
4258 return btrfs_log_inode_parent(trans, root, inode, parent, 1);