1 #include <linux/bitops.h>
2 #include <linux/slab.h>
5 #include <linux/pagemap.h>
6 #include <linux/page-flags.h>
7 #include <linux/module.h>
8 #include <linux/spinlock.h>
9 #include <linux/blkdev.h>
10 #include <linux/swap.h>
11 #include <linux/writeback.h>
12 #include <linux/pagevec.h>
13 #include <linux/prefetch.h>
14 #include <linux/cleancache.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
19 #include "btrfs_inode.h"
21 #include "check-integrity.h"
23 static struct kmem_cache *extent_state_cache;
24 static struct kmem_cache *extent_buffer_cache;
26 static LIST_HEAD(buffers);
27 static LIST_HEAD(states);
31 static DEFINE_SPINLOCK(leak_lock);
34 #define BUFFER_LRU_MAX 64
39 struct rb_node rb_node;
42 struct extent_page_data {
44 struct extent_io_tree *tree;
45 get_extent_t *get_extent;
47 /* tells writepage not to lock the state bits for this range
48 * it still does the unlocking
50 unsigned int extent_locked:1;
52 /* tells the submit_bio code to use a WRITE_SYNC */
53 unsigned int sync_io:1;
56 static inline struct btrfs_fs_info *
57 tree_fs_info(struct extent_io_tree *tree)
59 return btrfs_sb(tree->mapping->host->i_sb);
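/*
 * Create the slab caches used for extent_state and extent_buffer objects.
 * If the buffer cache allocation fails, the state cache is destroyed again
 * before bailing out.
 */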
62 int __init extent_io_init(void)
64 extent_state_cache = kmem_cache_create("extent_state",
65 sizeof(struct extent_state), 0,
66 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
67 if (!extent_state_cache)
70 extent_buffer_cache = kmem_cache_create("extent_buffers",
71 sizeof(struct extent_buffer), 0,
72 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
73 if (!extent_buffer_cache)
74 goto free_state_cache;
78 kmem_cache_destroy(extent_state_cache);
82 void extent_io_exit(void)
84 struct extent_state *state;
85 struct extent_buffer *eb;
87 while (!list_empty(&states)) {
88 state = list_entry(states.next, struct extent_state, leak_list);
89 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
90 "state %lu in tree %p refs %d\n",
91 (unsigned long long)state->start,
92 (unsigned long long)state->end,
93 state->state, state->tree, atomic_read(&state->refs));
94 list_del(&state->leak_list);
95 kmem_cache_free(extent_state_cache, state);
99 while (!list_empty(&buffers)) {
100 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
101 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
102 "refs %d\n", (unsigned long long)eb->start,
103 eb->len, atomic_read(&eb->refs));
104 list_del(&eb->leak_list);
105 kmem_cache_free(extent_buffer_cache, eb);
107 if (extent_state_cache)
108 kmem_cache_destroy(extent_state_cache);
109 if (extent_buffer_cache)
110 kmem_cache_destroy(extent_buffer_cache);
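/*
 * Initialize an extent_io_tree: empty rb-tree of states, radix tree of
 * extent buffers, zeroed dirty-byte count, and the two spinlocks.
 */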
113 void extent_io_tree_init(struct extent_io_tree *tree,
114 struct address_space *mapping)
116 tree->state = RB_ROOT;
117 INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
119 tree->dirty_bytes = 0;
120 spin_lock_init(&tree->lock);
121 spin_lock_init(&tree->buffer_lock);
122 tree->mapping = mapping;
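/*
 * Allocate a new extent_state from the slab cache, add it to the leak
 * list for debugging, and start it off with a single reference.
 */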
125 static struct extent_state *alloc_extent_state(gfp_t mask)
127 struct extent_state *state;
132 state = kmem_cache_alloc(extent_state_cache, mask);
139 spin_lock_irqsave(&leak_lock, flags);
140 list_add(&state->leak_list, &states);
141 spin_unlock_irqrestore(&leak_lock, flags);
143 atomic_set(&state->refs, 1);
144 init_waitqueue_head(&state->wq);
148 void free_extent_state(struct extent_state *state)
152 if (atomic_dec_and_test(&state->refs)) {
156 WARN_ON(state->tree);
158 spin_lock_irqsave(&leak_lock, flags);
159 list_del(&state->leak_list);
160 spin_unlock_irqrestore(&leak_lock, flags);
162 kmem_cache_free(extent_state_cache, state);
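/*
 * Insert an rb_node into the state tree, keyed by the range's end offset.
 * If an existing entry already contains 'offset', that node is returned
 * instead of linking the new one.
 */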
166 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
167 struct rb_node *node)
169 struct rb_node **p = &root->rb_node;
170 struct rb_node *parent = NULL;
171 struct tree_entry *entry;
175 entry = rb_entry(parent, struct tree_entry, rb_node);
177 if (offset < entry->start)
179 else if (offset > entry->end)
185 entry = rb_entry(node, struct tree_entry, rb_node);
186 rb_link_node(node, parent, p);
187 rb_insert_color(node, root);
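/*
 * Search the state tree for the entry containing 'offset'.  If there is
 * no exact match, fill prev_ret/next_ret with the neighbouring entries so
 * the caller can decide where to insert.
 */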
191 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
192 struct rb_node **prev_ret,
193 struct rb_node **next_ret)
195 struct rb_root *root = &tree->state;
196 struct rb_node *n = root->rb_node;
197 struct rb_node *prev = NULL;
198 struct rb_node *orig_prev = NULL;
199 struct tree_entry *entry;
200 struct tree_entry *prev_entry = NULL;
203 entry = rb_entry(n, struct tree_entry, rb_node);
207 if (offset < entry->start)
209 else if (offset > entry->end)
217 while (prev && offset > prev_entry->end) {
218 prev = rb_next(prev);
219 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
226 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
227 while (prev && offset < prev_entry->start) {
228 prev = rb_prev(prev);
229 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
236 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
239 struct rb_node *prev = NULL;
242 ret = __etree_search(tree, offset, &prev, NULL);
248 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
249 struct extent_state *other)
251 if (tree->ops && tree->ops->merge_extent_hook)
252 tree->ops->merge_extent_hook(tree->mapping->host, new,
257 * utility function to look for merge candidates inside a given range.
258 * Any extents with matching state are merged together into a single
259 * extent in the tree. Extents with EXTENT_IOBITS or EXTENT_BOUNDARY in their
260 * state field are not merged because the end_io handlers need to be able to do
261 * operations on them without sleeping (or doing allocations/splits).
263 * This should be called with the tree lock held.
265 static void merge_state(struct extent_io_tree *tree,
266 struct extent_state *state)
268 struct extent_state *other;
269 struct rb_node *other_node;
271 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
274 other_node = rb_prev(&state->rb_node);
276 other = rb_entry(other_node, struct extent_state, rb_node);
277 if (other->end == state->start - 1 &&
278 other->state == state->state) {
279 merge_cb(tree, state, other);
280 state->start = other->start;
282 rb_erase(&other->rb_node, &tree->state);
283 free_extent_state(other);
286 other_node = rb_next(&state->rb_node);
288 other = rb_entry(other_node, struct extent_state, rb_node);
289 if (other->start == state->end + 1 &&
290 other->state == state->state) {
291 merge_cb(tree, state, other);
292 state->end = other->end;
294 rb_erase(&other->rb_node, &tree->state);
295 free_extent_state(other);
300 static void set_state_cb(struct extent_io_tree *tree,
301 struct extent_state *state, int *bits)
303 if (tree->ops && tree->ops->set_bit_hook)
304 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
307 static void clear_state_cb(struct extent_io_tree *tree,
308 struct extent_state *state, int *bits)
310 if (tree->ops && tree->ops->clear_bit_hook)
311 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
314 static void set_state_bits(struct extent_io_tree *tree,
315 struct extent_state *state, int *bits);
318 * insert an extent_state struct into the tree. 'bits' are set on the
319 * struct before it is inserted.
321 * This may return -EEXIST if the extent is already there, in which case the
322 * state struct is freed.
324 * The tree lock is not taken internally. This is a utility function and
325 * probably isn't what you want to call (see set/clear_extent_bit).
327 static int insert_state(struct extent_io_tree *tree,
328 struct extent_state *state, u64 start, u64 end,
331 struct rb_node *node;
334 printk(KERN_ERR "btrfs end < start %llu %llu\n",
335 (unsigned long long)end,
336 (unsigned long long)start);
339 state->start = start;
342 set_state_bits(tree, state, bits);
344 node = tree_insert(&tree->state, end, &state->rb_node);
346 struct extent_state *found;
347 found = rb_entry(node, struct extent_state, rb_node);
348 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
349 "%llu %llu\n", (unsigned long long)found->start,
350 (unsigned long long)found->end,
351 (unsigned long long)start, (unsigned long long)end);
355 merge_state(tree, state);
359 static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
362 if (tree->ops && tree->ops->split_extent_hook)
363 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
367 * split a given extent state struct in two, inserting the preallocated
368 * struct 'prealloc' as the newly created second half. 'split' indicates an
369 * offset inside 'orig' where it should be split.
372 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
373 * are two extent state structs in the tree:
374 * prealloc: [orig->start, split - 1]
375 * orig: [ split, orig->end ]
377 * The tree locks are not taken by this function. They need to be held
380 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
381 struct extent_state *prealloc, u64 split)
383 struct rb_node *node;
385 split_cb(tree, orig, split);
387 prealloc->start = orig->start;
388 prealloc->end = split - 1;
389 prealloc->state = orig->state;
392 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
394 free_extent_state(prealloc);
397 prealloc->tree = tree;
402 * utility function to clear some bits in an extent state struct.
403 * it will optionally wake up anyone waiting on this state (wake == 1), or
404 * forcibly remove the state from the tree (delete == 1).
406 * If no bits are set on the state struct after clearing things, the
407 * struct is freed and removed from the tree
409 static int clear_state_bit(struct extent_io_tree *tree,
410 struct extent_state *state,
413 int bits_to_clear = *bits & ~EXTENT_CTLBITS;
414 int ret = state->state & bits_to_clear;
416 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
417 u64 range = state->end - state->start + 1;
418 WARN_ON(range > tree->dirty_bytes);
419 tree->dirty_bytes -= range;
421 clear_state_cb(tree, state, bits);
422 state->state &= ~bits_to_clear;
425 if (state->state == 0) {
427 rb_erase(&state->rb_node, &tree->state);
429 free_extent_state(state);
434 merge_state(tree, state);
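/*
 * Make sure we have a preallocated extent_state; if the caller did not
 * pass one in, fall back to a GFP_ATOMIC allocation.
 */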
439 static struct extent_state *
440 alloc_extent_state_atomic(struct extent_state *prealloc)
443 prealloc = alloc_extent_state(GFP_ATOMIC);
448 void extent_io_tree_panic(struct extent_io_tree *tree, int err)
450 btrfs_panic(tree_fs_info(tree), err, "Locking error: "
451 "Extent tree was modified by another "
452 "thread while locked.");
456 * clear some bits on a range in the tree. This may require splitting
457 * or inserting elements in the tree, so the gfp mask is used to
458 * indicate which allocations or sleeping are allowed.
460 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
461 * the given range from the tree regardless of state (ie for truncate).
463 * the range [start, end] is inclusive.
465 * This takes the tree lock, and returns 0 on success and < 0 on error.
467 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
468 int bits, int wake, int delete,
469 struct extent_state **cached_state,
472 struct extent_state *state;
473 struct extent_state *cached;
474 struct extent_state *prealloc = NULL;
475 struct rb_node *next_node;
476 struct rb_node *node;
482 bits |= ~EXTENT_CTLBITS;
483 bits |= EXTENT_FIRST_DELALLOC;
485 if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
488 if (!prealloc && (mask & __GFP_WAIT)) {
489 prealloc = alloc_extent_state(mask);
494 spin_lock(&tree->lock);
496 cached = *cached_state;
499 *cached_state = NULL;
503 if (cached && cached->tree && cached->start <= start &&
504 cached->end > start) {
506 atomic_dec(&cached->refs);
511 free_extent_state(cached);
514 * this search will find the extents that end after
517 node = tree_search(tree, start);
520 state = rb_entry(node, struct extent_state, rb_node);
522 if (state->start > end)
524 WARN_ON(state->end < start);
525 last_end = state->end;
527 if (state->end < end && !need_resched())
528 next_node = rb_next(&state->rb_node);
532 /* the state doesn't have the wanted bits, go ahead */
533 if (!(state->state & bits))
537 * | ---- desired range ---- |
539 * | ------------- state -------------- |
541 * We need to split the extent we found, and may flip
542 * bits on second half.
544 * If the extent we found extends past our range, we
545 * just split and search again. It'll get split again
546 * the next time though.
548 * If the extent we found is inside our range, we clear
549 * the desired bit on it.
552 if (state->start < start) {
553 prealloc = alloc_extent_state_atomic(prealloc);
555 err = split_state(tree, state, prealloc, start);
557 extent_io_tree_panic(tree, err);
562 if (state->end <= end) {
563 clear_state_bit(tree, state, &bits, wake);
564 if (last_end == (u64)-1)
566 start = last_end + 1;
571 * | ---- desired range ---- |
573 * We need to split the extent, and clear the bit
576 if (state->start <= end && state->end > end) {
577 prealloc = alloc_extent_state_atomic(prealloc);
579 err = split_state(tree, state, prealloc, end + 1);
581 extent_io_tree_panic(tree, err);
586 clear_state_bit(tree, prealloc, &bits, wake);
592 clear_state_bit(tree, state, &bits, wake);
594 if (last_end == (u64)-1)
596 start = last_end + 1;
597 if (start <= end && next_node) {
598 state = rb_entry(next_node, struct extent_state,
605 spin_unlock(&tree->lock);
607 free_extent_state(prealloc);
614 spin_unlock(&tree->lock);
615 if (mask & __GFP_WAIT)
620 static int wait_on_state(struct extent_io_tree *tree,
621 struct extent_state *state)
622 __releases(tree->lock)
623 __acquires(tree->lock)
626 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
627 spin_unlock(&tree->lock);
629 spin_lock(&tree->lock);
630 finish_wait(&state->wq, &wait);
635 * waits for one or more bits to clear on a range in the state tree.
636 * The range [start, end] is inclusive.
637 * The tree lock is taken by this function
639 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
641 struct extent_state *state;
642 struct rb_node *node;
644 spin_lock(&tree->lock);
648 * this search will find all the extents that end after
651 node = tree_search(tree, start);
655 state = rb_entry(node, struct extent_state, rb_node);
657 if (state->start > end)
660 if (state->state & bits) {
661 start = state->start;
662 atomic_inc(&state->refs);
663 wait_on_state(tree, state);
664 free_extent_state(state);
667 start = state->end + 1;
672 cond_resched_lock(&tree->lock);
675 spin_unlock(&tree->lock);
679 static void set_state_bits(struct extent_io_tree *tree,
680 struct extent_state *state,
683 int bits_to_set = *bits & ~EXTENT_CTLBITS;
685 set_state_cb(tree, state, bits);
686 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
687 u64 range = state->end - state->start + 1;
688 tree->dirty_bytes += range;
690 state->state |= bits_to_set;
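/*
 * Stash a reference to 'state' in *cached_ptr so a later lookup in the
 * same range can skip the rb-tree search.  Only states carrying IO or
 * boundary bits are worth caching.
 */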
693 static void cache_state(struct extent_state *state,
694 struct extent_state **cached_ptr)
696 if (cached_ptr && !(*cached_ptr)) {
697 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
699 atomic_inc(&state->refs);
704 static void uncache_state(struct extent_state **cached_ptr)
706 if (cached_ptr && (*cached_ptr)) {
707 struct extent_state *state = *cached_ptr;
709 free_extent_state(state);
714 * set some bits on a range in the tree. This may require allocations or
715 * sleeping, so the gfp mask is used to indicate what is allowed.
717 * If any of the exclusive bits are set, this will fail with -EEXIST if some
718 * part of the range already has the desired bits set. The start of the
719 * existing range is returned in failed_start in this case.
721 * [start, end] is inclusive. This takes the tree lock.
724 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
725 int bits, int exclusive_bits, u64 *failed_start,
726 struct extent_state **cached_state, gfp_t mask)
728 struct extent_state *state;
729 struct extent_state *prealloc = NULL;
730 struct rb_node *node;
735 bits |= EXTENT_FIRST_DELALLOC;
737 if (!prealloc && (mask & __GFP_WAIT)) {
738 prealloc = alloc_extent_state(mask);
742 spin_lock(&tree->lock);
743 if (cached_state && *cached_state) {
744 state = *cached_state;
745 if (state->start <= start && state->end > start &&
747 node = &state->rb_node;
752 * this search will find all the extents that end after
755 node = tree_search(tree, start);
757 prealloc = alloc_extent_state_atomic(prealloc);
759 err = insert_state(tree, prealloc, start, end, &bits);
761 extent_io_tree_panic(tree, err);
766 state = rb_entry(node, struct extent_state, rb_node);
768 last_start = state->start;
769 last_end = state->end;
772 * | ---- desired range ---- |
775 * Just lock what we found and keep going
777 if (state->start == start && state->end <= end) {
778 struct rb_node *next_node;
779 if (state->state & exclusive_bits) {
780 *failed_start = state->start;
785 set_state_bits(tree, state, &bits);
787 cache_state(state, cached_state);
788 merge_state(tree, state);
789 if (last_end == (u64)-1)
792 start = last_end + 1;
793 next_node = rb_next(&state->rb_node);
794 if (next_node && start < end && prealloc && !need_resched()) {
795 state = rb_entry(next_node, struct extent_state,
797 if (state->start == start)
804 * | ---- desired range ---- |
807 * | ------------- state -------------- |
809 * We need to split the extent we found, and may flip bits on
812 * If the extent we found extends past our
813 * range, we just split and search again. It'll get split
814 * again the next time though.
816 * If the extent we found is inside our range, we set the
819 if (state->start < start) {
820 if (state->state & exclusive_bits) {
821 *failed_start = start;
826 prealloc = alloc_extent_state_atomic(prealloc);
828 err = split_state(tree, state, prealloc, start);
830 extent_io_tree_panic(tree, err);
835 if (state->end <= end) {
836 set_state_bits(tree, state, &bits);
837 cache_state(state, cached_state);
838 merge_state(tree, state);
839 if (last_end == (u64)-1)
841 start = last_end + 1;
846 * | ---- desired range ---- |
847 * | state | or | state |
849 * There's a hole, we need to insert something in it and
850 * ignore the extent we found.
852 if (state->start > start) {
854 if (end < last_start)
857 this_end = last_start - 1;
859 prealloc = alloc_extent_state_atomic(prealloc);
863 * Avoid freeing 'prealloc' if it can be merged with
866 err = insert_state(tree, prealloc, start, this_end,
869 extent_io_tree_panic(tree, err);
871 cache_state(prealloc, cached_state);
873 start = this_end + 1;
877 * | ---- desired range ---- |
879 * We need to split the extent, and set the bit
882 if (state->start <= end && state->end > end) {
883 if (state->state & exclusive_bits) {
884 *failed_start = start;
889 prealloc = alloc_extent_state_atomic(prealloc);
891 err = split_state(tree, state, prealloc, end + 1);
893 extent_io_tree_panic(tree, err);
895 set_state_bits(tree, prealloc, &bits);
896 cache_state(prealloc, cached_state);
897 merge_state(tree, prealloc);
905 spin_unlock(&tree->lock);
907 free_extent_state(prealloc);
914 spin_unlock(&tree->lock);
915 if (mask & __GFP_WAIT)
921 * convert_extent_bit - convert all bits in a given range from one bit to another
922 * @tree: the io tree to search
923 * @start: the start offset in bytes
924 * @end: the end offset in bytes (inclusive)
925 * @bits: the bits to set in this range
926 * @clear_bits: the bits to clear in this range
927 * @mask: the allocation mask
929 * This will go through and set bits for the given range. If any states exist
930 * already in this range they are set with the given bit and cleared of the
931 * clear_bits. This is only meant to be used by things that are mergeable, ie
932 * converting from say DELALLOC to DIRTY. This is not meant to be used with
933 * boundary bits like LOCK.
935 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
936 int bits, int clear_bits, gfp_t mask)
938 struct extent_state *state;
939 struct extent_state *prealloc = NULL;
940 struct rb_node *node;
946 if (!prealloc && (mask & __GFP_WAIT)) {
947 prealloc = alloc_extent_state(mask);
952 spin_lock(&tree->lock);
954 * this search will find all the extents that end after
957 node = tree_search(tree, start);
959 prealloc = alloc_extent_state_atomic(prealloc);
964 err = insert_state(tree, prealloc, start, end, &bits);
967 extent_io_tree_panic(tree, err);
970 state = rb_entry(node, struct extent_state, rb_node);
972 last_start = state->start;
973 last_end = state->end;
976 * | ---- desired range ---- |
979 * Just lock what we found and keep going
981 if (state->start == start && state->end <= end) {
982 struct rb_node *next_node;
984 set_state_bits(tree, state, &bits);
985 clear_state_bit(tree, state, &clear_bits, 0);
986 if (last_end == (u64)-1)
989 start = last_end + 1;
990 next_node = rb_next(&state->rb_node);
991 if (next_node && start < end && prealloc && !need_resched()) {
992 state = rb_entry(next_node, struct extent_state,
994 if (state->start == start)
1001 * | ---- desired range ---- |
1004 * | ------------- state -------------- |
1006 * We need to split the extent we found, and may flip bits on
1009 * If the extent we found extends past our
1010 * range, we just split and search again. It'll get split
1011 * again the next time though.
1013 * If the extent we found is inside our range, we set the
1014 * desired bit on it.
1016 if (state->start < start) {
1017 prealloc = alloc_extent_state_atomic(prealloc);
1022 err = split_state(tree, state, prealloc, start);
1024 extent_io_tree_panic(tree, err);
1028 if (state->end <= end) {
1029 set_state_bits(tree, state, &bits);
1030 clear_state_bit(tree, state, &clear_bits, 0);
1031 if (last_end == (u64)-1)
1033 start = last_end + 1;
1038 * | ---- desired range ---- |
1039 * | state | or | state |
1041 * There's a hole, we need to insert something in it and
1042 * ignore the extent we found.
1044 if (state->start > start) {
1046 if (end < last_start)
1049 this_end = last_start - 1;
1051 prealloc = alloc_extent_state_atomic(prealloc);
1058 * Avoid freeing 'prealloc' if it can be merged with
1061 err = insert_state(tree, prealloc, start, this_end,
1064 extent_io_tree_panic(tree, err);
1066 start = this_end + 1;
1070 * | ---- desired range ---- |
1072 * We need to split the extent, and set the bit
1075 if (state->start <= end && state->end > end) {
1076 prealloc = alloc_extent_state_atomic(prealloc);
1082 err = split_state(tree, state, prealloc, end + 1);
1084 extent_io_tree_panic(tree, err);
1086 set_state_bits(tree, prealloc, &bits);
1087 clear_state_bit(tree, prealloc, &clear_bits, 0);
1095 spin_unlock(&tree->lock);
1097 free_extent_state(prealloc);
1104 spin_unlock(&tree->lock);
1105 if (mask & __GFP_WAIT)
1110 /* wrappers around set/clear extent bit */
1111 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1114 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
1118 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1119 int bits, gfp_t mask)
1121 return set_extent_bit(tree, start, end, bits, 0, NULL,
1125 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1126 int bits, gfp_t mask)
1128 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
1131 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
1132 struct extent_state **cached_state, gfp_t mask)
1134 return set_extent_bit(tree, start, end,
1135 EXTENT_DELALLOC | EXTENT_UPTODATE,
1136 0, NULL, cached_state, mask);
1139 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1142 return clear_extent_bit(tree, start, end,
1143 EXTENT_DIRTY | EXTENT_DELALLOC |
1144 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
1147 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1150 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
1154 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1155 struct extent_state **cached_state, gfp_t mask)
1157 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
1158 NULL, cached_state, mask);
1161 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
1162 u64 end, struct extent_state **cached_state,
1165 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
1166 cached_state, mask);
1170 * either insert or lock the state struct between start and end. Use mask to tell
1171 * us if waiting is desired.
1173 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1174 int bits, struct extent_state **cached_state, gfp_t mask)
1179 err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1180 EXTENT_LOCKED, &failed_start,
1181 cached_state, mask);
1182 if (err == -EEXIST && (mask & __GFP_WAIT)) {
1183 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1184 start = failed_start;
1188 WARN_ON(start > end);
1193 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1195 return lock_extent_bits(tree, start, end, 0, NULL, mask);
1198 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
1204 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1205 &failed_start, NULL, mask);
1206 if (err == -EEXIST) {
1207 if (failed_start > start)
1208 clear_extent_bit(tree, start, failed_start - 1,
1209 EXTENT_LOCKED, 1, 0, NULL, mask);
1215 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1216 struct extent_state **cached, gfp_t mask)
1218 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1222 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1224 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1229 * helper function to set both pages and extents in the tree writeback
1231 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1233 unsigned long index = start >> PAGE_CACHE_SHIFT;
1234 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1237 while (index <= end_index) {
1238 page = find_get_page(tree->mapping, index);
1240 set_page_writeback(page);
1241 page_cache_release(page);
1247 /* find the first state struct with 'bits' set after 'start', and
1248 * return it. tree->lock must be held. NULL will be returned if
1249 * nothing was found after 'start'
1251 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1252 u64 start, int bits)
1254 struct rb_node *node;
1255 struct extent_state *state;
1258 * this search will find all the extents that end after
1261 node = tree_search(tree, start);
1266 state = rb_entry(node, struct extent_state, rb_node);
1267 if (state->end >= start && (state->state & bits))
1270 node = rb_next(node);
1279 * find the first offset in the io tree with 'bits' set. zero is
1280 * returned if we find something, and *start_ret and *end_ret are
1281 * set to reflect the state struct that was found.
1283 * If nothing was found, 1 is returned, < 0 on error
1285 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1286 u64 *start_ret, u64 *end_ret, int bits)
1288 struct extent_state *state;
1291 spin_lock(&tree->lock);
1292 state = find_first_extent_bit_state(tree, start, bits);
1294 *start_ret = state->start;
1295 *end_ret = state->end;
1298 spin_unlock(&tree->lock);
1303 * find a contiguous range of bytes in the file marked as delalloc, not
1304 * more than 'max_bytes'. start and end are used to return the range,
1306 * 1 is returned if we find something, 0 if nothing was in the tree
1308 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1309 u64 *start, u64 *end, u64 max_bytes,
1310 struct extent_state **cached_state)
1312 struct rb_node *node;
1313 struct extent_state *state;
1314 u64 cur_start = *start;
1316 u64 total_bytes = 0;
1318 spin_lock(&tree->lock);
1321 * this search will find all the extents that end after
1324 node = tree_search(tree, cur_start);
1332 state = rb_entry(node, struct extent_state, rb_node);
1333 if (found && (state->start != cur_start ||
1334 (state->state & EXTENT_BOUNDARY))) {
1337 if (!(state->state & EXTENT_DELALLOC)) {
1343 *start = state->start;
1344 *cached_state = state;
1345 atomic_inc(&state->refs);
1349 cur_start = state->end + 1;
1350 node = rb_next(node);
1353 total_bytes += state->end - state->start + 1;
1354 if (total_bytes >= max_bytes)
1358 spin_unlock(&tree->lock);
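/*
 * Unlock every page in the byte range except 'locked_page', which the
 * caller is responsible for.
 */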
1362 static noinline int __unlock_for_delalloc(struct inode *inode,
1363 struct page *locked_page,
1367 struct page *pages[16];
1368 unsigned long index = start >> PAGE_CACHE_SHIFT;
1369 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1370 unsigned long nr_pages = end_index - index + 1;
1373 if (index == locked_page->index && end_index == index)
1376 while (nr_pages > 0) {
1377 ret = find_get_pages_contig(inode->i_mapping, index,
1378 min_t(unsigned long, nr_pages,
1379 ARRAY_SIZE(pages)), pages);
1380 for (i = 0; i < ret; i++) {
1381 if (pages[i] != locked_page)
1382 unlock_page(pages[i]);
1383 page_cache_release(pages[i]);
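/*
 * Lock all pages covering the delalloc range except 'locked_page'.  If a
 * page is no longer dirty or has been truncated away, unlock whatever we
 * already locked and return -EAGAIN so the caller can retry with a
 * smaller range.
 */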
1392 static noinline int lock_delalloc_pages(struct inode *inode,
1393 struct page *locked_page,
1397 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1398 unsigned long start_index = index;
1399 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1400 unsigned long pages_locked = 0;
1401 struct page *pages[16];
1402 unsigned long nrpages;
1406 /* the caller is responsible for locking the start index */
1407 if (index == locked_page->index && index == end_index)
1410 /* skip the page at the start index */
1411 nrpages = end_index - index + 1;
1412 while (nrpages > 0) {
1413 ret = find_get_pages_contig(inode->i_mapping, index,
1414 min_t(unsigned long,
1415 nrpages, ARRAY_SIZE(pages)), pages);
1420 /* now we have an array of pages, lock them all */
1421 for (i = 0; i < ret; i++) {
1423 * the caller is taking responsibility for
1426 if (pages[i] != locked_page) {
1427 lock_page(pages[i]);
1428 if (!PageDirty(pages[i]) ||
1429 pages[i]->mapping != inode->i_mapping) {
1431 unlock_page(pages[i]);
1432 page_cache_release(pages[i]);
1436 page_cache_release(pages[i]);
1445 if (ret && pages_locked) {
1446 __unlock_for_delalloc(inode, locked_page,
1448 ((u64)(start_index + pages_locked - 1)) <<
1455 * find a contiguous range of bytes in the file marked as delalloc, not
1456 * more than 'max_bytes'. start and end are used to return the range,
1458 * 1 is returned if we find something, 0 if nothing was in the tree
1460 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1461 struct extent_io_tree *tree,
1462 struct page *locked_page,
1463 u64 *start, u64 *end,
1469 struct extent_state *cached_state = NULL;
1474 /* step one, find a bunch of delalloc bytes starting at start */
1475 delalloc_start = *start;
1477 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1478 max_bytes, &cached_state);
1479 if (!found || delalloc_end <= *start) {
1480 *start = delalloc_start;
1481 *end = delalloc_end;
1482 free_extent_state(cached_state);
1487 * start comes from the offset of locked_page. We have to lock
1488 * pages in order, so we can't process delalloc bytes before
1491 if (delalloc_start < *start)
1492 delalloc_start = *start;
1495 * make sure to limit the number of pages we try to lock down
1498 if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1499 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1501 /* step two, lock all the pages after the page that has start */
1502 ret = lock_delalloc_pages(inode, locked_page,
1503 delalloc_start, delalloc_end);
1504 if (ret == -EAGAIN) {
1505 /* some of the pages are gone, lets avoid looping by
1506 * shortening the size of the delalloc range we're searching
1508 free_extent_state(cached_state);
1510 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1511 max_bytes = PAGE_CACHE_SIZE - offset;
1521 /* step three, lock the state bits for the whole range */
1522 lock_extent_bits(tree, delalloc_start, delalloc_end,
1523 0, &cached_state, GFP_NOFS);
1525 /* then test to make sure it is all still delalloc */
1526 ret = test_range_bit(tree, delalloc_start, delalloc_end,
1527 EXTENT_DELALLOC, 1, cached_state);
1529 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1530 &cached_state, GFP_NOFS);
1531 __unlock_for_delalloc(inode, locked_page,
1532 delalloc_start, delalloc_end);
1536 free_extent_state(cached_state);
1537 *start = delalloc_start;
1538 *end = delalloc_end;
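/*
 * Clear the requested state bits on the byte range and then walk the
 * pages, applying the page-level actions encoded in 'op' (clear dirty,
 * start or end writeback, unlock), skipping 'locked_page'.
 */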
1543 int extent_clear_unlock_delalloc(struct inode *inode,
1544 struct extent_io_tree *tree,
1545 u64 start, u64 end, struct page *locked_page,
1549 struct page *pages[16];
1550 unsigned long index = start >> PAGE_CACHE_SHIFT;
1551 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1552 unsigned long nr_pages = end_index - index + 1;
1556 if (op & EXTENT_CLEAR_UNLOCK)
1557 clear_bits |= EXTENT_LOCKED;
1558 if (op & EXTENT_CLEAR_DIRTY)
1559 clear_bits |= EXTENT_DIRTY;
1561 if (op & EXTENT_CLEAR_DELALLOC)
1562 clear_bits |= EXTENT_DELALLOC;
1564 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1565 if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1566 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1567 EXTENT_SET_PRIVATE2)))
1570 while (nr_pages > 0) {
1571 ret = find_get_pages_contig(inode->i_mapping, index,
1572 min_t(unsigned long,
1573 nr_pages, ARRAY_SIZE(pages)), pages);
1574 for (i = 0; i < ret; i++) {
1576 if (op & EXTENT_SET_PRIVATE2)
1577 SetPagePrivate2(pages[i]);
1579 if (pages[i] == locked_page) {
1580 page_cache_release(pages[i]);
1583 if (op & EXTENT_CLEAR_DIRTY)
1584 clear_page_dirty_for_io(pages[i]);
1585 if (op & EXTENT_SET_WRITEBACK)
1586 set_page_writeback(pages[i]);
1587 if (op & EXTENT_END_WRITEBACK)
1588 end_page_writeback(pages[i]);
1589 if (op & EXTENT_CLEAR_UNLOCK_PAGE)
1590 unlock_page(pages[i]);
1591 page_cache_release(pages[i]);
1601 * count the number of bytes in the tree that have a given bit(s)
1602 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1603 * cached. The total number found is returned.
1605 u64 count_range_bits(struct extent_io_tree *tree,
1606 u64 *start, u64 search_end, u64 max_bytes,
1607 unsigned long bits, int contig)
1609 struct rb_node *node;
1610 struct extent_state *state;
1611 u64 cur_start = *start;
1612 u64 total_bytes = 0;
1616 if (search_end <= cur_start) {
1621 spin_lock(&tree->lock);
1622 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1623 total_bytes = tree->dirty_bytes;
1627 * this search will find all the extents that end after
1630 node = tree_search(tree, cur_start);
1635 state = rb_entry(node, struct extent_state, rb_node);
1636 if (state->start > search_end)
1638 if (contig && found && state->start > last + 1)
1640 if (state->end >= cur_start && (state->state & bits) == bits) {
1641 total_bytes += min(search_end, state->end) + 1 -
1642 max(cur_start, state->start);
1643 if (total_bytes >= max_bytes)
1646 *start = max(cur_start, state->start);
1650 } else if (contig && found) {
1653 node = rb_next(node);
1658 spin_unlock(&tree->lock);
1663 * set the private field for a given byte offset in the tree. If there isn't
1664 * an extent_state there already, this does nothing.
1666 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1668 struct rb_node *node;
1669 struct extent_state *state;
1672 spin_lock(&tree->lock);
1674 * this search will find all the extents that end after
1677 node = tree_search(tree, start);
1682 state = rb_entry(node, struct extent_state, rb_node);
1683 if (state->start != start) {
1687 state->private = private;
1689 spin_unlock(&tree->lock);
1693 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1695 struct rb_node *node;
1696 struct extent_state *state;
1699 spin_lock(&tree->lock);
1701 * this search will find all the extents that end after
1704 node = tree_search(tree, start);
1709 state = rb_entry(node, struct extent_state, rb_node);
1710 if (state->start != start) {
1714 *private = state->private;
1716 spin_unlock(&tree->lock);
1721 * searches a range in the state tree for a given mask.
1722 * If 'filled' == 1, this returns 1 only if every extent in the tree
1723 * has the bits set. Otherwise, 1 is returned if any bit in the
1724 * range is found set.
1726 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1727 int bits, int filled, struct extent_state *cached)
1729 struct extent_state *state = NULL;
1730 struct rb_node *node;
1733 spin_lock(&tree->lock);
1734 if (cached && cached->tree && cached->start <= start &&
1735 cached->end > start)
1736 node = &cached->rb_node;
1738 node = tree_search(tree, start);
1739 while (node && start <= end) {
1740 state = rb_entry(node, struct extent_state, rb_node);
1742 if (filled && state->start > start) {
1747 if (state->start > end)
1750 if (state->state & bits) {
1754 } else if (filled) {
1759 if (state->end == (u64)-1)
1762 start = state->end + 1;
1765 node = rb_next(node);
1772 spin_unlock(&tree->lock);
1777 * helper function to set a given page up to date if all the
1778 * extents in the tree for that page are up to date
1780 static int check_page_uptodate(struct extent_io_tree *tree,
1783 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1784 u64 end = start + PAGE_CACHE_SIZE - 1;
1785 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1786 SetPageUptodate(page);
1791 * helper function to unlock a page if all the extents in the tree
1792 * for that page are unlocked
1794 static int check_page_locked(struct extent_io_tree *tree,
1797 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1798 u64 end = start + PAGE_CACHE_SIZE - 1;
1799 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1805 * helper function to end page writeback if all the extents
1806 * in the tree for that page are done with writeback
1808 static int check_page_writeback(struct extent_io_tree *tree,
1811 end_page_writeback(page);
1816 * When IO fails, either with EIO or csum verification fails, we
1817 * try other mirrors that might have a good copy of the data. This
1818 * io_failure_record is used to record state as we go through all the
1819 * mirrors. If another mirror has good data, the page is set up to date
1820 * and things continue. If a good mirror can't be found, the original
1821 * bio end_io callback is called to indicate things have failed.
1823 struct io_failure_record {
1828 unsigned long bio_flags;
1834 static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1839 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1841 set_state_private(failure_tree, rec->start, 0);
1842 ret = clear_extent_bits(failure_tree, rec->start,
1843 rec->start + rec->len - 1,
1844 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1849 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1850 rec->start + rec->len - 1,
1851 EXTENT_DAMAGED, GFP_NOFS);
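/*
 * Completion callback for the synchronous repair write issued by
 * repair_io_failure(); it just wakes up the waiter.
 */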
1860 static void repair_io_failure_callback(struct bio *bio, int err)
1862 complete(bio->bi_private);
1866 * this bypasses the standard btrfs submit functions deliberately, as
1867 * the standard behavior is to write all copies in a raid setup. here we only
1868 * want to write the one bad copy. so we do the mapping for ourselves and issue
1869 * submit_bio directly.
1870 * to avoid any synchronization issues, wait for the data after writing, which
1871 * actually prevents the read that triggered the error from finishing.
1872 * currently, there can be no more than two copies of every data bit. thus,
1873 * exactly one rewrite is required.
1875 int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
1876 u64 length, u64 logical, struct page *page,
1880 struct btrfs_device *dev;
1881 DECLARE_COMPLETION_ONSTACK(compl);
1884 struct btrfs_bio *bbio = NULL;
1887 BUG_ON(!mirror_num);
1889 bio = bio_alloc(GFP_NOFS, 1);
1892 bio->bi_private = &compl;
1893 bio->bi_end_io = repair_io_failure_callback;
1895 map_length = length;
1897 ret = btrfs_map_block(map_tree, WRITE, logical,
1898 &map_length, &bbio, mirror_num);
1903 BUG_ON(mirror_num != bbio->mirror_num);
1904 sector = bbio->stripes[mirror_num-1].physical >> 9;
1905 bio->bi_sector = sector;
1906 dev = bbio->stripes[mirror_num-1].dev;
1908 if (!dev || !dev->bdev || !dev->writeable) {
1912 bio->bi_bdev = dev->bdev;
1913 bio_add_page(bio, page, length, start-page_offset(page));
1914 btrfsic_submit_bio(WRITE_SYNC, bio);
1915 wait_for_completion(&compl);
1917 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1918 /* try to remap that extent elsewhere? */
1923 printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s "
1924 "sector %llu)\n", page->mapping->host->i_ino, start,
1932 * each time an IO finishes, we do a fast check in the IO failure tree
1933 * to see if we need to process or clean up an io_failure_record
1935 static int clean_io_failure(u64 start, struct page *page)
1938 u64 private_failure;
1939 struct io_failure_record *failrec;
1940 struct btrfs_mapping_tree *map_tree;
1941 struct extent_state *state;
1945 struct inode *inode = page->mapping->host;
1948 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1949 (u64)-1, 1, EXTENT_DIRTY, 0);
1953 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
1958 failrec = (struct io_failure_record *)(unsigned long) private_failure;
1959 BUG_ON(!failrec->this_mirror);
1961 if (failrec->in_validation) {
1962 /* there was no real error, just free the record */
1963 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
1969 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1970 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1973 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1975 if (state && state->start == failrec->start) {
1976 map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
1977 num_copies = btrfs_num_copies(map_tree, failrec->logical,
1979 if (num_copies > 1) {
1980 ret = repair_io_failure(map_tree, start, failrec->len,
1981 failrec->logical, page,
1982 failrec->failed_mirror);
1989 ret = free_io_failure(inode, failrec, did_repair);
1995 * this is a generic handler for readpage errors (default
1996 * readpage_io_failed_hook). if other copies exist, read those and write back
1997 * good data to the failed position. It does not attempt to remap the
1998 * failed extent elsewhere, hoping the device will be smart enough to do this as
2002 static int bio_readpage_error(struct bio *failed_bio, struct page *page,
2003 u64 start, u64 end, int failed_mirror,
2004 struct extent_state *state)
2006 struct io_failure_record *failrec = NULL;
2008 struct extent_map *em;
2009 struct inode *inode = page->mapping->host;
2010 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2011 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2012 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2019 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2021 ret = get_state_private(failure_tree, start, &private);
2023 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2026 failrec->start = start;
2027 failrec->len = end - start + 1;
2028 failrec->this_mirror = 0;
2029 failrec->bio_flags = 0;
2030 failrec->in_validation = 0;
2032 read_lock(&em_tree->lock);
2033 em = lookup_extent_mapping(em_tree, start, failrec->len);
2035 read_unlock(&em_tree->lock);
2040 if (em->start > start || em->start + em->len < start) {
2041 free_extent_map(em);
2044 read_unlock(&em_tree->lock);
2046 if (!em || IS_ERR(em)) {
2050 logical = start - em->start;
2051 logical = em->block_start + logical;
2052 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2053 logical = em->block_start;
2054 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2055 extent_set_compress_type(&failrec->bio_flags,
2058 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2059 "len=%llu\n", logical, start, failrec->len);
2060 failrec->logical = logical;
2061 free_extent_map(em);
2063 /* set the bits in the private failure tree */
2064 ret = set_extent_bits(failure_tree, start, end,
2065 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2067 ret = set_state_private(failure_tree, start,
2068 (u64)(unsigned long)failrec);
2069 /* set the bits in the inode's tree */
2071 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2078 failrec = (struct io_failure_record *)(unsigned long)private;
2079 pr_debug("bio_readpage_error: (found) logical=%llu, "
2080 "start=%llu, len=%llu, validation=%d\n",
2081 failrec->logical, failrec->start, failrec->len,
2082 failrec->in_validation);
2084 * when data can be on disk more than twice, add to failrec here
2085 * (e.g. with a list for failed_mirror) to make
2086 * clean_io_failure() clean all those errors at once.
2089 num_copies = btrfs_num_copies(
2090 &BTRFS_I(inode)->root->fs_info->mapping_tree,
2091 failrec->logical, failrec->len);
2092 if (num_copies == 1) {
2094 * we only have a single copy of the data, so don't bother with
2095 * all the retry and error correction code that follows. no
2096 * matter what the error is, it is very likely to persist.
2098 pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
2099 "state=%p, num_copies=%d, next_mirror %d, "
2100 "failed_mirror %d\n", state, num_copies,
2101 failrec->this_mirror, failed_mirror);
2102 free_io_failure(inode, failrec, 0);
2107 spin_lock(&tree->lock);
2108 state = find_first_extent_bit_state(tree, failrec->start,
2110 if (state && state->start != failrec->start)
2112 spin_unlock(&tree->lock);
2116 * there are two premises:
2117 * a) deliver good data to the caller
2118 * b) correct the bad sectors on disk
2120 if (failed_bio->bi_vcnt > 1) {
2122 * to fulfill b), we need to know the exact failing sectors, as
2123 * we don't want to rewrite any more than the failed ones. thus,
2124 * we need separate read requests for the failed bio
2126 * if the following BUG_ON triggers, our validation request got
2127 * merged. we need separate requests for our algorithm to work.
2129 BUG_ON(failrec->in_validation);
2130 failrec->in_validation = 1;
2131 failrec->this_mirror = failed_mirror;
2132 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2135 * we're ready to fulfill a) and b) alongside. get a good copy
2136 * of the failed sector and if we succeed, we have setup
2137 * everything for repair_io_failure to do the rest for us.
2139 if (failrec->in_validation) {
2140 BUG_ON(failrec->this_mirror != failed_mirror);
2141 failrec->in_validation = 0;
2142 failrec->this_mirror = 0;
2144 failrec->failed_mirror = failed_mirror;
2145 failrec->this_mirror++;
2146 if (failrec->this_mirror == failed_mirror)
2147 failrec->this_mirror++;
2148 read_mode = READ_SYNC;
2151 if (!state || failrec->this_mirror > num_copies) {
2152 pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
2153 "next_mirror %d, failed_mirror %d\n", state,
2154 num_copies, failrec->this_mirror, failed_mirror);
2155 free_io_failure(inode, failrec, 0);
2159 bio = bio_alloc(GFP_NOFS, 1);
2160 bio->bi_private = state;
2161 bio->bi_end_io = failed_bio->bi_end_io;
2162 bio->bi_sector = failrec->logical >> 9;
2163 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2166 bio_add_page(bio, page, failrec->len, start - page_offset(page));
2168 pr_debug("bio_readpage_error: submitting new read[%#x] to "
2169 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2170 failrec->this_mirror, num_copies, failrec->in_validation);
2172 ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2173 failrec->this_mirror,
2174 failrec->bio_flags, 0);
2178 /* lots and lots of room for performance fixes in the end_bio funcs */
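/*
 * Per-page completion work for writes: run the writepage_end_io_hook and,
 * if the write failed, the failed hook; clear the uptodate bits on error.
 */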
2180 int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2182 int uptodate = (err == 0);
2183 struct extent_io_tree *tree;
2186 tree = &BTRFS_I(page->mapping->host)->io_tree;
2188 if (tree->ops && tree->ops->writepage_end_io_hook) {
2189 ret = tree->ops->writepage_end_io_hook(page, start,
2190 end, NULL, uptodate);
2195 if (!uptodate && tree->ops &&
2196 tree->ops->writepage_io_failed_hook) {
2197 ret = tree->ops->writepage_io_failed_hook(NULL, page,
2199 /* Writeback already completed */
2206 clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
2207 ClearPageUptodate(page);
2214 * after a writepage IO is done, we need to:
2215 * clear the uptodate bits on error
2216 * clear the writeback bits in the extent tree for this IO
2217 * end_page_writeback if the page has no more pending IO
2219 * Scheduling is not allowed, so the extent state tree is expected
2220 * to have one and only one object corresponding to this IO.
2222 static void end_bio_extent_writepage(struct bio *bio, int err)
2224 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2225 struct extent_io_tree *tree;
2231 struct page *page = bvec->bv_page;
2232 tree = &BTRFS_I(page->mapping->host)->io_tree;
2234 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2236 end = start + bvec->bv_len - 1;
2238 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2243 if (--bvec >= bio->bi_io_vec)
2244 prefetchw(&bvec->bv_page->flags);
2246 if (end_extent_writepage(page, err, start, end))
2250 end_page_writeback(page);
2252 check_page_writeback(tree, page);
2253 } while (bvec >= bio->bi_io_vec);
2259 * after a readpage IO is done, we need to:
2260 * clear the uptodate bits on error
2261 * set the uptodate bits if things worked
2262 * set the page up to date if all extents in the tree are uptodate
2263 * clear the lock bit in the extent tree
2264 * unlock the page if there are no other extents locked for it
2266 * Scheduling is not allowed, so the extent state tree is expected
2267 * to have one and only one object corresponding to this IO.
2269 static void end_bio_extent_readpage(struct bio *bio, int err)
2271 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2272 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2273 struct bio_vec *bvec = bio->bi_io_vec;
2274 struct extent_io_tree *tree;
2284 struct page *page = bvec->bv_page;
2285 struct extent_state *cached = NULL;
2286 struct extent_state *state;
2288 pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
2289 "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
2290 (long int)bio->bi_bdev);
2291 tree = &BTRFS_I(page->mapping->host)->io_tree;
2293 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2295 end = start + bvec->bv_len - 1;
2297 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2302 if (++bvec <= bvec_end)
2303 prefetchw(&bvec->bv_page->flags);
2305 spin_lock(&tree->lock);
2306 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
2307 if (state && state->start == start) {
2309 * take a reference on the state, unlock will drop
2312 cache_state(state, &cached);
2314 spin_unlock(&tree->lock);
2316 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
2317 ret = tree->ops->readpage_end_io_hook(page, start, end,
2322 clean_io_failure(start, page);
2326 failed_mirror = (int)(unsigned long)bio->bi_bdev;
2328 * The generic bio_readpage_error handles errors the
2329 * following way: If possible, new read requests are
2330 * created and submitted and will end up in
2331 * end_bio_extent_readpage as well (if we're lucky, not
2332 * in the !uptodate case). In that case it returns 0 and
2333 * we just go on with the next page in our bio. If it
2334 * can't handle the error it will return -EIO and we
2335 * remain responsible for that page.
2337 ret = bio_readpage_error(bio, page, start, end,
2338 failed_mirror, NULL);
2342 test_bit(BIO_UPTODATE, &bio->bi_flags);
2345 uncache_state(&cached);
2348 if (tree->ops && tree->ops->readpage_io_failed_hook) {
2349 ret = tree->ops->readpage_io_failed_hook(
2350 bio, page, start, end,
2351 failed_mirror, state);
2359 set_extent_uptodate(tree, start, end, &cached,
2362 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2366 SetPageUptodate(page);
2368 ClearPageUptodate(page);
2374 check_page_uptodate(tree, page);
2376 ClearPageUptodate(page);
2379 check_page_locked(tree, page);
2381 } while (bvec <= bvec_end);
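/*
 * Allocate a bio for 'nr_vecs' pages.  Under memory pressure (PF_MEMALLOC)
 * keep halving the vector count until the allocation succeeds.
 */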
2387 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2392 bio = bio_alloc(gfp_flags, nr_vecs);
2394 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2395 while (!bio && (nr_vecs /= 2))
2396 bio = bio_alloc(gfp_flags, nr_vecs);
2401 bio->bi_bdev = bdev;
2402 bio->bi_sector = first_sector;
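/*
 * Hand a fully built bio to the filesystem's submit_bio_hook, or submit
 * it directly when no hook is registered.
 */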
2407 static int __must_check submit_one_bio(int rw, struct bio *bio,
2408 int mirror_num, unsigned long bio_flags)
2411 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2412 struct page *page = bvec->bv_page;
2413 struct extent_io_tree *tree = bio->bi_private;
2416 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
2418 bio->bi_private = NULL;
2422 if (tree->ops && tree->ops->submit_bio_hook)
2423 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2424 mirror_num, bio_flags, start);
2426 btrfsic_submit_bio(rw, bio);
2428 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2434 static int merge_bio(struct extent_io_tree *tree, struct page *page,
2435 unsigned long offset, size_t size, struct bio *bio,
2436 unsigned long bio_flags)
2439 if (tree->ops && tree->ops->merge_bio_hook)
2440 ret = tree->ops->merge_bio_hook(page, offset, size, bio,
2447 static int submit_extent_page(int rw, struct extent_io_tree *tree,
2448 struct page *page, sector_t sector,
2449 size_t size, unsigned long offset,
2450 struct block_device *bdev,
2451 struct bio **bio_ret,
2452 unsigned long max_pages,
2453 bio_end_io_t end_io_func,
2455 unsigned long prev_bio_flags,
2456 unsigned long bio_flags)
2462 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2463 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2464 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
2466 if (bio_ret && *bio_ret) {
2469 contig = bio->bi_sector == sector;
2471 contig = bio->bi_sector + (bio->bi_size >> 9) ==
2474 if (prev_bio_flags != bio_flags || !contig ||
2475 merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
2476 bio_add_page(bio, page, page_size, offset) < page_size) {
2477 ret = submit_one_bio(rw, bio, mirror_num,
2485 if (this_compressed)
2488 nr = bio_get_nr_vecs(bdev);
2490 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
2494 bio_add_page(bio, page, page_size, offset);
2495 bio->bi_end_io = end_io_func;
2496 bio->bi_private = tree;
2501 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
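/*
 * Tag a page as belonging to the extent_io code by setting its private
 * field, so later callers know extent state is tracked for it.
 */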
2508 void set_page_extent_mapped(struct page *page)
2510 if (!PagePrivate(page)) {
2511 SetPagePrivate(page);
2512 page_cache_get(page);
2513 set_page_private(page, EXTENT_PAGE_PRIVATE);
2517 static void set_page_extent_head(struct page *page, unsigned long len)
2519 WARN_ON(!PagePrivate(page));
2520 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
2524 * basic readpage implementation. Locked extent state structs are inserted
2525 * into the tree that are removed when the IO is done (by the end_io
2528 static int __extent_read_full_page(struct extent_io_tree *tree,
2530 get_extent_t *get_extent,
2531 struct bio **bio, int mirror_num,
2532 unsigned long *bio_flags)
2534 struct inode *inode = page->mapping->host;
2535 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2536 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2540 u64 last_byte = i_size_read(inode);
2544 struct extent_map *em;
2545 struct block_device *bdev;
2546 struct btrfs_ordered_extent *ordered;
2549 size_t pg_offset = 0;
2551 size_t disk_io_size;
2552 size_t blocksize = inode->i_sb->s_blocksize;
2553 unsigned long this_bio_flag = 0;
2555 set_page_extent_mapped(page);
2557 if (!PageUptodate(page)) {
2558 if (cleancache_get_page(page) == 0) {
2559 BUG_ON(blocksize != PAGE_SIZE);
2566 lock_extent(tree, start, end, GFP_NOFS);
2567 ordered = btrfs_lookup_ordered_extent(inode, start);
2570 unlock_extent(tree, start, end, GFP_NOFS);
2571 btrfs_start_ordered_extent(inode, ordered, 1);
2572 btrfs_put_ordered_extent(ordered);
2575 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2577 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2580 iosize = PAGE_CACHE_SIZE - zero_offset;
2581 userpage = kmap_atomic(page, KM_USER0);
2582 memset(userpage + zero_offset, 0, iosize);
2583 flush_dcache_page(page);
2584 kunmap_atomic(userpage, KM_USER0);
2587 while (cur <= end) {
2588 if (cur >= last_byte) {
2590 struct extent_state *cached = NULL;
2592 iosize = PAGE_CACHE_SIZE - pg_offset;
2593 userpage = kmap_atomic(page, KM_USER0);
2594 memset(userpage + pg_offset, 0, iosize);
2595 flush_dcache_page(page);
2596 kunmap_atomic(userpage, KM_USER0);
2597 set_extent_uptodate(tree, cur, cur + iosize - 1,
2599 unlock_extent_cached(tree, cur, cur + iosize - 1,
2603 em = get_extent(inode, page, pg_offset, cur,
2605 if (IS_ERR_OR_NULL(em)) {
2607 unlock_extent(tree, cur, end, GFP_NOFS);
2610 extent_offset = cur - em->start;
2611 BUG_ON(extent_map_end(em) <= cur);
2614 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2615 this_bio_flag = EXTENT_BIO_COMPRESSED;
2616 extent_set_compress_type(&this_bio_flag,
2620 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2621 cur_end = min(extent_map_end(em) - 1, end);
2622 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2623 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2624 disk_io_size = em->block_len;
2625 sector = em->block_start >> 9;
2627 sector = (em->block_start + extent_offset) >> 9;
2628 disk_io_size = iosize;
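/*
 * For a compressed extent the whole on-disk chunk (em->block_len bytes
 * starting at em->block_start) has to be read so it can be decompressed,
 * which is why disk_io_size differs from iosize above; an uncompressed
 * extent only reads the byte range this page actually needs.
 */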
2631 block_start = em->block_start;
2632 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2633 block_start = EXTENT_MAP_HOLE;
2634 free_extent_map(em);
2637 /* we've found a hole, just zero and go on */
2638 if (block_start == EXTENT_MAP_HOLE) {
2640 struct extent_state *cached = NULL;
2642 userpage = kmap_atomic(page, KM_USER0);
2643 memset(userpage + pg_offset, 0, iosize);
2644 flush_dcache_page(page);
2645 kunmap_atomic(userpage, KM_USER0);
2647 set_extent_uptodate(tree, cur, cur + iosize - 1,
2649 unlock_extent_cached(tree, cur, cur + iosize - 1,
2652 pg_offset += iosize;
2655 /* the get_extent function already copied into the page */
2656 if (test_range_bit(tree, cur, cur_end,
2657 EXTENT_UPTODATE, 1, NULL)) {
2658 check_page_uptodate(tree, page);
2659 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2661 pg_offset += iosize;
2664 /* we have an inline extent but it didn't get marked up
2665 * to date. Error out
2667 if (block_start == EXTENT_MAP_INLINE) {
2669 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2671 pg_offset += iosize;
2676 if (tree->ops && tree->ops->readpage_io_hook) {
2677 ret = tree->ops->readpage_io_hook(page, cur,
2681 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2683 ret = submit_extent_page(READ, tree, page,
2684 sector, disk_io_size, pg_offset,
2686 end_bio_extent_readpage, mirror_num,
2690 *bio_flags = this_bio_flag;
2695 pg_offset += iosize;
2699 if (!PageError(page))
2700 SetPageUptodate(page);
2706 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2707 get_extent_t *get_extent, int mirror_num)
2709 struct bio *bio = NULL;
2710 unsigned long bio_flags = 0;
2713 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
2716 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
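/*
 * Example caller (a sketch, not part of this file): an address_space
 * ->readpage implementation is typically a thin wrapper, roughly
 *
 *	static int my_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_io_tree *tree =
 *			&BTRFS_I(page->mapping->host)->io_tree;
 *		return extent_read_full_page(tree, page, btrfs_get_extent, 0);
 *	}
 *
 * my_readpage is an illustrative name only; the real btrfs hook lives in
 * inode.c.
 */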
2722 static noinline void update_nr_written(struct page *page,
2723 struct writeback_control *wbc,
2724 unsigned long nr_written)
2726 wbc->nr_to_write -= nr_written;
2727 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2728 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2729 page->mapping->writeback_index = page->index + nr_written;
2733 * the writepage semantics are similar to regular writepage. extent
2734 * records are inserted to lock ranges in the tree, and as dirty areas
2735 * are found, they are marked writeback. Then the lock bits are removed
2736 * and the end_io handler clears the writeback ranges
2738 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2741 struct inode *inode = page->mapping->host;
2742 struct extent_page_data *epd = data;
2743 struct extent_io_tree *tree = epd->tree;
2744 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2746 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2750 u64 last_byte = i_size_read(inode);
2754 struct extent_state *cached_state = NULL;
2755 struct extent_map *em;
2756 struct block_device *bdev;
2759 size_t pg_offset = 0;
2761 loff_t i_size = i_size_read(inode);
2762 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2768 unsigned long nr_written = 0;
2769 bool fill_delalloc = true;
2771 if (wbc->sync_mode == WB_SYNC_ALL)
2772 write_flags = WRITE_SYNC;
2774 write_flags = WRITE;
2776 trace___extent_writepage(page, inode, wbc);
2778 WARN_ON(!PageLocked(page));
2780 ClearPageError(page);
2782 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2783 if (page->index > end_index ||
2784 (page->index == end_index && !pg_offset)) {
2785 page->mapping->a_ops->invalidatepage(page, 0);
2790 if (page->index == end_index) {
2793 userpage = kmap_atomic(page, KM_USER0);
2794 memset(userpage + pg_offset, 0,
2795 PAGE_CACHE_SIZE - pg_offset);
2796 kunmap_atomic(userpage, KM_USER0);
2797 flush_dcache_page(page);
2801 set_page_extent_mapped(page);
2803 if (!tree->ops || !tree->ops->fill_delalloc)
2804 fill_delalloc = false;
2806 delalloc_start = start;
2809 if (!epd->extent_locked && fill_delalloc) {
2810 u64 delalloc_to_write = 0;
2812 * make sure the wbc mapping index is at least updated
2815 update_nr_written(page, wbc, 0);
2817 while (delalloc_end < page_end) {
2818 nr_delalloc = find_lock_delalloc_range(inode, tree,
2823 if (nr_delalloc == 0) {
2824 delalloc_start = delalloc_end + 1;
2827 ret = tree->ops->fill_delalloc(inode, page,
2834 * delalloc_end is already one less than the total
2835 * length, so we don't subtract one from PAGE_CACHE_SIZE
2838 delalloc_to_write += (delalloc_end - delalloc_start + PAGE_CACHE_SIZE) >> PAGE_CACHE_SHIFT;
2841 delalloc_start = delalloc_end + 1;
2843 if (wbc->nr_to_write < delalloc_to_write) {
2846 if (delalloc_to_write < thresh * 2)
2847 thresh = delalloc_to_write;
2848 wbc->nr_to_write = min_t(u64, delalloc_to_write, thresh);
2852 /* did the fill delalloc function already unlock and start the IO? */
2858 * we've unlocked the page, so we can't update
2859 * the mapping's writeback index, just update nr_to_write.
2862 wbc->nr_to_write -= nr_written;
2866 if (tree->ops && tree->ops->writepage_start_hook) {
2867 ret = tree->ops->writepage_start_hook(page, start,
2870 /* Fixup worker will requeue */
2872 wbc->pages_skipped++;
2874 redirty_page_for_writepage(wbc, page);
2875 update_nr_written(page, wbc, nr_written);
2883 * we don't want to touch the inode after unlocking the page,
2884 * so we update the mapping writeback index now
2886 update_nr_written(page, wbc, nr_written + 1);
2889 if (last_byte <= start) {
2890 if (tree->ops && tree->ops->writepage_end_io_hook)
2891 tree->ops->writepage_end_io_hook(page, start,
2896 blocksize = inode->i_sb->s_blocksize;
2898 while (cur <= end) {
2899 if (cur >= last_byte) {
2900 if (tree->ops && tree->ops->writepage_end_io_hook)
2901 tree->ops->writepage_end_io_hook(page, cur,
2905 em = epd->get_extent(inode, page, pg_offset, cur,
2907 if (IS_ERR_OR_NULL(em)) {
2912 extent_offset = cur - em->start;
2913 BUG_ON(extent_map_end(em) <= cur);
2915 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2916 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2917 sector = (em->block_start + extent_offset) >> 9;
2919 block_start = em->block_start;
2920 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2921 free_extent_map(em);
2925 * compressed and inline extents are written through other
2928 if (compressed || block_start == EXTENT_MAP_HOLE ||
2929 block_start == EXTENT_MAP_INLINE) {
2931 * end_io notification does not happen here for
2932 * compressed extents
2934 if (!compressed && tree->ops &&
2935 tree->ops->writepage_end_io_hook)
2936 tree->ops->writepage_end_io_hook(page, cur,
2939 else if (compressed) {
2940 /* we don't want to end_page_writeback on
2941 * a compressed extent. this happens
2948 pg_offset += iosize;
2951 /* leave this out until we have a page_mkwrite call */
2952 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2953 EXTENT_DIRTY, 0, NULL)) {
2955 pg_offset += iosize;
2959 if (tree->ops && tree->ops->writepage_io_hook) {
2960 ret = tree->ops->writepage_io_hook(page, cur,
2968 unsigned long max_nr = end_index + 1;
2970 set_range_writeback(tree, cur, cur + iosize - 1);
2971 if (!PageWriteback(page)) {
2972 printk(KERN_ERR "btrfs warning page %lu not "
2973 "writeback, cur %llu end %llu\n",
2974 page->index, (unsigned long long)cur,
2975 (unsigned long long)end);
2978 ret = submit_extent_page(write_flags, tree, page,
2979 sector, iosize, pg_offset,
2980 bdev, &epd->bio, max_nr,
2981 end_bio_extent_writepage,
2987 pg_offset += iosize;
2992 /* make sure the mapping tag for page dirty gets cleared */
2993 set_page_writeback(page);
2994 end_page_writeback(page);
3000 /* drop our reference on any cached states */
3001 free_extent_state(cached_state);
3006 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3007 * @mapping: address space structure to write
3008 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3009 * @writepage: function called for each page
3010 * @data: data passed to writepage function
3012 * If a page is already under I/O, write_cache_pages() skips it, even
3013 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
3014 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
3015 * and msync() need to guarantee that all the data which was dirty at the time
3016 * the call was made get new I/O started against them. If wbc->sync_mode is
3017 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3018 * existing IO to complete.
3020 static int extent_write_cache_pages(struct extent_io_tree *tree,
3021 struct address_space *mapping,
3022 struct writeback_control *wbc,
3023 writepage_t writepage, void *data,
3024 void (*flush_fn)(void *))
3028 int nr_to_write_done = 0;
3029 struct pagevec pvec;
3032 pgoff_t end; /* Inclusive */
3036 pagevec_init(&pvec, 0);
3037 if (wbc->range_cyclic) {
3038 index = mapping->writeback_index; /* Start from prev offset */
3041 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3042 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3045 if (wbc->sync_mode == WB_SYNC_ALL)
3046 tag = PAGECACHE_TAG_TOWRITE;
3048 tag = PAGECACHE_TAG_DIRTY;
3050 if (wbc->sync_mode == WB_SYNC_ALL)
3051 tag_pages_for_writeback(mapping, index, end);
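/*
 * For WB_SYNC_ALL the dirty pages are tagged TOWRITE up front and the
 * lookup loop below scans that tag instead of DIRTY, so pages dirtied
 * while we are writing cannot make an integrity sync loop forever.
 */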
3052 while (!done && !nr_to_write_done && (index <= end) &&
3053 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3054 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3058 for (i = 0; i < nr_pages; i++) {
3059 struct page *page = pvec.pages[i];
3062 * At this point we hold neither mapping->tree_lock nor
3063 * lock on the page itself: the page may be truncated or
3064 * invalidated (changing page->mapping to NULL), or even
3065 * swizzled back from swapper_space to tmpfs file mapping
3069 tree->ops->write_cache_pages_lock_hook) {
3070 tree->ops->write_cache_pages_lock_hook(page,
3073 if (!trylock_page(page)) {
3079 if (unlikely(page->mapping != mapping)) {
3084 if (!wbc->range_cyclic && page->index > end) {
3090 if (wbc->sync_mode != WB_SYNC_NONE) {
3091 if (PageWriteback(page))
3093 wait_on_page_writeback(page);
3096 if (PageWriteback(page) ||
3097 !clear_page_dirty_for_io(page)) {
3102 ret = (*writepage)(page, wbc, data);
3104 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3112 * the filesystem may choose to bump up nr_to_write.
3113 * We have to make sure to honor the new nr_to_write at any time
3116 nr_to_write_done = wbc->nr_to_write <= 0;
3118 pagevec_release(&pvec);
3121 if (!scanned && !done) {
3123 * We hit the last page and there is more work to be done: wrap
3124 * back to the start of the file
3133 static void flush_epd_write_bio(struct extent_page_data *epd)
3142 ret = submit_one_bio(rw, epd->bio, 0, 0);
3148 static noinline void flush_write_bio(void *data)
3150 struct extent_page_data *epd = data;
3151 flush_epd_write_bio(epd);
3154 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3155 get_extent_t *get_extent,
3156 struct writeback_control *wbc)
3159 struct extent_page_data epd = {
3162 .get_extent = get_extent,
3164 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3167 ret = __extent_writepage(page, wbc, &epd);
3169 flush_epd_write_bio(&epd);
3173 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3174 u64 start, u64 end, get_extent_t *get_extent,
3178 struct address_space *mapping = inode->i_mapping;
3180 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3183 struct extent_page_data epd = {
3186 .get_extent = get_extent,
3188 .sync_io = mode == WB_SYNC_ALL,
3190 struct writeback_control wbc_writepages = {
3192 .nr_to_write = nr_pages * 2,
3193 .range_start = start,
3194 .range_end = end + 1,
3197 while (start <= end) {
3198 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3199 if (clear_page_dirty_for_io(page))
3200 ret = __extent_writepage(page, &wbc_writepages, &epd);
3202 if (tree->ops && tree->ops->writepage_end_io_hook)
3203 tree->ops->writepage_end_io_hook(page, start,
3204 start + PAGE_CACHE_SIZE - 1,
3208 page_cache_release(page);
3209 start += PAGE_CACHE_SIZE;
3212 flush_epd_write_bio(&epd);
3216 int extent_writepages(struct extent_io_tree *tree,
3217 struct address_space *mapping,
3218 get_extent_t *get_extent,
3219 struct writeback_control *wbc)
3222 struct extent_page_data epd = {
3225 .get_extent = get_extent,
3227 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3230 ret = extent_write_cache_pages(tree, mapping, wbc,
3231 __extent_writepage, &epd,
3233 flush_epd_write_bio(&epd);
3237 int extent_readpages(struct extent_io_tree *tree,
3238 struct address_space *mapping,
3239 struct list_head *pages, unsigned nr_pages,
3240 get_extent_t get_extent)
3242 struct bio *bio = NULL;
3244 unsigned long bio_flags = 0;
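/*
 * A single bio pointer is threaded through every __extent_read_full_page()
 * call below, so physically contiguous pages get batched into one bio and
 * only the final leftover bio is submitted once the loop finishes.
 */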
3246 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3247 struct page *page = list_entry(pages->prev, struct page, lru);
3249 prefetchw(&page->flags);
3250 list_del(&page->lru);
3251 if (!add_to_page_cache_lru(page, mapping,
3252 page->index, GFP_NOFS)) {
3253 __extent_read_full_page(tree, page, get_extent,
3254 &bio, 0, &bio_flags);
3256 page_cache_release(page);
3258 BUG_ON(!list_empty(pages));
3260 int ret = submit_one_bio(READ, bio, 0, bio_flags);
3267 * basic invalidatepage code, this waits on any locked or writeback
3268 * ranges corresponding to the page, and then deletes any extent state
3269 * records from the tree
3271 int extent_invalidatepage(struct extent_io_tree *tree,
3272 struct page *page, unsigned long offset)
3274 struct extent_state *cached_state = NULL;
3275 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
3276 u64 end = start + PAGE_CACHE_SIZE - 1;
3277 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
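/*
 * Round the starting offset up to the next block boundary; a partial
 * block at the front of the invalidated range is left untouched.
 */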
3279 start += (offset + blocksize - 1) & ~(blocksize - 1);
3283 lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
3284 wait_on_page_writeback(page);
3285 clear_extent_bit(tree, start, end,
3286 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3287 EXTENT_DO_ACCOUNTING,
3288 1, 1, &cached_state, GFP_NOFS);
3293 * a helper for releasepage, this tests for areas of the page that
3294 * are locked or under IO and drops the related state bits if it is safe
3297 int try_release_extent_state(struct extent_map_tree *map,
3298 struct extent_io_tree *tree, struct page *page,
3301 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3302 u64 end = start + PAGE_CACHE_SIZE - 1;
3305 if (test_range_bit(tree, start, end,
3306 EXTENT_IOBITS, 0, NULL))
3309 if ((mask & GFP_NOFS) == GFP_NOFS)
3312 * at this point we can safely clear everything except the
3313 * locked bit and the nodatasum bit
3315 ret = clear_extent_bit(tree, start, end,
3316 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3319 /* if clear_extent_bit failed for enomem reasons,
3320 * we can't allow the release to continue.
3331 * a helper for releasepage. As long as there are no locked extents
3332 * in the range corresponding to the page, both state records and extent
3333 * map records are removed
3335 int try_release_extent_mapping(struct extent_map_tree *map,
3336 struct extent_io_tree *tree, struct page *page,
3339 struct extent_map *em;
3340 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3341 u64 end = start + PAGE_CACHE_SIZE - 1;
3343 if ((mask & __GFP_WAIT) &&
3344 page->mapping->host->i_size > 16 * 1024 * 1024) {
3346 while (start <= end) {
3347 len = end - start + 1;
3348 write_lock(&map->lock);
3349 em = lookup_extent_mapping(map, start, len);
3351 write_unlock(&map->lock);
3354 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3355 em->start != start) {
3356 write_unlock(&map->lock);
3357 free_extent_map(em);
3360 if (!test_range_bit(tree, em->start,
3361 extent_map_end(em) - 1,
3362 EXTENT_LOCKED | EXTENT_WRITEBACK,
3364 remove_extent_mapping(map, em);
3365 /* once for the rb tree */
3366 free_extent_map(em);
3368 start = extent_map_end(em);
3369 write_unlock(&map->lock);
3372 free_extent_map(em);
3375 return try_release_extent_state(map, tree, page, mask);
3379 * helper function for fiemap, which doesn't want to see any holes.
3380 * This maps until we find something past 'last'
3382 static struct extent_map *get_extent_skip_holes(struct inode *inode,
3385 get_extent_t *get_extent)
3387 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
3388 struct extent_map *em;
3395 len = last - offset;
3398 len = (len + sectorsize - 1) & ~(sectorsize - 1);
3399 em = get_extent(inode, NULL, 0, offset, len, 0);
3400 if (IS_ERR_OR_NULL(em))
3403 /* if this isn't a hole return it */
3404 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
3405 em->block_start != EXTENT_MAP_HOLE) {
3409 /* this is a hole, advance to the next extent */
3410 offset = extent_map_end(em);
3411 free_extent_map(em);
3418 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3419 __u64 start, __u64 len, get_extent_t *get_extent)
3423 u64 max = start + len;
3427 u64 last_for_get_extent = 0;
3429 u64 isize = i_size_read(inode);
3430 struct btrfs_key found_key;
3431 struct extent_map *em = NULL;
3432 struct extent_state *cached_state = NULL;
3433 struct btrfs_path *path;
3434 struct btrfs_file_extent_item *item;
3439 unsigned long emflags;
3444 path = btrfs_alloc_path();
3447 path->leave_spinning = 1;
3449 start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3450 len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3453 * lookup the last file extent. We're not using i_size here
3454 * because there might be preallocation past i_size
3456 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
3457 path, btrfs_ino(inode), -1, 0);
3459 btrfs_free_path(path);
3464 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3465 struct btrfs_file_extent_item);
3466 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3467 found_type = btrfs_key_type(&found_key);
3469 /* No extents, but there might be delalloc bits */
3470 if (found_key.objectid != btrfs_ino(inode) ||
3471 found_type != BTRFS_EXTENT_DATA_KEY) {
3472 /* have to trust i_size as the end */
3474 last_for_get_extent = isize;
3477 * remember the start of the last extent. There are a
3478 * bunch of different factors that go into the length of the
3479 * extent, so it's much less complex to remember where it started
3481 last = found_key.offset;
3482 last_for_get_extent = last + 1;
3484 btrfs_free_path(path);
3487 * we might have some extents allocated but more delalloc past those
3488 * extents. so, we trust isize unless the start of the last extent is
3493 last_for_get_extent = isize;
3496 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3497 &cached_state, GFP_NOFS);
3499 em = get_extent_skip_holes(inode, start, last_for_get_extent,
3509 u64 offset_in_extent;
3511 /* break if the extent we found is outside the range */
3512 if (em->start >= max || extent_map_end(em) < off)
3516 * get_extent may return an extent that starts before our
3517 * requested range. We have to make sure the ranges
3518 * we return to fiemap always move forward and don't
3519 * overlap, so adjust the offsets here
3521 em_start = max(em->start, off);
3524 * record the offset from the start of the extent
3525 * for adjusting the disk offset below
3527 offset_in_extent = em_start - em->start;
3528 em_end = extent_map_end(em);
3529 em_len = em_end - em_start;
3530 emflags = em->flags;
3535 * bump off for our next call to get_extent
3537 off = extent_map_end(em);
3541 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
3543 flags |= FIEMAP_EXTENT_LAST;
3544 } else if (em->block_start == EXTENT_MAP_INLINE) {
3545 flags |= (FIEMAP_EXTENT_DATA_INLINE |
3546 FIEMAP_EXTENT_NOT_ALIGNED);
3547 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
3548 flags |= (FIEMAP_EXTENT_DELALLOC |
3549 FIEMAP_EXTENT_UNKNOWN);
3551 disko = em->block_start + offset_in_extent;
3553 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3554 flags |= FIEMAP_EXTENT_ENCODED;
3556 free_extent_map(em);
3558 if ((em_start >= last) || em_len == (u64)-1 ||
3559 (last == (u64)-1 && isize <= em_end)) {
3560 flags |= FIEMAP_EXTENT_LAST;
3564 /* now scan forward to see if this is really the last extent. */
3565 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3572 flags |= FIEMAP_EXTENT_LAST;
3575 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3581 free_extent_map(em);
3583 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
3584 &cached_state, GFP_NOFS);
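/*
 * Sketch of the expected caller (an assumption, not part of this file):
 * the btrfs ->fiemap inode operation forwards to this helper, roughly
 *
 *	return extent_fiemap(inode, fieinfo, start, len,
 *			     btrfs_get_extent_fiemap);
 *
 * where btrfs_get_extent_fiemap is the get_extent_t callback that also
 * reports delalloc ranges.
 */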
3588 inline struct page *extent_buffer_page(struct extent_buffer *eb,
3592 struct address_space *mapping;
3595 return eb->first_page;
3596 i += eb->start >> PAGE_CACHE_SHIFT;
3597 mapping = eb->first_page->mapping;
3602 * extent_buffer_page is only called after pinning the page
3603 * by increasing the reference count. So we know the page must
3604 * be in the radix tree.
3607 p = radix_tree_lookup(&mapping->page_tree, i);
3613 inline unsigned long num_extent_pages(u64 start, u64 len)
3615 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3616 (start >> PAGE_CACHE_SHIFT);
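/*
 * Example: with 4K pages, start = 6144 and len = 8192 cover bytes
 * 6144..14335, i.e. pages 1, 2 and 3, and the formula above gives
 * ((6144 + 8192 + 4095) >> 12) - (6144 >> 12) = 4 - 1 = 3 pages.
 */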
3619 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3624 struct extent_buffer *eb = NULL;
3626 unsigned long flags;
3629 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
3634 rwlock_init(&eb->lock);
3635 atomic_set(&eb->write_locks, 0);
3636 atomic_set(&eb->read_locks, 0);
3637 atomic_set(&eb->blocking_readers, 0);
3638 atomic_set(&eb->blocking_writers, 0);
3639 atomic_set(&eb->spinning_readers, 0);
3640 atomic_set(&eb->spinning_writers, 0);
3641 eb->lock_nested = 0;
3642 init_waitqueue_head(&eb->write_lock_wq);
3643 init_waitqueue_head(&eb->read_lock_wq);
3646 spin_lock_irqsave(&leak_lock, flags);
3647 list_add(&eb->leak_list, &buffers);
3648 spin_unlock_irqrestore(&leak_lock, flags);
3650 atomic_set(&eb->refs, 1);
3655 static void __free_extent_buffer(struct extent_buffer *eb)
3658 unsigned long flags;
3659 spin_lock_irqsave(&leak_lock, flags);
3660 list_del(&eb->leak_list);
3661 spin_unlock_irqrestore(&leak_lock, flags);
3663 kmem_cache_free(extent_buffer_cache, eb);
3667 * Helper for releasing extent buffer page.
3669 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3670 unsigned long start_idx)
3672 unsigned long index;
3675 if (!eb->first_page)
3678 index = num_extent_pages(eb->start, eb->len);
3679 if (start_idx >= index)
3684 page = extent_buffer_page(eb, index);
3686 page_cache_release(page);
3687 } while (index != start_idx);
3691 * Helper for releasing the extent buffer.
3693 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3695 btrfs_release_extent_buffer_page(eb, 0);
3696 __free_extent_buffer(eb);
3699 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3700 u64 start, unsigned long len,
3703 unsigned long num_pages = num_extent_pages(start, len);
3705 unsigned long index = start >> PAGE_CACHE_SHIFT;
3706 struct extent_buffer *eb;
3707 struct extent_buffer *exists = NULL;
3709 struct address_space *mapping = tree->mapping;
3714 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3715 if (eb && atomic_inc_not_zero(&eb->refs)) {
3717 mark_page_accessed(eb->first_page);
3722 eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
3727 eb->first_page = page0;
3730 page_cache_get(page0);
3731 mark_page_accessed(page0);
3732 set_page_extent_mapped(page0);
3733 set_page_extent_head(page0, len);
3734 uptodate = PageUptodate(page0);
3738 for (; i < num_pages; i++, index++) {
3739 p = find_or_create_page(mapping, index, GFP_NOFS);
3744 set_page_extent_mapped(p);
3745 mark_page_accessed(p);
3748 set_page_extent_head(p, len);
3750 set_page_private(p, EXTENT_PAGE_PRIVATE);
3752 if (!PageUptodate(p))
3756 * see below about how we avoid a nasty race with release page
3757 * and why we unlock later
3763 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3765 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
3769 spin_lock(&tree->buffer_lock);
3770 ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
3771 if (ret == -EEXIST) {
3772 exists = radix_tree_lookup(&tree->buffer,
3773 start >> PAGE_CACHE_SHIFT);
3774 /* add one reference for the caller */
3775 atomic_inc(&exists->refs);
3776 spin_unlock(&tree->buffer_lock);
3777 radix_tree_preload_end();
3780 /* add one reference for the tree */
3781 atomic_inc(&eb->refs);
3782 spin_unlock(&tree->buffer_lock);
3783 radix_tree_preload_end();
3786 * there is a race where release page may have
3787 * tried to find this extent buffer in the radix
3788 * but failed. It will tell the VM it is safe to
3789 * reclaim the page and it will clear the page private bit.
3790 * We must make sure to set the page private bit properly
3791 * after the extent buffer is in the radix tree so
3792 * it doesn't get lost
3794 set_page_extent_mapped(eb->first_page);
3795 set_page_extent_head(eb->first_page, eb->len);
3797 unlock_page(eb->first_page);
3801 if (eb->first_page && !page0)
3802 unlock_page(eb->first_page);
3804 if (!atomic_dec_and_test(&eb->refs))
3806 btrfs_release_extent_buffer(eb);
3810 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3811 u64 start, unsigned long len)
3813 struct extent_buffer *eb;
3816 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3817 if (eb && atomic_inc_not_zero(&eb->refs)) {
3819 mark_page_accessed(eb->first_page);
3827 void free_extent_buffer(struct extent_buffer *eb)
3832 if (!atomic_dec_and_test(&eb->refs))
3838 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3839 struct extent_buffer *eb)
3842 unsigned long num_pages;
3845 num_pages = num_extent_pages(eb->start, eb->len);
3847 for (i = 0; i < num_pages; i++) {
3848 page = extent_buffer_page(eb, i);
3849 if (!PageDirty(page))
3853 WARN_ON(!PagePrivate(page));
3855 set_page_extent_mapped(page);
3857 set_page_extent_head(page, eb->len);
3859 clear_page_dirty_for_io(page);
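/*
 * clear_page_dirty_for_io() normally expects writeback to follow; since
 * none will, the radix tree dirty tag is cleared by hand below so the
 * page no longer looks dirty to the writeback code.
 */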
3860 spin_lock_irq(&page->mapping->tree_lock);
3861 if (!PageDirty(page)) {
3862 radix_tree_tag_clear(&page->mapping->page_tree,
3864 PAGECACHE_TAG_DIRTY);
3866 spin_unlock_irq(&page->mapping->tree_lock);
3867 ClearPageError(page);
3873 int set_extent_buffer_dirty(struct extent_io_tree *tree,
3874 struct extent_buffer *eb)
3877 unsigned long num_pages;
3880 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3881 num_pages = num_extent_pages(eb->start, eb->len);
3882 for (i = 0; i < num_pages; i++)
3883 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3887 static int __eb_straddles_pages(u64 start, u64 len)
3889 if (len < PAGE_CACHE_SIZE)
3891 if (start & (PAGE_CACHE_SIZE - 1))
3893 if ((start + len) & (PAGE_CACHE_SIZE - 1))
3898 static int eb_straddles_pages(struct extent_buffer *eb)
3900 return __eb_straddles_pages(eb->start, eb->len);
3903 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3904 struct extent_buffer *eb,
3905 struct extent_state **cached_state)
3909 unsigned long num_pages;
3911 num_pages = num_extent_pages(eb->start, eb->len);
3912 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3914 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3915 cached_state, GFP_NOFS);
3917 for (i = 0; i < num_pages; i++) {
3918 page = extent_buffer_page(eb, i);
3920 ClearPageUptodate(page);
3925 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3926 struct extent_buffer *eb)
3930 unsigned long num_pages;
3932 num_pages = num_extent_pages(eb->start, eb->len);
3934 if (eb_straddles_pages(eb)) {
3935 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3938 for (i = 0; i < num_pages; i++) {
3939 page = extent_buffer_page(eb, i);
3940 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3941 ((i == num_pages - 1) &&
3942 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3943 check_page_uptodate(tree, page);
3946 SetPageUptodate(page);
3951 int extent_range_uptodate(struct extent_io_tree *tree,
3956 int pg_uptodate = 1;
3958 unsigned long index;
3960 if (__eb_straddles_pages(start, end - start + 1)) {
3961 ret = test_range_bit(tree, start, end,
3962 EXTENT_UPTODATE, 1, NULL);
3966 while (start <= end) {
3967 index = start >> PAGE_CACHE_SHIFT;
3968 page = find_get_page(tree->mapping, index);
3971 uptodate = PageUptodate(page);
3972 page_cache_release(page);
3977 start += PAGE_CACHE_SIZE;
3982 int extent_buffer_uptodate(struct extent_io_tree *tree,
3983 struct extent_buffer *eb,
3984 struct extent_state *cached_state)
3987 unsigned long num_pages;
3990 int pg_uptodate = 1;
3992 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3995 if (eb_straddles_pages(eb)) {
3996 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3997 EXTENT_UPTODATE, 1, cached_state);
4002 num_pages = num_extent_pages(eb->start, eb->len);
4003 for (i = 0; i < num_pages; i++) {
4004 page = extent_buffer_page(eb, i);
4005 if (!PageUptodate(page)) {
4013 int read_extent_buffer_pages(struct extent_io_tree *tree,
4014 struct extent_buffer *eb, u64 start, int wait,
4015 get_extent_t *get_extent, int mirror_num)
4018 unsigned long start_i;
4022 int locked_pages = 0;
4023 int all_uptodate = 1;
4024 int inc_all_pages = 0;
4025 unsigned long num_pages;
4026 struct bio *bio = NULL;
4027 unsigned long bio_flags = 0;
4029 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4032 if (eb_straddles_pages(eb)) {
4033 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
4034 EXTENT_UPTODATE, 1, NULL)) {
4040 WARN_ON(start < eb->start);
4041 start_i = (start >> PAGE_CACHE_SHIFT) -
4042 (eb->start >> PAGE_CACHE_SHIFT);
4047 num_pages = num_extent_pages(eb->start, eb->len);
4048 for (i = start_i; i < num_pages; i++) {
4049 page = extent_buffer_page(eb, i);
4050 if (wait == WAIT_NONE) {
4051 if (!trylock_page(page))
4057 if (!PageUptodate(page))
4062 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4066 for (i = start_i; i < num_pages; i++) {
4067 page = extent_buffer_page(eb, i);
4069 WARN_ON(!PagePrivate(page));
4071 set_page_extent_mapped(page);
4073 set_page_extent_head(page, eb->len);
4076 page_cache_get(page);
4077 if (!PageUptodate(page)) {
4080 ClearPageError(page);
4081 err = __extent_read_full_page(tree, page,
4083 mirror_num, &bio_flags);
4092 err = submit_one_bio(READ, bio, mirror_num, bio_flags);
4096 if (ret || wait != WAIT_COMPLETE)
4099 for (i = start_i; i < num_pages; i++) {
4100 page = extent_buffer_page(eb, i);
4101 wait_on_page_locked(page);
4102 if (!PageUptodate(page))
4107 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4112 while (locked_pages > 0) {
4113 page = extent_buffer_page(eb, i);
4121 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4122 unsigned long start,
4129 char *dst = (char *)dstv;
4130 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4131 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
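/*
 * eb->start is not necessarily page aligned, so start_offset is the
 * buffer's offset within its first page; (start_offset + start) then
 * converts a buffer-relative offset into a page index plus in-page offset.
 */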
4133 WARN_ON(start > eb->len);
4134 WARN_ON(start + len > eb->start + eb->len);
4136 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4139 page = extent_buffer_page(eb, i);
4141 cur = min(len, (PAGE_CACHE_SIZE - offset));
4142 kaddr = page_address(page);
4143 memcpy(dst, kaddr + offset, cur);
4152 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
4153 unsigned long min_len, char **map,
4154 unsigned long *map_start,
4155 unsigned long *map_len)
4157 size_t offset = start & (PAGE_CACHE_SIZE - 1);
4160 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4161 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4162 unsigned long end_i = (start_offset + start + min_len - 1) >>
4169 offset = start_offset;
4173 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4176 if (start + min_len > eb->len) {
4177 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4178 "wanted %lu %lu\n", (unsigned long long)eb->start,
4179 eb->len, start, min_len);
4184 p = extent_buffer_page(eb, i);
4185 kaddr = page_address(p);
4186 *map = kaddr + offset;
4187 *map_len = PAGE_CACHE_SIZE - offset;
4191 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4192 unsigned long start,
4199 char *ptr = (char *)ptrv;
4200 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4201 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4204 WARN_ON(start > eb->len);
4205 WARN_ON(start + len > eb->start + eb->len);
4207 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4210 page = extent_buffer_page(eb, i);
4212 cur = min(len, (PAGE_CACHE_SIZE - offset));
4214 kaddr = page_address(page);
4215 ret = memcmp(ptr, kaddr + offset, cur);
4227 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
4228 unsigned long start, unsigned long len)
4234 char *src = (char *)srcv;
4235 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4236 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4238 WARN_ON(start > eb->len);
4239 WARN_ON(start + len > eb->start + eb->len);
4241 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4244 page = extent_buffer_page(eb, i);
4245 WARN_ON(!PageUptodate(page));
4247 cur = min(len, PAGE_CACHE_SIZE - offset);
4248 kaddr = page_address(page);
4249 memcpy(kaddr + offset, src, cur);
4258 void memset_extent_buffer(struct extent_buffer *eb, char c,
4259 unsigned long start, unsigned long len)
4265 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4266 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4268 WARN_ON(start > eb->len);
4269 WARN_ON(start + len > eb->start + eb->len);
4271 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4274 page = extent_buffer_page(eb, i);
4275 WARN_ON(!PageUptodate(page));
4277 cur = min(len, PAGE_CACHE_SIZE - offset);
4278 kaddr = page_address(page);
4279 memset(kaddr + offset, c, cur);
4287 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
4288 unsigned long dst_offset, unsigned long src_offset,
4291 u64 dst_len = dst->len;
4296 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4297 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4299 WARN_ON(src->len != dst_len);
4301 offset = (start_offset + dst_offset) &
4302 ((unsigned long)PAGE_CACHE_SIZE - 1);
4305 page = extent_buffer_page(dst, i);
4306 WARN_ON(!PageUptodate(page));
4308 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
4310 kaddr = page_address(page);
4311 read_extent_buffer(src, kaddr + offset, src_offset, cur);
4320 static void move_pages(struct page *dst_page, struct page *src_page,
4321 unsigned long dst_off, unsigned long src_off,
4324 char *dst_kaddr = page_address(dst_page);
4325 if (dst_page == src_page) {
4326 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
4328 char *src_kaddr = page_address(src_page);
4329 char *p = dst_kaddr + dst_off + len;
4330 char *s = src_kaddr + src_off + len;
while (len--)
*--p = *--s;
4337 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4339 unsigned long distance = (src > dst) ? src - dst : dst - src;
4340 return distance < len;
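/*
 * Example: src = 100, dst = 160, len = 80 gives distance = 60 < 80, so the
 * two 80-byte areas overlap; with len = 50 the distance is not smaller and
 * they do not.
 */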
4343 static void copy_pages(struct page *dst_page, struct page *src_page,
4344 unsigned long dst_off, unsigned long src_off,
4347 char *dst_kaddr = page_address(dst_page);
4350 if (dst_page != src_page) {
4351 src_kaddr = page_address(src_page);
4353 src_kaddr = dst_kaddr;
4354 BUG_ON(areas_overlap(src_off, dst_off, len));
4357 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
4360 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4361 unsigned long src_offset, unsigned long len)
4364 size_t dst_off_in_page;
4365 size_t src_off_in_page;
4366 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4367 unsigned long dst_i;
4368 unsigned long src_i;
4370 if (src_offset + len > dst->len) {
4371 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4372 "len %lu dst len %lu\n", src_offset, len, dst->len);
4375 if (dst_offset + len > dst->len) {
4376 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4377 "len %lu dst len %lu\n", dst_offset, len, dst->len);
4382 dst_off_in_page = (start_offset + dst_offset) &
4383 ((unsigned long)PAGE_CACHE_SIZE - 1);
4384 src_off_in_page = (start_offset + src_offset) &
4385 ((unsigned long)PAGE_CACHE_SIZE - 1);
4387 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4388 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
4390 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - src_off_in_page));
4392 cur = min_t(unsigned long, cur,
4393 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
4395 copy_pages(extent_buffer_page(dst, dst_i),
4396 extent_buffer_page(dst, src_i),
4397 dst_off_in_page, src_off_in_page, cur);
4405 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4406 unsigned long src_offset, unsigned long len)
4409 size_t dst_off_in_page;
4410 size_t src_off_in_page;
4411 unsigned long dst_end = dst_offset + len - 1;
4412 unsigned long src_end = src_offset + len - 1;
4413 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4414 unsigned long dst_i;
4415 unsigned long src_i;
4417 if (src_offset + len > dst->len) {
4418 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4419 "len %lu len %lu\n", src_offset, len, dst->len);
4422 if (dst_offset + len > dst->len) {
4423 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4424 "len %lu len %lu\n", dst_offset, len, dst->len);
4427 if (!areas_overlap(src_offset, dst_offset, len)) {
4428 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4432 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
4433 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
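/*
 * Overlapping move: walk from the tail of both ranges towards the front,
 * copying at most one page worth per iteration, so bytes are never
 * overwritten before they have been read.
 */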
4435 dst_off_in_page = (start_offset + dst_end) &
4436 ((unsigned long)PAGE_CACHE_SIZE - 1);
4437 src_off_in_page = (start_offset + src_end) &
4438 ((unsigned long)PAGE_CACHE_SIZE - 1);
4440 cur = min_t(unsigned long, len, src_off_in_page + 1);
4441 cur = min(cur, dst_off_in_page + 1);
4442 move_pages(extent_buffer_page(dst, dst_i),
4443 extent_buffer_page(dst, src_i),
4444 dst_off_in_page - cur + 1,
4445 src_off_in_page - cur + 1, cur);
4453 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4455 struct extent_buffer *eb =
4456 container_of(head, struct extent_buffer, rcu_head);
4458 btrfs_release_extent_buffer(eb);
4461 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
4463 u64 start = page_offset(page);
4464 struct extent_buffer *eb;
4467 spin_lock(&tree->buffer_lock);
4468 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4470 spin_unlock(&tree->buffer_lock);
4474 if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
4480 * set @eb->refs to 0 if it is already 1, and then release the @eb.
4483 if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
4488 radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4490 spin_unlock(&tree->buffer_lock);
4492 /* at this point we can safely release the extent buffer */
4493 if (atomic_read(&eb->refs) == 0)
4494 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);