Btrfs: use slabs for auto defrag allocation
fs/btrfs/file.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
#include "volumes.h"

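/*
 * dedicated slab cache for inode_defrag records (the point of this
 * patch): the structs are small and allocated frequently, so a slab
 * is cheaper than kmalloc and makes the allocations easy to account for
 */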
static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * when auto defrag is enabled, we queue up these defrag structs
 * to remember which inodes need defragging passes
 */
struct inode_defrag {
        struct rb_node rb_node;
        /* objectid */
        u64 ino;
        /*
         * transid when the defrag was added; we search for
         * extents newer than this
         */
        u64 transid;

        /* root objectid */
        u64 root;

        /* last offset we were able to defrag */
        u64 last_offset;

        /* if we've wrapped around back to zero once already */
        int cycled;
};

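/*
 * defrag records are ordered by (root, ino); returns <0, 0 or >0 as
 * defrag1 sorts before, equal to or after defrag2, memcmp-style
 */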
static int __compare_inode_defrag(struct inode_defrag *defrag1,
                                  struct inode_defrag *defrag2)
{
        if (defrag1->root > defrag2->root)
                return 1;
        else if (defrag1->root < defrag2->root)
                return -1;
        else if (defrag1->ino > defrag2->ino)
                return 1;
        else if (defrag1->ino < defrag2->ino)
                return -1;
        else
                return 0;
}

/* insert a record for an inode into the defrag tree.  The lock
 * must already be held.
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered.
 *
 * If an existing record is found, the defrag item you
 * pass in is freed.
 */
static void __btrfs_add_inode_defrag(struct inode *inode,
                                    struct inode_defrag *defrag)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *entry;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        int ret;

        p = &root->fs_info->defrag_inodes.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(defrag, entry);
                if (ret < 0)
                        p = &parent->rb_left;
                else if (ret > 0)
                        p = &parent->rb_right;
                else {
                        /* if we're reinserting an entry for
                         * an old defrag run, make sure to
                         * lower the transid of our existing record
                         */
                        if (defrag->transid < entry->transid)
                                entry->transid = defrag->transid;
                        if (defrag->last_offset > entry->last_offset)
                                entry->last_offset = defrag->last_offset;
                        goto exists;
                }
        }
        set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
        return;

exists:
        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        return;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
                           struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *defrag;
        u64 transid;

        if (!btrfs_test_opt(root, AUTO_DEFRAG))
                return 0;

        if (btrfs_fs_closing(root->fs_info))
                return 0;

        if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                return 0;

        if (trans)
                transid = trans->transid;
        else
                transid = BTRFS_I(inode)->root->last_trans;

        defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
        if (!defrag)
                return -ENOMEM;

        defrag->ino = btrfs_ino(inode);
        defrag->transid = transid;
        defrag->root = root->root_key.objectid;

        spin_lock(&root->fs_info->defrag_inodes_lock);
        if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                __btrfs_add_inode_defrag(inode, defrag);
        else
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        spin_unlock(&root->fs_info->defrag_inodes_lock);
        return 0;
}

/*
 * look up a defrag record for (root, ino).  Must be called with the
 * defrag_inodes lock held.  Returns the matching record or NULL; if
 * @next is non-NULL, it is set to the first record that sorts after
 * (root, ino).
 */
struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
                                             u64 root, u64 ino,
                                             struct rb_node **next)
{
        struct inode_defrag *entry = NULL;
        struct inode_defrag tmp;
        struct rb_node *p;
        struct rb_node *parent = NULL;
        int ret;

        tmp.ino = ino;
        tmp.root = root;

        p = info->defrag_inodes.rb_node;
        while (p) {
                parent = p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(&tmp, entry);
                if (ret < 0)
                        p = parent->rb_left;
                else if (ret > 0)
                        p = parent->rb_right;
                else
                        return entry;
        }

        if (next) {
                while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
                        parent = rb_next(parent);
                        entry = rb_entry(parent, struct inode_defrag, rb_node);
                }
                *next = parent;
        }
        return NULL;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        struct btrfs_root *inode_root;
        struct inode *inode;
        struct rb_node *n;
        struct btrfs_key key;
        struct btrfs_ioctl_defrag_range_args range;
        u64 first_ino = 0;
        u64 root_objectid = 0;
        int num_defrag;
        int defrag_batch = 1024;

        memset(&range, 0, sizeof(range));
        range.len = (u64)-1;

        atomic_inc(&fs_info->defrag_running);
        spin_lock(&fs_info->defrag_inodes_lock);
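        /*
         * (root_objectid, first_ino) form a cursor into the rbtree of
         * pending records; when a search runs off the end of the tree
         * the cursor is reset to (0, 0) to pick up records inserted
         * behind it, and we stop once a scan from the start finds
         * nothing
         */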
        while (1) {
                n = NULL;

                /* find an inode to defrag */
                defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
                                                 first_ino, &n);
                if (!defrag) {
                        if (n) {
                                defrag = rb_entry(n, struct inode_defrag,
                                                  rb_node);
                        } else if (root_objectid || first_ino) {
                                root_objectid = 0;
                                first_ino = 0;
                                continue;
                        } else {
                                break;
                        }
                }

                /* remove it from the rbtree */
                first_ino = defrag->ino + 1;
                root_objectid = defrag->root;
                rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);

                if (btrfs_fs_closing(fs_info))
                        goto next_free;

                spin_unlock(&fs_info->defrag_inodes_lock);

                /* get the inode */
                key.objectid = defrag->root;
                btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
                key.offset = (u64)-1;
                inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
                if (IS_ERR(inode_root))
                        goto next;

                key.objectid = defrag->ino;
                btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
                key.offset = 0;

                inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
                if (IS_ERR(inode))
                        goto next;

                /* do a chunk of defrag */
                clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
                range.start = defrag->last_offset;
                num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
                                               defrag_batch);
                /*
                 * if we filled the whole defrag batch, there
                 * must be more work to do.  Queue this defrag
                 * again
                 */
                if (num_defrag == defrag_batch) {
                        defrag->last_offset = range.start;
                        __btrfs_add_inode_defrag(inode, defrag);
                        /*
                         * we don't want to kfree defrag, we added it back to
                         * the rbtree
                         */
                        defrag = NULL;
                } else if (defrag->last_offset && !defrag->cycled) {
                        /*
                         * we didn't fill our defrag batch, but
                         * we didn't start at zero.  Make sure we loop
                         * around to the start of the file.
                         */
                        defrag->last_offset = 0;
                        defrag->cycled = 1;
                        __btrfs_add_inode_defrag(inode, defrag);
                        defrag = NULL;
                }

                iput(inode);
next:
                spin_lock(&fs_info->defrag_inodes_lock);
next_free:
                if (defrag)
                        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        }
        spin_unlock(&fs_info->defrag_inodes_lock);

        atomic_dec(&fs_info->defrag_running);

        /*
         * during unmount, we use the transaction_wait queue to
         * wait for the defragger to stop
         */
        wake_up(&fs_info->transaction_wait);
        return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
                                         size_t write_bytes,
                                         struct page **prepared_pages,
                                         struct iov_iter *i)
{
        size_t copied = 0;
        size_t total_copied = 0;
        int pg = 0;
        int offset = pos & (PAGE_CACHE_SIZE - 1);

        while (write_bytes > 0) {
                size_t count = min_t(size_t,
                                     PAGE_CACHE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
                 *
                 * Disable pagefault to avoid recursive lock since
                 * the pages are already locked
                 */
                pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
                pagefault_enable();

                /* Flush processor's dcache for this page */
                flush_dcache_page(page);

                /*
                 * if we get a partial write, we can end up with
                 * partially up to date pages.  These add
                 * a lot of complexity, so make sure they don't
                 * happen by forcing this copy to be retried.
                 *
                 * The rest of the btrfs_file_write code will fall
                 * back to page at a time copies after we return 0.
                 */
                if (!PageUptodate(page) && copied < count)
                        copied = 0;

                iov_iter_advance(i, copied);
                write_bytes -= copied;
                total_copied += copied;

                /* Return to btrfs_file_aio_write to fault page */
                if (unlikely(copied == 0))
                        break;

                if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
                        offset += copied;
                } else {
                        pg++;
                        offset = 0;
                }
        }
        return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;
        for (i = 0; i < num_pages; i++) {
                /* page checked is some magic around finding pages that
                 * have been modified without going through
                 * btrfs_set_page_dirty; clear it here
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                mark_page_accessed(pages[i]);
                page_cache_release(pages[i]);
        }
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
                      struct page **pages, size_t num_pages,
                      loff_t pos, size_t write_bytes,
                      struct extent_state **cached)
{
        int err = 0;
        int i;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);

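        /*
         * round the start of the range down and its length up to sector
         * boundaries; delalloc state is tracked on whole sectors
         */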
        start_pos = pos & ~((u64)root->sectorsize - 1);
        num_bytes = (write_bytes + pos - start_pos +
                    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
                                        cached);
        if (err)
                return err;

        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
                SetPageUptodate(p);
                ClearPageChecked(p);
                set_page_dirty(p);
        }

        /*
         * we've only changed i_size in RAM, and we haven't updated
         * the disk i_size.  There is no need to log the inode
         * at this time.
         */
        if (end_pos > isize)
                i_size_write(inode, end_pos);
        return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                             int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 len = end - start + 1;
        u64 gen;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
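        /*
         * allocate the replacement maps before taking em_tree->lock (a
         * rwlock we must not sleep under).  If either allocation fails
         * the range is still dropped; we just can't keep the pieces of
         * an extent that straddles it (no_splits below)
         */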
        while (1) {
                int no_splits = 0;

                if (!split)
                        split = alloc_extent_map();
                if (!split2)
                        split2 = alloc_extent_map();
                if (!split || !split2)
                        no_splits = 1;

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
                gen = em->generation;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (testend && em->start + em->len >= start + len) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        start = em->start + em->len;
                        if (testend)
                                len = start + len - (em->start + em->len);
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                remove_extent_mapping(em_tree, em);
                if (no_splits)
                        goto next;

                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;
                        split->orig_start = em->orig_start;
                        split->block_start = em->block_start;

                        if (compressed)
                                split->block_len = em->block_len;
                        else
                                split->block_len = split->len;
                        split->generation = gen;
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret); /* Logic error */
                        list_move(&split->list, &em_tree->modified_extents);
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        split->generation = gen;

                        if (compressed) {
                                split->block_len = em->block_len;
                                split->block_start = em->block_start;
                                split->orig_start = em->orig_start;
                        } else {
                                split->block_len = split->len;
                                split->block_start = em->block_start + diff;
                                split->orig_start = split->start;
                        }

                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret); /* Logic error */
                        list_move(&split->list, &em_tree->modified_extents);
                        free_extent_map(split);
                        split = NULL;
                }
next:
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root, struct inode *inode,
                         struct btrfs_path *path, u64 start, u64 end,
                         u64 *drop_end, int drop_cache)
{
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 ino = btrfs_ino(inode);
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
        u64 extent_offset = 0;
        u64 extent_end = 0;
        int del_nr = 0;
        int del_slot = 0;
        int extent_type;
        int recow;
        int ret;
        int modify_tree = -1;
        int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
        int found = 0;

        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);

        if (start >= BTRFS_I(inode)->disk_i_size)
                modify_tree = 0;

        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               search_start, modify_tree);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                        if (key.objectid == ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                ret = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        BUG_ON(del_nr > 0);
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                break;
                        if (ret > 0) {
                                ret = 0;
                                break;
                        }
                        leaf = path->nodes[0];
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid > ino ||
                    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
                                btrfs_file_extent_inline_len(leaf, fi);
                } else {
                        WARN_ON(1);
                        extent_end = search_start;
                }

                if (extent_end <= search_start) {
                        path->slots[0]++;
                        goto next_slot;
                }

                found = 1;
                search_start = max(key.offset, start);
                if (recow || !modify_tree) {
                        modify_tree = -1;
                        btrfs_release_path(path);
                        continue;
                }

                /*
                 *     | - range to drop - |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = start;
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
                                btrfs_release_path(path);
                                continue;
                        }
                        if (ret < 0)
                                break;

                        leaf = path->nodes[0];
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);

                        extent_offset += start - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - start);
                        btrfs_mark_buffer_dirty(leaf);

                        if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                new_key.objectid,
                                                start - extent_offset, 0);
                                BUG_ON(ret); /* -ENOMEM */
                        }
                        key.offset = start;
                }
                /*
                 *  | ---- range to drop ----- |
                 *      | -------- extent -------- |
                 */
                if (start <= key.offset && end < extent_end) {
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        extent_offset += end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, end - key.offset);
                        break;
                }

                search_start = extent_end;
                /*
                 *       | ---- range to drop ----- |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, extent_end - start);
                        if (end == extent_end)
                                break;

                        path->slots[0]++;
                        goto next_slot;
                }

                /*
                 *  | ---- range to drop ----- |
                 *    | ------ extent ------ |
                 */
                if (start <= key.offset && end >= extent_end) {
                        if (del_nr == 0) {
                                del_slot = path->slots[0];
                                del_nr = 1;
                        } else {
                                BUG_ON(del_slot + del_nr != path->slots[0]);
                                del_nr++;
                        }

                        if (update_refs &&
                            extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                extent_end = ALIGN(extent_end,
                                                   root->sectorsize);
                        } else if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_free_extent(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                key.objectid, key.offset -
                                                extent_offset, 0);
                                BUG_ON(ret); /* -ENOMEM */
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                        }

                        if (end == extent_end)
                                break;

                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
                                path->slots[0]++;
                                goto next_slot;
                        }

                        ret = btrfs_del_items(trans, root, path, del_slot,
                                              del_nr);
                        if (ret) {
                                btrfs_abort_transaction(trans, root, ret);
                                break;
                        }

                        del_nr = 0;
                        del_slot = 0;

                        btrfs_release_path(path);
                        continue;
                }

                BUG_ON(1);
        }

        if (!ret && del_nr > 0) {
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret)
                        btrfs_abort_transaction(trans, root, ret);
        }

        if (drop_end)
                *drop_end = found ? min(end, extent_end) : end;
        btrfs_release_path(path);
        return ret;
}

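/*
 * convenience wrapper around __btrfs_drop_extents that allocates and
 * frees its own path and doesn't report where the dropped range ended
 */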
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct inode *inode, u64 start,
                       u64 end, int drop_cache)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
                                   drop_cache);
        btrfs_free_path(path);
        return ret;
}

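/*
 * check whether the extent item in @slot is a plain (REG, uncompressed,
 * unencrypted) extent pointing into the same on-disk extent as
 * @bytenr/@orig_offset at the matching offset; if so, return 1 and
 * report its range in *start/*end so the caller can merge with it
 */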
static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of the extent is marked as written, the extent will be split
 * into two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct inode *inode, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        recow = 0;
        split = start;
        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        BUG_ON(btrfs_file_extent_type(leaf, fi) !=
               BTRFS_FILE_EXTENT_PREALLOC);
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        BUG_ON(key.offset > start || extent_end < end);

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));

        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

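        /*
         * the written range could not be merged with a neighbour, so
         * split the prealloc extent at start and/or end.  Each pass
         * duplicates the item at one split point; the loop runs at
         * most twice
         */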
        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
                        btrfs_release_path(path);
                        goto again;
                }
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }

                leaf = path->nodes[0];
                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                split - key.offset);

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);

                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - split);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
                                           ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */

                if (split == start) {
                        key.offset = start;
                } else {
                        BUG_ON(start != key.offset);
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }

        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        if (del_nr == 0) {
                fi = btrfs_item_ptr(leaf, path->slots[0],
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_mark_buffer_dirty(leaf);
        } else {
                fi = btrfs_item_ptr(leaf, del_slot - 1,
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }
        }
out:
        btrfs_free_path(path);
        return 0;
}

/*
 * on error we return an unlocked page and the error value;
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
                                 bool force_uptodate)
{
        int ret = 0;

        if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
            !PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
                if (ret)
                        return ret;
                lock_page(page);
                if (!PageUptodate(page)) {
                        unlock_page(page);
                        return -EIO;
                }
        }
        return 0;
}

/*
 * this gets pages into the page cache and locks them down.  It also
 * properly waits for data=ordered extents to finish before allowing
 * the pages to be modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
                         struct page **pages, size_t num_pages,
                         loff_t pos, unsigned long first_index,
                         size_t write_bytes, bool force_uptodate)
{
        struct extent_state *cached_state = NULL;
        int i;
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
        struct inode *inode = fdentry(file)->d_inode;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int err = 0;
        int faili = 0;
        u64 start_pos;
        u64 last_pos;

        start_pos = pos & ~((u64)root->sectorsize - 1);
        last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = find_or_create_page(inode->i_mapping, index + i,
                                               mask | __GFP_WRITE);
                if (!pages[i]) {
                        faili = i - 1;
                        err = -ENOMEM;
                        goto fail;
                }

                if (i == 0)
                        err = prepare_uptodate_page(pages[i], pos,
                                                    force_uptodate);
                if (i == num_pages - 1)
                        err = prepare_uptodate_page(pages[i],
                                                    pos + write_bytes, false);
                if (err) {
                        page_cache_release(pages[i]);
                        faili = i - 1;
                        goto fail;
                }
                wait_on_page_writeback(pages[i]);
        }
        err = 0;
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
                lock_extent_bits(&BTRFS_I(inode)->io_tree,
                                 start_pos, last_pos - 1, 0, &cached_state);
                ordered = btrfs_lookup_first_ordered_extent(inode,
                                                            last_pos - 1);
                if (ordered &&
                    ordered->file_offset + ordered->len > start_pos &&
                    ordered->file_offset < last_pos) {
                        btrfs_put_ordered_extent(ordered);
                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                             start_pos, last_pos - 1,
                                             &cached_state, GFP_NOFS);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
                                page_cache_release(pages[i]);
                        }
                        btrfs_wait_ordered_range(inode, start_pos,
                                                 last_pos - start_pos);
                        goto again;
                }
                if (ordered)
                        btrfs_put_ordered_extent(ordered);

                clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
                                  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
                                  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                                  0, 0, &cached_state, GFP_NOFS);
                unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                     start_pos, last_pos - 1, &cached_state,
                                     GFP_NOFS);
        }
        for (i = 0; i < num_pages; i++) {
                if (clear_page_dirty_for_io(pages[i]))
                        account_page_redirty(pages[i]);
                set_page_extent_mapped(pages[i]);
                WARN_ON(!PageLocked(pages[i]));
        }
        return 0;
fail:
        while (faili >= 0) {
                unlock_page(pages[faili]);
                page_cache_release(pages[faili]);
                faili--;
        }
        return err;
}

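/*
 * the buffered write loop: for each chunk we reserve delalloc space,
 * fault the user pages in, lock the target page cache pages via
 * prepare_pages, copy into them and mark them dirty/delalloc with
 * btrfs_dirty_pages, releasing any reservation we didn't end up using
 */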
static noinline ssize_t __btrfs_buffered_write(struct file *file,
                                               struct iov_iter *i,
                                               loff_t pos)
{
        struct inode *inode = fdentry(file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct page **pages = NULL;
        unsigned long first_index;
        size_t num_written = 0;
        int nrptrs;
        int ret = 0;
        bool force_page_uptodate = false;
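
        /*
         * size the pages array: enough slots for the whole iov, at most
         * one page worth of pointers, capped by how many pages we may
         * dirty before throttling and floored at 8
         */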
1246         nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
1247                      PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
1248                      (sizeof(struct page *)));
1249         nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1250         nrptrs = max(nrptrs, 8);
1251         pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
1252         if (!pages)
1253                 return -ENOMEM;
1254
1255         first_index = pos >> PAGE_CACHE_SHIFT;
1256
1257         while (iov_iter_count(i) > 0) {
1258                 size_t offset = pos & (PAGE_CACHE_SIZE - 1);
1259                 size_t write_bytes = min(iov_iter_count(i),
1260                                          nrptrs * (size_t)PAGE_CACHE_SIZE -
1261                                          offset);
1262                 size_t num_pages = (write_bytes + offset +
1263                                     PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1264                 size_t dirty_pages;
1265                 size_t copied;
1266
1267                 WARN_ON(num_pages > nrptrs);
1268
1269                 /*
1270                  * Fault pages before locking them in prepare_pages
1271                  * to avoid recursive lock
1272                  */
1273                 if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1274                         ret = -EFAULT;
1275                         break;
1276                 }
1277
1278                 ret = btrfs_delalloc_reserve_space(inode,
1279                                         num_pages << PAGE_CACHE_SHIFT);
1280                 if (ret)
1281                         break;
1282
1283                 /*
1284                  * This is going to setup the pages array with the number of
1285                  * pages we want, so we don't really need to worry about the
1286                  * contents of pages from loop to loop
1287                  */
1288                 ret = prepare_pages(root, file, pages, num_pages,
1289                                     pos, first_index, write_bytes,
1290                                     force_page_uptodate);
1291                 if (ret) {
1292                         btrfs_delalloc_release_space(inode,
1293                                         num_pages << PAGE_CACHE_SHIFT);
1294                         break;
1295                 }
1296
1297                 copied = btrfs_copy_from_user(pos, num_pages,
1298                                            write_bytes, pages, i);
1299
1300                 /*
1301                  * if we have trouble faulting in the pages, fall
1302                  * back to one page at a time
1303                  */
1304                 if (copied < write_bytes)
1305                         nrptrs = 1;
1306
1307                 if (copied == 0) {
1308                         force_page_uptodate = true;
1309                         dirty_pages = 0;
1310                 } else {
1311                         force_page_uptodate = false;
1312                         dirty_pages = (copied + offset +
1313                                        PAGE_CACHE_SIZE - 1) >>
1314                                        PAGE_CACHE_SHIFT;
1315                 }
1316
1317                 /*
1318                  * If we had a short copy we need to release the excess delaloc
1319                  * bytes we reserved.  We need to increment outstanding_extents
1320                  * because btrfs_delalloc_release_space will decrement it, but
1321                  * we still have an outstanding extent for the chunk we actually
1322                  * managed to copy.
1323                  */
1324                 if (num_pages > dirty_pages) {
1325                         if (copied > 0) {
1326                                 spin_lock(&BTRFS_I(inode)->lock);
1327                                 BTRFS_I(inode)->outstanding_extents++;
1328                                 spin_unlock(&BTRFS_I(inode)->lock);
1329                         }
1330                         btrfs_delalloc_release_space(inode,
1331                                         (num_pages - dirty_pages) <<
1332                                         PAGE_CACHE_SHIFT);
1333                 }
1334
1335                 if (copied > 0) {
1336                         ret = btrfs_dirty_pages(root, inode, pages,
1337                                                 dirty_pages, pos, copied,
1338                                                 NULL);
1339                         if (ret) {
1340                                 btrfs_delalloc_release_space(inode,
1341                                         dirty_pages << PAGE_CACHE_SHIFT);
1342                                 btrfs_drop_pages(pages, num_pages);
1343                                 break;
1344                         }
1345                 }
1346
1347                 btrfs_drop_pages(pages, num_pages);
1348
1349                 cond_resched();
1350
1351                 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
1352                                                    dirty_pages);
1353                 if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
1354                         btrfs_btree_balance_dirty(root);
1355
1356                 pos += copied;
1357                 num_written += copied;
1358         }
1359
1360         kfree(pages);
1361
1362         return num_written ? num_written : ret;
1363 }
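/*
 * A worked example of the chunking math in the loop above (an
 * illustrative sketch assuming a 4K PAGE_CACHE_SIZE and nrptrs clamped
 * to 8; the numbers are hypothetical):
 *
 *   pos = 5000, iov_iter_count(i) = 40000
 *   offset      = 5000 & 4095                  = 904
 *   write_bytes = min(40000, 8 * 4096 - 904)   = 31864
 *   num_pages   = (31864 + 904 + 4095) >> 12   = 8
 *
 * so one pass through the loop copies into 8 pages starting at
 * first_index = 5000 >> 12 = 1, and the remaining 8136 bytes are
 * handled by the next iteration.
 */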
1364
1365 static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1366                                     const struct iovec *iov,
1367                                     unsigned long nr_segs, loff_t pos,
1368                                     loff_t *ppos, size_t count, size_t ocount)
1369 {
1370         struct file *file = iocb->ki_filp;
1371         struct iov_iter i;
1372         ssize_t written;
1373         ssize_t written_buffered;
1374         loff_t endbyte;
1375         int err;
1376
1377         written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
1378                                             count, ocount);
1379
1380         if (written < 0 || written == count)
1381                 return written;
1382
1383         pos += written;
1384         count -= written;
1385         iov_iter_init(&i, iov, nr_segs, count, written);
1386         written_buffered = __btrfs_buffered_write(file, &i, pos);
1387         if (written_buffered < 0) {
1388                 err = written_buffered;
1389                 goto out;
1390         }
1391         endbyte = pos + written_buffered - 1;
1392         err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
1393         if (err)
1394                 goto out;
1395         written += written_buffered;
1396         *ppos = pos + written_buffered;
1397         invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
1398                                  endbyte >> PAGE_CACHE_SHIFT);
1399 out:
1400         return written ? written : err;
1401 }
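/*
 * An illustrative walk-through of the fallback above (the numbers are
 * hypothetical): an O_DIRECT write of 1M with a 512-byte tail that the
 * direct path can't handle may get written = 1M - 512 back from
 * generic_file_direct_write().  The remaining 512 bytes then go through
 * __btrfs_buffered_write(), are flushed out with
 * filemap_write_and_wait_range(), and the affected pages are
 * invalidated so a later direct read doesn't see stale page cache.
 */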
1402
1403 static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1404                                     const struct iovec *iov,
1405                                     unsigned long nr_segs, loff_t pos)
1406 {
1407         struct file *file = iocb->ki_filp;
1408         struct inode *inode = fdentry(file)->d_inode;
1409         struct btrfs_root *root = BTRFS_I(inode)->root;
1410         loff_t *ppos = &iocb->ki_pos;
1411         u64 start_pos;
1412         ssize_t num_written = 0;
1413         ssize_t err = 0;
1414         size_t count, ocount;
1415
1416         sb_start_write(inode->i_sb);
1417
1418         mutex_lock(&inode->i_mutex);
1419
1420         err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
1421         if (err) {
1422                 mutex_unlock(&inode->i_mutex);
1423                 goto out;
1424         }
1425         count = ocount;
1426
1427         current->backing_dev_info = inode->i_mapping->backing_dev_info;
1428         err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1429         if (err) {
1430                 mutex_unlock(&inode->i_mutex);
1431                 goto out;
1432         }
1433
1434         if (count == 0) {
1435                 mutex_unlock(&inode->i_mutex);
1436                 goto out;
1437         }
1438
1439         err = file_remove_suid(file);
1440         if (err) {
1441                 mutex_unlock(&inode->i_mutex);
1442                 goto out;
1443         }
1444
1445         /*
1446          * If BTRFS flips readonly due to some impossible error
1447          * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1448          * although we have opened a file as writable, we have
1449          * to stop this write operation to ensure FS consistency.
1450          */
1451         if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
1452                 mutex_unlock(&inode->i_mutex);
1453                 err = -EROFS;
1454                 goto out;
1455         }
1456
1457         err = file_update_time(file);
1458         if (err) {
1459                 mutex_unlock(&inode->i_mutex);
1460                 goto out;
1461         }
1462
1463         start_pos = round_down(pos, root->sectorsize);
1464         if (start_pos > i_size_read(inode)) {
1465                 err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
1466                 if (err) {
1467                         mutex_unlock(&inode->i_mutex);
1468                         goto out;
1469                 }
1470         }
1471
1472         if (unlikely(file->f_flags & O_DIRECT)) {
1473                 num_written = __btrfs_direct_write(iocb, iov, nr_segs,
1474                                                    pos, ppos, count, ocount);
1475         } else {
1476                 struct iov_iter i;
1477
1478                 iov_iter_init(&i, iov, nr_segs, count, num_written);
1479
1480                 num_written = __btrfs_buffered_write(file, &i, pos);
1481                 if (num_written > 0)
1482                         *ppos = pos + num_written;
1483         }
1484
1485         mutex_unlock(&inode->i_mutex);
1486
1487         /*
1488          * we want to make sure fsync finds this change
1489          * but we haven't joined a transaction running right now.
1490          *
1491          * Later on, someone is sure to update the inode and get the
1492          * real transid recorded.
1493          *
1494          * We set last_trans now to the fs_info generation + 1,
1495          * this will either be one more than the running transaction
1496          * or the generation used for the next transaction if there isn't
1497          * one running right now.
1498          */
1499         BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
1500         if (num_written > 0 || num_written == -EIOCBQUEUED) {
1501                 err = generic_write_sync(file, pos, num_written);
1502                 if (err < 0 && num_written > 0)
1503                         num_written = err;
1504         }
1505 out:
1506         sb_end_write(inode->i_sb);
1507         current->backing_dev_info = NULL;
1508         return num_written ? num_written : err;
1509 }
1510
1511 int btrfs_release_file(struct inode *inode, struct file *filp)
1512 {
1513         /*
1514          * ordered_data_close is set by setattr when we are about to truncate
1515          * a file from a non-zero size to a zero size.  This tries to
1516          * flush down new bytes that may have been written if the
1517          * application were using truncate to replace a file in place.
1518          */
1519         if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
1520                                &BTRFS_I(inode)->runtime_flags)) {
1521                 btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
1522                 if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
1523                         filemap_flush(inode->i_mapping);
1524         }
1525         if (filp->private_data)
1526                 btrfs_ioctl_trans_end(filp);
1527         return 0;
1528 }
1529
1530 /*
1531  * fsync call for both files and directories.  This logs the inode into
1532  * the tree log instead of forcing full commits whenever possible.
1533  *
1534  * It needs to call filemap_fdatawait so that all ordered extent updates
1535  * in the metadata btree are up to date for copying to the log.
1536  *
1537  * It drops the inode mutex before doing the tree log commit.  This is an
1538  * important optimization for directories because holding the mutex prevents
1539  * new operations on the dir while we write to disk.
1540  */
1541 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1542 {
1543         struct dentry *dentry = file->f_path.dentry;
1544         struct inode *inode = dentry->d_inode;
1545         struct btrfs_root *root = BTRFS_I(inode)->root;
1546         int ret = 0;
1547         struct btrfs_trans_handle *trans;
1548
1549         trace_btrfs_sync_file(file, datasync);
1550
1551         /*
1552          * We write the dirty pages in the range and wait until they complete
1553          * outside of the ->i_mutex, so multiple tasks can flush the dirty
1554          * pages concurrently and improve performance.
1555          */
1556         ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
1557         if (ret)
1558                 return ret;
1559
1560         mutex_lock(&inode->i_mutex);
1561
1562         /*
1563          * We flush the dirty pages again to avoid leaving any dirty pages
1564          * in the range.
1565          */
1566         atomic_inc(&root->log_batch);
1567         btrfs_wait_ordered_range(inode, start, end - start + 1);
1568         atomic_inc(&root->log_batch);
1569
1570         /*
1571          * check the transaction that last modified this inode
1572          * and see if its already been committed
1573          */
1574         if (!BTRFS_I(inode)->last_trans) {
1575                 mutex_unlock(&inode->i_mutex);
1576                 goto out;
1577         }
1578
1579         /*
1580          * if the last transaction that changed this file was before
1581          * the current transaction, we can bail out now without any
1582          * syncing
1583          */
1584         smp_mb();
1585         if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
1586             BTRFS_I(inode)->last_trans <=
1587             root->fs_info->last_trans_committed) {
1588                 BTRFS_I(inode)->last_trans = 0;
1589
1590                 /*
1591                  * We've had everything committed since the last time we were
1592                  * modified so clear this flag in case it was set for whatever
1593                  * reason, it's no longer relevant.
1594                  */
1595                 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1596                           &BTRFS_I(inode)->runtime_flags);
1597                 mutex_unlock(&inode->i_mutex);
1598                 goto out;
1599         }
1600
1601         /*
1602          * ok we haven't committed the transaction yet, let's do a commit
1603          */
1604         if (file->private_data)
1605                 btrfs_ioctl_trans_end(file);
1606
1607         trans = btrfs_start_transaction(root, 0);
1608         if (IS_ERR(trans)) {
1609                 ret = PTR_ERR(trans);
1610                 mutex_unlock(&inode->i_mutex);
1611                 goto out;
1612         }
1613
1614         ret = btrfs_log_dentry_safe(trans, root, dentry);
1615         if (ret < 0) {
1616                 mutex_unlock(&inode->i_mutex);
1617                 goto out;
1618         }
1619
1620         /* we've logged all the items and now have a consistent
1621          * version of the file in the log.  It is possible that
1622          * someone will come in and modify the file, but that's
1623          * fine because the log is consistent on disk, and we
1624          * have references to all of the file's extents
1625          *
1626          * It is possible that someone will come in and log the
1627          * file again, but that will end up using the synchronization
1628          * inside btrfs_sync_log to keep things safe.
1629          */
1630         mutex_unlock(&inode->i_mutex);
1631
1632         if (ret != BTRFS_NO_LOG_SYNC) {
1633                 if (ret > 0) {
1634                         ret = btrfs_commit_transaction(trans, root);
1635                 } else {
1636                         ret = btrfs_sync_log(trans, root);
1637                         if (ret == 0)
1638                                 ret = btrfs_end_transaction(trans, root);
1639                         else
1640                                 ret = btrfs_commit_transaction(trans, root);
1641                 }
1642         } else {
1643                 ret = btrfs_end_transaction(trans, root);
1644         }
1645 out:
1646         return ret > 0 ? -EIO : ret;
1647 }
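/*
 * Example (an illustrative userspace sketch, not part of this file):
 * the common pattern this fast path serves is a small overwrite plus
 * fsync(), which logs just the one inode instead of committing the
 * whole transaction:
 *
 *   int fd = open("file", O_WRONLY);   // hypothetical usage
 *   pwrite(fd, buf, 4096, 0);
 *   fsync(fd);   // ends up here: log the inode, then sync the log
 */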
1648
1649 static const struct vm_operations_struct btrfs_file_vm_ops = {
1650         .fault          = filemap_fault,
1651         .page_mkwrite   = btrfs_page_mkwrite,
1652         .remap_pages    = generic_file_remap_pages,
1653 };
1654
1655 static int btrfs_file_mmap(struct file  *filp, struct vm_area_struct *vma)
1656 {
1657         struct address_space *mapping = filp->f_mapping;
1658
1659         if (!mapping->a_ops->readpage)
1660                 return -ENOEXEC;
1661
1662         file_accessed(filp);
1663         vma->vm_ops = &btrfs_file_vm_ops;
1664
1665         return 0;
1666 }
1667
1668 static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
1669                           int slot, u64 start, u64 end)
1670 {
1671         struct btrfs_file_extent_item *fi;
1672         struct btrfs_key key;
1673
1674         if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1675                 return 0;
1676
1677         btrfs_item_key_to_cpu(leaf, &key, slot);
1678         if (key.objectid != btrfs_ino(inode) ||
1679             key.type != BTRFS_EXTENT_DATA_KEY)
1680                 return 0;
1681
1682         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1683
1684         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
1685                 return 0;
1686
1687         if (btrfs_file_extent_disk_bytenr(leaf, fi))
1688                 return 0;
1689
1690         if (key.offset == end)
1691                 return 1;
1692         if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
1693                 return 1;
1694         return 0;
1695 }
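/*
 * The two mergeable layouts checked above, pictured (illustrative):
 *
 *   key.offset == end:                    [new hole)[existing hole)
 *   key.offset + num_bytes == start:      [existing hole)[new hole)
 *
 * In both cases the neighbour is a regular extent with
 * disk_bytenr == 0 (an explicit hole), so fill_holes() can grow that
 * item instead of inserting a second one.
 */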
1696
1697 static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
1698                       struct btrfs_path *path, u64 offset, u64 end)
1699 {
1700         struct btrfs_root *root = BTRFS_I(inode)->root;
1701         struct extent_buffer *leaf;
1702         struct btrfs_file_extent_item *fi;
1703         struct extent_map *hole_em;
1704         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1705         struct btrfs_key key;
1706         int ret;
1707
1708         key.objectid = btrfs_ino(inode);
1709         key.type = BTRFS_EXTENT_DATA_KEY;
1710         key.offset = offset;
1711
1712
1713         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1714         if (ret < 0)
1715                 return ret;
1716         BUG_ON(!ret);
1717
1718         leaf = path->nodes[0];
1719         if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
1720                 u64 num_bytes;
1721
1722                 path->slots[0]--;
1723                 fi = btrfs_item_ptr(leaf, path->slots[0],
1724                                     struct btrfs_file_extent_item);
1725                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
1726                         end - offset;
1727                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1728                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1729                 btrfs_set_file_extent_offset(leaf, fi, 0);
1730                 btrfs_mark_buffer_dirty(leaf);
1731                 goto out;
1732         }
1733
1734         if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
1735                 u64 num_bytes;
1736
1737                 path->slots[0]++;
1738                 key.offset = offset;
1739                 btrfs_set_item_key_safe(trans, root, path, &key);
1740                 fi = btrfs_item_ptr(leaf, path->slots[0],
1741                                     struct btrfs_file_extent_item);
1742                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
1743                         offset;
1744                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1745                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1746                 btrfs_set_file_extent_offset(leaf, fi, 0);
1747                 btrfs_mark_buffer_dirty(leaf);
1748                 goto out;
1749         }
1750         btrfs_release_path(path);
1751
1752         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
1753                                        0, 0, end - offset, 0, end - offset,
1754                                        0, 0, 0);
1755         if (ret)
1756                 return ret;
1757
1758 out:
1759         btrfs_release_path(path);
1760
1761         hole_em = alloc_extent_map();
1762         if (!hole_em) {
1763                 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
1764                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1765                         &BTRFS_I(inode)->runtime_flags);
1766         } else {
1767                 hole_em->start = offset;
1768                 hole_em->len = end - offset;
1769                 hole_em->orig_start = offset;
1770
1771                 hole_em->block_start = EXTENT_MAP_HOLE;
1772                 hole_em->block_len = 0;
1773                 hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
1774                 hole_em->compress_type = BTRFS_COMPRESS_NONE;
1775                 hole_em->generation = trans->transid;
1776
1777                 do {
1778                         btrfs_drop_extent_cache(inode, offset, end - 1, 0);
1779                         write_lock(&em_tree->lock);
1780                         ret = add_extent_mapping(em_tree, hole_em);
1781                         if (!ret)
1782                                 list_move(&hole_em->list,
1783                                           &em_tree->modified_extents);
1784                         write_unlock(&em_tree->lock);
1785                 } while (ret == -EEXIST);
1786                 free_extent_map(hole_em);
1787                 if (ret)
1788                         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1789                                 &BTRFS_I(inode)->runtime_flags);
1790         }
1791
1792         return 0;
1793 }
1794
1795 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
1796 {
1797         struct btrfs_root *root = BTRFS_I(inode)->root;
1798         struct extent_state *cached_state = NULL;
1799         struct btrfs_path *path;
1800         struct btrfs_block_rsv *rsv;
1801         struct btrfs_trans_handle *trans;
1802         u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
1803         u64 lockstart = (offset + mask) & ~mask;
1804         u64 lockend = ((offset + len) & ~mask) - 1;
1805         u64 cur_offset = lockstart;
1806         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
1807         u64 drop_end;
1808         int ret = 0;
1809         int err = 0;
1810         bool same_page = (offset >> PAGE_CACHE_SHIFT) ==
1811                 ((offset + len) >> PAGE_CACHE_SHIFT);
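/*
 * A worked example of the alignment math above (a sketch assuming 4K
 * sectors; the offsets are hypothetical): punching offset = 1000,
 * len = 10000 gives
 *
 *   lockstart = round_up(1000, 4096)               = 4096
 *   lockend   = round_down(1000 + 10000, 4096) - 1 = 8191
 *
 * Only whole blocks in [4096, 8191] are dropped; the unaligned head
 * (1000..4095) and tail (8192..10999) are zeroed in place by the
 * btrfs_truncate_page() calls below.
 */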
1812
1813         btrfs_wait_ordered_range(inode, offset, len);
1814
1815         mutex_lock(&inode->i_mutex);
1816         if (offset >= inode->i_size) {
1817                 mutex_unlock(&inode->i_mutex);
1818                 return 0;
1819         }
1820
1821         /*
1822          * Only do this if we are in the same page and we aren't doing the
1823          * entire page.
1824          */
1825         if (same_page && len < PAGE_CACHE_SIZE) {
1826                 ret = btrfs_truncate_page(inode, offset, len, 0);
1827                 mutex_unlock(&inode->i_mutex);
1828                 return ret;
1829         }
1830
1831         /* zero back part of the first page */
1832         ret = btrfs_truncate_page(inode, offset, 0, 0);
1833         if (ret) {
1834                 mutex_unlock(&inode->i_mutex);
1835                 return ret;
1836         }
1837
1838         /* zero the front end of the last page */
1839         ret = btrfs_truncate_page(inode, offset + len, 0, 1);
1840         if (ret) {
1841                 mutex_unlock(&inode->i_mutex);
1842                 return ret;
1843         }
1844
1845         if (lockend < lockstart) {
1846                 mutex_unlock(&inode->i_mutex);
1847                 return 0;
1848         }
1849
1850         while (1) {
1851                 struct btrfs_ordered_extent *ordered;
1852
1853                 truncate_pagecache_range(inode, lockstart, lockend);
1854
1855                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
1856                                  0, &cached_state);
1857                 ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
1858
1859                 /*
1860                  * We need to make sure we have no ordered extents in this range
1861                  * and that nobody raced in and read a page in this range; if
1862                  * they did, we need to try again.
1863                  */
1864                 if ((!ordered ||
1865                     (ordered->file_offset + ordered->len < lockstart ||
1866                      ordered->file_offset > lockend)) &&
1867                      !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
1868                                      lockend, EXTENT_UPTODATE, 0,
1869                                      cached_state)) {
1870                         if (ordered)
1871                                 btrfs_put_ordered_extent(ordered);
1872                         break;
1873                 }
1874                 if (ordered)
1875                         btrfs_put_ordered_extent(ordered);
1876                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
1877                                      lockend, &cached_state, GFP_NOFS);
1878                 btrfs_wait_ordered_range(inode, lockstart,
1879                                          lockend - lockstart + 1);
1880         }
1881
1882         path = btrfs_alloc_path();
1883         if (!path) {
1884                 ret = -ENOMEM;
1885                 goto out;
1886         }
1887
1888         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
1889         if (!rsv) {
1890                 ret = -ENOMEM;
1891                 goto out_free;
1892         }
1893         rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
1894         rsv->failfast = 1;
1895
1896         /*
1897          * 1 - update the inode
1898          * 1 - removing the extents in the range
1899          * 1 - adding the hole extent
1900          */
1901         trans = btrfs_start_transaction(root, 3);
1902         if (IS_ERR(trans)) {
1903                 err = PTR_ERR(trans);
1904                 goto out_free;
1905         }
1906
1907         ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
1908                                       min_size);
1909         BUG_ON(ret);
1910         trans->block_rsv = rsv;
1911
1912         while (cur_offset < lockend) {
1913                 ret = __btrfs_drop_extents(trans, root, inode, path,
1914                                            cur_offset, lockend + 1,
1915                                            &drop_end, 1);
1916                 if (ret != -ENOSPC)
1917                         break;
1918
1919                 trans->block_rsv = &root->fs_info->trans_block_rsv;
1920
1921                 ret = fill_holes(trans, inode, path, cur_offset, drop_end);
1922                 if (ret) {
1923                         err = ret;
1924                         break;
1925                 }
1926
1927                 cur_offset = drop_end;
1928
1929                 ret = btrfs_update_inode(trans, root, inode);
1930                 if (ret) {
1931                         err = ret;
1932                         break;
1933                 }
1934
1935                 btrfs_end_transaction(trans, root);
1936                 btrfs_btree_balance_dirty(root);
1937
1938                 trans = btrfs_start_transaction(root, 3);
1939                 if (IS_ERR(trans)) {
1940                         ret = PTR_ERR(trans);
1941                         trans = NULL;
1942                         break;
1943                 }
1944
1945                 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
1946                                               rsv, min_size);
1947                 BUG_ON(ret);    /* shouldn't happen */
1948                 trans->block_rsv = rsv;
1949         }
1950
1951         if (ret) {
1952                 err = ret;
1953                 goto out_trans;
1954         }
1955
1956         trans->block_rsv = &root->fs_info->trans_block_rsv;
1957         ret = fill_holes(trans, inode, path, cur_offset, drop_end);
1958         if (ret) {
1959                 err = ret;
1960                 goto out_trans;
1961         }
1962
1963 out_trans:
1964         if (!trans)
1965                 goto out_free;
1966
1967         inode_inc_iversion(inode);
1968         inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1969
1970         trans->block_rsv = &root->fs_info->trans_block_rsv;
1971         ret = btrfs_update_inode(trans, root, inode);
1972         btrfs_end_transaction(trans, root);
1973         btrfs_btree_balance_dirty(root);
1974 out_free:
1975         btrfs_free_path(path);
1976         btrfs_free_block_rsv(root, rsv);
1977 out:
1978         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
1979                              &cached_state, GFP_NOFS);
1980         mutex_unlock(&inode->i_mutex);
1981         if (ret && !err)
1982                 err = ret;
1983         return err;
1984 }
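/*
 * Example (an illustrative userspace sketch): hole punching is reached
 * via fallocate(2), where FALLOC_FL_PUNCH_HOLE must be paired with
 * FALLOC_FL_KEEP_SIZE:
 *
 *   fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *             4096, 8192);   // drop 8K of data starting at offset 4K
 */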
1985
1986 static long btrfs_fallocate(struct file *file, int mode,
1987                             loff_t offset, loff_t len)
1988 {
1989         struct inode *inode = file->f_path.dentry->d_inode;
1990         struct extent_state *cached_state = NULL;
1991         u64 cur_offset;
1992         u64 last_byte;
1993         u64 alloc_start;
1994         u64 alloc_end;
1995         u64 alloc_hint = 0;
1996         u64 locked_end;
1997         u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
1998         struct extent_map *em;
1999         int ret;
2000
2001         alloc_start = offset & ~mask;
2002         alloc_end =  (offset + len + mask) & ~mask;
2003
2004         /* Make sure we aren't being given some crap mode */
2005         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2006                 return -EOPNOTSUPP;
2007
2008         if (mode & FALLOC_FL_PUNCH_HOLE)
2009                 return btrfs_punch_hole(inode, offset, len);
2010
2011         /*
2012          * Make sure we have enough space before we do the
2013          * allocation.
2014          */
2015         ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start + 1);
2016         if (ret)
2017                 return ret;
2018
2019         /*
2020          * wait for ordered IO before we have any locks.  We'll loop again
2021          * below with the locks held.
2022          */
2023         btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
2024
2025         mutex_lock(&inode->i_mutex);
2026         ret = inode_newsize_ok(inode, alloc_end);
2027         if (ret)
2028                 goto out;
2029
2030         if (alloc_start > inode->i_size) {
2031                 ret = btrfs_cont_expand(inode, i_size_read(inode),
2032                                         alloc_start);
2033                 if (ret)
2034                         goto out;
2035         }
2036
2037         locked_end = alloc_end - 1;
2038         while (1) {
2039                 struct btrfs_ordered_extent *ordered;
2040
2041                 /* the extent lock is ordered inside the running
2042                  * transaction
2043                  */
2044                 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
2045                                  locked_end, 0, &cached_state);
2046                 ordered = btrfs_lookup_first_ordered_extent(inode,
2047                                                             alloc_end - 1);
2048                 if (ordered &&
2049                     ordered->file_offset + ordered->len > alloc_start &&
2050                     ordered->file_offset < alloc_end) {
2051                         btrfs_put_ordered_extent(ordered);
2052                         unlock_extent_cached(&BTRFS_I(inode)->io_tree,
2053                                              alloc_start, locked_end,
2054                                              &cached_state, GFP_NOFS);
2055                         /*
2056                          * we can't wait on the range with the transaction
2057                          * running or with the extent lock held
2058                          */
2059                         btrfs_wait_ordered_range(inode, alloc_start,
2060                                                  alloc_end - alloc_start);
2061                 } else {
2062                         if (ordered)
2063                                 btrfs_put_ordered_extent(ordered);
2064                         break;
2065                 }
2066         }
2067
2068         cur_offset = alloc_start;
2069         while (1) {
2070                 u64 actual_end;
2071
2072                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2073                                       alloc_end - cur_offset, 0);
2074                 if (IS_ERR_OR_NULL(em)) {
2075                         if (!em)
2076                                 ret = -ENOMEM;
2077                         else
2078                                 ret = PTR_ERR(em);
2079                         break;
2080                 }
2081                 last_byte = min(extent_map_end(em), alloc_end);
2082                 actual_end = min_t(u64, extent_map_end(em), offset + len);
2083                 last_byte = (last_byte + mask) & ~mask;
2084
2085                 if (em->block_start == EXTENT_MAP_HOLE ||
2086                     (cur_offset >= inode->i_size &&
2087                      !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
2088                         ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
2089                                                         last_byte - cur_offset,
2090                                                         1 << inode->i_blkbits,
2091                                                         offset + len,
2092                                                         &alloc_hint);
2093
2094                         if (ret < 0) {
2095                                 free_extent_map(em);
2096                                 break;
2097                         }
2098                 } else if (actual_end > inode->i_size &&
2099                            !(mode & FALLOC_FL_KEEP_SIZE)) {
2100                         /*
2101                          * We didn't need to allocate any more space, but we
2102                          * still extended the size of the file so we need to
2103                          * update i_size.
2104                          */
2105                         inode->i_ctime = CURRENT_TIME;
2106                         i_size_write(inode, actual_end);
2107                         btrfs_ordered_update_i_size(inode, actual_end, NULL);
2108                 }
2109                 free_extent_map(em);
2110
2111                 cur_offset = last_byte;
2112                 if (cur_offset >= alloc_end) {
2113                         ret = 0;
2114                         break;
2115                 }
2116         }
2117         unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
2118                              &cached_state, GFP_NOFS);
2119 out:
2120         mutex_unlock(&inode->i_mutex);
2121         /* Let go of our reservation. */
2122         btrfs_free_reserved_data_space(inode, alloc_end - alloc_start + 1);
2123         return ret;
2124 }
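/*
 * Example (an illustrative userspace sketch) of the two preallocation
 * modes handled above:
 *
 *   fallocate(fd, 0, 0, 1 << 20);        // alloc 1M, grows i_size
 *   fallocate(fd, FALLOC_FL_KEEP_SIZE,
 *             1 << 20, 1 << 20);         // alloc past EOF, size kept
 */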
2125
2126 static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
2127 {
2128         struct btrfs_root *root = BTRFS_I(inode)->root;
2129         struct extent_map *em;
2130         struct extent_state *cached_state = NULL;
2131         u64 lockstart = *offset;
2132         u64 lockend = i_size_read(inode);
2133         u64 start = *offset;
2134         u64 orig_start = *offset;
2135         u64 len = i_size_read(inode);
2136         u64 last_end = 0;
2137         int ret = 0;
2138
2139         lockend = max_t(u64, root->sectorsize, lockend);
2140         if (lockend <= lockstart)
2141                 lockend = lockstart + root->sectorsize;
2142
2143         len = lockend - lockstart + 1;
2144
2145         len = max_t(u64, len, root->sectorsize);
2146         if (inode->i_size == 0)
2147                 return -ENXIO;
2148
2149         lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
2150                          &cached_state);
2151
2152         /*
2153          * Delalloc is such a pain.  If we have a hole and we have pending
2154          * delalloc for a portion of the hole we will get back a hole that
2155          * exists for the entire range since it hasn't been actually written
2156          * yet.  So to take care of this case we need to look for an extent just
2157          * before the position we want in case there is outstanding delalloc
2158          * going on here.
2159          */
2160         if (origin == SEEK_HOLE && start != 0) {
2161                 if (start <= root->sectorsize)
2162                         em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
2163                                                      root->sectorsize, 0);
2164                 else
2165                         em = btrfs_get_extent_fiemap(inode, NULL, 0,
2166                                                      start - root->sectorsize,
2167                                                      root->sectorsize, 0);
2168                 if (IS_ERR(em)) {
2169                         ret = PTR_ERR(em);
2170                         goto out;
2171                 }
2172                 last_end = em->start + em->len;
2173                 if (em->block_start == EXTENT_MAP_DELALLOC)
2174                         last_end = min_t(u64, last_end, inode->i_size);
2175                 free_extent_map(em);
2176         }
2177
2178         while (1) {
2179                 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
2180                 if (IS_ERR(em)) {
2181                         ret = PTR_ERR(em);
2182                         break;
2183                 }
2184
2185                 if (em->block_start == EXTENT_MAP_HOLE) {
2186                         if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2187                                 if (last_end <= orig_start) {
2188                                         free_extent_map(em);
2189                                         ret = -ENXIO;
2190                                         break;
2191                                 }
2192                         }
2193
2194                         if (origin == SEEK_HOLE) {
2195                                 *offset = start;
2196                                 free_extent_map(em);
2197                                 break;
2198                         }
2199                 } else {
2200                         if (origin == SEEK_DATA) {
2201                                 if (em->block_start == EXTENT_MAP_DELALLOC) {
2202                                         if (start >= inode->i_size) {
2203                                                 free_extent_map(em);
2204                                                 ret = -ENXIO;
2205                                                 break;
2206                                         }
2207                                 }
2208
2209                                 *offset = start;
2210                                 free_extent_map(em);
2211                                 break;
2212                         }
2213                 }
2214
2215                 start = em->start + em->len;
2216                 last_end = em->start + em->len;
2217
2218                 if (em->block_start == EXTENT_MAP_DELALLOC)
2219                         last_end = min_t(u64, last_end, inode->i_size);
2220
2221                 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2222                         free_extent_map(em);
2223                         ret = -ENXIO;
2224                         break;
2225                 }
2226                 free_extent_map(em);
2227                 cond_resched();
2228         }
2229         if (!ret)
2230                 *offset = min(*offset, inode->i_size);
2231 out:
2232         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2233                              &cached_state, GFP_NOFS);
2234         return ret;
2235 }
2236
2237 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
2238 {
2239         struct inode *inode = file->f_mapping->host;
2240         int ret;
2241
2242         mutex_lock(&inode->i_mutex);
2243         switch (origin) {
2244         case SEEK_END:
2245         case SEEK_CUR:
2246                 offset = generic_file_llseek(file, offset, origin);
2247                 goto out;
2248         case SEEK_DATA:
2249         case SEEK_HOLE:
2250                 if (offset >= i_size_read(inode)) {
2251                         mutex_unlock(&inode->i_mutex);
2252                         return -ENXIO;
2253                 }
2254
2255                 ret = find_desired_extent(inode, &offset, origin);
2256                 if (ret) {
2257                         mutex_unlock(&inode->i_mutex);
2258                         return ret;
2259                 }
2260         }
2261
2262         if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
2263                 offset = -EINVAL;
2264                 goto out;
2265         }
2266         if (offset > inode->i_sb->s_maxbytes) {
2267                 offset = -EINVAL;
2268                 goto out;
2269         }
2270
2271         /* Special lock needed here? */
2272         if (offset != file->f_pos) {
2273                 file->f_pos = offset;
2274                 file->f_version = 0;
2275         }
2276 out:
2277         mutex_unlock(&inode->i_mutex);
2278         return offset;
2279 }
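/*
 * Example (an illustrative userspace sketch): the SEEK_DATA/SEEK_HOLE
 * support above lets a sparse-aware copy skip holes entirely:
 *
 *   off_t data = lseek(fd, 0, SEEK_DATA);      // start of first data run
 *   off_t hole = lseek(fd, data, SEEK_HOLE);   // end of that data run
 *   // copy [data, hole), then continue the scan from 'hole'
 */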
2280
2281 const struct file_operations btrfs_file_operations = {
2282         .llseek         = btrfs_file_llseek,
2283         .read           = do_sync_read,
2284         .write          = do_sync_write,
2285         .aio_read       = generic_file_aio_read,
2286         .splice_read    = generic_file_splice_read,
2287         .aio_write      = btrfs_file_aio_write,
2288         .mmap           = btrfs_file_mmap,
2289         .open           = generic_file_open,
2290         .release        = btrfs_release_file,
2291         .fsync          = btrfs_sync_file,
2292         .fallocate      = btrfs_fallocate,
2293         .unlocked_ioctl = btrfs_ioctl,
2294 #ifdef CONFIG_COMPAT
2295         .compat_ioctl   = btrfs_ioctl,
2296 #endif
2297 };
2298
2299 void btrfs_auto_defrag_exit(void)
2300 {
2301         if (btrfs_inode_defrag_cachep)
2302                 kmem_cache_destroy(btrfs_inode_defrag_cachep);
2303 }
2304
2305 int btrfs_auto_defrag_init(void)
2306 {
2307         btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
2308                                         sizeof(struct inode_defrag), 0,
2309                                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2310                                         NULL);
2311         if (!btrfs_inode_defrag_cachep)
2312                 return -ENOMEM;
2313
2314         return 0;
2315 }