/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP         (PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
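
/*
 * One bitmap tracks BITS_PER_BITMAP units of space, one unit per bit.
 * With 4K pages and a 4K sectorsize (the common case) that is 32768
 * bits, so a single bitmap covers 128M of a block group.
 * MAX_CACHE_BYTES_PER_GIG bounds how much memory the in-core cache may
 * use per 1G of tracked space; see recalculate_thresholds() below.
 */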

static int link_free_space(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info);

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
                                               struct btrfs_path *path,
                                               u64 offset)
{
        struct btrfs_key key;
        struct btrfs_key location;
        struct btrfs_disk_key disk_key;
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
        struct inode *inode = NULL;
        int ret;

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                return ERR_PTR(ret);
        if (ret > 0) {
                btrfs_release_path(path);
                return ERR_PTR(-ENOENT);
        }

        leaf = path->nodes[0];
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        btrfs_free_space_key(leaf, header, &disk_key);
        btrfs_disk_key_to_cpu(&location, &disk_key);
        btrfs_release_path(path);

        inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
        if (!inode)
                return ERR_PTR(-ENOENT);
        if (IS_ERR(inode))
                return inode;
        if (is_bad_inode(inode)) {
                iput(inode);
                return ERR_PTR(-ENOENT);
        }

        /*
         * Keep memory reclaim from recursing back into the filesystem
         * while we populate this mapping.
         */
        inode->i_mapping->flags &= ~__GFP_FS;

        return inode;
}

struct inode *lookup_free_space_inode(struct btrfs_root *root,
                                      struct btrfs_block_group_cache
                                      *block_group, struct btrfs_path *path)
{
        struct inode *inode = NULL;
        u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

        spin_lock(&block_group->lock);
        if (block_group->inode)
                inode = igrab(block_group->inode);
        spin_unlock(&block_group->lock);
        if (inode)
                return inode;

        inode = __lookup_free_space_inode(root, path,
                                          block_group->key.objectid);
        if (IS_ERR(inode))
                return inode;

        spin_lock(&block_group->lock);
        if (!((BTRFS_I(inode)->flags & flags) == flags)) {
                printk(KERN_INFO "Old style space inode found, converting.\n");
                BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
                        BTRFS_INODE_NODATACOW;
                block_group->disk_cache_state = BTRFS_DC_CLEAR;
        }

        if (!block_group->iref) {
                block_group->inode = igrab(inode);
                block_group->iref = 1;
        }
        spin_unlock(&block_group->lock);

        return inode;
}

int __create_free_space_inode(struct btrfs_root *root,
                              struct btrfs_trans_handle *trans,
                              struct btrfs_path *path, u64 ino, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_disk_key disk_key;
        struct btrfs_free_space_header *header;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
        u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
        int ret;

        ret = btrfs_insert_empty_inode(trans, root, path, ino);
        if (ret)
                return ret;

        /* We inline CRCs for the free space cache */
        if (ino != BTRFS_FREE_INO_OBJECTID)
                flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

        leaf = path->nodes[0];
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
        btrfs_item_key(leaf, &disk_key, path->slots[0]);
        memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
                             sizeof(*inode_item));
        btrfs_set_inode_generation(leaf, inode_item, trans->transid);
        btrfs_set_inode_size(leaf, inode_item, 0);
        btrfs_set_inode_nbytes(leaf, inode_item, 0);
        btrfs_set_inode_uid(leaf, inode_item, 0);
        btrfs_set_inode_gid(leaf, inode_item, 0);
        btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
        btrfs_set_inode_flags(leaf, inode_item, flags);
        btrfs_set_inode_nlink(leaf, inode_item, 1);
        btrfs_set_inode_transid(leaf, inode_item, trans->transid);
        btrfs_set_inode_block_group(leaf, inode_item, offset);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(struct btrfs_free_space_header));
        if (ret < 0) {
                btrfs_release_path(path);
                return ret;
        }
        leaf = path->nodes[0];
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
        btrfs_set_free_space_key(leaf, header, &disk_key);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        return 0;
}

int create_free_space_inode(struct btrfs_root *root,
                            struct btrfs_trans_handle *trans,
                            struct btrfs_block_group_cache *block_group,
                            struct btrfs_path *path)
{
        int ret;
        u64 ino;

        ret = btrfs_find_free_objectid(root, &ino);
        if (ret < 0)
                return ret;

        return __create_free_space_inode(root, trans, path, ino,
                                         block_group->key.objectid);
}

int btrfs_truncate_free_space_cache(struct btrfs_root *root,
                                    struct btrfs_trans_handle *trans,
                                    struct btrfs_path *path,
                                    struct inode *inode)
{
        struct btrfs_block_rsv *rsv;
        u64 needed_bytes;
        loff_t oldsize;
        int ret = 0;

        rsv = trans->block_rsv;
        trans->block_rsv = &root->fs_info->global_block_rsv;

        /* 1 for slack space, 1 for updating the inode */
        needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
                btrfs_calc_trans_metadata_size(root, 1);

        spin_lock(&trans->block_rsv->lock);
        if (trans->block_rsv->reserved < needed_bytes) {
                spin_unlock(&trans->block_rsv->lock);
                trans->block_rsv = rsv;
                return -ENOSPC;
        }
        spin_unlock(&trans->block_rsv->lock);

        oldsize = i_size_read(inode);
        btrfs_i_size_write(inode, 0);
        truncate_pagecache(inode, oldsize, 0);

        /*
         * We don't need an orphan item because truncating the free space cache
         * will never be split across transactions.
         */
        ret = btrfs_truncate_inode_items(trans, root, inode,
                                         0, BTRFS_EXTENT_DATA_KEY);

        if (ret) {
                trans->block_rsv = rsv;
                btrfs_abort_transaction(trans, root, ret);
                return ret;
        }

        ret = btrfs_update_inode(trans, root, inode);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
        trans->block_rsv = rsv;

        return ret;
}

static int readahead_cache(struct inode *inode)
{
        struct file_ra_state *ra;
        unsigned long last_index;

        ra = kzalloc(sizeof(*ra), GFP_NOFS);
        if (!ra)
                return -ENOMEM;

        file_ra_state_init(ra, inode->i_mapping);
        last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

        page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

        kfree(ra);

        return 0;
}

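/*
 * struct io_ctl walks the cache inode's pages one at a time.  The file
 * layout it reads and writes is: page 0 starts with one le32 crc slot per
 * page (when check_crcs is set), then a le64 generation, then tightly
 * packed btrfs_free_space_entry records; each bitmap then occupies a full
 * page of its own after the entries.
 */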
struct io_ctl {
        void *cur, *orig;
        struct page *page;
        struct page **pages;
        struct btrfs_root *root;
        unsigned long size;
        int index;
        int num_pages;
        unsigned check_crcs:1;
};

static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
                       struct btrfs_root *root)
{
        memset(io_ctl, 0, sizeof(struct io_ctl));
        io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
                PAGE_CACHE_SHIFT;
        io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
                                GFP_NOFS);
        if (!io_ctl->pages)
                return -ENOMEM;
        io_ctl->root = root;
        if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
                io_ctl->check_crcs = 1;
        return 0;
}

static void io_ctl_free(struct io_ctl *io_ctl)
{
        kfree(io_ctl->pages);
}

static void io_ctl_unmap_page(struct io_ctl *io_ctl)
{
        if (io_ctl->cur) {
                kunmap(io_ctl->page);
                io_ctl->cur = NULL;
                io_ctl->orig = NULL;
        }
}

static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
        WARN_ON(io_ctl->cur);
        BUG_ON(io_ctl->index >= io_ctl->num_pages);
        io_ctl->page = io_ctl->pages[io_ctl->index++];
        io_ctl->cur = kmap(io_ctl->page);
        io_ctl->orig = io_ctl->cur;
        io_ctl->size = PAGE_CACHE_SIZE;
        if (clear)
                memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}

static void io_ctl_drop_pages(struct io_ctl *io_ctl)
{
        int i;

        io_ctl_unmap_page(io_ctl);

        for (i = 0; i < io_ctl->num_pages; i++) {
                if (io_ctl->pages[i]) {
                        ClearPageChecked(io_ctl->pages[i]);
                        unlock_page(io_ctl->pages[i]);
                        page_cache_release(io_ctl->pages[i]);
                }
        }
}

static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
                                int uptodate)
{
        struct page *page;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int i;

        for (i = 0; i < io_ctl->num_pages; i++) {
                page = find_or_create_page(inode->i_mapping, i, mask);
                if (!page) {
                        io_ctl_drop_pages(io_ctl);
                        return -ENOMEM;
                }
                io_ctl->pages[i] = page;
                if (uptodate && !PageUptodate(page)) {
                        btrfs_readpage(NULL, page);
                        lock_page(page);
                        if (!PageUptodate(page)) {
                                printk(KERN_ERR "btrfs: error reading free "
                                       "space cache\n");
                                io_ctl_drop_pages(io_ctl);
                                return -EIO;
                        }
                }
        }

        for (i = 0; i < io_ctl->num_pages; i++) {
                clear_page_dirty_for_io(io_ctl->pages[i]);
                set_page_extent_mapped(io_ctl->pages[i]);
        }

        return 0;
}

static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
{
        u64 *val;

        io_ctl_map_page(io_ctl, 1);

        /*
         * Skip the csum areas.  If we don't check crcs then we just have a
         * 64bit chunk at the front of the first page.
         */
        if (io_ctl->check_crcs) {
                io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
                io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
        } else {
                io_ctl->cur += sizeof(u64);
                io_ctl->size -= sizeof(u64) * 2;
        }

        val = io_ctl->cur;
        *val = cpu_to_le64(generation);
        io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
{
        u64 *gen;

        /*
         * Skip the crc area.  If we don't check crcs then we just have a 64bit
         * chunk at the front of the first page.
         */
        if (io_ctl->check_crcs) {
                io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
                io_ctl->size -= sizeof(u64) +
                        (sizeof(u32) * io_ctl->num_pages);
        } else {
                io_ctl->cur += sizeof(u64);
                io_ctl->size -= sizeof(u64) * 2;
        }

        gen = io_ctl->cur;
        if (le64_to_cpu(*gen) != generation) {
                printk_ratelimited(KERN_ERR "btrfs: space cache generation "
                                   "(%Lu) does not match inode (%Lu)\n", *gen,
                                   generation);
                io_ctl_unmap_page(io_ctl);
                return -EIO;
        }
        io_ctl->cur += sizeof(u64);
        return 0;
}
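
/*
 * Per-page crcs live in the slots reserved at the start of page 0; the
 * crc for page i goes in slot i.  Page 0's own crc is computed over the
 * bytes after the crc array, so the checksums never cover themselves.
 * Note that io_ctl_map_page() post-increments io_ctl->index, which is
 * why the writers below checksum page "index - 1", the page just filled.
 */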
static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
{
        u32 *tmp;
        u32 crc = ~(u32)0;
        unsigned offset = 0;

        if (!io_ctl->check_crcs) {
                io_ctl_unmap_page(io_ctl);
                return;
        }

        if (index == 0)
                offset = sizeof(u32) * io_ctl->num_pages;

        crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
                              PAGE_CACHE_SIZE - offset);
        btrfs_csum_final(crc, (char *)&crc);
        io_ctl_unmap_page(io_ctl);
        tmp = kmap(io_ctl->pages[0]);
        tmp += index;
        *tmp = crc;
        kunmap(io_ctl->pages[0]);
}

static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
{
        u32 *tmp, val;
        u32 crc = ~(u32)0;
        unsigned offset = 0;

        if (!io_ctl->check_crcs) {
                io_ctl_map_page(io_ctl, 0);
                return 0;
        }

        if (index == 0)
                offset = sizeof(u32) * io_ctl->num_pages;

        tmp = kmap(io_ctl->pages[0]);
        tmp += index;
        val = *tmp;
        kunmap(io_ctl->pages[0]);

        io_ctl_map_page(io_ctl, 0);
        crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
                              PAGE_CACHE_SIZE - offset);
        btrfs_csum_final(crc, (char *)&crc);
        if (val != crc) {
                printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free "
                                   "space cache\n");
                io_ctl_unmap_page(io_ctl);
                return -EIO;
        }

        return 0;
}

static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
                            void *bitmap)
{
        struct btrfs_free_space_entry *entry;

        if (!io_ctl->cur)
                return -ENOSPC;

        entry = io_ctl->cur;
        entry->offset = cpu_to_le64(offset);
        entry->bytes = cpu_to_le64(bytes);
        entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
                BTRFS_FREE_SPACE_EXTENT;
        io_ctl->cur += sizeof(struct btrfs_free_space_entry);
        io_ctl->size -= sizeof(struct btrfs_free_space_entry);

        if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
                return 0;

        io_ctl_set_crc(io_ctl, io_ctl->index - 1);

        /* No more pages to map */
        if (io_ctl->index >= io_ctl->num_pages)
                return 0;

        /* Map the next page */
        io_ctl_map_page(io_ctl, 1);
        return 0;
}

static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
{
        if (!io_ctl->cur)
                return -ENOSPC;

        /*
         * If we aren't at the start of the current page, unmap this one and
         * map the next one if there is any left.
         */
        if (io_ctl->cur != io_ctl->orig) {
                io_ctl_set_crc(io_ctl, io_ctl->index - 1);
                if (io_ctl->index >= io_ctl->num_pages)
                        return -ENOSPC;
                io_ctl_map_page(io_ctl, 0);
        }

        memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
        io_ctl_set_crc(io_ctl, io_ctl->index - 1);
        if (io_ctl->index < io_ctl->num_pages)
                io_ctl_map_page(io_ctl, 0);
        return 0;
}

static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
{
        /*
         * If we're not on the boundary we know we've modified the page and we
         * need to crc the page.
         */
        if (io_ctl->cur != io_ctl->orig)
                io_ctl_set_crc(io_ctl, io_ctl->index - 1);
        else
                io_ctl_unmap_page(io_ctl);

        while (io_ctl->index < io_ctl->num_pages) {
                io_ctl_map_page(io_ctl, 1);
                io_ctl_set_crc(io_ctl, io_ctl->index - 1);
        }
}

static int io_ctl_read_entry(struct io_ctl *io_ctl,
                            struct btrfs_free_space *entry, u8 *type)
{
        struct btrfs_free_space_entry *e;
        int ret;

        if (!io_ctl->cur) {
                ret = io_ctl_check_crc(io_ctl, io_ctl->index);
                if (ret)
                        return ret;
        }

        e = io_ctl->cur;
        entry->offset = le64_to_cpu(e->offset);
        entry->bytes = le64_to_cpu(e->bytes);
        *type = e->type;
        io_ctl->cur += sizeof(struct btrfs_free_space_entry);
        io_ctl->size -= sizeof(struct btrfs_free_space_entry);

        if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
                return 0;

        io_ctl_unmap_page(io_ctl);

        return 0;
}

static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
                              struct btrfs_free_space *entry)
{
        int ret;

        ret = io_ctl_check_crc(io_ctl, io_ctl->index);
        if (ret)
                return ret;

        memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
        io_ctl_unmap_page(io_ctl);

        return 0;
}

/*
 * Since we attach pinned extents after the fact we can have contiguous
 * sections of free space that are split up in entries.  This poses a problem
 * with the tree logging stuff, since it could have allocated across what
 * appears to be two entries: we would have merged those entries when adding
 * the pinned extents back to the free space cache.  So run through the space
 * cache that we just loaded and merge contiguous entries.  This keeps log
 * replay from blowing up and makes for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
        struct btrfs_free_space *e, *prev = NULL;
        struct rb_node *n;

again:
        spin_lock(&ctl->tree_lock);
        for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
                e = rb_entry(n, struct btrfs_free_space, offset_index);
                if (!prev)
                        goto next;
                if (e->bitmap || prev->bitmap)
                        goto next;
                if (prev->offset + prev->bytes == e->offset) {
                        unlink_free_space(ctl, prev);
                        unlink_free_space(ctl, e);
                        prev->bytes += e->bytes;
                        kmem_cache_free(btrfs_free_space_cachep, e);
                        link_free_space(ctl, prev);
                        prev = NULL;
                        spin_unlock(&ctl->tree_lock);
                        goto again;
                }
next:
                prev = e;
        }
        spin_unlock(&ctl->tree_lock);
}

int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
                            struct btrfs_free_space_ctl *ctl,
                            struct btrfs_path *path, u64 offset)
{
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
        struct io_ctl io_ctl;
        struct btrfs_key key;
        struct btrfs_free_space *e, *n;
        struct list_head bitmaps;
        u64 num_entries;
        u64 num_bitmaps;
        u64 generation;
        u8 type;
        int ret = 0;

        INIT_LIST_HEAD(&bitmaps);

        /* Nothing in the space cache, goodbye */
        if (!i_size_read(inode))
                return 0;

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                return 0;
        else if (ret > 0) {
                btrfs_release_path(path);
                return 0;
        }

        ret = -1;

        leaf = path->nodes[0];
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        num_entries = btrfs_free_space_entries(leaf, header);
        num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
        generation = btrfs_free_space_generation(leaf, header);
        btrfs_release_path(path);

        if (BTRFS_I(inode)->generation != generation) {
                printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
                       " not match free space cache generation (%llu)\n",
                       (unsigned long long)BTRFS_I(inode)->generation,
                       (unsigned long long)generation);
                return 0;
        }

        if (!num_entries)
                return 0;

        ret = io_ctl_init(&io_ctl, inode, root);
        if (ret)
                return ret;

        ret = readahead_cache(inode);
        if (ret)
                goto out;

        ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
        if (ret)
                goto out;

        ret = io_ctl_check_crc(&io_ctl, 0);
        if (ret)
                goto free_cache;

        ret = io_ctl_check_generation(&io_ctl, generation);
        if (ret)
                goto free_cache;

        while (num_entries) {
                e = kmem_cache_zalloc(btrfs_free_space_cachep,
                                      GFP_NOFS);
                if (!e)
                        goto free_cache;

                ret = io_ctl_read_entry(&io_ctl, e, &type);
                if (ret) {
                        kmem_cache_free(btrfs_free_space_cachep, e);
                        goto free_cache;
                }

                if (!e->bytes) {
                        kmem_cache_free(btrfs_free_space_cachep, e);
                        goto free_cache;
                }

                if (type == BTRFS_FREE_SPACE_EXTENT) {
                        spin_lock(&ctl->tree_lock);
                        ret = link_free_space(ctl, e);
                        spin_unlock(&ctl->tree_lock);
                        if (ret) {
                                printk(KERN_ERR "Duplicate entries in "
                                       "free space cache, dumping\n");
                                kmem_cache_free(btrfs_free_space_cachep, e);
                                goto free_cache;
                        }
                } else {
                        BUG_ON(!num_bitmaps);
                        num_bitmaps--;
                        e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
                        if (!e->bitmap) {
                                kmem_cache_free(
                                        btrfs_free_space_cachep, e);
                                goto free_cache;
                        }
                        spin_lock(&ctl->tree_lock);
                        ret = link_free_space(ctl, e);
                        ctl->total_bitmaps++;
                        ctl->op->recalc_thresholds(ctl);
                        spin_unlock(&ctl->tree_lock);
                        if (ret) {
                                printk(KERN_ERR "Duplicate entries in "
                                       "free space cache, dumping\n");
                                kmem_cache_free(btrfs_free_space_cachep, e);
                                goto free_cache;
                        }
                        list_add_tail(&e->list, &bitmaps);
                }

                num_entries--;
        }

        io_ctl_unmap_page(&io_ctl);

        /*
         * The bitmap pages are stored after all of the entries, so read
         * them in now that every entry has been added to the cache.
         */
        list_for_each_entry_safe(e, n, &bitmaps, list) {
                list_del_init(&e->list);
                ret = io_ctl_read_bitmap(&io_ctl, e);
                if (ret)
                        goto free_cache;
        }

        io_ctl_drop_pages(&io_ctl);
        merge_space_tree(ctl);
        ret = 1;
out:
        io_ctl_free(&io_ctl);
        return ret;
free_cache:
        io_ctl_drop_pages(&io_ctl);
        __btrfs_remove_free_space_cache(ctl);
        goto out;
}

int load_free_space_cache(struct btrfs_fs_info *fs_info,
                          struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_root *root = fs_info->tree_root;
        struct inode *inode;
        struct btrfs_path *path;
        int ret = 0;
        bool matched;
        u64 used = btrfs_block_group_used(&block_group->item);

        /*
         * If this block group has been marked to be cleared for one reason or
         * another then we can't trust the on disk cache, so just return.
         */
        spin_lock(&block_group->lock);
        if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
                spin_unlock(&block_group->lock);
                return 0;
        }
        spin_unlock(&block_group->lock);

        path = btrfs_alloc_path();
        if (!path)
                return 0;
        path->search_commit_root = 1;
        path->skip_locking = 1;

        inode = lookup_free_space_inode(root, block_group, path);
        if (IS_ERR(inode)) {
                btrfs_free_path(path);
                return 0;
        }

        /* We may have converted the inode and made the cache invalid. */
        spin_lock(&block_group->lock);
        if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
                spin_unlock(&block_group->lock);
                btrfs_free_path(path);
                goto out;
        }
        spin_unlock(&block_group->lock);

        ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
                                      path, block_group->key.objectid);
        btrfs_free_path(path);
        if (ret <= 0)
                goto out;

        spin_lock(&ctl->tree_lock);
        matched = (ctl->free_space == (block_group->key.offset - used -
                                       block_group->bytes_super));
        spin_unlock(&ctl->tree_lock);

        if (!matched) {
                __btrfs_remove_free_space_cache(ctl);
                printk(KERN_ERR "block group %llu has the wrong amount of "
                       "free space\n", block_group->key.objectid);
                ret = -1;
        }
out:
        if (ret < 0) {
                /* This cache is bogus, make sure it gets cleared */
                spin_lock(&block_group->lock);
                block_group->disk_cache_state = BTRFS_DC_CLEAR;
                spin_unlock(&block_group->lock);
                ret = 0;

                printk(KERN_ERR "btrfs: failed to load free space cache "
                       "for block group %llu\n", block_group->key.objectid);
        }

        iput(inode);
        return ret;
}

/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick
 * recovery on mount.  This will return 0 if it was successful in writing the
 * cache out, and -1 if it was not.
 */
int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                            struct btrfs_free_space_ctl *ctl,
                            struct btrfs_block_group_cache *block_group,
                            struct btrfs_trans_handle *trans,
                            struct btrfs_path *path, u64 offset)
{
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
        struct rb_node *node;
        struct list_head *pos, *n;
        struct extent_state *cached_state = NULL;
        struct btrfs_free_cluster *cluster = NULL;
        struct extent_io_tree *unpin = NULL;
        struct io_ctl io_ctl;
        struct list_head bitmap_list;
        struct btrfs_key key;
        u64 start, extent_start, extent_end, len;
        int entries = 0;
        int bitmaps = 0;
        int ret;
        int err = -1;

        INIT_LIST_HEAD(&bitmap_list);

        if (!i_size_read(inode))
                return -1;

        ret = io_ctl_init(&io_ctl, inode, root);
        if (ret)
                return -1;

        /* Get the cluster for this block_group if it exists */
        if (block_group && !list_empty(&block_group->cluster_list))
                cluster = list_entry(block_group->cluster_list.next,
                                     struct btrfs_free_cluster,
                                     block_group_list);

        /* Lock all pages first so we can lock the extent safely. */
        io_ctl_prepare_pages(&io_ctl, inode, 0);

        lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
                         0, &cached_state);

        node = rb_first(&ctl->free_space_offset);
        if (!node && cluster) {
                node = rb_first(&cluster->root);
                cluster = NULL;
        }

        /* Make sure we can fit our crcs into the first page */
        if (io_ctl.check_crcs &&
            (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) {
                WARN_ON(1);
                goto out_nospc;
        }

        io_ctl_set_generation(&io_ctl, trans->transid);

        /* Write out the extent entries */
        while (node) {
                struct btrfs_free_space *e;

                e = rb_entry(node, struct btrfs_free_space, offset_index);
                entries++;

                ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
                                       e->bitmap);
                if (ret)
                        goto out_nospc;

                if (e->bitmap) {
                        list_add_tail(&e->list, &bitmap_list);
                        bitmaps++;
                }
                node = rb_next(node);
                if (!node && cluster) {
                        node = rb_first(&cluster->root);
                        cluster = NULL;
                }
        }

        /*
         * We want to add any pinned extents to our free space cache
         * so we don't leak the space
         */

        /*
         * We shouldn't have switched the pinned extents yet so this is the
         * right one
         */
        unpin = root->fs_info->pinned_extents;

        if (block_group)
                start = block_group->key.objectid;

        while (block_group && (start < block_group->key.objectid +
                               block_group->key.offset)) {
                ret = find_first_extent_bit(unpin, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY);
                if (ret) {
                        ret = 0;
                        break;
                }

                /* This pinned extent is out of our range */
                if (extent_start >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                extent_start = max(extent_start, start);
                extent_end = min(block_group->key.objectid +
                                 block_group->key.offset, extent_end + 1);
                len = extent_end - extent_start;

                entries++;
                ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);
                if (ret)
                        goto out_nospc;

                start = extent_end;
        }

        /* Write out the bitmaps */
        list_for_each_safe(pos, n, &bitmap_list) {
                struct btrfs_free_space *entry =
                        list_entry(pos, struct btrfs_free_space, list);

                ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);
                if (ret)
                        goto out_nospc;
                list_del_init(&entry->list);
        }

        /* Zero out the rest of the pages just to make sure */
        io_ctl_zero_remaining_pages(&io_ctl);

        ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
                                0, i_size_read(inode), &cached_state);
        io_ctl_drop_pages(&io_ctl);
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
                             i_size_read(inode) - 1, &cached_state, GFP_NOFS);

        if (ret)
                goto out;

        btrfs_wait_ordered_range(inode, 0, (u64)-1);

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret < 0) {
                clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
                                 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
                                 GFP_NOFS);
                goto out;
        }
        leaf = path->nodes[0];
        if (ret > 0) {
                struct btrfs_key found_key;
                BUG_ON(!path->slots[0]);
                path->slots[0]--;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
                    found_key.offset != offset) {
                        clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
                                         inode->i_size - 1,
                                         EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
                                         NULL, GFP_NOFS);
                        btrfs_release_path(path);
                        goto out;
                }
        }

        BTRFS_I(inode)->generation = trans->transid;
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        btrfs_set_free_space_entries(leaf, header, entries);
        btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
        btrfs_set_free_space_generation(leaf, header, trans->transid);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        err = 0;
out:
        io_ctl_free(&io_ctl);
        if (err) {
                invalidate_inode_pages2(inode->i_mapping);
                BTRFS_I(inode)->generation = 0;
        }
        btrfs_update_inode(trans, root, inode);
        return err;

out_nospc:
        list_for_each_safe(pos, n, &bitmap_list) {
                struct btrfs_free_space *entry =
                        list_entry(pos, struct btrfs_free_space, list);
                list_del_init(&entry->list);
        }
        io_ctl_drop_pages(&io_ctl);
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
                             i_size_read(inode) - 1, &cached_state, GFP_NOFS);
        goto out;
}

int btrfs_write_out_cache(struct btrfs_root *root,
                          struct btrfs_trans_handle *trans,
                          struct btrfs_block_group_cache *block_group,
                          struct btrfs_path *path)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct inode *inode;
        int ret = 0;

        root = root->fs_info->tree_root;

        spin_lock(&block_group->lock);
        if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
                spin_unlock(&block_group->lock);
                return 0;
        }
        spin_unlock(&block_group->lock);

        inode = lookup_free_space_inode(root, block_group, path);
        if (IS_ERR(inode))
                return 0;

        ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
                                      path, block_group->key.objectid);
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->disk_cache_state = BTRFS_DC_ERROR;
                spin_unlock(&block_group->lock);
                ret = 0;
#ifdef DEBUG
                printk(KERN_ERR "btrfs: failed to write free space cache "
                       "for block group %llu\n", block_group->key.objectid);
#endif
        }

        iput(inode);
        return ret;
}

static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
                                          u64 offset)
{
        BUG_ON(offset < bitmap_start);
        offset -= bitmap_start;
        return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
        return (unsigned long)(div_u64(bytes, unit));
}
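
/*
 * Returns the start offset of the bitmap that would cover @offset: offsets
 * round down to a bitmap boundary relative to ctl->start.  For example,
 * assuming 4K pages and a 4K unit (so 128M per bitmap), with ctl->start at
 * 1G an offset of 1G + 200M maps to the bitmap starting at 1G + 128M.
 */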
static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
                                   u64 offset)
{
        u64 bitmap_start;
        u64 bytes_per_bitmap;

        bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
        bitmap_start = offset - ctl->start;
        bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
        bitmap_start *= bytes_per_bitmap;
        bitmap_start += ctl->start;

        return bitmap_start;
}

static int tree_insert_offset(struct rb_root *root, u64 offset,
                              struct rb_node *node, int bitmap)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, offset_index);

                if (offset < info->offset) {
                        p = &(*p)->rb_left;
                } else if (offset > info->offset) {
                        p = &(*p)->rb_right;
                } else {
                        /*
                         * we could have a bitmap entry and an extent entry
                         * share the same offset.  If this is the case, we want
                         * the extent entry to always be found first if we do a
                         * linear search through the tree, since we want to have
                         * the quickest allocation time, and allocating from an
                         * extent is faster than allocating from a bitmap.  So
                         * if we're inserting a bitmap and we find an entry at
                         * this offset, we want to go right, or after this entry
                         * logically.  If we are inserting an extent and we've
                         * found a bitmap, we want to go left, or before
                         * logically.
                         */
                        if (bitmap) {
                                if (info->bitmap) {
                                        WARN_ON_ONCE(1);
                                        return -EEXIST;
                                }
                                p = &(*p)->rb_right;
                        } else {
                                if (!info->bitmap) {
                                        WARN_ON_ONCE(1);
                                        return -EEXIST;
                                }
                                p = &(*p)->rb_left;
                        }
                }
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
                   u64 offset, int bitmap_only, int fuzzy)
{
        struct rb_node *n = ctl->free_space_offset.rb_node;
        struct btrfs_free_space *entry, *prev = NULL;

        /* find entry that is closest to the 'offset' */
        while (1) {
                if (!n) {
                        entry = NULL;
                        break;
                }

                entry = rb_entry(n, struct btrfs_free_space, offset_index);
                prev = entry;

                if (offset < entry->offset)
                        n = n->rb_left;
                else if (offset > entry->offset)
                        n = n->rb_right;
                else
                        break;
        }

        if (bitmap_only) {
                if (!entry)
                        return NULL;
                if (entry->bitmap)
                        return entry;

                /*
                 * bitmap entry and extent entry may share same offset,
                 * in that case, bitmap entry comes after extent entry.
                 */
                n = rb_next(n);
                if (!n)
                        return NULL;
                entry = rb_entry(n, struct btrfs_free_space, offset_index);
                if (entry->offset != offset)
                        return NULL;

                WARN_ON(!entry->bitmap);
                return entry;
        } else if (entry) {
                if (entry->bitmap) {
                        /*
                         * if previous extent entry covers the offset,
                         * we should return it instead of the bitmap entry
                         */
                        n = &entry->offset_index;
                        while (1) {
                                n = rb_prev(n);
                                if (!n)
                                        break;
                                prev = rb_entry(n, struct btrfs_free_space,
                                                offset_index);
                                if (!prev->bitmap) {
                                        if (prev->offset + prev->bytes > offset)
                                                entry = prev;
                                        break;
                                }
                        }
                }
                return entry;
        }

        if (!prev)
                return NULL;

        /* find last entry before the 'offset' */
        entry = prev;
        if (entry->offset > offset) {
                n = rb_prev(&entry->offset_index);
                if (n) {
                        entry = rb_entry(n, struct btrfs_free_space,
                                        offset_index);
                        BUG_ON(entry->offset > offset);
                } else {
                        if (fuzzy)
                                return entry;
                        else
                                return NULL;
                }
        }

        if (entry->bitmap) {
                n = &entry->offset_index;
                while (1) {
                        n = rb_prev(n);
                        if (!n)
                                break;
                        prev = rb_entry(n, struct btrfs_free_space,
                                        offset_index);
                        if (!prev->bitmap) {
                                if (prev->offset + prev->bytes > offset)
                                        return prev;
                                break;
                        }
                }
                if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
                        return entry;
        } else if (entry->offset + entry->bytes > offset)
                return entry;

        if (!fuzzy)
                return NULL;

        while (1) {
                if (entry->bitmap) {
                        if (entry->offset + BITS_PER_BITMAP *
                            ctl->unit > offset)
                                break;
                } else {
                        if (entry->offset + entry->bytes > offset)
                                break;
                }

                n = rb_next(&entry->offset_index);
                if (!n)
                        return NULL;
                entry = rb_entry(n, struct btrfs_free_space, offset_index);
        }
        return entry;
}

static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
                    struct btrfs_free_space *info)
{
        rb_erase(&info->offset_index, &ctl->free_space_offset);
        ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info)
{
        __unlink_free_space(ctl, info);
        ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info)
{
        int ret = 0;

        BUG_ON(!info->bitmap && !info->bytes);
        ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
                                 &info->offset_index, (info->bitmap != NULL));
        if (ret)
                return ret;

        ctl->free_space += info->bytes;
        ctl->free_extents++;
        return ret;
}
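
/*
 * Decide how many extent entries we allow before switching new free space
 * over to bitmaps.  Worked example, assuming 4K pages: for a 1G block group
 * max_bytes is 32K; with no bitmaps yet, bitmap_bytes is accounted as one
 * page (4K), so extent_bytes = min(32K - 4K, 32K / 2) = 16K and the
 * threshold is 16K / sizeof(struct btrfs_free_space) entries.
 */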
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
        struct btrfs_block_group_cache *block_group = ctl->private;
        u64 max_bytes;
        u64 bitmap_bytes;
        u64 extent_bytes;
        u64 size = block_group->key.offset;
        u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
        int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

        BUG_ON(ctl->total_bitmaps > max_bitmaps);

        /*
         * The goal is to keep the total amount of memory used per 1gb of space
         * at or below 32k, so we need to adjust how much memory we allow to be
         * used by extent based free space tracking
         */
        if (size < 1024 * 1024 * 1024)
                max_bytes = MAX_CACHE_BYTES_PER_GIG;
        else
                max_bytes = MAX_CACHE_BYTES_PER_GIG *
                        div64_u64(size, 1024 * 1024 * 1024);

        /*
         * we want to account for 1 more bitmap than what we have so we can make
         * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
         * we add more bitmaps.
         */
        bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

        if (bitmap_bytes >= max_bytes) {
                ctl->extents_thresh = 0;
                return;
        }

        /*
         * we want the extent entry threshold to always be at most 1/2 the max
         * bytes we can have, or whatever is less than that.
         */
        extent_bytes = max_bytes - bitmap_bytes;
        extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

        ctl->extents_thresh =
                div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}

static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
                                       struct btrfs_free_space *info,
                                       u64 offset, u64 bytes)
{
        unsigned long start, count;

        start = offset_to_bit(info->offset, ctl->unit, offset);
        count = bytes_to_bits(bytes, ctl->unit);
        BUG_ON(start + count > BITS_PER_BITMAP);

        bitmap_clear(info->bitmap, start, count);

        info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info, u64 offset,
                              u64 bytes)
{
        __bitmap_clear_bits(ctl, info, offset, bytes);
        ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
                            struct btrfs_free_space *info, u64 offset,
                            u64 bytes)
{
        unsigned long start, count;

        start = offset_to_bit(info->offset, ctl->unit, offset);
        count = bytes_to_bits(bytes, ctl->unit);
        BUG_ON(start + count > BITS_PER_BITMAP);

        bitmap_set(info->bitmap, start, count);

        info->bytes += bytes;
        ctl->free_space += bytes;
}
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
                         struct btrfs_free_space *bitmap_info, u64 *offset,
                         u64 *bytes)
{
        unsigned long found_bits = 0;
        unsigned long bits, i;
        unsigned long next_zero;

        i = offset_to_bit(bitmap_info->offset, ctl->unit,
                          max_t(u64, *offset, bitmap_info->offset));
        bits = bytes_to_bits(*bytes, ctl->unit);

        for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
             i < BITS_PER_BITMAP;
             i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
                next_zero = find_next_zero_bit(bitmap_info->bitmap,
                                               BITS_PER_BITMAP, i);
                if ((next_zero - i) >= bits) {
                        found_bits = next_zero - i;
                        break;
                }
                i = next_zero;
        }

        if (found_bits) {
                *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
                *bytes = (u64)(found_bits) * ctl->unit;
                return 0;
        }

        return -1;
}
1476
1477 static struct btrfs_free_space *
1478 find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
1479 {
1480         struct btrfs_free_space *entry;
1481         struct rb_node *node;
1482         int ret;
1483
1484         if (!ctl->free_space_offset.rb_node)
1485                 return NULL;
1486
1487         entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
1488         if (!entry)
1489                 return NULL;
1490
1491         for (node = &entry->offset_index; node; node = rb_next(node)) {
1492                 entry = rb_entry(node, struct btrfs_free_space, offset_index);
1493                 if (entry->bytes < *bytes)
1494                         continue;
1495
1496                 if (entry->bitmap) {
1497                         ret = search_bitmap(ctl, entry, offset, bytes);
1498                         if (!ret)
1499                                 return entry;
1500                         continue;
1501                 }
1502
1503                 *offset = entry->offset;
1504                 *bytes = entry->bytes;
1505                 return entry;
1506         }
1507
1508         return NULL;
1509 }
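/*
 * Note this is first fit, not best fit: the walk starts at the entry
 * covering the bitmap-aligned *offset and returns the first extent or
 * bitmap entry with at least *bytes of space, even if a later entry
 * would be a tighter match.
 */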
1510
1511 static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
1512                            struct btrfs_free_space *info, u64 offset)
1513 {
1514         info->offset = offset_to_bitmap(ctl, offset);
1515         info->bytes = 0;
1516         INIT_LIST_HEAD(&info->list);
1517         link_free_space(ctl, info);
1518         ctl->total_bitmaps++;
1519
1520         ctl->op->recalc_thresholds(ctl);
1521 }
1522
1523 static void free_bitmap(struct btrfs_free_space_ctl *ctl,
1524                         struct btrfs_free_space *bitmap_info)
1525 {
1526         unlink_free_space(ctl, bitmap_info);
1527         kfree(bitmap_info->bitmap);
1528         kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
1529         ctl->total_bitmaps--;
1530         ctl->op->recalc_thresholds(ctl);
1531 }
1532
1533 static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
1534                               struct btrfs_free_space *bitmap_info,
1535                               u64 *offset, u64 *bytes)
1536 {
1537         u64 end;
1538         u64 search_start, search_bytes;
1539         int ret;
1540
1541 again:
1542         end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
1543
1544         /*
1545          * XXX - this can go away after a few releases.
1546          *
1547          * since the only user of btrfs_remove_free_space is the tree logging
1548          * stuff, and the only way to test that is under crash conditions, we
1549          * want to have this debug stuff here just in case something's not
1550          * working.  Search the bitmap for the space we are trying to use to
1551          * make sure it's actually there.  If it's not there then we need to stop
1552          * because something has gone wrong.
1553          */
1554         search_start = *offset;
1555         search_bytes = *bytes;
1556         search_bytes = min(search_bytes, end - search_start + 1);
1557         ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
1558         BUG_ON(ret < 0 || search_start != *offset);
1559
1560         if (*offset > bitmap_info->offset && *offset + *bytes > end) {
1561                 bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
1562                 *bytes -= end - *offset + 1;
1563                 *offset = end + 1;
1564         } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
1565                 bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
1566                 *bytes = 0;
1567         }
1568
1569         if (*bytes) {
1570                 struct rb_node *next = rb_next(&bitmap_info->offset_index);
1571                 if (!bitmap_info->bytes)
1572                         free_bitmap(ctl, bitmap_info);
1573
1574                 /*
1575                  * no entry after this bitmap, but we still have bytes to
1576                  * remove, so something has gone wrong.
1577                  */
1578                 if (!next)
1579                         return -EINVAL;
1580
1581                 bitmap_info = rb_entry(next, struct btrfs_free_space,
1582                                        offset_index);
1583
1584                 /*
1585                  * if the next entry isn't a bitmap we need to return to let the
1586                  * extent stuff do its work.
1587                  */
1588                 if (!bitmap_info->bitmap)
1589                         return -EAGAIN;
1590
1591                 /*
1592                  * Ok the next item is a bitmap, but it may not actually hold
1593                  * the information for the rest of this free space stuff, so
1594                  * look for it, and if we don't find it return so we can try
1595                  * everything over again.
1596                  */
1597                 search_start = *offset;
1598                 search_bytes = *bytes;
1599                 ret = search_bitmap(ctl, bitmap_info, &search_start,
1600                                     &search_bytes);
1601                 if (ret < 0 || search_start != *offset)
1602                         return -EAGAIN;
1603
1604                 goto again;
1605         } else if (!bitmap_info->bytes)
1606                 free_bitmap(ctl, bitmap_info);
1607
1608         return 0;
1609 }
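/*
 * The "again" loop above handles a free range straddling bitmap
 * boundaries: each pass clears the part covered by the current bitmap,
 * advances *offset and *bytes past it, and steps to the next entry.
 * -EAGAIN means the next entry either isn't a bitmap or doesn't contain
 * the range, so the caller must retry from the top of its own search.
 */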
1610
1611 static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
1612                                struct btrfs_free_space *info, u64 offset,
1613                                u64 bytes)
1614 {
1615         u64 bytes_to_set = 0;
1616         u64 end;
1617
1618         end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
1619
1620         bytes_to_set = min(end - offset, bytes);
1621
1622         bitmap_set_bits(ctl, info, offset, bytes_to_set);
1623
1624         return bytes_to_set;
1625
1626 }
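/*
 * Illustrative numbers (assuming 4KiB sectors, so one bitmap covers
 * 32768 * 4096 bytes = 128MiB): for a bitmap at offset 0, adding
 * offset = 128MiB - 8KiB with bytes = 16KiB clamps bytes_to_set to
 * min(8KiB, 16KiB) = 8KiB, and the caller is expected to push the
 * remaining 8KiB into the next bitmap (see the "again" loop in
 * insert_into_bitmap()).
 */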
1627
1628 static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
1629                       struct btrfs_free_space *info)
1630 {
1631         struct btrfs_block_group_cache *block_group = ctl->private;
1632
1633         /*
1634          * If we are below the extents threshold then we can add this as an
1635          * extent, and don't have to deal with the bitmap
1636          */
1637         if (ctl->free_extents < ctl->extents_thresh) {
1638                 /*
1639                  * If this block group has some small extents we don't want to
1640                  * use up all of our free slots in the cache with them, we want
1641          * to reserve them for larger extents, however if we have plenty
1642          * of cache left then go ahead and add them, no sense in adding
1643                  * the overhead of a bitmap if we don't have to.
1644                  */
1645                 if (info->bytes <= block_group->sectorsize * 4) {
1646                         if (ctl->free_extents * 2 <= ctl->extents_thresh)
1647                                 return false;
1648                 } else {
1649                         return false;
1650                 }
1651         }
1652
1653         /*
1654          * some block groups are so tiny they can't be enveloped by a bitmap, so
1655          * don't even bother to create a bitmap for this block group
1656          */
1657         if (BITS_PER_BITMAP * block_group->sectorsize >
1658             block_group->key.offset)
1659                 return false;
1660
1661         return true;
1662 }
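/*
 * To summarize the policy above: while we are under the extents
 * threshold, small extents (at most 4 sectors) only go into a bitmap
 * once more than half of the threshold is already used, and larger
 * extents never do; once the threshold is reached, everything goes into
 * bitmaps, except in block groups too small to be covered by a single
 * bitmap.
 */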
1663
1664 static struct btrfs_free_space_op free_space_op = {
1665         .recalc_thresholds      = recalculate_thresholds,
1666         .use_bitmap             = use_bitmap,
1667 };
1668
1669 static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
1670                               struct btrfs_free_space *info)
1671 {
1672         struct btrfs_free_space *bitmap_info;
1673         struct btrfs_block_group_cache *block_group = NULL;
1674         int added = 0;
1675         u64 bytes, offset, bytes_added;
1676         int ret;
1677
1678         bytes = info->bytes;
1679         offset = info->offset;
1680
1681         if (!ctl->op->use_bitmap(ctl, info))
1682                 return 0;
1683
1684         if (ctl->op == &free_space_op)
1685                 block_group = ctl->private;
1686 again:
1687         /*
1688          * Since we link bitmaps right into the cluster we need to see if we
1689          * have a cluster here, and if so and it has our bitmap we need to add
1690          * the free space to that bitmap.
1691          */
1692         if (block_group && !list_empty(&block_group->cluster_list)) {
1693                 struct btrfs_free_cluster *cluster;
1694                 struct rb_node *node;
1695                 struct btrfs_free_space *entry;
1696
1697                 cluster = list_entry(block_group->cluster_list.next,
1698                                      struct btrfs_free_cluster,
1699                                      block_group_list);
1700                 spin_lock(&cluster->lock);
1701                 node = rb_first(&cluster->root);
1702                 if (!node) {
1703                         spin_unlock(&cluster->lock);
1704                         goto no_cluster_bitmap;
1705                 }
1706
1707                 entry = rb_entry(node, struct btrfs_free_space, offset_index);
1708                 if (!entry->bitmap) {
1709                         spin_unlock(&cluster->lock);
1710                         goto no_cluster_bitmap;
1711                 }
1712
1713                 if (entry->offset == offset_to_bitmap(ctl, offset)) {
1714                         bytes_added = add_bytes_to_bitmap(ctl, entry,
1715                                                           offset, bytes);
1716                         bytes -= bytes_added;
1717                         offset += bytes_added;
1718                 }
1719                 spin_unlock(&cluster->lock);
1720                 if (!bytes) {
1721                         ret = 1;
1722                         goto out;
1723                 }
1724         }
1725
1726 no_cluster_bitmap:
1727         bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1728                                          1, 0);
1729         if (!bitmap_info) {
1730                 BUG_ON(added);
1731                 goto new_bitmap;
1732         }
1733
1734         bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
1735         bytes -= bytes_added;
1736         offset += bytes_added;
1737         added = 0;
1738
1739         if (!bytes) {
1740                 ret = 1;
1741                 goto out;
1742         } else
1743                 goto again;
1744
1745 new_bitmap:
1746         if (info && info->bitmap) {
1747                 add_new_bitmap(ctl, info, offset);
1748                 added = 1;
1749                 info = NULL;
1750                 goto again;
1751         } else {
1752                 spin_unlock(&ctl->tree_lock);
1753
1754                 /* no pre-allocated info, allocate a new one */
1755                 if (!info) {
1756                         info = kmem_cache_zalloc(btrfs_free_space_cachep,
1757                                                  GFP_NOFS);
1758                         if (!info) {
1759                                 spin_lock(&ctl->tree_lock);
1760                                 ret = -ENOMEM;
1761                                 goto out;
1762                         }
1763                 }
1764
1765                 /* allocate the bitmap */
1766                 info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
1767                 spin_lock(&ctl->tree_lock);
1768                 if (!info->bitmap) {
1769                         ret = -ENOMEM;
1770                         goto out;
1771                 }
1772                 goto again;
1773         }
1774
1775 out:
1776         if (info) {
1777                 if (info->bitmap)
1778                         kfree(info->bitmap);
1779                 kmem_cache_free(btrfs_free_space_cachep, info);
1780         }
1781
1782         return ret;
1783 }
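/*
 * Note the locking dance in the new_bitmap path above: ctl->tree_lock
 * is dropped around the GFP_NOFS allocations and retaken before "goto
 * again", so the tree may have changed in between.  That is why the
 * bitmap lookup is redone from scratch on every pass instead of being
 * cached across iterations.
 */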
1784
1785 static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
1786                           struct btrfs_free_space *info, bool update_stat)
1787 {
1788         struct btrfs_free_space *left_info;
1789         struct btrfs_free_space *right_info;
1790         bool merged = false;
1791         u64 offset = info->offset;
1792         u64 bytes = info->bytes;
1793
1794         /*
1795          * first we want to see if there is free space adjacent to the range we
1796          * are adding, if there is remove that struct and add a new one to
1797          * cover the entire range
1798          */
1799         right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
1800         if (right_info && rb_prev(&right_info->offset_index))
1801                 left_info = rb_entry(rb_prev(&right_info->offset_index),
1802                                      struct btrfs_free_space, offset_index);
1803         else
1804                 left_info = tree_search_offset(ctl, offset - 1, 0, 0);
1805
1806         if (right_info && !right_info->bitmap) {
1807                 if (update_stat)
1808                         unlink_free_space(ctl, right_info);
1809                 else
1810                         __unlink_free_space(ctl, right_info);
1811                 info->bytes += right_info->bytes;
1812                 kmem_cache_free(btrfs_free_space_cachep, right_info);
1813                 merged = true;
1814         }
1815
1816         if (left_info && !left_info->bitmap &&
1817             left_info->offset + left_info->bytes == offset) {
1818                 if (update_stat)
1819                         unlink_free_space(ctl, left_info);
1820                 else
1821                         __unlink_free_space(ctl, left_info);
1822                 info->offset = left_info->offset;
1823                 info->bytes += left_info->bytes;
1824                 kmem_cache_free(btrfs_free_space_cachep, left_info);
1825                 merged = true;
1826         }
1827
1828         return merged;
1829 }
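/*
 * Merging example (illustrative offsets): with 4096-byte extent entries
 * at offsets 4096 and 12288 already in the tree, adding a 4096-byte
 * entry at 8192 unlinks both neighbours and grows the new entry into a
 * single 12288-byte extent at offset 4096.  Bitmap entries are never
 * merged this way.
 */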
1830
1831 int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
1832                            u64 offset, u64 bytes)
1833 {
1834         struct btrfs_free_space *info;
1835         int ret = 0;
1836
1837         info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
1838         if (!info)
1839                 return -ENOMEM;
1840
1841         info->offset = offset;
1842         info->bytes = bytes;
1843
1844         spin_lock(&ctl->tree_lock);
1845
1846         if (try_merge_free_space(ctl, info, true))
1847                 goto link;
1848
1849         /*
1850          * If there was no extent directly to the left or right of this new
1851          * extent then we know we're going to have to allocate a new extent, so
1852          * before we do that, see if we need to drop this into a bitmap.
1853          */
1854         ret = insert_into_bitmap(ctl, info);
1855         if (ret < 0) {
1856                 goto out;
1857         } else if (ret) {
1858                 ret = 0;
1859                 goto out;
1860         }
1861 link:
1862         ret = link_free_space(ctl, info);
1863         if (ret)
1864                 kmem_cache_free(btrfs_free_space_cachep, info);
1865 out:
1866         spin_unlock(&ctl->tree_lock);
1867
1868         if (ret) {
1869                 printk(KERN_CRIT "btrfs: unable to add free space: %d\n", ret);
1870                 BUG_ON(ret == -EEXIST);
1871         }
1872
1873         return ret;
1874 }
1875
1876 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
1877                             u64 offset, u64 bytes)
1878 {
1879         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1880         struct btrfs_free_space *info;
1881         struct btrfs_free_space *next_info = NULL;
1882         int ret = 0;
1883
1884         spin_lock(&ctl->tree_lock);
1885
1886 again:
1887         info = tree_search_offset(ctl, offset, 0, 0);
1888         if (!info) {
1889                 /*
1890                  * oops, we didn't find an extent that matched the space we wanted
1891                  * to remove, look for a bitmap instead
1892                  */
1893                 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1894                                           1, 0);
1895                 if (!info) {
1896                         /* the tree logging code might be calling us before we
1897                          * have fully loaded the free space rbtree for this
1898                          * block group.  So it is possible the entry won't
1899                          * be in the rbtree yet at all.  The caching code
1900                          * will make sure not to put it in the rbtree if
1901                          * the logging code has pinned it.
1902                          */
1903                         goto out_lock;
1904                 }
1905         }
1906
1907         if (info->bytes < bytes && rb_next(&info->offset_index)) {
1908                 u64 end;
1909                 next_info = rb_entry(rb_next(&info->offset_index),
1910                                              struct btrfs_free_space,
1911                                              offset_index);
1912
1913                 if (next_info->bitmap)
1914                         end = next_info->offset +
1915                               BITS_PER_BITMAP * ctl->unit - 1;
1916                 else
1917                         end = next_info->offset + next_info->bytes;
1918
1919                 if (next_info->bytes < bytes ||
1920                     next_info->offset > offset || offset > end) {
1921                         printk(KERN_CRIT "Found free space at %llu, size %llu,"
1922                               " trying to use %llu\n",
1923                               (unsigned long long)info->offset,
1924                               (unsigned long long)info->bytes,
1925                               (unsigned long long)bytes);
1926                         WARN_ON(1);
1927                         ret = -EINVAL;
1928                         goto out_lock;
1929                 }
1930
1931                 info = next_info;
1932         }
1933
1934         if (info->bytes == bytes) {
1935                 unlink_free_space(ctl, info);
1936                 if (info->bitmap) {
1937                         kfree(info->bitmap);
1938                         ctl->total_bitmaps--;
1939                 }
1940                 kmem_cache_free(btrfs_free_space_cachep, info);
1941                 ret = 0;
1942                 goto out_lock;
1943         }
1944
1945         if (!info->bitmap && info->offset == offset) {
1946                 unlink_free_space(ctl, info);
1947                 info->offset += bytes;
1948                 info->bytes -= bytes;
1949                 ret = link_free_space(ctl, info);
1950                 WARN_ON(ret);
1951                 goto out_lock;
1952         }
1953
1954         if (!info->bitmap && info->offset <= offset &&
1955             info->offset + info->bytes >= offset + bytes) {
1956                 u64 old_start = info->offset;
1957                 /*
1958                  * we're freeing space in the middle of the info,
1959                  * this can happen during tree log replay
1960                  *
1961                  * first unlink the old info and then
1962                  * insert it again after the hole we're creating
1963                  */
1964                 unlink_free_space(ctl, info);
1965                 if (offset + bytes < info->offset + info->bytes) {
1966                         u64 old_end = info->offset + info->bytes;
1967
1968                         info->offset = offset + bytes;
1969                         info->bytes = old_end - info->offset;
1970                         ret = link_free_space(ctl, info);
1971                         WARN_ON(ret);
1972                         if (ret)
1973                                 goto out_lock;
1974                 } else {
1975                         /* the hole we're creating ends at the end
1976                          * of the info struct, just free the info
1977                          */
1978                         kmem_cache_free(btrfs_free_space_cachep, info);
1979                 }
1980                 spin_unlock(&ctl->tree_lock);
1981
1982                 /* step two, insert a new info struct to cover
1983                  * anything before the hole
1984                  */
1985                 ret = btrfs_add_free_space(block_group, old_start,
1986                                            offset - old_start);
1987                 WARN_ON(ret); /* -ENOMEM */
1988                 goto out;
1989         }
1990
1991         ret = remove_from_bitmap(ctl, info, &offset, &bytes);
1992         if (ret == -EAGAIN)
1993                 goto again;
1994         BUG_ON(ret); /* logic error */
1995 out_lock:
1996         spin_unlock(&ctl->tree_lock);
1997 out:
1998         return ret;
1999 }
2000
2001 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
2002                            u64 bytes)
2003 {
2004         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2005         struct btrfs_free_space *info;
2006         struct rb_node *n;
2007         int count = 0;
2008
2009         for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
2010                 info = rb_entry(n, struct btrfs_free_space, offset_index);
2011                 if (info->bytes >= bytes)
2012                         count++;
2013                 printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
2014                        (unsigned long long)info->offset,
2015                        (unsigned long long)info->bytes,
2016                        (info->bitmap) ? "yes" : "no");
2017         }
2018         printk(KERN_INFO "block group has cluster?: %s\n",
2019                list_empty(&block_group->cluster_list) ? "no" : "yes");
2020         printk(KERN_INFO "%d blocks of free space at or bigger than %llu bytes\n",
2021                count, (unsigned long long)bytes);
2022 }
2023
2024 void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
2025 {
2026         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2027
2028         spin_lock_init(&ctl->tree_lock);
2029         ctl->unit = block_group->sectorsize;
2030         ctl->start = block_group->key.objectid;
2031         ctl->private = block_group;
2032         ctl->op = &free_space_op;
2033
2034         /*
2035          * we only want to have 32k of ram per block group for keeping
2036          * track of free space, and if we pass 1/2 of that we want to
2037          * start converting things over to using bitmaps
2038          */
2039         ctl->extents_thresh = ((1024 * 32) / 2) /
2040                                 sizeof(struct btrfs_free_space);
2041 }
2042
2043 /*
2044  * for a given cluster, put all of its extents back into the free
2045  * space cache.  If the block group passed doesn't match the block group
2046  * pointed to by the cluster, someone else raced in and freed the
2047  * cluster already.  In that case, we just return without changing anything
2048  */
2049 static int
2050 __btrfs_return_cluster_to_free_space(
2051                              struct btrfs_block_group_cache *block_group,
2052                              struct btrfs_free_cluster *cluster)
2053 {
2054         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2055         struct btrfs_free_space *entry;
2056         struct rb_node *node;
2057
2058         spin_lock(&cluster->lock);
2059         if (cluster->block_group != block_group)
2060                 goto out;
2061
2062         cluster->block_group = NULL;
2063         cluster->window_start = 0;
2064         list_del_init(&cluster->block_group_list);
2065
2066         node = rb_first(&cluster->root);
2067         while (node) {
2068                 bool bitmap;
2069
2070                 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2071                 node = rb_next(&entry->offset_index);
2072                 rb_erase(&entry->offset_index, &cluster->root);
2073
2074                 bitmap = (entry->bitmap != NULL);
2075                 if (!bitmap)
2076                         try_merge_free_space(ctl, entry, false);
2077                 tree_insert_offset(&ctl->free_space_offset,
2078                                    entry->offset, &entry->offset_index, bitmap);
2079         }
2080         cluster->root = RB_ROOT;
2081
2082 out:
2083         spin_unlock(&cluster->lock);
2084         btrfs_put_block_group(block_group);
2085         return 0;
2086 }
2087
2088 void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
2089 {
2090         struct btrfs_free_space *info;
2091         struct rb_node *node;
2092
2093         while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
2094                 info = rb_entry(node, struct btrfs_free_space, offset_index);
2095                 if (!info->bitmap) {
2096                         unlink_free_space(ctl, info);
2097                         kmem_cache_free(btrfs_free_space_cachep, info);
2098                 } else {
2099                         free_bitmap(ctl, info);
2100                 }
2101                 if (need_resched()) {
2102                         spin_unlock(&ctl->tree_lock);
2103                         cond_resched();
2104                         spin_lock(&ctl->tree_lock);
2105                 }
2106         }
2107 }
2108
2109 void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
2110 {
2111         spin_lock(&ctl->tree_lock);
2112         __btrfs_remove_free_space_cache_locked(ctl);
2113         spin_unlock(&ctl->tree_lock);
2114 }
2115
2116 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
2117 {
2118         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2119         struct btrfs_free_cluster *cluster;
2120         struct list_head *head;
2121
2122         spin_lock(&ctl->tree_lock);
2123         while ((head = block_group->cluster_list.next) !=
2124                &block_group->cluster_list) {
2125                 cluster = list_entry(head, struct btrfs_free_cluster,
2126                                      block_group_list);
2127
2128                 WARN_ON(cluster->block_group != block_group);
2129                 __btrfs_return_cluster_to_free_space(block_group, cluster);
2130                 if (need_resched()) {
2131                         spin_unlock(&ctl->tree_lock);
2132                         cond_resched();
2133                         spin_lock(&ctl->tree_lock);
2134                 }
2135         }
2136         __btrfs_remove_free_space_cache_locked(ctl);
2137         spin_unlock(&ctl->tree_lock);
2138
2139 }
2140
2141 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
2142                                u64 offset, u64 bytes, u64 empty_size)
2143 {
2144         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2145         struct btrfs_free_space *entry = NULL;
2146         u64 bytes_search = bytes + empty_size;
2147         u64 ret = 0;
2148
2149         spin_lock(&ctl->tree_lock);
2150         entry = find_free_space(ctl, &offset, &bytes_search);
2151         if (!entry)
2152                 goto out;
2153
2154         ret = offset;
2155         if (entry->bitmap) {
2156                 bitmap_clear_bits(ctl, entry, offset, bytes);
2157                 if (!entry->bytes)
2158                         free_bitmap(ctl, entry);
2159         } else {
2160                 unlink_free_space(ctl, entry);
2161                 entry->offset += bytes;
2162                 entry->bytes -= bytes;
2163                 if (!entry->bytes)
2164                         kmem_cache_free(btrfs_free_space_cachep, entry);
2165                 else
2166                         link_free_space(ctl, entry);
2167         }
2168
2169 out:
2170         spin_unlock(&ctl->tree_lock);
2171
2172         return ret;
2173 }
2174
2175 /*
2176  * given a cluster, put all of its extents back into the free space
2177  * cache.  If a block group is passed, this function will only free
2178  * a cluster that belongs to the passed block group.
2179  *
2180  * Otherwise, it'll get a reference on the block group pointed to by the
2181  * cluster and remove the cluster from it.
2182  */
2183 int btrfs_return_cluster_to_free_space(
2184                                struct btrfs_block_group_cache *block_group,
2185                                struct btrfs_free_cluster *cluster)
2186 {
2187         struct btrfs_free_space_ctl *ctl;
2188         int ret;
2189
2190         /* first, get a safe pointer to the block group */
2191         spin_lock(&cluster->lock);
2192         if (!block_group) {
2193                 block_group = cluster->block_group;
2194                 if (!block_group) {
2195                         spin_unlock(&cluster->lock);
2196                         return 0;
2197                 }
2198         } else if (cluster->block_group != block_group) {
2199                 /* someone else has already freed it, don't redo their work */
2200                 spin_unlock(&cluster->lock);
2201                 return 0;
2202         }
2203         atomic_inc(&block_group->count);
2204         spin_unlock(&cluster->lock);
2205
2206         ctl = block_group->free_space_ctl;
2207
2208         /* now return any extents the cluster had on it */
2209         spin_lock(&ctl->tree_lock);
2210         ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
2211         spin_unlock(&ctl->tree_lock);
2212
2213         /* finally drop our ref */
2214         btrfs_put_block_group(block_group);
2215         return ret;
2216 }
2217
2218 static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
2219                                    struct btrfs_free_cluster *cluster,
2220                                    struct btrfs_free_space *entry,
2221                                    u64 bytes, u64 min_start)
2222 {
2223         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2224         int err;
2225         u64 search_start;
2226         u64 search_bytes;
2227         u64 ret = 0;
2228
2229         search_start = min_start;
2230         search_bytes = bytes;
2231
2232         err = search_bitmap(ctl, entry, &search_start, &search_bytes);
2233         if (err)
2234                 return 0;
2235
2236         ret = search_start;
2237         __bitmap_clear_bits(ctl, entry, ret, bytes);
2238
2239         return ret;
2240 }
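/*
 * On success this returns the start of a run of at least 'bytes' found
 * at or after min_start, and clears exactly 'bytes' worth of bits via
 * __bitmap_clear_bits(); the caller, btrfs_alloc_from_cluster(), then
 * charges those bytes against ctl->free_space under ctl->tree_lock.
 */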
2241
2242 /*
2243  * given a cluster, try to allocate 'bytes' from it, returns 0
2244  * if it couldn't find anything suitably large, or a logical disk offset
2245  * if things worked out
2246  */
2247 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
2248                              struct btrfs_free_cluster *cluster, u64 bytes,
2249                              u64 min_start)
2250 {
2251         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2252         struct btrfs_free_space *entry = NULL;
2253         struct rb_node *node;
2254         u64 ret = 0;
2255
2256         spin_lock(&cluster->lock);
2257         if (bytes > cluster->max_size)
2258                 goto out;
2259
2260         if (cluster->block_group != block_group)
2261                 goto out;
2262
2263         node = rb_first(&cluster->root);
2264         if (!node)
2265                 goto out;
2266
2267         entry = rb_entry(node, struct btrfs_free_space, offset_index);
2268         while (1) {
2269                 if (entry->bytes < bytes ||
2270                     (!entry->bitmap && entry->offset < min_start)) {
2271                         node = rb_next(&entry->offset_index);
2272                         if (!node)
2273                                 break;
2274                         entry = rb_entry(node, struct btrfs_free_space,
2275                                          offset_index);
2276                         continue;
2277                 }
2278
2279                 if (entry->bitmap) {
2280                         ret = btrfs_alloc_from_bitmap(block_group,
2281                                                       cluster, entry, bytes,
2282                                                       cluster->window_start);
2283                         if (ret == 0) {
2284                                 node = rb_next(&entry->offset_index);
2285                                 if (!node)
2286                                         break;
2287                                 entry = rb_entry(node, struct btrfs_free_space,
2288                                                  offset_index);
2289                                 continue;
2290                         }
2291                         cluster->window_start += bytes;
2292                 } else {
2293                         ret = entry->offset;
2294
2295                         entry->offset += bytes;
2296                         entry->bytes -= bytes;
2297                 }
2298
2299                 if (entry->bytes == 0)
2300                         rb_erase(&entry->offset_index, &cluster->root);
2301                 break;
2302         }
2303 out:
2304         spin_unlock(&cluster->lock);
2305
2306         if (!ret)
2307                 return 0;
2308
2309         spin_lock(&ctl->tree_lock);
2310
2311         ctl->free_space -= bytes;
2312         if (entry->bytes == 0) {
2313                 ctl->free_extents--;
2314                 if (entry->bitmap) {
2315                         kfree(entry->bitmap);
2316                         ctl->total_bitmaps--;
2317                         ctl->op->recalc_thresholds(ctl);
2318                 }
2319                 kmem_cache_free(btrfs_free_space_cachep, entry);
2320         }
2321
2322         spin_unlock(&ctl->tree_lock);
2323
2324         return ret;
2325 }
2326
2327 static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
2328                                 struct btrfs_free_space *entry,
2329                                 struct btrfs_free_cluster *cluster,
2330                                 u64 offset, u64 bytes,
2331                                 u64 cont1_bytes, u64 min_bytes)
2332 {
2333         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2334         unsigned long next_zero;
2335         unsigned long i;
2336         unsigned long want_bits;
2337         unsigned long min_bits;
2338         unsigned long found_bits;
2339         unsigned long start = 0;
2340         unsigned long total_found = 0;
2341         int ret;
2342
2343         i = offset_to_bit(entry->offset, block_group->sectorsize,
2344                           max_t(u64, offset, entry->offset));
2345         want_bits = bytes_to_bits(bytes, block_group->sectorsize);
2346         min_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
2347
2348 again:
2349         found_bits = 0;
2350         for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
2351              i < BITS_PER_BITMAP;
2352              i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
2353                 next_zero = find_next_zero_bit(entry->bitmap,
2354                                                BITS_PER_BITMAP, i);
2355                 if (next_zero - i >= min_bits) {
2356                         found_bits = next_zero - i;
2357                         break;
2358                 }
2359                 i = next_zero;
2360         }
2361
2362         if (!found_bits)
2363                 return -ENOSPC;
2364
2365         if (!total_found) {
2366                 start = i;
2367                 cluster->max_size = 0;
2368         }
2369
2370         total_found += found_bits;
2371
2372         if (cluster->max_size < found_bits * block_group->sectorsize)
2373                 cluster->max_size = found_bits * block_group->sectorsize;
2374
2375         if (total_found < want_bits || cluster->max_size < cont1_bytes) {
2376                 i = next_zero + 1;
2377                 goto again;
2378         }
2379
2380         cluster->window_start = start * block_group->sectorsize +
2381                 entry->offset;
2382         rb_erase(&entry->offset_index, &ctl->free_space_offset);
2383         ret = tree_insert_offset(&cluster->root, entry->offset,
2384                                  &entry->offset_index, 1);
2385         BUG_ON(ret); /* -EEXIST; Logic error */
2386
2387         trace_btrfs_setup_cluster(block_group, cluster,
2388                                   total_found * block_group->sectorsize, 1);
2389         return 0;
2390 }
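/*
 * In other words, the scan accepts runs of at least min_bits, keeps
 * accumulating until it has want_bits in total and at least one run of
 * cont1_bytes, then moves the whole bitmap entry out of the free space
 * tree and into the cluster's private rbtree.
 */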
2391
2392 /*
2393  * This searches the block group for just extents to fill the cluster with.
2394  * Try to find a cluster with at least bytes total bytes, at least one
2395  * extent of cont1_bytes, and other extents of at least min_bytes.
2396  */
2397 static noinline int
2398 setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2399                         struct btrfs_free_cluster *cluster,
2400                         struct list_head *bitmaps, u64 offset, u64 bytes,
2401                         u64 cont1_bytes, u64 min_bytes)
2402 {
2403         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2404         struct btrfs_free_space *first = NULL;
2405         struct btrfs_free_space *entry = NULL;
2406         struct btrfs_free_space *last;
2407         struct rb_node *node;
2408         u64 window_start;
2409         u64 window_free;
2410         u64 max_extent;
2411         u64 total_size = 0;
2412
2413         entry = tree_search_offset(ctl, offset, 0, 1);
2414         if (!entry)
2415                 return -ENOSPC;
2416
2417         /*
2418          * We don't want bitmaps, so just move along until we find a normal
2419          * extent entry.
2420          */
2421         while (entry->bitmap || entry->bytes < min_bytes) {
2422                 if (entry->bitmap && list_empty(&entry->list))
2423                         list_add_tail(&entry->list, bitmaps);
2424                 node = rb_next(&entry->offset_index);
2425                 if (!node)
2426                         return -ENOSPC;
2427                 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2428         }
2429
2430         window_start = entry->offset;
2431         window_free = entry->bytes;
2432         max_extent = entry->bytes;
2433         first = entry;
2434         last = entry;
2435
2436         for (node = rb_next(&entry->offset_index); node;
2437              node = rb_next(&entry->offset_index)) {
2438                 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2439
2440                 if (entry->bitmap) {
2441                         if (list_empty(&entry->list))
2442                                 list_add_tail(&entry->list, bitmaps);
2443                         continue;
2444                 }
2445
2446                 if (entry->bytes < min_bytes)
2447                         continue;
2448
2449                 last = entry;
2450                 window_free += entry->bytes;
2451                 if (entry->bytes > max_extent)
2452                         max_extent = entry->bytes;
2453         }
2454
2455         if (window_free < bytes || max_extent < cont1_bytes)
2456                 return -ENOSPC;
2457
2458         cluster->window_start = first->offset;
2459
2460         node = &first->offset_index;
2461
2462         /*
2463          * now we've found our entries, pull them out of the free space
2464          * cache and put them into the cluster rbtree
2465          */
2466         do {
2467                 int ret;
2468
2469                 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2470                 node = rb_next(&entry->offset_index);
2471                 if (entry->bitmap || entry->bytes < min_bytes)
2472                         continue;
2473
2474                 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2475                 ret = tree_insert_offset(&cluster->root, entry->offset,
2476                                          &entry->offset_index, 0);
2477                 total_size += entry->bytes;
2478                 BUG_ON(ret); /* -EEXIST; Logic error */
2479         } while (node && entry != last);
2480
2481         cluster->max_size = max_extent;
2482         trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
2483         return 0;
2484 }
2485
2486 /*
2487  * This specifically looks for bitmaps that may work in the cluster, we assume
2488  * that we have already failed to find extents that will work.
2489  */
2490 static noinline int
2491 setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2492                      struct btrfs_free_cluster *cluster,
2493                      struct list_head *bitmaps, u64 offset, u64 bytes,
2494                      u64 cont1_bytes, u64 min_bytes)
2495 {
2496         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2497         struct btrfs_free_space *entry;
2498         int ret = -ENOSPC;
2499         u64 bitmap_offset = offset_to_bitmap(ctl, offset);
2500
2501         if (ctl->total_bitmaps == 0)
2502                 return -ENOSPC;
2503
2504         /*
2505          * The bitmap that covers offset won't be in the list unless offset
2506          * is just its start offset.
2507          */
2508         entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
2509         if (entry->offset != bitmap_offset) {
2510                 entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
2511                 if (entry && list_empty(&entry->list))
2512                         list_add(&entry->list, bitmaps);
2513         }
2514
2515         list_for_each_entry(entry, bitmaps, list) {
2516                 if (entry->bytes < bytes)
2517                         continue;
2518                 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
2519                                            bytes, cont1_bytes, min_bytes);
2520                 if (!ret)
2521                         return 0;
2522         }
2523
2524         /*
2525          * The bitmaps list has all the bitmaps that record free space
2526          * starting after offset, so no more search is required.
2527          */
2528         return -ENOSPC;
2529 }
2530
2531 /*
2532  * here we try to find a cluster of blocks in a block group.  The goal
2533  * is to find at least bytes+empty_size.
2534  * We might not find them all in one contiguous area.
2535  *
2536  * returns zero and sets up cluster if things worked out, otherwise
2537  * it returns -ENOSPC
2538  */
2539 int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2540                              struct btrfs_root *root,
2541                              struct btrfs_block_group_cache *block_group,
2542                              struct btrfs_free_cluster *cluster,
2543                              u64 offset, u64 bytes, u64 empty_size)
2544 {
2545         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2546         struct btrfs_free_space *entry, *tmp;
2547         LIST_HEAD(bitmaps);
2548         u64 min_bytes;
2549         u64 cont1_bytes;
2550         int ret;
2551
2552         /*
2553          * Choose the minimum extent size we'll require for this
2554          * cluster.  For SSD_SPREAD, don't allow any fragmentation.
2555          * For metadata, allow allocations with smaller extents.  For
2556          * data, keep it dense.
2557          */
2558         if (btrfs_test_opt(root, SSD_SPREAD)) {
2559                 cont1_bytes = min_bytes = bytes + empty_size;
2560         } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
2561                 cont1_bytes = bytes;
2562                 min_bytes = block_group->sectorsize;
2563         } else {
2564                 cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
2565                 min_bytes = block_group->sectorsize;
2566         }
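        /*
         * Example (illustrative): a 1MiB data allocation with a 1MiB
         * empty_size asks for cont1_bytes = max(1MiB, 2MiB >> 2) = 1MiB
         * and min_bytes of one sector, whereas with SSD_SPREAD both
         * cont1_bytes and min_bytes become the full 2MiB, i.e. one
         * completely unfragmented chunk.
         */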
2567
2568         spin_lock(&ctl->tree_lock);
2569
2570         /*
2571          * If we know we don't have enough space to make a cluster don't even
2572          * bother doing all the work to try and find one.
2573          */
2574         if (ctl->free_space < bytes) {
2575                 spin_unlock(&ctl->tree_lock);
2576                 return -ENOSPC;
2577         }
2578
2579         spin_lock(&cluster->lock);
2580
2581         /* someone already found a cluster, hooray */
2582         if (cluster->block_group) {
2583                 ret = 0;
2584                 goto out;
2585         }
2586
2587         trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
2588                                  min_bytes);
2589
2590         INIT_LIST_HEAD(&bitmaps);
2591         ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
2592                                       bytes + empty_size,
2593                                       cont1_bytes, min_bytes);
2594         if (ret)
2595                 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
2596                                            offset, bytes + empty_size,
2597                                            cont1_bytes, min_bytes);
2598
2599         /* Clear our temporary list */
2600         list_for_each_entry_safe(entry, tmp, &bitmaps, list)
2601                 list_del_init(&entry->list);
2602
2603         if (!ret) {
2604                 atomic_inc(&block_group->count);
2605                 list_add_tail(&cluster->block_group_list,
2606                               &block_group->cluster_list);
2607                 cluster->block_group = block_group;
2608         } else {
2609                 trace_btrfs_failed_cluster_setup(block_group);
2610         }
2611 out:
2612         spin_unlock(&cluster->lock);
2613         spin_unlock(&ctl->tree_lock);
2614
2615         return ret;
2616 }
2617
2618 /*
2619  * simple code to zero out a cluster
2620  */
2621 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
2622 {
2623         spin_lock_init(&cluster->lock);
2624         spin_lock_init(&cluster->refill_lock);
2625         cluster->root = RB_ROOT;
2626         cluster->max_size = 0;
2627         INIT_LIST_HEAD(&cluster->block_group_list);
2628         cluster->block_group = NULL;
2629 }
2630
2631 static int do_trimming(struct btrfs_block_group_cache *block_group,
2632                        u64 *total_trimmed, u64 start, u64 bytes,
2633                        u64 reserved_start, u64 reserved_bytes)
2634 {
2635         struct btrfs_space_info *space_info = block_group->space_info;
2636         struct btrfs_fs_info *fs_info = block_group->fs_info;
2637         int ret;
2638         int update = 0;
2639         u64 trimmed = 0;
2640
2641         spin_lock(&space_info->lock);
2642         spin_lock(&block_group->lock);
2643         if (!block_group->ro) {
2644                 block_group->reserved += reserved_bytes;
2645                 space_info->bytes_reserved += reserved_bytes;
2646                 update = 1;
2647         }
2648         spin_unlock(&block_group->lock);
2649         spin_unlock(&space_info->lock);
2650
2651         ret = btrfs_error_discard_extent(fs_info->extent_root,
2652                                          start, bytes, &trimmed);
2653         if (!ret)
2654                 *total_trimmed += trimmed;
2655
2656         btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
2657
2658         if (update) {
2659                 spin_lock(&space_info->lock);
2660                 spin_lock(&block_group->lock);
2661                 if (block_group->ro)
2662                         space_info->bytes_readonly += reserved_bytes;
2663                 block_group->reserved -= reserved_bytes;
2664                 space_info->bytes_reserved -= reserved_bytes;
2665                 spin_unlock(&space_info->lock);
2666                 spin_unlock(&block_group->lock);
2667         }
2668
2669         return ret;
2670 }
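/*
 * The sequence above is: reserve the range so the allocator can't hand
 * it out mid-discard, issue the discard, give the space back with
 * btrfs_add_free_space(), then drop the reservation.  The reserved_*
 * pair may be larger than the trimmed start/bytes when only part of an
 * extent falls inside the trim range (see trim_no_bitmap()).
 */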
2671
2672 static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
2673                           u64 *total_trimmed, u64 start, u64 end, u64 minlen)
2674 {
2675         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2676         struct btrfs_free_space *entry;
2677         struct rb_node *node;
2678         int ret = 0;
2679         u64 extent_start;
2680         u64 extent_bytes;
2681         u64 bytes;
2682
2683         while (start < end) {
2684                 spin_lock(&ctl->tree_lock);
2685
2686                 if (ctl->free_space < minlen) {
2687                         spin_unlock(&ctl->tree_lock);
2688                         break;
2689                 }
2690
2691                 entry = tree_search_offset(ctl, start, 0, 1);
2692                 if (!entry) {
2693                         spin_unlock(&ctl->tree_lock);
2694                         break;
2695                 }
2696
2697                 /* skip bitmaps */
2698                 while (entry->bitmap) {
2699                         node = rb_next(&entry->offset_index);
2700                         if (!node) {
2701                                 spin_unlock(&ctl->tree_lock);
2702                                 goto out;
2703                         }
2704                         entry = rb_entry(node, struct btrfs_free_space,
2705                                          offset_index);
2706                 }
2707
2708                 if (entry->offset >= end) {
2709                         spin_unlock(&ctl->tree_lock);
2710                         break;
2711                 }
2712
2713                 extent_start = entry->offset;
2714                 extent_bytes = entry->bytes;
2715                 start = max(start, extent_start);
2716                 bytes = min(extent_start + extent_bytes, end) - start;
2717                 if (bytes < minlen) {
2718                         spin_unlock(&ctl->tree_lock);
2719                         goto next;
2720                 }
2721
2722                 unlink_free_space(ctl, entry);
2723                 kmem_cache_free(btrfs_free_space_cachep, entry);
2724
2725                 spin_unlock(&ctl->tree_lock);
2726
2727                 ret = do_trimming(block_group, total_trimmed, start, bytes,
2728                                   extent_start, extent_bytes);
2729                 if (ret)
2730                         break;
2731 next:
2732                 start += bytes;
2733
2734                 if (fatal_signal_pending(current)) {
2735                         ret = -ERESTARTSYS;
2736                         break;
2737                 }
2738
2739                 cond_resched();
2740         }
2741 out:
2742         return ret;
2743 }
2744
2745 static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
2746                         u64 *total_trimmed, u64 start, u64 end, u64 minlen)
2747 {
2748         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2749         struct btrfs_free_space *entry;
2750         int ret = 0;
2751         int ret2;
2752         u64 bytes;
2753         u64 offset = offset_to_bitmap(ctl, start);
2754
2755         while (offset < end) {
2756                 bool next_bitmap = false;
2757
2758                 spin_lock(&ctl->tree_lock);
2759
2760                 if (ctl->free_space < minlen) {
2761                         spin_unlock(&ctl->tree_lock);
2762                         break;
2763                 }
2764
2765                 entry = tree_search_offset(ctl, offset, 1, 0);
2766                 if (!entry) {
2767                         spin_unlock(&ctl->tree_lock);
2768                         next_bitmap = true;
2769                         goto next;
2770                 }
2771
2772                 bytes = minlen;
2773                 ret2 = search_bitmap(ctl, entry, &start, &bytes);
2774                 if (ret2 || start >= end) {
2775                         spin_unlock(&ctl->tree_lock);
2776                         next_bitmap = true;
2777                         goto next;
2778                 }
2779
2780                 bytes = min(bytes, end - start);
2781                 if (bytes < minlen) {
2782                         spin_unlock(&ctl->tree_lock);
2783                         goto next;
2784                 }
2785
2786                 bitmap_clear_bits(ctl, entry, start, bytes);
2787                 if (entry->bytes == 0)
2788                         free_bitmap(ctl, entry);
2789
2790                 spin_unlock(&ctl->tree_lock);
2791
2792                 ret = do_trimming(block_group, total_trimmed, start, bytes,
2793                                   start, bytes);
2794                 if (ret)
2795                         break;
2796 next:
2797                 if (next_bitmap) {
2798                         offset += BITS_PER_BITMAP * ctl->unit;
2799                 } else {
2800                         start += bytes;
2801                         if (start >= offset + BITS_PER_BITMAP * ctl->unit)
2802                                 offset += BITS_PER_BITMAP * ctl->unit;
2803                 }
2804
2805                 if (fatal_signal_pending(current)) {
2806                         ret = -ERESTARTSYS;
2807                         break;
2808                 }
2809
2810                 cond_resched();
2811         }
2812
2813         return ret;
2814 }
2815
2816 int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2817                            u64 *trimmed, u64 start, u64 end, u64 minlen)
2818 {
2819         int ret;
2820
2821         *trimmed = 0;
2822
2823         ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
2824         if (ret)
2825                 return ret;
2826
2827         ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
2828
2829         return ret;
2830 }
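/*
 * Trimming is two passes over the same range: extent entries first
 * (trim_no_bitmap), then bitmap-backed free space (trim_bitmaps).
 * Both passes drop ctl->tree_lock around the actual discard, so free
 * space can be consumed concurrently; whatever was removed is re-added
 * by do_trimming() once the discard finishes.
 */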
2831
2832 /*
2833  * Find the left-most item in the cache tree, and then return the
2834  * smallest inode number in the item.
2835  *
2836  * Note: the returned inode number may not be the smallest one in
2837  * the tree, if the left-most item is a bitmap.
2838  */
2839 u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
2840 {
2841         struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
2842         struct btrfs_free_space *entry = NULL;
2843         u64 ino = 0;
2844
2845         spin_lock(&ctl->tree_lock);
2846
2847         if (RB_EMPTY_ROOT(&ctl->free_space_offset))
2848                 goto out;
2849
2850         entry = rb_entry(rb_first(&ctl->free_space_offset),
2851                          struct btrfs_free_space, offset_index);
2852
2853         if (!entry->bitmap) {
2854                 ino = entry->offset;
2855
2856                 unlink_free_space(ctl, entry);
2857                 entry->offset++;
2858                 entry->bytes--;
2859                 if (!entry->bytes)
2860                         kmem_cache_free(btrfs_free_space_cachep, entry);
2861                 else
2862                         link_free_space(ctl, entry);
2863         } else {
2864                 u64 offset = 0;
2865                 u64 count = 1;
2866                 int ret;
2867
2868                 ret = search_bitmap(ctl, entry, &offset, &count);
2869                 /* Logic error; Should be empty if it can't find anything */
2870                 BUG_ON(ret);
2871
2872                 ino = offset;
2873                 bitmap_clear_bits(ctl, entry, offset, 1);
2874                 if (entry->bytes == 0)
2875                         free_bitmap(ctl, entry);
2876         }
2877 out:
2878         spin_unlock(&ctl->tree_lock);
2879
2880         return ino;
2881 }
2882
2883 struct inode *lookup_free_ino_inode(struct btrfs_root *root,
2884                                     struct btrfs_path *path)
2885 {
2886         struct inode *inode = NULL;
2887
2888         spin_lock(&root->cache_lock);
2889         if (root->cache_inode)
2890                 inode = igrab(root->cache_inode);
2891         spin_unlock(&root->cache_lock);
2892         if (inode)
2893                 return inode;
2894
2895         inode = __lookup_free_space_inode(root, path, 0);
2896         if (IS_ERR(inode))
2897                 return inode;
2898
2899         spin_lock(&root->cache_lock);
2900         if (!btrfs_fs_closing(root->fs_info))
2901                 root->cache_inode = igrab(inode);
2902         spin_unlock(&root->cache_lock);
2903
2904         return inode;
2905 }
2906
2907 int create_free_ino_inode(struct btrfs_root *root,
2908                           struct btrfs_trans_handle *trans,
2909                           struct btrfs_path *path)
2910 {
2911         return __create_free_space_inode(root, trans, path,
2912                                          BTRFS_FREE_INO_OBJECTID, 0);
2913 }
2914
2915 int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2916 {
2917         struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2918         struct btrfs_path *path;
2919         struct inode *inode;
2920         int ret = 0;
2921         u64 root_gen = btrfs_root_generation(&root->root_item);
2922
2923         if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2924                 return 0;
2925
2926         /*
2927          * If we're unmounting then just return, since this does a search on the
2928          * normal root and not the commit root and we could deadlock.
2929          */
2930         if (btrfs_fs_closing(fs_info))
2931                 return 0;
2932
2933         path = btrfs_alloc_path();
2934         if (!path)
2935                 return 0;
2936
2937         inode = lookup_free_ino_inode(root, path);
2938         if (IS_ERR(inode))
2939                 goto out;
2940
2941         if (root_gen != BTRFS_I(inode)->generation)
2942                 goto out_put;
2943
2944         ret = __load_free_space_cache(root, inode, ctl, path, 0);
2945
2946         if (ret < 0)
2947                 printk(KERN_ERR "btrfs: failed to load free ino cache for "
2948                        "root %llu\n", root->root_key.objectid);
2949 out_put:
2950         iput(inode);
2951 out:
2952         btrfs_free_path(path);
2953         return ret;
2954 }
2955
2956 int btrfs_write_out_ino_cache(struct btrfs_root *root,
2957                               struct btrfs_trans_handle *trans,
2958                               struct btrfs_path *path)
2959 {
2960         struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2961         struct inode *inode;
2962         int ret;
2963
2964         if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2965                 return 0;
2966
2967         inode = lookup_free_ino_inode(root, path);
2968         if (IS_ERR(inode))
2969                 return 0;
2970
2971         ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
2972         if (ret) {
2973                 btrfs_delalloc_release_metadata(inode, inode->i_size);
2974 #ifdef DEBUG
2975                 printk(KERN_ERR "btrfs: failed to write free ino cache "
2976                        "for root %llu\n", root->root_key.objectid);
2977 #endif
2978         }
2979
2980         iput(inode);
2981         return ret;
2982 }