/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

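/*
 * The bookkeeping is split in two levels: a btrfs_delayed_ref_head is
 * kept per extent (indexed by bytenr in delayed_refs->href_root), and
 * the individual add/drop operations for that extent hang off the head
 * in head->ref_list.  Everything below manipulates these two structures
 * under delayed_refs->lock and the per-head lock/mutex.
 */
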
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
                          struct btrfs_delayed_tree_ref *ref1, int type)
{
        if (type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
                          struct btrfs_delayed_data_ref *ref1)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * insert a new ref head into the rbtree; if a head for this bytenr is
 * already present, return it without inserting so the caller can merge
 */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->node.bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->node.bytenr)
                        p = &(*p)->rb_left;
                else if (bytenr > entry->node.bytenr)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
              int return_bigger)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

                if (bytenr < entry->node.bytenr)
                        n = n->rb_left;
                else if (bytenr > entry->node.bytenr)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (bytenr > entry->node.bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                n = rb_first(root);
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                        return entry;
                }
                return entry;
        }
        return NULL;
}

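/*
 * Take head->mutex for the caller.  Must be called with
 * delayed_refs->lock held.  If the mutex cannot be acquired without
 * sleeping, delayed_refs->lock is dropped while we block on the mutex
 * and re-taken afterwards; -EAGAIN is returned if the head was run and
 * removed from the tree in the meantime, so the caller has to start
 * over.
 */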
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        assert_spin_locked(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        atomic_inc(&head->node.refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (!head->node.in_tree) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref(&head->node);
        return 0;
}

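/*
 * Unlink one delayed ref node and drop the reference the tree held on
 * it.  A head node is erased from the href rbtree; an ordinary ref is
 * removed from its head's ref_list (which requires head->lock).
 */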
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_head *head,
                                    struct btrfs_delayed_ref_node *ref)
{
        if (btrfs_delayed_ref_is_head(ref)) {
                head = btrfs_delayed_node_to_head(ref);
                rb_erase(&head->href_node, &delayed_refs->href_root);
        } else {
                assert_spin_locked(&head->lock);
                list_del(&ref->list);
        }
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
        if (trans->delayed_ref_updates)
                trans->delayed_ref_updates--;
}

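/*
 * Return 1 if the given seq is at or beyond the oldest entry on
 * fs_info->tree_mod_seq_list, which means a tree mod log user still
 * needs the older state and processing of this delayed ref has to be
 * held back; return 0 if it is safe to process.
 */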
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq)
{
        struct seq_list *elem;
        int ret = 0;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                if (seq >= elem->seq) {
                        pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
                                 (u32)(seq >> 32), (u32)seq,
                                 (u32)(elem->seq >> 32), (u32)elem->seq,
                                 delayed_refs);
                        ret = 1;
                }
        }

        spin_unlock(&fs_info->tree_mod_seq_lock);
        return ret;
}

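/*
 * Pick the next delayed ref head to run.  Selection starts at
 * delayed_refs->run_delayed_start and walks the href rbtree in bytenr
 * order, wrapping around to the beginning at most once, so heads are
 * processed roughly round-robin.  Heads already marked as processing
 * are skipped.  The returned head is marked processing and
 * run_delayed_start is advanced past it; NULL is returned if no
 * unprocessed head is left.
 */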
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_head *head;
        u64 start;
        bool loop = false;

        delayed_refs = &trans->transaction->delayed_refs;

again:
        start = delayed_refs->run_delayed_start;
        head = find_ref_head(&delayed_refs->href_root, start, 1);
        if (!head && !loop) {
                delayed_refs->run_delayed_start = 0;
                start = 0;
                loop = true;
                head = find_ref_head(&delayed_refs->href_root, start, 1);
                if (!head)
                        return NULL;
        } else if (!head && loop) {
                return NULL;
        }

        while (head->processing) {
                struct rb_node *node;

                node = rb_next(&head->href_node);
                if (!node) {
                        if (loop)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
                        start = 0;
                        loop = true;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }

        head->processing = 1;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->node.bytenr +
                head->node.num_bytes;
        return head;
}

/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_root *root,
                           struct btrfs_delayed_ref_head *href,
                           struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_node *exist;
        int mod;
        int ret = 0;

        spin_lock(&href->lock);
        /* Check whether we can merge the tail node with ref */
        if (list_empty(&href->ref_list))
                goto add_tail;
        exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
                           list);
        /* No need to compare bytenr or is_head */
        if (exist->type != ref->type || exist->no_quota != ref->no_quota ||
            exist->seq != ref->seq)
                goto add_tail;

        if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
             exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
            comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
                           btrfs_delayed_node_to_tree_ref(ref),
                           ref->type))
                goto add_tail;
        if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
             exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
            comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
                           btrfs_delayed_node_to_data_ref(ref)))
                goto add_tail;

        /* Now we are sure we can merge */
        ret = 1;
        if (exist->action == ref->action) {
                mod = ref->ref_mod;
        } else {
                /* Need to change action */
                if (exist->ref_mod < ref->ref_mod) {
                        exist->action = ref->action;
                        mod = -exist->ref_mod;
                        exist->ref_mod = ref->ref_mod;
                } else
                        mod = -ref->ref_mod;
        }
        exist->ref_mod += mod;

        /* remove existing tail if its ref_mod is zero */
        if (exist->ref_mod == 0)
                drop_delayed_ref(trans, root, href, exist);
        spin_unlock(&href->lock);
        return ret;

add_tail:
        list_add_tail(&ref->list, &href->ref_list);
        atomic_inc(&root->num_entries);
        trans->delayed_ref_updates++;
        spin_unlock(&href->lock);
        return ret;
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
                         struct btrfs_delayed_ref_node *existing,
                         struct btrfs_delayed_ref_node *update)
{
        struct btrfs_delayed_ref_head *existing_ref;
        struct btrfs_delayed_ref_head *ref;
        int old_ref_mod;

        existing_ref = btrfs_delayed_node_to_head(existing);
        ref = btrfs_delayed_node_to_head(update);
        BUG_ON(existing_ref->is_data != ref->is_data);

        spin_lock(&existing_ref->lock);
        if (ref->must_insert_reserved) {
                /* if the extent was freed and then
                 * reallocated before the delayed ref
                 * entries were processed, we can end up
                 * with an existing head ref without
                 * the must_insert_reserved flag set.
                 * Set it again here
                 */
                existing_ref->must_insert_reserved = ref->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;

        }

        if (ref->extent_op) {
                if (!existing_ref->extent_op) {
                        existing_ref->extent_op = ref->extent_op;
                } else {
                        if (ref->extent_op->update_key) {
                                memcpy(&existing_ref->extent_op->key,
                                       &ref->extent_op->key,
                                       sizeof(ref->extent_op->key));
                                existing_ref->extent_op->update_key = 1;
                        }
                        if (ref->extent_op->update_flags) {
                                existing_ref->extent_op->flags_to_set |=
                                        ref->extent_op->flags_to_set;
                                existing_ref->extent_op->update_flags = 1;
                        }
                        btrfs_free_delayed_extent_op(ref->extent_op);
                }
        }
        /*
         * update the reference mod on the head to reflect this new operation,
         * only need the lock for this case because we could be processing it
         * currently, for refs we just added we know we're a-ok.
         */
        old_ref_mod = existing_ref->total_ref_mod;
        existing->ref_mod += update->ref_mod;
        existing_ref->total_ref_mod += update->ref_mod;

        /*
         * If we are going from a positive ref mod to a negative or vice
         * versa we need to make sure to adjust pending_csums accordingly.
         */
        if (existing_ref->is_data) {
                if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
                        delayed_refs->pending_csums -= existing->num_bytes;
                if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
                        delayed_refs->pending_csums += existing->num_bytes;
        }
        spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, int action, int is_data)
{
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
        int count_mod = 1;
        int must_insert_reserved = 0;

        /*
         * the head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update
         * the reserved accounting when the extent is finally added, or
         * if a later modification deletes the delayed ref without ever
         * inserting the extent into the extent allocation tree.
         * ref->must_insert_reserved is the flag used to record
         * that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = count_mod;
        ref->type = 0;
        ref->action = 0;
        ref->is_head = 1;
        ref->in_tree = 1;
        ref->seq = 0;

        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        INIT_LIST_HEAD(&head_ref->ref_list);
        head_ref->processing = 0;
        head_ref->total_ref_mod = count_mod;

        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        trace_add_delayed_ref_head(ref, head_ref, action);

        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
                update_existing_head_ref(delayed_refs, &existing->node, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
                if (is_data && count_mod < 0)
                        delayed_refs->pending_csums += num_bytes;
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, int level,
                     int action, int no_quota)
{
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);
        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->no_quota = no_quota;
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_tree_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
        full_ref->level = level;

        trace_add_delayed_tree_ref(ref, full_ref, action);

        ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

        /*
         * XXX: memory should be freed at the same level it was allocated,
         * but this bad practice exists elsewhere too.  Follow it for now;
         * needs cleanup.
         */
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
                     u64 offset, int action, int no_quota)
{
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->no_quota = no_quota;
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_data_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;

        full_ref->objectid = owner;
        full_ref->offset = offset;

        trace_add_delayed_data_ref(ref, full_ref, action);

        ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

        if (ret > 0)
                kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int no_quota)
{
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        if (!is_fstree(ref_root) || !fs_info->quota_enabled)
                no_quota = 0;

        BUG_ON(extent_op && extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                return -ENOMEM;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
                                        bytenr, num_bytes, action, 0);

        add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, level, action,
                             no_quota);
        spin_unlock(&delayed_refs->lock);

        return 0;
}

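/*
 * Illustrative only (not a call site in this file): a caller that has
 * just added a reference to a tree block at 'bytenr' would queue it
 * roughly like this, under its own transaction handle:
 *
 *      ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, num_bytes,
 *                                       parent, ref_root, level,
 *                                       BTRFS_ADD_DELAYED_REF, NULL, 0);
 *
 * The head and the individual ref are inserted under a single
 * acquisition of delayed_refs->lock, so a ref is never visible without
 * its head.
 */
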
/*
 * add a delayed data ref.  It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int no_quota)
{
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        if (!is_fstree(ref_root) || !fs_info->quota_enabled)
                no_quota = 0;

        BUG_ON(extent_op && !extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
                                        bytenr, num_bytes, action, 1);

        add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, owner, offset,
                             action, no_quota);
        spin_unlock(&delayed_refs->lock);

        return 0;
}

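/*
 * Record a pending metadata update (key and/or flags) for the extent at
 * bytenr by queueing a head-only delayed ref with
 * BTRFS_UPDATE_DELAYED_HEAD, i.e. without changing the reference count.
 */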
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
                             num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
                             extent_op->is_data);

        spin_unlock(&delayed_refs->lock);
        return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

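/*
 * Free the slab caches for the delayed ref structures.  Safe to call
 * even if btrfs_delayed_ref_init() failed part way through, since each
 * cache pointer is checked before it is destroyed.
 */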
void btrfs_delayed_ref_exit(void)
{
        if (btrfs_delayed_ref_head_cachep)
                kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        if (btrfs_delayed_tree_ref_cachep)
                kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        if (btrfs_delayed_data_ref_cachep)
                kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        if (btrfs_delayed_extent_op_cachep)
                kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

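/*
 * Create the slab caches used for delayed ref heads, tree refs, data
 * refs and extent ops.  Returns 0 on success or -ENOMEM on failure,
 * tearing down any caches that were already created.
 */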
int btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                                "btrfs_delayed_ref_head",
                                sizeof(struct btrfs_delayed_ref_head), 0,
                                SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_tree_ref",
                                sizeof(struct btrfs_delayed_tree_ref), 0,
                                SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_data_ref",
                                sizeof(struct btrfs_delayed_data_ref), 0,
                                SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                                "btrfs_delayed_extent_op",
                                sizeof(struct btrfs_delayed_extent_op), 0,
                                SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}