Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[firefly-linux-kernel-4.4.55.git] / fs / btrfs / delayed-ref.c
index 6d16bea94e1cd1cac5c102efa8b50290c2b4e218..8f8ed7d20bac5f4e058fc693d0fdc24fde7abeda 100644 (file)
@@ -489,11 +489,13 @@ update_existing_ref(struct btrfs_trans_handle *trans,
  * existing and update must have the same bytenr
  */
 static noinline void
-update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
+update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
+                        struct btrfs_delayed_ref_node *existing,
                         struct btrfs_delayed_ref_node *update)
 {
        struct btrfs_delayed_ref_head *existing_ref;
        struct btrfs_delayed_ref_head *ref;
+       int old_ref_mod;
 
        existing_ref = btrfs_delayed_node_to_head(existing);
        ref = btrfs_delayed_node_to_head(update);
@@ -541,7 +543,20 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
         * only need the lock for this case cause we could be processing it
         * currently, for refs we just added we know we're a-ok.
         */
+       old_ref_mod = existing_ref->total_ref_mod;
        existing->ref_mod += update->ref_mod;
+       existing_ref->total_ref_mod += update->ref_mod;
+
+       /*
+        * If we are going from a positive ref mod to a negative or vice
+        * versa we need to make sure to adjust pending_csums accordingly.
+        */
+       if (existing_ref->is_data) {
+               if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
+                       delayed_refs->pending_csums -= existing->num_bytes;
+               if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
+                       delayed_refs->pending_csums += existing->num_bytes;
+       }
        spin_unlock(&existing_ref->lock);
 }
 
@@ -605,6 +620,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
        head_ref->is_data = is_data;
        head_ref->ref_root = RB_ROOT;
        head_ref->processing = 0;
+       head_ref->total_ref_mod = count_mod;
 
        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);
@@ -614,7 +630,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
-               update_existing_head_ref(&existing->node, ref);
+               update_existing_head_ref(delayed_refs, &existing->node, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
@@ -622,6 +638,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
+               if (is_data && count_mod < 0)
+                       delayed_refs->pending_csums += num_bytes;
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);