Merge tag 'llvmlinux-for-v3.15' of git://git.linuxfoundation.org/llvmlinux/kernel
mm/truncate.c
index 2e84fe59190b7e8073fe96c25fc2e9212c5b9da7..e5cc39ab0751f08b30691c3e7e8c98e32ded409c 100644
@@ -25,6 +25,9 @@
 static void clear_exceptional_entry(struct address_space *mapping,
                                    pgoff_t index, void *entry)
 {
+       struct radix_tree_node *node;
+       void **slot;
+
        /* Handled by shmem itself */
        if (shmem_mapping(mapping))
                return;
@@ -35,7 +38,27 @@ static void clear_exceptional_entry(struct address_space *mapping,
         * without the tree itself locked.  These unlocked entries
         * need verification under the tree lock.
         */
-       radix_tree_delete_item(&mapping->page_tree, index, entry);
+       if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
+               goto unlock;
+       if (*slot != entry)
+               goto unlock;
+       radix_tree_replace_slot(slot, NULL);
+       mapping->nrshadows--;
+       if (!node)
+               goto unlock;
+       workingset_node_shadows_dec(node);
+       /*
+        * Don't track nodes without shadow entries.
+        *
+        * Avoid acquiring the list_lru lock if already untracked.
+        * The list_empty() test is safe as node->private_list is
+        * protected by mapping->tree_lock.
+        */
+       if (!workingset_node_shadows(node) &&
+           !list_empty(&node->private_list))
+               list_lru_del(&workingset_shadow_nodes, &node->private_list);
+       __radix_tree_delete_node(&mapping->page_tree, node);
+unlock:
        spin_unlock_irq(&mapping->tree_lock);
 }
 
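For context (an editorial sketch, not part of this patch): the unlocked entries mentioned above come from callers such as truncate_inode_pages_range() in this file, which gather pages and shadow entries with a lockless pagevec lookup and only then call clear_exceptional_entry(). A simplified version of that caller loop, with names following the surrounding file:

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i];
		pgoff_t index = indices[i];

		if (radix_tree_exceptional_entry(page)) {
			/* Shadow entry: no page lock exists to stabilize it */
			clear_exceptional_entry(mapping, index, page);
			continue;
		}
		/* ... regular pages are stabilized by lock_page() ... */
	}

Because that lookup runs without mapping->tree_lock, the entry may have been replaced by a page (or another shadow entry) in the meantime, which is exactly what the *slot != entry check above guards against.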
@@ -229,7 +252,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
        int             i;
 
        cleancache_invalidate_inode(mapping);
-       if (mapping->nrpages == 0)
+       if (mapping->nrpages == 0 && mapping->nrshadows == 0)
                return;
 
        /* Offsets within partial pages */
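The extra nrshadows test matters because reclaim replaces evicted pages with shadow entries: a mapping can end up with nrpages == 0 while still holding shadow entries and the radix tree nodes that carry them, and bailing out early would leak them until the inode dies. A minimal sketch of the writer side in page_cache_tree_delete() (mm/filemap.c, as introduced by this series; details elided):

	radix_tree_replace_slot(slot, shadow);	/* page slot -> shadow entry */
	if (shadow) {
		mapping->nrshadows++;
		/*
		 * nrshadows goes up before nrpages goes down; this
		 * pairs with the smp_rmb() in truncate_inode_pages_final().
		 */
		smp_wmb();
	}
	mapping->nrpages--;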
@@ -391,6 +414,53 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
 }
 EXPORT_SYMBOL(truncate_inode_pages);
 
+/**
+ * truncate_inode_pages_final - truncate *all* pages before inode dies
+ * @mapping: mapping to truncate
+ *
+ * Called under (and serialized by) inode->i_mutex.
+ *
+ * Filesystems have to use this in the .evict_inode path to inform the
+ * VM that this is the final truncate and the inode is going away.
+ */
+void truncate_inode_pages_final(struct address_space *mapping)
+{
+       unsigned long nrshadows;
+       unsigned long nrpages;
+
+       /*
+        * Page reclaim cannot participate in regular inode lifetime
+        * management (it can't call iput()) and thus can race with
+        * inode teardown.  Tell it when the address space is exiting,
+        * so that it does not install eviction information after the
+        * final truncate has begun.
+        */
+       mapping_set_exiting(mapping);
+
+       /*
+        * When reclaim installs eviction entries, it increments
+        * nrshadows first, then decrements nrpages; the smp_rmb()
+        * below pairs with reclaim's smp_wmb() so we can't miss one.
+        */
+       nrpages = mapping->nrpages;
+       smp_rmb();
+       nrshadows = mapping->nrshadows;
+
+       if (nrpages || nrshadows) {
+               /*
+                * As truncation uses a lockless tree lookup, cycle
+                * the tree lock to make sure any ongoing tree
+                * modification that does not see AS_EXITING is
+                * completed before starting the final truncate.
+                */
+               spin_lock_irq(&mapping->tree_lock);
+               spin_unlock_irq(&mapping->tree_lock);
+
+               truncate_inode_pages(mapping, 0);
+       }
+}
+EXPORT_SYMBOL(truncate_inode_pages_final);
+
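A usage sketch for the new export (examplefs is hypothetical; the pattern follows the kernel-doc above). Filesystems call this from their .evict_inode method instead of a bare truncate_inode_pages(mapping, 0), so that AS_EXITING gets set and shadow-only mappings are torn down as well:

	static void examplefs_evict_inode(struct inode *inode)
	{
		/*
		 * Final truncate: drops pages *and* shadow entries, and
		 * sets AS_EXITING so reclaim stops installing eviction
		 * information behind our back.
		 */
		truncate_inode_pages_final(&inode->i_data);
		/* ... filesystem-private teardown ... */
		clear_inode(inode);
	}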
 /**
  * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
  * @mapping: the address_space which holds the pages to invalidate
@@ -484,7 +554,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
                goto failed;
 
        BUG_ON(page_has_private(page));
-       __delete_from_page_cache(page);
+       __delete_from_page_cache(page, NULL);
        spin_unlock_irq(&mapping->tree_lock);
        mem_cgroup_uncharge_cache_page(page);
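The new second argument to __delete_from_page_cache() is the shadow entry to leave in the page's radix tree slot; invalidation passes NULL because it wants the slot emptied rather than replaced. For contrast, a sketch of the reclaim-side caller in __remove_mapping() (mm/vmscan.c, as of this series; locking elided):

	void *shadow = NULL;

	/*
	 * Remember the page's eviction information as a shadow entry,
	 * but not when the address space is already going away -- this
	 * is the AS_EXITING handshake with truncate_inode_pages_final().
	 */
	if (reclaimed && page_is_file_cache(page) &&
	    !mapping_exiting(mapping))
		shadow = workingset_eviction(mapping, page);
	__delete_from_page_cache(page, shadow);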