Revert "[XFS] remove old vmap cache"
author      Felix Blyakher <felixb@sgi.com>
            Wed, 18 Feb 2009 21:56:51 +0000 (15:56 -0600)
committer   Felix Blyakher <felixb@sgi.com>
            Thu, 19 Feb 2009 19:15:55 +0000 (13:15 -0600)
This reverts commit d2859751cd0bf586941ffa7308635a293f943c17.

This commit caused a regression. We'll try to fix the use of the
new vmap API for the next release.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Felix Blyakher <felixb@sgi.com>
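
The "new vmap API" referred to above is presumably the vm_map_ram()/
vm_unmap_ram() interface from <linux/vmalloc.h>, which is designed to
batch and lazily flush unmappings inside the vmalloc layer itself, so
callers no longer need their own vunmap() batching.  A minimal sketch of
how a multi-page buffer would be mapped and torn down with that interface
(the wrapper names below are hypothetical, not taken from the reverted
patch):

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Hypothetical wrappers illustrating the vm_map_ram() interface */
    static void *map_buffer_pages(struct page **pages, unsigned int count)
    {
            /* node == -1: no NUMA preference; prot as for vmap() */
            return vm_map_ram(pages, count, -1, PAGE_KERNEL);
    }

    static void unmap_buffer_pages(void *addr, unsigned int count)
    {
            /* count must match the count passed to vm_map_ram() */
            vm_unmap_ram(addr, count);
    }

The hunks below put the old per-caller deferred-unmap list back while the
regression in that new path is sorted out.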
fs/xfs/linux-2.6/xfs_buf.c

index 0b2177a9fbdcfc5677c9b9937f050c5e6c0060b0..cb329edc925b915ae5e590edeb4840810f811326 100644
@@ -165,6 +165,75 @@ test_page_region(
        return (mask && (page_private(page) & mask) == mask);
 }
 
+/*
+ *     Mapping of multi-page buffers into contiguous virtual space
+ */
+
+typedef struct a_list {
+       void            *vm_addr;
+       struct a_list   *next;
+} a_list_t;
+
+static a_list_t                *as_free_head;
+static int             as_list_len;
+static DEFINE_SPINLOCK(as_lock);
+
+/*
+ *     Try to batch vunmaps because they are costly.
+ */
+STATIC void
+free_address(
+       void            *addr)
+{
+       a_list_t        *aentry;
+
+#ifdef CONFIG_XEN
+       /*
+        * Xen needs to be able to make sure it can get an exclusive
+        * RO mapping of pages it wants to turn into a pagetable.  If
+        * a newly allocated page is also still being vmap()ed by xfs,
+        * it will cause pagetable construction to fail.  This is a
+        * quick workaround to always eagerly unmap pages so that Xen
+        * is happy.
+        */
+       vunmap(addr);
+       return;
+#endif
+
+       aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
+       if (likely(aentry)) {
+               spin_lock(&as_lock);
+               aentry->next = as_free_head;
+               aentry->vm_addr = addr;
+               as_free_head = aentry;
+               as_list_len++;
+               spin_unlock(&as_lock);
+       } else {
+               vunmap(addr);
+       }
+}
+
+STATIC void
+purge_addresses(void)
+{
+       a_list_t        *aentry, *old;
+
+       if (as_free_head == NULL)
+               return;
+
+       spin_lock(&as_lock);
+       aentry = as_free_head;
+       as_free_head = NULL;
+       as_list_len = 0;
+       spin_unlock(&as_lock);
+
+       while ((old = aentry) != NULL) {
+               vunmap(aentry->vm_addr);
+               aentry = aentry->next;
+               kfree(old);
+       }
+}
+
 /*
  *     Internal xfs_buf_t object manipulation
  */
@@ -264,7 +333,7 @@ xfs_buf_free(
                uint            i;
 
                if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
-                       vunmap(bp->b_addr - bp->b_offset);
+                       free_address(bp->b_addr - bp->b_offset);
 
                for (i = 0; i < bp->b_page_count; i++) {
                        struct page     *page = bp->b_pages[i];
@@ -386,6 +455,8 @@ _xfs_buf_map_pages(
                bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
                bp->b_flags |= XBF_MAPPED;
        } else if (flags & XBF_MAPPED) {
+               if (as_list_len > 64)
+                       purge_addresses();
                bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
                                        VM_MAP, PAGE_KERNEL);
                if (unlikely(bp->b_addr == NULL))
@@ -1672,6 +1743,8 @@ xfsbufd(
                        count++;
                }
 
+               if (as_list_len > 0)
+                       purge_addresses();
                if (count)
                        blk_run_address_space(target->bt_mapping);
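
With the revert applied, the old scheme is back in place: xfs_buf_free()
queues addresses through free_address() instead of calling vunmap()
directly, _xfs_buf_map_pages() drains the list once it grows past 64
entries before creating a new mapping, and xfsbufd() drains any leftovers
on each pass.  A condensed sketch of that call pattern, using the helpers
restored above (the function itself is illustrative only, not part of the
patch):

    /* Illustrative teardown path built from the restored helpers */
    STATIC void
    example_buf_teardown(
            xfs_buf_t       *bp)
    {
            /* defer the costly vunmap() instead of doing it inline */
            if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
                    free_address(bp->b_addr - bp->b_offset);

            /* a later mapper (or xfsbufd) flushes the backlog in one batch */
            if (as_list_len > 64)
                    purge_addresses();
    }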