mm/zpool: zbud/zsmalloc implement zpool
author     Dan Streetman <ddstreet@ieee.org>
           Wed, 6 Aug 2014 23:08:38 +0000 (16:08 -0700)
committer  Alex Shi <alex.shi@linaro.org>
           Mon, 11 May 2015 13:18:42 +0000 (21:18 +0800)
Update zbud and zsmalloc to implement the zpool API.

[fengguang.wu@intel.com: make functions static]
Signed-off-by: Dan Streetman <ddstreet@ieee.org>
Tested-by: Seth Jennings <sjennings@variantweb.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Weijie Yang <weijie.yang@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit c795779df29e180738568d2a5eb3a42f3b5e47f0)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
 Conflicts:
 	mm/zbud.c

 Conflict resolution:
 	drop the mm/zbud.c hunks (zbud is not present in this tree)

mm/zsmalloc.c

index 3078eca4737da1fb754cf9b7e18d35741798a337..fc25b58a02f81bbc6031c8139bf6bdd88e82cc38 100644
@@ -80,6 +80,7 @@
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/zsmalloc.h>
+#include <linux/zpool.h>
 
 /*
  * This must be power of 2 and greater than or equal to sizeof(link_free).
@@ -239,6 +240,82 @@ struct mapping_area {
        enum zs_mapmode vm_mm; /* mapping mode */
 };
 
+/* zpool driver */
+
+#ifdef CONFIG_ZPOOL
+
+static void *zs_zpool_create(gfp_t gfp, struct zpool_ops *zpool_ops)
+{
+       return zs_create_pool(gfp);
+}
+
+static void zs_zpool_destroy(void *pool)
+{
+       zs_destroy_pool(pool);
+}
+
+static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
+                       unsigned long *handle)
+{
+       *handle = zs_malloc(pool, size);
+       return *handle ? 0 : -1;
+}
+static void zs_zpool_free(void *pool, unsigned long handle)
+{
+       zs_free(pool, handle);
+}
+
+static int zs_zpool_shrink(void *pool, unsigned int pages,
+                       unsigned int *reclaimed)
+{
+       return -EINVAL;
+}
+
+static void *zs_zpool_map(void *pool, unsigned long handle,
+                       enum zpool_mapmode mm)
+{
+       enum zs_mapmode zs_mm;
+
+       switch (mm) {
+       case ZPOOL_MM_RO:
+               zs_mm = ZS_MM_RO;
+               break;
+       case ZPOOL_MM_WO:
+               zs_mm = ZS_MM_WO;
+               break;
+       case ZPOOL_MM_RW: /* fallthru */
+       default:
+               zs_mm = ZS_MM_RW;
+               break;
+       }
+
+       return zs_map_object(pool, handle, zs_mm);
+}
+static void zs_zpool_unmap(void *pool, unsigned long handle)
+{
+       zs_unmap_object(pool, handle);
+}
+
+static u64 zs_zpool_total_size(void *pool)
+{
+       return zs_get_total_size_bytes(pool);
+}
+
+static struct zpool_driver zs_zpool_driver = {
+       .type =         "zsmalloc",
+       .owner =        THIS_MODULE,
+       .create =       zs_zpool_create,
+       .destroy =      zs_zpool_destroy,
+       .malloc =       zs_zpool_malloc,
+       .free =         zs_zpool_free,
+       .shrink =       zs_zpool_shrink,
+       .map =          zs_zpool_map,
+       .unmap =        zs_zpool_unmap,
+       .total_size =   zs_zpool_total_size,
+};
+
+#endif /* CONFIG_ZPOOL */
+
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
 
@@ -779,6 +856,10 @@ static void zs_exit(void)
 {
        int cpu;
 
+#ifdef CONFIG_ZPOOL
+       zpool_unregister_driver(&zs_zpool_driver);
+#endif
+
        cpu_notifier_register_begin();
 
        for_each_online_cpu(cpu)
@@ -805,6 +886,10 @@ static int zs_init(void)
 
        cpu_notifier_register_done();
 
+#ifdef CONFIG_ZPOOL
+       zpool_register_driver(&zs_zpool_driver);
+#endif
+
        return 0;
 fail:
        zs_exit();
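
With the "zsmalloc" driver registered, a zpool user such as zswap can reach zsmalloc through the generic zpool calls instead of the zs_* API. Below is a minimal sketch of the consumer side, assuming the zpool interface added earlier in this series (zpool_create_pool(), zpool_malloc(), zpool_map_handle(), and friends); the demo_* names and the evict stub are illustrative only and not part of this patch.

/*
 * Sketch of a zpool consumer selecting the "zsmalloc" backend.
 * The zpool_* calls are from include/linux/zpool.h; everything
 * prefixed demo_ is made up for illustration.
 */
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/zpool.h>

static char *demo_zpool_type = "zsmalloc";

/* zsmalloc does not implement shrinking, so this callback never runs */
static int demo_evict(struct zpool *pool, unsigned long handle)
{
	return -EINVAL;
}

static struct zpool_ops demo_zpool_ops = {
	.evict = demo_evict,
};

static int __init demo_init(void)
{
	struct zpool *pool;
	unsigned long handle;
	char *buf;
	int ret;

	/* Routes to zs_zpool_create() via the driver registered above */
	pool = zpool_create_pool(demo_zpool_type, GFP_KERNEL, &demo_zpool_ops);
	if (!pool)
		return -ENOMEM;

	/* Allocate a 64-byte object; the handle is opaque to the caller */
	ret = zpool_malloc(pool, 64, GFP_KERNEL, &handle);
	if (ret)
		goto out;

	/* Map for write access, fill the object, then unmap promptly */
	buf = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
	memset(buf, 0xaa, 64);
	zpool_unmap_handle(pool, handle);

	pr_info("%s pool is using %llu bytes\n", zpool_get_type(pool),
		(unsigned long long)zpool_get_total_size(pool));

	zpool_free(pool, handle);
out:
	zpool_destroy_pool(pool);
	return ret;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that zs_zpool_shrink() in the patch simply returns -EINVAL, so a pool created through this backend cannot be reclaimed via zpool_shrink() and the evict callback in the sketch is never invoked.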