zswap: change zpool/compressor at runtime
[firefly-linux-kernel-4.4.55.git] / mm / zsmalloc.c
index 8f76d8875acaba9beb5212f390bc2758d724d06b..f135b1b6fcdcab49aaf0845e078c6fc299b4b28b 100644 (file)
@@ -246,6 +246,14 @@ struct zs_pool {
        atomic_long_t pages_allocated;
 
        struct zs_pool_stats stats;
+
+       /* Compact classes */
+       struct shrinker shrinker;
+       /*
+        * To signify that register_shrinker() was successful
+        * and unregister_shrinker() will not Oops.
+        */
+       bool shrinker_enabled;
 #ifdef CONFIG_ZSMALLOC_STAT
        struct dentry *stat_dentry;
 #endif
@@ -280,8 +288,7 @@ static int create_handle_cache(struct zs_pool *pool)
 
 static void destroy_handle_cache(struct zs_pool *pool)
 {
-       if (pool->handle_cachep)
-               kmem_cache_destroy(pool->handle_cachep);
+       kmem_cache_destroy(pool->handle_cachep);
 }
 
 static unsigned long alloc_handle(struct zs_pool *pool)
@@ -304,7 +311,8 @@ static void record_obj(unsigned long handle, unsigned long obj)
 
 #ifdef CONFIG_ZPOOL
 
-static void *zs_zpool_create(char *name, gfp_t gfp, struct zpool_ops *zpool_ops,
+static void *zs_zpool_create(char *name, gfp_t gfp,
+                            const struct zpool_ops *zpool_ops,
                             struct zpool *zpool)
 {
        return zs_create_pool(name, gfp);
@@ -635,13 +643,22 @@ static void insert_zspage(struct page *page, struct size_class *class,
        if (fullness >= _ZS_NR_FULLNESS_GROUPS)
                return;
 
-       head = &class->fullness_list[fullness];
-       if (*head)
-               list_add_tail(&page->lru, &(*head)->lru);
-
-       *head = page;
        zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ?
                        CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
+
+       head = &class->fullness_list[fullness];
+       if (!*head) {
+               *head = page;
+               return;
+       }
+
+       /*
+        * We want to see more ZS_FULL pages and fewer almost
+        * empty/full. Put pages with higher ->inuse first.
+        */
+       list_add_tail(&page->lru, &(*head)->lru);
+       if (page->inuse >= (*head)->inuse)
+               *head = page;
 }
 
 /*
@@ -1579,8 +1596,6 @@ struct zs_compact_control {
         /* Starting object index within @s_page which used for live object
          * in the subpage. */
        int index;
-       /* How many of objects were migrated */
-       int nr_migrated;
 };
 
 static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
@@ -1617,7 +1632,6 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
                record_obj(handle, free_obj);
                unpin_tag(handle);
                obj_free(pool, class, used_obj);
-               cc->nr_migrated++;
        }
 
        /* Remember last position in this iteration */
@@ -1643,8 +1657,17 @@ static struct page *isolate_target_page(struct size_class *class)
        return page;
 }
 
-static void putback_zspage(struct zs_pool *pool, struct size_class *class,
-                               struct page *first_page)
+/*
+ * putback_zspage - add @first_page into the right class's fullness list
+ * @pool: target pool
+ * @class: destination class
+ * @first_page: target page
+ *
+ * Return @first_page's fullness_group
+ */
+static enum fullness_group putback_zspage(struct zs_pool *pool,
+                       struct size_class *class,
+                       struct page *first_page)
 {
        enum fullness_group fullness;
 
@@ -1662,15 +1685,23 @@ static void putback_zspage(struct zs_pool *pool, struct size_class *class,
 
                free_zspage(first_page);
        }
+
+       return fullness;
 }
 
 static struct page *isolate_source_page(struct size_class *class)
 {
-       struct page *page;
+       int i;
+       struct page *page = NULL;
+
+       for (i = ZS_ALMOST_EMPTY; i >= ZS_ALMOST_FULL; i--) {
+               page = class->fullness_list[i];
+               if (!page)
+                       continue;
 
-       page = class->fullness_list[ZS_ALMOST_EMPTY];
-       if (page)
-               remove_zspage(page, class, ZS_ALMOST_EMPTY);
+               remove_zspage(page, class, i);
+               break;
+       }
 
        return page;
 }
@@ -1679,23 +1710,18 @@ static struct page *isolate_source_page(struct size_class *class)
  *
  * Based on the number of unused allocated objects calculate
  * and return the number of pages that we can free.
- *
- * Should be called under class->lock.
  */
 static unsigned long zs_can_compact(struct size_class *class)
 {
        unsigned long obj_wasted;
 
-       if (!zs_stat_get(class, CLASS_ALMOST_EMPTY))
-               return 0;
-
        obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
                zs_stat_get(class, OBJ_USED);
 
        obj_wasted /= get_maxobj_per_zspage(class->size,
                        class->pages_per_zspage);
 
-       return obj_wasted * get_pages_per_zspage(class->size);
+       return obj_wasted * class->pages_per_zspage;
 }
 
 static void __zs_compact(struct zs_pool *pool, struct size_class *class)
@@ -1704,7 +1730,6 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
        struct page *src_page;
        struct page *dst_page = NULL;
 
-       cc.nr_migrated = 0;
        spin_lock(&class->lock);
        while ((src_page = isolate_source_page(class))) {
 
@@ -1733,7 +1758,8 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
                        break;
 
                putback_zspage(pool, class, dst_page);
-               putback_zspage(pool, class, src_page);
+               if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
+                       pool->stats.pages_compacted += class->pages_per_zspage;
                spin_unlock(&class->lock);
                cond_resched();
                spin_lock(&class->lock);
@@ -1742,8 +1768,6 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
        if (src_page)
                putback_zspage(pool, class, src_page);
 
-       pool->stats.num_migrated += cc.nr_migrated;
-
        spin_unlock(&class->lock);
 }
 
@@ -1761,7 +1785,7 @@ unsigned long zs_compact(struct zs_pool *pool)
                __zs_compact(pool, class);
        }
 
-       return pool->stats.num_migrated;
+       return pool->stats.pages_compacted;
 }
 EXPORT_SYMBOL_GPL(zs_compact);
 
@@ -1771,6 +1795,67 @@ void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
 }
 EXPORT_SYMBOL_GPL(zs_pool_stats);
 
+static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
+               struct shrink_control *sc)
+{
+       unsigned long pages_freed;
+       struct zs_pool *pool = container_of(shrinker, struct zs_pool,
+                       shrinker);
+
+       pages_freed = pool->stats.pages_compacted;
+       /*
+        * Compact classes and calculate compaction delta.
+        * Can run concurrently with a manually triggered
+        * (by user) compaction.
+        */
+       pages_freed = zs_compact(pool) - pages_freed;
+
+       return pages_freed ? pages_freed : SHRINK_STOP;
+}
+
+static unsigned long zs_shrinker_count(struct shrinker *shrinker,
+               struct shrink_control *sc)
+{
+       int i;
+       struct size_class *class;
+       unsigned long pages_to_free = 0;
+       struct zs_pool *pool = container_of(shrinker, struct zs_pool,
+                       shrinker);
+
+       if (!pool->shrinker_enabled)
+               return 0;
+
+       for (i = zs_size_classes - 1; i >= 0; i--) {
+               class = pool->size_class[i];
+               if (!class)
+                       continue;
+               if (class->index != i)
+                       continue;
+
+               pages_to_free += zs_can_compact(class);
+       }
+
+       return pages_to_free;
+}
+
+static void zs_unregister_shrinker(struct zs_pool *pool)
+{
+       if (pool->shrinker_enabled) {
+               unregister_shrinker(&pool->shrinker);
+               pool->shrinker_enabled = false;
+       }
+}
+
+static int zs_register_shrinker(struct zs_pool *pool)
+{
+       pool->shrinker.scan_objects = zs_shrinker_scan;
+       pool->shrinker.count_objects = zs_shrinker_count;
+       pool->shrinker.batch = 0;
+       pool->shrinker.seeks = DEFAULT_SEEKS;
+
+       return register_shrinker(&pool->shrinker);
+}
+
 /**
  * zs_create_pool - Creates an allocation pool to work from.
  * @flags: allocation flags used to allocate pool metadata
@@ -1856,6 +1941,12 @@ struct zs_pool *zs_create_pool(char *name, gfp_t flags)
        if (zs_pool_stat_create(name, pool))
                goto err;
 
+       /*
+        * Not critical, we still can use the pool
+        * and user can trigger compaction manually.
+        */
+       if (zs_register_shrinker(pool) == 0)
+               pool->shrinker_enabled = true;
        return pool;
 
 err:
@@ -1868,6 +1959,7 @@ void zs_destroy_pool(struct zs_pool *pool)
 {
        int i;
 
+       zs_unregister_shrinker(pool);
        zs_pool_stat_destroy(pool);
 
        for (i = 0; i < zs_size_classes; i++) {