4 * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
5 * Copyright (c) 2010,2011, Nitin Gupta
7 * Zcache provides an in-kernel "host implementation" for transcendent memory
8 * and, thus indirectly, for cleancache and frontswap. Zcache includes two
9 * page-accessible memory [1] interfaces, both utilizing lzo1x compression:
10 * 1) "compression buddies" ("zbud") is used for ephemeral pages
11 * 2) xvmalloc is used for persistent pages.
12 * Xvmalloc (based on the TLSF allocator) has very low fragmentation
13 * so maximizes space efficiency, while zbud allows pairs (and potentially,
14 * in the future, more than a pair of) compressed pages to be closely linked
15 * so that reclaiming can be done via the kernel's physical-page-oriented
16 * "shrinker" interface.
18 * [1] For a definition of page-accessible memory (aka PAM), see:
19 * http://marc.info/?l=linux-mm&m=127811271605009
21 * - handle remotifying of buddied pages (see zbud_remotify_zbpg)
22 * - kernel boot params: nocleancache/nofrontswap don't always work?!?
25 #include <linux/module.h>
26 #include <linux/cpu.h>
27 #include <linux/highmem.h>
28 #include <linux/list.h>
29 #include <linux/lzo.h>
30 #include <linux/slab.h>
31 #include <linux/spinlock.h>
32 #include <linux/types.h>
33 #include <linux/atomic.h>
34 #include <linux/math64.h>
38 #include "cluster/tcp.h"
40 #include "../zram/xvmalloc.h" /* if built in drivers/staging */
42 #define RAMSTER_TESTING
44 #if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
45 #error "ramster is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
47 #ifdef CONFIG_CLEANCACHE
48 #include <linux/cleancache.h>
50 #ifdef CONFIG_FRONTSWAP
51 #include <linux/frontswap.h>
54 enum ramster_remotify_op {
55 RAMSTER_REMOTIFY_EPH_PUT,
56 RAMSTER_REMOTIFY_PERS_PUT,
57 RAMSTER_REMOTIFY_FLUSH_PAGE,
58 RAMSTER_REMOTIFY_FLUSH_OBJ,
59 RAMSTER_INTRANSIT_PERS
62 struct ramster_remotify_hdr {
63 enum ramster_remotify_op op;
64 struct list_head list;
67 #define ZBH_SENTINEL 0x43214321
68 #define ZBPG_SENTINEL 0xdeadbeef
70 #define ZBUD_MAX_BUDS 2
73 struct ramster_remotify_hdr rem_op;
78 uint16_t size; /* compressed size in bytes, zero means unused */
82 #define ZVH_SENTINEL 0x43214321
83 static const int zv_max_page_size = (PAGE_SIZE / 8) * 7;
86 struct ramster_remotify_hdr rem_op;
94 struct flushlist_node {
95 struct ramster_remotify_hdr rem_op;
96 struct tmem_xhandle xh;
100 struct ramster_remotify_hdr rem_op;
102 struct zbud_hdr zbud;
103 struct flushlist_node flist;
104 } remotify_list_node;
106 static LIST_HEAD(zcache_rem_op_list);
107 static DEFINE_SPINLOCK(zcache_rem_op_list_lock);
110 /* this is more aggressive but may cause other problems? */
111 #define ZCACHE_GFP_MASK (GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN)
113 #define ZCACHE_GFP_MASK \
114 (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
117 #define MAX_POOLS_PER_CLIENT 16
119 #define MAX_CLIENTS 16
120 #define LOCAL_CLIENT ((uint16_t)-1)
122 MODULE_LICENSE("GPL");
124 struct zcache_client {
125 struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
126 struct xv_pool *xvpool;
131 static struct zcache_client zcache_host;
132 static struct zcache_client zcache_clients[MAX_CLIENTS];
134 static inline uint16_t get_client_id_from_client(struct zcache_client *cli)
137 if (cli == &zcache_host)
139 return cli - &zcache_clients[0];
142 static inline bool is_local_client(struct zcache_client *cli)
144 return cli == &zcache_host;
148 * Compression buddies ("zbud") provides for packing two (or, possibly
149 * in the future, more) compressed ephemeral pages into a single "raw"
150 * (physical) page and tracking them with data structures so that
151 * the raw pages can be easily reclaimed.
153 * A zbud page ("zbpg") is an aligned page containing a list_head,
154 * a lock, and two "zbud headers". The remainder of the physical
155 * page is divided up into aligned 64-byte "chunks" which contain
156 * the compressed data for zero, one, or two zbuds. Each zbpg
157 * resides on: (1) an "unused list" if it has no zbuds; (2) a
158 * "buddied" list if it is fully populated with two zbuds; or
159 * (3) one of PAGE_SIZE/64 "unbuddied" lists indexed by how many chunks
160 * the one unbuddied zbud uses. The data inside a zbpg cannot be
161 * read or written unless the zbpg's lock is held.
165 struct list_head bud_list;
167 struct zbud_hdr buddy[ZBUD_MAX_BUDS];
169 /* followed by NUM_CHUNK aligned CHUNK_SIZE-byte chunks */
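/*
 * Editor's illustrative sketch (not from the original source): buddy[0]'s
 * data is placed in the first chunks after the (chunk-aligned) header,
 * while buddy[1]'s data is placed against the end of the page, so the two
 * compressed pages grow toward each other:
 *
 *   +-----------+---------------+---- ... ----+---------------+
 *   | zbud_page | buddy[0] data |    free     | buddy[1] data |
 *   | header    | (grows up)    |    chunks   | (grows down)  |
 *   +-----------+---------------+---- ... ----+---------------+
 *   0                                                 PAGE_SIZE
 *
 * See zbud_data() below for the exact offset arithmetic.
 */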
172 #define CHUNK_SHIFT 6
173 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
174 #define CHUNK_MASK (~(CHUNK_SIZE-1))
175 #define NCHUNKS (((PAGE_SIZE - sizeof(struct zbud_page)) & \
176 CHUNK_MASK) >> CHUNK_SHIFT)
177 #define MAX_CHUNK (NCHUNKS-1)
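/*
 * Worked example (editor's, illustrative only, assuming 4KB pages): with
 * CHUNK_SIZE == 64, NCHUNKS = ((PAGE_SIZE - sizeof(struct zbud_page)) &
 * CHUNK_MASK) >> CHUNK_SHIFT works out to 62 or 63, depending on whether
 * the header fits in one 64-byte chunk, and MAX_CHUNK is one less than
 * that; a single zbud can therefore hold at most MAX_CHUNK * 64 bytes of
 * compressed data (see zbud_max_buddy_size() below).
 */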
180 struct list_head list;
182 } zbud_unbuddied[NCHUNKS];
183 /* list N contains pages with N chunks USED and NCHUNKS-N unused */
184 /* element 0 is never used but optimizing that isn't worth it */
185 static unsigned long zbud_cumul_chunk_counts[NCHUNKS];
187 struct list_head zbud_buddied_list;
188 static unsigned long zcache_zbud_buddied_count;
190 /* protects the buddied list and all unbuddied lists */
191 static DEFINE_SPINLOCK(zbud_budlists_spinlock);
193 static atomic_t zcache_zbud_curr_raw_pages;
194 static atomic_t zcache_zbud_curr_zpages;
195 static unsigned long zcache_zbud_curr_zbytes;
196 static unsigned long zcache_zbud_cumul_zpages;
197 static unsigned long zcache_zbud_cumul_zbytes;
198 static unsigned long zcache_compress_poor;
199 static unsigned long zcache_policy_percent_exceeded;
200 static unsigned long zcache_mean_compress_poor;
204 * - Remote pages are pages with a local pampd but the data is remote
205 * - Foreign pages are pages stored locally but belonging to another node
207 static atomic_t ramster_remote_pers_pages = ATOMIC_INIT(0);
208 static unsigned long ramster_pers_remotify_enable;
209 static unsigned long ramster_eph_remotify_enable;
210 static unsigned long ramster_eph_pages_remoted;
211 static unsigned long ramster_eph_pages_remote_failed;
212 static unsigned long ramster_pers_pages_remoted;
213 static unsigned long ramster_pers_pages_remote_failed;
214 static unsigned long ramster_pers_pages_remote_nomem;
215 static unsigned long ramster_remote_objects_flushed;
216 static unsigned long ramster_remote_object_flushes_failed;
217 static unsigned long ramster_remote_pages_flushed;
218 static unsigned long ramster_remote_page_flushes_failed;
219 static unsigned long ramster_remote_eph_pages_succ_get;
220 static unsigned long ramster_remote_pers_pages_succ_get;
221 static unsigned long ramster_remote_eph_pages_unsucc_get;
222 static unsigned long ramster_remote_pers_pages_unsucc_get;
223 static atomic_t ramster_curr_flnode_count = ATOMIC_INIT(0);
224 static unsigned long ramster_curr_flnode_count_max;
225 static atomic_t ramster_foreign_eph_pampd_count = ATOMIC_INIT(0);
226 static unsigned long ramster_foreign_eph_pampd_count_max;
227 static atomic_t ramster_foreign_pers_pampd_count = ATOMIC_INIT(0);
228 static unsigned long ramster_foreign_pers_pampd_count_max;
230 /* forward references */
231 static void *zcache_get_free_page(void);
232 static void zcache_free_page(void *p);
235 * zbud helper functions
238 static inline unsigned zbud_max_buddy_size(void)
240 return MAX_CHUNK << CHUNK_SHIFT;
243 static inline unsigned zbud_size_to_chunks(unsigned size)
245 BUG_ON(size == 0 || size > zbud_max_buddy_size());
246 return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
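/*
 * Editor's example (hypothetical values):
 *	unsigned chunks = zbud_size_to_chunks(1000);
 *	// (1000 + 63) >> 6 == 16 chunks, i.e. 1024 bytes of chunk space
 */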
249 static inline int zbud_budnum(struct zbud_hdr *zh)
251 unsigned offset = (unsigned long)zh & (PAGE_SIZE - 1);
252 struct zbud_page *zbpg = NULL;
253 unsigned budnum = -1U;
256 for (i = 0; i < ZBUD_MAX_BUDS; i++)
257 if (offset == offsetof(typeof(*zbpg), buddy[i])) {
261 BUG_ON(budnum == -1U);
265 static char *zbud_data(struct zbud_hdr *zh, unsigned size)
267 struct zbud_page *zbpg;
271 ASSERT_SENTINEL(zh, ZBH);
272 budnum = zbud_budnum(zh);
273 BUG_ON(size == 0 || size > zbud_max_buddy_size());
274 zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
275 ASSERT_SPINLOCK(&zbpg->lock);
278 p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
280 else if (budnum == 1)
281 p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
285 static void zbud_copy_from_pampd(char *data, size_t *size, struct zbud_hdr *zh)
287 struct zbud_page *zbpg;
291 ASSERT_SENTINEL(zh, ZBH);
292 budnum = zbud_budnum(zh);
293 zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
294 spin_lock(&zbpg->lock);
295 BUG_ON(zh->size > *size);
298 p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
300 else if (budnum == 1)
301 p += PAGE_SIZE - ((zh->size + CHUNK_SIZE - 1) & CHUNK_MASK);
302 /* client should be filled in by caller */
303 memcpy(data, p, zh->size);
305 spin_unlock(&zbpg->lock);
309 * zbud raw page management
312 static struct zbud_page *zbud_alloc_raw_page(void)
314 struct zbud_page *zbpg = NULL;
315 struct zbud_hdr *zh0, *zh1;
316 zbpg = zcache_get_free_page();
317 if (likely(zbpg != NULL)) {
318 INIT_LIST_HEAD(&zbpg->bud_list);
319 zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
320 spin_lock_init(&zbpg->lock);
321 atomic_inc(&zcache_zbud_curr_raw_pages);
322 INIT_LIST_HEAD(&zbpg->bud_list);
323 SET_SENTINEL(zbpg, ZBPG);
324 zh0->size = 0; zh1->size = 0;
325 tmem_oid_set_invalid(&zh0->oid);
326 tmem_oid_set_invalid(&zh1->oid);
331 static void zbud_free_raw_page(struct zbud_page *zbpg)
333 struct zbud_hdr *zh0 = &zbpg->buddy[0], *zh1 = &zbpg->buddy[1];
335 ASSERT_SENTINEL(zbpg, ZBPG);
336 BUG_ON(!list_empty(&zbpg->bud_list));
337 ASSERT_SPINLOCK(&zbpg->lock);
338 BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
339 BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
340 INVERT_SENTINEL(zbpg, ZBPG);
341 spin_unlock(&zbpg->lock);
342 atomic_dec(&zcache_zbud_curr_raw_pages);
343 zcache_free_page(zbpg);
347 * core zbud handling routines
350 static unsigned zbud_free(struct zbud_hdr *zh)
354 ASSERT_SENTINEL(zh, ZBH);
355 BUG_ON(!tmem_oid_valid(&zh->oid));
357 BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
359 tmem_oid_set_invalid(&zh->oid);
360 INVERT_SENTINEL(zh, ZBH);
361 zcache_zbud_curr_zbytes -= size;
362 atomic_dec(&zcache_zbud_curr_zpages);
366 static void zbud_free_and_delist(struct zbud_hdr *zh)
369 struct zbud_hdr *zh_other;
370 unsigned budnum = zbud_budnum(zh), size;
371 struct zbud_page *zbpg =
372 container_of(zh, struct zbud_page, buddy[budnum]);
374 /* FIXME: this should be a BUG_ON, but the pool destruction path doesn't disable
375 * interrupts: tmem_destroy_pool()->tmem_pampd_destroy_all_in_obj()->
376 * tmem_objnode_node_destroy()->zcache_pampd_free() */
377 WARN_ON(!irqs_disabled());
378 spin_lock(&zbpg->lock);
379 if (list_empty(&zbpg->bud_list)) {
380 /* ignore zombie page... see zbud_evict_pages() */
381 spin_unlock(&zbpg->lock);
384 size = zbud_free(zh);
385 ASSERT_SPINLOCK(&zbpg->lock);
386 zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
387 if (zh_other->size == 0) { /* was unbuddied: unlist and free */
388 chunks = zbud_size_to_chunks(size);
389 spin_lock(&zbud_budlists_spinlock);
390 BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
391 list_del_init(&zbpg->bud_list);
392 zbud_unbuddied[chunks].count--;
393 spin_unlock(&zbud_budlists_spinlock);
394 zbud_free_raw_page(zbpg);
395 } else { /* was buddied: move remaining buddy to unbuddied list */
396 chunks = zbud_size_to_chunks(zh_other->size);
397 spin_lock(&zbud_budlists_spinlock);
398 list_del_init(&zbpg->bud_list);
399 zcache_zbud_buddied_count--;
400 list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
401 zbud_unbuddied[chunks].count++;
402 spin_unlock(&zbud_budlists_spinlock);
403 spin_unlock(&zbpg->lock);
407 static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
408 struct tmem_oid *oid,
409 uint32_t index, struct page *page,
410 void *cdata, unsigned size)
412 struct zbud_hdr *zh0, *zh1, *zh = NULL;
413 struct zbud_page *zbpg = NULL, *ztmp;
416 int i, found_good_buddy = 0;
418 nchunks = zbud_size_to_chunks(size);
419 for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
420 spin_lock(&zbud_budlists_spinlock);
421 if (!list_empty(&zbud_unbuddied[i].list)) {
422 list_for_each_entry_safe(zbpg, ztmp,
423 &zbud_unbuddied[i].list, bud_list) {
424 if (spin_trylock(&zbpg->lock)) {
425 found_good_buddy = i;
426 goto found_unbuddied;
430 spin_unlock(&zbud_budlists_spinlock);
432 /* didn't find a good buddy, try allocating a new page */
433 zbpg = zbud_alloc_raw_page();
434 if (unlikely(zbpg == NULL))
436 /* ok, have a fresh page; the data was already compressed by the caller, so just add the page to the proper unbuddied list under the locks */
437 spin_lock(&zbud_budlists_spinlock);
438 spin_lock(&zbpg->lock);
439 list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list);
440 zbud_unbuddied[nchunks].count++;
441 zh = &zbpg->buddy[0];
445 ASSERT_SPINLOCK(&zbpg->lock);
446 zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
447 BUG_ON(!((zh0->size == 0) ^ (zh1->size == 0)));
448 if (zh0->size != 0) { /* buddy0 in use, buddy1 is vacant */
449 ASSERT_SENTINEL(zh0, ZBH);
451 } else if (zh1->size != 0) { /* buddy1 in use, buddy0 is vacant */
452 ASSERT_SENTINEL(zh1, ZBH);
456 list_del_init(&zbpg->bud_list);
457 zbud_unbuddied[found_good_buddy].count--;
458 list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
459 zcache_zbud_buddied_count++;
462 SET_SENTINEL(zh, ZBH);
466 zh->pool_id = pool_id;
467 zh->client_id = client_id;
468 to = zbud_data(zh, size);
469 memcpy(to, cdata, size);
470 spin_unlock(&zbpg->lock);
471 spin_unlock(&zbud_budlists_spinlock);
472 zbud_cumul_chunk_counts[nchunks]++;
473 atomic_inc(&zcache_zbud_curr_zpages);
474 zcache_zbud_cumul_zpages++;
475 zcache_zbud_curr_zbytes += size;
476 zcache_zbud_cumul_zbytes += size;
481 static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
483 struct zbud_page *zbpg;
484 unsigned budnum = zbud_budnum(zh);
485 size_t out_len = PAGE_SIZE;
486 char *to_va, *from_va;
490 zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
491 spin_lock(&zbpg->lock);
492 if (list_empty(&zbpg->bud_list)) {
493 /* ignore zombie page... see zbud_evict_pages() */
497 ASSERT_SENTINEL(zh, ZBH);
498 BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
499 to_va = kmap_atomic(page, KM_USER0);
501 from_va = zbud_data(zh, size);
502 ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
503 BUG_ON(ret != LZO_E_OK);
504 BUG_ON(out_len != PAGE_SIZE);
505 kunmap_atomic(to_va, KM_USER0);
507 spin_unlock(&zbpg->lock);
512 * The following routines handle shrinking of ephemeral pages by evicting
513 * pages "least valuable" first.
516 static unsigned long zcache_evicted_raw_pages;
517 static unsigned long zcache_evicted_buddied_pages;
518 static unsigned long zcache_evicted_unbuddied_pages;
520 static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id,
522 static void zcache_put_pool(struct tmem_pool *pool);
525 * Flush and free all zbuds in a zbpg, then free the pageframe
527 static void zbud_evict_zbpg(struct zbud_page *zbpg)
531 uint32_t pool_id[ZBUD_MAX_BUDS], client_id[ZBUD_MAX_BUDS];
532 uint32_t index[ZBUD_MAX_BUDS];
533 struct tmem_oid oid[ZBUD_MAX_BUDS];
534 struct tmem_pool *pool;
537 ASSERT_SPINLOCK(&zbpg->lock);
538 for (i = 0, j = 0; i < ZBUD_MAX_BUDS; i++) {
539 zh = &zbpg->buddy[i];
541 client_id[j] = zh->client_id;
542 pool_id[j] = zh->pool_id;
544 index[j] = zh->index;
548 spin_unlock(&zbpg->lock);
549 for (i = 0; i < j; i++) {
550 pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
551 BUG_ON(pool == NULL);
552 local_irq_save(flags);
553 /* these flushes should dispose of any local storage */
554 tmem_flush_page(pool, &oid[i], index[i]);
555 local_irq_restore(flags);
556 zcache_put_pool(pool);
561 * Free nr pages. This code is funky because we want to hold the locks
562 * protecting various lists for as short a time as possible, and in some
563 * circumstances the list may change asynchronously when the list lock is
564 * not held. In some cases we also trylock not only to avoid waiting on a
565 page in use by another cpu, but also to avoid potential deadlock due to lock inversion.
568 static void zbud_evict_pages(int nr)
570 struct zbud_page *zbpg;
571 int i, newly_unused_pages = 0;
574 /* now try freeing unbuddied pages, starting with least space avail */
575 for (i = 0; i < MAX_CHUNK; i++) {
577 spin_lock_bh(&zbud_budlists_spinlock);
578 if (list_empty(&zbud_unbuddied[i].list)) {
579 spin_unlock_bh(&zbud_budlists_spinlock);
582 list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
583 if (unlikely(!spin_trylock(&zbpg->lock)))
585 zbud_unbuddied[i].count--;
586 spin_unlock(&zbud_budlists_spinlock);
587 zcache_evicted_unbuddied_pages++;
588 /* want budlists unlocked when doing zbpg eviction */
589 zbud_evict_zbpg(zbpg);
590 newly_unused_pages++;
594 goto retry_unbud_list_i;
596 spin_unlock_bh(&zbud_budlists_spinlock);
599 /* as a last resort, free buddied pages */
601 spin_lock_bh(&zbud_budlists_spinlock);
602 if (list_empty(&zbud_buddied_list)) {
603 spin_unlock_bh(&zbud_budlists_spinlock);
606 list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
607 if (unlikely(!spin_trylock(&zbpg->lock)))
609 zcache_zbud_buddied_count--;
610 spin_unlock(&zbud_budlists_spinlock);
611 zcache_evicted_buddied_pages++;
612 /* want budlists unlocked when doing zbpg eviction */
613 zbud_evict_zbpg(zbpg);
614 newly_unused_pages++;
620 spin_unlock_bh(&zbud_budlists_spinlock);
626 static DEFINE_PER_CPU(unsigned char *, zcache_remoteputmem);
628 static int zbud_remotify_zbud(struct tmem_xhandle *xh, char *data,
631 struct tmem_pool *pool;
632 int i, remotenode, ret = -1;
633 unsigned char cksum, *p;
636 for (p = data, cksum = 0, i = 0; i < size; i++)
638 ret = ramster_remote_put(xh, data, size, true, &remotenode);
640 /* data was successfully remoted so change the local version
641 * to point to the remote node where it landed */
642 pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh->pool_id);
643 BUG_ON(pool == NULL);
644 local_irq_save(flags);
645 /* tmem_replace will also free up any local space */
646 (void)tmem_replace(pool, &xh->oid, xh->index,
647 pampd_make_remote(remotenode, size, cksum));
648 local_irq_restore(flags);
649 zcache_put_pool(pool);
650 ramster_eph_pages_remoted++;
653 ramster_eph_pages_remote_failed++;
657 static int zbud_remotify_zbpg(struct zbud_page *zbpg)
659 struct zbud_hdr *zh1, *zh2 = NULL;
660 struct tmem_xhandle xh1, xh2 = { 0 };
661 char *data1 = NULL, *data2 = NULL;
662 size_t size1 = 0, size2 = 0;
664 unsigned char *tmpmem = __get_cpu_var(zcache_remoteputmem);
666 ASSERT_SPINLOCK(&zbpg->lock);
667 if (zbpg->buddy[0].size == 0)
668 zh1 = &zbpg->buddy[1];
669 else if (zbpg->buddy[1].size == 0)
670 zh1 = &zbpg->buddy[0];
672 zh1 = &zbpg->buddy[0];
673 zh2 = &zbpg->buddy[1];
675 /* don't remotify pages that are already remotified */
676 if (zh1->client_id != LOCAL_CLIENT)
678 if ((zh2 != NULL) && (zh2->client_id != LOCAL_CLIENT))
681 /* copy the data and metadata so we can release the lock */
683 xh1.client_id = zh1->client_id;
684 xh1.pool_id = zh1->pool_id;
686 xh1.index = zh1->index;
688 data1 = zbud_data(zh1, size1);
689 memcpy(tmpmem, zbud_data(zh1, size1), size1);
694 xh2.client_id = zh2->client_id;
695 xh2.pool_id = zh2->pool_id;
697 xh2.index = zh2->index;
699 memcpy(tmpmem, zbud_data(zh2, size2), size2);
702 spin_unlock(&zbpg->lock);
705 /* OK, no locks held anymore, remotify one or both zbuds */
707 ret = zbud_remotify_zbud(&xh1, data1, size1);
709 ret |= zbud_remotify_zbud(&xh2, data2, size2);
713 void zbud_remotify_pages(int nr)
715 struct zbud_page *zbpg;
719 * for now just try remotifying unbuddied pages, starting with
722 for (i = 0; i < MAX_CHUNK; i++) {
724 preempt_disable(); /* enable in zbud_remotify_zbpg */
725 spin_lock_bh(&zbud_budlists_spinlock);
726 if (list_empty(&zbud_unbuddied[i].list)) {
727 spin_unlock_bh(&zbud_budlists_spinlock);
729 continue; /* next i in for loop */
731 list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
732 if (unlikely(!spin_trylock(&zbpg->lock)))
733 continue; /* next list_for_each_entry */
734 zbud_unbuddied[i].count--;
735 /* want budlists unlocked when doing zbpg remotify */
736 spin_unlock_bh(&zbud_budlists_spinlock);
737 ret = zbud_remotify_zbpg(zbpg);
738 /* preemption is re-enabled in zbud_remotify_zbpg */
742 goto retry_unbud_list_i;
744 /* if we fail to remotify any page, quit */
745 pr_err("TESTING zbud_remotify_pages failed on page,"
746 " trying to re-add\n");
747 spin_lock_bh(&zbud_budlists_spinlock);
748 spin_lock(&zbpg->lock);
749 list_add_tail(&zbpg->bud_list, &zbud_unbuddied[i].list);
750 zbud_unbuddied[i].count++;
751 spin_unlock(&zbpg->lock);
752 spin_unlock_bh(&zbud_budlists_spinlock);
753 pr_err("TESTING zbud_remotify_pages failed on page,"
754 " finished re-add\n");
757 spin_unlock_bh(&zbud_budlists_spinlock);
762 preempt_disable(); /* enable in zbud_remotify_zbpg */
763 spin_lock_bh(&zbud_budlists_spinlock);
764 if (list_empty(&zbud_buddied_list))
766 list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
767 if (unlikely(!spin_trylock(&zbpg->lock)))
768 continue; /* next list_for_each_entry */
769 zcache_zbud_buddied_count--;
770 /* want budlists unlocked when doing zbpg remotify */
771 spin_unlock_bh(&zbud_budlists_spinlock);
772 ret = zbud_remotify_zbpg(zbpg);
773 /* preemption is re-enabled in zbud_remotify_zbpg */
777 goto next_buddied_zbpg;
779 /* if we fail to remotify any page, quit */
780 pr_err("TESTING zbud_remotify_pages failed on BUDDIED page,"
781 " trying to re-add\n");
782 spin_lock_bh(&zbud_budlists_spinlock);
783 spin_lock(&zbpg->lock);
784 list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
785 zcache_zbud_buddied_count++;
786 spin_unlock(&zbpg->lock);
787 spin_unlock_bh(&zbud_budlists_spinlock);
788 pr_err("TESTING zbud_remotify_pages failed on BUDDIED page,"
789 " finished re-add\n");
793 spin_unlock_bh(&zbud_budlists_spinlock);
799 /* the "flush list" asynchronously collects pages to remotely flush */
800 #define FLUSH_ENTIRE_OBJECT ((uint32_t)-1)
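/*
 * Rough flow (editor's sketch): when a local free discovers that the data
 * actually lives on a remote node, a flushlist_node is filled in with the
 * tmem_xhandle and queued on zcache_rem_op_list under
 * zcache_rem_op_list_lock; the remotify worker later drains the list via
 * zcache_do_remotify_ops() and issues ramster_remote_flush() or
 * ramster_remote_flush_object() as appropriate.
 */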
801 static void ramster_flnode_free(struct flushlist_node *,
804 static void zcache_remote_flush_page(struct flushlist_node *flnode)
806 struct tmem_xhandle *xh;
811 remotenode = flnode->xh.client_id;
812 ret = ramster_remote_flush(xh, remotenode);
814 ramster_remote_pages_flushed++;
816 ramster_remote_page_flushes_failed++;
817 preempt_enable_no_resched();
818 ramster_flnode_free(flnode, NULL);
821 static void zcache_remote_flush_object(struct flushlist_node *flnode)
823 struct tmem_xhandle *xh;
828 remotenode = flnode->xh.client_id;
829 ret = ramster_remote_flush_object(xh, remotenode);
831 ramster_remote_objects_flushed++;
833 ramster_remote_object_flushes_failed++;
834 preempt_enable_no_resched();
835 ramster_flnode_free(flnode, NULL);
838 static void zcache_remote_eph_put(struct zbud_hdr *zbud)
843 static void zcache_remote_pers_put(struct zv_hdr *zv)
845 struct tmem_xhandle xh;
848 int remotenode, ret = -1;
850 struct tmem_pool *pool;
855 unsigned char *tmpmem = __get_cpu_var(zcache_remoteputmem);
857 ASSERT_SENTINEL(zv, ZVH);
858 BUG_ON(zv->client_id != LOCAL_CLIENT);
860 xh.client_id = zv->client_id;
861 xh.pool_id = zv->pool_id;
863 xh.index = zv->index;
864 size = xv_get_object_size(zv) - sizeof(*zv);
865 BUG_ON(size == 0 || size > zv_max_page_size);
866 data = (char *)zv + sizeof(*zv);
867 for (p = data, cksum = 0, i = 0; i < size; i++)
869 memcpy(tmpmem, data, size);
871 pool = zcache_get_pool_by_id(zv->client_id, zv->pool_id);
872 ephemeral = is_ephemeral(pool);
873 zcache_put_pool(pool);
874 /* now OK to release lock set in caller */
875 spin_unlock(&zcache_rem_op_list_lock);
878 ret = ramster_remote_put(&xh, data, size, ephemeral, &remotenode);
879 preempt_enable_no_resched();
882 * This is some form of a memory leak... if the remote put
883 * fails, there will never be another attempt to remotify
884 * this page. But since we've dropped the zv pointer,
885 * the page may have been freed or the data replaced
886 * so we can't just "put it back" in the remote op list.
887 * Even if we could, not sure where to put it in the list
888 * because there may be flushes that must be strictly
889 * ordered vs the put. So leave this as a FIXME for now.
890 * But count them so we know if it becomes a problem.
892 ramster_pers_pages_remote_failed++;
895 atomic_inc(&ramster_remote_pers_pages);
896 ramster_pers_pages_remoted++;
898 * data was successfully remoted so change the local version to
899 * point to the remote node where it landed
902 pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh.pool_id);
903 local_irq_save(flags);
904 (void)tmem_replace(pool, &xh.oid, xh.index,
905 pampd_make_remote(remotenode, size, cksum));
906 local_irq_restore(flags);
907 zcache_put_pool(pool);
913 static void zcache_do_remotify_ops(int nr)
915 struct ramster_remotify_hdr *rem_op;
916 union remotify_list_node *u;
921 spin_lock(&zcache_rem_op_list_lock);
922 if (list_empty(&zcache_rem_op_list)) {
923 spin_unlock(&zcache_rem_op_list_lock);
926 rem_op = list_first_entry(&zcache_rem_op_list,
927 struct ramster_remotify_hdr, list);
928 list_del_init(&rem_op->list);
929 if (rem_op->op != RAMSTER_REMOTIFY_PERS_PUT)
930 spin_unlock(&zcache_rem_op_list_lock);
931 u = (union remotify_list_node *)rem_op;
932 switch (rem_op->op) {
933 case RAMSTER_REMOTIFY_EPH_PUT:
935 zcache_remote_eph_put((struct zbud_hdr *)rem_op);
937 case RAMSTER_REMOTIFY_PERS_PUT:
938 zcache_remote_pers_put((struct zv_hdr *)rem_op);
940 case RAMSTER_REMOTIFY_FLUSH_PAGE:
941 zcache_remote_flush_page((struct flushlist_node *)u);
943 case RAMSTER_REMOTIFY_FLUSH_OBJ:
944 zcache_remote_flush_object((struct flushlist_node *)u);
955 * For now, just push over a few pages every few seconds to
956 * ensure that it basically works
958 static struct workqueue_struct *ramster_remotify_workqueue;
959 static void ramster_remotify_process(struct work_struct *work);
960 static DECLARE_DELAYED_WORK(ramster_remotify_worker,
961 ramster_remotify_process);
963 static void ramster_remotify_queue_delayed_work(unsigned long delay)
965 if (!queue_delayed_work(ramster_remotify_workqueue,
966 &ramster_remotify_worker, delay))
967 pr_err("ramster_remotify: bad workqueue\n");
971 static int use_frontswap;
972 static int use_cleancache;
973 static void ramster_remotify_process(struct work_struct *work)
975 static bool remotify_in_progress;
977 BUG_ON(irqs_disabled());
978 if (remotify_in_progress)
979 ramster_remotify_queue_delayed_work(HZ);
981 remotify_in_progress = true;
982 #ifdef CONFIG_CLEANCACHE
983 if (use_cleancache && ramster_eph_remotify_enable)
984 zbud_remotify_pages(5000); /* FIXME is this a good number? */
986 #ifdef CONFIG_FRONTSWAP
987 if (use_frontswap && ramster_pers_remotify_enable)
988 zcache_do_remotify_ops(500); /* FIXME is this a good number? */
990 remotify_in_progress = false;
991 ramster_remotify_queue_delayed_work(HZ);
995 static void ramster_remotify_init(void)
997 unsigned long n = 60UL;
998 ramster_remotify_workqueue =
999 create_singlethread_workqueue("ramster_remotify");
1000 ramster_remotify_queue_delayed_work(n * HZ);
1004 static void zbud_init(void)
1008 INIT_LIST_HEAD(&zbud_buddied_list);
1009 zcache_zbud_buddied_count = 0;
1010 for (i = 0; i < NCHUNKS; i++) {
1011 INIT_LIST_HEAD(&zbud_unbuddied[i].list);
1012 zbud_unbuddied[i].count = 0;
1018 * These sysfs routines show a nice distribution of how many zbpg's are
1019 * currently (and have ever been placed) in each unbuddied list. It's fun
1020 * to watch but can probably go away before final merge.
1022 static int zbud_show_unbuddied_list_counts(char *buf)
1027 for (i = 0; i < NCHUNKS; i++)
1028 p += sprintf(p, "%u ", zbud_unbuddied[i].count);
1032 static int zbud_show_cumul_chunk_counts(char *buf)
1034 unsigned long i, chunks = 0, total_chunks = 0, sum_total_chunks = 0;
1035 unsigned long total_chunks_lte_21 = 0, total_chunks_lte_32 = 0;
1036 unsigned long total_chunks_lte_42 = 0;
1039 for (i = 0; i < NCHUNKS; i++) {
1040 p += sprintf(p, "%lu ", zbud_cumul_chunk_counts[i]);
1041 chunks += zbud_cumul_chunk_counts[i];
1042 total_chunks += zbud_cumul_chunk_counts[i];
1043 sum_total_chunks += i * zbud_cumul_chunk_counts[i];
1045 total_chunks_lte_21 = total_chunks;
1047 total_chunks_lte_32 = total_chunks;
1049 total_chunks_lte_42 = total_chunks;
1051 p += sprintf(p, "<=21:%lu <=32:%lu <=42:%lu, mean:%lu\n",
1052 total_chunks_lte_21, total_chunks_lte_32, total_chunks_lte_42,
1053 chunks == 0 ? 0 : sum_total_chunks / chunks);
1059 * This "zv" PAM implementation combines the TLSF-based xvMalloc
1060 * with lzo1x compression to maximize the amount of data that can
1061 * be packed into a physical page.
1063 * Zv represents a PAM page with the index and object (plus a "size" value
1064 * necessary for decompression) immediately preceding the compressed data.
1067 /* rudimentary policy limits */
1068 /* total number of persistent pages may not exceed this percentage of totalram_pages */
1069 static unsigned int zv_page_count_policy_percent = 75;
1071 * byte count defining poor compression; pages with greater zsize will be rejected
1074 static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
1076 * byte count defining poor *mean* compression; pages with greater zsize
1077 * will be rejected until sufficient better-compressed pages are accepted
1078 * driving the mean below this threshold
1080 static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
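/*
 * Worked example (editor's, assuming 4KB pages): zv_max_zsize is 3584 and
 * zv_max_mean_zsize is 2560 bytes, so a swap page compressing to 3000
 * bytes is stored only while the running mean zsize of already-stored
 * pages stays at or below 2560; pages compressing to more than 3584 bytes
 * are always rejected.  See zcache_pampd_pers_create() for the checks.
 */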
1082 static atomic_t zv_curr_dist_counts[NCHUNKS];
1083 static atomic_t zv_cumul_dist_counts[NCHUNKS];
1086 static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id,
1087 struct tmem_oid *oid, uint32_t index,
1088 void *cdata, unsigned clen)
1091 struct zv_hdr *zv = NULL;
1093 int alloc_size = clen + sizeof(struct zv_hdr);
1094 int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
1097 BUG_ON(!irqs_disabled());
1098 BUG_ON(chunks >= NCHUNKS);
1099 ret = xv_malloc(cli->xvpool, clen + sizeof(struct zv_hdr),
1100 &page, &offset, ZCACHE_GFP_MASK);
1103 atomic_inc(&zv_curr_dist_counts[chunks]);
1104 atomic_inc(&zv_cumul_dist_counts[chunks]);
1105 zv = kmap_atomic(page, KM_USER0) + offset;
1108 zv->pool_id = pool_id;
1109 SET_SENTINEL(zv, ZVH);
1110 INIT_LIST_HEAD(&zv->rem_op.list);
1111 zv->client_id = get_client_id_from_client(cli);
1112 zv->rem_op.op = RAMSTER_REMOTIFY_PERS_PUT;
1113 if (zv->client_id == LOCAL_CLIENT) {
1114 spin_lock(&zcache_rem_op_list_lock);
1115 list_add_tail(&zv->rem_op.list, &zcache_rem_op_list);
1116 spin_unlock(&zcache_rem_op_list_lock);
1118 memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
1119 kunmap_atomic(zv, KM_USER0);
1124 /* similar to zv_create, but just reserve space, no data yet */
1125 static struct zv_hdr *zv_alloc(struct tmem_pool *pool,
1126 struct tmem_oid *oid, uint32_t index,
1129 struct zcache_client *cli = pool->client;
1131 struct zv_hdr *zv = NULL;
1135 BUG_ON(!irqs_disabled());
1136 BUG_ON(!is_local_client(pool->client));
1137 ret = xv_malloc(cli->xvpool, clen + sizeof(struct zv_hdr),
1138 &page, &offset, ZCACHE_GFP_MASK);
1141 zv = kmap_atomic(page, KM_USER0) + offset;
1142 SET_SENTINEL(zv, ZVH);
1143 INIT_LIST_HEAD(&zv->rem_op.list);
1144 zv->client_id = LOCAL_CLIENT;
1145 zv->rem_op.op = RAMSTER_INTRANSIT_PERS;
1148 zv->pool_id = pool->pool_id;
1149 kunmap_atomic(zv, KM_USER0);
1154 static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
1156 unsigned long flags;
1159 uint16_t size = xv_get_object_size(zv);
1160 int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
1162 ASSERT_SENTINEL(zv, ZVH);
1163 BUG_ON(chunks >= NCHUNKS);
1164 atomic_dec(&zv_curr_dist_counts[chunks]);
1165 size -= sizeof(*zv);
1166 spin_lock(&zcache_rem_op_list_lock);
1167 size = xv_get_object_size(zv) - sizeof(*zv);
1169 INVERT_SENTINEL(zv, ZVH);
1170 if (!list_empty(&zv->rem_op.list))
1171 list_del_init(&zv->rem_op.list);
1172 spin_unlock(&zcache_rem_op_list_lock);
1173 page = virt_to_page(zv);
1174 offset = (unsigned long)zv & ~PAGE_MASK;
1175 local_irq_save(flags);
1176 xv_free(xvpool, page, offset);
1177 local_irq_restore(flags);
1180 static void zv_decompress(struct page *page, struct zv_hdr *zv)
1182 size_t clen = PAGE_SIZE;
1187 ASSERT_SENTINEL(zv, ZVH);
1188 size = xv_get_object_size(zv) - sizeof(*zv);
1190 to_va = kmap_atomic(page, KM_USER0);
1191 ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
1192 size, to_va, &clen);
1193 kunmap_atomic(to_va, KM_USER0);
1194 BUG_ON(ret != LZO_E_OK);
1195 BUG_ON(clen != PAGE_SIZE);
1198 static void zv_copy_from_pampd(char *data, size_t *bufsize, struct zv_hdr *zv)
1202 ASSERT_SENTINEL(zv, ZVH);
1203 size = xv_get_object_size(zv) - sizeof(*zv);
1204 BUG_ON(size == 0 || size > zv_max_page_size);
1205 BUG_ON(size > *bufsize);
1206 memcpy(data, (char *)zv + sizeof(*zv), size);
1210 static void zv_copy_to_pampd(struct zv_hdr *zv, char *data, size_t size)
1214 ASSERT_SENTINEL(zv, ZVH);
1215 zv_size = xv_get_object_size(zv) - sizeof(*zv);
1216 BUG_ON(zv_size != size);
1217 BUG_ON(zv_size == 0 || zv_size > zv_max_page_size);
1218 memcpy((char *)zv + sizeof(*zv), data, size);
1223 * show a distribution of compression stats for zv pages.
1226 static int zv_curr_dist_counts_show(char *buf)
1228 unsigned long i, n, chunks = 0, sum_total_chunks = 0;
1231 for (i = 0; i < NCHUNKS; i++) {
1232 n = atomic_read(&zv_curr_dist_counts[i]);
1233 p += sprintf(p, "%lu ", n);
1235 sum_total_chunks += i * n;
1237 p += sprintf(p, "mean:%lu\n",
1238 chunks == 0 ? 0 : sum_total_chunks / chunks);
1242 static int zv_cumul_dist_counts_show(char *buf)
1244 unsigned long i, n, chunks = 0, sum_total_chunks = 0;
1247 for (i = 0; i < NCHUNKS; i++) {
1248 n = atomic_read(&zv_cumul_dist_counts[i]);
1249 p += sprintf(p, "%lu ", n);
1251 sum_total_chunks += i * n;
1253 p += sprintf(p, "mean:%lu\n",
1254 chunks == 0 ? 0 : sum_total_chunks / chunks);
1259 * setting zv_max_zsize via sysfs causes all persistent (e.g. swap)
1260 * pages that don't compress to less than this value (including metadata
1261 * overhead) to be rejected. We don't allow the value to get too close to PAGE_SIZE.
1264 static ssize_t zv_max_zsize_show(struct kobject *kobj,
1265 struct kobj_attribute *attr,
1268 return sprintf(buf, "%u\n", zv_max_zsize);
1271 static ssize_t zv_max_zsize_store(struct kobject *kobj,
1272 struct kobj_attribute *attr,
1273 const char *buf, size_t count)
1278 if (!capable(CAP_SYS_ADMIN))
1281 err = strict_strtoul(buf, 10, &val);
1282 if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
1289 * setting zv_max_mean_zsize via sysfs causes all persistent (e.g. swap)
1290 * pages that don't compress to less than this value (including metadata
1291 * overhead) to be rejected UNLESS the mean compression is also smaller
1292 * than this value. In other words, we are load-balancing-by-zsize the
1293 * accepted pages. Again, we don't allow the value to get too close to PAGE_SIZE.
1296 static ssize_t zv_max_mean_zsize_show(struct kobject *kobj,
1297 struct kobj_attribute *attr,
1300 return sprintf(buf, "%u\n", zv_max_mean_zsize);
1303 static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
1304 struct kobj_attribute *attr,
1305 const char *buf, size_t count)
1310 if (!capable(CAP_SYS_ADMIN))
1313 err = strict_strtoul(buf, 10, &val);
1314 if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
1316 zv_max_mean_zsize = val;
1321 * setting zv_page_count_policy_percent via sysfs sets an upper bound of
1322 * persistent (e.g. swap) pages that will be retained according to:
1323 * (zv_page_count_policy_percent * totalram_pages) / 100;
1324 * when that limit is reached, further puts will be rejected (until
1325 * some pages have been flushed). Note that, due to compression,
1326 * this number may exceed 100; it defaults to 75 and we set an
1327 * arbitrary limit of 150. A poor choice will almost certainly result
1328 * in OOM's, so this value should only be changed prudently.
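/*
 * Worked example (editor's, assuming 4KB pages): on a machine with 8GB of
 * RAM (2097152 pages) the default of 75 allows up to 1572864 compressed
 * persistent pages to be retained before further puts are rejected.
 */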
1330 static ssize_t zv_page_count_policy_percent_show(struct kobject *kobj,
1331 struct kobj_attribute *attr,
1334 return sprintf(buf, "%u\n", zv_page_count_policy_percent);
1337 static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj,
1338 struct kobj_attribute *attr,
1339 const char *buf, size_t count)
1344 if (!capable(CAP_SYS_ADMIN))
1347 err = strict_strtoul(buf, 10, &val);
1348 if (err || (val == 0) || (val > 150))
1350 zv_page_count_policy_percent = val;
1354 static struct kobj_attribute zcache_zv_max_zsize_attr = {
1355 .attr = { .name = "zv_max_zsize", .mode = 0644 },
1356 .show = zv_max_zsize_show,
1357 .store = zv_max_zsize_store,
1360 static struct kobj_attribute zcache_zv_max_mean_zsize_attr = {
1361 .attr = { .name = "zv_max_mean_zsize", .mode = 0644 },
1362 .show = zv_max_mean_zsize_show,
1363 .store = zv_max_mean_zsize_store,
1366 static struct kobj_attribute zcache_zv_page_count_policy_percent_attr = {
1367 .attr = { .name = "zv_page_count_policy_percent",
1369 .show = zv_page_count_policy_percent_show,
1370 .store = zv_page_count_policy_percent_store,
1375 * zcache core code starts here
1378 /* useful stats not collected by cleancache or frontswap */
1379 static unsigned long zcache_flush_total;
1380 static unsigned long zcache_flush_found;
1381 static unsigned long zcache_flobj_total;
1382 static unsigned long zcache_flobj_found;
1383 static unsigned long zcache_failed_eph_puts;
1384 static unsigned long zcache_nonactive_puts;
1385 static unsigned long zcache_failed_pers_puts;
1388 * Tmem operations assume the poolid implies the invoking client.
1389 * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
1390 * RAMster has each client numbered by cluster node, and a KVM version
1391 * of zcache would have one client per guest and each client might
1394 static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
1396 struct tmem_pool *pool = NULL;
1397 struct zcache_client *cli = NULL;
1399 if (cli_id == LOCAL_CLIENT)
1402 if (cli_id >= MAX_CLIENTS)
1404 cli = &zcache_clients[cli_id];
1407 atomic_inc(&cli->refcount);
1409 if (poolid < MAX_POOLS_PER_CLIENT) {
1410 pool = cli->tmem_pools[poolid];
1412 atomic_inc(&pool->refcount);
1418 static void zcache_put_pool(struct tmem_pool *pool)
1420 struct zcache_client *cli = NULL;
1425 atomic_dec(&pool->refcount);
1426 atomic_dec(&cli->refcount);
1429 int zcache_new_client(uint16_t cli_id)
1431 struct zcache_client *cli = NULL;
1434 if (cli_id == LOCAL_CLIENT)
1436 else if ((unsigned int)cli_id < MAX_CLIENTS)
1437 cli = &zcache_clients[cli_id];
1443 #ifdef CONFIG_FRONTSWAP
1444 cli->xvpool = xv_create_pool();
1445 if (cli->xvpool == NULL)
1453 /* counters for debugging */
1454 static unsigned long zcache_failed_get_free_pages;
1455 static unsigned long zcache_failed_alloc;
1456 static unsigned long zcache_put_to_flush;
1459 * for now, use named slabs so we can easily track usage; later we can
1460 * either just use kmalloc, or perhaps add a slab-like allocator
1461 * to more carefully manage total memory utilization
1463 static struct kmem_cache *zcache_objnode_cache;
1464 static struct kmem_cache *zcache_obj_cache;
1465 static struct kmem_cache *ramster_flnode_cache;
1466 static atomic_t zcache_curr_obj_count = ATOMIC_INIT(0);
1467 static unsigned long zcache_curr_obj_count_max;
1468 static atomic_t zcache_curr_objnode_count = ATOMIC_INIT(0);
1469 static unsigned long zcache_curr_objnode_count_max;
1472 * to avoid memory allocation recursion (e.g. due to direct reclaim), we
1473 * preload all necessary data structures so the hostops callbacks never
1474 * actually do a malloc
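/*
 * Rough call pattern (editor's sketch): the put path runs
 * zcache_do_preload() with preemption disabled (note the
 * preempt_enable_no_resched() calls) to stock the per-cpu zcache_preload
 * structure with objnodes, a tmem_obj, a flushlist_node and, for
 * ephemeral pools, a raw page; the tmem hostops callbacks
 * (zcache_obj_alloc(), zcache_objnode_alloc(), zcache_get_free_page())
 * then only pop entries from that per-cpu stash and never allocate.
 */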
1476 struct zcache_preload {
1478 struct tmem_obj *obj;
1480 struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH];
1481 struct flushlist_node *flnode;
1483 static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
1485 static int zcache_do_preload(struct tmem_pool *pool)
1487 struct zcache_preload *kp;
1488 struct tmem_objnode *objnode;
1489 struct tmem_obj *obj;
1490 struct flushlist_node *flnode;
1494 if (unlikely(zcache_objnode_cache == NULL))
1496 if (unlikely(zcache_obj_cache == NULL))
1499 kp = &__get_cpu_var(zcache_preloads);
1500 while (kp->nr < ARRAY_SIZE(kp->objnodes)) {
1501 preempt_enable_no_resched();
1502 objnode = kmem_cache_alloc(zcache_objnode_cache,
1504 if (unlikely(objnode == NULL)) {
1505 zcache_failed_alloc++;
1509 kp = &__get_cpu_var(zcache_preloads);
1510 if (kp->nr < ARRAY_SIZE(kp->objnodes))
1511 kp->objnodes[kp->nr++] = objnode;
1513 kmem_cache_free(zcache_objnode_cache, objnode);
1515 preempt_enable_no_resched();
1516 obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
1517 if (unlikely(obj == NULL)) {
1518 zcache_failed_alloc++;
1521 flnode = kmem_cache_alloc(ramster_flnode_cache, ZCACHE_GFP_MASK);
1522 if (unlikely(flnode == NULL)) {
1523 zcache_failed_alloc++;
1526 if (is_ephemeral(pool)) {
1527 page = (void *)__get_free_page(ZCACHE_GFP_MASK);
1528 if (unlikely(page == NULL)) {
1529 zcache_failed_get_free_pages++;
1530 kmem_cache_free(zcache_obj_cache, obj);
1531 kmem_cache_free(ramster_flnode_cache, flnode);
1536 kp = &__get_cpu_var(zcache_preloads);
1537 if (kp->obj == NULL)
1540 kmem_cache_free(zcache_obj_cache, obj);
1541 if (kp->flnode == NULL)
1542 kp->flnode = flnode;
1544 kmem_cache_free(ramster_flnode_cache, flnode);
1545 if (is_ephemeral(pool)) {
1546 if (kp->page == NULL)
1549 free_page((unsigned long)page);
1556 static int ramster_do_preload_flnode_only(struct tmem_pool *pool)
1558 struct zcache_preload *kp;
1559 struct flushlist_node *flnode;
1562 BUG_ON(!irqs_disabled());
1563 if (unlikely(ramster_flnode_cache == NULL))
1565 kp = &__get_cpu_var(zcache_preloads);
1566 flnode = kmem_cache_alloc(ramster_flnode_cache, GFP_ATOMIC);
1567 if (unlikely(flnode == NULL) && kp->flnode == NULL)
1568 BUG(); /* FIXME handle more gracefully, but how??? */
1569 else if (kp->flnode == NULL)
1570 kp->flnode = flnode;
1572 kmem_cache_free(ramster_flnode_cache, flnode);
1576 static void *zcache_get_free_page(void)
1578 struct zcache_preload *kp;
1581 kp = &__get_cpu_var(zcache_preloads);
1583 BUG_ON(page == NULL);
1588 static void zcache_free_page(void *p)
1590 free_page((unsigned long)p);
1594 * zcache implementation for tmem host ops
1597 static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
1599 struct tmem_objnode *objnode = NULL;
1600 unsigned long count;
1601 struct zcache_preload *kp;
1603 kp = &__get_cpu_var(zcache_preloads);
1606 objnode = kp->objnodes[kp->nr - 1];
1607 BUG_ON(objnode == NULL);
1608 kp->objnodes[kp->nr - 1] = NULL;
1610 count = atomic_inc_return(&zcache_curr_objnode_count);
1611 if (count > zcache_curr_objnode_count_max)
1612 zcache_curr_objnode_count_max = count;
1617 static void zcache_objnode_free(struct tmem_objnode *objnode,
1618 struct tmem_pool *pool)
1620 atomic_dec(&zcache_curr_objnode_count);
1621 BUG_ON(atomic_read(&zcache_curr_objnode_count) < 0);
1622 kmem_cache_free(zcache_objnode_cache, objnode);
1625 static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
1627 struct tmem_obj *obj = NULL;
1628 unsigned long count;
1629 struct zcache_preload *kp;
1631 kp = &__get_cpu_var(zcache_preloads);
1633 BUG_ON(obj == NULL);
1635 count = atomic_inc_return(&zcache_curr_obj_count);
1636 if (count > zcache_curr_obj_count_max)
1637 zcache_curr_obj_count_max = count;
1641 static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
1643 atomic_dec(&zcache_curr_obj_count);
1644 BUG_ON(atomic_read(&zcache_curr_obj_count) < 0);
1645 kmem_cache_free(zcache_obj_cache, obj);
1648 static struct flushlist_node *ramster_flnode_alloc(struct tmem_pool *pool)
1650 struct flushlist_node *flnode = NULL;
1651 struct zcache_preload *kp;
1654 kp = &__get_cpu_var(zcache_preloads);
1655 flnode = kp->flnode;
1656 BUG_ON(flnode == NULL);
1658 count = atomic_inc_return(&ramster_curr_flnode_count);
1659 if (count > ramster_curr_flnode_count_max)
1660 ramster_curr_flnode_count_max = count;
1664 static void ramster_flnode_free(struct flushlist_node *flnode,
1665 struct tmem_pool *pool)
1667 atomic_dec(&ramster_curr_flnode_count);
1668 BUG_ON(atomic_read(&ramster_curr_flnode_count) < 0);
1669 kmem_cache_free(ramster_flnode_cache, flnode);
1672 static struct tmem_hostops zcache_hostops = {
1673 .obj_alloc = zcache_obj_alloc,
1674 .obj_free = zcache_obj_free,
1675 .objnode_alloc = zcache_objnode_alloc,
1676 .objnode_free = zcache_objnode_free,
1680 * zcache implementations for PAM page descriptor ops
1683 static atomic_t zcache_curr_eph_pampd_count = ATOMIC_INIT(0);
1684 static unsigned long zcache_curr_eph_pampd_count_max;
1685 static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
1686 static unsigned long zcache_curr_pers_pampd_count_max;
1688 /* forward reference */
1689 static int zcache_compress(struct page *from, void **out_va, size_t *out_len);
1691 static int zcache_pampd_eph_create(char *data, size_t size, bool raw,
1692 struct tmem_pool *pool, struct tmem_oid *oid,
1693 uint32_t index, void **pampd)
1698 struct zcache_client *cli = pool->client;
1699 uint16_t client_id = get_client_id_from_client(cli);
1700 struct page *page = NULL;
1701 unsigned long count;
1704 page = virt_to_page(data);
1705 ret = zcache_compress(page, &cdata, &clen);
1708 if (clen == 0 || clen > zbud_max_buddy_size()) {
1709 zcache_compress_poor++;
1713 *pampd = (void *)zbud_create(client_id, pool->pool_id, oid,
1714 index, page, cdata, clen);
1715 if (*pampd == NULL) {
1720 count = atomic_inc_return(&zcache_curr_eph_pampd_count);
1721 if (count > zcache_curr_eph_pampd_count_max)
1722 zcache_curr_eph_pampd_count_max = count;
1723 if (client_id != LOCAL_CLIENT) {
1724 count = atomic_inc_return(&ramster_foreign_eph_pampd_count);
1725 if (count > ramster_foreign_eph_pampd_count_max)
1726 ramster_foreign_eph_pampd_count_max = count;
1732 static int zcache_pampd_pers_create(char *data, size_t size, bool raw,
1733 struct tmem_pool *pool, struct tmem_oid *oid,
1734 uint32_t index, void **pampd)
1739 struct zcache_client *cli = pool->client;
1741 unsigned long count;
1742 unsigned long zv_mean_zsize;
1744 long curr_pers_pampd_count;
1746 #ifdef RAMSTER_TESTING
1747 static bool pampd_neg_warned;
1750 curr_pers_pampd_count = atomic_read(&zcache_curr_pers_pampd_count) -
1751 atomic_read(&ramster_remote_pers_pages);
1752 #ifdef RAMSTER_TESTING
1753 /* should always be positive, but warn if accounting is off */
1754 if (!pampd_neg_warned) {
1755 pr_warn("ramster: bad accounting for curr_pers_pampd_count\n");
1756 pampd_neg_warned = true;
1759 if (curr_pers_pampd_count >
1760 (zv_page_count_policy_percent * totalram_pages) / 100) {
1761 zcache_policy_percent_exceeded++;
1766 page = virt_to_page(data);
1767 if (zcache_compress(page, &cdata, &clen) == 0)
1769 /* reject if compression is too poor */
1770 if (clen > zv_max_zsize) {
1771 zcache_compress_poor++;
1774 /* reject if mean compression is too poor */
1775 if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
1776 total_zsize = xv_get_total_size_bytes(cli->xvpool);
1777 zv_mean_zsize = div_u64(total_zsize, curr_pers_pampd_count);
1778 if (zv_mean_zsize > zv_max_mean_zsize) {
1779 zcache_mean_compress_poor++;
1784 *pampd = (void *)zv_create(cli, pool->pool_id, oid, index, cdata, clen);
1785 if (*pampd == NULL) {
1790 count = atomic_inc_return(&zcache_curr_pers_pampd_count);
1791 if (count > zcache_curr_pers_pampd_count_max)
1792 zcache_curr_pers_pampd_count_max = count;
1793 if (is_local_client(cli))
1795 zv = *(struct zv_hdr **)pampd;
1796 count = atomic_inc_return(&ramster_foreign_pers_pampd_count);
1797 if (count > ramster_foreign_pers_pampd_count_max)
1798 ramster_foreign_pers_pampd_count_max = count;
1803 static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
1804 struct tmem_pool *pool, struct tmem_oid *oid,
1811 BUG_ON(preemptible());
1812 ephemeral = (eph == 1) || ((eph == 0) && is_ephemeral(pool));
1814 ret = zcache_pampd_eph_create(data, size, raw, pool,
1815 oid, index, &pampd);
1817 ret = zcache_pampd_pers_create(data, size, raw, pool,
1818 oid, index, &pampd);
1819 /* FIXME add some counters here for failed creates? */
1824 * fill the pageframe corresponding to the struct page with the data
1825 * from the passed pampd
1827 static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
1828 void *pampd, struct tmem_pool *pool,
1829 struct tmem_oid *oid, uint32_t index)
1833 BUG_ON(preemptible());
1834 BUG_ON(is_ephemeral(pool)); /* Fix later for shared pools? */
1835 BUG_ON(pampd_is_remote(pampd));
1837 zv_copy_from_pampd(data, bufsize, pampd);
1839 zv_decompress(virt_to_page(data), pampd);
1843 static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
1844 void *pampd, struct tmem_pool *pool,
1845 struct tmem_oid *oid, uint32_t index)
1848 unsigned long flags;
1849 struct zcache_client *cli = pool->client;
1851 BUG_ON(preemptible());
1852 BUG_ON(pampd_is_remote(pampd));
1853 if (is_ephemeral(pool)) {
1854 local_irq_save(flags);
1856 zbud_copy_from_pampd(data, bufsize, pampd);
1858 ret = zbud_decompress(virt_to_page(data), pampd);
1859 zbud_free_and_delist((struct zbud_hdr *)pampd);
1860 local_irq_restore(flags);
1861 if (!is_local_client(cli)) {
1862 atomic_dec(&ramster_foreign_eph_pampd_count);
1863 WARN_ON_ONCE(atomic_read(&ramster_foreign_eph_pampd_count) < 0);
1865 atomic_dec(&zcache_curr_eph_pampd_count);
1866 WARN_ON_ONCE(atomic_read(&zcache_curr_eph_pampd_count) < 0);
1868 if (is_local_client(cli))
1871 zv_copy_from_pampd(data, bufsize, pampd);
1873 zv_decompress(virt_to_page(data), pampd);
1874 zv_free(cli->xvpool, pampd);
1875 if (!is_local_client(cli)) {
1876 atomic_dec(&ramster_foreign_pers_pampd_count);
1877 WARN_ON_ONCE(atomic_read(&ramster_foreign_pers_pampd_count) < 0);
1879 atomic_dec(&zcache_curr_pers_pampd_count);
1880 WARN_ON_ONCE(atomic_read(&zcache_curr_pers_pampd_count) < 0);
1886 static bool zcache_pampd_is_remote(void *pampd)
1888 return pampd_is_remote(pampd);
1892 * free the pampd and remove it from any zcache lists
1893 * pampd must no longer be pointed to from any tmem data structures!
1895 static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
1896 struct tmem_oid *oid, uint32_t index, bool acct)
1898 struct zcache_client *cli = pool->client;
1899 bool eph = is_ephemeral(pool);
1902 BUG_ON(preemptible());
1903 if (pampd_is_remote(pampd)) {
1904 WARN_ON(acct == false);
1907 * a NULL oid means to ignore this pampd free
1908 * as the remote freeing will be handled elsewhere
1911 /* FIXME remote flush optional but probably good idea */
1912 /* FIXME get these working properly again */
1913 atomic_dec(&zcache_curr_eph_pampd_count);
1914 WARN_ON_ONCE(atomic_read(&zcache_curr_eph_pampd_count) < 0);
1915 } else if (pampd_is_intransit(pampd)) {
1916 /* did a pers remote get_and_free, so just free local */
1917 pampd = pampd_mask_intransit_and_remote(pampd);
1920 struct flushlist_node *flnode =
1921 ramster_flnode_alloc(pool);
1923 flnode->xh.client_id = pampd_remote_node(pampd);
1924 flnode->xh.pool_id = pool->pool_id;
1925 flnode->xh.oid = *oid;
1926 flnode->xh.index = index;
1927 flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_PAGE;
1928 spin_lock(&zcache_rem_op_list_lock);
1929 list_add(&flnode->rem_op.list, &zcache_rem_op_list);
1930 spin_unlock(&zcache_rem_op_list_lock);
1931 atomic_dec(&zcache_curr_pers_pampd_count);
1932 WARN_ON_ONCE(atomic_read(&zcache_curr_pers_pampd_count) < 0);
1933 atomic_dec(&ramster_remote_pers_pages);
1934 WARN_ON_ONCE(atomic_read(&ramster_remote_pers_pages) < 0);
1937 zbud_free_and_delist((struct zbud_hdr *)pampd);
1938 if (!is_local_client(pool->client)) {
1939 atomic_dec(&ramster_foreign_eph_pampd_count);
1940 WARN_ON_ONCE(atomic_read(&ramster_foreign_eph_pampd_count) < 0);
1943 atomic_dec(&zcache_curr_eph_pampd_count);
1944 /* FIXME get these working properly again */
1945 WARN_ON_ONCE(atomic_read(&zcache_curr_eph_pampd_count) < 0);
1948 zv = (struct zv_hdr *)pampd;
1949 if (!is_local_client(pool->client)) {
1950 atomic_dec(&ramster_foreign_pers_pampd_count);
1951 WARN_ON_ONCE(atomic_read(&ramster_foreign_pers_pampd_count) < 0);
1953 zv_free(cli->xvpool, zv);
1955 atomic_dec(&zcache_curr_pers_pampd_count);
1956 /* FIXME get these working properly again */
1957 WARN_ON_ONCE(atomic_read(&zcache_curr_pers_pampd_count) < 0);
1961 static void zcache_pampd_free_obj(struct tmem_pool *pool,
1962 struct tmem_obj *obj)
1964 struct flushlist_node *flnode;
1966 BUG_ON(preemptible());
1967 if (obj->extra == NULL)
1969 BUG_ON(!pampd_is_remote(obj->extra));
1970 flnode = ramster_flnode_alloc(pool);
1971 flnode->xh.client_id = pampd_remote_node(obj->extra);
1972 flnode->xh.pool_id = pool->pool_id;
1973 flnode->xh.oid = obj->oid;
1974 flnode->xh.index = FLUSH_ENTIRE_OBJECT;
1975 flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_OBJ;
1976 spin_lock(&zcache_rem_op_list_lock);
1977 list_add(&flnode->rem_op.list, &zcache_rem_op_list);
1978 spin_unlock(&zcache_rem_op_list_lock);
1981 void zcache_pampd_new_obj(struct tmem_obj *obj)
1986 int zcache_pampd_replace_in_obj(void *new_pampd, struct tmem_obj *obj)
1990 if (new_pampd != NULL) {
1991 if (obj->extra == NULL)
1992 obj->extra = new_pampd;
1993 /* enforce that all remote pages in an object reside
1994 * in the same node! */
1995 else if (pampd_remote_node(new_pampd) !=
1996 pampd_remote_node((void *)(obj->extra)))
2004 * Called by the message handler after a (still compressed) page has been
2005 * fetched from the remote machine in response to an "is_remote" tmem_get
2006 * or persistent tmem_localify. For a tmem_get, "extra" is the address of
2007 * the page that is to be filled to successfully resolve the tmem_get; for
2008 * a (persistent) tmem_localify, "extra" is NULL (as the data is placed only
2009 * in the local zcache). "data" points to "size" bytes of (compressed) data
2010 * passed in the message. In the case of a persistent remote get, if
2011 * pre-allocation was successful (see zcache_pampd_repatriate_preload), the page
2012 * is placed into both local zcache and at "extra".
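/*
 * Rough flow of zcache_localify() below (editor's summary): look up the
 * local is_remote pampd with tmem_localify_get_pampd(); if it has vanished
 * (flush) or been overwritten by a dup put, count an unsuccessful get;
 * otherwise, for a persistent intransit pampd, copy the still-compressed
 * data into the pre-allocated zv with zv_copy_to_pampd(), and if "extra"
 * is non-NULL decompress directly into that page; finish with
 * tmem_localify_finish().
 */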
2014 int zcache_localify(int pool_id, struct tmem_oid *oidp,
2015 uint32_t index, char *data, size_t size,
2019 unsigned long flags;
2020 struct tmem_pool *pool;
2021 bool ephemeral, delete = false;
2022 size_t clen = PAGE_SIZE;
2023 void *pampd, *saved_hb;
2024 struct tmem_obj *obj;
2026 pool = zcache_get_pool_by_id(LOCAL_CLIENT, pool_id);
2027 if (unlikely(pool == NULL))
2028 /* pool doesn't exist anymore */
2030 ephemeral = is_ephemeral(pool);
2031 local_irq_save(flags); /* FIXME: maybe only disable softirqs? */
2032 pampd = tmem_localify_get_pampd(pool, oidp, index, &obj, &saved_hb);
2033 if (pampd == NULL) {
2034 /* hmmm... must have been a flush while waiting */
2035 #ifdef RAMSTER_TESTING
2036 pr_err("UNTESTED pampd==NULL in zcache_localify\n");
2039 ramster_remote_eph_pages_unsucc_get++;
2041 ramster_remote_pers_pages_unsucc_get++;
2044 } else if (unlikely(!pampd_is_remote(pampd))) {
2045 /* hmmm... must have been a dup put while waiting */
2046 #ifdef RAMSTER_TESTING
2047 pr_err("UNTESTED dup while waiting in zcache_localify\n");
2050 ramster_remote_eph_pages_unsucc_get++;
2052 ramster_remote_pers_pages_unsucc_get++;
2057 } else if (size == 0) {
2058 /* no remote data, delete the local is_remote pampd */
2061 ramster_remote_eph_pages_unsucc_get++;
2067 if (!ephemeral && pampd_is_intransit(pampd)) {
2068 /* localify to zcache */
2069 pampd = pampd_mask_intransit_and_remote(pampd);
2070 zv_copy_to_pampd(pampd, data, size);
2075 if (extra != NULL) {
2076 /* decompress direct-to-memory to complete remotify */
2077 ret = lzo1x_decompress_safe((char *)data, size,
2078 (char *)extra, &clen);
2079 BUG_ON(ret != LZO_E_OK);
2080 BUG_ON(clen != PAGE_SIZE);
2083 ramster_remote_eph_pages_succ_get++;
2085 ramster_remote_pers_pages_succ_get++;
2088 tmem_localify_finish(obj, index, pampd, saved_hb, delete);
2089 zcache_put_pool(pool);
2090 local_irq_restore(flags);
2096 * Called on a remote persistent tmem_get to attempt to preallocate
2097 * local storage for the data contained in the remote persistent page.
2098 * If successfully preallocated, returns the pampd, marked as remote and
2099 * in_transit. Else returns NULL. Note that the appropriate tmem data
2100 * structure must be locked.
2102 static void *zcache_pampd_repatriate_preload(void *pampd,
2103 struct tmem_pool *pool,
2104 struct tmem_oid *oid,
2108 int clen = pampd_remote_size(pampd);
2109 void *ret_pampd = NULL;
2110 unsigned long flags;
2112 if (!pampd_is_remote(pampd))
2114 if (is_ephemeral(pool))
2116 if (pampd_is_intransit(pampd)) {
2118 * to avoid multiple allocations (and maybe a memory leak)
2119 * don't preallocate if already in the process of being repatriated
2126 local_irq_save(flags);
2127 ret_pampd = (void *)zv_alloc(pool, oid, index, clen);
2128 if (ret_pampd != NULL) {
2130 * a pampd is marked intransit if it is remote and space has
2131 * been allocated for it locally (note, only happens for
2132 * persistent pages, in which case the remote copy is freed)
2134 ret_pampd = pampd_mark_intransit(ret_pampd);
2135 atomic_dec(&ramster_remote_pers_pages);
2136 WARN_ON_ONCE(atomic_read(&ramster_remote_pers_pages) < 0);
2138 ramster_pers_pages_remote_nomem++;
2139 local_irq_restore(flags);
2145 * Called on a remote tmem_get to invoke a message to fetch the page.
2146 * Might sleep so no tmem locks can be held. "extra" is passed
2147 * all the way through the round-trip messaging to zcache_localify.
2149 static int zcache_pampd_repatriate(void *fake_pampd, void *real_pampd,
2150 struct tmem_pool *pool,
2151 struct tmem_oid *oid, uint32_t index,
2152 bool free, void *extra)
2154 struct tmem_xhandle xh;
2157 if (pampd_is_intransit(real_pampd))
2158 /* have local space pre-reserved, so free remote copy */
2160 xh = tmem_xhandle_fill(LOCAL_CLIENT, pool, oid, index);
2161 /* unreliable request/response for now */
2162 ret = ramster_remote_async_get(&xh, free,
2163 pampd_remote_node(fake_pampd),
2164 pampd_remote_size(fake_pampd),
2165 pampd_remote_cksum(fake_pampd),
2167 #ifdef RAMSTER_TESTING
2168 if (ret != 0 && ret != -ENOENT)
2169 pr_err("TESTING zcache_pampd_repatriate returns, ret=%d\n",
2175 static struct tmem_pamops zcache_pamops = {
2176 .create = zcache_pampd_create,
2177 .get_data = zcache_pampd_get_data,
2178 .free = zcache_pampd_free,
2179 .get_data_and_free = zcache_pampd_get_data_and_free,
2180 .free_obj = zcache_pampd_free_obj,
2181 .is_remote = zcache_pampd_is_remote,
2182 .repatriate_preload = zcache_pampd_repatriate_preload,
2183 .repatriate = zcache_pampd_repatriate,
2184 .new_obj = zcache_pampd_new_obj,
2185 .replace_in_obj = zcache_pampd_replace_in_obj,
2189 * zcache compression/decompression and related per-cpu stuff
2192 #define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
2193 #define LZO_DSTMEM_PAGE_ORDER 1
2194 static DEFINE_PER_CPU(unsigned char *, zcache_workmem);
2195 static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
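/*
 * Note: zcache_dstmem is an order-1 (two-page) buffer because lzo1x output
 * for incompressible data can exceed PAGE_SIZE (worst case is roughly
 * PAGE_SIZE + PAGE_SIZE/16 + 64 + 3 bytes); zcache_workmem holds the lzo1x
 * compressor's scratch state.
 */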
2197 static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
2200 unsigned char *dmem = __get_cpu_var(zcache_dstmem);
2201 unsigned char *wmem = __get_cpu_var(zcache_workmem);
2204 BUG_ON(!irqs_disabled());
2205 if (unlikely(dmem == NULL || wmem == NULL))
2206 goto out; /* no buffer, so can't compress */
2207 from_va = kmap_atomic(from, KM_USER0);
2209 ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
2210 BUG_ON(ret != LZO_E_OK);
2212 kunmap_atomic(from_va, KM_USER0);
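/*
 * CPU hotplug callback: on CPU_UP_PREPARE, per-cpu buffers are allocated for
 * the incoming cpu (the lzo dstmem/workmem pair plus a page-sized
 * zcache_remoteputmem buffer); when bringup is cancelled or the cpu is torn
 * down, they are freed again along with any objects still sitting in that
 * cpu's preload cache.
 */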
2219 static int zcache_cpu_notifier(struct notifier_block *nb,
2220 unsigned long action, void *pcpu)
2222 int cpu = (long)pcpu;
2223 struct zcache_preload *kp;
2226 case CPU_UP_PREPARE:
2227 per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
2228 GFP_KERNEL | __GFP_REPEAT,
2229 LZO_DSTMEM_PAGE_ORDER);
2230 per_cpu(zcache_workmem, cpu) =
2231 kzalloc(LZO1X_MEM_COMPRESS,
2232 GFP_KERNEL | __GFP_REPEAT);
2233 per_cpu(zcache_remoteputmem, cpu) =
2234 kzalloc(PAGE_SIZE, GFP_KERNEL | __GFP_REPEAT);
2237 case CPU_UP_CANCELED:
2238 kfree(per_cpu(zcache_remoteputmem, cpu));
2239 per_cpu(zcache_remoteputmem, cpu) = NULL;
2240 free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
2241 LZO_DSTMEM_PAGE_ORDER);
2242 per_cpu(zcache_dstmem, cpu) = NULL;
2243 kfree(per_cpu(zcache_workmem, cpu));
2244 per_cpu(zcache_workmem, cpu) = NULL;
2245 kp = &per_cpu(zcache_preloads, cpu);
2247 kmem_cache_free(zcache_objnode_cache,
2248 kp->objnodes[kp->nr - 1]);
2249 kp->objnodes[kp->nr - 1] = NULL;
2253 kmem_cache_free(zcache_obj_cache, kp->obj);
2257 kmem_cache_free(ramster_flnode_cache, kp->flnode);
2261 free_page((unsigned long)kp->page);
2271 static struct notifier_block zcache_cpu_notifier_block = {
2272 .notifier_call = zcache_cpu_notifier
2276 #define ZCACHE_SYSFS_RO(_name) \
2277 static ssize_t zcache_##_name##_show(struct kobject *kobj, \
2278 struct kobj_attribute *attr, char *buf) \
2280 return sprintf(buf, "%lu\n", zcache_##_name); \
2282 static struct kobj_attribute zcache_##_name##_attr = { \
2283 .attr = { .name = __stringify(_name), .mode = 0444 }, \
2284 .show = zcache_##_name##_show, \
2287 #define ZCACHE_SYSFS_RO_ATOMIC(_name) \
2288 static ssize_t zcache_##_name##_show(struct kobject *kobj, \
2289 struct kobj_attribute *attr, char *buf) \
2291 return sprintf(buf, "%d\n", atomic_read(&zcache_##_name)); \
2293 static struct kobj_attribute zcache_##_name##_attr = { \
2294 .attr = { .name = __stringify(_name), .mode = 0444 }, \
2295 .show = zcache_##_name##_show, \
2298 #define ZCACHE_SYSFS_RO_CUSTOM(_name, _func) \
2299 static ssize_t zcache_##_name##_show(struct kobject *kobj, \
2300 struct kobj_attribute *attr, char *buf) \
2302 return _func(buf); \
2304 static struct kobj_attribute zcache_##_name##_attr = { \
2305 .attr = { .name = __stringify(_name), .mode = 0444 }, \
2306 .show = zcache_##_name##_show, \
2309 ZCACHE_SYSFS_RO(curr_obj_count_max);
2310 ZCACHE_SYSFS_RO(curr_objnode_count_max);
2311 ZCACHE_SYSFS_RO(flush_total);
2312 ZCACHE_SYSFS_RO(flush_found);
2313 ZCACHE_SYSFS_RO(flobj_total);
2314 ZCACHE_SYSFS_RO(flobj_found);
2315 ZCACHE_SYSFS_RO(failed_eph_puts);
2316 ZCACHE_SYSFS_RO(nonactive_puts);
2317 ZCACHE_SYSFS_RO(failed_pers_puts);
2318 ZCACHE_SYSFS_RO(zbud_curr_zbytes);
2319 ZCACHE_SYSFS_RO(zbud_cumul_zpages);
2320 ZCACHE_SYSFS_RO(zbud_cumul_zbytes);
2321 ZCACHE_SYSFS_RO(zbud_buddied_count);
2322 ZCACHE_SYSFS_RO(evicted_raw_pages);
2323 ZCACHE_SYSFS_RO(evicted_unbuddied_pages);
2324 ZCACHE_SYSFS_RO(evicted_buddied_pages);
2325 ZCACHE_SYSFS_RO(failed_get_free_pages);
2326 ZCACHE_SYSFS_RO(failed_alloc);
2327 ZCACHE_SYSFS_RO(put_to_flush);
2328 ZCACHE_SYSFS_RO(compress_poor);
2329 ZCACHE_SYSFS_RO(mean_compress_poor);
2330 ZCACHE_SYSFS_RO(policy_percent_exceeded);
2331 ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages);
2332 ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages);
2333 ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count);
2334 ZCACHE_SYSFS_RO_ATOMIC(curr_objnode_count);
2335 ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts,
2336 zbud_show_unbuddied_list_counts);
2337 ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts,
2338 zbud_show_cumul_chunk_counts);
2339 ZCACHE_SYSFS_RO_CUSTOM(zv_curr_dist_counts,
2340 zv_curr_dist_counts_show);
2341 ZCACHE_SYSFS_RO_CUSTOM(zv_cumul_dist_counts,
2342 zv_cumul_dist_counts_show);
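/*
 * For reference: an invocation such as ZCACHE_SYSFS_RO(flush_total) above
 * expands to a zcache_flush_total_show() helper that sprintf()s the
 * zcache_flush_total counter plus a read-only (mode 0444) kobj_attribute
 * named "flush_total"; the _ATOMIC and _CUSTOM variants differ only in how
 * the value is obtained.  The attributes are collected in zcache_attrs[]
 * below and exported via sysfs_create_group() at init time.
 */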
2344 static struct attribute *zcache_attrs[] = {
2345 &zcache_curr_obj_count_attr.attr,
2346 &zcache_curr_obj_count_max_attr.attr,
2347 &zcache_curr_objnode_count_attr.attr,
2348 &zcache_curr_objnode_count_max_attr.attr,
2349 &zcache_flush_total_attr.attr,
2350 &zcache_flobj_total_attr.attr,
2351 &zcache_flush_found_attr.attr,
2352 &zcache_flobj_found_attr.attr,
2353 &zcache_failed_eph_puts_attr.attr,
2354 &zcache_nonactive_puts_attr.attr,
2355 &zcache_failed_pers_puts_attr.attr,
2356 &zcache_policy_percent_exceeded_attr.attr,
2357 &zcache_compress_poor_attr.attr,
2358 &zcache_mean_compress_poor_attr.attr,
2359 &zcache_zbud_curr_raw_pages_attr.attr,
2360 &zcache_zbud_curr_zpages_attr.attr,
2361 &zcache_zbud_curr_zbytes_attr.attr,
2362 &zcache_zbud_cumul_zpages_attr.attr,
2363 &zcache_zbud_cumul_zbytes_attr.attr,
2364 &zcache_zbud_buddied_count_attr.attr,
2365 &zcache_evicted_raw_pages_attr.attr,
2366 &zcache_evicted_unbuddied_pages_attr.attr,
2367 &zcache_evicted_buddied_pages_attr.attr,
2368 &zcache_failed_get_free_pages_attr.attr,
2369 &zcache_failed_alloc_attr.attr,
2370 &zcache_put_to_flush_attr.attr,
2371 &zcache_zbud_unbuddied_list_counts_attr.attr,
2372 &zcache_zbud_cumul_chunk_counts_attr.attr,
2373 &zcache_zv_curr_dist_counts_attr.attr,
2374 &zcache_zv_cumul_dist_counts_attr.attr,
2375 &zcache_zv_max_zsize_attr.attr,
2376 &zcache_zv_max_mean_zsize_attr.attr,
2377 &zcache_zv_page_count_policy_percent_attr.attr,
2381 static struct attribute_group zcache_attr_group = {
2382 .attrs = zcache_attrs,
2386 #define RAMSTER_SYSFS_RO(_name) \
2387 static ssize_t ramster_##_name##_show(struct kobject *kobj, \
2388 struct kobj_attribute *attr, char *buf) \
2390 return sprintf(buf, "%lu\n", ramster_##_name); \
2392 static struct kobj_attribute ramster_##_name##_attr = { \
2393 .attr = { .name = __stringify(_name), .mode = 0444 }, \
2394 .show = ramster_##_name##_show, \
2397 #define RAMSTER_SYSFS_RW(_name) \
2398 static ssize_t ramster_##_name##_show(struct kobject *kobj, \
2399 struct kobj_attribute *attr, char *buf) \
2401 return sprintf(buf, "%lu\n", ramster_##_name); \
2403 static ssize_t ramster_##_name##_store(struct kobject *kobj, \
2404 struct kobj_attribute *attr, const char *buf, size_t count) \
2407 unsigned long enable; \
2408 err = strict_strtoul(buf, 10, &enable); \
2411 ramster_##_name = enable; \
2414 static struct kobj_attribute ramster_##_name##_attr = { \
2415 .attr = { .name = __stringify(_name), .mode = 0644 }, \
2416 .show = ramster_##_name##_show, \
2417 .store = ramster_##_name##_store, \
2420 #define RAMSTER_SYSFS_RO_ATOMIC(_name) \
2421 static ssize_t ramster_##_name##_show(struct kobject *kobj, \
2422 struct kobj_attribute *attr, char *buf) \
2424 return sprintf(buf, "%d\n", atomic_read(&ramster_##_name)); \
2426 static struct kobj_attribute ramster_##_name##_attr = { \
2427 .attr = { .name = __stringify(_name), .mode = 0444 }, \
2428 .show = ramster_##_name##_show, \
2431 RAMSTER_SYSFS_RO_ATOMIC(remote_pers_pages);
2432 RAMSTER_SYSFS_RW(pers_remotify_enable);
2433 RAMSTER_SYSFS_RW(eph_remotify_enable);
2434 RAMSTER_SYSFS_RO(eph_pages_remoted);
2435 RAMSTER_SYSFS_RO(eph_pages_remote_failed);
2436 RAMSTER_SYSFS_RO(pers_pages_remoted);
2437 RAMSTER_SYSFS_RO(pers_pages_remote_failed);
2438 RAMSTER_SYSFS_RO(pers_pages_remote_nomem);
2439 RAMSTER_SYSFS_RO(remote_pages_flushed);
2440 RAMSTER_SYSFS_RO(remote_page_flushes_failed);
2441 RAMSTER_SYSFS_RO(remote_objects_flushed);
2442 RAMSTER_SYSFS_RO(remote_object_flushes_failed);
2443 RAMSTER_SYSFS_RO(remote_eph_pages_succ_get);
2444 RAMSTER_SYSFS_RO(remote_eph_pages_unsucc_get);
2445 RAMSTER_SYSFS_RO(remote_pers_pages_succ_get);
2446 RAMSTER_SYSFS_RO(remote_pers_pages_unsucc_get);
2447 RAMSTER_SYSFS_RO_ATOMIC(foreign_eph_pampd_count);
2448 RAMSTER_SYSFS_RO(foreign_eph_pampd_count_max);
2449 RAMSTER_SYSFS_RO_ATOMIC(foreign_pers_pampd_count);
2450 RAMSTER_SYSFS_RO(foreign_pers_pampd_count_max);
2451 RAMSTER_SYSFS_RO_ATOMIC(curr_flnode_count);
2452 RAMSTER_SYSFS_RO(curr_flnode_count_max);
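/*
 * Usage sketch for the RW knobs above: they parse a decimal value written
 * via sysfs, so (assuming the attribute group registered on mm_kobj at init
 * is named "ramster") something like
 *   echo 1 > /sys/kernel/mm/ramster/eph_remotify_enable
 * would enable remotification of ephemeral pages, and reading the same file
 * reports the current setting.
 */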
2454 #define MANUAL_NODES 8
2455 static bool ramster_nodes_manual_up[MANUAL_NODES];
2456 static ssize_t ramster_manual_node_up_show(struct kobject *kobj,
2457 struct kobj_attribute *attr, char *buf)
2461 for (i = 0; i < MANUAL_NODES; i++)
2462 if (ramster_nodes_manual_up[i])
2463 p += sprintf(p, "%d ", i);
2464 p += sprintf(p, "\n");
2468 static ssize_t ramster_manual_node_up_store(struct kobject *kobj,
2469 struct kobj_attribute *attr, const char *buf, size_t count)
2472 unsigned long node_num;
2474 err = strict_strtoul(buf, 10, &node_num);
2476 pr_err("bad strtoul?\n");
2479 if (node_num >= MANUAL_NODES) {
2480 pr_err("bad node_num=%lu?\n", node_num);
2483 if (ramster_nodes_manual_up[node_num]) {
2484 pr_err("node %d already up, ignoring\n", (int)node_num);
2486 ramster_nodes_manual_up[node_num] = true;
2487 o2net_hb_node_up_manual((int)node_num);
2492 static struct kobj_attribute ramster_manual_node_up_attr = {
2493 .attr = { .name = "manual_node_up", .mode = 0644 },
2494 .show = ramster_manual_node_up_show,
2495 .store = ramster_manual_node_up_store,
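/*
 * manual_node_up usage sketch: writing a node number (e.g. "echo 1" into the
 * file, under the same sysfs directory assumed above) marks that node as up
 * and calls o2net_hb_node_up_manual(); reading the file lists the node
 * numbers already brought up this way.
 */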
2498 static struct attribute *ramster_attrs[] = {
2499 &ramster_pers_remotify_enable_attr.attr,
2500 &ramster_eph_remotify_enable_attr.attr,
2501 &ramster_remote_pers_pages_attr.attr,
2502 &ramster_eph_pages_remoted_attr.attr,
2503 &ramster_eph_pages_remote_failed_attr.attr,
2504 &ramster_pers_pages_remoted_attr.attr,
2505 &ramster_pers_pages_remote_failed_attr.attr,
2506 &ramster_pers_pages_remote_nomem_attr.attr,
2507 &ramster_remote_pages_flushed_attr.attr,
2508 &ramster_remote_page_flushes_failed_attr.attr,
2509 &ramster_remote_objects_flushed_attr.attr,
2510 &ramster_remote_object_flushes_failed_attr.attr,
2511 &ramster_remote_eph_pages_succ_get_attr.attr,
2512 &ramster_remote_eph_pages_unsucc_get_attr.attr,
2513 &ramster_remote_pers_pages_succ_get_attr.attr,
2514 &ramster_remote_pers_pages_unsucc_get_attr.attr,
2515 &ramster_foreign_eph_pampd_count_attr.attr,
2516 &ramster_foreign_eph_pampd_count_max_attr.attr,
2517 &ramster_foreign_pers_pampd_count_attr.attr,
2518 &ramster_foreign_pers_pampd_count_max_attr.attr,
2519 &ramster_curr_flnode_count_attr.attr,
2520 &ramster_curr_flnode_count_max_attr.attr,
2521 &ramster_manual_node_up_attr.attr,
2525 static struct attribute_group ramster_attr_group = {
2526 .attrs = ramster_attrs,
2530 #endif /* CONFIG_SYSFS */
2532 * When zcache is disabled ("frozen"), pools can be created and destroyed,
2533 * but all puts (and thus all other operations that require memory allocation)
2534 * must fail. If zcache is unfrozen, accepts some puts, and is then frozen
2535 * again, data consistency requires all puts while frozen to be converted into
2538 static bool zcache_freeze;
2541 * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
2543 static int shrink_zcache_memory(struct shrinker *shrink,
2544 struct shrink_control *sc)
2547 int nr = sc->nr_to_scan;
2548 gfp_t gfp_mask = sc->gfp_mask;
2551 if (!(gfp_mask & __GFP_FS))
2552 /* does this case really need to be skipped? */
2554 zbud_evict_pages(nr);
2556 ret = (int)atomic_read(&zcache_zbud_curr_raw_pages);
2561 static struct shrinker zcache_shrinker = {
2562 .shrink = shrink_zcache_memory,
2563 .seeks = DEFAULT_SEEKS,
2567 * zcache shims between cleancache/frontswap ops and tmem
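 *
 * Locking expectations, as enforced by the BUG_ONs below: zcache_put() is
 * called with interrupts already disabled by its caller, while zcache_get()
 * must not be called from irq or softirq context since a remote get may
 * sleep waiting for the response message.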
2570 int zcache_put(int cli_id, int pool_id, struct tmem_oid *oidp,
2571 uint32_t index, char *data, size_t size,
2572 bool raw, int ephemeral)
2574 struct tmem_pool *pool;
2577 BUG_ON(!irqs_disabled());
2578 pool = zcache_get_pool_by_id(cli_id, pool_id);
2579 if (unlikely(pool == NULL))
2581 if (!zcache_freeze && zcache_do_preload(pool) == 0) {
2582 /* preload does preempt_disable on success */
2583 ret = tmem_put(pool, oidp, index, data, size, raw, ephemeral);
2585 if (is_ephemeral(pool))
2586 zcache_failed_eph_puts++;
2588 zcache_failed_pers_puts++;
2590 zcache_put_pool(pool);
2591 preempt_enable_no_resched();
2593 zcache_put_to_flush++;
2594 if (atomic_read(&pool->obj_count) > 0)
2595 /* the put fails whether the flush succeeds or not */
2596 (void)tmem_flush_page(pool, oidp, index);
2597 zcache_put_pool(pool);
2603 int zcache_get(int cli_id, int pool_id, struct tmem_oid *oidp,
2604 uint32_t index, char *data, size_t *sizep,
2605 bool raw, int get_and_free)
2607 struct tmem_pool *pool;
2612 BUG_ON(irqs_disabled());
2613 BUG_ON(in_softirq());
2615 pool = zcache_get_pool_by_id(cli_id, pool_id);
2616 eph = (pool != NULL) && is_ephemeral(pool); /* avoid NULL deref if lookup failed */
2617 if (likely(pool != NULL)) {
2618 if (atomic_read(&pool->obj_count) > 0)
2619 ret = tmem_get(pool, oidp, index, data, sizep,
2621 zcache_put_pool(pool);
2623 WARN_ONCE((!eph && (ret != 0)), "zcache_get fails on persistent pool, "
2624 "bad things are very likely to happen soon\n");
2625 #ifdef RAMSTER_TESTING
2626 if (ret != 0 && ret != -1 && !(ret == -EINVAL && is_ephemeral(pool)))
2627 pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret);
2630 BUG(); /* FIXME... don't need this anymore??? let's ensure */
2634 int zcache_flush(int cli_id, int pool_id,
2635 struct tmem_oid *oidp, uint32_t index)
2637 struct tmem_pool *pool;
2639 unsigned long flags;
2641 local_irq_save(flags);
2642 zcache_flush_total++;
2643 pool = zcache_get_pool_by_id(cli_id, pool_id);
2644 ramster_do_preload_flnode_only(pool);
2645 if (likely(pool != NULL)) {
2646 if (atomic_read(&pool->obj_count) > 0)
2647 ret = tmem_flush_page(pool, oidp, index);
2648 zcache_put_pool(pool);
2651 zcache_flush_found++;
2652 local_irq_restore(flags);
2656 int zcache_flush_object(int cli_id, int pool_id, struct tmem_oid *oidp)
2658 struct tmem_pool *pool;
2660 unsigned long flags;
2662 local_irq_save(flags);
2663 zcache_flobj_total++;
2664 pool = zcache_get_pool_by_id(cli_id, pool_id);
2665 ramster_do_preload_flnode_only(pool);
2666 if (likely(pool != NULL)) {
2667 if (atomic_read(&pool->obj_count) > 0)
2668 ret = tmem_flush_object(pool, oidp);
2669 zcache_put_pool(pool);
2672 zcache_flobj_found++;
2673 local_irq_restore(flags);
2677 int zcache_client_destroy_pool(int cli_id, int pool_id)
2679 struct tmem_pool *pool = NULL;
2680 struct zcache_client *cli = NULL;
2685 if (cli_id == LOCAL_CLIENT)
2687 else if ((unsigned int)cli_id < MAX_CLIENTS)
2688 cli = &zcache_clients[cli_id];
2691 atomic_inc(&cli->refcount);
2692 pool = cli->tmem_pools[pool_id];
2695 cli->tmem_pools[pool_id] = NULL;
2696 /* wait for pool activity on other cpus to quiesce */
2697 while (atomic_read(&pool->refcount) != 0)
2699 atomic_dec(&cli->refcount);
2701 ret = tmem_destroy_pool(pool);
2704 pr_info("ramster: destroyed pool id=%d cli_id=%d\n", pool_id, cli_id);
2709 static int zcache_destroy_pool(int pool_id)
2711 return zcache_client_destroy_pool(LOCAL_CLIENT, pool_id);
2714 int zcache_new_pool(uint16_t cli_id, uint32_t flags)
2717 struct tmem_pool *pool;
2718 struct zcache_client *cli = NULL;
2720 if (cli_id == LOCAL_CLIENT)
2722 else if ((unsigned int)cli_id < MAX_CLIENTS)
2723 cli = &zcache_clients[cli_id];
2726 atomic_inc(&cli->refcount);
2727 pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
2729 pr_info("ramster: pool creation failed: out of memory\n");
2733 for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
2734 if (cli->tmem_pools[poolid] == NULL)
2736 if (poolid >= MAX_POOLS_PER_CLIENT) {
2737 pr_info("ramster: pool creation failed: max exceeded\n");
2742 atomic_set(&pool->refcount, 0);
2744 pool->pool_id = poolid;
2745 tmem_new_pool(pool, flags);
2746 cli->tmem_pools[poolid] = pool;
2747 pr_info("ramster: created %s tmem pool, id=%d, client=%d\n",
2748 flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
2752 atomic_dec(&cli->refcount);
2756 static int zcache_local_new_pool(uint32_t flags)
2758 return zcache_new_pool(LOCAL_CLIENT, flags);
2761 int zcache_autocreate_pool(int cli_id, int pool_id, bool ephemeral)
2763 struct tmem_pool *pool;
2764 struct zcache_client *cli = NULL;
2765 uint32_t flags = ephemeral ? 0 : TMEM_POOL_PERSIST;
2768 if (cli_id == LOCAL_CLIENT)
2770 if (pool_id >= MAX_POOLS_PER_CLIENT)
2772 else if ((unsigned int)cli_id < MAX_CLIENTS)
2773 cli = &zcache_clients[cli_id];
2774 if ((ephemeral && !use_cleancache) || (!ephemeral && !use_frontswap))
2775 BUG(); /* FIXME, handle more gracefully later */
2776 if (!cli->allocated) {
2777 if (zcache_new_client(cli_id))
2778 BUG(); /* FIXME, handle more gracefully later */
2779 cli = &zcache_clients[cli_id];
2781 atomic_inc(&cli->refcount);
2782 pool = cli->tmem_pools[pool_id];
2784 if (pool->persistent && ephemeral) {
2785 pr_err("zcache_autocreate_pool: type mismatch\n");
2791 pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
2793 pr_info("ramster: pool creation failed: out of memory\n");
2796 atomic_set(&pool->refcount, 0);
2798 pool->pool_id = pool_id;
2799 tmem_new_pool(pool, flags);
2800 cli->tmem_pools[pool_id] = pool;
2801 pr_info("ramster: AUTOcreated %s tmem poolid=%d, for remote client=%d\n",
2802 flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
2807 BUG(); /* FIXME, handle more gracefully later */
2808 /* pr_err("zcache_autocreate_pool: failed\n"); */
2810 atomic_dec(&cli->refcount);
2815 * Two kernel functionalities currently can be layered on top of tmem.
2816 * These are "cleancache" which is used as a second-chance cache for clean
2817 * page cache pages; and "frontswap" which is used for swap pages
2818 * to avoid writes to disk. A generic "shim" is provided here for each
2819 * to translate in-kernel semantics to zcache semantics.
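 *
 * Roughly: cleancache put/get/invalidate calls become zcache_put/zcache_get/
 * zcache_flush* on an ephemeral pool owned by LOCAL_CLIENT, while frontswap
 * ops use a single persistent pool (zcache_frontswap_poolid) with the swap
 * type and offset "swizzled" into the tmem oid and index as described below.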
2822 #ifdef CONFIG_CLEANCACHE
2823 static void zcache_cleancache_put_page(int pool_id,
2824 struct cleancache_filekey key,
2825 pgoff_t index, struct page *page)
2827 u32 ind = (u32) index;
2828 struct tmem_oid oid = *(struct tmem_oid *)&key;
2830 #ifdef __PG_WAS_ACTIVE
2831 if (!PageWasActive(page)) {
2832 zcache_nonactive_puts++;
2836 if (likely(ind == index)) {
2837 char *kva = page_address(page);
2839 (void)zcache_put(LOCAL_CLIENT, pool_id, &oid, index,
2840 kva, PAGE_SIZE, 0, 1);
2844 static int zcache_cleancache_get_page(int pool_id,
2845 struct cleancache_filekey key,
2846 pgoff_t index, struct page *page)
2848 u32 ind = (u32) index;
2849 struct tmem_oid oid = *(struct tmem_oid *)&key;
2853 if (likely(ind == index)) {
2854 char *kva = page_address(page);
2855 size_t size = PAGE_SIZE;
2857 ret = zcache_get(LOCAL_CLIENT, pool_id, &oid, index,
2859 #ifdef __PG_WAS_ACTIVE
2861 SetPageWasActive(page);
2868 static void zcache_cleancache_flush_page(int pool_id,
2869 struct cleancache_filekey key,
2872 u32 ind = (u32) index;
2873 struct tmem_oid oid = *(struct tmem_oid *)&key;
2875 if (likely(ind == index))
2876 (void)zcache_flush(LOCAL_CLIENT, pool_id, &oid, ind);
2879 static void zcache_cleancache_flush_inode(int pool_id,
2880 struct cleancache_filekey key)
2882 struct tmem_oid oid = *(struct tmem_oid *)&key;
2884 (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
2887 static void zcache_cleancache_flush_fs(int pool_id)
2890 (void)zcache_destroy_pool(pool_id);
2893 static int zcache_cleancache_init_fs(size_t pagesize)
2895 BUG_ON(sizeof(struct cleancache_filekey) !=
2896 sizeof(struct tmem_oid));
2897 BUG_ON(pagesize != PAGE_SIZE);
2898 return zcache_local_new_pool(0);
2901 static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
2903 /* shared pools are unsupported and map to private */
2904 BUG_ON(sizeof(struct cleancache_filekey) !=
2905 sizeof(struct tmem_oid));
2906 BUG_ON(pagesize != PAGE_SIZE);
2907 return zcache_local_new_pool(0);
2910 static struct cleancache_ops zcache_cleancache_ops = {
2911 .put_page = zcache_cleancache_put_page,
2912 .get_page = zcache_cleancache_get_page,
2913 .invalidate_page = zcache_cleancache_flush_page,
2914 .invalidate_inode = zcache_cleancache_flush_inode,
2915 .invalidate_fs = zcache_cleancache_flush_fs,
2916 .init_shared_fs = zcache_cleancache_init_shared_fs,
2917 .init_fs = zcache_cleancache_init_fs
2920 struct cleancache_ops zcache_cleancache_register_ops(void)
2922 struct cleancache_ops old_ops =
2923 cleancache_register_ops(&zcache_cleancache_ops);
2929 #ifdef CONFIG_FRONTSWAP
2930 /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
2931 static int zcache_frontswap_poolid = -1;
2934 * Swizzling increases objects per swaptype, increasing tmem concurrency
2935 * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
2938 #define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
2939 #define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
2940 #define iswiz(_ind) (_ind >> SWIZ_BITS)
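/*
 * Worked example (assuming, for illustration, SWIZ_BITS == 4, so
 * SWIZ_MASK == 0xf): a page at swap type 2, offset 0x1234 gets
 * oid.oid[0] = _oswiz(2, 0x1234) = (2 << 4) | 0x4 = 0x24 and
 * index = iswiz(0x1234) = 0x123, so consecutive swap offsets are spread
 * across 1 << SWIZ_BITS distinct tmem objects.
 */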
2942 static inline struct tmem_oid oswiz(unsigned type, u32 ind)
2944 struct tmem_oid oid = { .oid = { 0 } };
2945 oid.oid[0] = _oswiz(type, ind);
2949 static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
2952 u64 ind64 = (u64)offset;
2953 u32 ind = (u32)offset;
2954 struct tmem_oid oid = oswiz(type, ind);
2956 unsigned long flags;
2959 BUG_ON(!PageLocked(page));
2960 if (likely(ind64 == ind)) {
2961 local_irq_save(flags);
2962 kva = page_address(page);
2963 ret = zcache_put(LOCAL_CLIENT, zcache_frontswap_poolid,
2964 &oid, iswiz(ind), kva, PAGE_SIZE, 0, 0);
2965 local_irq_restore(flags);
2970 /* returns 0 if the page was successfully gotten from frontswap, -1 if it
2971 * was not present (should never happen!) */
2972 static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
2975 u64 ind64 = (u64)offset;
2976 u32 ind = (u32)offset;
2977 struct tmem_oid oid = oswiz(type, ind);
2980 preempt_disable(); /* FIXME, remove this? */
2981 BUG_ON(!PageLocked(page));
2982 if (likely(ind64 == ind)) {
2983 char *kva = page_address(page);
2984 size_t size = PAGE_SIZE;
2986 ret = zcache_get(LOCAL_CLIENT, zcache_frontswap_poolid,
2987 &oid, iswiz(ind), kva, &size, 0, -1);
2989 preempt_enable(); /* FIXME, remove this? */
2993 /* flush a single page from frontswap */
2994 static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
2996 u64 ind64 = (u64)offset;
2997 u32 ind = (u32)offset;
2998 struct tmem_oid oid = oswiz(type, ind);
3000 if (likely(ind64 == ind))
3001 (void)zcache_flush(LOCAL_CLIENT, zcache_frontswap_poolid,
3005 /* flush all pages from the passed swaptype */
3006 static void zcache_frontswap_flush_area(unsigned type)
3008 struct tmem_oid oid;
3011 for (ind = SWIZ_MASK; ind >= 0; ind--) {
3012 oid = oswiz(type, ind);
3013 (void)zcache_flush_object(LOCAL_CLIENT,
3014 zcache_frontswap_poolid, &oid);
3018 static void zcache_frontswap_init(unsigned ignored)
3020 /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
3021 if (zcache_frontswap_poolid < 0)
3022 zcache_frontswap_poolid =
3023 zcache_local_new_pool(TMEM_POOL_PERSIST);
3026 static struct frontswap_ops zcache_frontswap_ops = {
3027 .put_page = zcache_frontswap_put_page,
3028 .get_page = zcache_frontswap_get_page,
3029 .invalidate_page = zcache_frontswap_flush_page,
3030 .invalidate_area = zcache_frontswap_flush_area,
3031 .init = zcache_frontswap_init
3034 struct frontswap_ops zcache_frontswap_register_ops(void)
3036 struct frontswap_ops old_ops =
3037 frontswap_register_ops(&zcache_frontswap_ops);
3044 * frontswap selfshrinking
3047 #ifdef CONFIG_FRONTSWAP
3048 /* In HZ, controls frequency of worker invocation. */
3049 static unsigned int selfshrink_interval __read_mostly = 5;
3051 static void selfshrink_process(struct work_struct *work);
3052 static DECLARE_DELAYED_WORK(selfshrink_worker, selfshrink_process);
3054 /* Enable/disable with sysfs. */
3055 static bool frontswap_selfshrinking __read_mostly;
3057 /* Enable/disable with kernel boot option. */
3058 static bool use_frontswap_selfshrink __initdata = true;
3061 * The default values for the following parameters were deemed reasonable
3062 * by experimentation, may be workload-dependent, and can all be
3063 * adjusted via sysfs.
3066 /* Control rate for frontswap shrinking. Higher hysteresis is slower. */
3067 static unsigned int frontswap_hysteresis __read_mostly = 20;
3070 * Number of selfshrink worker invocations to wait before observing that
3071 * frontswap selfshrinking should commence. Note that selfshrinking does
3072 * not use a separate worker thread.
3074 static unsigned int frontswap_inertia __read_mostly = 3;
3076 /* Countdown to next invocation of frontswap_shrink() */
3077 static unsigned long frontswap_inertia_counter;
3080 * Invoked by the selfshrink worker thread, uses current number of pages
3081 * in frontswap (frontswap_curr_pages()), previous status, and control
3082 * values (hysteresis and inertia) to determine if frontswap should be
3083 * shrunk and what the new frontswap size should be. Note that
3084 * frontswap_shrink is essentially a partial swapoff that immediately
3085 * transfers pages from the "swap device" (frontswap) back into kernel
3086 * RAM; despite the name, frontswap "shrinking" is very different from
3087 * the "shrinker" interface used by the kernel MM subsystem to reclaim
3090 static void frontswap_selfshrink(void)
3092 static unsigned long cur_frontswap_pages;
3093 static unsigned long last_frontswap_pages;
3094 static unsigned long tgt_frontswap_pages;
3096 last_frontswap_pages = cur_frontswap_pages;
3097 cur_frontswap_pages = frontswap_curr_pages();
3098 if (!cur_frontswap_pages ||
3099 (cur_frontswap_pages > last_frontswap_pages)) {
3100 frontswap_inertia_counter = frontswap_inertia;
3103 if (frontswap_inertia_counter && --frontswap_inertia_counter)
3105 if (cur_frontswap_pages <= frontswap_hysteresis)
3106 tgt_frontswap_pages = 0;
3108 tgt_frontswap_pages = cur_frontswap_pages -
3109 (cur_frontswap_pages / frontswap_hysteresis);
3110 frontswap_shrink(tgt_frontswap_pages);
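/*
 * Worked example of the arithmetic above: with frontswap_hysteresis == 20,
 * a steady or shrinking frontswap of 1000 pages is reduced to a target of
 * 1000 - 1000/20 = 950 pages per invocation (i.e. 5% each pass, once the
 * inertia countdown has expired); anything at or below 20 pages is shrunk
 * to zero.
 */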
3113 static int __init ramster_nofrontswap_selfshrink_setup(char *s)
3115 use_frontswap_selfshrink = false;
3119 __setup("noselfshrink", ramster_nofrontswap_selfshrink_setup);
3121 static void selfshrink_process(struct work_struct *work)
3123 if (frontswap_selfshrinking && frontswap_enabled) {
3124 frontswap_selfshrink();
3125 schedule_delayed_work(&selfshrink_worker,
3126 selfshrink_interval * HZ);
3130 static int ramster_enabled;
3132 static int __init ramster_selfshrink_init(void)
3134 frontswap_selfshrinking = ramster_enabled && use_frontswap_selfshrink;
3135 if (frontswap_selfshrinking)
3136 pr_info("ramster: Initializing frontswap "
3137 "selfshrinking driver.\n");
3141 schedule_delayed_work(&selfshrink_worker, selfshrink_interval * HZ);
3146 subsys_initcall(ramster_selfshrink_init);
3150 * zcache initialization
3151 * NOTE FOR NOW ramster MUST BE PROVIDED AS A KERNEL BOOT PARAMETER OR
3155 static int ramster_enabled;
3157 static int __init enable_ramster(char *s)
3159 ramster_enabled = 1;
3162 __setup("ramster", enable_ramster);
3164 /* allow independent dynamic disabling of cleancache and frontswap */
3166 static int use_cleancache = 1;
3168 static int __init no_cleancache(char *s)
3170 pr_info("INIT no_cleancache called\n");
3176 * FIXME: need to guarantee this gets checked before zcache_init is called
3177 * What is the correct way to achieve this?
3179 early_param("nocleancache", no_cleancache);
3181 static int use_frontswap = 1;
3183 static int __init no_frontswap(char *s)
3185 pr_info("INIT no_frontswap called\n");
3190 __setup("nofrontswap", no_frontswap);
3192 static int __init zcache_init(void)
3197 ret = sysfs_create_group(mm_kobj, &zcache_attr_group);
3198 ret = ret ?: sysfs_create_group(mm_kobj, &ramster_attr_group);
3200 pr_err("ramster: can't create sysfs\n");
3203 #endif /* CONFIG_SYSFS */
3204 #if defined(CONFIG_CLEANCACHE) || defined(CONFIG_FRONTSWAP)
3205 if (ramster_enabled) {
3208 (void)ramster_o2net_register_handlers();
3209 tmem_register_hostops(&zcache_hostops);
3210 tmem_register_pamops(&zcache_pamops);
3211 ret = register_cpu_notifier(&zcache_cpu_notifier_block);
3213 pr_err("ramster: can't register cpu notifier\n");
3216 for_each_online_cpu(cpu) {
3217 void *pcpu = (void *)(long)cpu;
3218 zcache_cpu_notifier(&zcache_cpu_notifier_block,
3219 CPU_UP_PREPARE, pcpu);
3222 zcache_objnode_cache = kmem_cache_create("zcache_objnode",
3223 sizeof(struct tmem_objnode), 0, 0, NULL);
3224 zcache_obj_cache = kmem_cache_create("zcache_obj",
3225 sizeof(struct tmem_obj), 0, 0, NULL);
3226 ramster_flnode_cache = kmem_cache_create("ramster_flnode",
3227 sizeof(struct flushlist_node), 0, 0, NULL);
3229 #ifdef CONFIG_CLEANCACHE
3230 pr_info("INIT ramster_enabled=%d use_cleancache=%d\n",
3231 ramster_enabled, use_cleancache);
3232 if (ramster_enabled && use_cleancache) {
3233 struct cleancache_ops old_ops;
3236 register_shrinker(&zcache_shrinker);
3237 old_ops = zcache_cleancache_register_ops();
3238 pr_info("ramster: cleancache enabled using kernel "
3239 "transcendent memory and compression buddies\n");
3240 if (old_ops.init_fs != NULL)
3241 pr_warning("ramster: cleancache_ops overridden");
3244 #ifdef CONFIG_FRONTSWAP
3245 pr_info("INIT ramster_enabled=%d use_frontswap=%d\n",
3246 ramster_enabled, use_frontswap);
3247 if (ramster_enabled && use_frontswap) {
3248 struct frontswap_ops old_ops;
3250 zcache_new_client(LOCAL_CLIENT);
3251 old_ops = zcache_frontswap_register_ops();
3252 pr_info("ramster: frontswap enabled using kernel "
3253 "transcendent memory and xvmalloc\n");
3254 if (old_ops.init != NULL)
3255 pr_warning("ramster: frontswap_ops overridden");
3257 if (ramster_enabled && (use_frontswap || use_cleancache))
3258 ramster_remotify_init();
3264 module_init(zcache_init)