/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>
#include <linux/frontswap.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>

#ifndef CONFIG_XEN_TMEM_MODULE
bool __read_mostly tmem_enabled = false;

static int __init enable_tmem(char *s)
{
        tmem_enabled = true;
        return 1;
}
__setup("tmem", enable_tmem);
#endif

#ifdef CONFIG_CLEANCACHE
static bool cleancache __read_mostly = true;
module_param(cleancache, bool, S_IRUGO);
static bool selfballooning __read_mostly = true;
module_param(selfballooning, bool, S_IRUGO);
#endif /* CONFIG_CLEANCACHE */

#ifdef CONFIG_FRONTSWAP
static bool frontswap __read_mostly = true;
module_param(frontswap, bool, S_IRUGO);
#endif /* CONFIG_FRONTSWAP */

#ifdef CONFIG_XEN_SELFBALLOONING
static bool selfshrinking __read_mostly = true;
module_param(selfshrinking, bool, S_IRUGO);
#endif /* CONFIG_XEN_SELFBALLOONING */
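
/*
 * Usage sketch (an illustrative assumption, not taken from this file):
 * with tmem built in, enable it on the kernel command line and tune the
 * individual services via the built-in module parameters, e.g.
 *
 *      tmem tmem.cleancache=1 tmem.frontswap=1 tmem.selfballooning=0
 *
 * When built as a module (CONFIG_XEN_TMEM_MODULE), the same knobs are
 * ordinary module parameters.
 */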

#define TMEM_CONTROL               0
#define TMEM_NEW_POOL              1
#define TMEM_DESTROY_POOL          2
#define TMEM_NEW_PAGE              3
#define TMEM_PUT_PAGE              4
#define TMEM_GET_PAGE              5
#define TMEM_FLUSH_PAGE            6
#define TMEM_FLUSH_OBJECT          7
#define TMEM_READ                  8
#define TMEM_WRITE                 9
#define TMEM_XCHG                 10

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST          1
#define TMEM_POOL_SHARED           2
#define TMEM_POOL_PAGESIZE_SHIFT   4
#define TMEM_VERSION_SHIFT        24
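
/*
 * Worked example (assuming TMEM_SPEC_VERSION == 1, as defined in
 * <xen/tmem.h>): a persistent pool of 4K pages encodes its flags as
 * TMEM_POOL_PERSIST | ((12 - 12) << TMEM_POOL_PAGESIZE_SHIFT) |
 * (1 << TMEM_VERSION_SHIFT) == 0x01000001.
 */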

struct tmem_pool_uuid {
        u64 uuid_lo;
        u64 uuid_hi;
};

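/*
 * Deliberately layout-compatible with struct cleancache_filekey;
 * xen_tmem_init() BUG_ONs if the sizes ever diverge, since the
 * cleancache hooks below cast one to the other.
 */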
struct tmem_oid {
        u64 oid[3];
};

#define TMEM_POOL_PRIVATE_UUID  { 0, 0 }

/* xen tmem foundation ops/hypercalls */

static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
        u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
{
        struct tmem_op op;
        int rc = 0;

        op.cmd = tmem_cmd;
        op.pool_id = tmem_pool;
        op.u.gen.oid[0] = oid.oid[0];
        op.u.gen.oid[1] = oid.oid[1];
        op.u.gen.oid[2] = oid.oid[2];
        op.u.gen.index = index;
        op.u.gen.tmem_offset = tmem_offset;
        op.u.gen.pfn_offset = pfn_offset;
        op.u.gen.len = len;
        set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
        rc = HYPERVISOR_tmem_op(&op);
        return rc;
}

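/*
 * Create a tmem pool.  The pool page size is encoded in the flags
 * relative to a 4K (2^12) base; the hypercall returns the new pool id
 * (>= 0) on success or a negative error, which the callers below store
 * directly as their pool id.
 */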
static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
                                u32 flags, unsigned long pagesize)
{
        struct tmem_op op;
        int rc = 0, pageshift;

        for (pageshift = 0; pagesize != 1; pageshift++)
                pagesize >>= 1;
        flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
        flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
        op.cmd = TMEM_NEW_POOL;
        op.u.new.uuid[0] = uuid.uuid_lo;
        op.u.new.uuid[1] = uuid.uuid_hi;
        op.u.new.flags = flags;
        rc = HYPERVISOR_tmem_op(&op);
        return rc;
}

/* xen generic tmem ops */

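/*
 * The hypervisor addresses guest pages by "gmfn": for PV guests this is
 * the real machine frame number (hence the pfn_to_mfn() translation
 * below), while HVM/PVH guests pass the pseudo-physical frame number
 * through unchanged.
 */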
static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
                             u32 index, unsigned long pfn)
{
        unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

        return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
                gmfn, 0, 0, 0);
}

static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
                             u32 index, unsigned long pfn)
{
        unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

        return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
                gmfn, 0, 0, 0);
}

static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
        return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
                0, 0, 0, 0);
}

static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
        return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}

#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
        struct tmem_oid oid = { { 0 } };

        return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}

/* cleancache ops */

static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
                                     pgoff_t index, struct page *page)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;
        unsigned long pfn = page_to_pfn(page);

        if (pool < 0)
                return;
        if (ind != index)
                return;
        mb(); /* ensure page is quiescent; tmem may address it with an alias */
        (void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
}

static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
                                    pgoff_t index, struct page *page)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;
        unsigned long pfn = page_to_pfn(page);
        int ret;

        /* translate return values to linux semantics */
        if (pool < 0)
                return -1;
        if (ind != index)
                return -1;
        ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
        if (ret == 1)
                return 0;
        else
                return -1;
}

static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
                                       pgoff_t index)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;

        if (pool < 0)
                return;
        if (ind != index)
                return;
        (void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
        struct tmem_oid oid = *(struct tmem_oid *)&key;

        if (pool < 0)
                return;
        (void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
        if (pool < 0)
                return;
        (void)xen_tmem_destroy_pool((u32)pool);
}

static int tmem_cleancache_init_fs(size_t pagesize)
{
        struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

        return xen_tmem_new_pool(uuid_private, 0, pagesize);
}

static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
        struct tmem_pool_uuid shared_uuid;

        shared_uuid.uuid_lo = *(u64 *)uuid;
        shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
        return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}

static struct cleancache_ops tmem_cleancache_ops = {
        .put_page = tmem_cleancache_put_page,
        .get_page = tmem_cleancache_get_page,
        .invalidate_page = tmem_cleancache_flush_page,
        .invalidate_inode = tmem_cleancache_flush_inode,
        .invalidate_fs = tmem_cleancache_flush_fs,
        .init_shared_fs = tmem_cleancache_init_shared_fs,
        .init_fs = tmem_cleancache_init_fs
};
#endif

#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases the number of objects per swaptype, improving
 * tmem concurrency under heavy swap loads.  Later, a larger nr_cpus
 * may warrant a larger SWIZ_BITS.
 */
#define SWIZ_BITS               4
#define SWIZ_MASK               ((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)     (((_type) << SWIZ_BITS) | ((_ind) & SWIZ_MASK))
#define iswiz(_ind)             ((_ind) >> SWIZ_BITS)

static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
        struct tmem_oid oid = { .oid = { 0 } };

        oid.oid[0] = _oswiz(type, ind);
        return oid;
}

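/*
 * Worked example: with SWIZ_BITS == 4, swap offset 0x1234 in swap
 * type 2 lands in object oswiz(2, 0x1234), i.e. oid (2 << 4) | 0x4 ==
 * 0x24, at index iswiz(0x1234) == 0x123, so consecutive swap offsets
 * are spread across 16 tmem objects per swap type.
 */
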
/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
                                   struct page *page)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        unsigned long pfn = page_to_pfn(page);
        int pool = tmem_frontswap_poolid;
        int ret;

        if (pool < 0)
                return -1;
        if (ind64 != ind)
                return -1;
        mb(); /* ensure page is quiescent; tmem may address it with an alias */
        ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
        /* translate Xen tmem return values to linux semantics */
        if (ret == 1)
                return 0;
        else
                return -1;
}

/*
 * returns 0 if the page was successfully fetched from frontswap, -1 if
 * it was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
                                   struct page *page)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        unsigned long pfn = page_to_pfn(page);
        int pool = tmem_frontswap_poolid;
        int ret;

        if (pool < 0)
                return -1;
        if (ind64 != ind)
                return -1;
        ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
        /* translate Xen tmem return values to linux semantics */
        if (ret == 1)
                return 0;
        else
                return -1;
}

/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        int pool = tmem_frontswap_poolid;

        if (pool < 0)
                return;
        if (ind64 != ind)
                return;
        (void)xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}

/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
        int pool = tmem_frontswap_poolid;
        int ind;

        if (pool < 0)
                return;
        for (ind = SWIZ_MASK; ind >= 0; ind--)
                (void)xen_tmem_flush_object(pool, oswiz(type, ind));
}

static void tmem_frontswap_init(unsigned ignored)
{
        struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

        /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
        if (tmem_frontswap_poolid < 0)
                tmem_frontswap_poolid =
                    xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}

static struct frontswap_ops tmem_frontswap_ops = {
        .store = tmem_frontswap_store,
        .load = tmem_frontswap_load,
        .invalidate_page = tmem_frontswap_flush_page,
        .invalidate_area = tmem_frontswap_flush_area,
        .init = tmem_frontswap_init
};
#endif

static int xen_tmem_init(void)
{
        if (!xen_domain())
                return 0;
#ifdef CONFIG_FRONTSWAP
        if (tmem_enabled && frontswap) {
                char *s = "";
                struct frontswap_ops *old_ops =
                        frontswap_register_ops(&tmem_frontswap_ops);

                tmem_frontswap_poolid = -1;
                if (IS_ERR(old_ops) || old_ops) {
                        if (IS_ERR(old_ops))
                                return PTR_ERR(old_ops);
                        s = " (WARNING: frontswap_ops overridden)";
                }
                printk(KERN_INFO "frontswap enabled, RAM provided by "
                                 "Xen Transcendent Memory%s\n", s);
        }
#endif
#ifdef CONFIG_CLEANCACHE
        BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
        if (tmem_enabled && cleancache) {
                char *s = "";
                struct cleancache_ops *old_ops =
                        cleancache_register_ops(&tmem_cleancache_ops);
                if (old_ops)
                        s = " (WARNING: cleancache_ops overridden)";
                printk(KERN_INFO "cleancache enabled, RAM provided by "
                                 "Xen Transcendent Memory%s\n", s);
        }
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
        /*
         * There is no point in driving pages to the swap system if they
         * aren't going anywhere in the tmem universe.
         */
        if (!frontswap) {
                selfshrinking = false;
                selfballooning = false;
        }
        xen_selfballoon_init(selfballooning, selfshrinking);
#endif
        return 0;
}

module_init(xen_tmem_init)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
MODULE_DESCRIPTION("Shim to Xen transcendent memory");