/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp. All rights reserved.
 * Author: Dan Magenheimer
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>

/* temporary ifdef until include/linux/frontswap.h is upstream */
#ifdef CONFIG_FRONTSWAP
#include <linux/frontswap.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>

#define TMEM_CONTROL		0
#define TMEM_NEW_POOL		1
#define TMEM_DESTROY_POOL	2
#define TMEM_NEW_PAGE		3
#define TMEM_PUT_PAGE		4
#define TMEM_GET_PAGE		5
#define TMEM_FLUSH_PAGE		6
#define TMEM_FLUSH_OBJECT	7
#define TMEM_READ		8
#define TMEM_WRITE		9
#define TMEM_XCHG		10

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST		1
#define TMEM_POOL_SHARED		2
#define TMEM_POOL_PAGESIZE_SHIFT	4
#define TMEM_VERSION_SHIFT		24
/* spec version encoded into pool flags; 1 matches the Xen tmem interface */
#define TMEM_SPEC_VERSION		1

struct tmem_pool_uuid {
	u64 uuid_lo;
	u64 uuid_hi;
};

struct tmem_oid {
	u64 oid[3];
};

#define TMEM_POOL_PRIVATE_UUID	{ 0, 0 }

/* xen tmem foundation ops/hypercalls */

static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
			      u32 index, unsigned long gmfn, u32 tmem_offset,
			      u32 pfn_offset, u32 len)
{
	struct tmem_op op;
	int rc = 0;

	op.cmd = tmem_cmd;
	op.pool_id = tmem_pool;
	op.u.gen.oid[0] = oid.oid[0];
	op.u.gen.oid[1] = oid.oid[1];
	op.u.gen.oid[2] = oid.oid[2];
	op.u.gen.index = index;
	op.u.gen.tmem_offset = tmem_offset;
	op.u.gen.pfn_offset = pfn_offset;
	op.u.gen.len = len;
	set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}
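
/*
 * Illustrative sketch only (nothing in this file calls it this way):
 * putting the page at pfn into pool 0 under object id {1, 0, 0} at
 * index 5, in a PV domain where the guest frame is a machine frame:
 *
 *	struct tmem_oid oid = { .oid = { 1, 0, 0 } };
 *	int ret = xen_tmem_op(TMEM_PUT_PAGE, 0, oid, 5,
 *			      pfn_to_mfn(pfn), 0, 0, 0);
 *
 * The hypervisor returns 1 on a successful put/get; the callers below
 * translate that to Linux's 0-on-success convention.
 */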

static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
			     u32 flags, unsigned long pagesize)
{
	struct tmem_op op;
	int rc = 0, pageshift;

	for (pageshift = 0; pagesize != 1; pageshift++)
		pagesize >>= 1;
	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
	flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
	op.cmd = TMEM_NEW_POOL;
	op.u.new.uuid[0] = uuid.uuid_lo;
	op.u.new.uuid[1] = uuid.uuid_hi;
	op.u.new.flags = flags;
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}
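
/*
 * Worked example (assuming TMEM_SPEC_VERSION == 1, as above): for a
 * persistent pool with pagesize == 4096, the loop computes
 * pageshift == 12, so the encoded pagesize field is (12 - 12) == 0 and
 *
 *	flags == (1 << TMEM_VERSION_SHIFT) | TMEM_POOL_PERSIST
 *	      == 0x01000001
 */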

/* xen generic tmem ops */

static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
			   gmfn, 0, 0, 0);
}

static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
			   gmfn, 0, 0, 0);
}

static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
	return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
			   0, 0, 0, 0);
}

static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}

#ifndef CONFIG_XEN_TMEM_MODULE
bool __read_mostly tmem_enabled = false;

static int __init enable_tmem(char *s)
{
	tmem_enabled = true;
	return 1;
}
__setup("tmem", enable_tmem);
#endif

#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
	struct tmem_oid oid = { { 0 } };

	return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}

/* cleancache ops */

static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
				     pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);

	if (pool < 0)
		return;
	if (ind != index)
		return;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	(void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
}

static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
				    pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);
	int ret;

	/* translate return values to linux semantics */
	if (pool < 0)
		return -1;
	if (ind != index)
		return -1;
	ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
	if (ret == 1)
		return 0;
	else
		return -1;
}

static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
				       pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	if (ind != index)
		return;
	(void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	(void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
	if (pool < 0)
		return;
	(void)xen_tmem_destroy_pool((u32)pool);
}

static int tmem_cleancache_init_fs(size_t pagesize)
{
	struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

	return xen_tmem_new_pool(uuid_private, 0, pagesize);
}

static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	struct tmem_pool_uuid shared_uuid;

	shared_uuid.uuid_lo = *(u64 *)uuid;
	shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}
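
/*
 * Layout note (illustrative): init_shared_fs receives the 16-byte
 * filesystem uuid and reads it as two native-endian u64s, so bytes
 * 0..7 become uuid_lo and bytes 8..15 become uuid_hi; the uuid is an
 * opaque match key for the hypervisor, so guests sharing a pool only
 * need to present the same 16 bytes.
 */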

static bool disable_cleancache __read_mostly;
static bool disable_selfballooning __read_mostly;
#ifdef CONFIG_XEN_TMEM_MODULE
module_param(disable_cleancache, bool, S_IRUGO);
module_param(disable_selfballooning, bool, S_IRUGO);
#else
static int __init no_cleancache(char *s)
{
	disable_cleancache = true;
	return 1;
}
__setup("nocleancache", no_cleancache);
#endif
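
/*
 * Usage sketch (assuming the module is named "tmem", after this file):
 * built in, cleancache is opted out with the boot parameter
 * "nocleancache" (and tmem as a whole is opted in with "tmem"); built
 * as a module, the same knob is the module parameter
 * disable_cleancache, e.g. "modprobe tmem disable_cleancache=1".
 */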

static struct cleancache_ops tmem_cleancache_ops = {
	.put_page = tmem_cleancache_put_page,
	.get_page = tmem_cleancache_get_page,
	.invalidate_page = tmem_cleancache_flush_page,
	.invalidate_inode = tmem_cleancache_flush_inode,
	.invalidate_fs = tmem_cleancache_flush_fs,
	.init_shared_fs = tmem_cleancache_init_shared_fs,
	.init_fs = tmem_cleancache_init_fs
};
#endif

#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases the number of objects per swaptype, improving
 * tmem concurrency under heavy swap loads. Later, a larger nr_cpus may
 * warrant a larger SWIZ_BITS.
 */
#define SWIZ_BITS		4
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)

static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };
	oid.oid[0] = _oswiz(type, ind);
	return oid;
}
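
/*
 * Worked example with SWIZ_BITS == 4: for swap type 1 and page offset
 * 0x123, _oswiz(1, 0x123) == (1 << 4) | (0x123 & 0xf) == 0x13 selects
 * the tmem object, and iswiz(0x123) == 0x12 is the index within it;
 * consecutive offsets thus fan out across 16 objects while every
 * (type, offset) pair stays uniquely addressable.
 */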

/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if
 * it was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
			       struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;

	if (pool < 0)
		return;
	if (ind64 != ind)
		return;
	(void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}

/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
	int pool = tmem_frontswap_poolid;
	int ind;

	if (pool < 0)
		return;
	for (ind = SWIZ_MASK; ind >= 0; ind--)
		(void)xen_tmem_flush_object(pool, oswiz(type, ind));
}

static void tmem_frontswap_init(unsigned ignored)
{
	struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (tmem_frontswap_poolid < 0)
		tmem_frontswap_poolid =
			xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}

static bool disable_frontswap __read_mostly;
static bool disable_frontswap_selfshrinking __read_mostly;
#ifdef CONFIG_XEN_TMEM_MODULE
module_param(disable_frontswap, bool, S_IRUGO);
module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
#else
static int __init no_frontswap(char *s)
{
	disable_frontswap = true;
	return 1;
}
__setup("nofrontswap", no_frontswap);
#endif

static struct frontswap_ops tmem_frontswap_ops = {
	.store = tmem_frontswap_store,
	.load = tmem_frontswap_load,
	.invalidate_page = tmem_frontswap_flush_page,
	.invalidate_area = tmem_frontswap_flush_area,
	.init = tmem_frontswap_init
};
#else /* CONFIG_FRONTSWAP */
#define disable_frontswap_selfshrinking 1
#endif

static int xen_tmem_init(void)
{
	if (!xen_domain())
		return 0;
#ifdef CONFIG_FRONTSWAP
	if (tmem_enabled && !disable_frontswap) {
		char *s = "";
		struct frontswap_ops *old_ops =
			frontswap_register_ops(&tmem_frontswap_ops);

		tmem_frontswap_poolid = -1;
		if (IS_ERR(old_ops) || old_ops) {
			if (IS_ERR(old_ops))
				return PTR_ERR(old_ops);
			s = " (WARNING: frontswap_ops overridden)";
		}
		printk(KERN_INFO "frontswap enabled, RAM provided by "
			"Xen Transcendent Memory%s\n", s);
	}
#endif
#ifdef CONFIG_CLEANCACHE
	BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
	if (tmem_enabled && !disable_cleancache) {
		char *s = "";
		struct cleancache_ops *old_ops =
			cleancache_register_ops(&tmem_cleancache_ops);
		if (old_ops)
			s = " (WARNING: cleancache_ops overridden)";
		printk(KERN_INFO "cleancache enabled, RAM provided by "
			"Xen Transcendent Memory%s\n", s);
	}
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
	xen_selfballoon_init(!disable_selfballooning,
			     !disable_frontswap_selfshrinking);
#endif
	return 0;
}

module_init(xen_tmem_init)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
MODULE_DESCRIPTION("Shim to Xen transcendent memory");