/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>

/* temporary ifdef until include/linux/frontswap.h is upstream */
#ifdef CONFIG_FRONTSWAP
#include <linux/frontswap.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>

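/* subcommands, all multiplexed through the single HYPERVISOR_tmem_op() call */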
#define TMEM_CONTROL 0
#define TMEM_NEW_POOL 1
#define TMEM_DESTROY_POOL 2
#define TMEM_NEW_PAGE 3
#define TMEM_PUT_PAGE 4
#define TMEM_GET_PAGE 5
#define TMEM_FLUSH_PAGE 6
#define TMEM_FLUSH_OBJECT 7
#define TMEM_READ 8
#define TMEM_WRITE 9
#define TMEM_XCHG 10

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST 1
#define TMEM_POOL_SHARED 2
#define TMEM_POOL_PAGESIZE_SHIFT 4
#define TMEM_VERSION_SHIFT 24
#define TMEM_SPEC_VERSION 1

struct tmem_pool_uuid {
	u64 uuid_lo;
	u64 uuid_hi;
};

struct tmem_oid {
	u64 oid[3];
};

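/* placeholder uuid for private (non-shared) pools */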
#define TMEM_POOL_PRIVATE_UUID { 0, 0 }

/* xen tmem foundation ops/hypercalls */

static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
	u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
{
	struct tmem_op op;
	int rc = 0;

	op.cmd = tmem_cmd;
	op.pool_id = tmem_pool;
	op.u.gen.oid[0] = oid.oid[0];
	op.u.gen.oid[1] = oid.oid[1];
	op.u.gen.oid[2] = oid.oid[2];
	op.u.gen.index = index;
	op.u.gen.tmem_offset = tmem_offset;
	op.u.gen.pfn_offset = pfn_offset;
	op.u.gen.len = len;
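	/* the frame number itself is passed in the guest-handle field */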
	set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}

static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
	u32 flags, unsigned long pagesize)
{
	struct tmem_op op;
	int rc = 0, pageshift;

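	/* derive pageshift = log2(pagesize); pagesize must be a power of 2 */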
	for (pageshift = 0; pagesize != 1; pageshift++)
		pagesize >>= 1;
	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
	flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
	op.cmd = TMEM_NEW_POOL;
	op.u.new.uuid[0] = uuid.uuid_lo;
	op.u.new.uuid[1] = uuid.uuid_hi;
	op.u.new.flags = flags;
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}
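
/*
 * Example: with a 4K pagesize, pageshift == 12, so the encoded pagesize
 * field is (12 - 12) == 0 and the flags word reduces to the version bits,
 * TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT.
 */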

/* xen generic tmem ops */

static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
	u32 index, unsigned long pfn)
{
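	/*
	 * PV guests pass a real machine frame number; auto-translated
	 * (HVM) guests pass the pfn itself.
	 */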
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
	u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
	return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
		0, 0, 0, 0);
}

static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}

bool __read_mostly tmem_enabled = false;

static int __init enable_tmem(char *s)
{
	tmem_enabled = true;
	return 1;
}
__setup("tmem", enable_tmem);

#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
	struct tmem_oid oid = { { 0 } };

	return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}

/* cleancache ops */

static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
	pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
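	/* cast is safe: xen_tmem_init() checks filekey and tmem_oid sizes match */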
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);

	if (pool < 0)
		return;
	if (ind != index)
		return;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	(void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
}

static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
	pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);
	int ret;

	/* translate return values to linux semantics */
	if (pool < 0)
		return -1;
	if (ind != index)
		return -1;
	ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
	if (ret == 1)
		return 0;
	else
		return -1;
}

static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
	pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	if (ind != index)
		return;
	(void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	(void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
	if (pool < 0)
		return;
	(void)xen_tmem_destroy_pool((u32)pool);
}

static int tmem_cleancache_init_fs(size_t pagesize)
{
	struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

	return xen_tmem_new_pool(uuid_private, 0, pagesize);
}

static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	struct tmem_pool_uuid shared_uuid;

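	/* split the 16-byte filesystem uuid into the two u64 halves tmem expects */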
	shared_uuid.uuid_lo = *(u64 *)uuid;
	shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}

static bool __initdata use_cleancache = true;

static int __init no_cleancache(char *s)
{
	use_cleancache = false;
	return 1;
}
__setup("nocleancache", no_cleancache);

static struct cleancache_ops __initdata tmem_cleancache_ops = {
	.put_page = tmem_cleancache_put_page,
	.get_page = tmem_cleancache_get_page,
	.invalidate_page = tmem_cleancache_flush_page,
	.invalidate_inode = tmem_cleancache_flush_inode,
	.invalidate_fs = tmem_cleancache_flush_fs,
	.init_shared_fs = tmem_cleancache_init_shared_fs,
	.init_fs = tmem_cleancache_init_fs
};
#endif

#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 */
#define SWIZ_BITS 4
#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind) (_ind >> SWIZ_BITS)

static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };
	oid.oid[0] = _oswiz(type, ind);
	return oid;
}
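
/*
 * Example with SWIZ_BITS == 4: swap type 1, offset 0x123 is stored in
 * object id _oswiz(1, 0x123) == 0x13 at index iswiz(0x123) == 0x12.
 */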

/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
	struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if it
 * was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
	struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;

	if (pool < 0)
		return;
	if (ind64 != ind)
		return;
	(void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}

/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
	int pool = tmem_frontswap_poolid;
	int ind;

	if (pool < 0)
		return;
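	/* one tmem object per swizzle value: flush all 1 << SWIZ_BITS of them */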
	for (ind = SWIZ_MASK; ind >= 0; ind--)
		(void)xen_tmem_flush_object(pool, oswiz(type, ind));
}

static void tmem_frontswap_init(unsigned ignored)
{
	struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (tmem_frontswap_poolid < 0)
		tmem_frontswap_poolid =
			xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}

static bool __initdata use_frontswap = true;

static int __init no_frontswap(char *s)
{
	use_frontswap = false;
	return 1;
}
__setup("nofrontswap", no_frontswap);

static struct frontswap_ops __initdata tmem_frontswap_ops = {
	.store = tmem_frontswap_store,
	.load = tmem_frontswap_load,
	.invalidate_page = tmem_frontswap_flush_page,
	.invalidate_area = tmem_frontswap_flush_area,
	.init = tmem_frontswap_init
};
#endif

static int __init xen_tmem_init(void)
{
	if (!xen_domain())
		return 0;
#ifdef CONFIG_FRONTSWAP
	if (tmem_enabled && use_frontswap) {
		char *s = "";
		struct frontswap_ops old_ops =
			frontswap_register_ops(&tmem_frontswap_ops);

		tmem_frontswap_poolid = -1;
		if (old_ops.init != NULL)
			s = " (WARNING: frontswap_ops overridden)";
		printk(KERN_INFO "frontswap enabled, RAM provided by "
			"Xen Transcendent Memory%s\n", s);
	}
#endif
#ifdef CONFIG_CLEANCACHE
	BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
	if (tmem_enabled && use_cleancache) {
		char *s = "";
		struct cleancache_ops old_ops =
			cleancache_register_ops(&tmem_cleancache_ops);
		if (old_ops.init_fs != NULL)
			s = " (WARNING: cleancache_ops overridden)";
		printk(KERN_INFO "cleancache enabled, RAM provided by "
			"Xen Transcendent Memory%s\n", s);
	}
#endif
	return 0;
}

module_init(xen_tmem_init)