/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>
#include <linux/frontswap.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>

#ifndef CONFIG_XEN_TMEM_MODULE
bool __read_mostly tmem_enabled = false;

static int __init enable_tmem(char *s)
{
        tmem_enabled = true;
        return 1;
}
__setup("tmem", enable_tmem);
#endif
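
/*
 * When tmem is built into the kernel it is off by default and is switched
 * on with the "tmem" kernel command-line parameter via the __setup() hook
 * above.  When built as a module (CONFIG_XEN_TMEM_MODULE), loading the
 * module implies enablement; see the tmem_enabled definition in
 * <xen/tmem.h>.
 */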

#ifdef CONFIG_CLEANCACHE
static bool cleancache __read_mostly = true;
module_param(cleancache, bool, S_IRUGO);
static bool selfballooning __read_mostly = true;
module_param(selfballooning, bool, S_IRUGO);
#endif /* CONFIG_CLEANCACHE */

#ifdef CONFIG_FRONTSWAP
static bool frontswap __read_mostly = true;
module_param(frontswap, bool, S_IRUGO);
#else /* CONFIG_FRONTSWAP */
#define frontswap (0)
#endif /* CONFIG_FRONTSWAP */

#ifdef CONFIG_XEN_SELFBALLOONING
static bool selfshrinking __read_mostly = true;
module_param(selfshrinking, bool, S_IRUGO);
#endif /* CONFIG_XEN_SELFBALLOONING */
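
/*
 * Each feature defaults to on but can be disabled at load time, e.g.
 * "modprobe tmem frontswap=0 selfballooning=0" (or "tmem.frontswap=0"
 * etc. on the kernel command line when built in).  S_IRUGO makes the
 * parameters visible but read-only via sysfs after load.
 */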

#define TMEM_CONTROL               0
#define TMEM_NEW_POOL              1
#define TMEM_DESTROY_POOL          2
#define TMEM_NEW_PAGE              3
#define TMEM_PUT_PAGE              4
#define TMEM_GET_PAGE              5
#define TMEM_FLUSH_PAGE            6
#define TMEM_FLUSH_OBJECT          7
#define TMEM_READ                  8
#define TMEM_WRITE                 9
#define TMEM_XCHG                  10

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST          1
#define TMEM_POOL_SHARED           2
#define TMEM_POOL_PAGESIZE_SHIFT   4
#define TMEM_VERSION_SHIFT         24
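
/*
 * The flags word handed to TMEM_NEW_POOL is built up in
 * xen_tmem_new_pool() as:
 *   bit 0      - persistent pool (TMEM_POOL_PERSIST)
 *   bit 1      - shared pool (TMEM_POOL_SHARED)
 *   bits 4..   - log2(pagesize) - 12 (TMEM_POOL_PAGESIZE_SHIFT)
 *   bits 24..  - tmem spec version (TMEM_VERSION_SHIFT)
 * so a persistent pool of 4K pages encodes as TMEM_POOL_PERSIST |
 * (0 << TMEM_POOL_PAGESIZE_SHIFT) | (TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT).
 */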

struct tmem_pool_uuid {
        u64 uuid_lo;
        u64 uuid_hi;
};

struct tmem_oid {
        u64 oid[3];
};

#define TMEM_POOL_PRIVATE_UUID  { 0, 0 }

/* xen tmem foundation ops/hypercalls */

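/*
 * xen_tmem_op() marshals one generic tmem operation into a struct tmem_op
 * and issues the HYPERVISOR_tmem_op hypercall.  @gmfn is a guest frame
 * number (see the PV/HVM distinction below); @tmem_offset, @pfn_offset
 * and @len are used only by the subpage commands (TMEM_READ/TMEM_WRITE)
 * and are passed as zero by the whole-page helpers in this file.
 */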
89static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
90 u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
91{
92 struct tmem_op op;
93 int rc = 0;
94
95 op.cmd = tmem_cmd;
96 op.pool_id = tmem_pool;
97 op.u.gen.oid[0] = oid.oid[0];
98 op.u.gen.oid[1] = oid.oid[1];
99 op.u.gen.oid[2] = oid.oid[2];
100 op.u.gen.index = index;
101 op.u.gen.tmem_offset = tmem_offset;
102 op.u.gen.pfn_offset = pfn_offset;
103 op.u.gen.len = len;
104 set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
105 rc = HYPERVISOR_tmem_op(&op);
106 return rc;
107}

static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
                                u32 flags, unsigned long pagesize)
{
        struct tmem_op op;
        int rc = 0, pageshift;

        /* pagesize must be a power of two; compute its log2 */
        for (pageshift = 0; pagesize != 1; pageshift++)
                pagesize >>= 1;
        /* pool page sizes are encoded relative to the 4K (2^12) base size */
        flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
        flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
        op.cmd = TMEM_NEW_POOL;
        op.u.new.uuid[0] = uuid.uuid_lo;
        op.u.new.uuid[1] = uuid.uuid_hi;
        op.u.new.flags = flags;
        rc = HYPERVISOR_tmem_op(&op);
        return rc;
}

/* xen generic tmem ops */

129static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
130 u32 index, unsigned long pfn)
131{
132 unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
133
134 return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
135 gmfn, 0, 0, 0);
136}
137
138static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
139 u32 index, unsigned long pfn)
140{
141 unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
142
143 return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
144 gmfn, 0, 0, 0);
145}
146
147static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
148{
149 return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
150 0, 0, 0, 0);
151}
152
153static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
154{
155 return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
156}
157

#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
        struct tmem_oid oid = { { 0 } };

        return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}

/* cleancache ops */

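/*
 * Cleancache keys pages by (pool id, file key, page index).  Puts are
 * best-effort hints that may fail silently; gets return 0 only when tmem
 * still holds a copy.  The (u32)index checks below reject pgoff_t values
 * that would be truncated by the 32-bit tmem index.
 */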
static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
                                     pgoff_t index, struct page *page)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;
        unsigned long pfn = page_to_pfn(page);

        if (pool < 0)
                return;
        if (ind != index)
                return;
        mb(); /* ensure page is quiescent; tmem may address it with an alias */
        (void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
}

static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
                                    pgoff_t index, struct page *page)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;
        unsigned long pfn = page_to_pfn(page);
        int ret;

        /* translate return values to linux semantics */
        if (pool < 0)
                return -1;
        if (ind != index)
                return -1;
        ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
        if (ret == 1)
                return 0;
        else
                return -1;
}

static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
                                       pgoff_t index)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;

        if (pool < 0)
                return;
        if (ind != index)
                return;
        (void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
        struct tmem_oid oid = *(struct tmem_oid *)&key;

        if (pool < 0)
                return;
        (void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
        if (pool < 0)
                return;
        (void)xen_tmem_destroy_pool((u32)pool);
}

static int tmem_cleancache_init_fs(size_t pagesize)
{
        struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

        return xen_tmem_new_pool(uuid_private, 0, pagesize);
}

static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
        struct tmem_pool_uuid shared_uuid;

        /* the 128-bit filesystem uuid is split into two 64-bit halves */
        shared_uuid.uuid_lo = *(u64 *)uuid;
        shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
        return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}

static struct cleancache_ops tmem_cleancache_ops = {
        .put_page = tmem_cleancache_put_page,
        .get_page = tmem_cleancache_get_page,
        .invalidate_page = tmem_cleancache_flush_page,
        .invalidate_inode = tmem_cleancache_flush_inode,
        .invalidate_fs = tmem_cleancache_flush_fs,
        .init_shared_fs = tmem_cleancache_init_shared_fs,
        .init_fs = tmem_cleancache_init_fs
};
#endif

#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 */
#define SWIZ_BITS               4
#define SWIZ_MASK               ((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)     (((_type) << SWIZ_BITS) | ((_ind) & SWIZ_MASK))
#define iswiz(_ind)             ((_ind) >> SWIZ_BITS)

static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
        struct tmem_oid oid = { .oid = { 0 } };
        oid.oid[0] = _oswiz(type, ind);
        return oid;
}
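
/*
 * For example, with SWIZ_BITS = 4, swap type 1 at offset 0x123 maps to
 * object id (1 << 4) | (0x123 & 0xf) = 0x13 and index 0x123 >> 4 = 0x12,
 * so consecutive swap offsets are spread across 16 tmem objects per type.
 */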

/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
                                struct page *page)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        unsigned long pfn = page_to_pfn(page);
        int pool = tmem_frontswap_poolid;
        int ret;

        if (pool < 0)
                return -1;
        if (ind64 != ind)
                return -1;
        mb(); /* ensure page is quiescent; tmem may address it with an alias */
        ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
        /* translate Xen tmem return values to linux semantics */
        if (ret == 1)
                return 0;
        else
                return -1;
}

/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if
 * it was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
                               struct page *page)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        unsigned long pfn = page_to_pfn(page);
        int pool = tmem_frontswap_poolid;
        int ret;

        if (pool < 0)
                return -1;
        if (ind64 != ind)
                return -1;
        ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
        /* translate Xen tmem return values to linux semantics */
        if (ret == 1)
                return 0;
        else
                return -1;
}

/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        int pool = tmem_frontswap_poolid;

        if (pool < 0)
                return;
        if (ind64 != ind)
                return;
        (void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}

/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
        int pool = tmem_frontswap_poolid;
        int ind;

        if (pool < 0)
                return;
        /* flush each of the swizzled objects this type's pages were spread across */
        for (ind = SWIZ_MASK; ind >= 0; ind--)
                (void)xen_tmem_flush_object(pool, oswiz(type, ind));
}

static void tmem_frontswap_init(unsigned ignored)
{
        struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

        /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
        if (tmem_frontswap_poolid < 0)
                tmem_frontswap_poolid =
                        xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}
365
Konrad Rzeszutek Wilk1e01c962013-04-30 15:26:51 -0700366static struct frontswap_ops tmem_frontswap_ops = {
Konrad Rzeszutek Wilk165c8ae2012-05-15 11:32:15 -0400367 .store = tmem_frontswap_store,
368 .load = tmem_frontswap_load,
Dan Magenheimer91c6cc92012-01-12 14:03:25 -0500369 .invalidate_page = tmem_frontswap_flush_page,
370 .invalidate_area = tmem_frontswap_flush_area,
Dan Magenheimerafec6e02011-06-17 15:06:20 -0600371 .init = tmem_frontswap_init
372};
373#endif
Dan Magenheimer5bc20fc2011-05-26 10:02:21 -0600374
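/*
 * Register the tmem backends.  frontswap_register_ops() and
 * cleancache_register_ops() return any previously installed ops rather
 * than failing, so a second backend (zcache, for instance) can only be
 * warned about, not refused.
 */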
static int xen_tmem_init(void)
{
        if (!xen_domain())
                return 0;
#ifdef CONFIG_FRONTSWAP
        if (tmem_enabled && frontswap) {
                char *s = "";
                struct frontswap_ops *old_ops =
                        frontswap_register_ops(&tmem_frontswap_ops);

                tmem_frontswap_poolid = -1;
                if (IS_ERR(old_ops) || old_ops) {
                        if (IS_ERR(old_ops))
                                return PTR_ERR(old_ops);
                        s = " (WARNING: frontswap_ops overridden)";
                }
                pr_info("frontswap enabled, RAM provided by Xen Transcendent Memory%s\n",
                        s);
        }
#endif
#ifdef CONFIG_CLEANCACHE
        BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
        if (tmem_enabled && cleancache) {
                char *s = "";
                struct cleancache_ops *old_ops =
                        cleancache_register_ops(&tmem_cleancache_ops);
                if (old_ops)
                        s = " (WARNING: cleancache_ops overridden)";
                pr_info("cleancache enabled, RAM provided by Xen Transcendent Memory%s\n",
                        s);
        }
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
        /*
         * There is no point in driving pages to the swap system if they
         * aren't going anywhere in the tmem universe.
         */
        if (!frontswap) {
                selfshrinking = false;
                selfballooning = false;
        }
        xen_selfballoon_init(selfballooning, selfshrinking);
#endif
        return 0;
}

module_init(xen_tmem_init)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
MODULE_DESCRIPTION("Shim to Xen transcendent memory");