/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>
#include <linux/frontswap.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>

#ifndef CONFIG_XEN_TMEM_MODULE
bool __read_mostly tmem_enabled = false;

static int __init enable_tmem(char *s)
{
	tmem_enabled = true;
	return 1;
}
__setup("tmem", enable_tmem);
#endif

#ifdef CONFIG_CLEANCACHE
static bool cleancache __read_mostly = true;
module_param(cleancache, bool, S_IRUGO);
static bool selfballooning __read_mostly = true;
module_param(selfballooning, bool, S_IRUGO);
#endif /* CONFIG_CLEANCACHE */

#ifdef CONFIG_FRONTSWAP
static bool frontswap __read_mostly = true;
module_param(frontswap, bool, S_IRUGO);
#else /* CONFIG_FRONTSWAP */
#define frontswap (0)
#endif /* CONFIG_FRONTSWAP */

#ifdef CONFIG_XEN_SELFBALLOONING
static bool selfshrinking __read_mostly = true;
module_param(selfshrinking, bool, S_IRUGO);
#endif /* CONFIG_XEN_SELFBALLOONING */
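
/*
 * Example configuration (illustrative, not part of this file): when tmem
 * is built in, it is enabled and tuned from the kernel command line, e.g.
 * "tmem tmem.cleancache=1 tmem.frontswap=1 tmem.selfshrinking=0"; when
 * built as a module, the rough equivalent is
 * "modprobe tmem cleancache=1 frontswap=1". The exact parameter prefix
 * depends on how the kernel names this module for your configuration.
 */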

#define TMEM_CONTROL               0
#define TMEM_NEW_POOL              1
#define TMEM_DESTROY_POOL          2
#define TMEM_NEW_PAGE              3
#define TMEM_PUT_PAGE              4
#define TMEM_GET_PAGE              5
#define TMEM_FLUSH_PAGE            6
#define TMEM_FLUSH_OBJECT          7
#define TMEM_READ                  8
#define TMEM_WRITE                 9
#define TMEM_XCHG                 10

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST          1
#define TMEM_POOL_SHARED           2
#define TMEM_POOL_PAGESIZE_SHIFT   4
#define TMEM_VERSION_SHIFT        24


struct tmem_pool_uuid {
	u64 uuid_lo;
	u64 uuid_hi;
};

struct tmem_oid {
	u64 oid[3];
};

#define TMEM_POOL_PRIVATE_UUID	{ 0, 0 }

/* xen tmem foundation ops/hypercalls */

static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
	u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
{
	struct tmem_op op;
	int rc = 0;

	op.cmd = tmem_cmd;
	op.pool_id = tmem_pool;
	op.u.gen.oid[0] = oid.oid[0];
	op.u.gen.oid[1] = oid.oid[1];
	op.u.gen.oid[2] = oid.oid[2];
	op.u.gen.index = index;
	op.u.gen.tmem_offset = tmem_offset;
	op.u.gen.pfn_offset = pfn_offset;
	op.u.gen.len = len;
	set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}

static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
				u32 flags, unsigned long pagesize)
{
	struct tmem_op op;
	int rc = 0, pageshift;

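	/*
	 * Compute pageshift = log2(pagesize); e.g. a 4096-byte page
	 * yields 12, which is encoded below relative to the 4K base
	 * (pageshift - 12 == 0) in the pool flags.
	 */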
	for (pageshift = 0; pagesize != 1; pageshift++)
		pagesize >>= 1;
	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
	flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
	op.cmd = TMEM_NEW_POOL;
	op.u.new.uuid[0] = uuid.uuid_lo;
	op.u.new.uuid[1] = uuid.uuid_hi;
	op.u.new.flags = flags;
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}

/* xen generic tmem ops */

static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, struct page *page)
{
	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
			   xen_page_to_gfn(page), 0, 0, 0);
}

static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, struct page *page)
{
	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
			   xen_page_to_gfn(page), 0, 0, 0);
}

static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
	return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
			   0, 0, 0, 0);
}

static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}


#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
	struct tmem_oid oid = { { 0 } };

	return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}

/* cleancache ops */

static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
				     pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	if (ind != index)
		return;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	(void)xen_tmem_put_page((u32)pool, oid, ind, page);
}

static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
				    pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	int ret;

	/* translate return values to linux semantics */
	if (pool < 0)
		return -1;
	if (ind != index)
		return -1;
	ret = xen_tmem_get_page((u32)pool, oid, ind, page);
	if (ret == 1)
		return 0;
	else
		return -1;
}

static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
				       pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	if (ind != index)
		return;
	(void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	(void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
	if (pool < 0)
		return;
	(void)xen_tmem_destroy_pool((u32)pool);
}

static int tmem_cleancache_init_fs(size_t pagesize)
{
	struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

	return xen_tmem_new_pool(uuid_private, 0, pagesize);
}

static int tmem_cleancache_init_shared_fs(uuid_t *uuid, size_t pagesize)
{
	struct tmem_pool_uuid shared_uuid;

	shared_uuid.uuid_lo = *(u64 *)&uuid->b[0];
	shared_uuid.uuid_hi = *(u64 *)&uuid->b[8];
	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}

static const struct cleancache_ops tmem_cleancache_ops = {
	.put_page = tmem_cleancache_put_page,
	.get_page = tmem_cleancache_get_page,
	.invalidate_page = tmem_cleancache_flush_page,
	.invalidate_inode = tmem_cleancache_flush_inode,
	.invalidate_fs = tmem_cleancache_flush_fs,
	.init_shared_fs = tmem_cleancache_init_shared_fs,
	.init_fs = tmem_cleancache_init_fs
};
#endif

#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 */
#define SWIZ_BITS		4
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)

static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };
	oid.oid[0] = _oswiz(type, ind);
	return oid;
}

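/*
 * Worked example (with SWIZ_BITS == 4): swap type 1, page offset 0x1234
 * is stored under oid 0x14 ((1 << 4) | (0x1234 & 0xf)) at index 0x123
 * (0x1234 >> 4), so consecutive offsets spread across 16 tmem objects.
 */
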
/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;
	int ret;

	/* THP isn't supported */
	if (PageTransHuge(page))
		return -1;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), page);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if
 * it was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
			       struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), page);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;

	if (pool < 0)
		return;
	if (ind64 != ind)
		return;
	(void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}

/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
	int pool = tmem_frontswap_poolid;
	int ind;

	if (pool < 0)
		return;
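	/* one flush per swizzled oid: objects oswiz(type, 0..SWIZ_MASK) */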
	for (ind = SWIZ_MASK; ind >= 0; ind--)
		(void)xen_tmem_flush_object(pool, oswiz(type, ind));
}

static void tmem_frontswap_init(unsigned ignored)
{
	struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (tmem_frontswap_poolid < 0)
		tmem_frontswap_poolid =
			xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}

static struct frontswap_ops tmem_frontswap_ops = {
	.store = tmem_frontswap_store,
	.load = tmem_frontswap_load,
	.invalidate_page = tmem_frontswap_flush_page,
	.invalidate_area = tmem_frontswap_flush_area,
	.init = tmem_frontswap_init
};
#endif

static int __init xen_tmem_init(void)
{
	if (!xen_domain())
		return 0;
#ifdef CONFIG_FRONTSWAP
	if (tmem_enabled && frontswap) {
		tmem_frontswap_poolid = -1;
		frontswap_register_ops(&tmem_frontswap_ops);
		pr_info("frontswap enabled, RAM provided by Xen Transcendent Memory\n");
	}
#endif
#ifdef CONFIG_CLEANCACHE
	BUILD_BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
	if (tmem_enabled && cleancache) {
		int err;

		err = cleancache_register_ops(&tmem_cleancache_ops);
		if (err)
			pr_warn("failed to enable cleancache: %d\n", err);
		else
			pr_info("cleancache enabled, RAM provided by Xen Transcendent Memory\n");
	}
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
	/*
	 * There is no point in driving pages to the swap system if they
	 * aren't going anywhere in the tmem universe.
	 */
	if (!frontswap) {
		selfshrinking = false;
		selfballooning = false;
	}
	xen_selfballoon_init(selfballooning, selfshrinking);
#endif
	return 0;
}

module_init(xen_tmem_init)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
MODULE_DESCRIPTION("Shim to Xen transcendent memory");