/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>

/* temporary ifdef until include/linux/frontswap.h is upstream */
#ifdef CONFIG_FRONTSWAP
#include <linux/frontswap.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>

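/*
 * Sub-ops for HYPERVISOR_tmem_op(); these values are fixed by the Xen
 * transcendent memory ABI and must match the hypervisor's definitions.
 */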
#define TMEM_CONTROL		0
#define TMEM_NEW_POOL		1
#define TMEM_DESTROY_POOL	2
#define TMEM_NEW_PAGE		3
#define TMEM_PUT_PAGE		4
#define TMEM_GET_PAGE		5
#define TMEM_FLUSH_PAGE		6
#define TMEM_FLUSH_OBJECT	7
#define TMEM_READ		8
#define TMEM_WRITE		9
#define TMEM_XCHG		10

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST	1
#define TMEM_POOL_SHARED	2
#define TMEM_POOL_PAGESIZE_SHIFT 4
#define TMEM_VERSION_SHIFT	24

struct tmem_pool_uuid {
	u64 uuid_lo;
	u64 uuid_hi;
};

struct tmem_oid {
	u64 oid[3];
};

#define TMEM_POOL_PRIVATE_UUID	{ 0, 0 }

/* xen tmem foundation ops/hypercalls */

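/*
 * Issue a tmem hypercall using the generic op layout: @oid names an
 * object within pool @tmem_pool, @index selects a page within that
 * object, and @gmfn is the guest frame to copy to or from.
 */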
static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
	u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
{
	struct tmem_op op;
	int rc = 0;

	op.cmd = tmem_cmd;
	op.pool_id = tmem_pool;
	op.u.gen.oid[0] = oid.oid[0];
	op.u.gen.oid[1] = oid.oid[1];
	op.u.gen.oid[2] = oid.oid[2];
	op.u.gen.index = index;
	op.u.gen.tmem_offset = tmem_offset;
	op.u.gen.pfn_offset = pfn_offset;
	op.u.gen.len = len;
	set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}

static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
				u32 flags, unsigned long pagesize)
{
	struct tmem_op op;
	int rc = 0, pageshift;

	/* encode log2(pagesize), relative to the 4K base, into the flags */
	for (pageshift = 0; pagesize != 1; pageshift++)
		pagesize >>= 1;
	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
	flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
	op.cmd = TMEM_NEW_POOL;
	op.u.new.uuid[0] = uuid.uuid_lo;
	op.u.new.uuid[1] = uuid.uuid_hi;
	op.u.new.flags = flags;
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}

/* xen generic tmem ops */

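/*
 * tmem takes a guest frame number: PV guests must translate pfn to mfn,
 * while auto-translated guests pass the pfn through unchanged.
 */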
static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
	return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
		0, 0, 0, 0);
}

static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}

#ifndef CONFIG_XEN_TMEM_MODULE
bool __read_mostly tmem_enabled = false;

/* enable tmem with the "tmem" kernel boot parameter */
static int __init enable_tmem(char *s)
{
	tmem_enabled = true;
	return 1;
}
__setup("tmem", enable_tmem);
#endif

#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
	struct tmem_oid oid = { { 0 } };

	return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}

/* cleancache ops */

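/*
 * A cleancache_filekey is reinterpreted in place as a tmem_oid;
 * xen_tmem_init() BUG_ONs if the two types differ in size.
 */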
static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
				     pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);

	if (pool < 0)
		return;
	if (ind != index)	/* refuse indices that don't fit in 32 bits */
		return;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	(void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
}

static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
				    pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);
	int ret;

	/* translate return values to linux semantics */
	if (pool < 0)
		return -1;
	if (ind != index)
		return -1;
	ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
	if (ret == 1)
		return 0;
	else
		return -1;
}

static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
				       pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	if (ind != index)
		return;
	(void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	(void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
	if (pool < 0)
		return;
	(void)xen_tmem_destroy_pool((u32)pool);
}

static int tmem_cleancache_init_fs(size_t pagesize)
{
	struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

	return xen_tmem_new_pool(uuid_private, 0, pagesize);
}

static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	struct tmem_pool_uuid shared_uuid;

	/* the caller provides a 16-byte uuid; split it into two u64 halves */
	shared_uuid.uuid_lo = *(u64 *)uuid;
	shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}

static bool disable_cleancache __read_mostly;
static bool disable_selfballooning __read_mostly;
#ifdef CONFIG_XEN_TMEM_MODULE
module_param(disable_cleancache, bool, S_IRUGO);
module_param(disable_selfballooning, bool, S_IRUGO);
#else
static int __init no_cleancache(char *s)
{
	disable_cleancache = true;
	return 1;
}
__setup("nocleancache", no_cleancache);
#endif

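/*
 * Note: the cleancache API names these hooks "invalidate"; the local
 * helpers keep tmem's historical "flush" naming.
 */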
static struct cleancache_ops tmem_cleancache_ops = {
	.put_page = tmem_cleancache_put_page,
	.get_page = tmem_cleancache_get_page,
	.invalidate_page = tmem_cleancache_flush_page,
	.invalidate_inode = tmem_cleancache_flush_inode,
	.invalidate_fs = tmem_cleancache_flush_fs,
	.init_shared_fs = tmem_cleancache_init_shared_fs,
	.init_fs = tmem_cleancache_init_fs
};
#endif

#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 */
#define SWIZ_BITS		4
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)
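/*
 * Example: with SWIZ_BITS == 4, the page at (type 2, offset 0x1234)
 * lands in object _oswiz(2, 0x1234) == 0x24 at index
 * iswiz(0x1234) == 0x123.
 */
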
static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };

	oid.oid[0] = _oswiz(type, ind);
	return oid;
}

/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if
 * it was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
			       struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;

	if (pool < 0)
		return;
	if (ind64 != ind)
		return;
	(void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}

/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
	int pool = tmem_frontswap_poolid;
	int ind;

	if (pool < 0)
		return;
	/* a swaptype's pages are swizzled across 2^SWIZ_BITS objects; flush each */
	for (ind = SWIZ_MASK; ind >= 0; ind--)
		(void)xen_tmem_flush_object(pool, oswiz(type, ind));
}

static void tmem_frontswap_init(unsigned ignored)
{
	struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (tmem_frontswap_poolid < 0)
		tmem_frontswap_poolid =
			xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}

static bool disable_frontswap __read_mostly;
static bool disable_frontswap_selfshrinking __read_mostly;
#ifdef CONFIG_XEN_TMEM_MODULE
module_param(disable_frontswap, bool, S_IRUGO);
module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
#else
static int __init no_frontswap(char *s)
{
	disable_frontswap = true;
	return 1;
}
__setup("nofrontswap", no_frontswap);
#endif

static struct frontswap_ops tmem_frontswap_ops = {
	.store = tmem_frontswap_store,
	.load = tmem_frontswap_load,
	.invalidate_page = tmem_frontswap_flush_page,
	.invalidate_area = tmem_frontswap_flush_area,
	.init = tmem_frontswap_init
};
#else /* CONFIG_FRONTSWAP */
#define disable_frontswap_selfshrinking 1
#endif

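/*
 * Register with frontswap and cleancache as configured; if another
 * backend had already registered, its ops are overridden and a
 * warning is printed.
 */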
static int xen_tmem_init(void)
{
	if (!xen_domain())
		return 0;
#ifdef CONFIG_FRONTSWAP
	if (tmem_enabled && !disable_frontswap) {
		char *s = "";
		struct frontswap_ops *old_ops =
			frontswap_register_ops(&tmem_frontswap_ops);

		tmem_frontswap_poolid = -1;
		if (IS_ERR(old_ops) || old_ops) {
			if (IS_ERR(old_ops))
				return PTR_ERR(old_ops);
			s = " (WARNING: frontswap_ops overridden)";
		}
		printk(KERN_INFO "frontswap enabled, RAM provided by "
			"Xen Transcendent Memory%s\n", s);
	}
#endif
#ifdef CONFIG_CLEANCACHE
	BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
	if (tmem_enabled && !disable_cleancache) {
		char *s = "";
		struct cleancache_ops *old_ops =
			cleancache_register_ops(&tmem_cleancache_ops);
		if (old_ops)
			s = " (WARNING: cleancache_ops overridden)";
		printk(KERN_INFO "cleancache enabled, RAM provided by "
			"Xen Transcendent Memory%s\n", s);
	}
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
	xen_selfballoon_init(!disable_selfballooning,
				!disable_frontswap_selfshrinking);
#endif
	return 0;
}

module_init(xen_tmem_init);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
MODULE_DESCRIPTION("Shim to Xen transcendent memory");