/*
 * Frontswap frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of frontswap.  See
 * Documentation/vm/frontswap.txt for more information.
 *
 * Copyright (C) 2009-2012 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>

/*
 * frontswap_ops is set by frontswap_register_ops to contain the pointers
 * to the frontswap "backend" implementation functions.
 */
static struct frontswap_ops frontswap_ops __read_mostly;

/*
 * This global enablement flag reduces overhead on systems where frontswap_ops
 * has not been registered, so is preferred to the slower alternative: a
 * function call that checks a non-global.
 */
bool frontswap_enabled __read_mostly;
EXPORT_SYMBOL(frontswap_enabled);

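/*
 * The chokepoint is implemented as static inline wrappers in
 * include/linux/frontswap.h.  A minimal sketch of the store-side wrapper
 * (abbreviated here for illustration) is:
 *
 *	static inline int frontswap_store(struct page *page)
 *	{
 *		int ret = -1;
 *
 *		if (frontswap_enabled)
 *			ret = __frontswap_store(page);
 *		return ret;
 *	}
 *
 * so a kernel with no backend registered pays only a global flag test
 * per swap-out, not a function call.
 */
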
/*
 * If enabled, frontswap_store will report failure even when the store
 * succeeds.  As a result, the swap subsystem will always write the page
 * to swap, in effect converting frontswap into a writethrough cache.  In
 * this mode, there is no direct reduction in swap writes, but a frontswap
 * backend can unilaterally "reclaim" any pages in use with no data loss,
 * thus providing increased control over maximum memory usage due to
 * frontswap.
 */
static bool frontswap_writethrough_enabled __read_mostly;

/*
 * If enabled, the underlying tmem implementation is capable of doing
 * exclusive gets, so frontswap_load, on a successful tmem_get, must
 * mark the page as no longer in frontswap AND mark it dirty.
 */
static bool frontswap_tmem_exclusive_gets_enabled __read_mostly;

#ifdef CONFIG_DEBUG_FS
/*
 * Counters available via /sys/kernel/debug/frontswap (if debugfs is
 * properly configured).  These are for information only so are not protected
 * against increment races.
 */
static u64 frontswap_loads;
static u64 frontswap_succ_stores;
static u64 frontswap_failed_stores;
static u64 frontswap_invalidates;

static inline void inc_frontswap_loads(void) {
	frontswap_loads++;
}
static inline void inc_frontswap_succ_stores(void) {
	frontswap_succ_stores++;
}
static inline void inc_frontswap_failed_stores(void) {
	frontswap_failed_stores++;
}
static inline void inc_frontswap_invalidates(void) {
	frontswap_invalidates++;
}
#else
static inline void inc_frontswap_loads(void) { }
static inline void inc_frontswap_succ_stores(void) { }
static inline void inc_frontswap_failed_stores(void) { }
static inline void inc_frontswap_invalidates(void) { }
#endif

/*
 * Due to the asynchronous nature of the backends loading potentially
 * _after_ the swap system has been activated, we have chokepoints
 * on all frontswap functions to not call the backend until the backend
 * has registered.
 *
 * Specifically when no backend is registered (nobody called
 * frontswap_register_ops) all calls to frontswap_init (which is done via
 * swapon -> enable_swap_info -> frontswap_init) are registered and remembered
 * (via the setting of need_init bitmap) but fail to create tmem_pools. When a
 * backend registers with frontswap at some later point the previous
 * calls to frontswap_init are executed (by iterating over the need_init
 * bitmap) to create tmem_pools and set the respective poolids. All of that is
 * guarded by us using atomic bit operations on the 'need_init' bitmap.
 *
 * This does not guard us against the user deciding to call swapoff right as
 * we are calling the backend to initialize (so swapon is in action).
 * Fortunately for us, the swapon_mutex has been taken by the callee so we are
 * OK. The other scenario where calls to frontswap_store (called via
 * swap_writepage) is racing with frontswap_invalidate_area (called via
 * swapoff) is again guarded by the swap subsystem.
 *
 * While no backend is registered all calls to frontswap_[store|load|
 * invalidate_area|invalidate_page] are ignored or fail.
 *
 * The time between the backend being registered and the swap file system
 * calling the backend (via the frontswap_* functions) is indeterminate as
 * backend_registered is not atomic_t (or a value guarded by a spinlock).
 * That is OK as we are comfortable missing some of these calls to the newly
 * registered backend.
 *
 * Obviously the opposite (unloading the backend) must be done after all
 * the frontswap_[store|load|invalidate_area|invalidate_page] start
 * ignoring or failing the requests - at which point backend_registered
 * would have to be made in some fashion atomic.
 */
static DECLARE_BITMAP(need_init, MAX_SWAPFILES);
static bool backend_registered __read_mostly;

/*
 * Register operations for frontswap, returning the previous ops to allow
 * detection of multiple backends and possible nesting.
 */
struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops)
{
	struct frontswap_ops old = frontswap_ops;
	int i;

	frontswap_ops = *ops;
	frontswap_enabled = true;

	for (i = 0; i < MAX_SWAPFILES; i++) {
		if (test_and_clear_bit(i, need_init))
			(*frontswap_ops.init)(i);
	}
	/*
	 * We MUST have backend_registered set _after_ the frontswap_init()
	 * calls have been made. Otherwise __frontswap_store might fail. Hence
	 * the barrier to make sure the compiler does not reorder us.
	 */
	barrier();
	backend_registered = true;
	return old;
}
EXPORT_SYMBOL(frontswap_register_ops);
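
/*
 * Illustrative only -- "mybackend" and its functions are hypothetical,
 * not an existing in-tree backend.  A backend fills in a frontswap_ops
 * and registers it once it is ready to accept pages; the returned ops
 * can be checked to detect nesting:
 *
 *	static struct frontswap_ops mybackend_ops = {
 *		.init = mybackend_init,
 *		.store = mybackend_store,
 *		.load = mybackend_load,
 *		.invalidate_page = mybackend_invalidate_page,
 *		.invalidate_area = mybackend_invalidate_area,
 *	};
 *
 *	static int __init mybackend_setup(void)
 *	{
 *		struct frontswap_ops old =
 *			frontswap_register_ops(&mybackend_ops);
 *
 *		if (old.init != NULL)
 *			pr_warn("mybackend: ops were already registered\n");
 *		return 0;
 *	}
 */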

/*
 * Enable/disable frontswap writethrough (see above).
 */
void frontswap_writethrough(bool enable)
{
	frontswap_writethrough_enabled = enable;
}
EXPORT_SYMBOL(frontswap_writethrough);
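
/*
 * Example (hypothetical backend): a backend that may discard its copies
 * at any time would enable writethrough at setup, so the swap device
 * always holds the data as well:
 *
 *	frontswap_writethrough(true);
 */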

/*
 * Enable/disable frontswap exclusive gets (see above).
 */
void frontswap_tmem_exclusive_gets(bool enable)
{
	frontswap_tmem_exclusive_gets_enabled = enable;
}
EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);
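
/*
 * Example (hypothetical backend): a backend whose "get" frees its copy
 * of the page as a side effect would declare that at setup:
 *
 *	frontswap_tmem_exclusive_gets(true);
 */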

/*
 * Called when a swap device is swapon'd.
 */
void __frontswap_init(unsigned type)
{
	struct swap_info_struct *sis = swap_info[type];

	if (backend_registered) {
		BUG_ON(sis == NULL);
		if (sis->frontswap_map == NULL)
			return;
		(*frontswap_ops.init)(type);
	} else {
		/* valid swap types run from 0 to MAX_SWAPFILES - 1 */
		BUG_ON(type >= MAX_SWAPFILES);
		set_bit(type, need_init);
	}
}
EXPORT_SYMBOL(__frontswap_init);

static inline void __frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
{
	frontswap_clear(sis, offset);
	atomic_dec(&sis->frontswap_pages);
}

/*
 * "Store" data from a page to frontswap and associate it with the page's
 * swaptype and offset.  Page must be locked and in the swap cache.
 * If frontswap already contains a page with matching swaptype and
 * offset, the frontswap implementation may either overwrite the data and
 * return success or invalidate the page from frontswap and return failure.
 */
int __frontswap_store(struct page *page)
{
	int ret = -1, dup = 0;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);

	if (!backend_registered) {
		inc_frontswap_failed_stores();
		return ret;
	}

	BUG_ON(!PageLocked(page));
	BUG_ON(sis == NULL);
	if (frontswap_test(sis, offset))
		dup = 1;
	ret = frontswap_ops.store(type, offset, page);
	if (ret == 0) {
		frontswap_set(sis, offset);
		inc_frontswap_succ_stores();
		if (!dup)
			atomic_inc(&sis->frontswap_pages);
	} else {
		/*
		 * A failed "dup" store always results in automatic
		 * invalidation of the (older) page from frontswap.
		 */
		inc_frontswap_failed_stores();
		if (dup)
			__frontswap_clear(sis, offset);
	}
	if (frontswap_writethrough_enabled)
		/* report failure so swap also writes to swap device */
		ret = -1;
	return ret;
}
EXPORT_SYMBOL(__frontswap_store);
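
/*
 * For reference, a sketch (based on mm/page_io.c as of this writing) of
 * how swap_writepage() consumes the return value:
 *
 *	if (frontswap_store(page) == 0) {
 *		set_page_writeback(page);
 *		unlock_page(page);
 *		end_page_writeback(page);
 *		goto out;
 *	}
 *
 * On a nonzero return it falls through and writes the page to the real
 * swap device.
 */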

/*
 * "Get" data from frontswap associated with swaptype and offset that were
 * specified when the data was put to frontswap and use it to fill the
 * specified page with data.  Page must be locked and in the swap cache.
 */
int __frontswap_load(struct page *page)
{
	int ret = -1;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);

	if (!backend_registered)
		return ret;

	BUG_ON(!PageLocked(page));
	BUG_ON(sis == NULL);
	if (frontswap_test(sis, offset))
		ret = frontswap_ops.load(type, offset, page);
	if (ret == 0) {
		inc_frontswap_loads();
		if (frontswap_tmem_exclusive_gets_enabled) {
			SetPageDirty(page);
			frontswap_clear(sis, offset);
		}
	}
	return ret;
}
EXPORT_SYMBOL(__frontswap_load);

/*
 * Invalidate any data from frontswap associated with the specified swaptype
 * and offset so that a subsequent "get" will fail.
 */
void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct swap_info_struct *sis = swap_info[type];

	if (!backend_registered)
		return;

	BUG_ON(sis == NULL);
	if (frontswap_test(sis, offset)) {
		frontswap_ops.invalidate_page(type, offset);
		__frontswap_clear(sis, offset);
		inc_frontswap_invalidates();
	}
}
EXPORT_SYMBOL(__frontswap_invalidate_page);

/*
 * Invalidate all data from frontswap associated with all offsets for the
 * specified swaptype.
 */
void __frontswap_invalidate_area(unsigned type)
{
	struct swap_info_struct *sis = swap_info[type];

	if (backend_registered) {
		BUG_ON(sis == NULL);
		if (sis->frontswap_map == NULL)
			return;
		(*frontswap_ops.invalidate_area)(type);
		atomic_set(&sis->frontswap_pages, 0);
		/* frontswap_map has one bit per page, i.e. sis->max bits */
		bitmap_zero(sis->frontswap_map, sis->max);
	}
	clear_bit(type, need_init);
}
EXPORT_SYMBOL(__frontswap_invalidate_area);

static unsigned long __frontswap_curr_pages(void)
{
	int type;
	unsigned long totalpages = 0;
	struct swap_info_struct *si = NULL;

	assert_spin_locked(&swap_lock);
	for (type = swap_list.head; type >= 0; type = si->next) {
		si = swap_info[type];
		totalpages += atomic_read(&si->frontswap_pages);
	}
	return totalpages;
}

static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
					int *swapid)
{
	int ret = -EINVAL;
	struct swap_info_struct *si = NULL;
	int si_frontswap_pages;
	unsigned long total_pages_to_unuse = total;
	unsigned long pages = 0, pages_to_unuse = 0;
	int type;

	assert_spin_locked(&swap_lock);
	for (type = swap_list.head; type >= 0; type = si->next) {
		si = swap_info[type];
		si_frontswap_pages = atomic_read(&si->frontswap_pages);
		if (total_pages_to_unuse < si_frontswap_pages) {
			pages = pages_to_unuse = total_pages_to_unuse;
		} else {
			pages = si_frontswap_pages;
			pages_to_unuse = 0; /* unuse all */
		}
		/* ensure there is enough RAM to fetch pages from frontswap */
		if (security_vm_enough_memory_mm(current->mm, pages)) {
			ret = -ENOMEM;
			continue;
		}
		vm_unacct_memory(pages);
		*unused = pages_to_unuse;
		*swapid = type;
		ret = 0;
		break;
	}

	return ret;
}

/*
 * Used to check if it's necessary and feasible to unuse pages.
 * Returns 1 when there is nothing to do, 0 when pages should be unused,
 * or an error code if there is an error.
 */
static int __frontswap_shrink(unsigned long target_pages,
				unsigned long *pages_to_unuse,
				int *type)
{
	unsigned long total_pages = 0, total_pages_to_unuse;

	assert_spin_locked(&swap_lock);

	total_pages = __frontswap_curr_pages();
	if (total_pages <= target_pages) {
		/* Nothing to do */
		*pages_to_unuse = 0;
		return 1;
	}
	total_pages_to_unuse = total_pages - target_pages;
	return __frontswap_unuse_pages(total_pages_to_unuse, pages_to_unuse, type);
}

/*
 * Frontswap, like a true swap device, may unnecessarily retain pages
 * under certain circumstances; "shrink" frontswap is essentially a
 * "partial swapoff" and works by calling try_to_unuse to attempt to
 * unuse enough frontswap pages to -- subject to memory
 * constraints -- reduce the number of pages in frontswap to the
 * number given in the parameter target_pages.
 */
void frontswap_shrink(unsigned long target_pages)
{
	unsigned long pages_to_unuse = 0;
	int uninitialized_var(type), ret;

	/*
	 * we don't want to hold swap_lock while doing a very
	 * lengthy try_to_unuse, but swap_list may change
	 * so restart scan from swap_list.head each time
	 */
	spin_lock(&swap_lock);
	ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
	spin_unlock(&swap_lock);
	if (ret == 0)
		try_to_unuse(type, true, pages_to_unuse);
}
EXPORT_SYMBOL(frontswap_shrink);
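
/*
 * Example (hypothetical policy): a backend under memory pressure could
 * ask for half of the frontswap pages to be pushed back into regular
 * memory and the swap device:
 *
 *	frontswap_shrink(frontswap_curr_pages() / 2);
 *
 * This is best-effort: __frontswap_unuse_pages may bail with -ENOMEM if
 * there is not enough RAM to pull the pages back in.
 */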

/*
 * Count and return the number of frontswap pages across all
 * swap devices.  This is exported so that backend drivers can
 * determine current usage without reading debugfs.
 */
unsigned long frontswap_curr_pages(void)
{
	unsigned long totalpages = 0;

	spin_lock(&swap_lock);
	totalpages = __frontswap_curr_pages();
	spin_unlock(&swap_lock);

	return totalpages;
}
EXPORT_SYMBOL(frontswap_curr_pages);

static int __init init_frontswap(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *root = debugfs_create_dir("frontswap", NULL);
	if (root == NULL)
		return -ENXIO;
	debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads);
	debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores);
	debugfs_create_u64("failed_stores", S_IRUGO, root,
				&frontswap_failed_stores);
	debugfs_create_u64("invalidates", S_IRUGO,
				root, &frontswap_invalidates);
#endif
	frontswap_enabled = true;
	return 0;
}

module_init(init_frontswap);