/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2013, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained by:	Xavier Deguillard <xdeguillard@vmware.com>
 *			Philip Moltmann <moltmann@vmware.com>
 */

/*
 * This is the VMware physical memory management driver for Linux. The
 * driver acts like a "balloon" that can be inflated to reclaim physical
 * pages by reserving them in the guest and invalidating them in the
 * monitor, freeing up the underlying machine pages so they can be
 * allocated to other guests. The balloon can also be deflated to allow
 * the guest to use more physical memory. Higher level policies can
 * control the sizes of balloons in VMs in order to manage physical
 * memory resources.
 */

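/*
 * Uncommenting the DEBUG define below enables the pr_debug() messages
 * in this file when dynamic debug is not in use.
 */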
//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_VERSION("1.3.4.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

/*
 * Various constants controlling the rate of inflating/deflating the
 * balloon, measured in pages.
 */

/*
 * Rates of memory allocation when the guest experiences memory pressure
 * (driver performs sleeping allocations).
 */
#define VMW_BALLOON_RATE_ALLOC_MIN	512U
#define VMW_BALLOON_RATE_ALLOC_MAX	2048U
#define VMW_BALLOON_RATE_ALLOC_INC	16U

/*
 * When the guest is under memory pressure, use a reduced page allocation
 * rate for the next several cycles.
 */
#define VMW_BALLOON_SLOW_CYCLES		4

/*
 * Use __GFP_HIGHMEM to allow pages from the HIGHMEM zone. We don't
 * allow waiting (__GFP_WAIT) for NOSLEEP page allocations. Use
 * __GFP_NOWARN to suppress page allocation failure warnings.
 */
#define VMW_PAGE_ALLOC_NOSLEEP		(__GFP_HIGHMEM|__GFP_NOWARN)

/*
 * Use GFP_HIGHUSER when executing in a separate kernel thread
 * context and the allocation can sleep. This is less stressful to
 * the guest memory system, since it allows the thread to block
 * while memory is reclaimed, and won't take pages from emergency
 * low-memory pools.
 */
#define VMW_PAGE_ALLOC_CANSLEEP		(GFP_HIGHUSER)

/* Maximum number of refused pages we accumulate during an inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */

enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated with any capability.
	 */
	VMW_BALLOON_BASIC_CMDS		= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS	= (1 << 2)
};

#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS)

/*
 * Backdoor command availability:
 *
 * START, GET_TARGET and GUEST_ID are always available.
 *
 * VMW_BALLOON_BASIC_CMDS:
 *	LOCK and UNLOCK commands,
 * VMW_BALLOON_BATCHED_CMDS:
 *	BATCHED_LOCK and BATCHED_UNLOCK commands.
 */
#define VMW_BALLOON_CMD_START		0
#define VMW_BALLOON_CMD_GET_TARGET	1
#define VMW_BALLOON_CMD_LOCK		2
#define VMW_BALLOON_CMD_UNLOCK		3
#define VMW_BALLOON_CMD_GUEST_ID	4
#define VMW_BALLOON_CMD_BATCHED_LOCK	6
#define VMW_BALLOON_CMD_BATCHED_UNLOCK	7

/* error codes */
#define VMW_BALLOON_SUCCESS			0
#define VMW_BALLOON_FAILURE			-1
#define VMW_BALLOON_ERROR_CMD_INVALID		1
#define VMW_BALLOON_ERROR_PPN_INVALID		2
#define VMW_BALLOON_ERROR_PPN_LOCKED		3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED		4
#define VMW_BALLOON_ERROR_PPN_PINNED		5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED		6
#define VMW_BALLOON_ERROR_RESET			7
#define VMW_BALLOON_ERROR_BUSY			8

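/*
 * Returned by the START command when the host supports capability
 * negotiation; the host's capability set is then passed back as the
 * command result (see vmballoon_send_start()).
 */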
#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)

/* Batch page description */

/*
 * Layout of a page in the batch page:
 *
 *	+-------------+----------+--------+
 *	|             |          |        |
 *	| Page number | Reserved | Status |
 *	|             |          |        |
 *	+-------------+----------+--------+
 *	64            PAGE_SHIFT 6        0
 *
 * For now only 4K pages are supported, but we can easily support large pages
 * by using bits in the reserved field.
 *
 * The reserved field should be set to 0.
 */
#define VMW_BALLOON_BATCH_MAX_PAGES	(PAGE_SIZE / sizeof(u64))
#define VMW_BALLOON_BATCH_STATUS_MASK	((1UL << 5) - 1)
#define VMW_BALLOON_BATCH_PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

struct vmballoon_batch_page {
	u64 pages[VMW_BALLOON_BATCH_MAX_PAGES];
};

static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)
{
	return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;
}

static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,
				int idx)
{
	return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
}

static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,
				u64 pa)
{
	batch->pages[idx] = pa;
}

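/*
 * Illustrative example of the encoding above: with 4K pages, a page
 * whose PFN is 0x1234 is staged by vmballoon_batch_set_pa() as
 * 0x1234 << PAGE_SHIFT == 0x1234000. After the batched hypercall the
 * hypervisor writes a per-page status into the low bits of the same
 * slot, which is read back with vmballoon_batch_get_status().
 */

/*
 * Issue a balloon hypercall ("backdoor" call): the magic number goes
 * in %eax, the command in %ecx and the port in %edx, while the two
 * command-specific arguments travel in %ebx and %esi. The host returns
 * the status in %eax and the command result in %ebx, except for START
 * where the result comes back in %ecx. For example,
 * vmballoon_send_get_target() issues:
 *
 *	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
 */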
#define VMWARE_BALLOON_CMD(cmd, arg1, arg2, result)		\
({								\
	unsigned long __status, __dummy1, __dummy2, __dummy3;	\
	__asm__ __volatile__ ("inl %%dx" :			\
		"=a"(__status),					\
		"=c"(__dummy1),					\
		"=d"(__dummy2),					\
		"=b"(result),					\
		"=S" (__dummy3) :				\
		"0"(VMW_BALLOON_HV_MAGIC),			\
		"1"(VMW_BALLOON_CMD_##cmd),			\
		"2"(VMW_BALLOON_HV_PORT),			\
		"3"(arg1),					\
		"4" (arg2) :					\
		"memory");					\
	if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START)	\
		result = __dummy1;				\
	result &= -1UL;						\
	__status & -1UL;					\
})

#ifdef CONFIG_DEBUG_FS
struct vmballoon_stats {
	unsigned int timer;

	/* allocation statistics */
	unsigned int alloc;
	unsigned int alloc_fail;
	unsigned int sleep_alloc;
	unsigned int sleep_alloc_fail;
	unsigned int refused_alloc;
	unsigned int refused_free;
	unsigned int free;

	/* monitor operations */
	unsigned int lock;
	unsigned int lock_fail;
	unsigned int unlock;
	unsigned int unlock_fail;
	unsigned int target;
	unsigned int target_fail;
	unsigned int start;
	unsigned int start_fail;
	unsigned int guest_type;
	unsigned int guest_type_fail;
};

#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif

struct vmballoon;

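/*
 * Protocol operations, selected at reset time according to the
 * capabilities negotiated with the host: one implementation uses the
 * single-page LOCK/UNLOCK commands, the other the batched variants.
 */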
struct vmballoon_ops {
	void (*add_page)(struct vmballoon *b, int idx, struct page *p);
	int (*lock)(struct vmballoon *b, unsigned int num_pages,
			unsigned int *target);
	int (*unlock)(struct vmballoon *b, unsigned int num_pages,
			unsigned int *target);
};

struct vmballoon {

	/* list of reserved physical pages */
	struct list_head pages;

	/* transient list of non-balloonable pages */
	struct list_head refused_pages;
	unsigned int n_refused_pages;

	/* balloon size in pages */
	unsigned int size;
	unsigned int target;

	/* reset flag */
	bool reset_required;

	/* adjustment rates (pages per second) */
	unsigned int rate_alloc;

	/* slowdown page allocations for next few cycles */
	unsigned int slow_allocation_cycles;

	unsigned long capabilities;

	struct vmballoon_batch_page *batch_page;
	unsigned int batch_max_pages;
	struct page *page;

	const struct vmballoon_ops *ops;

#ifdef CONFIG_DEBUG_FS
	/* statistics */
	struct vmballoon_stats stats;

	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	struct sysinfo sysinfo;

	struct delayed_work dwork;
};

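/* The one balloon instance for this guest; all driver state lives here. */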
static struct vmballoon balloon;

/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities, dummy = 0;

	STATS_INC(b->stats.start);

	status = VMWARE_BALLOON_CMD(START, req_caps, dummy, capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		return true;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		return true;
	}

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.start_fail);
	return false;
}

static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
{
	switch (status) {
	case VMW_BALLOON_SUCCESS:
		return true;

	case VMW_BALLOON_ERROR_RESET:
		b->reset_required = true;
		/* fall through */

	default:
		return false;
	}
}

/*
 * Communicate the guest type to the host so that it can adjust the
 * ballooning algorithm to the one most appropriate for the guest. This
 * command is normally issued after sending the "start" command and is
 * part of the standard reset sequence.
 */
static bool vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status, dummy = 0;

	status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy,
				dummy);

	STATS_INC(b->stats.guest_type);

	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.guest_type_fail);
	return false;
}

/*
 * Retrieve the desired balloon size from the host.
 */
static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
{
	unsigned long status;
	unsigned long target;
	unsigned long limit;
	unsigned long dummy = 0;
	u32 limit32;

	/*
	 * si_meminfo() is cheap. Moreover, we want to provide dynamic
	 * max balloon size later. So let us call si_meminfo() every
	 * iteration.
	 */
	si_meminfo(&b->sysinfo);
	limit = b->sysinfo.totalram;

	/* Ensure limit fits in 32 bits */
	limit32 = (u32)limit;
	if (limit != limit32)
		return false;

	/* update stats */
	STATS_INC(b->stats.target);

	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
	if (vmballoon_check_status(b, status)) {
		*new_target = target;
		return true;
	}

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.target_fail);
	return false;
}

/*
 * Notify the host about an allocated page so that the host can use it
 * without fear that the guest will need it. The host may reject some
 * pages; we need to check the return value and possibly submit a
 * different page.
 */
static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
				unsigned int *hv_status, unsigned int *target)
{
	unsigned long status, dummy = 0;
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return -1;

	STATS_INC(b->stats.lock);

	*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target);
	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.lock_fail);
	return 1;
}

static int vmballoon_send_batched_lock(struct vmballoon *b,
		unsigned int num_pages, unsigned int *target)
{
	unsigned long status;
	unsigned long pfn = page_to_pfn(b->page);

	STATS_INC(b->stats.lock);

	status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages, *target);
	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.lock_fail);
	return 1;
}

/*
 * Notify the host that the guest intends to release a given page back
 * into the pool of available (to the guest) pages.
 */
static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn,
						unsigned int *target)
{
	unsigned long status, dummy = 0;
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return false;

	STATS_INC(b->stats.unlock);

	status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, *target);
	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.unlock_fail);
	return false;
}

static bool vmballoon_send_batched_unlock(struct vmballoon *b,
		unsigned int num_pages, unsigned int *target)
{
	unsigned long status;
	unsigned long pfn = page_to_pfn(b->page);

	STATS_INC(b->stats.unlock);

	status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages, *target);
	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.unlock_fail);
	return false;
}

/*
 * Quickly release all pages allocated for the balloon. This function is
 * called when the host decides to "reset" the balloon for one reason or
 * another. Unlike a normal "deflate" we do not (shall not) notify the
 * host of the pages being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, &b->pages, lru) {
		list_del(&page->lru);
		__free_page(page);
		STATS_INC(b->stats.free);
		b->size--;
		cond_resched();
	}

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		if (b->batch_page)
			vunmap(b->batch_page);

		if (b->page)
			__free_page(b->page);
	}
}

/*
 * Notify the host of a ballooned page. If the host rejects the page,
 * put it on the list of refused pages; these pages are then released
 * at the end of the inflation cycle.
 */
static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
				unsigned int *target)
{
	int locked, hv_status;
	struct page *page = b->page;

	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
								target);
	if (locked > 0) {
		STATS_INC(b->stats.refused_alloc);

		if (hv_status == VMW_BALLOON_ERROR_RESET ||
				hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
			__free_page(page);
			return -EIO;
		}

		/*
		 * Place page on the list of non-balloonable pages
		 * and retry allocation, unless we already accumulated
		 * too many of them, in which case take a breather.
		 */
		if (b->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
			b->n_refused_pages++;
			list_add(&page->lru, &b->refused_pages);
		} else {
			__free_page(page);
		}
		return -EIO;
	}

	/* track allocated page */
	list_add(&page->lru, &b->pages);

	/* update balloon size */
	b->size++;

	return 0;
}

static int vmballoon_lock_batched_page(struct vmballoon *b,
		unsigned int num_pages, unsigned int *target)
{
	int locked, i;

	locked = vmballoon_send_batched_lock(b, num_pages, target);
	if (locked > 0) {
		for (i = 0; i < num_pages; i++) {
			u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
			struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

			__free_page(p);
		}

		return -EIO;
	}

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

		locked = vmballoon_batch_get_status(b->batch_page, i);

		switch (locked) {
		case VMW_BALLOON_SUCCESS:
			list_add(&p->lru, &b->pages);
			b->size++;
			break;
		case VMW_BALLOON_ERROR_PPN_PINNED:
		case VMW_BALLOON_ERROR_PPN_INVALID:
			if (b->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
				list_add(&p->lru, &b->refused_pages);
				b->n_refused_pages++;
				break;
			}
			/* Fallthrough */
		case VMW_BALLOON_ERROR_RESET:
		case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
			__free_page(p);
			break;
		default:
			/* This should never happen */
			WARN_ON_ONCE(true);
		}
	}

	return 0;
}

/*
 * Release the page allocated for the balloon. Note that we first notify
 * the host so it can make sure the page will be available for the guest
 * to use, if needed.
 */
static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
				unsigned int *target)
{
	struct page *page = b->page;

	if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) {
		list_add(&page->lru, &b->pages);
		return -EIO;
	}

	/* deallocate page */
	__free_page(page);
	STATS_INC(b->stats.free);

	/* update balloon size */
	b->size--;

	return 0;
}

static int vmballoon_unlock_batched_page(struct vmballoon *b,
		unsigned int num_pages, unsigned int *target)
{
	int locked, i, ret = 0;
	bool hv_success;

	hv_success = vmballoon_send_batched_unlock(b, num_pages, target);
	if (!hv_success)
		ret = -EIO;

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

		locked = vmballoon_batch_get_status(b->batch_page, i);
		if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
			/*
			 * That page wasn't successfully unlocked by the
			 * hypervisor, re-add it to the list of pages owned by
			 * the balloon driver.
			 */
			list_add(&p->lru, &b->pages);
		} else {
			/* deallocate page */
			__free_page(p);
			STATS_INC(b->stats.free);

			/* update balloon size */
			b->size--;
		}
	}

	return ret;
}

/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, &b->refused_pages, lru) {
		list_del(&page->lru);
		__free_page(page);
		STATS_INC(b->stats.refused_free);
	}

	b->n_refused_pages = 0;
}

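/*
 * Stage a freshly allocated page for the next lock/unlock operation:
 * the basic protocol simply remembers the single page, while the
 * batched protocol records its physical address in the batch page.
 */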
static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
{
	b->page = p;
}

static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
				struct page *p)
{
	vmballoon_batch_set_pa(b->batch_page, idx,
			(u64)page_to_pfn(p) << PAGE_SHIFT);
}

/*
 * Inflate the balloon towards its target size. Note that we try to limit
 * the rate of allocation to make sure we are not choking the rest of the
 * system.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	unsigned rate;
	unsigned int allocations = 0;
	unsigned int num_pages = 0;
	int error = 0;
	gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/*
	 * First try NOSLEEP page allocations to inflate balloon.
	 *
	 * If we do not throttle nosleep allocations, we can drain all
	 * free pages in the guest quickly (if the balloon target is high).
	 * As a side-effect, draining free pages helps to inform (force)
	 * the guest to start swapping if the balloon target is not met yet,
	 * which is a desired behavior. However, the balloon driver can
	 * consume all available CPU cycles if too many pages are allocated
	 * in a second. Therefore, we throttle nosleep allocations even when
	 * the guest is not under memory pressure. OTOH, if we have already
	 * predicted that the guest is under memory pressure, then we
	 * slow down page allocations considerably.
	 */

	/*
	 * Start with the NOSLEEP allocation rate, which may be higher
	 * than the sleeping allocation rate.
	 */
	rate = b->slow_allocation_cycles ? b->rate_alloc : UINT_MAX;

	pr_debug("%s - goal: %d, no-sleep rate: %u, sleep rate: %d\n",
		 __func__, b->target - b->size, rate, b->rate_alloc);

	while (!b->reset_required &&
		b->size < b->target && num_pages < b->target - b->size) {
		struct page *page;

		if (flags == VMW_PAGE_ALLOC_NOSLEEP)
			STATS_INC(b->stats.alloc);
		else
			STATS_INC(b->stats.sleep_alloc);

		page = alloc_page(flags);
		if (!page) {
			if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
				/*
				 * CANSLEEP page allocation failed, so guest
				 * is under severe memory pressure. Quickly
				 * decrease allocation rate.
				 */
				b->rate_alloc = max(b->rate_alloc / 2,
						VMW_BALLOON_RATE_ALLOC_MIN);
				STATS_INC(b->stats.sleep_alloc_fail);
				break;
			}
			STATS_INC(b->stats.alloc_fail);

			/*
			 * NOSLEEP page allocation failed, so the guest is
			 * under memory pressure. Let us slow down page
			 * allocations for the next few cycles so that the
			 * guest gets out of memory pressure. Also, if we
			 * already allocated b->rate_alloc pages, let's pause,
			 * otherwise switch to sleeping allocations.
			 */
			b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;

			if (allocations >= b->rate_alloc)
				break;

			flags = VMW_PAGE_ALLOC_CANSLEEP;
			/* Lower rate for sleeping allocations. */
			rate = b->rate_alloc;
			continue;
		}

		/* count this allocation toward the rate limit */
		allocations++;

		b->ops->add_page(b, num_pages++, page);
		if (num_pages == b->batch_max_pages) {
			error = b->ops->lock(b, num_pages, &b->target);
			num_pages = 0;
			if (error)
				break;
		}

		cond_resched();

		if (allocations >= rate) {
			/* We allocated enough pages, let's take a break. */
			break;
		}
	}

	if (num_pages > 0)
		b->ops->lock(b, num_pages, &b->target);

	/*
	 * We reached our goal without failures so try increasing
	 * allocation rate.
	 */
	if (error == 0 && allocations >= b->rate_alloc) {
		unsigned int mult = allocations / b->rate_alloc;

		b->rate_alloc =
			min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
			    VMW_BALLOON_RATE_ALLOC_MAX);
	}

	vmballoon_release_refused_pages(b);
}

/*
 * Decrease the size of the balloon allowing the guest to use more memory.
 */
static void vmballoon_deflate(struct vmballoon *b)
{
	struct page *page, *next;
	unsigned int i = 0;
	unsigned int num_pages = 0;
	int error;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/* free pages to reach target */
	list_for_each_entry_safe(page, next, &b->pages, lru) {
		list_del(&page->lru);
		b->ops->add_page(b, num_pages++, page);

		if (num_pages == b->batch_max_pages) {
			error = b->ops->unlock(b, num_pages, &b->target);
			num_pages = 0;
			if (error)
				return;
		}

		if (b->reset_required || ++i >= b->size - b->target)
			break;

		cond_resched();
	}

	if (num_pages > 0)
		b->ops->unlock(b, num_pages, &b->target);
}

static const struct vmballoon_ops vmballoon_basic_ops = {
	.add_page = vmballoon_add_page,
	.lock = vmballoon_lock_page,
	.unlock = vmballoon_unlock_page
};

static const struct vmballoon_ops vmballoon_batched_ops = {
	.add_page = vmballoon_add_batched_page,
	.lock = vmballoon_lock_batched_page,
	.unlock = vmballoon_unlock_batched_page
};

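/*
 * Allocate the page that is shared with the hypervisor for batched
 * commands and map it; the driver fills batch entries through the
 * mapping, and the page's PFN is what BATCHED_LOCK/BATCHED_UNLOCK
 * transmit to the host.
 */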
static bool vmballoon_init_batching(struct vmballoon *b)
{
	b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
	if (!b->page)
		return false;

	b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
	if (!b->batch_page) {
		__free_page(b->page);
		return false;
	}

	return true;
}

/*
 * Perform standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting the protocol. This operation normally
 * happens when the host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		return;

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		b->ops = &vmballoon_batched_ops;
		b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
		if (!vmballoon_init_batching(b)) {
			/*
			 * We failed to initialize batching, inform the monitor
			 * about it by sending a null capability.
			 *
			 * The guest will retry in one second.
			 */
			vmballoon_send_start(b, 0);
			return;
		}
	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		b->ops = &vmballoon_basic_ops;
		b->batch_max_pages = 1;
	}

	b->reset_required = false;
	if (!vmballoon_send_guest_id(b))
		pr_err("failed to send guest ID to the host\n");
}

/*
 * Balloon work function: reset protocol, if needed, get the new size and
 * adjust balloon as needed. Repeat in 1 sec.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
	unsigned int target;

	STATS_INC(b->stats.timer);

	if (b->reset_required)
		vmballoon_reset(b);

	if (b->slow_allocation_cycles > 0)
		b->slow_allocation_cycles--;

	if (vmballoon_send_get_target(b, &target)) {
		/* update target, adjust size */
		b->target = target;

		if (b->size < target)
			vmballoon_inflate(b);
		else if (b->size > target)
			vmballoon_deflate(b);
	}

	/*
	 * We are using a freezable workqueue so that balloon operations are
	 * stopped while the system transitions to/from sleep/hibernation.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	struct vmballoon_stats *stats = &b->stats;

	/* format capabilities info */
	seq_printf(f,
		   "balloon capabilities:   %#4x\n"
		   "used capabilities:      %#4lx\n",
		   VMW_BALLOON_CAPABILITIES, b->capabilities);

	/* format size info */
	seq_printf(f,
		   "target:             %8d pages\n"
		   "current:            %8d pages\n",
		   b->target, b->size);

	/* format rate info */
	seq_printf(f,
		   "rateSleepAlloc:     %8d pages/sec\n",
		   b->rate_alloc);

	seq_printf(f,
		   "\n"
		   "timer:              %8u\n"
		   "start:              %8u (%4u failed)\n"
		   "guestType:          %8u (%4u failed)\n"
		   "lock:               %8u (%4u failed)\n"
		   "unlock:             %8u (%4u failed)\n"
		   "target:             %8u (%4u failed)\n"
		   "primNoSleepAlloc:   %8u (%4u failed)\n"
		   "primCanSleepAlloc:  %8u (%4u failed)\n"
		   "primFree:           %8u\n"
		   "errAlloc:           %8u\n"
		   "errFree:            %8u\n",
		   stats->timer,
		   stats->start, stats->start_fail,
		   stats->guest_type, stats->guest_type_fail,
		   stats->lock, stats->lock_fail,
		   stats->unlock, stats->unlock_fail,
		   stats->target, stats->target_fail,
		   stats->alloc, stats->alloc_fail,
		   stats->sleep_alloc, stats->sleep_alloc_fail,
		   stats->free,
		   stats->refused_alloc, stats->refused_free);

	return 0;
}

static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vmballoon_debug_show, inode->i_private);
}

static const struct file_operations vmballoon_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= vmballoon_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init vmballoon_debugfs_init(struct vmballoon *b)
{
	int error;

	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
					   &vmballoon_debug_fops);
	if (IS_ERR(b->dbg_entry)) {
		error = PTR_ERR(b->dbg_entry);
		pr_err("failed to create debugfs entry, error: %d\n", error);
		return error;
	}

	return 0;
}

static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	debugfs_remove(b->dbg_entry);
}

#else

static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
	return 0;
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif	/* CONFIG_DEBUG_FS */

static int __init vmballoon_init(void)
{
	int error;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper != &x86_hyper_vmware)
		return -ENODEV;

	INIT_LIST_HEAD(&balloon.pages);
	INIT_LIST_HEAD(&balloon.refused_pages);

	/* initialize rates */
	balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	/*
	 * Start balloon.
	 */
	if (!vmballoon_send_start(&balloon, VMW_BALLOON_CAPABILITIES)) {
		pr_err("failed to send start command to the host\n");
		return -EIO;
	}

	if ((balloon.capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		balloon.ops = &vmballoon_batched_ops;
		balloon.batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
		if (!vmballoon_init_batching(&balloon)) {
			pr_err("failed to init batching\n");
			return -EIO;
		}
	} else if ((balloon.capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		balloon.ops = &vmballoon_basic_ops;
		balloon.batch_max_pages = 1;
	}

	if (!vmballoon_send_guest_id(&balloon)) {
		pr_err("failed to send guest ID to the host\n");
		return -EIO;
	}

	error = vmballoon_debugfs_init(&balloon);
	if (error)
		return error;

	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	return 0;
}
module_init(vmballoon_init);

static void __exit vmballoon_exit(void)
{
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon, VMW_BALLOON_CAPABILITIES);
	vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);