/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2013, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained by: Xavier Deguillard <xdeguillard@vmware.com>
 *                Philip Moltmann <moltmann@vmware.com>
 */

/*
 * This is the VMware physical memory management driver for Linux. The
 * driver acts like a "balloon" that can be inflated to reclaim physical
 * pages by reserving them in the guest and invalidating them in the
 * monitor, freeing up the underlying machine pages so they can be
 * allocated to other guests. The balloon can also be deflated to allow
 * the guest to use more physical memory. Higher level policies can
 * control the sizes of balloons in VMs in order to manage physical
 * memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_VERSION("1.3.1.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

/*
 * Various constants controlling the rate of inflating/deflating the
 * balloon, measured in pages.
 */

/*
 * Rate of allocating memory when there is no memory pressure
 * (driver performs non-sleeping allocations).
 */
#define VMW_BALLOON_NOSLEEP_ALLOC_MAX	16384U

/*
 * Rates of memory allocation when guest experiences memory pressure
 * (driver performs sleeping allocations).
 */
#define VMW_BALLOON_RATE_ALLOC_MIN	512U
#define VMW_BALLOON_RATE_ALLOC_MAX	2048U
#define VMW_BALLOON_RATE_ALLOC_INC	16U

/*
 * Rates for releasing pages while deflating balloon.
 */
#define VMW_BALLOON_RATE_FREE_MIN	512U
#define VMW_BALLOON_RATE_FREE_MAX	16384U
#define VMW_BALLOON_RATE_FREE_INC	16U

/*
 * When the guest is under memory pressure, use a reduced page allocation
 * rate for the next several cycles.
 */
#define VMW_BALLOON_SLOW_CYCLES		4

/*
 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
 * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
 * __GFP_NOWARN to suppress page allocation failure warnings.
 */
#define VMW_PAGE_ALLOC_NOSLEEP		(__GFP_HIGHMEM|__GFP_NOWARN)

/*
 * Use GFP_HIGHUSER when executing in a separate kernel thread
 * context and allocation can sleep. This is less stressful to
 * the guest memory system, since it allows the thread to block
 * while memory is reclaimed, and won't take pages from emergency
 * low-memory pools.
 */
#define VMW_PAGE_ALLOC_CANSLEEP		(GFP_HIGHUSER)

/* Maximum number of page allocations without yielding processor */
#define VMW_BALLOON_YIELD_THRESHOLD	1024

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */

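/*
 * Commands are passed to the hypervisor through a backdoor I/O port:
 * the magic number is loaded into %eax, the command into %ecx and the
 * port number into %dx (see VMWARE_BALLOON_CMD below).
 */
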
enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated to any capability.
	 */
	VMW_BALLOON_BASIC_CMDS		= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS	= (1 << 2)
};

#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS)

/*
 * Backdoor commands availability:
 *
 * START, GET_TARGET and GUEST_ID are always available,
 *
 * VMW_BALLOON_BASIC_CMDS:
 *	LOCK and UNLOCK commands,
 * VMW_BALLOON_BATCHED_CMDS:
 *	BATCHED_LOCK and BATCHED_UNLOCK commands.
 */
#define VMW_BALLOON_CMD_START		0
#define VMW_BALLOON_CMD_GET_TARGET	1
#define VMW_BALLOON_CMD_LOCK		2
#define VMW_BALLOON_CMD_UNLOCK		3
#define VMW_BALLOON_CMD_GUEST_ID	4
#define VMW_BALLOON_CMD_BATCHED_LOCK	6
#define VMW_BALLOON_CMD_BATCHED_UNLOCK	7

/* error codes */
#define VMW_BALLOON_SUCCESS			0
#define VMW_BALLOON_FAILURE			-1
#define VMW_BALLOON_ERROR_CMD_INVALID		1
#define VMW_BALLOON_ERROR_PPN_INVALID		2
#define VMW_BALLOON_ERROR_PPN_LOCKED		3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED		4
#define VMW_BALLOON_ERROR_PPN_PINNED		5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED		6
#define VMW_BALLOON_ERROR_RESET			7
#define VMW_BALLOON_ERROR_BUSY			8

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)

/* Batch page description */

/*
 * Layout of a page in the batch page:
 *
 *	+-------------+----------+--------+
 *	|             |          |        |
 *	| Page number | Reserved | Status |
 *	|             |          |        |
 *	+-------------+----------+--------+
 *	64           PAGE_SHIFT  6        0
 *
 * For now only 4K pages are supported, but we can easily support large pages
 * by using bits in the reserved field.
 *
 * The reserved field should be set to 0.
 */
#define VMW_BALLOON_BATCH_MAX_PAGES	(PAGE_SIZE / sizeof(u64))
#define VMW_BALLOON_BATCH_STATUS_MASK	((1UL << 5) - 1)
#define VMW_BALLOON_BATCH_PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

struct vmballoon_batch_page {
	u64 pages[VMW_BALLOON_BATCH_MAX_PAGES];
};

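/*
 * Illustrative example (not used by the driver): for the page at
 * PFN 0x1234 the driver would store 0x1234UL << PAGE_SHIFT in a batch
 * slot; after the hypervisor processes the batch, the low bits of the
 * same slot carry the per-page status code, which the accessors below
 * extract with VMW_BALLOON_BATCH_STATUS_MASK.
 */
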
static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)
{
	return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;
}

static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,
				int idx)
{
	return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
}

static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,
				u64 pa)
{
	batch->pages[idx] = pa;
}


#define VMWARE_BALLOON_CMD(cmd, arg1, arg2, result)		\
({								\
	unsigned long __status, __dummy1, __dummy2, __dummy3;	\
	__asm__ __volatile__ ("inl %%dx" :			\
		"=a"(__status),					\
		"=c"(__dummy1),					\
		"=d"(__dummy2),					\
		"=b"(result),					\
		"=S" (__dummy3) :				\
		"0"(VMW_BALLOON_HV_MAGIC),			\
		"1"(VMW_BALLOON_CMD_##cmd),			\
		"2"(VMW_BALLOON_HV_PORT),			\
		"3"(arg1),					\
		"4" (arg2) :					\
		"memory");					\
	if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START)	\
		result = __dummy1;				\
	result &= -1UL;						\
	__status & -1UL;					\
})

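/*
 * Example usage (an illustrative sketch mirroring
 * vmballoon_send_get_target() below):
 *
 *	unsigned long dummy = 0, target, status;
 *
 *	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
 *	if (status == VMW_BALLOON_SUCCESS)
 *		... target now holds the desired balloon size in pages ...
 *
 * Note that the START command returns its result in %ecx rather than
 * %ebx, which is why the macro special-cases it.
 */
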
#ifdef CONFIG_DEBUG_FS
struct vmballoon_stats {
	unsigned int timer;

	/* allocation statistics */
	unsigned int alloc;
	unsigned int alloc_fail;
	unsigned int sleep_alloc;
	unsigned int sleep_alloc_fail;
	unsigned int refused_alloc;
	unsigned int refused_free;
	unsigned int free;

	/* monitor operations */
	unsigned int lock;
	unsigned int lock_fail;
	unsigned int unlock;
	unsigned int unlock_fail;
	unsigned int target;
	unsigned int target_fail;
	unsigned int start;
	unsigned int start_fail;
	unsigned int guest_type;
	unsigned int guest_type_fail;
};

#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif

struct vmballoon;

struct vmballoon_ops {
	void (*add_page)(struct vmballoon *b, int idx, struct page *p);
	int (*lock)(struct vmballoon *b, unsigned int num_pages);
	int (*unlock)(struct vmballoon *b, unsigned int num_pages);
};

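/*
 * The basic ops issue one hypervisor call per page, while the batched
 * ops stage up to VMW_BALLOON_BATCH_MAX_PAGES entries in the batch page
 * and lock/unlock them all with a single call. The set in use is chosen
 * at start/reset time from the capabilities advertised by the host.
 */
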
struct vmballoon {

	/* list of reserved physical pages */
	struct list_head pages;

	/* transient list of non-balloonable pages */
	struct list_head refused_pages;
	unsigned int n_refused_pages;

	/* balloon size in pages */
	unsigned int size;
	unsigned int target;

	/* reset flag */
	bool reset_required;

	/* adjustment rates (pages per second) */
	unsigned int rate_alloc;
	unsigned int rate_free;

	/* slowdown page allocations for next few cycles */
	unsigned int slow_allocation_cycles;

	unsigned long capabilities;

	struct vmballoon_batch_page *batch_page;
	unsigned int batch_max_pages;
	struct page *page;

	const struct vmballoon_ops *ops;

#ifdef CONFIG_DEBUG_FS
	/* statistics */
	struct vmballoon_stats stats;

	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	struct sysinfo sysinfo;

	struct delayed_work dwork;
};

static struct vmballoon balloon;

/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities, dummy = 0;

	STATS_INC(b->stats.start);

	status = VMWARE_BALLOON_CMD(START, req_caps, dummy, capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		return true;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		return true;
	}

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.start_fail);
	return false;
}

static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
{
	switch (status) {
	case VMW_BALLOON_SUCCESS:
		return true;

	case VMW_BALLOON_ERROR_RESET:
		b->reset_required = true;
		/* fall through */

	default:
		return false;
	}
}

/*
 * Communicate guest type to the host so that it can adjust the ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending the "start" command and is part of
 * the standard reset sequence.
 */
static bool vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status, dummy = 0;

	status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy,
				dummy);

	STATS_INC(b->stats.guest_type);

	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.guest_type_fail);
	return false;
}

/*
 * Retrieve desired balloon size from the host.
 */
static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
{
	unsigned long status;
	unsigned long target;
	unsigned long limit;
	unsigned long dummy = 0;
	u32 limit32;

	/*
	 * si_meminfo() is cheap. Moreover, we want to provide dynamic
	 * max balloon size later. So let us call si_meminfo() every
	 * iteration.
	 */
	si_meminfo(&b->sysinfo);
	limit = b->sysinfo.totalram;

	/* Ensure limit fits in 32-bits */
	limit32 = (u32)limit;
	if (limit != limit32)
		return false;

	/* update stats */
	STATS_INC(b->stats.target);

	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
	if (vmballoon_check_status(b, status)) {
		*new_target = target;
		return true;
	}

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.target_fail);
	return false;
}

/*
 * Notify the host about an allocated page so that the host can use it
 * without fear that the guest will need it. The host may reject some
 * pages; we need to check the return value and maybe submit a different
 * page.
 */
static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
				unsigned int *hv_status)
{
	unsigned long status, dummy = 0;
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return -1;

	STATS_INC(b->stats.lock);

	*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, dummy);
	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.lock_fail);
	return 1;
}

static int vmballoon_send_batched_lock(struct vmballoon *b,
				unsigned int num_pages)
{
	unsigned long status, dummy;
	unsigned long pfn = page_to_pfn(b->page);

	STATS_INC(b->stats.lock);

	status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages, dummy);
	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.lock_fail);
	return 1;
}

/*
 * Notify the host that the guest intends to release the given page back
 * into the pool of available (to the guest) pages.
 */
static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn)
{
	unsigned long status, dummy = 0;
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return false;

	STATS_INC(b->stats.unlock);

	status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, dummy);
	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.unlock_fail);
	return false;
}

static bool vmballoon_send_batched_unlock(struct vmballoon *b,
				unsigned int num_pages)
{
	unsigned long status, dummy;
	unsigned long pfn = page_to_pfn(b->page);

	STATS_INC(b->stats.unlock);

	status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages, dummy);
	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.unlock_fail);
	return false;
}

/*
 * Quickly release all pages allocated for the balloon. This function is
 * called when the host decides to "reset" the balloon for one reason or
 * another. Unlike normal "deflate" we do not (shall not) notify the host
 * of the pages being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	struct page *page, *next;
	unsigned int count = 0;

	list_for_each_entry_safe(page, next, &b->pages, lru) {
		list_del(&page->lru);
		__free_page(page);
		STATS_INC(b->stats.free);
		b->size--;

		if (++count >= b->rate_free) {
			count = 0;
			cond_resched();
		}
	}

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		if (b->batch_page)
			vunmap(b->batch_page);

		if (b->page)
			__free_page(b->page);
	}
}

/*
 * Notify the host of a ballooned page. If the host rejects the page,
 * put it on the list of refused pages; these pages are then released
 * at the end of the inflation cycle.
 */
static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages)
{
	int locked, hv_status;
	struct page *page = b->page;

	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
	if (locked > 0) {
		STATS_INC(b->stats.refused_alloc);

		if (hv_status == VMW_BALLOON_ERROR_RESET ||
				hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
			__free_page(page);
			return -EIO;
		}

		/*
		 * Place page on the list of non-balloonable pages
		 * and retry allocation, unless we already accumulated
		 * too many of them, in which case take a breather.
		 */
		if (b->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
			b->n_refused_pages++;
			list_add(&page->lru, &b->refused_pages);
		} else {
			__free_page(page);
		}
		return -EIO;
	}

	/* track allocated page */
	list_add(&page->lru, &b->pages);

	/* update balloon size */
	b->size++;

	return 0;
}

static int vmballoon_lock_batched_page(struct vmballoon *b,
				unsigned int num_pages)
{
	int locked, i;

	locked = vmballoon_send_batched_lock(b, num_pages);
	if (locked > 0) {
		for (i = 0; i < num_pages; i++) {
			u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
			struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

			__free_page(p);
		}

		return -EIO;
	}

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

		locked = vmballoon_batch_get_status(b->batch_page, i);

		switch (locked) {
		case VMW_BALLOON_SUCCESS:
			list_add(&p->lru, &b->pages);
			b->size++;
			break;
		case VMW_BALLOON_ERROR_PPN_PINNED:
		case VMW_BALLOON_ERROR_PPN_INVALID:
			if (b->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
				list_add(&p->lru, &b->refused_pages);
				b->n_refused_pages++;
				break;
			}
			/* Fallthrough */
		case VMW_BALLOON_ERROR_RESET:
		case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
			__free_page(p);
			break;
		default:
			/* This should never happen */
			WARN_ON_ONCE(true);
		}
	}

	return 0;
}

/*
 * Release the page allocated for the balloon. Note that we first notify
 * the host so it can make sure the page will be available for the guest
 * to use, if needed.
 */
static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages)
{
	struct page *page = b->page;

	if (!vmballoon_send_unlock_page(b, page_to_pfn(page))) {
		list_add(&page->lru, &b->pages);
		return -EIO;
	}

	/* deallocate page */
	__free_page(page);
	STATS_INC(b->stats.free);

	/* update balloon size */
	b->size--;

	return 0;
}

static int vmballoon_unlock_batched_page(struct vmballoon *b,
				unsigned int num_pages)
{
	int locked, i, ret = 0;
	bool hv_success;

	hv_success = vmballoon_send_batched_unlock(b, num_pages);
	if (!hv_success)
		ret = -EIO;

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

		locked = vmballoon_batch_get_status(b->batch_page, i);
		if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
			/*
			 * That page wasn't successfully unlocked by the
			 * hypervisor, re-add it to the list of pages owned by
			 * the balloon driver.
			 */
			list_add(&p->lru, &b->pages);
		} else {
			/* deallocate page */
			__free_page(p);
			STATS_INC(b->stats.free);

			/* update balloon size */
			b->size--;
		}
	}

	return ret;
}

/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, &b->refused_pages, lru) {
		list_del(&page->lru);
		__free_page(page);
		STATS_INC(b->stats.refused_free);
	}

	b->n_refused_pages = 0;
}

static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
{
	b->page = p;
}

static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
				struct page *p)
{
	vmballoon_batch_set_pa(b->batch_page, idx,
			(u64)page_to_pfn(p) << PAGE_SHIFT);
}

/*
 * Inflate the balloon towards its target size. Note that we try to limit
 * the rate of allocation to make sure we are not choking the rest of the
 * system.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	unsigned int goal;
	unsigned int rate;
	unsigned int i;
	unsigned int allocations = 0;
	unsigned int num_pages = 0;
	int error = 0;
	gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/*
	 * First try NOSLEEP page allocations to inflate balloon.
	 *
	 * If we do not throttle nosleep allocations, we can drain all
	 * free pages in the guest quickly (if the balloon target is high).
	 * As a side-effect, draining free pages helps to inform (force)
	 * the guest to start swapping if balloon target is not met yet,
	 * which is a desired behavior. However, the balloon driver can
	 * consume all available CPU cycles if too many pages are allocated
	 * in a second. Therefore, we throttle nosleep allocations even when
	 * the guest is not under memory pressure. OTOH, if we have already
	 * predicted that the guest is under memory pressure, then we
	 * slow down page allocations considerably.
	 */

	goal = b->target - b->size;
	/*
	 * Start with the no-sleep allocation rate, which may be higher
	 * than the sleeping allocation rate.
	 */
	rate = b->slow_allocation_cycles ?
			b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX;

	pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n",
		 __func__, goal, rate, b->rate_alloc);

	for (i = 0; i < goal; i++) {
		struct page *page = alloc_page(flags);

		if (flags == VMW_PAGE_ALLOC_NOSLEEP)
			STATS_INC(b->stats.alloc);
		else
			STATS_INC(b->stats.sleep_alloc);

		if (!page) {
			if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
				/*
				 * CANSLEEP page allocation failed, so guest
				 * is under severe memory pressure. Quickly
				 * decrease allocation rate.
				 */
				b->rate_alloc = max(b->rate_alloc / 2,
						VMW_BALLOON_RATE_ALLOC_MIN);
				STATS_INC(b->stats.sleep_alloc_fail);
				break;
			}
			STATS_INC(b->stats.alloc_fail);

			/*
			 * NOSLEEP page allocation failed, so the guest is
			 * under memory pressure. Let us slow down page
			 * allocations for next few cycles so that the guest
			 * gets out of memory pressure. Also, if we already
			 * allocated b->rate_alloc pages, let's pause,
			 * otherwise switch to sleeping allocations.
			 */
			b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;

			if (i >= b->rate_alloc)
				break;

			flags = VMW_PAGE_ALLOC_CANSLEEP;
			/* Lower rate for sleeping allocations. */
			rate = b->rate_alloc;
			continue;
		}

		b->ops->add_page(b, num_pages++, page);
		if (num_pages == b->batch_max_pages) {
			error = b->ops->lock(b, num_pages);
			num_pages = 0;
			if (error)
				break;
		}

		if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
			cond_resched();
			allocations = 0;
		}

		if (i >= rate) {
			/* We allocated enough pages, let's take a break. */
			break;
		}
	}

	if (num_pages > 0)
		b->ops->lock(b, num_pages);

	/*
	 * We reached our goal without failures so try increasing
	 * allocation rate.
	 */
	if (error == 0 && i >= b->rate_alloc) {
		unsigned int mult = i / b->rate_alloc;

		b->rate_alloc =
			min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
			    VMW_BALLOON_RATE_ALLOC_MAX);
	}

	vmballoon_release_refused_pages(b);
}

/*
 * Decrease the size of the balloon allowing guest to use more memory.
 */
static void vmballoon_deflate(struct vmballoon *b)
{
	struct page *page, *next;
	unsigned int i = 0;
	unsigned int goal;
	unsigned int num_pages = 0;
	int error = 0;	/* initialized: the loop below may never call unlock */

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/* limit deallocation rate */
	goal = min(b->size - b->target, b->rate_free);

	pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free);

	/* free pages to reach target */
	list_for_each_entry_safe(page, next, &b->pages, lru) {
		list_del(&page->lru);
		b->ops->add_page(b, num_pages++, page);

		if (num_pages == b->batch_max_pages) {
			error = b->ops->unlock(b, num_pages);
			num_pages = 0;
			if (error) {
				/* quickly decrease rate in case of error */
				b->rate_free = max(b->rate_free / 2,
						VMW_BALLOON_RATE_FREE_MIN);
				return;
			}
		}

		if (++i >= goal)
			break;
	}

	if (num_pages > 0)
		b->ops->unlock(b, num_pages);

	/* slowly increase rate if there were no errors */
	if (error == 0)
		b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC,
				VMW_BALLOON_RATE_FREE_MAX);
}

static const struct vmballoon_ops vmballoon_basic_ops = {
	.add_page = vmballoon_add_page,
	.lock = vmballoon_lock_page,
	.unlock = vmballoon_unlock_page
};

static const struct vmballoon_ops vmballoon_batched_ops = {
	.add_page = vmballoon_add_batched_page,
	.lock = vmballoon_lock_batched_page,
	.unlock = vmballoon_unlock_batched_page
};

static bool vmballoon_init_batching(struct vmballoon *b)
{
	b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
	if (!b->page)
		return false;

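	/*
	 * Map the page: the driver fills in batch entries through this
	 * mapping, while the host reads them via the page's PFN passed
	 * to the BATCHED_LOCK/UNLOCK commands.
	 */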
	b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
	if (!b->batch_page) {
		__free_page(b->page);
		return false;
	}

	return true;
}

/*
 * Perform standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting protocol. This operation normally
 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		return;

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		b->ops = &vmballoon_batched_ops;
		b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
		if (!vmballoon_init_batching(b)) {
			/*
			 * We failed to initialize batching, inform the monitor
			 * about it by sending a null capability.
			 *
			 * The guest will retry in one second.
			 */
			vmballoon_send_start(b, 0);
			return;
		}
	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		b->ops = &vmballoon_basic_ops;
		b->batch_max_pages = 1;
	}

	b->reset_required = false;
	if (!vmballoon_send_guest_id(b))
		pr_err("failed to send guest ID to the host\n");
}

/*
 * Balloon work function: reset protocol, if needed, get the new size and
 * adjust balloon as needed. Repeat in 1 sec.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
	unsigned int target;

	STATS_INC(b->stats.timer);

	if (b->reset_required)
		vmballoon_reset(b);

	if (b->slow_allocation_cycles > 0)
		b->slow_allocation_cycles--;

	if (vmballoon_send_get_target(b, &target)) {
		/* update target, adjust size */
		b->target = target;

		if (b->size < target)
			vmballoon_inflate(b);
		else if (b->size > target)
			vmballoon_deflate(b);
	}

	/*
	 * We are using a freezable workqueue so that balloon operations are
	 * stopped while the system transitions to/from sleep/hibernation.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	struct vmballoon_stats *stats = &b->stats;

	/* format size info */
	seq_printf(f,
		   "target:             %8d pages\n"
		   "current:            %8d pages\n",
		   b->target, b->size);

	/* format rate info */
	seq_printf(f,
		   "rateNoSleepAlloc:   %8d pages/sec\n"
		   "rateSleepAlloc:     %8d pages/sec\n"
		   "rateFree:           %8d pages/sec\n",
		   VMW_BALLOON_NOSLEEP_ALLOC_MAX,
		   b->rate_alloc, b->rate_free);

	seq_printf(f,
		   "\n"
		   "timer:              %8u\n"
		   "start:              %8u (%4u failed)\n"
		   "guestType:          %8u (%4u failed)\n"
		   "lock:               %8u (%4u failed)\n"
		   "unlock:             %8u (%4u failed)\n"
		   "target:             %8u (%4u failed)\n"
		   "primNoSleepAlloc:   %8u (%4u failed)\n"
		   "primCanSleepAlloc:  %8u (%4u failed)\n"
		   "primFree:           %8u\n"
		   "errAlloc:           %8u\n"
		   "errFree:            %8u\n",
		   stats->timer,
		   stats->start, stats->start_fail,
		   stats->guest_type, stats->guest_type_fail,
		   stats->lock, stats->lock_fail,
		   stats->unlock, stats->unlock_fail,
		   stats->target, stats->target_fail,
		   stats->alloc, stats->alloc_fail,
		   stats->sleep_alloc, stats->sleep_alloc_fail,
		   stats->free,
		   stats->refused_alloc, stats->refused_free);

	return 0;
}

static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vmballoon_debug_show, inode->i_private);
}

static const struct file_operations vmballoon_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= vmballoon_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init vmballoon_debugfs_init(struct vmballoon *b)
{
	int error;

	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
					   &vmballoon_debug_fops);
	if (IS_ERR(b->dbg_entry)) {
		error = PTR_ERR(b->dbg_entry);
		pr_err("failed to create debugfs entry, error: %d\n", error);
		return error;
	}

	return 0;
}

static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	debugfs_remove(b->dbg_entry);
}

#else

static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
	return 0;
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif	/* CONFIG_DEBUG_FS */

static int __init vmballoon_init(void)
{
	int error;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper != &x86_hyper_vmware)
		return -ENODEV;

	INIT_LIST_HEAD(&balloon.pages);
	INIT_LIST_HEAD(&balloon.refused_pages);

	/* initialize rates */
	balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;
	balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX;

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	/*
	 * Start balloon.
	 */
	if (!vmballoon_send_start(&balloon, VMW_BALLOON_CAPABILITIES)) {
		pr_err("failed to send start command to the host\n");
		return -EIO;
	}

	if ((balloon.capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		balloon.ops = &vmballoon_batched_ops;
		balloon.batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
		if (!vmballoon_init_batching(&balloon)) {
			pr_err("failed to init batching\n");
			return -EIO;
		}
	} else if ((balloon.capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		balloon.ops = &vmballoon_basic_ops;
		balloon.batch_max_pages = 1;
	}

	if (!vmballoon_send_guest_id(&balloon)) {
		pr_err("failed to send guest ID to the host\n");
		return -EIO;
	}

	error = vmballoon_debugfs_init(&balloon);
	if (error)
		return error;

	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	return 0;
}
module_init(vmballoon_init);

static void __exit vmballoon_exit(void)
{
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon, VMW_BALLOON_CAPABILITIES);
	vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);