/******************************************************************************
 * balloon.c
 *
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/sysdev.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/xenbus.h>
#include <xen/features.h>
#include <xen/page.h>

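/* Convert a page count to KiB: shift by (PAGE_SHIFT - 10), since 1 KiB = 2^10 bytes. */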
#define PAGES2KB(_p) ((_p) << (PAGE_SHIFT - 10))

#define BALLOON_CLASS_NAME "xen_memory"

struct balloon_stats {
	/* We aim for 'current allocation' == 'target allocation'. */
	unsigned long current_pages;
	unsigned long target_pages;
	/* We may hit the hard limit in Xen. If we do then we remember it. */
	unsigned long hard_limit;
	/*
	 * Drivers may alter the memory reservation independently, but they
	 * must inform the balloon driver so we avoid hitting the hard limit.
	 */
	unsigned long driver_pages;
	/* Number of pages in high- and low-memory balloons. */
	unsigned long balloon_low;
	unsigned long balloon_high;
};

static DEFINE_MUTEX(balloon_mutex);

static struct sys_device balloon_sysdev;

static int register_balloon(struct sys_device *sysdev);

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and driver_pages, and
 * balloon lists.
 */
static DEFINE_SPINLOCK(balloon_lock);

static struct balloon_stats balloon_stats;

/* We increase/decrease in batches which fit in a page */
static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
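/* With 4 KiB pages and 8-byte longs, that is 512 frames per batch. */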

#ifdef CONFIG_HIGHMEM
#define inc_totalhigh_pages() (totalhigh_pages++)
#define dec_totalhigh_pages() (totalhigh_pages--)
#else
#define inc_totalhigh_pages() do {} while (0)
#define dec_totalhigh_pages() do {} while (0)
#endif

/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);

/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
static DECLARE_WORK(balloon_worker, balloon_process);
static struct timer_list balloon_timer;

/* When ballooning out (allocating memory to return to Xen) we don't really
   want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

static void scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
	clear_highpage(page);
#endif
}

/* balloon_append: add the given page to the balloon. */
static void balloon_append(struct page *page)
{
	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &ballooned_pages);
		balloon_stats.balloon_high++;
		dec_totalhigh_pages();
	} else {
		list_add(&page->lru, &ballooned_pages);
		balloon_stats.balloon_low++;
	}
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(void)
{
	struct page *page;

	if (list_empty(&ballooned_pages))
		return NULL;

	page = list_entry(ballooned_pages.next, struct page, lru);
	list_del(&page->lru);

	if (PageHighMem(page)) {
		balloon_stats.balloon_high--;
		inc_totalhigh_pages();
	} else
		balloon_stats.balloon_low--;

	return page;
}

static struct page *balloon_first_page(void)
{
	if (list_empty(&ballooned_pages))
		return NULL;
	return list_entry(ballooned_pages.next, struct page, lru);
}

static struct page *balloon_next_page(struct page *page)
{
	struct list_head *next = page->lru.next;
	if (next == &ballooned_pages)
		return NULL;
	return list_entry(next, struct page, lru);
}

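/* Timer callback: defer the actual balloon adjustment to process context. */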
static void balloon_alarm(unsigned long unused)
{
	schedule_work(&balloon_worker);
}

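/*
 * The effective target: the requested target clamped to the Xen hard
 * limit and to the maximum we can actually reach (the pages currently
 * allocated plus everything held in the low and high balloons).
 */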
static unsigned long current_target(void)
{
	unsigned long target = min(balloon_stats.target_pages, balloon_stats.hard_limit);

	target = min(target,
		     balloon_stats.current_pages +
		     balloon_stats.balloon_low +
		     balloon_stats.balloon_high);

	return target;
}

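/*
 * Ask Xen to populate up to nr_pages of the ballooned frames, re-establish
 * their P2M entries (and lowmem kernel mappings), and hand the pages back
 * to the allocator. A shortfall from Xen means we hit the hard limit.
 */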
static int increase_reservation(unsigned long nr_pages)
{
	unsigned long pfn, i, flags;
	struct page *page;
	long rc;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	spin_lock_irqsave(&balloon_lock, flags);

	page = balloon_first_page();
	for (i = 0; i < nr_pages; i++) {
		BUG_ON(page == NULL);
		frame_list[i] = page_to_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (rc < nr_pages) {
		if (rc > 0) {
			int ret;

			/* We hit the Xen hard limit: reprobe. */
			reservation.nr_extents = rc;
			ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
						   &reservation);
			BUG_ON(ret != rc);
		}
		if (rc >= 0)
			balloon_stats.hard_limit = (balloon_stats.current_pages + rc -
						    balloon_stats.driver_pages);
		goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		page = balloon_retrieve();
		BUG_ON(page == NULL);

		pfn = page_to_pfn(page);
		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
		       phys_to_machine_mapping_valid(pfn));

		set_phys_to_machine(pfn, frame_list[i]);

		/* Link back into the page tables if not highmem. */
		if (pfn < max_low_pfn) {
			int ret;
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				mfn_pte(frame_list[i], PAGE_KERNEL),
				0);
			BUG_ON(ret);
		}

		/* Relinquish the page back to the allocator. */
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
	}

	balloon_stats.current_pages += nr_pages;
	totalram_pages = balloon_stats.current_pages;

 out:
	spin_unlock_irqrestore(&balloon_lock, flags);

	return 0;
}

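/*
 * The reverse path: allocate up to nr_pages from the kernel, scrub them,
 * tear down their mappings and P2M entries, queue them on the balloon
 * lists, and return the frames to Xen. Returns nonzero if the caller
 * should back off because page allocation failed.
 */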
static int decrease_reservation(unsigned long nr_pages)
{
	unsigned long pfn, i, flags;
	struct page *page;
	int need_sleep = 0;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_BALLOON);
		if (page == NULL) {
			nr_pages = i;
			need_sleep = 1;
			break;
		}

		pfn = page_to_pfn(page);
		frame_list[i] = pfn_to_mfn(pfn);

		scrub_page(page);

		if (!PageHighMem(page)) {
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				__pte_ma(0), 0);
			BUG_ON(ret);
		}
	}

	/* Ensure that ballooned highmem pages don't have kmaps. */
	kmap_flush_unused();
	flush_tlb_all();

	spin_lock_irqsave(&balloon_lock, flags);

	/* No more mappings: invalidate P2M and add to balloon. */
	for (i = 0; i < nr_pages; i++) {
		pfn = mfn_to_pfn(frame_list[i]);
		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
		balloon_append(pfn_to_page(pfn));
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;
	totalram_pages = balloon_stats.current_pages;

	spin_unlock_irqrestore(&balloon_lock, flags);

	return need_sleep;
}

/*
 * We avoid multiple worker processes conflicting via the balloon mutex.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(struct work_struct *work)
{
	int need_sleep = 0;
	long credit;

	mutex_lock(&balloon_mutex);

	do {
		credit = current_target() - balloon_stats.current_pages;
		if (credit > 0)
			need_sleep = (increase_reservation(credit) != 0);
		if (credit < 0)
			need_sleep = (decrease_reservation(-credit) != 0);

#ifndef CONFIG_PREEMPT
		if (need_resched())
			schedule();
#endif
	} while ((credit != 0) && !need_sleep);

	/* Schedule more work if there is some still to be done. */
	if (current_target() != balloon_stats.current_pages)
		mod_timer(&balloon_timer, jiffies + HZ);

	mutex_unlock(&balloon_mutex);
}

/* Resets the Xen limit, sets new target, and kicks off processing. */
static void balloon_set_new_target(unsigned long target)
{
	/* No need for lock. Not read-modify-write updates. */
	balloon_stats.hard_limit = ~0UL;
	balloon_stats.target_pages = target;
	schedule_work(&balloon_worker);
}

static struct xenbus_watch target_watch =
{
	.node = "memory/target"
};

/* React to a change in the target key */
static void watch_target(struct xenbus_watch *watch,
			 const char **vec, unsigned int len)
{
	unsigned long long new_target;
	int err;

	err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
	if (err != 1) {
		/* This is ok (for domain0 at least) - so just return */
		return;
	}

	/* The given memory/target value is in KiB, so it needs converting to
	 * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
	 */
	balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
}

static int balloon_init_watcher(struct notifier_block *notifier,
				unsigned long event,
				void *data)
{
	int err;

	err = register_xenbus_watch(&target_watch);
	if (err)
		printk(KERN_ERR "Failed to set balloon watcher\n");

	return NOTIFY_DONE;
}

static struct notifier_block xenstore_notifier;

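/*
 * Record the initial allocation, seed the balloon with any memory above
 * what Xen handed us at boot, and hook up the xenstore watch on
 * memory/target via the xenstore notifier.
 */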
static int __init balloon_init(void)
{
	unsigned long pfn;
	struct page *page;

	if (!xen_pv_domain())
		return -ENODEV;

	pr_info("xen_balloon: Initialising balloon driver.\n");

	balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn);
	totalram_pages = balloon_stats.current_pages;
	balloon_stats.target_pages = balloon_stats.current_pages;
	balloon_stats.balloon_low = 0;
	balloon_stats.balloon_high = 0;
	balloon_stats.driver_pages = 0UL;
	balloon_stats.hard_limit = ~0UL;

	init_timer(&balloon_timer);
	balloon_timer.data = 0;
	balloon_timer.function = balloon_alarm;

	register_balloon(&balloon_sysdev);

	/* Initialise the balloon with excess memory space. */
	for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
		page = pfn_to_page(pfn);
		if (!PageReserved(page))
			balloon_append(page);
	}

	target_watch.callback = watch_target;
	xenstore_notifier.notifier_call = balloon_init_watcher;

	register_xenstore_notifier(&xenstore_notifier);

	return 0;
}

subsys_initcall(balloon_init);

static void balloon_exit(void)
{
	/* XXX - release balloon here */
	return;
}

module_exit(balloon_exit);

#define BALLOON_SHOW(name, format, args...)				\
	static ssize_t show_##name(struct sys_device *dev,		\
				   struct sysdev_attribute *attr,	\
				   char *buf)				\
	{								\
		return sprintf(buf, format, ##args);			\
	}								\
	static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)

BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
BALLOON_SHOW(hard_limit_kb,
	     (balloon_stats.hard_limit != ~0UL) ? "%lu\n" : "???\n",
	     (balloon_stats.hard_limit != ~0UL) ? PAGES2KB(balloon_stats.hard_limit) : 0);
BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages));

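/*
 * target_kb: the balloon target in KiB, readable and writable. For
 * example (path assumed from the sysdev class registered below, not
 * taken from this file), asking for a 512 MiB target from userspace:
 *
 *   echo 524288 > /sys/devices/system/xen_memory/xen_memory0/target_kb
 */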
static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr,
			      char *buf)
{
	return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
}

static ssize_t store_target_kb(struct sys_device *dev,
			       struct sysdev_attribute *attr,
			       const char *buf,
			       size_t count)
{
	char *endchar;
	unsigned long long target_bytes;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	target_bytes = simple_strtoull(buf, &endchar, 0) * 1024;

	balloon_set_new_target(target_bytes >> PAGE_SHIFT);

	return count;
}

static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
		   show_target_kb, store_target_kb);

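/*
 * target: the same target expressed in bytes. Writes are parsed with
 * memparse(), so suffixes such as "1G" are accepted.
 */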
static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr,
			   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)balloon_stats.target_pages
		       << PAGE_SHIFT);
}

static ssize_t store_target(struct sys_device *dev,
			    struct sysdev_attribute *attr,
			    const char *buf,
			    size_t count)
{
	char *endchar;
	unsigned long long target_bytes;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	target_bytes = memparse(buf, &endchar);

	balloon_set_new_target(target_bytes >> PAGE_SHIFT);

	return count;
}

static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR,
		   show_target, store_target);

static struct sysdev_attribute *balloon_attrs[] = {
	&attr_target_kb,
	&attr_target,
};

static struct attribute *balloon_info_attrs[] = {
	&attr_current_kb.attr,
	&attr_low_kb.attr,
	&attr_high_kb.attr,
	&attr_hard_limit_kb.attr,
	&attr_driver_kb.attr,
	NULL
};

static struct attribute_group balloon_info_group = {
	.name = "info",
	.attrs = balloon_info_attrs,
};

static struct sysdev_class balloon_sysdev_class = {
	.name = BALLOON_CLASS_NAME,
};

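/*
 * Register the sysdev class and device, create the writable target
 * attributes on the device, and group the read-only statistics under
 * an "info" subdirectory. Unwinds everything on failure.
 */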
static int register_balloon(struct sys_device *sysdev)
{
	int i, error;

	error = sysdev_class_register(&balloon_sysdev_class);
	if (error)
		return error;

	sysdev->id = 0;
	sysdev->cls = &balloon_sysdev_class;

	error = sysdev_register(sysdev);
	if (error) {
		sysdev_class_unregister(&balloon_sysdev_class);
		return error;
	}

	for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
		error = sysdev_create_file(sysdev, balloon_attrs[i]);
		if (error)
			goto fail;
	}

	error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
	if (error)
		goto fail;

	return 0;

 fail:
	while (--i >= 0)
		sysdev_remove_file(sysdev, balloon_attrs[i]);
	sysdev_unregister(sysdev);
	sysdev_class_unregister(&balloon_sysdev_class);
	return error;
}

MODULE_LICENSE("GPL");