/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc.  Rafael Aquini <aquini@redhat.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>

/*
 * balloon_page_enqueue - allocates a new page and inserts it into the balloon
 *                        page list.
 * @b_dev_info: balloon device descriptor where we will insert a new page
 *
 * The driver must call this function to properly allocate and enlist a new
 * balloon page before definitively removing that page from the guest system.
 * This function returns the address of the newly enqueued page, or NULL if
 * we failed to allocate a new page this turn.
 */
struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
{
        unsigned long flags;
        struct page *page = alloc_page(balloon_mapping_gfp_mask() |
                                        __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!page)
                return NULL;

        /*
         * Block others from accessing the 'page' when we get around to
         * establishing additional references. We should be the only one
         * holding a reference to the 'page' at this point.
         */
        BUG_ON(!trylock_page(page));
        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        balloon_page_insert(b_dev_info, page);
        __count_vm_event(BALLOON_INFLATE);
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
        unlock_page(page);
        return page;
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);

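/*
 * Usage sketch (illustrative only, not compiled here): a hypothetical driver
 * inflate path could enlist pages one at a time and then tell the host about
 * each page it managed to grab.  The helper my_tell_host() is an assumption
 * made up for this example.
 *
 *	static void my_inflate(struct balloon_dev_info *b_dev_info,
 *			       size_t nr_pages)
 *	{
 *		while (nr_pages--) {
 *			struct page *page = balloon_page_enqueue(b_dev_info);
 *
 *			if (!page)
 *				break;
 *			my_tell_host(page_to_pfn(page));
 *		}
 *	}
 *
 * A NULL return simply means the allocation failed this turn; drivers
 * typically defer and retry (e.g. from a workqueue) rather than spin.
 */
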
/*
 * balloon_page_dequeue - removes a page from the balloon's page list and
 *                        returns its address to allow the driver to release
 *                        the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * The driver must call this function to properly de-allocate a previously
 * enlisted balloon page before definitively releasing it back to the guest
 * system.
 * This function returns the address of the recently dequeued page, or NULL
 * if we find the balloon's page list temporarily empty because compaction
 * has isolated its pages.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
        struct page *page, *tmp;
        unsigned long flags;
        bool dequeued_page;

        dequeued_page = false;
        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
                /*
                 * Block others from accessing the 'page' while we get around
                 * to establishing additional references and preparing the
                 * 'page' to be released by the balloon driver.
                 */
                if (trylock_page(page)) {
#ifdef CONFIG_BALLOON_COMPACTION
                        if (!PagePrivate(page)) {
                                /* raced with isolation */
                                unlock_page(page);
                                continue;
                        }
#endif
                        balloon_page_delete(page);
                        __count_vm_event(BALLOON_DEFLATE);
                        unlock_page(page);
                        dequeued_page = true;
                        break;
                }
        }
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

        if (!dequeued_page) {
                /*
                 * If we are unable to dequeue a balloon page because the page
                 * list is empty and there are no isolated pages, then
                 * something went wrong and some balloon pages are lost.
                 * BUG() here, otherwise the balloon driver may get stuck in
                 * an infinite loop while attempting to release all its pages.
                 */
                spin_lock_irqsave(&b_dev_info->pages_lock, flags);
                if (unlikely(list_empty(&b_dev_info->pages) &&
                             !b_dev_info->isolated_pages))
                        BUG();
                spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
                page = NULL;
        }
        return page;
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);

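/*
 * Usage sketch (illustrative only, not compiled here): a hypothetical driver
 * deflate path keeps dequeuing pages and handing them back to the guest
 * until its target is met.  my_tell_host_release() is a made-up helper
 * standing in for the driver's host notification.
 *
 *	static void my_deflate(struct balloon_dev_info *b_dev_info,
 *			       size_t nr_pages)
 *	{
 *		while (nr_pages--) {
 *			struct page *page = balloon_page_dequeue(b_dev_info);
 *
 *			if (!page)
 *				break;
 *			my_tell_host_release(page_to_pfn(page));
 *			put_page(page);
 *		}
 *	}
 *
 * Note that a NULL return here can be transient while compaction has pages
 * isolated, so drivers usually retry rather than treat it as an error.
 */
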
#ifdef CONFIG_BALLOON_COMPACTION

static inline void __isolate_balloon_page(struct page *page)
{
        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
        unsigned long flags;

        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        ClearPagePrivate(page);
        list_del(&page->lru);
        b_dev_info->isolated_pages++;
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

static inline void __putback_balloon_page(struct page *page)
{
        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
        unsigned long flags;

        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        SetPagePrivate(page);
        list_add(&page->lru, &b_dev_info->pages);
        b_dev_info->isolated_pages--;
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

/* __isolate_lru_page() counterpart for a ballooned page */
bool balloon_page_isolate(struct page *page)
{
        /*
         * Avoid burning cycles with pages that are still under __free_pages(),
         * or that just got freed under us.
         *
         * In case we 'win' a race for a balloon page being freed under us and
         * raise its refcount, preventing __free_pages() from doing its job,
         * the put_page() at the end of this block will take care of
         * releasing this page, thus avoiding a nasty leakage.
         */
        if (likely(get_page_unless_zero(page))) {
                /*
                 * As balloon pages are not isolated from LRU lists, concurrent
                 * compaction threads can race against page migration functions
                 * as well as race against the balloon driver releasing a page.
                 *
                 * In order to avoid having an already isolated balloon page
                 * being (wrongly) re-isolated while it is under migration,
                 * or to avoid attempting to isolate pages being released by
                 * the balloon driver, let's be sure we have the page lock
                 * before proceeding with the balloon page isolation steps.
                 */
                if (likely(trylock_page(page))) {
                        /*
                         * A ballooned page, by default, has PagePrivate set.
                         * Prevent concurrent compaction threads from isolating
                         * an already isolated balloon page by clearing it.
                         */
                        if (balloon_page_movable(page)) {
                                __isolate_balloon_page(page);
                                unlock_page(page);
                                return true;
                        }
                        unlock_page(page);
                }
                put_page(page);
        }
        return false;
}

/* putback_lru_page() counterpart for a ballooned page */
void balloon_page_putback(struct page *page)
{
        /*
         * 'lock_page()' stabilizes the page and prevents races against
         * concurrent isolation threads attempting to re-isolate it.
         */
        lock_page(page);

        if (__is_movable_balloon_page(page)) {
                __putback_balloon_page(page);
                /* drop the extra ref count taken for page isolation */
                put_page(page);
        } else {
                WARN_ON(1);
                dump_page(page, "not movable balloon page");
        }
        unlock_page(page);
}

/* move_to_new_page() counterpart for a ballooned page */
int balloon_page_migrate(struct page *newpage,
                         struct page *page, enum migrate_mode mode)
{
        struct balloon_dev_info *balloon = balloon_page_device(page);
        int rc = -EAGAIN;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

        if (WARN_ON(!__is_movable_balloon_page(page))) {
                dump_page(page, "not movable balloon page");
                return rc;
        }

        if (balloon && balloon->migratepage)
                rc = balloon->migratepage(balloon, newpage, page, mode);

        return rc;
}
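
/*
 * Wiring sketch (illustrative only, not compiled here): a balloon driver
 * that wants its pages to take part in compaction initializes its
 * balloon_dev_info and installs a migratepage callback for
 * balloon_page_migrate() to invoke.  The names 'my_dev' and
 * 'my_migratepage' are made up for this example; see virtio_balloon for a
 * real user.
 *
 *	balloon_devinfo_init(&my_dev->b_dev_info);
 *	#ifdef CONFIG_BALLOON_COMPACTION
 *	my_dev->b_dev_info.migratepage = my_migratepage;
 *	#endif
 */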
#endif /* CONFIG_BALLOON_COMPACTION */