/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}

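/*
 * Illustrative sketch (not part of the original file): how a caller is
 * expected to pair migrate_prep() with isolate_lru_page() when building a
 * page list for migrate_pages(). The helper name below is hypothetical;
 * the functions it calls are the existing mm/ interfaces.
 */
#if 0	/* example only */
static int example_isolate_for_migration(struct page *page,
					struct list_head *pagelist)
{
	int err;

	migrate_prep();			/* drain LRU pagevecs on all CPUs */

	err = isolate_lru_page(page);	/* 0 on success, -EBUSY otherwise */
	if (!err) {
		list_add_tail(&page->lru, pagelist);
		inc_zone_page_state(page, NR_ISOLATED_ANON +
					page_is_file_cache(page));
	}
	return err;
}
#endif
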
/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		if (unlikely(isolated_balloon_page(page)))
			balloon_page_putback(page);
		else
			putback_lru_page(page);
	}
}

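/*
 * Illustrative sketch (not part of the original file): the usual error
 * handling around migrate_pages(). Pages still left on the list after a
 * failed or partial migration must be handed back via
 * putback_movable_pages(). The allocation callback name is hypothetical.
 */
#if 0	/* example only */
static void example_migrate_list(struct list_head *pagelist)
{
	int err;

	err = migrate_pages(pagelist, example_alloc_target, NULL,
				0, MIGRATE_SYNC, MR_SYSCALL);
	if (err)
		putback_movable_pages(pagelist);
}
#endif
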
/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
		unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(new))) {
		ptep = huge_pte_offset(mm, addr);
		if (!ptep)
			goto out;
		ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
	} else {
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			goto out;

		ptep = pte_offset_map(pmd, addr);

		/*
		 * Peek to check is_swap_pte() before taking ptlock?  No, we
		 * can race mremap's move_ptes(), which skips anon_vma lock.
		 */

		ptl = pte_lockptr(mm, pmd);
	}

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (pte_swp_soft_dirty(*ptep))
		pte = pte_mksoft_dirty(pte);

	/* Recheck VMA as permissions can change since migration started */
	if (is_write_migration_entry(entry))
		pte = maybe_mkwrite(pte, vma);

#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new)) {
		pte = pte_mkhuge(pte);
		pte = arch_make_huge_pte(pte, vma, new, 0);
	}
#endif
	flush_dcache_page(new);
	set_pte_at(mm, addr, ptep, pte);

	if (PageHuge(new)) {
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, addr);
		else
			page_dup_rmap(new);
	} else if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
	};

	rmap_walk(new, &rwc);
}

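/*
 * Illustrative sketch (not part of the original file): the counterpart that
 * remove_migration_pte() undoes. When try_to_unmap() is invoked with
 * TTU_MIGRATION (see __unmap_and_move() below), the rmap side replaces a
 * present pte with a migration swap entry, roughly along these lines. The
 * helper name is hypothetical; the entry-encoding calls are the real ones
 * from <linux/swapops.h>.
 */
#if 0	/* example only */
static void example_install_migration_entry(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep,
				struct page *page, pte_t pteval)
{
	swp_entry_t entry;

	/* encode the old page and its writability into a swap-style pte */
	entry = make_migration_entry(page, pte_write(pteval));
	set_pte_at(mm, addr, ptep, swp_entry_to_pte(entry));
	/* remove_migration_pte() later maps this back to the new page */
}
#endif
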
/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once radix-tree replacement for page migration has started,
	 * page_count *must* be zero. And we don't want to call
	 * wait_on_page_locked() against a page without get_page().
	 * So we use get_page_unless_zero() here. Even if it fails, the
	 * page fault will simply occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}

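/*
 * Illustrative sketch (not part of the original file): how a fault handler is
 * expected to use migration_entry_wait(). When do_swap_page()-style code finds
 * a migration entry instead of a real swap entry, it waits for the migration
 * to complete and simply lets the fault be retried. The function name is
 * hypothetical.
 */
#if 0	/* example only */
static int example_handle_swap_pte(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address, pte_t orig_pte)
{
	swp_entry_t entry = pte_to_swp_entry(orig_pte);

	if (is_migration_entry(entry)) {
		/* sleeps until the page under migration is unlocked */
		migration_entry_wait(mm, pmd, address);
		return 0;	/* retry the fault */
	}
	/* ...the normal swap-in path would continue here... */
	return 0;
}
#endif
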
#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			get_bh(bh);
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks.
			 */
			struct buffer_head *failed_bh = bh;
			put_bh(failed_bh);
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				put_bh(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	return true;
}
#endif /* CONFIG_BLOCK */

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 *   1 for anonymous pages without a mapping
 *   2 for pages with a mapping
 *   3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count)
{
	int expected_count = 1 + extra_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != expected_count)
			return -EAGAIN;
		return MIGRATEPAGE_SUCCESS;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count += 1 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * In the async migration case of moving a page with buffers, lock the
	 * buffers using trylock before the mapping is moved. If the mapping
	 * were moved first and we then failed to lock the buffers, we could
	 * not move the mapping back due to the elevated page count and would
	 * have to block waiting on other references to be dropped.
	 */
	if (mode == MIGRATE_ASYNC && head &&
			!buffer_migrate_lock_buffers(head, mode)) {
		page_unfreeze_refs(page, expected_count);
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_unfreeze_refs(page, expected_count - 1);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (!PageSwapCache(page) && PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		if (page_count(page) != 1)
			return -EAGAIN;
		return MIGRATEPAGE_SUCCESS;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	get_page(newpage);

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count - 1);

	spin_unlock_irq(&mapping->tree_lock);
	return MIGRATEPAGE_SUCCESS;
}

/*
 * Gigantic pages are so large that we do not guarantee that page++ pointer
 * arithmetic will work across the entire page.  We need something more
 * specialized.
 */
static void __copy_gigantic_page(struct page *dst, struct page *src,
				int nr_pages)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < nr_pages; ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

static void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	int nr_pages;

	if (PageHuge(src)) {
		/* hugetlbfs page */
		struct hstate *h = page_hstate(src);
		nr_pages = pages_per_huge_page(h);

		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
			__copy_gigantic_page(dst, src, nr_pages);
			return;
		}
	} else {
		/* thp page */
		BUG_ON(!PageTransHuge(src));
		nr_pages = hpage_nr_pages(src);
	}

	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	int cpupid;

	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON_PAGE(PageUnevictable(page), page);
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		if (PageSwapBacked(page))
			SetPageDirty(newpage);
		else
			__set_page_dirty_nobuffers(newpage);
	}

	if (page_is_young(page))
		set_page_young(newpage);
	if (page_is_idle(page))
		set_page_idle(newpage);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(page, -1);
	page_cpupid_xchg_last(newpage, cpupid);

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (PageSwapCache(page))
		ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);

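/*
 * Illustrative sketch (not part of the original file): migrate_page() is meant
 * to be wired up directly as the ->migratepage callback of filesystems whose
 * pages carry no fs-private state. The surrounding operation names are
 * hypothetical placeholders.
 */
#if 0	/* example only */
static const struct address_space_operations example_aops = {
	.readpage	= example_readpage,	/* hypothetical */
	.writepage	= example_writepage,	/* hypothetical */
	.migratepage	= migrate_page,		/* no PagePrivate data to move */
};
#endif
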
#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * In the async case, migrate_page_move_mapping locked the buffers
	 * with an IRQ-safe spinlock held. In the sync case, the buffers
	 * need to be locked now.
	 */
	if (mode != MIGRATE_ASYNC)
		BUG_ON(!buffer_migrate_lock_buffers(head, mode));

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

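/*
 * Illustrative sketch (not part of the original file): a block-based
 * filesystem whose pages attach buffer_heads can point its callback at
 * buffer_migrate_page() instead of migrate_page(), roughly like this. The
 * other operation names are hypothetical placeholders.
 */
#if 0	/* example only */
static const struct address_space_operations example_blockfs_aops = {
	.readpage	= example_blockfs_readpage,	/* hypothetical */
	.writepage	= example_blockfs_writepage,	/* hypothetical */
	.migratepage	= buffer_migrate_page,		/* moves buffer_heads too */
};
#endif
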
/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				int page_was_mapped, enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page. */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	/*
	 * Indirectly called below, migrate_page_copy() copies PG_dirty and thus
	 * needs newpage's memcg set to transfer memcg dirty page accounting.
	 * So perform memcg migration in two steps:
	 * 1. set newpage->mem_cgroup (here)
	 * 2. clear page->mem_cgroup (below)
	 */
	set_page_memcg(newpage, page_memcg(page));

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page, mode);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems provide a
		 * migratepage callback. Anonymous pages are part of swap
		 * space which also has its own migratepage callback. This
		 * is the most common path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page, mode);
	else
		rc = fallback_migrate_page(mapping, newpage, page, mode);

	if (rc != MIGRATEPAGE_SUCCESS) {
		set_page_memcg(newpage, NULL);
		newpage->mapping = NULL;
	} else {
		set_page_memcg(page, NULL);
		if (page_was_mapped)
			remove_migration_ptes(page, newpage);
		page->mapping = NULL;
	}

	unlock_page(newpage);

	return rc;
}

| Minchan Kim | 0dabec9 | 2011-10-31 17:06:57 -0700 | [diff] [blame] | 782 | static int __unmap_and_move(struct page *page, struct page *newpage, | 
| Hugh Dickins | 9c620e2 | 2013-02-22 16:35:14 -0800 | [diff] [blame] | 783 | int force, enum migrate_mode mode) | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 784 | { | 
| Minchan Kim | 0dabec9 | 2011-10-31 17:06:57 -0700 | [diff] [blame] | 785 | int rc = -EAGAIN; | 
| Hugh Dickins | 2ebba6b | 2014-12-12 16:56:19 -0800 | [diff] [blame] | 786 | int page_was_mapped = 0; | 
| Mel Gorman | 3f6c827 | 2010-05-24 14:32:17 -0700 | [diff] [blame] | 787 | struct anon_vma *anon_vma = NULL; | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 788 |  | 
| Nick Piggin | 529ae9a | 2008-08-02 12:01:03 +0200 | [diff] [blame] | 789 | if (!trylock_page(page)) { | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 790 | if (!force || mode == MIGRATE_ASYNC) | 
| Minchan Kim | 0dabec9 | 2011-10-31 17:06:57 -0700 | [diff] [blame] | 791 | goto out; | 
| Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 792 |  | 
|  | 793 | /* | 
|  | 794 | * It's not safe for direct compaction to call lock_page. | 
|  | 795 | * For example, during page readahead pages are added locked | 
|  | 796 | * to the LRU. Later, when the IO completes the pages are | 
|  | 797 | * marked uptodate and unlocked. However, the queueing | 
|  | 798 | * could be merging multiple pages for one bio (e.g. | 
|  | 799 | * mpage_readpages). If an allocation happens for the | 
|  | 800 | * second or third page, the process can end up locking | 
|  | 801 | * the same page twice and deadlocking. Rather than | 
|  | 802 | * trying to be clever about what pages can be locked, | 
|  | 803 | * avoid the use of lock_page for direct compaction | 
|  | 804 | * altogether. | 
|  | 805 | */ | 
|  | 806 | if (current->flags & PF_MEMALLOC) | 
| Minchan Kim | 0dabec9 | 2011-10-31 17:06:57 -0700 | [diff] [blame] | 807 | goto out; | 
| Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 808 |  | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 809 | lock_page(page); | 
|  | 810 | } | 
|  | 811 |  | 
|  | 812 | if (PageWriteback(page)) { | 
| Andrea Arcangeli | 11bc82d | 2011-03-22 16:33:11 -0700 | [diff] [blame] | 813 | /* | 
| Jianguo Wu | fed5b64 | 2013-04-29 15:07:58 -0700 | [diff] [blame] | 814 | * Only in the case of a full synchronous migration is it | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 815 | * necessary to wait for PageWriteback. In the async case, | 
|  | 816 | * the retry loop is too short and in the sync-light case, | 
|  | 817 | * the overhead of stalling is too much | 
| Andrea Arcangeli | 11bc82d | 2011-03-22 16:33:11 -0700 | [diff] [blame] | 818 | */ | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 819 | if (mode != MIGRATE_SYNC) { | 
| Andrea Arcangeli | 11bc82d | 2011-03-22 16:33:11 -0700 | [diff] [blame] | 820 | rc = -EBUSY; | 
| Johannes Weiner | 0a31bc9 | 2014-08-08 14:19:22 -0700 | [diff] [blame] | 821 | goto out_unlock; | 
| Andrea Arcangeli | 11bc82d | 2011-03-22 16:33:11 -0700 | [diff] [blame] | 822 | } | 
|  | 823 | if (!force) | 
| Johannes Weiner | 0a31bc9 | 2014-08-08 14:19:22 -0700 | [diff] [blame] | 824 | goto out_unlock; | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 825 | wait_on_page_writeback(page); | 
|  | 826 | } | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 827 | /* | 
| KAMEZAWA Hiroyuki | dc386d4 | 2007-07-26 10:41:07 -0700 | [diff] [blame] | 828 | * By try_to_unmap(), page->mapcount goes down to 0 here. In this case, | 
|  | 829 | * we cannot notice that anon_vma is freed while we migrates a page. | 
| Hugh Dickins | 1ce82b6 | 2011-01-13 15:47:30 -0800 | [diff] [blame] | 830 | * This get_anon_vma() delays freeing anon_vma pointer until the end | 
| KAMEZAWA Hiroyuki | dc386d4 | 2007-07-26 10:41:07 -0700 | [diff] [blame] | 831 | * of migration. File cache pages are no problem because of page_lock() | 
| KAMEZAWA Hiroyuki | 989f89c | 2007-08-30 23:56:21 -0700 | [diff] [blame] | 832 | * File Caches may use write_page() or lock_page() in migration, then, | 
|  | 833 | * just care Anon page here. | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 834 | */ | 
| Hugh Dickins | b79bc0a | 2013-02-22 16:35:13 -0800 | [diff] [blame] | 835 | if (PageAnon(page) && !PageKsm(page)) { | 
| Hugh Dickins | 1ce82b6 | 2011-01-13 15:47:30 -0800 | [diff] [blame] | 836 | /* | 
| Ingo Molnar | 4fc3f1d | 2012-12-02 19:56:50 +0000 | [diff] [blame] | 837 | * Only page_lock_anon_vma_read() understands the subtleties of | 
| Hugh Dickins | 1ce82b6 | 2011-01-13 15:47:30 -0800 | [diff] [blame] | 838 | * getting a hold on an anon_vma from outside one of its mms. | 
|  | 839 | */ | 
| Peter Zijlstra | 746b18d | 2011-05-24 17:12:10 -0700 | [diff] [blame] | 840 | anon_vma = page_get_anon_vma(page); | 
| Hugh Dickins | 1ce82b6 | 2011-01-13 15:47:30 -0800 | [diff] [blame] | 841 | if (anon_vma) { | 
|  | 842 | /* | 
| Peter Zijlstra | 746b18d | 2011-05-24 17:12:10 -0700 | [diff] [blame] | 843 | * Anon page | 
| Hugh Dickins | 1ce82b6 | 2011-01-13 15:47:30 -0800 | [diff] [blame] | 844 | */ | 
| Hugh Dickins | 1ce82b6 | 2011-01-13 15:47:30 -0800 | [diff] [blame] | 845 | } else if (PageSwapCache(page)) { | 
| Mel Gorman | 3fe2011 | 2010-05-24 14:32:20 -0700 | [diff] [blame] | 846 | /* | 
|  | 847 | * We cannot be sure that the anon_vma of an unmapped | 
|  | 848 | * swapcache page is safe to use because we don't | 
|  | 849 | * know in advance if the VMA that this page belonged | 
|  | 850 | * to still exists. If the VMA and others sharing the | 
|  | 851 | * data have been freed, then the anon_vma could | 
|  | 852 | * already be invalid. | 
|  | 853 | * | 
|  | 854 | * To avoid this possibility, swapcache pages get | 
|  | 855 | * migrated but are not remapped when migration | 
|  | 856 | * completes | 
|  | 857 | */ | 
| Mel Gorman | 3fe2011 | 2010-05-24 14:32:20 -0700 | [diff] [blame] | 858 | } else { | 
| Johannes Weiner | 0a31bc9 | 2014-08-08 14:19:22 -0700 | [diff] [blame] | 859 | goto out_unlock; | 
| Mel Gorman | 3fe2011 | 2010-05-24 14:32:20 -0700 | [diff] [blame] | 860 | } | 
| KAMEZAWA Hiroyuki | 989f89c | 2007-08-30 23:56:21 -0700 | [diff] [blame] | 861 | } | 
| Shaohua Li | 62e1c55 | 2008-02-04 22:29:33 -0800 | [diff] [blame] | 862 |  | 
| Konstantin Khlebnikov | d6d86c0 | 2014-10-09 15:29:27 -0700 | [diff] [blame] | 863 | if (unlikely(isolated_balloon_page(page))) { | 
| Rafael Aquini | bf6bddf | 2012-12-11 16:02:42 -0800 | [diff] [blame] | 864 | /* | 
|  | 865 | * A ballooned page does not need any special attention from | 
|  | 866 | * physical to virtual reverse mapping procedures. | 
|  | 867 | * Skip any attempt to unmap PTEs or to remap swap cache, | 
|  | 868 | * in order to avoid burning cycles at rmap level, and perform | 
|  | 869 | * the page migration right away (proteced by page lock). | 
|  | 870 | */ | 
|  | 871 | rc = balloon_page_migrate(newpage, page, mode); | 
| Johannes Weiner | 0a31bc9 | 2014-08-08 14:19:22 -0700 | [diff] [blame] | 872 | goto out_unlock; | 
| Rafael Aquini | bf6bddf | 2012-12-11 16:02:42 -0800 | [diff] [blame] | 873 | } | 
|  | 874 |  | 
| KAMEZAWA Hiroyuki | dc386d4 | 2007-07-26 10:41:07 -0700 | [diff] [blame] | 875 | /* | 
| Shaohua Li | 62e1c55 | 2008-02-04 22:29:33 -0800 | [diff] [blame] | 876 | * Corner case handling: | 
|  | 877 | * 1. When a new swap-cache page is read into, it is added to the LRU | 
|  | 878 | * and treated as swapcache but it has no rmap yet. | 
|  | 879 | * Calling try_to_unmap() against a page->mapping==NULL page will | 
|  | 880 | * trigger a BUG.  So handle it here. | 
|  | 881 | * 2. An orphaned page (see truncate_complete_page) might have | 
|  | 882 | * fs-private metadata. The page can be picked up due to memory | 
|  | 883 | * offlining.  Everywhere else except page reclaim, the page is | 
|  | 884 | * invisible to the vm, so the page can not be migrated.  So try to | 
|  | 885 | * free the metadata, so the page can be freed. | 
| KAMEZAWA Hiroyuki | dc386d4 | 2007-07-26 10:41:07 -0700 | [diff] [blame] | 886 | */ | 
| Shaohua Li | 62e1c55 | 2008-02-04 22:29:33 -0800 | [diff] [blame] | 887 | if (!page->mapping) { | 
| Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 888 | VM_BUG_ON_PAGE(PageAnon(page), page); | 
| Hugh Dickins | 1ce82b6 | 2011-01-13 15:47:30 -0800 | [diff] [blame] | 889 | if (page_has_private(page)) { | 
| Shaohua Li | 62e1c55 | 2008-02-04 22:29:33 -0800 | [diff] [blame] | 890 | try_to_free_buffers(page); | 
| Johannes Weiner | 0a31bc9 | 2014-08-08 14:19:22 -0700 | [diff] [blame] | 891 | goto out_unlock; | 
| Shaohua Li | 62e1c55 | 2008-02-04 22:29:33 -0800 | [diff] [blame] | 892 | } | 
| Shaohua Li | abfc348 | 2009-09-21 17:01:19 -0700 | [diff] [blame] | 893 | goto skip_unmap; | 
| Shaohua Li | 62e1c55 | 2008-02-04 22:29:33 -0800 | [diff] [blame] | 894 | } | 
|  | 895 |  | 
| KAMEZAWA Hiroyuki | dc386d4 | 2007-07-26 10:41:07 -0700 | [diff] [blame] | 896 | /* Establish migration ptes or remove ptes */ | 
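|  |  | /* | 
|  |  | * TTU_MIGRATION replaces each present pte with a migration entry, so | 
|  |  | * faults on the page wait until migration finishes; TTU_IGNORE_MLOCK | 
|  |  | * and TTU_IGNORE_ACCESS unmap the page even if it is mlocked or was | 
|  |  | * recently referenced. | 
|  |  | */ | 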
| Hugh Dickins | 2ebba6b | 2014-12-12 16:56:19 -0800 | [diff] [blame] | 897 | if (page_mapped(page)) { | 
|  | 898 | try_to_unmap(page, | 
| Wanpeng Li | da1b13c | 2015-09-08 15:03:27 -0700 | [diff] [blame] | 899 | TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); | 
| Hugh Dickins | 2ebba6b | 2014-12-12 16:56:19 -0800 | [diff] [blame] | 900 | page_was_mapped = 1; | 
|  | 901 | } | 
| KAMEZAWA Hiroyuki | dc386d4 | 2007-07-26 10:41:07 -0700 | [diff] [blame] | 902 |  | 
| Shaohua Li | abfc348 | 2009-09-21 17:01:19 -0700 | [diff] [blame] | 903 | skip_unmap: | 
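|  |  | /* Only move the contents once no pte maps the page any more. */ | 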
| Christoph Lameter | e6a1530 | 2006-06-25 05:46:49 -0700 | [diff] [blame] | 904 | if (!page_mapped(page)) | 
| Hugh Dickins | 2ebba6b | 2014-12-12 16:56:19 -0800 | [diff] [blame] | 905 | rc = move_to_new_page(newpage, page, page_was_mapped, mode); | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 906 |  | 
| Hugh Dickins | 2ebba6b | 2014-12-12 16:56:19 -0800 | [diff] [blame] | 907 | if (rc && page_was_mapped) | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 908 | remove_migration_ptes(page, page); | 
| Mel Gorman | 3f6c827 | 2010-05-24 14:32:17 -0700 | [diff] [blame] | 909 |  | 
|  | 910 | /* Drop an anon_vma reference if we took one */ | 
| Rik van Riel | 7654506 | 2010-08-09 17:18:41 -0700 | [diff] [blame] | 911 | if (anon_vma) | 
| Peter Zijlstra | 9e60109 | 2011-03-22 16:32:46 -0700 | [diff] [blame] | 912 | put_anon_vma(anon_vma); | 
| Mel Gorman | 3f6c827 | 2010-05-24 14:32:17 -0700 | [diff] [blame] | 913 |  | 
| Johannes Weiner | 0a31bc9 | 2014-08-08 14:19:22 -0700 | [diff] [blame] | 914 | out_unlock: | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 915 | unlock_page(page); | 
| Minchan Kim | 0dabec9 | 2011-10-31 17:06:57 -0700 | [diff] [blame] | 916 | out: | 
|  | 917 | return rc; | 
|  | 918 | } | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 919 |  | 
| Minchan Kim | 0dabec9 | 2011-10-31 17:06:57 -0700 | [diff] [blame] | 920 | /* | 
| Geert Uytterhoeven | ef2a515 | 2015-04-14 15:44:22 -0700 | [diff] [blame] | 921 | * gcc 4.7 and 4.8 on arm get an ICE when inlining unmap_and_move().  Work | 
|  | 922 | * around it. | 
|  | 923 | */ | 
|  | 924 | #if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM) | 
|  | 925 | #define ICE_noinline noinline | 
|  | 926 | #else | 
|  | 927 | #define ICE_noinline | 
|  | 928 | #endif | 
|  | 929 |  | 
|  | 930 | /* | 
| Minchan Kim | 0dabec9 | 2011-10-31 17:06:57 -0700 | [diff] [blame] | 931 | * Obtain the lock on page, remove all ptes and migrate the page | 
|  | 932 | * to the newly allocated page in newpage. | 
|  | 933 | */ | 
| Geert Uytterhoeven | ef2a515 | 2015-04-14 15:44:22 -0700 | [diff] [blame] | 934 | static ICE_noinline int unmap_and_move(new_page_t get_new_page, | 
|  | 935 | free_page_t put_new_page, | 
|  | 936 | unsigned long private, struct page *page, | 
| Naoya Horiguchi | add05ce | 2015-06-24 16:56:50 -0700 | [diff] [blame] | 937 | int force, enum migrate_mode mode, | 
|  | 938 | enum migrate_reason reason) | 
| Minchan Kim | 0dabec9 | 2011-10-31 17:06:57 -0700 | [diff] [blame] | 939 | { | 
|  | 940 | int rc = 0; | 
|  | 941 | int *result = NULL; | 
|  | 942 | struct page *newpage = get_new_page(page, private, &result); | 
|  | 943 |  | 
|  | 944 | if (!newpage) | 
|  | 945 | return -ENOMEM; | 
|  | 946 |  | 
|  | 947 | if (page_count(page) == 1) { | 
|  | 948 | /* page was freed from under us. So we are done. */ | 
|  | 949 | goto out; | 
|  | 950 | } | 
|  | 951 |  | 
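|  |  | /* | 
|  |  | * This path migrates base pages only: a transparent huge page is | 
|  |  | * split into normal pages before migration is attempted. | 
|  |  | */ | 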
|  | 952 | if (unlikely(PageTransHuge(page))) | 
|  | 953 | if (unlikely(split_huge_page(page))) | 
|  | 954 | goto out; | 
|  | 955 |  | 
| Hugh Dickins | 9c620e2 | 2013-02-22 16:35:14 -0800 | [diff] [blame] | 956 | rc = __unmap_and_move(page, newpage, force, mode); | 
| Rafael Aquini | bf6bddf | 2012-12-11 16:02:42 -0800 | [diff] [blame] | 957 |  | 
| Minchan Kim | 0dabec9 | 2011-10-31 17:06:57 -0700 | [diff] [blame] | 958 | out: | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 959 | if (rc != -EAGAIN) { | 
| Minchan Kim | 0dabec9 | 2011-10-31 17:06:57 -0700 | [diff] [blame] | 960 | /* | 
|  | 961 | * A page that has been migrated has all references | 
|  | 962 | * removed and will be freed. A page that has not been | 
|  | 963 | * migrated will have kept its references and be | 
|  | 964 | * restored. | 
|  | 965 | */ | 
|  | 966 | list_del(&page->lru); | 
| KOSAKI Motohiro | a731286 | 2009-09-21 17:01:37 -0700 | [diff] [blame] | 967 | dec_zone_page_state(page, NR_ISOLATED_ANON + | 
| Johannes Weiner | 6c0b135 | 2009-09-21 17:02:59 -0700 | [diff] [blame] | 968 | page_is_file_cache(page)); | 
| Naoya Horiguchi | f4c18e6 | 2015-08-06 15:47:08 -0700 | [diff] [blame] | 969 | /* Soft-offlined page shouldn't go through lru cache list */ | 
| Wanpeng Li | da1b13c | 2015-09-08 15:03:27 -0700 | [diff] [blame] | 970 | if (reason == MR_MEMORY_FAILURE) { | 
| Naoya Horiguchi | f4c18e6 | 2015-08-06 15:47:08 -0700 | [diff] [blame] | 971 | put_page(page); | 
| Wanpeng Li | da1b13c | 2015-09-08 15:03:27 -0700 | [diff] [blame] | 972 | if (!test_set_page_hwpoison(page)) | 
|  | 973 | num_poisoned_pages_inc(); | 
|  | 974 | } else | 
| Naoya Horiguchi | add05ce | 2015-06-24 16:56:50 -0700 | [diff] [blame] | 975 | putback_lru_page(page); | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 976 | } | 
| David Rientjes | 68711a7 | 2014-06-04 16:08:25 -0700 | [diff] [blame] | 977 |  | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 978 | /* | 
| David Rientjes | 68711a7 | 2014-06-04 16:08:25 -0700 | [diff] [blame] | 979 | * If migration was not successful and there's a freeing callback, use | 
|  | 980 | * it.  Otherwise, putback_lru_page() will drop the reference grabbed | 
|  | 981 | * during isolation. | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 982 | */ | 
| Hugh Dickins | 8bdd638 | 2014-07-26 12:58:23 -0700 | [diff] [blame] | 983 | if (rc != MIGRATEPAGE_SUCCESS && put_new_page) { | 
|  | 984 | ClearPageSwapBacked(newpage); | 
| David Rientjes | 68711a7 | 2014-06-04 16:08:25 -0700 | [diff] [blame] | 985 | put_new_page(newpage, private); | 
| Konstantin Khlebnikov | d6d86c0 | 2014-10-09 15:29:27 -0700 | [diff] [blame] | 986 | } else if (unlikely(__is_movable_balloon_page(newpage))) { | 
|  | 987 | /* drop our reference, page already in the balloon */ | 
|  | 988 | put_page(newpage); | 
| Hugh Dickins | 8bdd638 | 2014-07-26 12:58:23 -0700 | [diff] [blame] | 989 | } else | 
| David Rientjes | 68711a7 | 2014-06-04 16:08:25 -0700 | [diff] [blame] | 990 | putback_lru_page(newpage); | 
|  | 991 |  | 
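|  |  | /* | 
|  |  | * If the allocation callback supplied a status slot (see | 
|  |  | * new_page_node() used by move_pages(2)), record either the error or | 
|  |  | * the node the new page ended up on. | 
|  |  | */ | 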
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 992 | if (result) { | 
|  | 993 | if (rc) | 
|  | 994 | *result = rc; | 
|  | 995 | else | 
|  | 996 | *result = page_to_nid(newpage); | 
|  | 997 | } | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 998 | return rc; | 
|  | 999 | } | 
|  | 1000 |  | 
|  | 1001 | /* | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1002 | * Counterpart of unmap_and_move() for hugepage migration. | 
|  | 1003 | * | 
|  | 1004 | * This function doesn't wait for the completion of hugepage I/O | 
|  | 1005 | * because there is no race between I/O and migration for hugepage. | 
|  | 1006 | * Note that currently hugepage I/O occurs only in direct I/O | 
|  | 1007 | * where no lock is held and PG_writeback is irrelevant, | 
|  | 1008 | * and the writeback status of all subpages is counted in the reference | 
|  | 1009 | * count of the head page (i.e. if all subpages of a 2MB hugepage are | 
|  | 1010 | * under direct I/O, the reference of the head page is 512 and a bit more.) | 
|  | 1011 | * This means that when we try to migrate a hugepage whose subpages are | 
|  | 1012 | * doing direct I/O, some references remain after try_to_unmap() and | 
|  | 1013 | * hugepage migration fails without data corruption. | 
|  | 1014 | * | 
|  | 1015 | * There is also no race when direct I/O is issued on the page under migration, | 
|  | 1016 | * because then pte is replaced with migration swap entry and direct I/O code | 
|  | 1017 | * will wait in the page fault for migration to complete. | 
|  | 1018 | */ | 
|  | 1019 | static int unmap_and_move_huge_page(new_page_t get_new_page, | 
| David Rientjes | 68711a7 | 2014-06-04 16:08:25 -0700 | [diff] [blame] | 1020 | free_page_t put_new_page, unsigned long private, | 
|  | 1021 | struct page *hpage, int force, | 
|  | 1022 | enum migrate_mode mode) | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1023 | { | 
|  | 1024 | int rc = 0; | 
|  | 1025 | int *result = NULL; | 
| Hugh Dickins | 2ebba6b | 2014-12-12 16:56:19 -0800 | [diff] [blame] | 1026 | int page_was_mapped = 0; | 
| Joonsoo Kim | 32665f2 | 2014-01-21 15:51:15 -0800 | [diff] [blame] | 1027 | struct page *new_hpage; | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1028 | struct anon_vma *anon_vma = NULL; | 
|  | 1029 |  | 
| Naoya Horiguchi | 83467ef | 2013-09-11 14:22:11 -0700 | [diff] [blame] | 1030 | /* | 
|  | 1031 | * Movability of hugepages depends on architectures and hugepage size. | 
|  | 1032 | * This check is necessary because some callers of hugepage migration | 
|  | 1033 | * like soft offline and memory hotremove don't walk through page | 
|  | 1034 | * tables or check whether the hugepage is pmd-based or not before | 
|  | 1035 | * kicking migration. | 
|  | 1036 | */ | 
| Naoya Horiguchi | 100873d | 2014-06-04 16:10:56 -0700 | [diff] [blame] | 1037 | if (!hugepage_migration_supported(page_hstate(hpage))) { | 
| Joonsoo Kim | 32665f2 | 2014-01-21 15:51:15 -0800 | [diff] [blame] | 1038 | putback_active_hugepage(hpage); | 
| Naoya Horiguchi | 83467ef | 2013-09-11 14:22:11 -0700 | [diff] [blame] | 1039 | return -ENOSYS; | 
| Joonsoo Kim | 32665f2 | 2014-01-21 15:51:15 -0800 | [diff] [blame] | 1040 | } | 
| Naoya Horiguchi | 83467ef | 2013-09-11 14:22:11 -0700 | [diff] [blame] | 1041 |  | 
| Joonsoo Kim | 32665f2 | 2014-01-21 15:51:15 -0800 | [diff] [blame] | 1042 | new_hpage = get_new_page(hpage, private, &result); | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1043 | if (!new_hpage) | 
|  | 1044 | return -ENOMEM; | 
|  | 1045 |  | 
|  | 1046 | rc = -EAGAIN; | 
|  | 1047 |  | 
|  | 1048 | if (!trylock_page(hpage)) { | 
| Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 1049 | if (!force || mode != MIGRATE_SYNC) | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1050 | goto out; | 
|  | 1051 | lock_page(hpage); | 
|  | 1052 | } | 
|  | 1053 |  | 
| Peter Zijlstra | 746b18d | 2011-05-24 17:12:10 -0700 | [diff] [blame] | 1054 | if (PageAnon(hpage)) | 
|  | 1055 | anon_vma = page_get_anon_vma(hpage); | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1056 |  | 
| Hugh Dickins | 2ebba6b | 2014-12-12 16:56:19 -0800 | [diff] [blame] | 1057 | if (page_mapped(hpage)) { | 
|  | 1058 | try_to_unmap(hpage, | 
|  | 1059 | TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); | 
|  | 1060 | page_was_mapped = 1; | 
|  | 1061 | } | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1062 |  | 
|  | 1063 | if (!page_mapped(hpage)) | 
| Hugh Dickins | 2ebba6b | 2014-12-12 16:56:19 -0800 | [diff] [blame] | 1064 | rc = move_to_new_page(new_hpage, hpage, page_was_mapped, mode); | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1065 |  | 
| Hugh Dickins | 2ebba6b | 2014-12-12 16:56:19 -0800 | [diff] [blame] | 1066 | if (rc != MIGRATEPAGE_SUCCESS && page_was_mapped) | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1067 | remove_migration_ptes(hpage, hpage); | 
|  | 1068 |  | 
| Hugh Dickins | fd4a466 | 2011-01-13 15:47:31 -0800 | [diff] [blame] | 1069 | if (anon_vma) | 
| Peter Zijlstra | 9e60109 | 2011-03-22 16:32:46 -0700 | [diff] [blame] | 1070 | put_anon_vma(anon_vma); | 
| Aneesh Kumar K.V | 8e6ac7f | 2012-07-31 16:42:27 -0700 | [diff] [blame] | 1071 |  | 
| David Rientjes | 68711a7 | 2014-06-04 16:08:25 -0700 | [diff] [blame] | 1072 | if (rc == MIGRATEPAGE_SUCCESS) | 
| Aneesh Kumar K.V | 8e6ac7f | 2012-07-31 16:42:27 -0700 | [diff] [blame] | 1073 | hugetlb_cgroup_migrate(hpage, new_hpage); | 
|  | 1074 |  | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1075 | unlock_page(hpage); | 
| Hillf Danton | 0976133 | 2011-12-08 14:34:20 -0800 | [diff] [blame] | 1076 | out: | 
| Naoya Horiguchi | b8ec1ce | 2013-09-11 14:22:01 -0700 | [diff] [blame] | 1077 | if (rc != -EAGAIN) | 
|  | 1078 | putback_active_hugepage(hpage); | 
| David Rientjes | 68711a7 | 2014-06-04 16:08:25 -0700 | [diff] [blame] | 1079 |  | 
|  | 1080 | /* | 
|  | 1081 | * If migration was not successful and there's a freeing callback, use | 
|  | 1082 | * it.  Otherwise, put_page() will drop the reference grabbed during | 
|  | 1083 | * isolation. | 
|  | 1084 | */ | 
|  | 1085 | if (rc != MIGRATEPAGE_SUCCESS && put_new_page) | 
|  | 1086 | put_new_page(new_hpage, private); | 
|  | 1087 | else | 
| Naoya Horiguchi | 3aaa76e | 2015-09-22 14:59:14 -0700 | [diff] [blame] | 1088 | putback_active_hugepage(new_hpage); | 
| David Rientjes | 68711a7 | 2014-06-04 16:08:25 -0700 | [diff] [blame] | 1089 |  | 
| Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1090 | if (result) { | 
|  | 1091 | if (rc) | 
|  | 1092 | *result = rc; | 
|  | 1093 | else | 
|  | 1094 | *result = page_to_nid(new_hpage); | 
|  | 1095 | } | 
|  | 1096 | return rc; | 
|  | 1097 | } | 
|  | 1098 |  | 
|  | 1099 | /* | 
| Srivatsa S. Bhat | c73e5c9 | 2013-04-29 15:08:16 -0700 | [diff] [blame] | 1100 | * migrate_pages - migrate the pages specified in a list, to the free pages | 
|  | 1101 | *		   supplied as the target for the page migration | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1102 | * | 
| Srivatsa S. Bhat | c73e5c9 | 2013-04-29 15:08:16 -0700 | [diff] [blame] | 1103 | * @from:		The list of pages to be migrated. | 
|  | 1104 | * @get_new_page:	The function used to allocate free pages to be used | 
|  | 1105 | *			as the target of the page migration. | 
| David Rientjes | 68711a7 | 2014-06-04 16:08:25 -0700 | [diff] [blame] | 1106 | * @put_new_page:	The function used to free target pages if migration | 
|  | 1107 | *			fails, or NULL if no special handling is necessary. | 
| Srivatsa S. Bhat | c73e5c9 | 2013-04-29 15:08:16 -0700 | [diff] [blame] | 1108 | * @private:		Private data to be passed on to get_new_page() | 
|  | 1109 | * @mode:		The migration mode that specifies the constraints for | 
|  | 1110 | *			page migration, if any. | 
|  | 1111 | * @reason:		The reason for page migration. | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1112 | * | 
| Srivatsa S. Bhat | c73e5c9 | 2013-04-29 15:08:16 -0700 | [diff] [blame] | 1113 | * The function returns after 10 attempts or when no pages are movable any | 
|  | 1114 | * more, either because the list has become empty or because no retryable | 
|  | 1115 | * pages remain. The caller should call putback_movable_pages() to return | 
| Minchan Kim | 28bd657 | 2011-01-25 15:07:26 -0800 | [diff] [blame] | 1116 | * pages to the LRU or free list only if ret != 0. | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1117 | * | 
| Srivatsa S. Bhat | c73e5c9 | 2013-04-29 15:08:16 -0700 | [diff] [blame] | 1118 | * Returns the number of pages that were not migrated, or an error code. | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1119 | */ | 
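|  |  | /* | 
|  |  | * Caller sketch (mirrors do_move_page_to_node_array() below, not a | 
|  |  | * separate API): isolate the candidate pages onto a private list and | 
|  |  | * hand them over together with an allocation callback, e.g. | 
|  |  | * | 
|  |  | *	LIST_HEAD(pagelist); | 
|  |  | *	... isolate_lru_page() each candidate, list_add_tail() it ... | 
|  |  | *	err = migrate_pages(&pagelist, new_page_node, NULL, | 
|  |  | *			(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL); | 
|  |  | *	if (err) | 
|  |  | *		putback_movable_pages(&pagelist); | 
|  |  | */ | 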
| Hugh Dickins | 9c620e2 | 2013-02-22 16:35:14 -0800 | [diff] [blame] | 1120 | int migrate_pages(struct list_head *from, new_page_t get_new_page, | 
| David Rientjes | 68711a7 | 2014-06-04 16:08:25 -0700 | [diff] [blame] | 1121 | free_page_t put_new_page, unsigned long private, | 
|  | 1122 | enum migrate_mode mode, int reason) | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1123 | { | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 1124 | int retry = 1; | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1125 | int nr_failed = 0; | 
| Mel Gorman | 5647bc2 | 2012-10-19 10:46:20 +0100 | [diff] [blame] | 1126 | int nr_succeeded = 0; | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1127 | int pass = 0; | 
|  | 1128 | struct page *page; | 
|  | 1129 | struct page *page2; | 
|  | 1130 | int swapwrite = current->flags & PF_SWAPWRITE; | 
|  | 1131 | int rc; | 
|  | 1132 |  | 
|  | 1133 | if (!swapwrite) | 
|  | 1134 | current->flags |= PF_SWAPWRITE; | 
|  | 1135 |  | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 1136 | for(pass = 0; pass < 10 && retry; pass++) { | 
|  | 1137 | retry = 0; | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1138 |  | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 1139 | list_for_each_entry_safe(page, page2, from, lru) { | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 1140 | cond_resched(); | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1141 |  | 
| Naoya Horiguchi | 31caf66 | 2013-09-11 14:21:59 -0700 | [diff] [blame] | 1142 | if (PageHuge(page)) | 
|  | 1143 | rc = unmap_and_move_huge_page(get_new_page, | 
| David Rientjes | 68711a7 | 2014-06-04 16:08:25 -0700 | [diff] [blame] | 1144 | put_new_page, private, page, | 
|  | 1145 | pass > 2, mode); | 
| Naoya Horiguchi | 31caf66 | 2013-09-11 14:21:59 -0700 | [diff] [blame] | 1146 | else | 
| David Rientjes | 68711a7 | 2014-06-04 16:08:25 -0700 | [diff] [blame] | 1147 | rc = unmap_and_move(get_new_page, put_new_page, | 
| Naoya Horiguchi | add05ce | 2015-06-24 16:56:50 -0700 | [diff] [blame] | 1148 | private, page, pass > 2, mode, | 
|  | 1149 | reason); | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1150 |  | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 1151 | switch(rc) { | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 1152 | case -ENOMEM: | 
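|  |  | /* | 
|  |  | * Target page allocation failed: abort the whole run; the | 
|  |  | * remaining pages stay on the list for the caller to put back. | 
|  |  | */ | 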
|  | 1153 | goto out; | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 1154 | case -EAGAIN: | 
| Christoph Lameter | 2d1db3b | 2006-06-23 02:03:33 -0700 | [diff] [blame] | 1155 | retry++; | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 1156 | break; | 
| Rafael Aquini | 78bd520 | 2012-12-11 16:02:31 -0800 | [diff] [blame] | 1157 | case MIGRATEPAGE_SUCCESS: | 
| Mel Gorman | 5647bc2 | 2012-10-19 10:46:20 +0100 | [diff] [blame] | 1158 | nr_succeeded++; | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 1159 | break; | 
|  | 1160 | default: | 
| Naoya Horiguchi | 354a336 | 2014-01-21 15:51:14 -0800 | [diff] [blame] | 1161 | /* | 
|  | 1162 | * Permanent failure (-EBUSY, -ENOSYS, etc.): | 
|  | 1163 | * unlike -EAGAIN case, the failed page is | 
|  | 1164 | * removed from migration page list and not | 
|  | 1165 | * retried in the next outer loop. | 
|  | 1166 | */ | 
| Christoph Lameter | 2d1db3b | 2006-06-23 02:03:33 -0700 | [diff] [blame] | 1167 | nr_failed++; | 
| Christoph Lameter | e24f0b8 | 2006-06-23 02:03:51 -0700 | [diff] [blame] | 1168 | break; | 
| Christoph Lameter | 2d1db3b | 2006-06-23 02:03:33 -0700 | [diff] [blame] | 1169 | } | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1170 | } | 
|  | 1171 | } | 
| Rafael Aquini | 78bd520 | 2012-12-11 16:02:31 -0800 | [diff] [blame] | 1172 | rc = nr_failed + retry; | 
| Christoph Lameter | 95a402c | 2006-06-23 02:03:53 -0700 | [diff] [blame] | 1173 | out: | 
| Mel Gorman | 5647bc2 | 2012-10-19 10:46:20 +0100 | [diff] [blame] | 1174 | if (nr_succeeded) | 
|  | 1175 | count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded); | 
|  | 1176 | if (nr_failed) | 
|  | 1177 | count_vm_events(PGMIGRATE_FAIL, nr_failed); | 
| Mel Gorman | 7b2a2d4 | 2012-10-19 14:07:31 +0100 | [diff] [blame] | 1178 | trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason); | 
|  | 1179 |  | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1180 | if (!swapwrite) | 
|  | 1181 | current->flags &= ~PF_SWAPWRITE; | 
|  | 1182 |  | 
| Rafael Aquini | 78bd520 | 2012-12-11 16:02:31 -0800 | [diff] [blame] | 1183 | return rc; | 
| Christoph Lameter | b20a350 | 2006-03-22 00:09:12 -0800 | [diff] [blame] | 1184 | } | 
|  | 1185 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1186 | #ifdef CONFIG_NUMA | 
|  | 1187 | /* | 
|  | 1188 | * Move a list of individual pages | 
|  | 1189 | */ | 
|  | 1190 | struct page_to_node { | 
|  | 1191 | unsigned long addr; | 
|  | 1192 | struct page *page; | 
|  | 1193 | int node; | 
|  | 1194 | int status; | 
|  | 1195 | }; | 
|  | 1196 |  | 
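|  |  | /* | 
|  |  | * Allocation callback for migrate_pages(): scan the page_to_node array | 
|  |  | * passed via @private for the entry describing @p, point *result at its | 
|  |  | * status field, and allocate the new page on the requested node. | 
|  |  | */ | 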
|  | 1197 | static struct page *new_page_node(struct page *p, unsigned long private, | 
|  | 1198 | int **result) | 
|  | 1199 | { | 
|  | 1200 | struct page_to_node *pm = (struct page_to_node *)private; | 
|  | 1201 |  | 
|  | 1202 | while (pm->node != MAX_NUMNODES && pm->page != p) | 
|  | 1203 | pm++; | 
|  | 1204 |  | 
|  | 1205 | if (pm->node == MAX_NUMNODES) | 
|  | 1206 | return NULL; | 
|  | 1207 |  | 
|  | 1208 | *result = &pm->status; | 
|  | 1209 |  | 
| Naoya Horiguchi | e632a93 | 2013-09-11 14:22:04 -0700 | [diff] [blame] | 1210 | if (PageHuge(p)) | 
|  | 1211 | return alloc_huge_page_node(page_hstate(compound_head(p)), | 
|  | 1212 | pm->node); | 
|  | 1213 | else | 
| Vlastimil Babka | 96db800 | 2015-09-08 15:03:50 -0700 | [diff] [blame] | 1214 | return __alloc_pages_node(pm->node, | 
| Johannes Weiner | e97ca8e | 2014-03-10 15:49:43 -0700 | [diff] [blame] | 1215 | GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1216 | } | 
|  | 1217 |  | 
|  | 1218 | /* | 
|  | 1219 | * Move a set of pages as indicated in the pm array. The addr | 
|  | 1220 | * field must be set to the virtual address of the page to be moved | 
|  | 1221 | * and the node number must contain a valid target node. | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1222 | * The pm array ends with node = MAX_NUMNODES. | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1223 | */ | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1224 | static int do_move_page_to_node_array(struct mm_struct *mm, | 
|  | 1225 | struct page_to_node *pm, | 
|  | 1226 | int migrate_all) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1227 | { | 
|  | 1228 | int err; | 
|  | 1229 | struct page_to_node *pp; | 
|  | 1230 | LIST_HEAD(pagelist); | 
|  | 1231 |  | 
|  | 1232 | down_read(&mm->mmap_sem); | 
|  | 1233 |  | 
|  | 1234 | /* | 
|  | 1235 | * Build a list of pages to migrate | 
|  | 1236 | */ | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1237 | for (pp = pm; pp->node != MAX_NUMNODES; pp++) { | 
|  | 1238 | struct vm_area_struct *vma; | 
|  | 1239 | struct page *page; | 
|  | 1240 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1241 | err = -EFAULT; | 
|  | 1242 | vma = find_vma(mm, pp->addr); | 
| Gleb Natapov | 70384dc | 2010-10-26 14:22:07 -0700 | [diff] [blame] | 1243 | if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma)) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1244 | goto set_status; | 
|  | 1245 |  | 
| Kirill A. Shutemov | d899844 | 2015-09-04 15:47:53 -0700 | [diff] [blame] | 1246 | /* FOLL_DUMP to ignore special (like zero) pages */ | 
|  | 1247 | page = follow_page(vma, pp->addr, | 
|  | 1248 | FOLL_GET | FOLL_SPLIT | FOLL_DUMP); | 
| Linus Torvalds | 89f5b7d | 2008-06-20 11:18:25 -0700 | [diff] [blame] | 1249 |  | 
|  | 1250 | err = PTR_ERR(page); | 
|  | 1251 | if (IS_ERR(page)) | 
|  | 1252 | goto set_status; | 
|  | 1253 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1254 | err = -ENOENT; | 
|  | 1255 | if (!page) | 
|  | 1256 | goto set_status; | 
|  | 1257 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1258 | pp->page = page; | 
|  | 1259 | err = page_to_nid(page); | 
|  | 1260 |  | 
|  | 1261 | if (err == pp->node) | 
|  | 1262 | /* | 
|  | 1263 | * Node already in the right place | 
|  | 1264 | */ | 
|  | 1265 | goto put_and_set; | 
|  | 1266 |  | 
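|  |  | /* | 
|  |  | * A page mapped more than once (i.e. shared with other processes) is | 
|  |  | * only moved when the caller asked for MPOL_MF_MOVE_ALL (which | 
|  |  | * requires CAP_SYS_NICE, checked in the move_pages() syscall). | 
|  |  | */ | 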
|  | 1267 | err = -EACCES; | 
|  | 1268 | if (page_mapcount(page) > 1 && | 
|  | 1269 | !migrate_all) | 
|  | 1270 | goto put_and_set; | 
|  | 1271 |  | 
| Naoya Horiguchi | e632a93 | 2013-09-11 14:22:04 -0700 | [diff] [blame] | 1272 | if (PageHuge(page)) { | 
| Naoya Horiguchi | e66f17f | 2015-02-11 15:25:22 -0800 | [diff] [blame] | 1273 | if (PageHead(page)) | 
|  | 1274 | isolate_huge_page(page, &pagelist); | 
| Naoya Horiguchi | e632a93 | 2013-09-11 14:22:04 -0700 | [diff] [blame] | 1275 | goto put_and_set; | 
|  | 1276 | } | 
|  | 1277 |  | 
| Nick Piggin | 62695a8 | 2008-10-18 20:26:09 -0700 | [diff] [blame] | 1278 | err = isolate_lru_page(page); | 
| KOSAKI Motohiro | 6d9c285 | 2009-12-14 17:58:11 -0800 | [diff] [blame] | 1279 | if (!err) { | 
| Nick Piggin | 62695a8 | 2008-10-18 20:26:09 -0700 | [diff] [blame] | 1280 | list_add_tail(&page->lru, &pagelist); | 
| KOSAKI Motohiro | 6d9c285 | 2009-12-14 17:58:11 -0800 | [diff] [blame] | 1281 | inc_zone_page_state(page, NR_ISOLATED_ANON + | 
|  | 1282 | page_is_file_cache(page)); | 
|  | 1283 | } | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1284 | put_and_set: | 
|  | 1285 | /* | 
|  | 1286 | * Either remove the duplicate refcount from | 
|  | 1287 | * isolate_lru_page() or drop the page ref if it was | 
|  | 1288 | * not isolated. | 
|  | 1289 | */ | 
|  | 1290 | put_page(page); | 
|  | 1291 | set_status: | 
|  | 1292 | pp->status = err; | 
|  | 1293 | } | 
|  | 1294 |  | 
| Brice Goglin | e78bbfa | 2008-10-18 20:27:15 -0700 | [diff] [blame] | 1295 | err = 0; | 
| Minchan Kim | cf608ac | 2010-10-26 14:21:29 -0700 | [diff] [blame] | 1296 | if (!list_empty(&pagelist)) { | 
| David Rientjes | 68711a7 | 2014-06-04 16:08:25 -0700 | [diff] [blame] | 1297 | err = migrate_pages(&pagelist, new_page_node, NULL, | 
| Hugh Dickins | 9c620e2 | 2013-02-22 16:35:14 -0800 | [diff] [blame] | 1298 | (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL); | 
| Minchan Kim | cf608ac | 2010-10-26 14:21:29 -0700 | [diff] [blame] | 1299 | if (err) | 
| Naoya Horiguchi | e632a93 | 2013-09-11 14:22:04 -0700 | [diff] [blame] | 1300 | putback_movable_pages(&pagelist); | 
| Minchan Kim | cf608ac | 2010-10-26 14:21:29 -0700 | [diff] [blame] | 1301 | } | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1302 |  | 
|  | 1303 | up_read(&mm->mmap_sem); | 
|  | 1304 | return err; | 
|  | 1305 | } | 
|  | 1306 |  | 
|  | 1307 | /* | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1308 | * Migrate an array of page addresses onto an array of nodes and fill | 
|  | 1309 | * the corresponding array of status. | 
|  | 1310 | */ | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1311 | static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1312 | unsigned long nr_pages, | 
|  | 1313 | const void __user * __user *pages, | 
|  | 1314 | const int __user *nodes, | 
|  | 1315 | int __user *status, int flags) | 
|  | 1316 | { | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1317 | struct page_to_node *pm; | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1318 | unsigned long chunk_nr_pages; | 
|  | 1319 | unsigned long chunk_start; | 
|  | 1320 | int err; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1321 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1322 | err = -ENOMEM; | 
|  | 1323 | pm = (struct page_to_node *)__get_free_page(GFP_KERNEL); | 
|  | 1324 | if (!pm) | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1325 | goto out; | 
| Brice Goglin | 35282a2 | 2009-06-16 15:32:43 -0700 | [diff] [blame] | 1326 |  | 
|  | 1327 | migrate_prep(); | 
|  | 1328 |  | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1329 | /* | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1330 | * Store a chunk of page_to_node array in a page, | 
|  | 1331 | * but keep the last one as a marker | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1332 | */ | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1333 | chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1334 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1335 | for (chunk_start = 0; | 
|  | 1336 | chunk_start < nr_pages; | 
|  | 1337 | chunk_start += chunk_nr_pages) { | 
|  | 1338 | int j; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1339 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1340 | if (chunk_start + chunk_nr_pages > nr_pages) | 
|  | 1341 | chunk_nr_pages = nr_pages - chunk_start; | 
|  | 1342 |  | 
|  | 1343 | /* fill the chunk pm with addrs and nodes from user-space */ | 
|  | 1344 | for (j = 0; j < chunk_nr_pages; j++) { | 
|  | 1345 | const void __user *p; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1346 | int node; | 
|  | 1347 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1348 | err = -EFAULT; | 
|  | 1349 | if (get_user(p, pages + j + chunk_start)) | 
|  | 1350 | goto out_pm; | 
|  | 1351 | pm[j].addr = (unsigned long) p; | 
|  | 1352 |  | 
|  | 1353 | if (get_user(node, nodes + j + chunk_start)) | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1354 | goto out_pm; | 
|  | 1355 |  | 
|  | 1356 | err = -ENODEV; | 
| Linus Torvalds | 6f5a55f | 2010-02-05 16:16:50 -0800 | [diff] [blame] | 1357 | if (node < 0 || node >= MAX_NUMNODES) | 
|  | 1358 | goto out_pm; | 
|  | 1359 |  | 
| Lai Jiangshan | 389162c | 2012-12-12 13:51:30 -0800 | [diff] [blame] | 1360 | if (!node_state(node, N_MEMORY)) | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1361 | goto out_pm; | 
|  | 1362 |  | 
|  | 1363 | err = -EACCES; | 
|  | 1364 | if (!node_isset(node, task_nodes)) | 
|  | 1365 | goto out_pm; | 
|  | 1366 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1367 | pm[j].node = node; | 
|  | 1368 | } | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1369 |  | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1370 | /* End marker for this chunk */ | 
|  | 1371 | pm[chunk_nr_pages].node = MAX_NUMNODES; | 
|  | 1372 |  | 
|  | 1373 | /* Migrate this chunk */ | 
|  | 1374 | err = do_move_page_to_node_array(mm, pm, | 
|  | 1375 | flags & MPOL_MF_MOVE_ALL); | 
|  | 1376 | if (err < 0) | 
|  | 1377 | goto out_pm; | 
|  | 1378 |  | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1379 | /* Return status information */ | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1380 | for (j = 0; j < chunk_nr_pages; j++) | 
|  | 1381 | if (put_user(pm[j].status, status + j + chunk_start)) { | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1382 | err = -EFAULT; | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1383 | goto out_pm; | 
|  | 1384 | } | 
|  | 1385 | } | 
|  | 1386 | err = 0; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1387 |  | 
|  | 1388 | out_pm: | 
| Brice Goglin | 3140a22 | 2009-01-06 14:38:57 -0800 | [diff] [blame] | 1389 | free_page((unsigned long)pm); | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1390 | out: | 
|  | 1391 | return err; | 
|  | 1392 | } | 
|  | 1393 |  | 
|  | 1394 | /* | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1395 | * Determine the nodes of an array of pages and store them in an array of statuses. | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1396 | */ | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1397 | static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, | 
|  | 1398 | const void __user **pages, int *status) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1399 | { | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1400 | unsigned long i; | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1401 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1402 | down_read(&mm->mmap_sem); | 
|  | 1403 |  | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1404 | for (i = 0; i < nr_pages; i++) { | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1405 | unsigned long addr = (unsigned long)(*pages); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1406 | struct vm_area_struct *vma; | 
|  | 1407 | struct page *page; | 
| KOSAKI Motohiro | c095adb | 2008-12-16 16:06:43 +0900 | [diff] [blame] | 1408 | int err = -EFAULT; | 
| Brice Goglin | 2f007e7 | 2008-10-18 20:27:16 -0700 | [diff] [blame] | 1409 |  | 
|  | 1410 | vma = find_vma(mm, addr); | 
| Gleb Natapov | 70384dc | 2010-10-26 14:22:07 -0700 | [diff] [blame] | 1411 | if (!vma || addr < vma->vm_start) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1412 | goto set_status; | 
|  | 1413 |  | 
| Kirill A. Shutemov | d899844 | 2015-09-04 15:47:53 -0700 | [diff] [blame] | 1414 | /* FOLL_DUMP to ignore special (like zero) pages */ | 
|  | 1415 | page = follow_page(vma, addr, FOLL_DUMP); | 
| Linus Torvalds | 89f5b7d | 2008-06-20 11:18:25 -0700 | [diff] [blame] | 1416 |  | 
|  | 1417 | err = PTR_ERR(page); | 
|  | 1418 | if (IS_ERR(page)) | 
|  | 1419 | goto set_status; | 
|  | 1420 |  | 
| Kirill A. Shutemov | d899844 | 2015-09-04 15:47:53 -0700 | [diff] [blame] | 1421 | err = page ? page_to_nid(page) : -ENOENT; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1422 | set_status: | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1423 | *status = err; | 
|  | 1424 |  | 
|  | 1425 | pages++; | 
|  | 1426 | status++; | 
|  | 1427 | } | 
|  | 1428 |  | 
|  | 1429 | up_read(&mm->mmap_sem); | 
|  | 1430 | } | 
|  | 1431 |  | 
|  | 1432 | /* | 
|  | 1433 | * Determine the nodes of a user array of pages and store them in | 
|  | 1434 | * a user array of statuses. | 
|  | 1435 | */ | 
|  | 1436 | static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, | 
|  | 1437 | const void __user * __user *pages, | 
|  | 1438 | int __user *status) | 
|  | 1439 | { | 
|  | 1440 | #define DO_PAGES_STAT_CHUNK_NR 16 | 
|  | 1441 | const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; | 
|  | 1442 | int chunk_status[DO_PAGES_STAT_CHUNK_NR]; | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1443 |  | 
| H. Peter Anvin | 87b8d1a | 2010-02-18 16:13:40 -0800 | [diff] [blame] | 1444 | while (nr_pages) { | 
|  | 1445 | unsigned long chunk_nr; | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1446 |  | 
| H. Peter Anvin | 87b8d1a | 2010-02-18 16:13:40 -0800 | [diff] [blame] | 1447 | chunk_nr = nr_pages; | 
|  | 1448 | if (chunk_nr > DO_PAGES_STAT_CHUNK_NR) | 
|  | 1449 | chunk_nr = DO_PAGES_STAT_CHUNK_NR; | 
|  | 1450 |  | 
|  | 1451 | if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages))) | 
|  | 1452 | break; | 
| Brice Goglin | 80bba12 | 2008-12-09 13:14:23 -0800 | [diff] [blame] | 1453 |  | 
|  | 1454 | do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); | 
|  | 1455 |  | 
| H. Peter Anvin | 87b8d1a | 2010-02-18 16:13:40 -0800 | [diff] [blame] | 1456 | if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status))) | 
|  | 1457 | break; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1458 |  | 
| H. Peter Anvin | 87b8d1a | 2010-02-18 16:13:40 -0800 | [diff] [blame] | 1459 | pages += chunk_nr; | 
|  | 1460 | status += chunk_nr; | 
|  | 1461 | nr_pages -= chunk_nr; | 
|  | 1462 | } | 
|  | 1463 | return nr_pages ? -EFAULT : 0; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1464 | } | 
|  | 1465 |  | 
|  | 1466 | /* | 
|  | 1467 | * Move a list of pages in the address space of a given process (or of | 
|  | 1468 | * the current process when @pid is 0). | 
|  | 1469 | */ | 
| Heiko Carstens | 938bb9f | 2009-01-14 14:14:30 +0100 | [diff] [blame] | 1470 | SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, | 
|  | 1471 | const void __user * __user *, pages, | 
|  | 1472 | const int __user *, nodes, | 
|  | 1473 | int __user *, status, int, flags) | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1474 | { | 
| David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1475 | const struct cred *cred = current_cred(), *tcred; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1476 | struct task_struct *task; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1477 | struct mm_struct *mm; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1478 | int err; | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1479 | nodemask_t task_nodes; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1480 |  | 
|  | 1481 | /* Check flags */ | 
|  | 1482 | if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL)) | 
|  | 1483 | return -EINVAL; | 
|  | 1484 |  | 
|  | 1485 | if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) | 
|  | 1486 | return -EPERM; | 
|  | 1487 |  | 
|  | 1488 | /* Find the mm_struct */ | 
| Greg Thelen | a879bf5 | 2011-02-25 14:44:13 -0800 | [diff] [blame] | 1489 | rcu_read_lock(); | 
| Pavel Emelyanov | 228ebcb | 2007-10-18 23:40:16 -0700 | [diff] [blame] | 1490 | task = pid ? find_task_by_vpid(pid) : current; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1491 | if (!task) { | 
| Greg Thelen | a879bf5 | 2011-02-25 14:44:13 -0800 | [diff] [blame] | 1492 | rcu_read_unlock(); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1493 | return -ESRCH; | 
|  | 1494 | } | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1495 | get_task_struct(task); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1496 |  | 
|  | 1497 | /* | 
|  | 1498 | * Check if this process has the right to modify the specified | 
|  | 1499 | * process. The right exists if the process has administrative | 
|  | 1500 | * capabilities, superuser privileges or the same | 
|  | 1501 | * userid as the target process. | 
|  | 1502 | */ | 
| David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1503 | tcred = __task_cred(task); | 
| Eric W. Biederman | b38a86e | 2012-03-12 15:48:24 -0700 | [diff] [blame] | 1504 | if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) && | 
|  | 1505 | !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) && | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1506 | !capable(CAP_SYS_NICE)) { | 
| David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1507 | rcu_read_unlock(); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1508 | err = -EPERM; | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1509 | goto out; | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1510 | } | 
| David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1511 | rcu_read_unlock(); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1512 |  | 
| David Quigley | 86c3a76 | 2006-06-23 02:04:02 -0700 | [diff] [blame] | 1513 | err = security_task_movememory(task); | 
|  | 1514 | if (err) | 
| Brice Goglin | 5e9a0f0 | 2008-10-18 20:27:17 -0700 | [diff] [blame] | 1515 | goto out; | 
| David Quigley | 86c3a76 | 2006-06-23 02:04:02 -0700 | [diff] [blame] | 1516 |  | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1517 | task_nodes = cpuset_mems_allowed(task); | 
|  | 1518 | mm = get_task_mm(task); | 
|  | 1519 | put_task_struct(task); | 
|  | 1520 |  | 
| Sasha Levin | 6e8b09e | 2012-04-25 16:01:53 -0700 | [diff] [blame] | 1521 | if (!mm) | 
|  | 1522 | return -EINVAL; | 
|  | 1523 |  | 
|  | 1524 | if (nodes) | 
|  | 1525 | err = do_pages_move(mm, task_nodes, nr_pages, pages, | 
|  | 1526 | nodes, status, flags); | 
|  | 1527 | else | 
|  | 1528 | err = do_pages_stat(mm, nr_pages, pages, status); | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1529 |  | 
|  | 1530 | mmput(mm); | 
|  | 1531 | return err; | 
| David Quigley | 86c3a76 | 2006-06-23 02:04:02 -0700 | [diff] [blame] | 1532 |  | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1533 | out: | 
| Christoph Lameter | 3268c63 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 1534 | put_task_struct(task); | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1535 | return err; | 
|  | 1536 | } | 
| Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1537 |  | 
| Peter Zijlstra | 7039e1d | 2012-10-25 14:16:34 +0200 | [diff] [blame] | 1538 | #ifdef CONFIG_NUMA_BALANCING | 
|  | 1539 | /* | 
|  | 1540 | * Returns true if this is a safe migration target node for misplaced NUMA | 
|  | 1541 | * pages. Currently it only checks the watermarks, which is crude. | 
|  | 1542 | */ | 
|  | 1543 | static bool migrate_balanced_pgdat(struct pglist_data *pgdat, | 
| Mel Gorman | 3abef4e | 2013-02-22 16:34:27 -0800 | [diff] [blame] | 1544 | unsigned long nr_migrate_pages) | 
| Peter Zijlstra | 7039e1d | 2012-10-25 14:16:34 +0200 | [diff] [blame] | 1545 | { | 
|  | 1546 | int z; | 
|  | 1547 | for (z = pgdat->nr_zones - 1; z >= 0; z--) { | 
|  | 1548 | struct zone *zone = pgdat->node_zones + z; | 
|  | 1549 |  | 
|  | 1550 | if (!populated_zone(zone)) | 
|  | 1551 | continue; | 
|  | 1552 |  | 
| Lisa Du | 6e543d5 | 2013-09-11 14:22:36 -0700 | [diff] [blame] | 1553 | if (!zone_reclaimable(zone)) | 
| Peter Zijlstra | 7039e1d | 2012-10-25 14:16:34 +0200 | [diff] [blame] | 1554 | continue; | 
|  | 1555 |  | 
|  | 1556 | /* Avoid waking kswapd by allocating pages_to_migrate pages. */ | 
|  | 1557 | if (!zone_watermark_ok(zone, 0, | 
|  | 1558 | high_wmark_pages(zone) + | 
|  | 1559 | nr_migrate_pages, | 
|  | 1560 | 0, 0)) | 
|  | 1561 | continue; | 
|  | 1562 | return true; | 
|  | 1563 | } | 
|  | 1564 | return false; | 
|  | 1565 | } | 
|  | 1566 |  | 
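|  |  | /* | 
|  |  | * Allocation callback for NUMA-balancing migration: allocate the new | 
|  |  | * page strictly on the target node (__GFP_THISNODE) and fail quietly | 
|  |  | * under pressure (__GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC, | 
|  |  | * IO/FS reclaim masked off) rather than stalling the hinting fault. | 
|  |  | */ | 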
|  | 1567 | static struct page *alloc_misplaced_dst_page(struct page *page, | 
|  | 1568 | unsigned long data, | 
|  | 1569 | int **result) | 
|  | 1570 | { | 
|  | 1571 | int nid = (int) data; | 
|  | 1572 | struct page *newpage; | 
|  | 1573 |  | 
| Vlastimil Babka | 96db800 | 2015-09-08 15:03:50 -0700 | [diff] [blame] | 1574 | newpage = __alloc_pages_node(nid, | 
| Johannes Weiner | e97ca8e | 2014-03-10 15:49:43 -0700 | [diff] [blame] | 1575 | (GFP_HIGHUSER_MOVABLE | | 
|  | 1576 | __GFP_THISNODE | __GFP_NOMEMALLOC | | 
|  | 1577 | __GFP_NORETRY | __GFP_NOWARN) & | 
| Peter Zijlstra | 7039e1d | 2012-10-25 14:16:34 +0200 | [diff] [blame] | 1578 | ~GFP_IOFS, 0); | 
| Hillf Danton | bac0382 | 2012-11-27 14:46:24 +0000 | [diff] [blame] | 1579 |  | 
| Peter Zijlstra | 7039e1d | 2012-10-25 14:16:34 +0200 | [diff] [blame] | 1580 | return newpage; | 
|  | 1581 | } | 
|  | 1582 |  | 
|  | 1583 | /* | 
| Mel Gorman | a8f6077 | 2012-11-14 21:41:46 +0000 | [diff] [blame] | 1584 | * page migration rate limiting control. | 
|  | 1585 | * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs | 
|  | 1586 | * window of time. Default here says do not migrate more than 1280M per second. | 
|  | 1587 | */ | 
|  | 1588 | static unsigned int migrate_interval_millisecs __read_mostly = 100; | 
|  | 1589 | static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT); | 
|  | 1590 |  | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1591 | /* Returns true if the node is migrate rate-limited after the update */ | 
| Mel Gorman | 1c30e01 | 2014-01-21 15:50:58 -0800 | [diff] [blame] | 1592 | static bool numamigrate_update_ratelimit(pg_data_t *pgdat, | 
|  | 1593 | unsigned long nr_pages) | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1594 | { | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1595 | /* | 
|  | 1596 | * Rate-limit the amount of data that is being migrated to a node. | 
|  | 1597 | * Optimal placement is no good if the memory bus is saturated and | 
|  | 1598 | * all the time is being spent migrating! | 
|  | 1599 | */ | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1600 | if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) { | 
| Mel Gorman | 1c5e9c2 | 2014-01-21 15:50:59 -0800 | [diff] [blame] | 1601 | spin_lock(&pgdat->numabalancing_migrate_lock); | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1602 | pgdat->numabalancing_migrate_nr_pages = 0; | 
|  | 1603 | pgdat->numabalancing_migrate_next_window = jiffies + | 
|  | 1604 | msecs_to_jiffies(migrate_interval_millisecs); | 
| Mel Gorman | 1c5e9c2 | 2014-01-21 15:50:59 -0800 | [diff] [blame] | 1605 | spin_unlock(&pgdat->numabalancing_migrate_lock); | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1606 | } | 
| Mel Gorman | af1839d | 2014-01-21 15:51:01 -0800 | [diff] [blame] | 1607 | if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) { | 
|  | 1608 | trace_mm_numa_migrate_ratelimit(current, pgdat->node_id, | 
|  | 1609 | nr_pages); | 
| Mel Gorman | 1c5e9c2 | 2014-01-21 15:50:59 -0800 | [diff] [blame] | 1610 | return true; | 
| Mel Gorman | af1839d | 2014-01-21 15:51:01 -0800 | [diff] [blame] | 1611 | } | 
| Mel Gorman | 1c5e9c2 | 2014-01-21 15:50:59 -0800 | [diff] [blame] | 1612 |  | 
|  | 1613 | /* | 
|  | 1614 | * This is an unlocked non-atomic update so errors are possible. | 
|  | 1615 | * The consequence is failing to migrate when we potentially should | 
|  | 1616 | * have, which is not severe enough to warrant locking. If it is ever | 
|  | 1617 | * a problem, it can be converted to a per-cpu counter. | 
|  | 1618 | */ | 
|  | 1619 | pgdat->numabalancing_migrate_nr_pages += nr_pages; | 
|  | 1620 | return false; | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1621 | } | 
|  | 1622 |  | 
| Mel Gorman | 1c30e01 | 2014-01-21 15:50:58 -0800 | [diff] [blame] | 1623 | static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1624 | { | 
| Hugh Dickins | 340ef39 | 2013-02-22 16:34:33 -0800 | [diff] [blame] | 1625 | int page_lru; | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1626 |  | 
| Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 1627 | VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); | 
| Mel Gorman | 3abef4e | 2013-02-22 16:34:27 -0800 | [diff] [blame] | 1628 |  | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1629 | /* Avoid migrating to a node that is nearly full */ | 
| Hugh Dickins | 340ef39 | 2013-02-22 16:34:33 -0800 | [diff] [blame] | 1630 | if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page))) | 
|  | 1631 | return 0; | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1632 |  | 
| Hugh Dickins | 340ef39 | 2013-02-22 16:34:33 -0800 | [diff] [blame] | 1633 | if (isolate_lru_page(page)) | 
|  | 1634 | return 0; | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1635 |  | 
|  | 1636 | /* | 
| Hugh Dickins | 340ef39 | 2013-02-22 16:34:33 -0800 | [diff] [blame] | 1637 | * migrate_misplaced_transhuge_page() skips page migration's usual | 
|  | 1638 | * check on page_count(), so we must do it here, now that the page | 
|  | 1639 | * has been isolated: a GUP pin, or any other pin, prevents migration. | 
|  | 1640 | * The expected page count is 3: 1 for page's mapcount and 1 for the | 
|  | 1641 | * caller's pin and 1 for the reference taken by isolate_lru_page(). | 
|  | 1642 | */ | 
|  | 1643 | if (PageTransHuge(page) && page_count(page) != 3) { | 
|  | 1644 | putback_lru_page(page); | 
|  | 1645 | return 0; | 
|  | 1646 | } | 
|  | 1647 |  | 
|  | 1648 | page_lru = page_is_file_cache(page); | 
|  | 1649 | mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru, | 
|  | 1650 | hpage_nr_pages(page)); | 
|  | 1651 |  | 
|  | 1652 | /* | 
|  | 1653 | * Isolating the page has taken another reference, so the | 
|  | 1654 | * caller's reference can be safely dropped without the page | 
|  | 1655 | * disappearing underneath us during migration. | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1656 | */ | 
|  | 1657 | put_page(page); | 
| Hugh Dickins | 340ef39 | 2013-02-22 16:34:33 -0800 | [diff] [blame] | 1658 | return 1; | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1659 | } | 
|  | 1660 |  | 
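|  |  | /* | 
|  |  | * While migrate_misplaced_transhuge_page() is in progress the pmd | 
|  |  | * always points at a locked page (first the original THP, then the | 
|  |  | * locked destination page), so the NUMA hinting fault path uses the | 
|  |  | * page lock to detect an in-flight migration and wait for it. | 
|  |  | */ | 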
| Mel Gorman | de466bd | 2013-12-18 17:08:42 -0800 | [diff] [blame] | 1661 | bool pmd_trans_migrating(pmd_t pmd) | 
|  | 1662 | { | 
|  | 1663 | struct page *page = pmd_page(pmd); | 
|  | 1664 | return PageLocked(page); | 
|  | 1665 | } | 
|  | 1666 |  | 
| Mel Gorman | a8f6077 | 2012-11-14 21:41:46 +0000 | [diff] [blame] | 1667 | /* | 
| Peter Zijlstra | 7039e1d | 2012-10-25 14:16:34 +0200 | [diff] [blame] | 1668 | * Attempt to migrate a misplaced page to the specified destination | 
|  | 1669 | * node. Caller is expected to have an elevated reference count on | 
|  | 1670 | * the page that will be dropped by this function before returning. | 
|  | 1671 | */ | 
| Mel Gorman | 1bc115d | 2013-10-07 11:29:05 +0100 | [diff] [blame] | 1672 | int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, | 
|  | 1673 | int node) | 
| Peter Zijlstra | 7039e1d | 2012-10-25 14:16:34 +0200 | [diff] [blame] | 1674 | { | 
| Mel Gorman | a8f6077 | 2012-11-14 21:41:46 +0000 | [diff] [blame] | 1675 | pg_data_t *pgdat = NODE_DATA(node); | 
| Hugh Dickins | 340ef39 | 2013-02-22 16:34:33 -0800 | [diff] [blame] | 1676 | int isolated; | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1677 | int nr_remaining; | 
| Peter Zijlstra | 7039e1d | 2012-10-25 14:16:34 +0200 | [diff] [blame] | 1678 | LIST_HEAD(migratepages); | 
|  | 1679 |  | 
|  | 1680 | /* | 
| Mel Gorman | 1bc115d | 2013-10-07 11:29:05 +0100 | [diff] [blame] | 1681 | * Don't migrate file pages that are mapped in multiple processes | 
|  | 1682 | * with execute permissions as they are probably shared libraries. | 
| Peter Zijlstra | 7039e1d | 2012-10-25 14:16:34 +0200 | [diff] [blame] | 1683 | */ | 
| Mel Gorman | 1bc115d | 2013-10-07 11:29:05 +0100 | [diff] [blame] | 1684 | if (page_mapcount(page) != 1 && page_is_file_cache(page) && | 
|  | 1685 | (vma->vm_flags & VM_EXEC)) | 
| Peter Zijlstra | 7039e1d | 2012-10-25 14:16:34 +0200 | [diff] [blame] | 1686 | goto out; | 
| Peter Zijlstra | 7039e1d | 2012-10-25 14:16:34 +0200 | [diff] [blame] | 1687 |  | 
| Mel Gorman | a8f6077 | 2012-11-14 21:41:46 +0000 | [diff] [blame] | 1688 | /* | 
|  | 1689 | * Rate-limit the amount of data that is being migrated to a node. | 
|  | 1690 | * Optimal placement is no good if the memory bus is saturated and | 
|  | 1691 | * all the time is being spent migrating! | 
|  | 1692 | */ | 
| Hugh Dickins | 340ef39 | 2013-02-22 16:34:33 -0800 | [diff] [blame] | 1693 | if (numamigrate_update_ratelimit(pgdat, 1)) | 
| Mel Gorman | a8f6077 | 2012-11-14 21:41:46 +0000 | [diff] [blame] | 1694 | goto out; | 
| Mel Gorman | a8f6077 | 2012-11-14 21:41:46 +0000 | [diff] [blame] | 1695 |  | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1696 | isolated = numamigrate_isolate_page(pgdat, page); | 
|  | 1697 | if (!isolated) | 
|  | 1698 | goto out; | 
| Peter Zijlstra | 7039e1d | 2012-10-25 14:16:34 +0200 | [diff] [blame] | 1699 |  | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1700 | list_add(&page->lru, &migratepages); | 
| Hugh Dickins | 9c620e2 | 2013-02-22 16:35:14 -0800 | [diff] [blame] | 1701 | nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page, | 
| David Rientjes | 68711a7 | 2014-06-04 16:08:25 -0700 | [diff] [blame] | 1702 | NULL, node, MIGRATE_ASYNC, | 
|  | 1703 | MR_NUMA_MISPLACED); | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1704 | if (nr_remaining) { | 
| Joonsoo Kim | 59c82b7 | 2014-01-21 15:51:17 -0800 | [diff] [blame] | 1705 | if (!list_empty(&migratepages)) { | 
|  | 1706 | list_del(&page->lru); | 
|  | 1707 | dec_zone_page_state(page, NR_ISOLATED_ANON + | 
|  | 1708 | page_is_file_cache(page)); | 
|  | 1709 | putback_lru_page(page); | 
|  | 1710 | } | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1711 | isolated = 0; | 
|  | 1712 | } else | 
|  | 1713 | count_vm_numa_event(NUMA_PAGE_MIGRATE); | 
| Peter Zijlstra | 7039e1d | 2012-10-25 14:16:34 +0200 | [diff] [blame] | 1714 | BUG_ON(!list_empty(&migratepages)); | 
| Peter Zijlstra | 7039e1d | 2012-10-25 14:16:34 +0200 | [diff] [blame] | 1715 | return isolated; | 
| Hugh Dickins | 340ef39 | 2013-02-22 16:34:33 -0800 | [diff] [blame] | 1716 |  | 
|  | 1717 | out: | 
|  | 1718 | put_page(page); | 
|  | 1719 | return 0; | 
| Peter Zijlstra | 7039e1d | 2012-10-25 14:16:34 +0200 | [diff] [blame] | 1720 | } | 
| Mel Gorman | 220018d | 2012-12-05 09:32:56 +0000 | [diff] [blame] | 1721 | #endif /* CONFIG_NUMA_BALANCING */ | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1722 |  | 
| Mel Gorman | 220018d | 2012-12-05 09:32:56 +0000 | [diff] [blame] | 1723 | #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE) | 
| Hugh Dickins | 340ef39 | 2013-02-22 16:34:33 -0800 | [diff] [blame] | 1724 | /* | 
|  | 1725 | * Migrates a THP to a given target node. page must be locked and is unlocked | 
|  | 1726 | * before returning. | 
|  | 1727 | */ | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1728 | int migrate_misplaced_transhuge_page(struct mm_struct *mm, | 
|  | 1729 | struct vm_area_struct *vma, | 
|  | 1730 | pmd_t *pmd, pmd_t entry, | 
|  | 1731 | unsigned long address, | 
|  | 1732 | struct page *page, int node) | 
|  | 1733 | { | 
| Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1734 | spinlock_t *ptl; | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1735 | pg_data_t *pgdat = NODE_DATA(node); | 
|  | 1736 | int isolated = 0; | 
|  | 1737 | struct page *new_page = NULL; | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1738 | int page_lru = page_is_file_cache(page); | 
| Mel Gorman | f714f4f | 2013-12-18 17:08:33 -0800 | [diff] [blame] | 1739 | unsigned long mmun_start = address & HPAGE_PMD_MASK; | 
|  | 1740 | unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE; | 
| Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1741 | pmd_t orig_entry; | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1742 |  | 
|  | 1743 | /* | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1744 | * Rate-limit the amount of data that is being migrated to a node. | 
|  | 1745 | * Optimal placement is no good if the memory bus is saturated and | 
|  | 1746 | * all the time is being spent migrating! | 
|  | 1747 | */ | 
| Mel Gorman | d28d4335 | 2012-11-29 09:24:36 +0000 | [diff] [blame] | 1748 | if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR)) | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1749 | goto out_dropref; | 
|  | 1750 |  | 
|  | 1751 | new_page = alloc_pages_node(node, | 
| Johannes Weiner | e97ca8e | 2014-03-10 15:49:43 -0700 | [diff] [blame] | 1752 | (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT, | 
|  | 1753 | HPAGE_PMD_ORDER); | 
| Hugh Dickins | 340ef39 | 2013-02-22 16:34:33 -0800 | [diff] [blame] | 1754 | if (!new_page) | 
|  | 1755 | goto out_fail; | 
|  | 1756 |  | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1757 | isolated = numamigrate_isolate_page(pgdat, page); | 
| Hugh Dickins | 340ef39 | 2013-02-22 16:34:33 -0800 | [diff] [blame] | 1758 | if (!isolated) { | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1759 | put_page(new_page); | 
| Hugh Dickins | 340ef39 | 2013-02-22 16:34:33 -0800 | [diff] [blame] | 1760 | goto out_fail; | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1761 | } | 
|  | 1762 |  | 
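|  |  | /* | 
|  |  | * A TLB flush deferred by the NUMA hinting protection change may still be | 
|  |  | * pending; complete it before copying so no other CPU can keep writing to | 
|  |  | * the old page through a stale TLB entry during the copy below. | 
|  |  | */ | 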
| Mel Gorman | b0943d6 | 2013-12-18 17:08:46 -0800 | [diff] [blame] | 1763 | if (mm_tlb_flush_pending(mm)) | 
|  | 1764 | flush_tlb_range(vma, mmun_start, mmun_end); | 
|  | 1765 |  | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1766 | /* Prepare a page as a migration target */ | 
|  | 1767 | __set_page_locked(new_page); | 
|  | 1768 | SetPageSwapBacked(new_page); | 
|  | 1769 |  | 
|  | 1770 | /* anon mapping, we can simply copy page->mapping to the new page: */ | 
|  | 1771 | new_page->mapping = page->mapping; | 
|  | 1772 | new_page->index = page->index; | 
|  | 1773 | migrate_page_copy(new_page, page); | 
|  | 1774 | WARN_ON(PageLRU(new_page)); | 
|  | 1775 |  | 
|  | 1776 | /* Recheck the target PMD */ | 
| Mel Gorman | f714f4f | 2013-12-18 17:08:33 -0800 | [diff] [blame] | 1777 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | 
| Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1778 | ptl = pmd_lock(mm, pmd); | 
| Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1779 | if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) { | 
|  | 1780 | fail_putback: | 
| Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1781 | spin_unlock(ptl); | 
| Mel Gorman | f714f4f | 2013-12-18 17:08:33 -0800 | [diff] [blame] | 1782 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1783 |  | 
|  | 1784 | /* Reverse changes made by migrate_page_copy() */ | 
|  | 1785 | if (TestClearPageActive(new_page)) | 
|  | 1786 | SetPageActive(page); | 
|  | 1787 | if (TestClearPageUnevictable(new_page)) | 
|  | 1788 | SetPageUnevictable(page); | 
|  | 1789 | mlock_migrate_page(page, new_page); | 
|  | 1790 |  | 
|  | 1791 | unlock_page(new_page); | 
|  | 1792 | put_page(new_page);		/* Free it */ | 
|  | 1793 |  | 
| Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1794 | /* Retake the caller's reference and put the page back on the LRU */ | 
|  | 1795 | get_page(page); | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1796 | putback_lru_page(page); | 
| Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1797 | mod_zone_page_state(page_zone(page), | 
|  | 1798 | NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); | 
| Mel Gorman | eb4489f6 | 2013-12-18 17:08:39 -0800 | [diff] [blame] | 1799 |  | 
|  | 1800 | goto out_unlock; | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1801 | } | 
|  | 1802 |  | 
| Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1803 | orig_entry = *pmd; | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1804 | entry = mk_pmd(new_page, vma->vm_page_prot); | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1805 | entry = pmd_mkhuge(entry); | 
| Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1806 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1807 |  | 
| Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1808 | /* | 
|  | 1809 | * Clear the old entry under the page table lock and establish the new pmd. | 
|  | 1810 | * Any parallel GUP will either observe the old page blocking on the | 
|  | 1811 | * page lock, block on the page table lock or observe the new page. | 
|  | 1812 | * The SetPageUptodate on the new page and page_add_anon_rmap | 
|  | 1813 | * guarantee the copy is visible before the pagetable update. | 
|  | 1814 | */ | 
| Mel Gorman | f714f4f | 2013-12-18 17:08:33 -0800 | [diff] [blame] | 1815 | flush_cache_range(vma, mmun_start, mmun_end); | 
| Mel Gorman | 11de992 | 2014-06-04 16:07:41 -0700 | [diff] [blame] | 1816 | page_add_anon_rmap(new_page, vma, mmun_start); | 
| Aneesh Kumar K.V | 8809aa2 | 2015-06-24 16:57:44 -0700 | [diff] [blame] | 1817 | pmdp_huge_clear_flush_notify(vma, mmun_start, pmd); | 
| Mel Gorman | f714f4f | 2013-12-18 17:08:33 -0800 | [diff] [blame] | 1818 | set_pmd_at(mm, mmun_start, pmd, entry); | 
|  | 1819 | flush_tlb_range(vma, mmun_start, mmun_end); | 
| Stephen Rothwell | ce4a9cc | 2012-12-10 19:50:57 +1100 | [diff] [blame] | 1820 | update_mmu_cache_pmd(vma, address, &entry); | 
| Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1821 |  | 
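|  |  | /* | 
|  |  | * With the new pmd visible, recheck the old page's reference count: if it | 
|  |  | * is no longer the expected two, a parallel get_user_pages() grabbed the | 
|  |  | * page, so restore the original pmd and fall back to using the old page. | 
|  |  | */ | 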
|  | 1822 | if (page_count(page) != 2) { | 
| Mel Gorman | f714f4f | 2013-12-18 17:08:33 -0800 | [diff] [blame] | 1823 | set_pmd_at(mm, mmun_start, pmd, orig_entry); | 
|  | 1824 | flush_tlb_range(vma, mmun_start, mmun_end); | 
| Joerg Roedel | 34ee645 | 2014-11-13 13:46:09 +1100 | [diff] [blame] | 1825 | mmu_notifier_invalidate_range(mm, mmun_start, mmun_end); | 
| Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1826 | update_mmu_cache_pmd(vma, address, &entry); | 
|  | 1827 | page_remove_rmap(new_page); | 
|  | 1828 | goto fail_putback; | 
|  | 1829 | } | 
|  | 1830 |  | 
| Johannes Weiner | 0a31bc9 | 2014-08-08 14:19:22 -0700 | [diff] [blame] | 1831 | mem_cgroup_migrate(page, new_page, false); | 
|  | 1832 |  | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1833 | page_remove_rmap(page); | 
| Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1834 |  | 
| Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1835 | spin_unlock(ptl); | 
| Mel Gorman | f714f4f | 2013-12-18 17:08:33 -0800 | [diff] [blame] | 1836 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1837 |  | 
| Mel Gorman | 11de992 | 2014-06-04 16:07:41 -0700 | [diff] [blame] | 1838 | /* Take an "isolate" reference and put new page on the LRU. */ | 
|  | 1839 | get_page(new_page); | 
|  | 1840 | putback_lru_page(new_page); | 
|  | 1841 |  | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1842 | unlock_page(new_page); | 
|  | 1843 | unlock_page(page); | 
|  | 1844 | put_page(page);			/* Drop the rmap reference */ | 
|  | 1845 | put_page(page);			/* Drop the LRU isolation reference */ | 
|  | 1846 |  | 
|  | 1847 | count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR); | 
|  | 1848 | count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR); | 
|  | 1849 |  | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1850 | mod_zone_page_state(page_zone(page), | 
|  | 1851 | NR_ISOLATED_ANON + page_lru, | 
|  | 1852 | -HPAGE_PMD_NR); | 
|  | 1853 | return isolated; | 
|  | 1854 |  | 
| Hugh Dickins | 340ef39 | 2013-02-22 16:34:33 -0800 | [diff] [blame] | 1855 | out_fail: | 
|  | 1856 | count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1857 | out_dropref: | 
| Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1858 | ptl = pmd_lock(mm, pmd); | 
|  | 1859 | if (pmd_same(*pmd, entry)) { | 
| Mel Gorman | 4d94246 | 2015-02-12 14:58:28 -0800 | [diff] [blame] | 1860 | entry = pmd_modify(entry, vma->vm_page_prot); | 
| Mel Gorman | f714f4f | 2013-12-18 17:08:33 -0800 | [diff] [blame] | 1861 | set_pmd_at(mm, mmun_start, pmd, entry); | 
| Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1862 | update_mmu_cache_pmd(vma, address, &entry); | 
|  | 1863 | } | 
|  | 1864 | spin_unlock(ptl); | 
| Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1865 |  | 
| Mel Gorman | eb4489f6 | 2013-12-18 17:08:39 -0800 | [diff] [blame] | 1866 | out_unlock: | 
| Hugh Dickins | 340ef39 | 2013-02-22 16:34:33 -0800 | [diff] [blame] | 1867 | unlock_page(page); | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1868 | put_page(page); | 
| Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1869 | return 0; | 
|  | 1870 | } | 
| Peter Zijlstra | 7039e1d | 2012-10-25 14:16:34 +0200 | [diff] [blame] | 1871 | #endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */ | 
|  | 1872 |  | 
|  | 1873 | #endif /* CONFIG_NUMA */ |
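Likewise, a hedged sketch of how migrate_misplaced_transhuge_page() is expected to be called from the THP NUMA fault path (roughly do_huge_pmd_numa_page() in mm/huge_memory.c). The surrounding pmd locking and statistics reporting are omitted, and the function name below is an illustrative assumption:

/* Illustrative sketch only -- not part of mm/migrate.c. */
static void thp_numa_fault_sketch(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  pmd_t *pmdp, pmd_t pmd,
				  unsigned long addr,
				  struct page *page, int target_nid)
{
	int flags = 0;
	int migrated;

	/*
	 * The page is handed over locked with an extra reference held;
	 * migrate_misplaced_transhuge_page() unlocks it and drops that
	 * reference on every return path.
	 */
	migrated = migrate_misplaced_transhuge_page(mm, vma, pmdp, pmd,
						    addr, page, target_nid);
	if (migrated)
		flags |= TNF_MIGRATED;
	else
		flags |= TNF_MIGRATE_FAIL;

	/* The real fault handler reports 'flags' via task_numa_fault(). */
	(void)flags;
}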