/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hashtable.h>
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>

#include <asm/tlbflush.h>
#include "internal.h"

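/*
 * NUMA(x) evaluates to x on CONFIG_NUMA builds and to 0 otherwise, and
 * DO_NUMA(x) executes x only on CONFIG_NUMA builds: they let NUMA-only
 * expressions, such as references to a node id field, be written once
 * without an #ifdef at each use site.
 */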
#ifdef CONFIG_NUMA
#define NUMA(x)		(x)
#define DO_NUMA(x)	do { (x); } while (0)
#else
#define NUMA(x)		(0)
#define DO_NUMA(x)	do { } while (0)
#endif

/*
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 *
 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
 * stable trees and multiple unstable trees: one of each for each NUMA node.
 */
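
/*
 * As a rough userspace illustration (not part of this file; p and len
 * are placeholders), an application opts an area in to KSM scanning
 * with madvise(2):
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(p, len, MADV_MERGEABLE);
 *
 * This sets VM_MERGEABLE on the vma, and ksmd scans such areas while
 * /sys/kernel/mm/ksm/run is set to 1.
 */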

/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node link;
	struct list_head mm_list;
	struct rmap_item *rmap_list;
	struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct mm_slot *mm_slot;
	unsigned long address;
	struct rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
 * @list: linked into migrate_nodes, pending placement in the proper node tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
 */
struct stable_node {
	union {
		struct rb_node node;	/* when node of stable tree */
		struct {		/* when listed for migration */
			struct list_head *head;
			struct list_head list;
		};
	};
	struct hlist_head hlist;
	unsigned long kpfn;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @nid: NUMA node id of unstable tree in which linked (may not match page)
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct rmap_item {
	struct rmap_item *rmap_list;
	union {
		struct anon_vma *anon_vma;	/* when stable */
#ifdef CONFIG_NUMA
		int nid;	/* when node of unstable tree */
#endif
	};
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */
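
/*
 * rmap_item->address is page aligned, leaving its low bits free to hold
 * the flags above: the scan seqnr (under SEQNR_MASK) and UNSTABLE_FLAG
 * while the item is in the unstable tree, or STABLE_FLAG while it is
 * listed from the stable tree; address & PAGE_MASK recovers the virtual
 * address itself.
 */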

/* The stable and unstable tree heads */
static struct rb_root one_stable_tree[1] = { RB_ROOT };
static struct rb_root one_unstable_tree[1] = { RB_ROOT };
static struct rb_root *root_stable_tree = one_stable_tree;
static struct rb_root *root_unstable_tree = one_unstable_tree;

/* Recently migrated nodes of stable tree, pending proper placement */
static LIST_HEAD(migrate_nodes);

#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct mm_slot ksm_mm_head = {
	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

/* Checksum of an empty (zeroed) page */
static unsigned int zero_checksum __read_mostly;

/* Whether to merge empty (zeroed) pages with actual zero pages */
static bool ksm_use_zero_pages __read_mostly;

#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
static int ksm_nr_node_ids = 1;
#else
#define ksm_merge_across_nodes	1U
#define ksm_nr_node_ids		1
#endif

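/*
 * Values written to /sys/kernel/mm/ksm/run: 0 stops ksmd, 1 runs it,
 * and 2 stops it after unmerging all currently merged pages.
 * KSM_RUN_OFFLINE is or'ed in internally while memory is being
 * hot-removed, making ksmd wait in wait_while_offlining().
 */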
#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
#define KSM_RUN_OFFLINE	4
static unsigned long ksm_run = KSM_RUN_STOP;
static void wait_while_offlining(void);

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
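
/*
 * For example, KSM_KMEM_CACHE(rmap_item, 0) expands to:
 *
 *	kmem_cache_create("ksm_rmap_item", sizeof(struct rmap_item),
 *			  __alignof__(struct rmap_item), 0, NULL);
 */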

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static inline struct rmap_item *alloc_rmap_item(void)
{
	struct rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
						__GFP_NORETRY | __GFP_NOWARN);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct stable_node *alloc_stable_node(void)
{
	/*
	 * The allocation can take too long with GFP_KERNEL when memory is
	 * under pressure, which may lead to hung task warnings.  Adding
	 * __GFP_HIGH grants access to memory reserves, helping to avoid
	 * this problem.
	 */
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
}

static inline void free_stable_node(struct stable_node *stable_node)
{
	kmem_cache_free(stable_node_cache, stable_node);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *slot;

	hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
		if (slot->mm == mm)
			return slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *	if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1)
 *		put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
 *
 * FAULT_FLAG/FOLL_REMOTE are because we do this outside the context
 * of the process that owns 'vma'.  We also do not want to enforce
 * protection keys here anyway.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	int ret = 0;

	do {
		cond_resched();
		page = follow_page(vma, addr,
				FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
		if (IS_ERR_OR_NULL(page))
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma, addr,
					FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
		else
			ret = VM_FAULT_WRITE;
		put_page(page);
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
	/*
	 * We must loop because handle_mm_fault() may back out if there's
	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
	 * COW has been broken, even if the vma does not permit VM_WRITE;
	 * but note that a concurrent fault might break PageKsm for us.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

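/*
 * Look up the vma covering @addr in @mm, but only return it while it is
 * still a merge candidate: a VM_MERGEABLE area with an anon_vma, in an
 * mm that has not started to exit.
 */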
static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
		unsigned long addr)
{
	struct vm_area_struct *vma;
	if (ksm_test_exit(mm))
		return NULL;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		return NULL;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}

static void break_cow(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	down_read(&mm->mmap_sem);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr);
	up_read(&mm->mmap_sem);
}

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	down_read(&mm->mmap_sem);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
		put_page(page);
out:
		page = NULL;
	}
	up_read(&mm->mmap_sem);
	return page;
}

/*
 * This helper is used for getting the right index into the array of tree
 * roots.  When the merge_across_nodes knob is set to 1, only two rb-trees
 * are used, for stable and unstable pages from all nodes, with their roots
 * at index 0.  Otherwise, every node has its own stable and unstable tree.
 */
static inline int get_kpfn_nid(unsigned long kpfn)
{
	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}

static void remove_node_from_stable_tree(struct stable_node *stable_node)
{
	struct rmap_item *rmap_item;

	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	if (stable_node->head == &migrate_nodes)
		list_del(&stable_node->list);
	else
		rb_erase(&stable_node->node,
			 root_stable_tree + NUMA(stable_node->nid));
	free_stable_node(stable_node);
}

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 * But beware, the stable node's page might be being migrated.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive.  So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node.  This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping.
 * A page may still appear to hold our key while it
 * is on its way to being freed; but it is an anomaly to bear in mind.
 */
static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
{
	struct page *page;
	void *expected_mapping;
	unsigned long kpfn;

	expected_mapping = (void *)((unsigned long)stable_node |
					PAGE_MAPPING_KSM);
again:
	kpfn = READ_ONCE(stable_node->kpfn);
	page = pfn_to_page(kpfn);

	/*
	 * page is computed from kpfn, so on most architectures reading
	 * page->mapping is naturally ordered after reading node->kpfn,
	 * but on Alpha we need to be more careful.
	 */
	smp_read_barrier_depends();
	if (READ_ONCE(page->mapping) != expected_mapping)
		goto stale;

	/*
	 * We cannot do anything with the page while its refcount is 0.
	 * Usually 0 means free, or tail of a higher-order page: in which
	 * case this node is no longer referenced, and should be freed;
	 * however, it might mean that the page is under page_freeze_refs().
	 * The __remove_mapping() case is easy, again the node is now stale;
	 * but if page is swapcache in migrate_page_move_mapping(), it might
	 * still be our page, in which case it's essential to keep the node.
	 */
	while (!get_page_unless_zero(page)) {
		/*
		 * Another check for page->mapping != expected_mapping would
		 * work here too.  We have chosen the !PageSwapCache test to
		 * optimize the common case, when the page is or is about to
		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
		 * in the freeze_refs section of __remove_mapping(); but Anon
		 * page->mapping reset to NULL later, in free_pages_prepare().
		 */
		if (!PageSwapCache(page))
			goto stale;
		cpu_relax();
	}

	if (READ_ONCE(page->mapping) != expected_mapping) {
		put_page(page);
		goto stale;
	}

	if (lock_it) {
		lock_page(page);
		if (READ_ONCE(page->mapping) != expected_mapping) {
			unlock_page(page);
			put_page(page);
			goto stale;
		}
	}
	return page;

stale:
	/*
	 * We come here from above when page->mapping or !PageSwapCache
	 * suggests that the node is stale; but it might be under migration.
	 * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
	 * before checking whether node->kpfn has been changed.
	 */
	smp_rmb();
	if (READ_ONCE(stable_node->kpfn) != kpfn)
		goto again;
	remove_node_from_stable_tree(stable_node);
	return NULL;
}

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = get_ksm_page(stable_node, true);
		if (!page)
			goto out;

		hlist_del(&rmap_item->hlist);
		unlock_page(page);
		put_page(page);

		if (!hlist_empty(&stable_node->hlist))
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node,
				 root_unstable_tree + NUMA(rmap_item->nid));
		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
				       struct rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge rmap_items from stable tree rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr);
	}
	return err;
}

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int remove_stable_node(struct stable_node *stable_node)
{
	struct page *page;
	int err;

	page = get_ksm_page(stable_node, true);
	if (!page) {
		/*
		 * get_ksm_page did remove_node_from_stable_tree itself.
		 */
		return 0;
	}

	if (WARN_ON_ONCE(page_mapped(page))) {
		/*
		 * This should not happen: but if it does, just refuse to let
		 * merge_across_nodes be switched - there is no need to panic.
		 */
		err = -EBUSY;
	} else {
		/*
		 * The stable node did not yet appear stale to get_ksm_page(),
		 * since that allows for an unmapped ksm page to be recognized
		 * right up until it is freed; but the node is safe to remove.
		 * This page might be in a pagevec waiting to be freed,
		 * or it might be PageSwapCache (perhaps under writeback),
		 * or it might have been removed from swapcache a moment ago.
		 */
		set_page_stable_node(page, NULL);
		remove_node_from_stable_tree(stable_node);
		err = 0;
	}

	unlock_page(page);
	put_page(page);
	return err;
}

static int remove_all_stable_nodes(void)
{
	struct stable_node *stable_node, *next;
	int nid;
	int err = 0;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		while (root_stable_tree[nid].rb_node) {
			stable_node = rb_entry(root_stable_tree[nid].rb_node,
						struct stable_node, node);
			if (remove_stable_node(stable_node)) {
				err = -EBUSY;
				break;	/* proceed to next nid */
			}
			cond_resched();
		}
	}
	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
		if (remove_stable_node(stable_node))
			err = -EBUSY;
		cond_resched();
	}
	return err;
}

static int unmerge_and_remove_all_rmap_items(void)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
						struct mm_slot, mm_list);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot;
			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
		mm = mm_slot->mm;
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (ksm_test_exit(mm))
				break;
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err)
				goto error;
		}

		remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
		up_read(&mm->mmap_sem);

		spin_lock(&ksm_mmlist_lock);
		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
						struct mm_slot, mm_list);
		if (ksm_test_exit(mm)) {
			hash_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			spin_unlock(&ksm_mmlist_lock);

			free_mm_slot(mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			mmdrop(mm);
		} else
			spin_unlock(&ksm_mmlist_lock);
	}

	/* Clean up stable nodes, but don't worry if some are still busy */
	remove_all_stable_nodes();
	ksm_scan.seqnr = 0;
	return 0;

error:
	up_read(&mm->mmap_sem);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */

static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page);
	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
	kunmap_atomic(addr);
	return checksum;
}
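
/*
 * Note that jhash2() takes its length argument in 32-bit words, hence
 * PAGE_SIZE / 4.  The checksum is never used to find a match: it only
 * gauges volatility, since a page is inserted into the unstable tree
 * only if its checksum has not changed since the previous scan (see the
 * notes at the top of this file).
 */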

static int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}

| 856 | static int write_protect_page(struct vm_area_struct *vma, struct page *page, |
| 857 | pte_t *orig_pte) |
| 858 | { |
| 859 | struct mm_struct *mm = vma->vm_mm; |
Kirill A. Shutemov | 36eaff3 | 2017-02-24 14:58:04 -0800 | [diff] [blame] | 860 | struct page_vma_mapped_walk pvmw = { |
| 861 | .page = page, |
| 862 | .vma = vma, |
| 863 | }; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 864 | int swapped; |
| 865 | int err = -EFAULT; |
Haggai Eran | 6bdb913 | 2012-10-08 16:33:35 -0700 | [diff] [blame] | 866 | unsigned long mmun_start; /* For mmu_notifiers */ |
| 867 | unsigned long mmun_end; /* For mmu_notifiers */ |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 868 | |
Kirill A. Shutemov | 36eaff3 | 2017-02-24 14:58:04 -0800 | [diff] [blame] | 869 | pvmw.address = page_address_in_vma(page, vma); |
| 870 | if (pvmw.address == -EFAULT) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 871 | goto out; |
| 872 | |
Andrea Arcangeli | 29ad768 | 2011-01-13 15:47:19 -0800 | [diff] [blame] | 873 | BUG_ON(PageTransCompound(page)); |
Haggai Eran | 6bdb913 | 2012-10-08 16:33:35 -0700 | [diff] [blame] | 874 | |
Kirill A. Shutemov | 36eaff3 | 2017-02-24 14:58:04 -0800 | [diff] [blame] | 875 | mmun_start = pvmw.address; |
| 876 | mmun_end = pvmw.address + PAGE_SIZE; |
Haggai Eran | 6bdb913 | 2012-10-08 16:33:35 -0700 | [diff] [blame] | 877 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); |
| 878 | |
Kirill A. Shutemov | 36eaff3 | 2017-02-24 14:58:04 -0800 | [diff] [blame] | 879 | if (!page_vma_mapped_walk(&pvmw)) |
Haggai Eran | 6bdb913 | 2012-10-08 16:33:35 -0700 | [diff] [blame] | 880 | goto out_mn; |
Kirill A. Shutemov | 36eaff3 | 2017-02-24 14:58:04 -0800 | [diff] [blame] | 881 | if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")) |
| 882 | goto out_unlock; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 883 | |
Aneesh Kumar K.V | 595cd8f | 2017-02-24 14:59:19 -0800 | [diff] [blame] | 884 | if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) || |
| 885 | (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) { |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 886 | pte_t entry; |
| 887 | |
| 888 | swapped = PageSwapCache(page); |
Kirill A. Shutemov | 36eaff3 | 2017-02-24 14:58:04 -0800 | [diff] [blame] | 889 | flush_cache_page(vma, pvmw.address, page_to_pfn(page)); |
		/*
		 * Ok this is tricky: when get_user_pages_fast() runs, it
		 * doesn't take any lock, so the check that we are going to
		 * make with the page count against the mapcount is racy,
		 * and O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check:
		 * this assures us that no O_DIRECT can happen after the
		 * check or in the middle of the check.
		 */
Kirill A. Shutemov | 36eaff3 | 2017-02-24 14:58:04 -0800 | [diff] [blame] | 899 | entry = ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte); |
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on
		 * the page: page_count must be exactly page_mapcount (one
		 * reference per mapping pte) + 1 (the extra reference we
		 * hold on the page while scanning it) + swapped (one
		 * reference held by the swap cache, if any).
		 */
Hugh Dickins | 31e855e | 2009-12-14 17:59:17 -0800 | [diff] [blame] | 904 | if (page_mapcount(page) + 1 + swapped != page_count(page)) { |
Kirill A. Shutemov | 36eaff3 | 2017-02-24 14:58:04 -0800 | [diff] [blame] | 905 | set_pte_at(mm, pvmw.address, pvmw.pte, entry); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 906 | goto out_unlock; |
| 907 | } |
Hugh Dickins | 4e31635 | 2010-10-02 17:49:08 -0700 | [diff] [blame] | 908 | if (pte_dirty(entry)) |
| 909 | set_page_dirty(page); |
Aneesh Kumar K.V | 595cd8f | 2017-02-24 14:59:19 -0800 | [diff] [blame] | 910 | |
| 911 | if (pte_protnone(entry)) |
| 912 | entry = pte_mkclean(pte_clear_savedwrite(entry)); |
| 913 | else |
| 914 | entry = pte_mkclean(pte_wrprotect(entry)); |
Kirill A. Shutemov | 36eaff3 | 2017-02-24 14:58:04 -0800 | [diff] [blame] | 915 | set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 916 | } |
Kirill A. Shutemov | 36eaff3 | 2017-02-24 14:58:04 -0800 | [diff] [blame] | 917 | *orig_pte = *pvmw.pte; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 918 | err = 0; |
| 919 | |
| 920 | out_unlock: |
Kirill A. Shutemov | 36eaff3 | 2017-02-24 14:58:04 -0800 | [diff] [blame] | 921 | page_vma_mapped_walk_done(&pvmw); |
Haggai Eran | 6bdb913 | 2012-10-08 16:33:35 -0700 | [diff] [blame] | 922 | out_mn: |
| 923 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 924 | out: |
| 925 | return err; |
| 926 | } |
| 927 | |
| 928 | /** |
| 929 | * replace_page - replace page in vma by new ksm page |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 930 | * @vma: vma that holds the pte pointing to page |
 * @page: the page to be replaced by kpage
 * @kpage: the ksm page that replaces page
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 933 | * @orig_pte: the original value of the pte |
| 934 | * |
| 935 | * Returns 0 on success, -EFAULT on failure. |
| 936 | */ |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 937 | static int replace_page(struct vm_area_struct *vma, struct page *page, |
| 938 | struct page *kpage, pte_t orig_pte) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 939 | { |
| 940 | struct mm_struct *mm = vma->vm_mm; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 941 | pmd_t *pmd; |
| 942 | pte_t *ptep; |
Claudio Imbrenda | e86c59b | 2017-02-24 14:55:39 -0800 | [diff] [blame] | 943 | pte_t newpte; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 944 | spinlock_t *ptl; |
| 945 | unsigned long addr; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 946 | int err = -EFAULT; |
Haggai Eran | 6bdb913 | 2012-10-08 16:33:35 -0700 | [diff] [blame] | 947 | unsigned long mmun_start; /* For mmu_notifiers */ |
| 948 | unsigned long mmun_end; /* For mmu_notifiers */ |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 949 | |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 950 | addr = page_address_in_vma(page, vma); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 951 | if (addr == -EFAULT) |
| 952 | goto out; |
| 953 | |
Bob Liu | 6219049 | 2012-12-11 16:00:37 -0800 | [diff] [blame] | 954 | pmd = mm_find_pmd(mm, addr); |
| 955 | if (!pmd) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 956 | goto out; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 957 | |
Haggai Eran | 6bdb913 | 2012-10-08 16:33:35 -0700 | [diff] [blame] | 958 | mmun_start = addr; |
| 959 | mmun_end = addr + PAGE_SIZE; |
| 960 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); |
| 961 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 962 | ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); |
| 963 | if (!pte_same(*ptep, orig_pte)) { |
| 964 | pte_unmap_unlock(ptep, ptl); |
Haggai Eran | 6bdb913 | 2012-10-08 16:33:35 -0700 | [diff] [blame] | 965 | goto out_mn; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 966 | } |
| 967 | |
	/*
	 * No need to check ksm_use_zero_pages here: we can only have a
	 * zero_page here if ksm_use_zero_pages was enabled already.
	 */
| 972 | if (!is_zero_pfn(page_to_pfn(kpage))) { |
| 973 | get_page(kpage); |
| 974 | page_add_anon_rmap(kpage, vma, addr, false); |
| 975 | newpte = mk_pte(kpage, vma->vm_page_prot); |
| 976 | } else { |
| 977 | newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage), |
| 978 | vma->vm_page_prot)); |
| 979 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 980 | |
| 981 | flush_cache_page(vma, addr, pte_pfn(*ptep)); |
Joerg Roedel | 34ee645 | 2014-11-13 13:46:09 +1100 | [diff] [blame] | 982 | ptep_clear_flush_notify(vma, addr, ptep); |
Claudio Imbrenda | e86c59b | 2017-02-24 14:55:39 -0800 | [diff] [blame] | 983 | set_pte_at_notify(mm, addr, ptep, newpte); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 984 | |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 985 | page_remove_rmap(page, false); |
Hugh Dickins | ae52a2a | 2011-01-13 15:46:28 -0800 | [diff] [blame] | 986 | if (!page_mapped(page)) |
| 987 | try_to_free_swap(page); |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 988 | put_page(page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 989 | |
| 990 | pte_unmap_unlock(ptep, ptl); |
| 991 | err = 0; |
Haggai Eran | 6bdb913 | 2012-10-08 16:33:35 -0700 | [diff] [blame] | 992 | out_mn: |
| 993 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 994 | out: |
| 995 | return err; |
| 996 | } |
| 997 | |
| 998 | /* |
| 999 | * try_to_merge_one_page - take two pages and merge them into one |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1000 | * @vma: the vma that holds the pte pointing to page |
| 1001 | * @page: the PageAnon page that we want to replace with kpage |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1002 | * @kpage: the PageKsm page that we want to map instead of page, |
| 1003 | * or NULL the first time when we want to use page as kpage. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1004 | * |
| 1005 | * This function returns 0 if the pages were merged, -EFAULT otherwise. |
| 1006 | */ |
| 1007 | static int try_to_merge_one_page(struct vm_area_struct *vma, |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1008 | struct page *page, struct page *kpage) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1009 | { |
| 1010 | pte_t orig_pte = __pte(0); |
| 1011 | int err = -EFAULT; |
| 1012 | |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1013 | if (page == kpage) /* ksm page forked */ |
| 1014 | return 0; |
| 1015 | |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1016 | if (!PageAnon(page)) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1017 | goto out; |
| 1018 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1019 | /* |
| 1020 | * We need the page lock to read a stable PageSwapCache in |
| 1021 | * write_protect_page(). We use trylock_page() instead of |
| 1022 | * lock_page() because we don't want to wait here - we |
| 1023 | * prefer to continue scanning and merging different pages, |
| 1024 | * then come back to this page when it is unlocked. |
| 1025 | */ |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1026 | if (!trylock_page(page)) |
Hugh Dickins | 31e855e | 2009-12-14 17:59:17 -0800 | [diff] [blame] | 1027 | goto out; |
Kirill A. Shutemov | f765f54 | 2016-01-15 16:53:03 -0800 | [diff] [blame] | 1028 | |
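	/* KSM only merges base pages, so a THP must be split first. */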
| 1029 | if (PageTransCompound(page)) { |
| 1030 | err = split_huge_page(page); |
| 1031 | if (err) |
| 1032 | goto out_unlock; |
| 1033 | } |
| 1034 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1035 | /* |
| 1036 | * If this anonymous page is mapped only here, its pte may need |
| 1037 | * to be write-protected. If it's mapped elsewhere, all of its |
| 1038 | * ptes are necessarily already write-protected. But in either |
| 1039 | * case, we need to lock and check page_count is not raised. |
| 1040 | */ |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1041 | if (write_protect_page(vma, page, &orig_pte) == 0) { |
| 1042 | if (!kpage) { |
| 1043 | /* |
| 1044 | * While we hold page lock, upgrade page from |
| 1045 | * PageAnon+anon_vma to PageKsm+NULL stable_node: |
| 1046 | * stable_tree_insert() will update stable_node. |
| 1047 | */ |
| 1048 | set_page_stable_node(page, NULL); |
| 1049 | mark_page_accessed(page); |
Minchan Kim | 337ed7e | 2016-01-15 16:55:15 -0800 | [diff] [blame] | 1050 | /* |
| 1051 | * Page reclaim just frees a clean page with no dirty |
| 1052 | * ptes: make sure that the ksm page would be swapped. |
| 1053 | */ |
| 1054 | if (!PageDirty(page)) |
| 1055 | SetPageDirty(page); |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1056 | err = 0; |
| 1057 | } else if (pages_identical(page, kpage)) |
| 1058 | err = replace_page(vma, page, kpage, orig_pte); |
| 1059 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1060 | |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1061 | if ((vma->vm_flags & VM_LOCKED) && kpage && !err) { |
Hugh Dickins | 73848b4 | 2009-12-14 17:59:22 -0800 | [diff] [blame] | 1062 | munlock_vma_page(page); |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1063 | if (!PageMlocked(kpage)) { |
| 1064 | unlock_page(page); |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1065 | lock_page(kpage); |
| 1066 | mlock_vma_page(kpage); |
| 1067 | page = kpage; /* for final unlock */ |
| 1068 | } |
| 1069 | } |
Hugh Dickins | 73848b4 | 2009-12-14 17:59:22 -0800 | [diff] [blame] | 1070 | |
Kirill A. Shutemov | f765f54 | 2016-01-15 16:53:03 -0800 | [diff] [blame] | 1071 | out_unlock: |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1072 | unlock_page(page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1073 | out: |
| 1074 | return err; |
| 1075 | } |
| 1076 | |
| 1077 | /* |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1078 | * try_to_merge_with_ksm_page - like try_to_merge_two_pages, |
| 1079 | * but no new kernel page is allocated: kpage must already be a ksm page. |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1080 | * |
| 1081 | * This function returns 0 if the pages were merged, -EFAULT otherwise. |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1082 | */ |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1083 | static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item, |
| 1084 | struct page *page, struct page *kpage) |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1085 | { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1086 | struct mm_struct *mm = rmap_item->mm; |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1087 | struct vm_area_struct *vma; |
| 1088 | int err = -EFAULT; |
| 1089 | |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1090 | down_read(&mm->mmap_sem); |
Andrea Arcangeli | 85c6e8d | 2015-11-05 18:49:16 -0800 | [diff] [blame] | 1091 | vma = find_mergeable_vma(mm, rmap_item->address); |
| 1092 | if (!vma) |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1093 | goto out; |
| 1094 | |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1095 | err = try_to_merge_one_page(vma, page, kpage); |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1096 | if (err) |
| 1097 | goto out; |
| 1098 | |
Hugh Dickins | bc56620 | 2013-02-22 16:36:06 -0800 | [diff] [blame] | 1099 | /* Unstable nid is in union with stable anon_vma: remove first */ |
| 1100 | remove_rmap_item_from_tree(rmap_item); |
| 1101 | |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1102 | /* Must get reference to anon_vma while still holding mmap_sem */ |
Peter Zijlstra | 9e60109 | 2011-03-22 16:32:46 -0700 | [diff] [blame] | 1103 | rmap_item->anon_vma = vma->anon_vma; |
| 1104 | get_anon_vma(vma->anon_vma); |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1105 | out: |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1106 | up_read(&mm->mmap_sem); |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1107 | return err; |
| 1108 | } |
| 1109 | |
| 1110 | /* |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1111 | * try_to_merge_two_pages - take two identical pages and prepare them |
| 1112 | * to be merged into one page. |
| 1113 | * |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1114 | * This function returns the kpage if we successfully merged two identical |
| 1115 | * pages into one ksm page, NULL otherwise. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1116 | * |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1117 | * Note that this function upgrades page to ksm page: if one of the pages |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1118 | * is already a ksm page, try_to_merge_with_ksm_page should be used. |
| 1119 | */ |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1120 | static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item, |
| 1121 | struct page *page, |
| 1122 | struct rmap_item *tree_rmap_item, |
| 1123 | struct page *tree_page) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1124 | { |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1125 | int err; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1126 | |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1127 | err = try_to_merge_with_ksm_page(rmap_item, page, NULL); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1128 | if (!err) { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1129 | err = try_to_merge_with_ksm_page(tree_rmap_item, |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1130 | tree_page, page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1131 | /* |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1132 | * If that fails, we have a ksm page with only one pte |
| 1133 | * pointing to it: so break it. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1134 | */ |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1135 | if (err) |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1136 | break_cow(rmap_item); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1137 | } |
Hugh Dickins | 80e14822 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 1138 | return err ? NULL : page; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1139 | } |
| 1140 | |
| 1141 | /* |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1142 | * stable_tree_search - search for page inside the stable tree |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1143 | * |
| 1144 | * This function checks if there is a page inside the stable tree |
| 1145 | * with identical content to the page that we are scanning right now. |
| 1146 | * |
 * This function returns the stable tree page of identical content if found,
 * NULL otherwise.
| 1149 | */ |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1150 | static struct page *stable_tree_search(struct page *page) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1151 | { |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1152 | int nid; |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1153 | struct rb_root *root; |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1154 | struct rb_node **new; |
| 1155 | struct rb_node *parent; |
| 1156 | struct stable_node *stable_node; |
| 1157 | struct stable_node *page_node; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1158 | |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1159 | page_node = page_stable_node(page); |
| 1160 | if (page_node && page_node->head != &migrate_nodes) { |
| 1161 | /* ksm page forked */ |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1162 | get_page(page); |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1163 | return page; |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1164 | } |
| 1165 | |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1166 | nid = get_kpfn_nid(page_to_pfn(page)); |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1167 | root = root_stable_tree + nid; |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1168 | again: |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1169 | new = &root->rb_node; |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1170 | parent = NULL; |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1171 | |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1172 | while (*new) { |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1173 | struct page *tree_page; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1174 | int ret; |
| 1175 | |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1176 | cond_resched(); |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1177 | stable_node = rb_entry(*new, struct stable_node, node); |
Hugh Dickins | 8aafa6a | 2013-02-22 16:35:06 -0800 | [diff] [blame] | 1178 | tree_page = get_ksm_page(stable_node, false); |
Andrea Arcangeli | f2e5ff8 | 2015-11-05 18:49:10 -0800 | [diff] [blame] | 1179 | if (!tree_page) { |
| 1180 | /* |
| 1181 | * If we walked over a stale stable_node, |
| 1182 | * get_ksm_page() will call rb_erase() and it |
| 1183 | * may rebalance the tree from under us. So |
| 1184 | * restart the search from scratch. Returning |
| 1185 | * NULL would be safe too, but we'd generate |
| 1186 | * false negative insertions just because some |
| 1187 | * stable_node was stale. |
| 1188 | */ |
| 1189 | goto again; |
| 1190 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1191 | |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1192 | ret = memcmp_pages(page, tree_page); |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 1193 | put_page(tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1194 | |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1195 | parent = *new; |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 1196 | if (ret < 0) |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1197 | new = &parent->rb_left; |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 1198 | else if (ret > 0) |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1199 | new = &parent->rb_right; |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 1200 | else { |
| 1201 | /* |
| 1202 | * Lock and unlock the stable_node's page (which |
| 1203 | * might already have been migrated) so that page |
| 1204 | * migration is sure to notice its raised count. |
| 1205 | * It would be more elegant to return stable_node |
| 1206 | * than kpage, but that involves more changes. |
| 1207 | */ |
| 1208 | tree_page = get_ksm_page(stable_node, true); |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1209 | if (tree_page) { |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 1210 | unlock_page(tree_page); |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1211 | if (get_kpfn_nid(stable_node->kpfn) != |
| 1212 | NUMA(stable_node->nid)) { |
| 1213 | put_page(tree_page); |
| 1214 | goto replace; |
| 1215 | } |
| 1216 | return tree_page; |
| 1217 | } |
| 1218 | /* |
| 1219 | * There is now a place for page_node, but the tree may |
| 1220 | * have been rebalanced, so re-evaluate parent and new. |
| 1221 | */ |
| 1222 | if (page_node) |
| 1223 | goto again; |
| 1224 | return NULL; |
Hugh Dickins | c8d6553 | 2013-02-22 16:35:10 -0800 | [diff] [blame] | 1225 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1226 | } |
| 1227 | |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1228 | if (!page_node) |
| 1229 | return NULL; |
| 1230 | |
| 1231 | list_del(&page_node->list); |
| 1232 | DO_NUMA(page_node->nid = nid); |
| 1233 | rb_link_node(&page_node->node, parent, new); |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1234 | rb_insert_color(&page_node->node, root); |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1235 | get_page(page); |
| 1236 | return page; |
| 1237 | |
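	/*
	 * A matching stable_node was found, but its page has been migrated
	 * to a NUMA node other than the tree we searched: substitute
	 * page_node (the forked ksm page) for it if we have one, otherwise
	 * erase it, and park the old node on migrate_nodes for later
	 * re-placement.
	 */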
| 1238 | replace: |
| 1239 | if (page_node) { |
| 1240 | list_del(&page_node->list); |
| 1241 | DO_NUMA(page_node->nid = nid); |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1242 | rb_replace_node(&stable_node->node, &page_node->node, root); |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1243 | get_page(page); |
| 1244 | } else { |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1245 | rb_erase(&stable_node->node, root); |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1246 | page = NULL; |
| 1247 | } |
| 1248 | stable_node->head = &migrate_nodes; |
| 1249 | list_add(&stable_node->list, stable_node->head); |
| 1250 | return page; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1251 | } |
| 1252 | |
| 1253 | /* |
Hugh Dickins | e850dcf | 2013-02-22 16:35:03 -0800 | [diff] [blame] | 1254 | * stable_tree_insert - insert stable tree node pointing to new ksm page |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1255 | * into the stable tree. |
| 1256 | * |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1257 | * This function returns the stable tree node just allocated on success, |
| 1258 | * NULL otherwise. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1259 | */ |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1260 | static struct stable_node *stable_tree_insert(struct page *kpage) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1261 | { |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1262 | int nid; |
| 1263 | unsigned long kpfn; |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1264 | struct rb_root *root; |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1265 | struct rb_node **new; |
Andrea Arcangeli | f2e5ff8 | 2015-11-05 18:49:10 -0800 | [diff] [blame] | 1266 | struct rb_node *parent; |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1267 | struct stable_node *stable_node; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1268 | |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1269 | kpfn = page_to_pfn(kpage); |
| 1270 | nid = get_kpfn_nid(kpfn); |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1271 | root = root_stable_tree + nid; |
Andrea Arcangeli | f2e5ff8 | 2015-11-05 18:49:10 -0800 | [diff] [blame] | 1272 | again: |
| 1273 | parent = NULL; |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1274 | new = &root->rb_node; |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1275 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1276 | while (*new) { |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1277 | struct page *tree_page; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1278 | int ret; |
| 1279 | |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1280 | cond_resched(); |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1281 | stable_node = rb_entry(*new, struct stable_node, node); |
Hugh Dickins | 8aafa6a | 2013-02-22 16:35:06 -0800 | [diff] [blame] | 1282 | tree_page = get_ksm_page(stable_node, false); |
Andrea Arcangeli | f2e5ff8 | 2015-11-05 18:49:10 -0800 | [diff] [blame] | 1283 | if (!tree_page) { |
| 1284 | /* |
| 1285 | * If we walked over a stale stable_node, |
| 1286 | * get_ksm_page() will call rb_erase() and it |
| 1287 | * may rebalance the tree from under us. So |
| 1288 | * restart the search from scratch. Returning |
| 1289 | * NULL would be safe too, but we'd generate |
| 1290 | * false negative insertions just because some |
| 1291 | * stable_node was stale. |
| 1292 | */ |
| 1293 | goto again; |
| 1294 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1295 | |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1296 | ret = memcmp_pages(kpage, tree_page); |
| 1297 | put_page(tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1298 | |
| 1299 | parent = *new; |
| 1300 | if (ret < 0) |
| 1301 | new = &parent->rb_left; |
| 1302 | else if (ret > 0) |
| 1303 | new = &parent->rb_right; |
| 1304 | else { |
| 1305 | /* |
| 1306 | * It is not a bug that stable_tree_search() didn't |
| 1307 | * find this node: because at that time our page was |
			 * not yet write-protected, so it may have changed since.
| 1309 | */ |
| 1310 | return NULL; |
| 1311 | } |
| 1312 | } |
| 1313 | |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1314 | stable_node = alloc_stable_node(); |
| 1315 | if (!stable_node) |
| 1316 | return NULL; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1317 | |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1318 | INIT_HLIST_HEAD(&stable_node->hlist); |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1319 | stable_node->kpfn = kpfn; |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1320 | set_page_stable_node(kpage, stable_node); |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1321 | DO_NUMA(stable_node->nid = nid); |
Hugh Dickins | e850dcf | 2013-02-22 16:35:03 -0800 | [diff] [blame] | 1322 | rb_link_node(&stable_node->node, parent, new); |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1323 | rb_insert_color(&stable_node->node, root); |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1324 | |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1325 | return stable_node; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1326 | } |
| 1327 | |
| 1328 | /* |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1329 | * unstable_tree_search_insert - search for identical page, |
| 1330 | * else insert rmap_item into the unstable tree. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1331 | * |
| 1332 | * This function searches for a page in the unstable tree identical to the |
| 1333 | * page currently being scanned; and if no identical page is found in the |
| 1334 | * tree, we insert rmap_item as a new object into the unstable tree. |
| 1335 | * |
| 1336 | * This function returns pointer to rmap_item found to be identical |
| 1337 | * to the currently scanned page, NULL otherwise. |
| 1338 | * |
| 1339 | * This function does both searching and inserting, because they share |
| 1340 | * the same walking algorithm in an rbtree. |
| 1341 | */ |
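/*
 * A hedged sketch of that shared walk ("struct thing", "key" and "item"
 * are hypothetical, not taken from this file; rb_entry, rb_link_node and
 * rb_insert_color are the real rbtree helpers): one descent either returns
 * the equal node, or leaves parent/new pointing at the link where the new
 * node belongs.
 *
 *	struct rb_node **new = &root->rb_node, *parent = NULL;
 *
 *	while (*new) {
 *		struct thing *t = rb_entry(*new, struct thing, node);
 *		int cmp = memcmp(key, t->key, sizeof(t->key));
 *
 *		parent = *new;
 *		if (cmp < 0)
 *			new = &parent->rb_left;
 *		else if (cmp > 0)
 *			new = &parent->rb_right;
 *		else
 *			return t;			\/\* found \*\/
 *	}
 *	rb_link_node(&item->node, parent, new);
 *	rb_insert_color(&item->node, root);
 *	return NULL;
 */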
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1342 | static |
| 1343 | struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item, |
| 1344 | struct page *page, |
| 1345 | struct page **tree_pagep) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1346 | { |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1347 | struct rb_node **new; |
| 1348 | struct rb_root *root; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1349 | struct rb_node *parent = NULL; |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1350 | int nid; |
| 1351 | |
| 1352 | nid = get_kpfn_nid(page_to_pfn(page)); |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1353 | root = root_unstable_tree + nid; |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1354 | new = &root->rb_node; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1355 | |
| 1356 | while (*new) { |
| 1357 | struct rmap_item *tree_rmap_item; |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1358 | struct page *tree_page; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1359 | int ret; |
| 1360 | |
Hugh Dickins | d178f27 | 2009-11-09 15:58:23 +0000 | [diff] [blame] | 1361 | cond_resched(); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1362 | tree_rmap_item = rb_entry(*new, struct rmap_item, node); |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1363 | tree_page = get_mergeable_page(tree_rmap_item); |
Andrea Arcangeli | c8f95ed | 2015-11-05 18:49:19 -0800 | [diff] [blame] | 1364 | if (!tree_page) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1365 | return NULL; |
| 1366 | |
| 1367 | /* |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1368 | * Don't substitute a ksm page for a forked page. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1369 | */ |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1370 | if (page == tree_page) { |
| 1371 | put_page(tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1372 | return NULL; |
| 1373 | } |
| 1374 | |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1375 | ret = memcmp_pages(page, tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1376 | |
| 1377 | parent = *new; |
| 1378 | if (ret < 0) { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1379 | put_page(tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1380 | new = &parent->rb_left; |
| 1381 | } else if (ret > 0) { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1382 | put_page(tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1383 | new = &parent->rb_right; |
Hugh Dickins | b599cbd | 2013-02-22 16:36:05 -0800 | [diff] [blame] | 1384 | } else if (!ksm_merge_across_nodes && |
| 1385 | page_to_nid(tree_page) != nid) { |
| 1386 | /* |
| 1387 | * If tree_page has been migrated to another NUMA node, |
| 1388 | * it will be flushed out and put in the right unstable |
| 1389 | * tree next time: only merge with it when across_nodes. |
| 1390 | */ |
| 1391 | put_page(tree_page); |
| 1392 | return NULL; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1393 | } else { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1394 | *tree_pagep = tree_page; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1395 | return tree_rmap_item; |
| 1396 | } |
| 1397 | } |
| 1398 | |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1399 | rmap_item->address |= UNSTABLE_FLAG; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1400 | rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); |
Hugh Dickins | e850dcf | 2013-02-22 16:35:03 -0800 | [diff] [blame] | 1401 | DO_NUMA(rmap_item->nid = nid); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1402 | rb_link_node(&rmap_item->node, parent, new); |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1403 | rb_insert_color(&rmap_item->node, root); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1404 | |
Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 1405 | ksm_pages_unshared++; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1406 | return NULL; |
| 1407 | } |
| 1408 | |
| 1409 | /* |
| 1410 | * stable_tree_append - add another rmap_item to the linked list of |
| 1411 | * rmap_items hanging off a given node of the stable tree, all sharing |
| 1412 | * the same ksm page. |
| 1413 | */ |
| 1414 | static void stable_tree_append(struct rmap_item *rmap_item, |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1415 | struct stable_node *stable_node) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1416 | { |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1417 | rmap_item->head = stable_node; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1418 | rmap_item->address |= STABLE_FLAG; |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1419 | hlist_add_head(&rmap_item->hlist, &stable_node->hlist); |
Hugh Dickins | e178dfd | 2009-09-21 17:02:10 -0700 | [diff] [blame] | 1420 | |
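	/*
	 * Accounting: the first rmap_item on a stable node's hlist counts
	 * the ksm page itself (ksm_pages_shared); every further rmap_item
	 * is one more pte now saved by sharing it (ksm_pages_sharing).
	 */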
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1421 | if (rmap_item->hlist.next) |
| 1422 | ksm_pages_sharing++; |
| 1423 | else |
| 1424 | ksm_pages_shared++; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1425 | } |
| 1426 | |
| 1427 | /* |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1428 | * cmp_and_merge_page - first see if page can be merged into the stable tree; |
| 1429 | * if not, compare checksum to previous and if it's the same, see if page can |
| 1430 | * be inserted into the unstable tree, or merged with a page already there and |
| 1431 | * both transferred to the stable tree. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1432 | * |
 * @page: the page that we are searching for an identical page to.
| 1434 | * @rmap_item: the reverse mapping into the virtual address of this page |
| 1435 | */ |
| 1436 | static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) |
| 1437 | { |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1438 | struct rmap_item *tree_rmap_item; |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1439 | struct page *tree_page = NULL; |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1440 | struct stable_node *stable_node; |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1441 | struct page *kpage; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1442 | unsigned int checksum; |
| 1443 | int err; |
| 1444 | |
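	/*
	 * If page is already a ksm page, its stable_node may need moving to
	 * the migrate_nodes list: that happens when the page has been
	 * migrated to a NUMA node other than the tree it was inserted in
	 * (with !merge_across_nodes). If instead it is still in the right
	 * tree and this rmap_item already hangs off it, we are done.
	 */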
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1445 | stable_node = page_stable_node(page); |
| 1446 | if (stable_node) { |
| 1447 | if (stable_node->head != &migrate_nodes && |
| 1448 | get_kpfn_nid(stable_node->kpfn) != NUMA(stable_node->nid)) { |
| 1449 | rb_erase(&stable_node->node, |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1450 | root_stable_tree + NUMA(stable_node->nid)); |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1451 | stable_node->head = &migrate_nodes; |
| 1452 | list_add(&stable_node->list, stable_node->head); |
| 1453 | } |
| 1454 | if (stable_node->head != &migrate_nodes && |
| 1455 | rmap_item->head == stable_node) |
| 1456 | return; |
| 1457 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1458 | |
| 1459 | /* We first start with searching the page inside the stable tree */ |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1460 | kpage = stable_tree_search(page); |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1461 | if (kpage == page && rmap_item->head == stable_node) { |
| 1462 | put_page(kpage); |
| 1463 | return; |
| 1464 | } |
| 1465 | |
| 1466 | remove_rmap_item_from_tree(rmap_item); |
| 1467 | |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1468 | if (kpage) { |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1469 | err = try_to_merge_with_ksm_page(rmap_item, page, kpage); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1470 | if (!err) { |
| 1471 | /* |
| 1472 | * The page was successfully merged: |
| 1473 | * add its rmap_item to the stable tree. |
| 1474 | */ |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1475 | lock_page(kpage); |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1476 | stable_tree_append(rmap_item, page_stable_node(kpage)); |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1477 | unlock_page(kpage); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1478 | } |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1479 | put_page(kpage); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1480 | return; |
| 1481 | } |
| 1482 | |
| 1483 | /* |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1484 | * If the hash value of the page has changed from the last time |
| 1485 | * we calculated it, this page is changing frequently: therefore we |
| 1486 | * don't want to insert it in the unstable tree, and we don't want |
| 1487 | * to waste our time searching for something identical to it there. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1488 | */ |
| 1489 | checksum = calc_checksum(page); |
| 1490 | if (rmap_item->oldchecksum != checksum) { |
| 1491 | rmap_item->oldchecksum = checksum; |
| 1492 | return; |
| 1493 | } |
| 1494 | |
Claudio Imbrenda | e86c59b | 2017-02-24 14:55:39 -0800 | [diff] [blame] | 1495 | /* |
| 1496 | * Same checksum as an empty page. We attempt to merge it with the |
| 1497 | * appropriate zero page if the user enabled this via sysfs. |
| 1498 | */ |
	if (ksm_use_zero_pages && (checksum == zero_checksum)) {
		struct vm_area_struct *vma;

		/*
		 * find_mergeable_vma() must be called with mmap_sem held,
		 * and may return NULL if the area has been unmapped or
		 * unmerged meanwhile.
		 */
		down_read(&rmap_item->mm->mmap_sem);
		vma = find_mergeable_vma(rmap_item->mm, rmap_item->address);
		if (vma) {
			err = try_to_merge_one_page(vma, page,
					ZERO_PAGE(rmap_item->address));
		} else {
			/*
			 * If the vma is out of date, we do not need to
			 * continue.
			 */
			err = 0;
		}
		up_read(&rmap_item->mm->mmap_sem);
		/*
		 * In case of failure, the page was not really empty, so we
		 * need to continue. Otherwise we're done.
		 */
		if (!err)
			return;
	}
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1512 | tree_rmap_item = |
| 1513 | unstable_tree_search_insert(rmap_item, page, &tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1514 | if (tree_rmap_item) { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1515 | kpage = try_to_merge_two_pages(rmap_item, page, |
| 1516 | tree_rmap_item, tree_page); |
| 1517 | put_page(tree_page); |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1518 | if (kpage) { |
Hugh Dickins | bc56620 | 2013-02-22 16:36:06 -0800 | [diff] [blame] | 1519 | /* |
| 1520 | * The pages were successfully merged: insert new |
| 1521 | * node in the stable tree and add both rmap_items. |
| 1522 | */ |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1523 | lock_page(kpage); |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1524 | stable_node = stable_tree_insert(kpage); |
| 1525 | if (stable_node) { |
| 1526 | stable_tree_append(tree_rmap_item, stable_node); |
| 1527 | stable_tree_append(rmap_item, stable_node); |
| 1528 | } |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1529 | unlock_page(kpage); |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1530 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1531 | /* |
| 1532 | * If we fail to insert the page into the stable tree, |
| 1533 | * we will have 2 virtual addresses that are pointing |
| 1534 | * to a ksm page left outside the stable tree, |
| 1535 | * in which case we need to break_cow on both. |
| 1536 | */ |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1537 | if (!stable_node) { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1538 | break_cow(tree_rmap_item); |
| 1539 | break_cow(rmap_item); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1540 | } |
| 1541 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1542 | } |
| 1543 | } |
| 1544 | |
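/*
 * get_next_rmap_item - return the rmap_item for addr on this mm_slot's
 * address-ordered rmap_list: stale items for lower addresses (whose areas
 * are no longer VM_MERGEABLE) are unlinked and freed along the way, and a
 * fresh item is allocated if addr is not yet on the list.
 */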
| 1545 | static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot, |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1546 | struct rmap_item **rmap_list, |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1547 | unsigned long addr) |
| 1548 | { |
| 1549 | struct rmap_item *rmap_item; |
| 1550 | |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1551 | while (*rmap_list) { |
| 1552 | rmap_item = *rmap_list; |
Hugh Dickins | 93d1771 | 2009-12-14 17:59:16 -0800 | [diff] [blame] | 1553 | if ((rmap_item->address & PAGE_MASK) == addr) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1554 | return rmap_item; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1555 | if (rmap_item->address > addr) |
| 1556 | break; |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1557 | *rmap_list = rmap_item->rmap_list; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1558 | remove_rmap_item_from_tree(rmap_item); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1559 | free_rmap_item(rmap_item); |
| 1560 | } |
| 1561 | |
| 1562 | rmap_item = alloc_rmap_item(); |
| 1563 | if (rmap_item) { |
| 1564 | /* It has already been zeroed */ |
| 1565 | rmap_item->mm = mm_slot->mm; |
| 1566 | rmap_item->address = addr; |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1567 | rmap_item->rmap_list = *rmap_list; |
| 1568 | *rmap_list = rmap_item; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1569 | } |
| 1570 | return rmap_item; |
| 1571 | } |
| 1572 | |
| 1573 | static struct rmap_item *scan_get_next_rmap_item(struct page **page) |
| 1574 | { |
| 1575 | struct mm_struct *mm; |
| 1576 | struct mm_slot *slot; |
| 1577 | struct vm_area_struct *vma; |
| 1578 | struct rmap_item *rmap_item; |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1579 | int nid; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1580 | |
| 1581 | if (list_empty(&ksm_mm_head.mm_list)) |
| 1582 | return NULL; |
| 1583 | |
| 1584 | slot = ksm_scan.mm_slot; |
| 1585 | if (slot == &ksm_mm_head) { |
Hugh Dickins | 2919bfd | 2011-01-13 15:47:29 -0800 | [diff] [blame] | 1586 | /* |
| 1587 | * A number of pages can hang around indefinitely on per-cpu |
| 1588 | * pagevecs, raised page count preventing write_protect_page |
| 1589 | * from merging them. Though it doesn't really matter much, |
| 1590 | * it is puzzling to see some stuck in pages_volatile until |
| 1591 | * other activity jostles them out, and they also prevented |
| 1592 | * LTP's KSM test from succeeding deterministically; so drain |
| 1593 | * them here (here rather than on entry to ksm_do_scan(), |
| 1594 | * so we don't IPI too often when pages_to_scan is set low). |
| 1595 | */ |
| 1596 | lru_add_drain_all(); |
| 1597 | |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1598 | /* |
| 1599 | * Whereas stale stable_nodes on the stable_tree itself |
| 1600 | * get pruned in the regular course of stable_tree_search(), |
| 1601 | * those moved out to the migrate_nodes list can accumulate: |
| 1602 | * so prune them once before each full scan. |
| 1603 | */ |
| 1604 | if (!ksm_merge_across_nodes) { |
Geliang Tang | 0364041 | 2016-01-14 15:20:54 -0800 | [diff] [blame] | 1605 | struct stable_node *stable_node, *next; |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1606 | struct page *page; |
| 1607 | |
Geliang Tang | 0364041 | 2016-01-14 15:20:54 -0800 | [diff] [blame] | 1608 | list_for_each_entry_safe(stable_node, next, |
| 1609 | &migrate_nodes, list) { |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1610 | page = get_ksm_page(stable_node, false); |
| 1611 | if (page) |
| 1612 | put_page(page); |
| 1613 | cond_resched(); |
| 1614 | } |
| 1615 | } |
| 1616 | |
Hugh Dickins | ef53d16 | 2013-02-22 16:36:12 -0800 | [diff] [blame] | 1617 | for (nid = 0; nid < ksm_nr_node_ids; nid++) |
Petr Holasek | 90bd6fd | 2013-02-22 16:35:00 -0800 | [diff] [blame] | 1618 | root_unstable_tree[nid] = RB_ROOT; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1619 | |
| 1620 | spin_lock(&ksm_mmlist_lock); |
| 1621 | slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); |
| 1622 | ksm_scan.mm_slot = slot; |
| 1623 | spin_unlock(&ksm_mmlist_lock); |
Hugh Dickins | 2b47261 | 2011-06-15 15:08:58 -0700 | [diff] [blame] | 1624 | /* |
| 1625 | * Although we tested list_empty() above, a racing __ksm_exit |
| 1626 | * of the last mm on the list may have removed it since then. |
| 1627 | */ |
| 1628 | if (slot == &ksm_mm_head) |
| 1629 | return NULL; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1630 | next_mm: |
| 1631 | ksm_scan.address = 0; |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1632 | ksm_scan.rmap_list = &slot->rmap_list; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1633 | } |
| 1634 | |
| 1635 | mm = slot->mm; |
| 1636 | down_read(&mm->mmap_sem); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1637 | if (ksm_test_exit(mm)) |
| 1638 | vma = NULL; |
| 1639 | else |
| 1640 | vma = find_vma(mm, ksm_scan.address); |
| 1641 | |
| 1642 | for (; vma; vma = vma->vm_next) { |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1643 | if (!(vma->vm_flags & VM_MERGEABLE)) |
| 1644 | continue; |
| 1645 | if (ksm_scan.address < vma->vm_start) |
| 1646 | ksm_scan.address = vma->vm_start; |
| 1647 | if (!vma->anon_vma) |
| 1648 | ksm_scan.address = vma->vm_end; |
| 1649 | |
| 1650 | while (ksm_scan.address < vma->vm_end) { |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1651 | if (ksm_test_exit(mm)) |
| 1652 | break; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1653 | *page = follow_page(vma, ksm_scan.address, FOLL_GET); |
Andrea Arcangeli | 21ae5b0 | 2011-01-13 15:47:00 -0800 | [diff] [blame] | 1654 | if (IS_ERR_OR_NULL(*page)) { |
| 1655 | ksm_scan.address += PAGE_SIZE; |
| 1656 | cond_resched(); |
| 1657 | continue; |
| 1658 | } |
Kirill A. Shutemov | f765f54 | 2016-01-15 16:53:03 -0800 | [diff] [blame] | 1659 | if (PageAnon(*page)) { |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1660 | flush_anon_page(vma, *page, ksm_scan.address); |
| 1661 | flush_dcache_page(*page); |
| 1662 | rmap_item = get_next_rmap_item(slot, |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1663 | ksm_scan.rmap_list, ksm_scan.address); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1664 | if (rmap_item) { |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1665 | ksm_scan.rmap_list = |
| 1666 | &rmap_item->rmap_list; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1667 | ksm_scan.address += PAGE_SIZE; |
| 1668 | } else |
| 1669 | put_page(*page); |
| 1670 | up_read(&mm->mmap_sem); |
| 1671 | return rmap_item; |
| 1672 | } |
Andrea Arcangeli | 21ae5b0 | 2011-01-13 15:47:00 -0800 | [diff] [blame] | 1673 | put_page(*page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1674 | ksm_scan.address += PAGE_SIZE; |
| 1675 | cond_resched(); |
| 1676 | } |
| 1677 | } |
| 1678 | |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1679 | if (ksm_test_exit(mm)) { |
| 1680 | ksm_scan.address = 0; |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1681 | ksm_scan.rmap_list = &slot->rmap_list; |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1682 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1683 | /* |
	 * Nuke all the rmap_items that are above this current rmap,
	 * because there were no VM_MERGEABLE vmas with such addresses.
| 1686 | */ |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1687 | remove_trailing_rmap_items(slot, ksm_scan.rmap_list); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1688 | |
| 1689 | spin_lock(&ksm_mmlist_lock); |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1690 | ksm_scan.mm_slot = list_entry(slot->mm_list.next, |
| 1691 | struct mm_slot, mm_list); |
| 1692 | if (ksm_scan.address == 0) { |
| 1693 | /* |
| 1694 | * We've completed a full scan of all vmas, holding mmap_sem |
| 1695 | * throughout, and found no VM_MERGEABLE: so do the same as |
| 1696 | * __ksm_exit does to remove this mm from all our lists now. |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1697 | * This applies either when cleaning up after __ksm_exit |
| 1698 | * (but beware: we can reach here even before __ksm_exit), |
| 1699 | * or when all VM_MERGEABLE areas have been unmapped (and |
| 1700 | * mmap_sem then protects against race with MADV_MERGEABLE). |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1701 | */ |
Sasha Levin | 4ca3a69 | 2013-02-22 16:32:28 -0800 | [diff] [blame] | 1702 | hash_del(&slot->link); |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1703 | list_del(&slot->mm_list); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1704 | spin_unlock(&ksm_mmlist_lock); |
| 1705 | |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1706 | free_mm_slot(slot); |
| 1707 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1708 | up_read(&mm->mmap_sem); |
| 1709 | mmdrop(mm); |
| 1710 | } else { |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1711 | up_read(&mm->mmap_sem); |
		/*
		 * up_read(&mm->mmap_sem) first, because after
		 * spin_unlock(&ksm_mmlist_lock) runs, the "mm" may
		 * already have been freed under us by __ksm_exit()
		 * because the "mm_slot" is still hashed and
		 * ksm_scan.mm_slot doesn't point to it anymore.
		 */
| 1719 | spin_unlock(&ksm_mmlist_lock); |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1720 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1721 | |
| 1722 | /* Repeat until we've completed scanning the whole list */ |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1723 | slot = ksm_scan.mm_slot; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1724 | if (slot != &ksm_mm_head) |
| 1725 | goto next_mm; |
| 1726 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1727 | ksm_scan.seqnr++; |
| 1728 | return NULL; |
| 1729 | } |
| 1730 | |
| 1731 | /** |
| 1732 | * ksm_do_scan - the ksm scanner main worker function. |
 * @scan_npages: number of pages we want to scan before we return.
| 1734 | */ |
| 1735 | static void ksm_do_scan(unsigned int scan_npages) |
| 1736 | { |
| 1737 | struct rmap_item *rmap_item; |
Dan Carpenter | 22eccdd | 2010-04-23 13:18:10 -0400 | [diff] [blame] | 1738 | struct page *uninitialized_var(page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1739 | |
Andrea Arcangeli | 878aee7 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1740 | while (scan_npages-- && likely(!freezing(current))) { |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1741 | cond_resched(); |
| 1742 | rmap_item = scan_get_next_rmap_item(&page); |
| 1743 | if (!rmap_item) |
| 1744 | return; |
Hugh Dickins | 4146d2d | 2013-02-22 16:35:11 -0800 | [diff] [blame] | 1745 | cmp_and_merge_page(page, rmap_item); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1746 | put_page(page); |
| 1747 | } |
| 1748 | } |
| 1749 | |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1750 | static int ksmd_should_run(void) |
| 1751 | { |
| 1752 | return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list); |
| 1753 | } |
| 1754 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1755 | static int ksm_scan_thread(void *nothing) |
| 1756 | { |
Andrea Arcangeli | 878aee7 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1757 | set_freezable(); |
Izik Eidus | 339aa62 | 2009-09-21 17:02:07 -0700 | [diff] [blame] | 1758 | set_user_nice(current, 5); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1759 | |
| 1760 | while (!kthread_should_stop()) { |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1761 | mutex_lock(&ksm_thread_mutex); |
Hugh Dickins | ef4d43a | 2013-02-22 16:35:16 -0800 | [diff] [blame] | 1762 | wait_while_offlining(); |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1763 | if (ksmd_should_run()) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1764 | ksm_do_scan(ksm_thread_pages_to_scan); |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1765 | mutex_unlock(&ksm_thread_mutex); |
| 1766 | |
Andrea Arcangeli | 878aee7 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1767 | try_to_freeze(); |
| 1768 | |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1769 | if (ksmd_should_run()) { |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1770 | schedule_timeout_interruptible( |
| 1771 | msecs_to_jiffies(ksm_thread_sleep_millisecs)); |
| 1772 | } else { |
Andrea Arcangeli | 878aee7 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1773 | wait_event_freezable(ksm_thread_wait, |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1774 | ksmd_should_run() || kthread_should_stop()); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1775 | } |
| 1776 | } |
| 1777 | return 0; |
| 1778 | } |
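
/*
 * A hedged aside for tuning: ksmd's pace is driven from sysfs (the values
 * below are illustrative, not recommendations), e.g. from userspace:
 *
 *	echo 100 > /sys/kernel/mm/ksm/pages_to_scan
 *	echo 20  > /sys/kernel/mm/ksm/sleep_millisecs
 *	echo 1   > /sys/kernel/mm/ksm/run
 *
 * which set ksm_thread_pages_to_scan, ksm_thread_sleep_millisecs and
 * ksm_run as consumed by the scanner loop above.
 */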

int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	int err;

	switch (advice) {
	case MADV_MERGEABLE:
		/*
		 * Be somewhat over-protective for now!
		 */
		if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
				 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
				 VM_HUGETLB | VM_MIXEDMAP))
			return 0;		/* just ignore the advice */

#ifdef VM_SAO
		if (*vm_flags & VM_SAO)
			return 0;
#endif

		if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
			err = __ksm_enter(mm);
			if (err)
				return err;
		}

		*vm_flags |= VM_MERGEABLE;
		break;

	case MADV_UNMERGEABLE:
		if (!(*vm_flags & VM_MERGEABLE))
			return 0;		/* just ignore the advice */

		if (vma->anon_vma) {
			err = unmerge_ksm_pages(vma, start, end);
			if (err)
				return err;
		}

		*vm_flags &= ~VM_MERGEABLE;
		break;
	}

	return 0;
}
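
/*
 * A userspace sketch (illustrative, not part of this file) of how an
 * application reaches ksm_madvise() above; needs CONFIG_KSM, and ksmd
 * set running via /sys/kernel/mm/ksm/run for merging to happen:
 *
 *	#include <sys/mman.h>
 *	#include <string.h>
 *
 *	size_t len = 64 << 20;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	memset(buf, 0x5a, len);			// identical pages for ksmd
 *	madvise(buf, len, MADV_MERGEABLE);	// sets VM_MERGEABLE
 *	...
 *	madvise(buf, len, MADV_UNMERGEABLE);	// breaks COW, unshares
 */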

int __ksm_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int needs_wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* Check ksm_run too?  Would need tighter locking */
	needs_wakeup = list_empty(&ksm_mm_head.mm_list);

	spin_lock(&ksm_mmlist_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * When KSM_RUN_MERGE (or KSM_RUN_STOP),
	 * insert just behind the scanning cursor, to let the area settle
	 * down a little; when fork is followed by immediate exec, we don't
	 * want ksmd to waste time setting up and tearing down an rmap_list.
	 *
	 * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
	 * scanning cursor, otherwise KSM pages in newly forked mms will be
	 * missed: then we might as well insert at the end of the list.
	 */
	if (ksm_run & KSM_RUN_UNMERGE)
		list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list);
	else
		list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
	spin_unlock(&ksm_mmlist_lock);

	set_bit(MMF_VM_MERGEABLE, &mm->flags);
	mmgrab(mm);

	if (needs_wakeup)
		wake_up_interruptible(&ksm_thread_wait);

	return 0;
}

void __ksm_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int easy_to_free = 0;

	/*
	 * This process is exiting: if it's straightforward (as is the
	 * case when ksmd was never running), free mm_slot immediately.
	 * But if it's at the cursor or has rmap_items linked to it, use
	 * mmap_sem to synchronize with any break_cows before pagetables
	 * are freed, and leave the mm_slot on the list for ksmd to free.
	 * Beware: ksm may already have noticed it exiting and freed the slot.
	 */

	spin_lock(&ksm_mmlist_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
		if (!mm_slot->rmap_list) {
			hash_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			easy_to_free = 1;
		} else {
			list_move(&mm_slot->mm_list,
				  &ksm_scan.mm_slot->mm_list);
		}
	}
	spin_unlock(&ksm_mmlist_lock);

	if (easy_to_free) {
		free_mm_slot(mm_slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		mmdrop(mm);
	} else if (mm_slot) {
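		/*
		 * Empty pair: nothing is done under the lock; it only
		 * waits out any break_cow() still running under mmap_sem
		 * before this mm's pagetables can be torn down.
		 */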
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}

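/*
 * Used on the swap-in path (see the do_swap_page() reference below):
 * return the page unchanged when it can safely be mapped here (a
 * stable KSM page while KSM is not unmerging, or an anon page that
 * already belongs at this address); otherwise allocate and return a
 * fresh private copy for this fault.
 */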
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = page_anon_vma(page);
	struct page *new_page;

	if (PageKsm(page)) {
		if (page_stable_node(page) &&
		    !(ksm_run & KSM_RUN_UNMERGE))
			return page;	/* no need to copy it */
	} else if (!anon_vma) {
		return page;		/* no need to copy it */
	} else if (anon_vma->root == vma->anon_vma->root &&
		 page->index == linear_page_index(vma, address)) {
		return page;		/* still no need to copy it */
	}
	if (!PageUptodate(page))
		return page;		/* let do_swap_page report the error */

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	if (new_page) {
		copy_user_highpage(new_page, page, address, vma);

		SetPageDirty(new_page);
		__SetPageUptodate(new_page);
		__SetPageLocked(new_page);
	}

	return new_page;
}

int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
{
	struct stable_node *stable_node;
	struct rmap_item *rmap_item;
	int ret = SWAP_AGAIN;
	int search_new_forks = 0;

	VM_BUG_ON_PAGE(!PageKsm(page), page);

	/*
	 * Rely on the page lock to protect against concurrent modifications
	 * to that page's node of the stable tree.
	 */
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	stable_node = page_stable_node(page);
	if (!stable_node)
		return ret;
again:
	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
		struct anon_vma_chain *vmac;
		struct vm_area_struct *vma;

		cond_resched();
		anon_vma_lock_read(anon_vma);
		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
					       0, ULONG_MAX) {
			cond_resched();
			vma = vmac->vma;
			if (rmap_item->address < vma->vm_start ||
			    rmap_item->address >= vma->vm_end)
				continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
			 */
			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
				continue;

			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
				continue;

			ret = rwc->rmap_one(page, vma,
					rmap_item->address, rwc->arg);
			if (ret != SWAP_AGAIN) {
				anon_vma_unlock_read(anon_vma);
				goto out;
			}
			if (rwc->done && rwc->done(page)) {
				anon_vma_unlock_read(anon_vma);
				goto out;
			}
		}
		anon_vma_unlock_read(anon_vma);
	}
	if (!search_new_forks++)
		goto again;
out:
	return ret;
}

#ifdef CONFIG_MIGRATION
void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
	struct stable_node *stable_node;

	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
	VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);

	stable_node = page_stable_node(newpage);
	if (stable_node) {
		VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
		stable_node->kpfn = page_to_pfn(newpage);
		/*
		 * newpage->mapping was set in advance; now we need smp_wmb()
		 * to make sure that the new stable_node->kpfn is visible
		 * to get_ksm_page() before it can see that oldpage->mapping
		 * has gone stale (or that PageSwapCache has been cleared).
		 */
		smp_wmb();
		set_page_stable_node(oldpage, NULL);
	}
}
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_MEMORY_HOTREMOVE
static void wait_while_offlining(void)
{
	while (ksm_run & KSM_RUN_OFFLINE) {
		mutex_unlock(&ksm_thread_mutex);
		wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
			    TASK_UNINTERRUPTIBLE);
		mutex_lock(&ksm_thread_mutex);
	}
}

static void ksm_check_stable_tree(unsigned long start_pfn,
				  unsigned long end_pfn)
{
	struct stable_node *stable_node, *next;
	struct rb_node *node;
	int nid;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		node = rb_first(root_stable_tree + nid);
		while (node) {
			stable_node = rb_entry(node, struct stable_node, node);
			if (stable_node->kpfn >= start_pfn &&
			    stable_node->kpfn < end_pfn) {
				/*
				 * Don't get_ksm_page, page has already gone:
				 * which is why we keep kpfn instead of page*
				 */
				remove_node_from_stable_tree(stable_node);
				node = rb_first(root_stable_tree + nid);
			} else
				node = rb_next(node);
			cond_resched();
		}
	}
	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
		if (stable_node->kpfn >= start_pfn &&
		    stable_node->kpfn < end_pfn)
			remove_node_from_stable_tree(stable_node);
		cond_resched();
	}
}

static int ksm_memory_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
		 * and remove_all_stable_nodes() while memory is going offline:
		 * it is unsafe for them to touch the stable tree at this time.
		 * But unmerge_ksm_pages(), rmap lookups and other entry points
		 * which do not need the ksm_thread_mutex are all safe.
		 */
		mutex_lock(&ksm_thread_mutex);
		ksm_run |= KSM_RUN_OFFLINE;
		mutex_unlock(&ksm_thread_mutex);
		break;

	case MEM_OFFLINE:
		/*
		 * Most of the work is done by page migration; but there might
		 * be a few stable_nodes left over, still pointing to struct
		 * pages which have been offlined: prune those from the tree,
		 * otherwise get_ksm_page() might later try to access a
		 * non-existent struct page.
		 */
		ksm_check_stable_tree(mn->start_pfn,
				      mn->start_pfn + mn->nr_pages);
		/* fallthrough */

	case MEM_CANCEL_OFFLINE:
		mutex_lock(&ksm_thread_mutex);
		ksm_run &= ~KSM_RUN_OFFLINE;
		mutex_unlock(&ksm_thread_mutex);

		smp_mb();	/* wake_up_bit advises this */
		wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
		break;
	}
	return NOTIFY_OK;
}
#else
static void wait_while_offlining(void)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SYSFS
/*
 * This all compiles without CONFIG_SYSFS, but is a waste of space.
 */

#define KSM_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KSM_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

static ssize_t sleep_millisecs_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
}

static ssize_t sleep_millisecs_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	ksm_thread_sleep_millisecs = msecs;

	return count;
}
KSM_ATTR(sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long nr_pages;

	err = kstrtoul(buf, 10, &nr_pages);
	if (err || nr_pages > UINT_MAX)
		return -EINVAL;

	ksm_thread_pages_to_scan = nr_pages;

	return count;
}
KSM_ATTR(pages_to_scan);

static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%lu\n", ksm_run);
}

static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	int err;
	unsigned long flags;

	err = kstrtoul(buf, 10, &flags);
	if (err || flags > UINT_MAX)
		return -EINVAL;
	if (flags > KSM_RUN_UNMERGE)
		return -EINVAL;

	/*
	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
	 * breaking COW to free the pages_shared (but leaves mm_slots
	 * on the list for when ksmd may be set running again).
	 */

	mutex_lock(&ksm_thread_mutex);
	wait_while_offlining();
	if (ksm_run != flags) {
		ksm_run = flags;
		if (flags & KSM_RUN_UNMERGE) {
			set_current_oom_origin();
			err = unmerge_and_remove_all_rmap_items();
			clear_current_oom_origin();
			if (err) {
				ksm_run = KSM_RUN_STOP;
				count = err;
			}
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	if (flags & KSM_RUN_MERGE)
		wake_up_interruptible(&ksm_thread_wait);

	return count;
}
KSM_ATTR(run);
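
/*
 * A userspace sketch (illustrative, not part of this file) of driving
 * the knobs above through sysfs; the directory name comes from
 * ksm_attr_group below, so the files live under /sys/kernel/mm/ksm:
 *
 *	#include <stdio.h>
 *
 *	static int ksm_set(const char *attr, const char *val)
 *	{
 *		char path[128];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path),
 *			 "/sys/kernel/mm/ksm/%s", attr);
 *		f = fopen(path, "w");
 *		if (!f)
 *			return -1;
 *		fputs(val, f);
 *		return fclose(f);
 *	}
 *
 *	// e.g. ksm_set("pages_to_scan", "256");
 *	//      ksm_set("sleep_millisecs", "50");
 *	//      ksm_set("run", "1");	reaches run_store() above
 */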

#ifdef CONFIG_NUMA
static ssize_t merge_across_nodes_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_merge_across_nodes);
}

static ssize_t merge_across_nodes_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t count)
{
	int err;
	unsigned long knob;

	err = kstrtoul(buf, 10, &knob);
	if (err)
		return err;
	if (knob > 1)
		return -EINVAL;

	mutex_lock(&ksm_thread_mutex);
	wait_while_offlining();
	if (ksm_merge_across_nodes != knob) {
		if (ksm_pages_shared || remove_all_stable_nodes())
			err = -EBUSY;
		else if (root_stable_tree == one_stable_tree) {
			struct rb_root *buf;
			/*
			 * This is the first time that we switch away from the
			 * default of merging across nodes: must now allocate
			 * a buffer to hold as many roots as may be needed.
			 * Allocate stable and unstable together:
			 * MAXSMP NODES_SHIFT 10 will use 16kB.
			 */
			buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
				      GFP_KERNEL);
			/*
			 * kcalloc returns zeroed memory, and an all-zero
			 * rb_root is exactly RB_ROOT: each tree starts empty.
			 */
			if (!buf)
				err = -ENOMEM;
			else {
				root_stable_tree = buf;
				root_unstable_tree = buf + nr_node_ids;
				/* Stable tree is empty but not the unstable */
				root_unstable_tree[0] = one_unstable_tree[0];
			}
		}
		if (!err) {
			ksm_merge_across_nodes = knob;
			ksm_nr_node_ids = knob ? 1 : nr_node_ids;
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	return err ? err : count;
}
KSM_ATTR(merge_across_nodes);
#endif

static ssize_t use_zero_pages_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_use_zero_pages);
}
static ssize_t use_zero_pages_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	int err;
	bool value;

	err = kstrtobool(buf, &value);
	if (err)
		return -EINVAL;

	ksm_use_zero_pages = value;

	return count;
}
KSM_ATTR(use_zero_pages);
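
/*
 * With use_zero_pages enabled, ksmd merges empty pages with the kernel
 * zero page(s) instead of with each other in the stable tree; the
 * zero_checksum computed in ksm_init() below is used to spot them.
 */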

static ssize_t pages_shared_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_shared);
}
KSM_ATTR_RO(pages_shared);

static ssize_t pages_sharing_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_sharing);
}
KSM_ATTR_RO(pages_sharing);

static ssize_t pages_unshared_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_unshared);
}
KSM_ATTR_RO(pages_unshared);

static ssize_t pages_volatile_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	long ksm_pages_volatile;

	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
			   - ksm_pages_sharing - ksm_pages_unshared;
	/*
	 * It was not worth any locking to calculate that statistic,
	 * but it might therefore sometimes be negative: conceal that.
	 */
	if (ksm_pages_volatile < 0)
		ksm_pages_volatile = 0;
	return sprintf(buf, "%ld\n", ksm_pages_volatile);
}
KSM_ATTR_RO(pages_volatile);
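
/*
 * Read together, the counters above give a quick effectiveness check:
 * pages_sharing / pages_shared is the average sharing ratio, and
 * pages_sharing itself approximates the page frames saved.  For
 * example, pages_shared = 1000 with pages_sharing = 9000 means 10000
 * mapped sites are backed by 1000 KSM pages, saving ~9000 frames
 * (about 35MB with 4kB pages).
 */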

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_scan.seqnr);
}
KSM_ATTR_RO(full_scans);

static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&full_scans_attr.attr,
#ifdef CONFIG_NUMA
	&merge_across_nodes_attr.attr,
#endif
	&use_zero_pages_attr.attr,
	NULL,
};

static struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
#endif /* CONFIG_SYSFS */

static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	/* The correct value depends on page size and endianness */
	zero_checksum = calc_checksum(ZERO_PAGE(0));
	/* Default to false for backwards compatibility */
	ksm_use_zero_pages = false;

	err = ksm_slab_init();
	if (err)
		goto out;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		pr_err("ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		pr_err("ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */

#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MEMORY_HOTREMOVE
	/* There is no significance to this priority 100 */
	hotplug_memory_notifier(ksm_memory_callback, 100);
#endif
	return 0;

out_free:
	ksm_slab_free();
out:
	return err;
}
subsys_initcall(ksm_init);