/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>

#include <asm/tlbflush.h>
#include "internal.h"

/*
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching
 * this tree is guaranteed to work (except when pages are unmapped), and
 * therefore this tree is called the stable tree.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on
 *    the colors of the nodes and not on their contents, assuring that even
 *    when the tree gets "corrupted" it won't get out of balance, so scanning
 *    time remains the same (also, searching and inserting nodes in an rbtree
 *    uses the same algorithm, so we have no overhead when we flush and
 *    rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable
 *    tree - see the sketch below.)
 */
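
/*
 * A simplified sketch (illustrative, not literal code) of how one scanned
 * page flows through these trees; the real flow is implemented later in
 * this file by cmp_and_merge_page(), with locking, error handling and
 * counters omitted here:
 *
 *	kpage = stable_tree_search(page);
 *	if (kpage) {
 *		merge page into kpage - try_to_merge_with_ksm_page();
 *		return;
 *	}
 *	checksum = calc_checksum(page);
 *	if (checksum != rmap_item->oldchecksum) {
 *		rmap_item->oldchecksum = checksum;   - too volatile, skip
 *		return;
 *	}
 *	search/insert page in the unstable tree;
 *	if an identical page was already there,
 *		merge the two pages and stable_tree_insert() the result;
 */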

/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node link;
	struct list_head mm_list;
	struct rmap_item *rmap_list;
	struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct mm_slot *mm_slot;
	unsigned long address;
	struct rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page
 */
struct stable_node {
	struct rb_node node;
	struct hlist_head hlist;
	unsigned long kpfn;
};

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct rmap_item {
	struct rmap_item *rmap_list;
	struct anon_vma *anon_vma;	/* when stable */
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */
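
/*
 * Packing example (illustrative addresses): rmap_item->address holds a
 * page-aligned user address, so its low bits are free for the flags and
 * seqnr above.  An item inserted into the unstable tree during scan 0x05
 * at address 0x7f1234560000 is stored as 0x7f1234560105 (UNSTABLE_FLAG |
 * seqnr 0x05); masking with PAGE_MASK recovers the address, and the low
 * byte lets remove_rmap_item_from_tree() tell a fresh insertion from one
 * left over from the previous scan.
 */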
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 151 | |
| 152 | /* The stable and unstable tree heads */ |
| 153 | static struct rb_root root_stable_tree = RB_ROOT; |
| 154 | static struct rb_root root_unstable_tree = RB_ROOT; |
| 155 | |
| 156 | #define MM_SLOTS_HASH_HEADS 1024 |
| 157 | static struct hlist_head *mm_slots_hash; |
| 158 | |
| 159 | static struct mm_slot ksm_mm_head = { |
| 160 | .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list), |
| 161 | }; |
| 162 | static struct ksm_scan ksm_scan = { |
| 163 | .mm_slot = &ksm_mm_head, |
| 164 | }; |
| 165 | |
| 166 | static struct kmem_cache *rmap_item_cache; |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 167 | static struct kmem_cache *stable_node_cache; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 168 | static struct kmem_cache *mm_slot_cache; |
| 169 | |
| 170 | /* The number of nodes in the stable tree */ |
Hugh Dickins | b402826 | 2009-09-21 17:02:09 -0700 | [diff] [blame] | 171 | static unsigned long ksm_pages_shared; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 172 | |
Hugh Dickins | e178dfd | 2009-09-21 17:02:10 -0700 | [diff] [blame] | 173 | /* The number of page slots additionally sharing those nodes */ |
Hugh Dickins | b402826 | 2009-09-21 17:02:09 -0700 | [diff] [blame] | 174 | static unsigned long ksm_pages_sharing; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 175 | |
Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 176 | /* The number of nodes in the unstable tree */ |
| 177 | static unsigned long ksm_pages_unshared; |
| 178 | |
| 179 | /* The number of rmap_items in use: to calculate pages_volatile */ |
| 180 | static unsigned long ksm_rmap_items; |
| 181 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 182 | /* Limit on the number of unswappable pages used */ |
Izik Eidus | 2c6854f | 2009-09-23 15:56:04 -0700 | [diff] [blame] | 183 | static unsigned long ksm_max_kernel_pages; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 184 | |
| 185 | /* Number of pages ksmd should scan in one batch */ |
Izik Eidus | 2c6854f | 2009-09-23 15:56:04 -0700 | [diff] [blame] | 186 | static unsigned int ksm_thread_pages_to_scan = 100; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 187 | |
| 188 | /* Milliseconds ksmd should sleep between batches */ |
Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 189 | static unsigned int ksm_thread_sleep_millisecs = 20; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 190 | |
| 191 | #define KSM_RUN_STOP 0 |
| 192 | #define KSM_RUN_MERGE 1 |
| 193 | #define KSM_RUN_UNMERGE 2 |
Izik Eidus | 2c6854f | 2009-09-23 15:56:04 -0700 | [diff] [blame] | 194 | static unsigned int ksm_run = KSM_RUN_STOP; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 195 | |
| 196 | static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait); |
| 197 | static DEFINE_MUTEX(ksm_thread_mutex); |
| 198 | static DEFINE_SPINLOCK(ksm_mmlist_lock); |
| 199 | |
| 200 | #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\ |
| 201 | sizeof(struct __struct), __alignof__(struct __struct),\ |
| 202 | (__flags), NULL) |
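
/*
 * For reference, KSM_KMEM_CACHE(rmap_item, 0) below expands to:
 *
 *	kmem_cache_create("ksm_rmap_item", sizeof(struct rmap_item),
 *			  __alignof__(struct rmap_item), 0, NULL);
 */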

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static inline struct rmap_item *alloc_rmap_item(void)
{
	struct rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct stable_node *alloc_stable_node(void)
{
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
}

static inline void free_stable_node(struct stable_node *stable_node)
{
	kmem_cache_free(stable_node_cache, stable_node);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static int __init mm_slots_hash_init(void)
{
	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
				GFP_KERNEL);
	if (!mm_slots_hash)
		return -ENOMEM;
	return 0;
}

static void __init mm_slots_hash_free(void)
{
	kfree(mm_slots_hash);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	struct hlist_head *bucket;
	struct hlist_node *node;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	hlist_for_each_entry(mm_slot, node, bucket, link) {
		if (mm == mm_slot->mm)
			return mm_slot;
	}
	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	struct hlist_head *bucket;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	mm_slot->mm = mm;
	hlist_add_head(&mm_slot->link, bucket);
}
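
/*
 * A note on the bucket computation above: dividing the mm pointer by
 * sizeof(struct mm_struct) scales it down by the object size, so that
 * consecutively allocated mm_structs tend to land in different buckets
 * before the modulo over the MM_SLOTS_HASH_HEADS heads.  get_mm_slot()
 * and insert_to_mm_slots_hash() must use the identical expression, or
 * an inserted slot could never be found again.
 */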

static inline int in_stable_tree(struct rmap_item *rmap_item)
{
	return rmap_item->address & STABLE_FLAG;
}

static void hold_anon_vma(struct rmap_item *rmap_item,
			  struct anon_vma *anon_vma)
{
	rmap_item->anon_vma = anon_vma;
	atomic_inc(&anon_vma->ksm_refcount);
}

static void drop_anon_vma(struct rmap_item *rmap_item)
{
	struct anon_vma *anon_vma = rmap_item->anon_vma;

	if (atomic_dec_and_lock(&anon_vma->ksm_refcount, &anon_vma->lock)) {
		int empty = list_empty(&anon_vma->head);
		spin_unlock(&anon_vma->lock);
		if (empty)
			anon_vma_free(anon_vma);
	}
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *	if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
 *		put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	int ret = 0;

	do {
		cond_resched();
		page = follow_page(vma, addr, FOLL_GET);
		if (!page)
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma->vm_mm, vma, addr,
							FAULT_FLAG_WRITE);
		else
			ret = VM_FAULT_WRITE;
		put_page(page);
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
	/*
	 * We must loop because handle_mm_fault() may back out if there's
	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
	 * COW has been broken, even if the vma does not permit VM_WRITE;
	 * but note that a concurrent fault might break PageKsm for us.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static void break_cow(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	drop_anon_vma(rmap_item);

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		goto out;
	break_ksm(vma, addr);
out:
	up_read(&mm->mmap_sem);
}

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (!page)
		goto out;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
		put_page(page);
out:		page = NULL;
	}
	up_read(&mm->mmap_sem);
	return page;
}

static void remove_node_from_stable_tree(struct stable_node *stable_node)
{
	struct rmap_item *rmap_item;
	struct hlist_node *hlist;

	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;
		drop_anon_vma(rmap_item);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	rb_erase(&stable_node->node, &root_stable_tree);
	free_stable_node(stable_node);
}

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * If so, we can trust the content of the page, and it returns the
 * page with its count raised; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive.  So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node.  This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping.
 *
 * include/linux/pagemap.h page_cache_get_speculative() is a good reference,
 * but this is different - made simpler by ksm_thread_mutex being held, but
 * interesting for assuming that no other use of the struct page could ever
 * put our expected_mapping into page->mapping (or a field of the union which
 * coincides with page->mapping).  The RCU calls are not for KSM at all, but
 * to keep the page_count protocol described with page_cache_get_speculative.
 *
 * Note: it is possible that get_ksm_page() will return NULL one moment,
 * then page the next, if the page is in between page_freeze_refs() and
 * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
 * is on its way to being freed; but it is an anomaly to bear in mind.
 */
static struct page *get_ksm_page(struct stable_node *stable_node)
{
	struct page *page;
	void *expected_mapping;

	page = pfn_to_page(stable_node->kpfn);
	expected_mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
	rcu_read_lock();
	if (page->mapping != expected_mapping)
		goto stale;
	if (!get_page_unless_zero(page))
		goto stale;
	if (page->mapping != expected_mapping) {
		put_page(page);
		goto stale;
	}
	rcu_read_unlock();
	return page;
stale:
	rcu_read_unlock();
	remove_node_from_stable_tree(stable_node);
	return NULL;
}
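
/*
 * A worked example of the keyhole check above: while a ksm page is live,
 * page->mapping equals its stable_node pointer with the low bits
 * PAGE_MAPPING_ANON | PAGE_MAPPING_KSM set, which is exactly the
 * expected_mapping computed above.  Once the page is freed (mapping
 * reset to NULL) or reused (mapping now an anon_vma or address_space
 * pointer), the comparison fails and we take the stale path.
 */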

/*
 * Remove an rmap_item from the stable or unstable tree: unlink it and
 * update the counts that track shared and unshared pages.
 */
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = get_ksm_page(stable_node);
		if (!page)
			goto out;

		lock_page(page);
		hlist_del(&rmap_item->hlist);
		unlock_page(page);
		put_page(page);

		if (stable_node->hlist.first)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		drop_anon_vma(rmap_item);
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node, &root_unstable_tree);

		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}
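
/*
 * Age example (illustrative): assuming insertion into the unstable tree
 * stored the low byte of ksm_scan.seqnr in rmap_item->address, an item
 * inserted and removed within scan 0x41 gives age (u8)(0x41 - 0x41) == 0,
 * so it is still linked in the current tree and must be erased; if the
 * scan counter has since advanced to 0x42, age is 1 and the rb_erase()
 * is skipped because that whole tree was already reset to RB_ROOT.
 */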

static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
				       struct rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr);
	}
	return err;
}

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int unmerge_and_remove_all_rmap_items(void)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
						struct mm_slot, mm_list);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot;
			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
		mm = mm_slot->mm;
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (ksm_test_exit(mm))
				break;
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err)
				goto error;
		}

		remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);

		spin_lock(&ksm_mmlist_lock);
		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
						struct mm_slot, mm_list);
		if (ksm_test_exit(mm)) {
			hlist_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			spin_unlock(&ksm_mmlist_lock);

			free_mm_slot(mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			up_read(&mm->mmap_sem);
			mmdrop(mm);
		} else {
			spin_unlock(&ksm_mmlist_lock);
			up_read(&mm->mmap_sem);
		}
	}

	ksm_scan.seqnr = 0;
	return 0;

error:
	up_read(&mm->mmap_sem);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */

static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page, KM_USER0);
	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
	kunmap_atomic(addr, KM_USER0);
	return checksum;
}
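
/*
 * Note: jhash2() above hashes the page as PAGE_SIZE / 4 u32 words (1024
 * words on a 4K page) with the arbitrary initval 17.  The checksum only
 * judges volatility - whether a page changed between scans; actual
 * content matching is always done with memcmp_pages() below.
 */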

static int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1, KM_USER0);
	addr2 = kmap_atomic(page2, KM_USER1);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2, KM_USER1);
	kunmap_atomic(addr1, KM_USER0);
	return ret;
}

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}

static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	pte_t *ptep;
	spinlock_t *ptl;
	int swapped;
	int err = -EFAULT;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto out;

	if (pte_write(*ptep)) {
		pte_t entry;

		swapped = PageSwapCache(page);
		flush_cache_page(vma, addr, page_to_pfn(page));
		/*
		 * Ok this is tricky: when get_user_pages_fast() runs it
		 * doesn't take any lock, therefore the check that we are
		 * going to make with the page count against the mapcount
		 * is racy and O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check;
		 * this assures us that no O_DIRECT can happen after the
		 * check or in the middle of the check.
		 */
		entry = ptep_clear_flush(vma, addr, ptep);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on
		 * the page.
		 */
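		/*
		 * Worked example (illustrative): an anonymous page mapped
		 * by this pte only and not in swap cache has
		 * page_mapcount == 1 and swapped == 0; the mapping itself
		 * plus our caller's follow_page() reference give
		 * page_count == 2, so 1 + 1 + 0 == 2 and the test below
		 * passes.  Any transient extra reference - say O_DIRECT
		 * pinning the page - raises page_count and aborts the merge.
		 */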
		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
			set_pte_at_notify(mm, addr, ptep, entry);
			goto out_unlock;
		}
		entry = pte_wrprotect(entry);
		set_pte_at_notify(mm, addr, ptep, entry);
	}
	*orig_pte = *ptep;
	err = 0;

out_unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return err;
}

/**
 * replace_page - replace page in vma by new ksm page
 * @vma:      vma that holds the pte pointing to page
 * @page:     the page we are replacing by kpage
 * @kpage:    the ksm page we replace page by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int replace_page(struct vm_area_struct *vma, struct page *page,
			struct page *kpage, pte_t orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		goto out;

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_same(*ptep, orig_pte)) {
		pte_unmap_unlock(ptep, ptl);
		goto out;
	}

	get_page(kpage);
	page_add_anon_rmap(kpage, vma, addr);

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	put_page(page);

	pte_unmap_unlock(ptep, ptl);
	err = 0;
out:
	return err;
}

/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
 * @kpage: the PageKsm page that we want to map instead of page,
 *         or NULL the first time when we want to use page as kpage.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *page, struct page *kpage)
{
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (page == kpage)			/* ksm page forked */
		return 0;

	if (!(vma->vm_flags & VM_MERGEABLE))
		goto out;
	if (!PageAnon(page))
		goto out;

	/*
	 * We need the page lock to read a stable PageSwapCache in
	 * write_protect_page().  We use trylock_page() instead of
	 * lock_page() because we don't want to wait here - we
	 * prefer to continue scanning and merging different pages,
	 * then come back to this page when it is unlocked.
	 */
	if (!trylock_page(page))
		goto out;
	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected.  If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected.  But in either
	 * case, we need to lock and check page_count is not raised.
	 */
	if (write_protect_page(vma, page, &orig_pte) == 0) {
		if (!kpage) {
			/*
			 * While we hold page lock, upgrade page from
			 * PageAnon+anon_vma to PageKsm+NULL stable_node:
			 * stable_tree_insert() will update stable_node.
			 */
			set_page_stable_node(page, NULL);
			mark_page_accessed(page);
			err = 0;
		} else if (pages_identical(page, kpage))
			err = replace_page(vma, page, kpage, orig_pte);
	}

	if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
		munlock_vma_page(page);
		if (!PageMlocked(kpage)) {
			unlock_page(page);
			lock_page(kpage);
			mlock_vma_page(kpage);
			page = kpage;		/* for final unlock */
		}
	}

	unlock_page(page);
out:
	return err;
}

/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
				      struct page *page, struct page *kpage)
{
	struct mm_struct *mm = rmap_item->mm;
	struct vm_area_struct *vma;
	int err = -EFAULT;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	vma = find_vma(mm, rmap_item->address);
	if (!vma || vma->vm_start > rmap_item->address)
		goto out;

	err = try_to_merge_one_page(vma, page, kpage);
	if (err)
		goto out;

	/* Must get reference to anon_vma while still holding mmap_sem */
	hold_anon_vma(rmap_item, vma->anon_vma);
out:
	up_read(&mm->mmap_sem);
	return err;
}

/*
 * try_to_merge_two_pages - take two identical pages and prepare them
 * to be merged into one page.
 *
 * This function returns the kpage if we successfully merged two identical
 * pages into one ksm page, NULL otherwise.
 *
 * Note that this function upgrades page to ksm page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
 */
static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
					   struct page *page,
					   struct rmap_item *tree_rmap_item,
					   struct page *tree_page)
{
	int err;

	/*
	 * The number of nodes in the stable tree
	 * is the number of kernel pages that we hold.
	 */
	if (ksm_max_kernel_pages &&
	    ksm_max_kernel_pages <= ksm_pages_shared)
		return NULL;

	err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
	if (!err) {
		err = try_to_merge_with_ksm_page(tree_rmap_item,
							tree_page, page);
		/*
		 * If that fails, we have a ksm page with only one pte
		 * pointing to it: so break it.
		 */
		if (err)
			break_cow(rmap_item);
	}
	return err ? NULL : page;
}
| 968 | /* |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 969 | * stable_tree_search - search for page inside the stable tree |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 970 | * |
| 971 | * This function checks if there is a page inside the stable tree |
| 972 | * with identical content to the page that we are scanning right now. |
| 973 | * |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 974 | * This function returns the stable tree node of identical content if found, |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 975 | * NULL otherwise. |
| 976 | */ |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame^] | 977 | static struct page *stable_tree_search(struct page *page) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 978 | { |
| 979 | struct rb_node *node = root_stable_tree.rb_node; |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 980 | struct stable_node *stable_node; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 981 | |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 982 | stable_node = page_stable_node(page); |
| 983 | if (stable_node) { /* ksm page forked */ |
| 984 | get_page(page); |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame^] | 985 | return page; |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 986 | } |
| 987 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 988 | while (node) { |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 989 | struct page *tree_page; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 990 | int ret; |
| 991 | |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 992 | cond_resched(); |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 993 | stable_node = rb_entry(node, struct stable_node, node); |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 994 | tree_page = get_ksm_page(stable_node); |
| 995 | if (!tree_page) |
| 996 | return NULL; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 997 | |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 998 | ret = memcmp_pages(page, tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 999 | |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1000 | if (ret < 0) { |
| 1001 | put_page(tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1002 | node = node->rb_left; |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1003 | } else if (ret > 0) { |
| 1004 | put_page(tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1005 | node = node->rb_right; |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1006 | } else |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame^] | 1007 | return tree_page; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1008 | } |
| 1009 | |
| 1010 | return NULL; |
| 1011 | } |
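/*
 * Illustrative sketch, not kernel code: the walk above in miniature.  A
 * plain binary search tree stands in for the kernel rbtree, and memcmp()
 * over whole pages is the ordering function, just as memcmp_pages()
 * orders the stable tree.  All names below (struct snode, stable_search,
 * KSM_SKETCH_PAGE_SIZE) are hypothetical.
 */
#if 0
#include <string.h>

#define KSM_SKETCH_PAGE_SIZE 4096

struct snode {
	unsigned char data[KSM_SKETCH_PAGE_SIZE];	/* the "ksm page" */
	struct snode *left, *right;
};

/* Return the node whose content matches page, or NULL if none does. */
static struct snode *stable_search(struct snode *root,
				   const unsigned char *page)
{
	while (root) {
		int ret = memcmp(page, root->data, KSM_SKETCH_PAGE_SIZE);

		if (ret < 0)
			root = root->left;
		else if (ret > 0)
			root = root->right;
		else
			return root;	/* identical content found */
	}
	return NULL;
}
#endif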
| 1012 | |
| 1013 | /* |
| 1014 | * stable_tree_insert - insert rmap_item pointing to new ksm page |
| 1015 | * into the stable tree. |
| 1016 | * |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1017 | * This function returns the stable tree node just allocated on success, |
| 1018 | * NULL otherwise. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1019 | */ |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1020 | static struct stable_node *stable_tree_insert(struct page *kpage) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1021 | { |
| 1022 | struct rb_node **new = &root_stable_tree.rb_node; |
| 1023 | struct rb_node *parent = NULL; |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1024 | struct stable_node *stable_node; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1025 | |
| 1026 | while (*new) { |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1027 | struct page *tree_page; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1028 | int ret; |
| 1029 | |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1030 | cond_resched(); |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1031 | stable_node = rb_entry(*new, struct stable_node, node); |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1032 | tree_page = get_ksm_page(stable_node); |
| 1033 | if (!tree_page) |
| 1034 | return NULL; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1035 | |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1036 | ret = memcmp_pages(kpage, tree_page); |
| 1037 | put_page(tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1038 | |
| 1039 | parent = *new; |
| 1040 | if (ret < 0) |
| 1041 | new = &parent->rb_left; |
| 1042 | else if (ret > 0) |
| 1043 | new = &parent->rb_right; |
| 1044 | else { |
| 1045 | /* |
| 1046 | * It is not a bug that stable_tree_search() didn't |
| 1047 | * find this node earlier: at that time our page was |
| 1048 | * not yet write-protected, so its contents may have changed since. |
| 1049 | */ |
| 1050 | return NULL; |
| 1051 | } |
| 1052 | } |
| 1053 | |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1054 | stable_node = alloc_stable_node(); |
| 1055 | if (!stable_node) |
| 1056 | return NULL; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1057 | |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1058 | rb_link_node(&stable_node->node, parent, new); |
| 1059 | rb_insert_color(&stable_node->node, &root_stable_tree); |
| 1060 | |
| 1061 | INIT_HLIST_HEAD(&stable_node->hlist); |
| 1062 | |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame^] | 1063 | stable_node->kpfn = page_to_pfn(kpage); |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1064 | set_page_stable_node(kpage, stable_node); |
| 1065 | |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1066 | return stable_node; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1067 | } |
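/*
 * Companion sketch to the one after stable_tree_search(): the same toy
 * tree, now with the search-or-insert walk that stable_tree_insert()
 * performs via the parent/new pointer pair and rb_link_node().  As
 * before, struct snode and stable_insert are hypothetical names.
 */
#if 0
static struct snode *stable_insert(struct snode **root, struct snode *new)
{
	struct snode **link = root;

	while (*link) {
		int ret = memcmp(new->data, (*link)->data,
				 KSM_SKETCH_PAGE_SIZE);

		if (ret < 0)
			link = &(*link)->left;
		else if (ret > 0)
			link = &(*link)->right;
		else
			return NULL;	/* equal content appeared: give up */
	}
	new->left = new->right = NULL;
	*link = new;
	return new;
}
#endif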
| 1068 | |
| 1069 | /* |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1070 | * unstable_tree_search_insert - search for identical page, |
| 1071 | * else insert rmap_item into the unstable tree. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1072 | * |
| 1073 | * This function searches for a page in the unstable tree identical to the |
| 1074 | * page currently being scanned; and if no identical page is found in the |
| 1075 | * tree, we insert rmap_item as a new object into the unstable tree. |
| 1076 | * |
| 1077 | * This function returns a pointer to the rmap_item found to be identical |
| 1078 | * to the currently scanned page, NULL otherwise. |
| 1079 | * |
| 1080 | * This function does both searching and inserting, because they share |
| 1081 | * the same walking algorithm in an rbtree. |
| 1082 | */ |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1083 | static |
| 1084 | struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item, |
| 1085 | struct page *page, |
| 1086 | struct page **tree_pagep) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1088 | { |
| 1089 | struct rb_node **new = &root_unstable_tree.rb_node; |
| 1090 | struct rb_node *parent = NULL; |
| 1091 | |
| 1092 | while (*new) { |
| 1093 | struct rmap_item *tree_rmap_item; |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1094 | struct page *tree_page; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1095 | int ret; |
| 1096 | |
Hugh Dickins | d178f27 | 2009-11-09 15:58:23 +0000 | [diff] [blame] | 1097 | cond_resched(); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1098 | tree_rmap_item = rb_entry(*new, struct rmap_item, node); |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1099 | tree_page = get_mergeable_page(tree_rmap_item); |
| 1100 | if (!tree_page) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1101 | return NULL; |
| 1102 | |
| 1103 | /* |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1104 | * Don't substitute a ksm page for a forked page. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1105 | */ |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1106 | if (page == tree_page) { |
| 1107 | put_page(tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1108 | return NULL; |
| 1109 | } |
| 1110 | |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1111 | ret = memcmp_pages(page, tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1112 | |
| 1113 | parent = *new; |
| 1114 | if (ret < 0) { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1115 | put_page(tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1116 | new = &parent->rb_left; |
| 1117 | } else if (ret > 0) { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1118 | put_page(tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1119 | new = &parent->rb_right; |
| 1120 | } else { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1121 | *tree_pagep = tree_page; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1122 | return tree_rmap_item; |
| 1123 | } |
| 1124 | } |
| 1125 | |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1126 | rmap_item->address |= UNSTABLE_FLAG; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1127 | rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); |
| 1128 | rb_link_node(&rmap_item->node, parent, new); |
| 1129 | rb_insert_color(&rmap_item->node, &root_unstable_tree); |
| 1130 | |
Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 1131 | ksm_pages_unshared++; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1132 | return NULL; |
| 1133 | } |
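/*
 * Sketch of the address encoding used just above.  rmap_item->address is
 * page aligned, so its low bits are free to carry state: a flag saying
 * which tree the item is linked into, plus the scan sequence number.  The
 * unstable tree is thrown away and rebuilt on every full scan (see
 * root_unstable_tree = RB_ROOT below), so a leftover node is recognised
 * simply by its stale seqnr bits.  The values here are illustrative
 * assumptions, not necessarily the kernel's definitions.
 */
#if 0
#define SEQNR_MASK_X	0x0ffUL	/* low bits: scan sequence number */
#define UNSTABLE_FLAG_X	0x100UL	/* item is a node of the unstable tree */

static unsigned long mark_unstable(unsigned long page_addr,
				   unsigned long seqnr)
{
	return page_addr | UNSTABLE_FLAG_X | (seqnr & SEQNR_MASK_X);
}

static int is_stale(unsigned long address, unsigned long cur_seqnr)
{
	return (address & SEQNR_MASK_X) != (cur_seqnr & SEQNR_MASK_X);
}
#endif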
| 1134 | |
| 1135 | /* |
| 1136 | * stable_tree_append - add another rmap_item to the linked list of |
| 1137 | * rmap_items hanging off a given node of the stable tree, all sharing |
| 1138 | * the same ksm page. |
| 1139 | */ |
| 1140 | static void stable_tree_append(struct rmap_item *rmap_item, |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1141 | struct stable_node *stable_node) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1142 | { |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1143 | rmap_item->head = stable_node; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1144 | rmap_item->address |= STABLE_FLAG; |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1145 | hlist_add_head(&rmap_item->hlist, &stable_node->hlist); |
Hugh Dickins | e178dfd | 2009-09-21 17:02:10 -0700 | [diff] [blame] | 1146 | |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1147 | if (rmap_item->hlist.next) |
| 1148 | ksm_pages_sharing++; |
| 1149 | else |
| 1150 | ksm_pages_shared++; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1151 | } |
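/*
 * Worked example of the accounting above: a stable node whose hlist holds
 * four rmap_items contributes 1 to ksm_pages_shared (for the ksm page
 * itself) and 3 to ksm_pages_sharing (each additional mapping saves one
 * duplicate page).  The ratio of pages_sharing to pages_shared is thus a
 * rough measure of how profitable the merging is.
 */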
| 1152 | |
| 1153 | /* |
Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1154 | * cmp_and_merge_page - first see if page can be merged into the stable tree; |
| 1155 | * if not, compare checksum to previous and if it's the same, see if page can |
| 1156 | * be inserted into the unstable tree, or merged with a page already there and |
| 1157 | * both transferred to the stable tree. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1158 | * |
| 1159 | * @page: the page that we are searching for an identical page to. |
| 1160 | * @rmap_item: the reverse mapping into the virtual address of this page |
| 1161 | */ |
| 1162 | static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) |
| 1163 | { |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1164 | struct rmap_item *tree_rmap_item; |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1165 | struct page *tree_page = NULL; |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1166 | struct stable_node *stable_node; |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1167 | struct page *kpage; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1168 | unsigned int checksum; |
| 1169 | int err; |
| 1170 | |
Hugh Dickins | 93d1771 | 2009-12-14 17:59:16 -0800 | [diff] [blame] | 1171 | remove_rmap_item_from_tree(rmap_item); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1172 | |
| 1173 | /* We first start with searching the page inside the stable tree */ |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame^] | 1174 | kpage = stable_tree_search(page); |
| 1175 | if (kpage) { |
Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1176 | err = try_to_merge_with_ksm_page(rmap_item, page, kpage); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1177 | if (!err) { |
| 1178 | /* |
| 1179 | * The page was successfully merged: |
| 1180 | * add its rmap_item to the stable tree. |
| 1181 | */ |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1182 | lock_page(kpage); |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame^] | 1183 | stable_tree_append(rmap_item, page_stable_node(kpage)); |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1184 | unlock_page(kpage); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1185 | } |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1186 | put_page(kpage); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1187 | return; |
| 1188 | } |
| 1189 | |
| 1190 | /* |
Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1191 | * If the checksum of the page has changed from the last time |
| 1192 | * we calculated it, this page is changing frequently: therefore we |
| 1193 | * don't want to insert it in the unstable tree, and we don't want |
| 1194 | * to waste our time searching for something identical to it there. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1195 | */ |
| 1196 | checksum = calc_checksum(page); |
| 1197 | if (rmap_item->oldchecksum != checksum) { |
| 1198 | rmap_item->oldchecksum = checksum; |
| 1199 | return; |
| 1200 | } |
| 1201 | |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1202 | tree_rmap_item = |
| 1203 | unstable_tree_search_insert(rmap_item, page, &tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1204 | if (tree_rmap_item) { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1205 | kpage = try_to_merge_two_pages(rmap_item, page, |
| 1206 | tree_rmap_item, tree_page); |
| 1207 | put_page(tree_page); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1208 | /* |
| 1209 | * As soon as we merge this page, we want to remove the |
| 1210 | * rmap_item of the page we have merged with from the unstable |
| 1211 | * tree, and insert it instead as new node in the stable tree. |
| 1212 | */ |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1213 | if (kpage) { |
Hugh Dickins | 93d1771 | 2009-12-14 17:59:16 -0800 | [diff] [blame] | 1214 | remove_rmap_item_from_tree(tree_rmap_item); |
Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 1215 | |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1216 | lock_page(kpage); |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1217 | stable_node = stable_tree_insert(kpage); |
| 1218 | if (stable_node) { |
| 1219 | stable_tree_append(tree_rmap_item, stable_node); |
| 1220 | stable_tree_append(rmap_item, stable_node); |
| 1221 | } |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1222 | unlock_page(kpage); |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1223 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1224 | /* |
| 1225 | * If we fail to insert the page into the stable tree, |
| 1226 | * we will have 2 virtual addresses that are pointing |
| 1227 | * to a ksm page left outside the stable tree, |
| 1228 | * in which case we need to break_cow on both. |
| 1229 | */ |
Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1230 | if (!stable_node) { |
Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1231 | break_cow(tree_rmap_item); |
| 1232 | break_cow(rmap_item); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1233 | } |
| 1234 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1235 | } |
| 1236 | } |
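/*
 * Summary of the four ways out of cmp_and_merge_page():
 *   1. identical content found in the stable tree: merge with that ksm
 *      page and append this rmap_item to its hlist;
 *   2. checksum changed since the last scan: the page is volatile, so
 *      just record the new checksum and move on;
 *   3. identical content found in the unstable tree: merge the two pages,
 *      insert the new ksm page into the stable tree and append both
 *      rmap_items to it (or break_cow on both if the insert fails);
 *   4. otherwise: park this rmap_item in the unstable tree and wait.
 */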
| 1237 | |
| 1238 | static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot, |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1239 | struct rmap_item **rmap_list, |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1240 | unsigned long addr) |
| 1241 | { |
| 1242 | struct rmap_item *rmap_item; |
| 1243 | |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1244 | while (*rmap_list) { |
| 1245 | rmap_item = *rmap_list; |
Hugh Dickins | 93d1771 | 2009-12-14 17:59:16 -0800 | [diff] [blame] | 1246 | if ((rmap_item->address & PAGE_MASK) == addr) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1247 | return rmap_item; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1248 | if (rmap_item->address > addr) |
| 1249 | break; |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1250 | *rmap_list = rmap_item->rmap_list; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1251 | remove_rmap_item_from_tree(rmap_item); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1252 | free_rmap_item(rmap_item); |
| 1253 | } |
| 1254 | |
| 1255 | rmap_item = alloc_rmap_item(); |
| 1256 | if (rmap_item) { |
| 1257 | /* It has already been zeroed */ |
| 1258 | rmap_item->mm = mm_slot->mm; |
| 1259 | rmap_item->address = addr; |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1260 | rmap_item->rmap_list = *rmap_list; |
| 1261 | *rmap_list = rmap_item; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1262 | } |
| 1263 | return rmap_item; |
| 1264 | } |
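/*
 * Note the invariant get_next_rmap_item() relies on: each rmap_list is
 * kept sorted by address, in step with the order in which the vmas are
 * scanned.  Any item passed over here with an address below the one now
 * being scanned belongs to a page that was skipped this pass (unmapped,
 * or no longer in a mergeable vma), so it can be freed on the spot.
 */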
| 1265 | |
| 1266 | static struct rmap_item *scan_get_next_rmap_item(struct page **page) |
| 1267 | { |
| 1268 | struct mm_struct *mm; |
| 1269 | struct mm_slot *slot; |
| 1270 | struct vm_area_struct *vma; |
| 1271 | struct rmap_item *rmap_item; |
| 1272 | |
| 1273 | if (list_empty(&ksm_mm_head.mm_list)) |
| 1274 | return NULL; |
| 1275 | |
| 1276 | slot = ksm_scan.mm_slot; |
| 1277 | if (slot == &ksm_mm_head) { |
| 1278 | root_unstable_tree = RB_ROOT; |
| 1279 | |
| 1280 | spin_lock(&ksm_mmlist_lock); |
| 1281 | slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); |
| 1282 | ksm_scan.mm_slot = slot; |
| 1283 | spin_unlock(&ksm_mmlist_lock); |
| 1284 | next_mm: |
| 1285 | ksm_scan.address = 0; |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1286 | ksm_scan.rmap_list = &slot->rmap_list; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1287 | } |
| 1288 | |
| 1289 | mm = slot->mm; |
| 1290 | down_read(&mm->mmap_sem); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1291 | if (ksm_test_exit(mm)) |
| 1292 | vma = NULL; |
| 1293 | else |
| 1294 | vma = find_vma(mm, ksm_scan.address); |
| 1295 | |
| 1296 | for (; vma; vma = vma->vm_next) { |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1297 | if (!(vma->vm_flags & VM_MERGEABLE)) |
| 1298 | continue; |
| 1299 | if (ksm_scan.address < vma->vm_start) |
| 1300 | ksm_scan.address = vma->vm_start; |
| 1301 | if (!vma->anon_vma) |
| 1302 | ksm_scan.address = vma->vm_end; |
| 1303 | |
| 1304 | while (ksm_scan.address < vma->vm_end) { |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1305 | if (ksm_test_exit(mm)) |
| 1306 | break; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1307 | *page = follow_page(vma, ksm_scan.address, FOLL_GET); |
| 1308 | if (*page && PageAnon(*page)) { |
| 1309 | flush_anon_page(vma, *page, ksm_scan.address); |
| 1310 | flush_dcache_page(*page); |
| 1311 | rmap_item = get_next_rmap_item(slot, |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1312 | ksm_scan.rmap_list, ksm_scan.address); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1313 | if (rmap_item) { |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1314 | ksm_scan.rmap_list = |
| 1315 | &rmap_item->rmap_list; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1316 | ksm_scan.address += PAGE_SIZE; |
| 1317 | } else |
| 1318 | put_page(*page); |
| 1319 | up_read(&mm->mmap_sem); |
| 1320 | return rmap_item; |
| 1321 | } |
| 1322 | if (*page) |
| 1323 | put_page(*page); |
| 1324 | ksm_scan.address += PAGE_SIZE; |
| 1325 | cond_resched(); |
| 1326 | } |
| 1327 | } |
| 1328 | |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1329 | if (ksm_test_exit(mm)) { |
| 1330 | ksm_scan.address = 0; |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1331 | ksm_scan.rmap_list = &slot->rmap_list; |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1332 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1333 | /* |
| 1334 | * Nuke all the rmap_items that are above this current rmap: |
| 1335 | * because there were no VM_MERGEABLE vmas with such addresses. |
| 1336 | */ |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1337 | remove_trailing_rmap_items(slot, ksm_scan.rmap_list); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1338 | |
| 1339 | spin_lock(&ksm_mmlist_lock); |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1340 | ksm_scan.mm_slot = list_entry(slot->mm_list.next, |
| 1341 | struct mm_slot, mm_list); |
| 1342 | if (ksm_scan.address == 0) { |
| 1343 | /* |
| 1344 | * We've completed a full scan of all vmas, holding mmap_sem |
| 1345 | * throughout, and found no VM_MERGEABLE: so do the same as |
| 1346 | * __ksm_exit does to remove this mm from all our lists now. |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1347 | * This applies either when cleaning up after __ksm_exit |
| 1348 | * (but beware: we can reach here even before __ksm_exit), |
| 1349 | * or when all VM_MERGEABLE areas have been unmapped (and |
| 1350 | * mmap_sem then protects against race with MADV_MERGEABLE). |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1351 | */ |
| 1352 | hlist_del(&slot->link); |
| 1353 | list_del(&slot->mm_list); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1354 | spin_unlock(&ksm_mmlist_lock); |
| 1355 | |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1356 | free_mm_slot(slot); |
| 1357 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1358 | up_read(&mm->mmap_sem); |
| 1359 | mmdrop(mm); |
| 1360 | } else { |
| 1361 | spin_unlock(&ksm_mmlist_lock); |
| 1362 | up_read(&mm->mmap_sem); |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1363 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1364 | |
| 1365 | /* Repeat until we've completed scanning the whole list */ |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1366 | slot = ksm_scan.mm_slot; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1367 | if (slot != &ksm_mm_head) |
| 1368 | goto next_mm; |
| 1369 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1370 | ksm_scan.seqnr++; |
| 1371 | return NULL; |
| 1372 | } |
| 1373 | |
| 1374 | /** |
| 1375 | * ksm_do_scan - the ksm scanner main worker function. |
| 1376 | * @scan_npages: number of pages we want to scan before we return. |
| 1377 | */ |
| 1378 | static void ksm_do_scan(unsigned int scan_npages) |
| 1379 | { |
| 1380 | struct rmap_item *rmap_item; |
| 1381 | struct page *page; |
| 1382 | |
| 1383 | while (scan_npages--) { |
| 1384 | cond_resched(); |
| 1385 | rmap_item = scan_get_next_rmap_item(&page); |
| 1386 | if (!rmap_item) |
| 1387 | return; |
| 1388 | if (!PageKsm(page) || !in_stable_tree(rmap_item)) |
| 1389 | cmp_and_merge_page(page, rmap_item); |
| 1390 | put_page(page); |
| 1391 | } |
| 1392 | } |
| 1393 | |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1394 | static int ksmd_should_run(void) |
| 1395 | { |
| 1396 | return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list); |
| 1397 | } |
| 1398 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1399 | static int ksm_scan_thread(void *nothing) |
| 1400 | { |
Izik Eidus | 339aa62 | 2009-09-21 17:02:07 -0700 | [diff] [blame] | 1401 | set_user_nice(current, 5); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1402 | |
| 1403 | while (!kthread_should_stop()) { |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1404 | mutex_lock(&ksm_thread_mutex); |
| 1405 | if (ksmd_should_run()) |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1406 | ksm_do_scan(ksm_thread_pages_to_scan); |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1407 | mutex_unlock(&ksm_thread_mutex); |
| 1408 | |
| 1409 | if (ksmd_should_run()) { |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1410 | schedule_timeout_interruptible( |
| 1411 | msecs_to_jiffies(ksm_thread_sleep_millisecs)); |
| 1412 | } else { |
| 1413 | wait_event_interruptible(ksm_thread_wait, |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1414 | ksmd_should_run() || kthread_should_stop()); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1415 | } |
| 1416 | } |
| 1417 | return 0; |
| 1418 | } |
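/*
 * Rough throughput of the loop above: ksmd examines about
 * pages_to_scan * 1000 / sleep_millisecs pages per second, ignoring the
 * time the scan itself takes.  For example, assuming pages_to_scan = 100
 * and sleep_millisecs = 20, that is ~5000 pages/s, i.e. ~20MB/s of 4kB
 * pages being checksummed, compared and possibly merged.
 */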
| 1419 | |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1420 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, |
| 1421 | unsigned long end, int advice, unsigned long *vm_flags) |
| 1422 | { |
| 1423 | struct mm_struct *mm = vma->vm_mm; |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1424 | int err; |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1425 | |
| 1426 | switch (advice) { |
| 1427 | case MADV_MERGEABLE: |
| 1428 | /* |
| 1429 | * Be somewhat over-protective for now! |
| 1430 | */ |
| 1431 | if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | |
| 1432 | VM_PFNMAP | VM_IO | VM_DONTEXPAND | |
| 1433 | VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1434 | VM_NONLINEAR | VM_MIXEDMAP | VM_SAO)) |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1435 | return 0; /* just ignore the advice */ |
| 1436 | |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1437 | if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { |
| 1438 | err = __ksm_enter(mm); |
| 1439 | if (err) |
| 1440 | return err; |
| 1441 | } |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1442 | |
| 1443 | *vm_flags |= VM_MERGEABLE; |
| 1444 | break; |
| 1445 | |
| 1446 | case MADV_UNMERGEABLE: |
| 1447 | if (!(*vm_flags & VM_MERGEABLE)) |
| 1448 | return 0; /* just ignore the advice */ |
| 1449 | |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1450 | if (vma->anon_vma) { |
| 1451 | err = unmerge_ksm_pages(vma, start, end); |
| 1452 | if (err) |
| 1453 | return err; |
| 1454 | } |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1455 | |
| 1456 | *vm_flags &= ~VM_MERGEABLE; |
| 1457 | break; |
| 1458 | } |
| 1459 | |
| 1460 | return 0; |
| 1461 | } |
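/*
 * Userspace view of the hook above: a minimal, self-contained sketch of
 * opting an anonymous mapping in and out of merging.  MADV_MERGEABLE and
 * MADV_UNMERGEABLE are the real advice values handled by ksm_madvise();
 * the rest (sizes, fill pattern) is arbitrary illustration, and older
 * toolchains may need <linux/mman.h> for the MADV_* definitions.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 0x5a, len);		/* identical pages: merge candidates */

	if (madvise(buf, len, MADV_MERGEABLE))	/* sets VM_MERGEABLE */
		perror("madvise(MADV_MERGEABLE)");

	/* ... let ksmd run; watch /sys/kernel/mm/ksm/pages_sharing ... */

	if (madvise(buf, len, MADV_UNMERGEABLE))	/* unmerges, breaks COW */
		perror("madvise(MADV_UNMERGEABLE)");
	return 0;
}
#endif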
| 1462 | |
| 1463 | int __ksm_enter(struct mm_struct *mm) |
| 1464 | { |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1465 | struct mm_slot *mm_slot; |
| 1466 | int needs_wakeup; |
| 1467 | |
| 1468 | mm_slot = alloc_mm_slot(); |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1469 | if (!mm_slot) |
| 1470 | return -ENOMEM; |
| 1471 | |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1472 | /* Check ksm_run too? Would need tighter locking */ |
| 1473 | needs_wakeup = list_empty(&ksm_mm_head.mm_list); |
| 1474 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1475 | spin_lock(&ksm_mmlist_lock); |
| 1476 | insert_to_mm_slots_hash(mm, mm_slot); |
| 1477 | /* |
| 1478 | * Insert just behind the scanning cursor, to let the area settle |
| 1479 | * down a little; when fork is followed by immediate exec, we don't |
| 1480 | * want ksmd to waste time setting up and tearing down an rmap_list. |
| 1481 | */ |
| 1482 | list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list); |
| 1483 | spin_unlock(&ksm_mmlist_lock); |
| 1484 | |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1485 | set_bit(MMF_VM_MERGEABLE, &mm->flags); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1486 | atomic_inc(&mm->mm_count); |
Hugh Dickins | 6e158384 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1487 | |
| 1488 | if (needs_wakeup) |
| 1489 | wake_up_interruptible(&ksm_thread_wait); |
| 1490 | |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1491 | return 0; |
| 1492 | } |
| 1493 | |
Andrea Arcangeli | 1c2fb7a | 2009-09-21 17:02:22 -0700 | [diff] [blame] | 1494 | void __ksm_exit(struct mm_struct *mm) |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1495 | { |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1496 | struct mm_slot *mm_slot; |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1497 | int easy_to_free = 0; |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1498 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1499 | /* |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1500 | * This process is exiting: if it's straightforward (as is the |
| 1501 | * case when ksmd was never running), free mm_slot immediately. |
| 1502 | * But if it's at the cursor or has rmap_items linked to it, use |
| 1503 | * mmap_sem to synchronize with any break_cows before pagetables |
| 1504 | * are freed, and leave the mm_slot on the list for ksmd to free. |
| 1505 | * Beware: ksm may already have noticed it exiting and freed the slot. |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1506 | */ |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1507 | |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1508 | spin_lock(&ksm_mmlist_lock); |
| 1509 | mm_slot = get_mm_slot(mm); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1510 | if (mm_slot && ksm_scan.mm_slot != mm_slot) { |
Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1511 | if (!mm_slot->rmap_list) { |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1512 | hlist_del(&mm_slot->link); |
| 1513 | list_del(&mm_slot->mm_list); |
| 1514 | easy_to_free = 1; |
| 1515 | } else { |
| 1516 | list_move(&mm_slot->mm_list, |
| 1517 | &ksm_scan.mm_slot->mm_list); |
| 1518 | } |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1519 | } |
Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1520 | spin_unlock(&ksm_mmlist_lock); |
| 1521 | |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1522 | if (easy_to_free) { |
| 1523 | free_mm_slot(mm_slot); |
| 1524 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
| 1525 | mmdrop(mm); |
| 1526 | } else if (mm_slot) { |
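		/*
		 * ksmd may still be working on this mm: take and release
		 * mmap_sem for write, so that any break_cow() in progress
		 * under down_read completes before exit_mmap() frees the
		 * pagetables (see the comment at the top of this function).
		 */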
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1527 | down_write(&mm->mmap_sem); |
| 1528 | up_write(&mm->mmap_sem); |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1529 | } |
Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1530 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1531 | |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1532 | struct page *ksm_does_need_to_copy(struct page *page, |
| 1533 | struct vm_area_struct *vma, unsigned long address) |
| 1534 | { |
| 1535 | struct page *new_page; |
| 1536 | |
| 1537 | unlock_page(page); /* any racers will COW it, not modify it */ |
| 1538 | |
| 1539 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); |
| 1540 | if (new_page) { |
| 1541 | copy_user_highpage(new_page, page, address, vma); |
| 1542 | |
| 1543 | SetPageDirty(new_page); |
| 1544 | __SetPageUptodate(new_page); |
| 1545 | SetPageSwapBacked(new_page); |
| 1546 | __set_page_locked(new_page); |
| 1547 | |
| 1548 | if (page_evictable(new_page, vma)) |
| 1549 | lru_cache_add_lru(new_page, LRU_ACTIVE_ANON); |
| 1550 | else |
| 1551 | add_page_to_unevictable_list(new_page); |
| 1552 | } |
| 1553 | |
| 1554 | page_cache_release(page); |
| 1555 | return new_page; |
| 1556 | } |
| 1557 | |
| 1558 | int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, |
| 1559 | unsigned long *vm_flags) |
| 1560 | { |
| 1561 | struct stable_node *stable_node; |
| 1562 | struct rmap_item *rmap_item; |
| 1563 | struct hlist_node *hlist; |
| 1564 | unsigned int mapcount = page_mapcount(page); |
| 1565 | int referenced = 0; |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1566 | int search_new_forks = 0; |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1567 | |
| 1568 | VM_BUG_ON(!PageKsm(page)); |
| 1569 | VM_BUG_ON(!PageLocked(page)); |
| 1570 | |
| 1571 | stable_node = page_stable_node(page); |
| 1572 | if (!stable_node) |
| 1573 | return 0; |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1574 | again: |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1575 | hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1576 | struct anon_vma *anon_vma = rmap_item->anon_vma; |
| 1577 | struct vm_area_struct *vma; |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1578 | |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1579 | spin_lock(&anon_vma->lock); |
| 1580 | list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { |
| 1581 | if (rmap_item->address < vma->vm_start || |
| 1582 | rmap_item->address >= vma->vm_end) |
| 1583 | continue; |
| 1584 | /* |
| 1585 | * Initially we examine only the vma which covers this |
| 1586 | * rmap_item; but later, if there is still work to do, |
| 1587 | * we examine covering vmas in other mms: in case they |
| 1588 | * were forked from the original since ksmd passed. |
| 1589 | */ |
| 1590 | if ((rmap_item->mm == vma->vm_mm) == search_new_forks) |
| 1591 | continue; |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1592 | |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1593 | if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) |
| 1594 | continue; |
| 1595 | |
| 1596 | referenced += page_referenced_one(page, vma, |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1597 | rmap_item->address, &mapcount, vm_flags); |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1598 | if (!search_new_forks || !mapcount) |
| 1599 | break; |
| 1600 | } |
| 1601 | spin_unlock(&anon_vma->lock); |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1602 | if (!mapcount) |
| 1603 | goto out; |
| 1604 | } |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1605 | if (!search_new_forks++) |
| 1606 | goto again; |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1607 | out: |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1608 | return referenced; |
| 1609 | } |
| 1610 | |
| 1611 | int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) |
| 1612 | { |
| 1613 | struct stable_node *stable_node; |
| 1614 | struct hlist_node *hlist; |
| 1615 | struct rmap_item *rmap_item; |
| 1616 | int ret = SWAP_AGAIN; |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1617 | int search_new_forks = 0; |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1618 | |
| 1619 | VM_BUG_ON(!PageKsm(page)); |
| 1620 | VM_BUG_ON(!PageLocked(page)); |
| 1621 | |
| 1622 | stable_node = page_stable_node(page); |
| 1623 | if (!stable_node) |
| 1624 | return SWAP_FAIL; |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1625 | again: |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1626 | hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1627 | struct anon_vma *anon_vma = rmap_item->anon_vma; |
| 1628 | struct vm_area_struct *vma; |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1629 | |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1630 | spin_lock(&anon_vma->lock); |
| 1631 | list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { |
| 1632 | if (rmap_item->address < vma->vm_start || |
| 1633 | rmap_item->address >= vma->vm_end) |
| 1634 | continue; |
| 1635 | /* |
| 1636 | * Initially we examine only the vma which covers this |
| 1637 | * rmap_item; but later, if there is still work to do, |
| 1638 | * we examine covering vmas in other mms: in case they |
| 1639 | * were forked from the original since ksmd passed. |
| 1640 | */ |
| 1641 | if ((rmap_item->mm == vma->vm_mm) == search_new_forks) |
| 1642 | continue; |
| 1643 | |
| 1644 | ret = try_to_unmap_one(page, vma, |
| 1645 | rmap_item->address, flags); |
| 1646 | if (ret != SWAP_AGAIN || !page_mapped(page)) { |
| 1647 | spin_unlock(&anon_vma->lock); |
| 1648 | goto out; |
| 1649 | } |
| 1650 | } |
| 1651 | spin_unlock(&anon_vma->lock); |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1652 | } |
Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1653 | if (!search_new_forks++) |
| 1654 | goto again; |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1655 | out: |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1656 | return ret; |
| 1657 | } |
| 1658 | |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 1659 | #ifdef CONFIG_MIGRATION |
| 1660 | int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, |
| 1661 | struct vm_area_struct *, unsigned long, void *), void *arg) |
| 1662 | { |
| 1663 | struct stable_node *stable_node; |
| 1664 | struct hlist_node *hlist; |
| 1665 | struct rmap_item *rmap_item; |
| 1666 | int ret = SWAP_AGAIN; |
| 1667 | int search_new_forks = 0; |
| 1668 | |
| 1669 | VM_BUG_ON(!PageKsm(page)); |
| 1670 | VM_BUG_ON(!PageLocked(page)); |
| 1671 | |
| 1672 | stable_node = page_stable_node(page); |
| 1673 | if (!stable_node) |
| 1674 | return ret; |
| 1675 | again: |
| 1676 | hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { |
| 1677 | struct anon_vma *anon_vma = rmap_item->anon_vma; |
| 1678 | struct vm_area_struct *vma; |
| 1679 | |
| 1680 | spin_lock(&anon_vma->lock); |
| 1681 | list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { |
| 1682 | if (rmap_item->address < vma->vm_start || |
| 1683 | rmap_item->address >= vma->vm_end) |
| 1684 | continue; |
| 1685 | /* |
| 1686 | * Initially we examine only the vma which covers this |
| 1687 | * rmap_item; but later, if there is still work to do, |
| 1688 | * we examine covering vmas in other mms: in case they |
| 1689 | * were forked from the original since ksmd passed. |
| 1690 | */ |
| 1691 | if ((rmap_item->mm == vma->vm_mm) == search_new_forks) |
| 1692 | continue; |
| 1693 | |
| 1694 | ret = rmap_one(page, vma, rmap_item->address, arg); |
| 1695 | if (ret != SWAP_AGAIN) { |
| 1696 | spin_unlock(&anon_vma->lock); |
| 1697 | goto out; |
| 1698 | } |
| 1699 | } |
| 1700 | spin_unlock(&anon_vma->lock); |
| 1701 | } |
| 1702 | if (!search_new_forks++) |
| 1703 | goto again; |
| 1704 | out: |
| 1705 | return ret; |
| 1706 | } |
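/*
 * The skip test shared by the three walkers above,
 * "(rmap_item->mm == vma->vm_mm) == search_new_forks", is easier to read
 * as a table:
 *
 *   pass (search_new_forks)    vma belongs to the item's mm?    action
 *   0  (original mms)          yes                              examine
 *   0                          no                               skip
 *   1  (forked mms)            yes                              skip
 *   1                          no                               examine
 *
 * So pass 0 visits only the vma each rmap_item was scanned in, and pass 1
 * mops up copies of it in mms forked since ksmd passed by.
 */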
| 1707 | |
| 1708 | void ksm_migrate_page(struct page *newpage, struct page *oldpage) |
| 1709 | { |
| 1710 | struct stable_node *stable_node; |
| 1711 | |
| 1712 | VM_BUG_ON(!PageLocked(oldpage)); |
| 1713 | VM_BUG_ON(!PageLocked(newpage)); |
| 1714 | VM_BUG_ON(newpage->mapping != oldpage->mapping); |
| 1715 | |
| 1716 | stable_node = page_stable_node(newpage); |
| 1717 | if (stable_node) { |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame^] | 1718 | VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage)); |
| 1719 | stable_node->kpfn = page_to_pfn(newpage); |
Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 1720 | } |
| 1721 | } |
| 1722 | #endif /* CONFIG_MIGRATION */ |
| 1723 | |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame^] | 1724 | #ifdef CONFIG_MEMORY_HOTREMOVE |
| 1725 | static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn, |
| 1726 | unsigned long end_pfn) |
| 1727 | { |
| 1728 | struct rb_node *node; |
| 1729 | |
| 1730 | for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) { |
| 1731 | struct stable_node *stable_node; |
| 1732 | |
| 1733 | stable_node = rb_entry(node, struct stable_node, node); |
| 1734 | if (stable_node->kpfn >= start_pfn && |
| 1735 | stable_node->kpfn < end_pfn) |
| 1736 | return stable_node; |
| 1737 | } |
| 1738 | return NULL; |
| 1739 | } |
| 1740 | |
| 1741 | static int ksm_memory_callback(struct notifier_block *self, |
| 1742 | unsigned long action, void *arg) |
| 1743 | { |
| 1744 | struct memory_notify *mn = arg; |
| 1745 | struct stable_node *stable_node; |
| 1746 | |
| 1747 | switch (action) { |
| 1748 | case MEM_GOING_OFFLINE: |
| 1749 | /* |
| 1750 | * Keep it very simple for now: just lock out ksmd and |
| 1751 | * MADV_UNMERGEABLE while any memory is going offline. |
| 1752 | */ |
| 1753 | mutex_lock(&ksm_thread_mutex); |
| 1754 | break; |
| 1755 | |
| 1756 | case MEM_OFFLINE: |
| 1757 | /* |
| 1758 | * Most of the work is done by page migration; but there might |
| 1759 | * be a few stable_nodes left over, still pointing to struct |
| 1760 | * pages which have been offlined: prune those from the tree. |
| 1761 | */ |
| 1762 | while ((stable_node = ksm_check_stable_tree(mn->start_pfn, |
| 1763 | mn->start_pfn + mn->nr_pages)) != NULL) |
| 1764 | remove_node_from_stable_tree(stable_node); |
| 1765 | /* fallthrough */ |
| 1766 | |
| 1767 | case MEM_CANCEL_OFFLINE: |
| 1768 | mutex_unlock(&ksm_thread_mutex); |
| 1769 | break; |
| 1770 | } |
| 1771 | return NOTIFY_OK; |
| 1772 | } |
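/*
 * Note on cost: ksm_check_stable_tree() walks the whole stable tree, and
 * the MEM_OFFLINE loop above restarts that walk after every removal, so
 * pruning is quadratic in the number of stable nodes in the worst case.
 * That fits the "keep it very simple" approach noted above: memory
 * offlining is rare and already slow, so clarity wins over speed here.
 */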
| 1773 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |
| 1774 | |
Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 1775 | #ifdef CONFIG_SYSFS |
| 1776 | /* |
| 1777 | * This all compiles without CONFIG_SYSFS, but is a waste of space. |
| 1778 | */ |
| 1779 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1780 | #define KSM_ATTR_RO(_name) \ |
| 1781 | static struct kobj_attribute _name##_attr = __ATTR_RO(_name) |
| 1782 | #define KSM_ATTR(_name) \ |
| 1783 | static struct kobj_attribute _name##_attr = \ |
| 1784 | __ATTR(_name, 0644, _name##_show, _name##_store) |
| 1785 | |
| 1786 | static ssize_t sleep_millisecs_show(struct kobject *kobj, |
| 1787 | struct kobj_attribute *attr, char *buf) |
| 1788 | { |
| 1789 | return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs); |
| 1790 | } |
| 1791 | |
| 1792 | static ssize_t sleep_millisecs_store(struct kobject *kobj, |
| 1793 | struct kobj_attribute *attr, |
| 1794 | const char *buf, size_t count) |
| 1795 | { |
| 1796 | unsigned long msecs; |
| 1797 | int err; |
| 1798 | |
| 1799 | err = strict_strtoul(buf, 10, &msecs); |
| 1800 | if (err || msecs > UINT_MAX) |
| 1801 | return -EINVAL; |
| 1802 | |
| 1803 | ksm_thread_sleep_millisecs = msecs; |
| 1804 | |
| 1805 | return count; |
| 1806 | } |
| 1807 | KSM_ATTR(sleep_millisecs); |
| 1808 | |
| 1809 | static ssize_t pages_to_scan_show(struct kobject *kobj, |
| 1810 | struct kobj_attribute *attr, char *buf) |
| 1811 | { |
| 1812 | return sprintf(buf, "%u\n", ksm_thread_pages_to_scan); |
| 1813 | } |
| 1814 | |
| 1815 | static ssize_t pages_to_scan_store(struct kobject *kobj, |
| 1816 | struct kobj_attribute *attr, |
| 1817 | const char *buf, size_t count) |
| 1818 | { |
| 1819 | int err; |
| 1820 | unsigned long nr_pages; |
| 1821 | |
| 1822 | err = strict_strtoul(buf, 10, &nr_pages); |
| 1823 | if (err || nr_pages > UINT_MAX) |
| 1824 | return -EINVAL; |
| 1825 | |
| 1826 | ksm_thread_pages_to_scan = nr_pages; |
| 1827 | |
| 1828 | return count; |
| 1829 | } |
| 1830 | KSM_ATTR(pages_to_scan); |
| 1831 | |
| 1832 | static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, |
| 1833 | char *buf) |
| 1834 | { |
| 1835 | return sprintf(buf, "%u\n", ksm_run); |
| 1836 | } |
| 1837 | |
| 1838 | static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, |
| 1839 | const char *buf, size_t count) |
| 1840 | { |
| 1841 | int err; |
| 1842 | unsigned long flags; |
| 1843 | |
| 1844 | err = strict_strtoul(buf, 10, &flags); |
| 1845 | if (err || flags > UINT_MAX) |
| 1846 | return -EINVAL; |
| 1847 | if (flags > KSM_RUN_UNMERGE) |
| 1848 | return -EINVAL; |
| 1849 | |
| 1850 | /* |
| 1851 | * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. |
| 1852 | * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, |
Hugh Dickins | b402826 | 2009-09-21 17:02:09 -0700 | [diff] [blame] | 1853 | * breaking COW to free the unswappable pages_shared (but leaves |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1854 | * mm_slots on the list for when ksmd may be set running again). |
| 1855 | */ |
| 1856 | |
| 1857 | mutex_lock(&ksm_thread_mutex); |
| 1858 | if (ksm_run != flags) { |
| 1859 | ksm_run = flags; |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1860 | if (flags & KSM_RUN_UNMERGE) { |
Hugh Dickins | 35451be | 2009-09-21 17:02:27 -0700 | [diff] [blame] | 1861 | current->flags |= PF_OOM_ORIGIN; |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1862 | err = unmerge_and_remove_all_rmap_items(); |
Hugh Dickins | 35451be | 2009-09-21 17:02:27 -0700 | [diff] [blame] | 1863 | current->flags &= ~PF_OOM_ORIGIN; |
Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1864 | if (err) { |
| 1865 | ksm_run = KSM_RUN_STOP; |
| 1866 | count = err; |
| 1867 | } |
| 1868 | } |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1869 | } |
| 1870 | mutex_unlock(&ksm_thread_mutex); |
| 1871 | |
| 1872 | if (flags & KSM_RUN_MERGE) |
| 1873 | wake_up_interruptible(&ksm_thread_wait); |
| 1874 | |
| 1875 | return count; |
| 1876 | } |
| 1877 | KSM_ATTR(run); |
| 1878 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1879 | static ssize_t max_kernel_pages_store(struct kobject *kobj, |
| 1880 | struct kobj_attribute *attr, |
| 1881 | const char *buf, size_t count) |
| 1882 | { |
| 1883 | int err; |
| 1884 | unsigned long nr_pages; |
| 1885 | |
| 1886 | err = strict_strtoul(buf, 10, &nr_pages); |
| 1887 | if (err) |
| 1888 | return -EINVAL; |
| 1889 | |
| 1890 | ksm_max_kernel_pages = nr_pages; |
| 1891 | |
| 1892 | return count; |
| 1893 | } |
| 1894 | |
| 1895 | static ssize_t max_kernel_pages_show(struct kobject *kobj, |
| 1896 | struct kobj_attribute *attr, char *buf) |
| 1897 | { |
| 1898 | return sprintf(buf, "%lu\n", ksm_max_kernel_pages); |
| 1899 | } |
| 1900 | KSM_ATTR(max_kernel_pages); |
| 1901 | |
Hugh Dickins | b402826 | 2009-09-21 17:02:09 -0700 | [diff] [blame] | 1902 | static ssize_t pages_shared_show(struct kobject *kobj, |
| 1903 | struct kobj_attribute *attr, char *buf) |
| 1904 | { |
| 1905 | return sprintf(buf, "%lu\n", ksm_pages_shared); |
| 1906 | } |
| 1907 | KSM_ATTR_RO(pages_shared); |
| 1908 | |
| 1909 | static ssize_t pages_sharing_show(struct kobject *kobj, |
| 1910 | struct kobj_attribute *attr, char *buf) |
| 1911 | { |
Hugh Dickins | e178dfd | 2009-09-21 17:02:10 -0700 | [diff] [blame] | 1912 | return sprintf(buf, "%lu\n", ksm_pages_sharing); |
Hugh Dickins | b402826 | 2009-09-21 17:02:09 -0700 | [diff] [blame] | 1913 | } |
| 1914 | KSM_ATTR_RO(pages_sharing); |
| 1915 | |
Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 1916 | static ssize_t pages_unshared_show(struct kobject *kobj, |
| 1917 | struct kobj_attribute *attr, char *buf) |
| 1918 | { |
| 1919 | return sprintf(buf, "%lu\n", ksm_pages_unshared); |
| 1920 | } |
| 1921 | KSM_ATTR_RO(pages_unshared); |
| 1922 | |
| 1923 | static ssize_t pages_volatile_show(struct kobject *kobj, |
| 1924 | struct kobj_attribute *attr, char *buf) |
| 1925 | { |
| 1926 | long ksm_pages_volatile; |
| 1927 | |
| 1928 | ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared |
| 1929 | - ksm_pages_sharing - ksm_pages_unshared; |
| 1930 | /* |
| 1931 | * It was not worth any locking to calculate that statistic, |
| 1932 | * but it might therefore sometimes be negative: conceal that. |
| 1933 | */ |
| 1934 | if (ksm_pages_volatile < 0) |
| 1935 | ksm_pages_volatile = 0; |
| 1936 | return sprintf(buf, "%ld\n", ksm_pages_volatile); |
| 1937 | } |
| 1938 | KSM_ATTR_RO(pages_volatile); |
| 1939 | |
| 1940 | static ssize_t full_scans_show(struct kobject *kobj, |
| 1941 | struct kobj_attribute *attr, char *buf) |
| 1942 | { |
| 1943 | return sprintf(buf, "%lu\n", ksm_scan.seqnr); |
| 1944 | } |
| 1945 | KSM_ATTR_RO(full_scans); |
| 1946 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1947 | static struct attribute *ksm_attrs[] = { |
| 1948 | &sleep_millisecs_attr.attr, |
| 1949 | &pages_to_scan_attr.attr, |
| 1950 | &run_attr.attr, |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1951 | &max_kernel_pages_attr.attr, |
Hugh Dickins | b402826 | 2009-09-21 17:02:09 -0700 | [diff] [blame] | 1952 | &pages_shared_attr.attr, |
| 1953 | &pages_sharing_attr.attr, |
Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 1954 | &pages_unshared_attr.attr, |
| 1955 | &pages_volatile_attr.attr, |
| 1956 | &full_scans_attr.attr, |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1957 | NULL, |
| 1958 | }; |
| 1959 | |
| 1960 | static struct attribute_group ksm_attr_group = { |
| 1961 | .attrs = ksm_attrs, |
| 1962 | .name = "ksm", |
| 1963 | }; |
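/*
 * The attributes above appear as files under /sys/kernel/mm/ksm/ (the
 * group is registered on mm_kobj in ksm_init() below).  A self-contained
 * userspace sketch of driving them; the file names come from the
 * attribute definitions above, and writing "1" to run means KSM_RUN_MERGE
 * as described in the run_store() comment.  ksm_sysfs_write() is a
 * hypothetical helper, not a library call.
 */
#if 0
#include <stdio.h>

static int ksm_sysfs_write(const char *name, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
	f = fopen(path, "w");		/* typically needs root */
	if (!f)
		return -1;
	fprintf(f, "%s", val);
	return fclose(f);
}

int main(void)
{
	ksm_sysfs_write("pages_to_scan", "100");
	ksm_sysfs_write("sleep_millisecs", "20");
	return ksm_sysfs_write("run", "1");	/* start ksmd merging */
}
#endif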
Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 1964 | #endif /* CONFIG_SYSFS */ |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1965 | |
| 1966 | static int __init ksm_init(void) |
| 1967 | { |
| 1968 | struct task_struct *ksm_thread; |
| 1969 | int err; |
| 1970 | |
Hugh Dickins | c73602a | 2009-10-07 16:32:22 -0700 | [diff] [blame] | 1971 | ksm_max_kernel_pages = totalram_pages / 4; |
Izik Eidus | 2c6854f | 2009-09-23 15:56:04 -0700 | [diff] [blame] | 1972 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1973 | err = ksm_slab_init(); |
| 1974 | if (err) |
| 1975 | goto out; |
| 1976 | |
| 1977 | err = mm_slots_hash_init(); |
| 1978 | if (err) |
| 1979 | goto out_free1; |
| 1980 | |
| 1981 | ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); |
| 1982 | if (IS_ERR(ksm_thread)) { |
| 1983 | printk(KERN_ERR "ksm: creating kthread failed\n"); |
| 1984 | err = PTR_ERR(ksm_thread); |
| 1985 | goto out_free2; |
| 1986 | } |
| 1987 | |
Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 1988 | #ifdef CONFIG_SYSFS |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1989 | err = sysfs_create_group(mm_kobj, &ksm_attr_group); |
| 1990 | if (err) { |
| 1991 | printk(KERN_ERR "ksm: register sysfs failed\n"); |
Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 1992 | kthread_stop(ksm_thread); |
| 1993 | goto out_free2; |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1994 | } |
Hugh Dickins | c73602a | 2009-10-07 16:32:22 -0700 | [diff] [blame] | 1995 | #else |
| 1996 | ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ |
| 1997 | |
Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 1998 | #endif /* CONFIG_SYSFS */ |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1999 | |
Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame^] | 2000 | #ifdef CONFIG_MEMORY_HOTREMOVE |
| 2001 | /* |
| 2002 | * Choose a high priority since the callback takes ksm_thread_mutex: |
| 2003 | * later callbacks could only be taking locks which nest within that. |
| 2004 | */ |
| 2005 | hotplug_memory_notifier(ksm_memory_callback, 100); |
| 2006 | #endif |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2007 | return 0; |
| 2008 | |
Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2009 | out_free2: |
| 2010 | mm_slots_hash_free(); |
| 2011 | out_free1: |
| 2012 | ksm_slab_free(); |
| 2013 | out: |
| 2014 | return err; |
| 2015 | } |
| 2016 | module_init(ksm_init) |