/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *		Double CLOCK lists
 *
 * Per zone, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *	NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *	NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *	(R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
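 *
 * To make the arithmetic concrete with made-up numbers (purely an
 * illustration, not taken from the code): suppose NR_inactive = 100
 * and NR_active = 50. A page evicted at counter reading E = 1000 that
 * refaults at R = 1030 has a refault distance of R - E = 30. Since
 * 30 <= NR_active, its complete access distance of 100 + 30 = 130
 * would have fit into the 150 available cache slots, so the refault
 * is activated.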
 *
 *
 *		Activating refaulting pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *
 *		Implementation
 *
 * For each zone's file LRU lists, a counter for inactive evictions
 * and activations is maintained (lruvec->inactive_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the zone) is stored in the now empty page cache radix tree
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT	(RADIX_TREE_EXCEPTIONAL_ENTRY + \
			 ZONES_SHIFT + NODES_SHIFT +	\
			 MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the radix tree
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

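/*
 * Bit layout of a shadow entry, as produced by pack_shadow() below
 * (most to least significant): the bucketed eviction counter, the
 * memcg ID, the node ID, the zone index, and finally the radix tree
 * exceptional-entry tag bits that distinguish it from a page pointer.
 * unpack_shadow() peels these fields off again in reverse order.
 */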
static void *pack_shadow(int memcgid, struct zone *zone, unsigned long eviction)
{
	eviction >>= bucket_order;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | zone_to_nid(zone);
	eviction = (eviction << ZONES_SHIFT) | zone_idx(zone);
	eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);

	return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
}

static void unpack_shadow(void *shadow, int *memcgidp, struct zone **zonep,
			  unsigned long *evictionp)
{
	unsigned long entry = (unsigned long)shadow;
	int memcgid, nid, zid;

	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
	zid = entry & ((1UL << ZONES_SHIFT) - 1);
	entry >>= ZONES_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*zonep = NODE_DATA(nid)->node_zones + zid;
	*evictionp = entry << bucket_order;
}

/**
 * workingset_eviction - note the eviction of a page from memory
 * @mapping: address space the page was backing
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @mapping->page_tree in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct address_space *mapping, struct page *page)
{
	struct mem_cgroup *memcg = page_memcg(page);
	struct zone *zone = page_zone(page);
	int memcgid = mem_cgroup_id(memcg);
	unsigned long eviction;
	struct lruvec *lruvec;

	/* Page is fully exclusive and pins page->mem_cgroup */
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	lruvec = mem_cgroup_zone_lruvec(zone, memcg);
	eviction = atomic_long_inc_return(&lruvec->inactive_age);
	return pack_shadow(memcgid, zone, eviction);
}

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the zone it was allocated in.
 *
 * Returns %true if the page should be activated, %false otherwise.
 */
bool workingset_refault(void *shadow)
{
	unsigned long refault_distance;
	unsigned long active_file;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	struct zone *zone;
	int memcgid;

	unpack_shadow(shadow, &memcgid, &zone, &eviction);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the page's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared page. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !memcg) {
		rcu_read_unlock();
		return false;
	}
	lruvec = mem_cgroup_zone_lruvec(zone, memcg);
	refault = atomic_long_read(&lruvec->inactive_age);
	active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE);
	rcu_read_unlock();

	/*
	 * The unsigned subtraction here gives an accurate distance
	 * across inactive_age overflows in most cases.
	 *
	 * There is a special case: usually, shadow entries have a
	 * short lifetime and are either refaulted or reclaimed along
	 * with the inode before they get too old. But it is not
	 * impossible for the inactive_age to lap a shadow entry in
	 * the field, which can then result in a false small
	 * refault distance, leading to a false activation should this
	 * old entry actually refault again. However, earlier kernels
	 * used to deactivate unconditionally with *every* reclaim
	 * invocation for the longest time, so the occasional
	 * inappropriate activation leading to pressure on the active
	 * list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;
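	/*
	 * Toy illustration of the wraparound handling above (using an
	 * 8-bit counter and mask instead of EVICTION_MASK for
	 * brevity): eviction = 250, the counter wraps, refault = 10;
	 * then (10 - 250) & 0xff == 16, which is the true distance.
	 */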

	inc_zone_state(zone, WORKINGSET_REFAULT);

	if (refault_distance <= active_file) {
		inc_zone_state(zone, WORKINGSET_ACTIVATE);
		return true;
	}
	return false;
}
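
/*
 * Sketch of the intended use from the page cache fault path (heavily
 * simplified, not the literal code in mm/filemap.c): when a newly
 * added page replaces a shadow entry in the radix tree, that entry is
 * handed to workingset_refault(), and a positive verdict activates
 * the page before it goes onto the LRU:
 *
 *	if (shadow && workingset_refault(shadow)) {
 *		SetPageActive(page);
 *		workingset_activation(page);
 *	}
 */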

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = page_memcg_rcu(page);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	lruvec = mem_cgroup_zone_lruvec(page_zone(page), memcg);
	atomic_long_inc(&lruvec->inactive_age);
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

struct list_lru workingset_shadow_nodes;

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long shadow_nodes;
	unsigned long max_nodes;
	unsigned long pages;

	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
	local_irq_disable();
	shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
	local_irq_enable();

	if (memcg_kmem_enabled())
		pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
						     LRU_ALL_FILE);
	else
		pages = node_page_state(sc->nid, NR_ACTIVE_FILE) +
			node_page_state(sc->nid, NR_INACTIVE_FILE);

	/*
	 * Active cache pages are limited to 50% of memory, and shadow
	 * entries that represent a refault distance bigger than that
	 * do not have any effect. Limit the number of shadow nodes
	 * such that shadow entries do not exceed the number of active
	 * cache pages, assuming a worst-case node population density
	 * of 1/8th on average.
	 *
	 * On 64-bit with 7 radix_tree_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~2% of available memory:
	 *
	 * PAGE_SIZE / radix_tree_nodes / node_entries / PAGE_SIZE
	 */
	max_nodes = pages >> (1 + RADIX_TREE_MAP_SHIFT - 3);
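	/*
	 * Rough illustration with assumed numbers: with ~1 million
	 * file pages on the node (about 4GB at 4K pages) and the
	 * usual RADIX_TREE_MAP_SHIFT of 6, this works out to
	 * max_nodes = 2^20 >> 4 = 65536 shadow nodes before the
	 * excess is reported to the shrinker core for reclaim.
	 */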

	if (shadow_nodes <= max_nodes)
		return 0;

	return shadow_nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg)
{
	struct address_space *mapping;
	struct radix_tree_node *node;
	unsigned int i;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the mapping->tree_lock and the
	 * lru_lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has radix tree nodes on the LRU.
	 *
	 * We can then safely transition to the mapping->tree_lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	node = container_of(item, struct radix_tree_node, private_list);
	mapping = node->private_data;

	/* Coming from the list, invert the lock order */
	if (!spin_trylock(&mapping->tree_lock)) {
		spin_unlock(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */

	BUG_ON(!node->count);
	BUG_ON(node->count & RADIX_TREE_COUNT_MASK);

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[i]) {
			BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
			node->slots[i] = NULL;
			BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));
			node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
			BUG_ON(!mapping->nrexceptional);
			mapping->nrexceptional--;
		}
	}
	BUG_ON(node->count);
	inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM);
	if (!__radix_tree_delete_node(&mapping->page_tree, node))
		BUG();

	spin_unlock(&mapping->tree_lock);
	ret = LRU_REMOVED_RETRY;
out:
	local_irq_enable();
	cond_resched();
	local_irq_disable();
	spin_lock(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	unsigned long ret;

	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
	local_irq_disable();
	ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc,
				   shadow_lru_isolate, NULL);
	local_irq_enable();
	return ret;
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * mapping->tree_lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
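	/*
	 * Illustration only, with assumed config values: on a 32-bit
	 * build where EVICTION_SHIFT adds up to, say, 26 bits, only
	 * timestamp_bits = 6 remain; a machine with 4GB of RAM has
	 * max_order = 20, so bucket_order becomes 14 and evictions
	 * are grouped into buckets of 2^14 timestamp ticks.
	 */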
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
	       timestamp_bits, max_order, bucket_order);

	ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
	if (ret)
		goto err;
	ret = register_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	return 0;
err_list_lru:
	list_lru_destroy(&workingset_shadow_nodes);
err:
	return ret;
}
module_init(workingset_init);