/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	/*
	 * The lock protects the array of per cgroup lists from relocation
	 * (see memcg_update_list_lru_node).
	 */
	lockdep_assert_held(&nlru->lock);
	if (nlru->memcg_lrus && idx >= 0)
		return nlru->memcg_lrus->lru[idx];

	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	struct mem_cgroup *memcg;

	if (!nlru->memcg_lrus)
		return &nlru->lru;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		return &nlru->lru;

	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	l = list_lru_from_kmem(nlru, item);
	WARN_ON_ONCE(l->nr_items < 0);
	if (list_empty(item)) {
		list_add_tail(item, &l->list);
		l->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	l = list_lru_from_kmem(nlru, item);
	if (!list_empty(item)) {
		list_del_init(item);
		l->nr_items--;
		WARN_ON_ONCE(l->nr_items < 0);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
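
/*
 * Usage sketch (illustrative only, not part of this file): a typical user
 * embeds a list_head in its object, calls list_lru_add() when the last
 * reference is put and list_lru_del() when the object is reused.  Both
 * return false when there is nothing to do, so they are safe to call
 * without knowing whether the item is currently on the list.  The names
 * "struct my_object" and "my_lru" below are hypothetical.
 *
 *	struct my_object {
 *		struct list_head lru;
 *	};
 *
 *	static struct list_lru my_lru;
 *
 *	static void my_object_last_put(struct my_object *obj)
 *	{
 *		list_lru_add(&my_lru, &obj->lru);
 *	}
 *
 *	static void my_object_reuse(struct my_object *obj)
 *	{
 *		list_lru_del(&my_lru, &obj->lru);
 *	}
 */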

static unsigned long __list_lru_count_one(struct list_lru *lru,
					  int nid, int memcg_idx)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
	WARN_ON_ONCE(l->nr_items < 0);
	count = l->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	long count = 0;
	int memcg_idx;

	count += __list_lru_count_one(lru, nid, -1);
	if (list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx)
			count += __list_lru_count_one(lru, nid, memcg_idx);
	}
	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			l->nr_items--;
			WARN_ON_ONCE(l->nr_items < 0);
			isolated++;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
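
/*
 * Callback sketch (illustrative only): the isolate callback is invoked
 * with the node's lru lock held and reports what it did with the item
 * via enum lru_status.  Note that for LRU_REMOVED the callback itself
 * unlinks the item; the walker only adjusts nr_items.  The names
 * "my_isolate", "struct my_object" and the trylock helpers below are
 * hypothetical.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  spinlock_t *lock, void *cb_arg)
 *	{
 *		struct my_object *obj;
 *
 *		obj = container_of(item, struct my_object, lru);
 *		if (!my_object_trylock(obj))
 *			return LRU_SKIP;
 *
 *		list_del_init(item);
 *		my_object_unlock(obj);
 *		return LRU_REMOVED;
 *	}
 *
 * A callback that drops *lock (e.g. to sleep or to free the object) must
 * reacquire it before returning LRU_RETRY or LRU_REMOVED_RETRY, which the
 * assert_spin_locked() calls above verify.
 */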

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
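
/*
 * The canonical consumer of the node-level API is a NUMA-aware shrinker
 * (see super_cache_count()/super_cache_scan() in fs/super.c), which pairs
 * list_lru_count_node() with list_lru_walk_node().  A minimal hypothetical
 * sketch, reusing the "my_lru" and "my_isolate" names from the sketches
 * above:
 *
 *	static unsigned long my_shrink_count(struct shrinker *shrink,
 *					     struct shrink_control *sc)
 *	{
 *		return list_lru_count_node(&my_lru, sc->nid);
 *	}
 *
 *	static unsigned long my_shrink_scan(struct shrinker *shrink,
 *					    struct shrink_control *sc)
 *	{
 *		unsigned long nr_to_walk = sc->nr_to_scan;
 *
 *		return list_lru_walk_node(&my_lru, sc->nid, my_isolate,
 *					  NULL, &nr_to_walk);
 *	}
 */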

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	/* Slots begin..i-1 were allocated, so free them all. */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	int size = memcg_nr_cache_ids;

	nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
	if (!nlru->memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
		kfree(nlru->memcg_lrus);
		return -ENOMEM;
	}

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
	kfree(nlru->memcg_lrus);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = nlru->memcg_lrus;
	new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kfree(new);
		return -ENOMEM;
	}

	memcpy(new, old, old_size * sizeof(void *));

	/*
	 * The lock guarantees that we won't race with a reader
	 * (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	nlru->memcg_lrus = new;
	spin_unlock_irq(&nlru->lock);

	kfree(old);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	/*
	 * Do not bother shrinking the array back to the old size, because
	 * we cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	for (i = 0; i < nr_node_ids; i++) {
		if (!memcg_aware)
			lru->node[i].memcg_lrus = NULL;
		else if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--)
		memcg_destroy_list_lru_node(&lru->node[i]);
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for (i = 0; i < nr_node_ids; i++)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for (i = 0; i < nr_node_ids; i++) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for (i = 0; i < nr_node_ids; i++)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for (i = 0; i < nr_node_ids; i++) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
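
/*
 * Lifecycle sketch (illustrative only): users normally reach
 * __list_lru_init() through the list_lru_init*() wrappers in
 * <linux/list_lru.h> and must pair every successful init with
 * list_lru_destroy():
 *
 *	static struct list_lru my_lru;
 *
 *	static int __init my_init(void)
 *	{
 *		return list_lru_init(&my_lru);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		list_lru_destroy(&my_lru);
 *	}
 */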