/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

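/*
 * A list_lru is an LRU list split into one list per NUMA node and, when
 * CONFIG_MEMCG_KMEM is enabled, further into one list per memory cgroup.
 * Items land on the sub-list matching the node (and kmem cgroup) that owns
 * their backing memory, so shrinkers can target a specific node and cgroup
 * instead of scanning one global list. Memcg-aware lrus are also kept on a
 * global registration list so their per-memcg arrays can be resized when
 * new cache ids are handed out (see memcg_update_all_list_lrus).
 */
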
#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

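/*
 * For a memcg-aware list_lru, node[i].memcg_lrus is an array of per-cgroup
 * lists indexed by kmem cache id; a negative index (or a NULL array) denotes
 * the global list embedded in the node itself. The helpers below map an
 * (nlru, memcg index) pair or a kmem-allocated object to the sub-list it
 * belongs on.
 */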
#ifdef CONFIG_MEMCG_KMEM
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	/*
	 * The lock protects the array of per-cgroup lists from relocation
	 * (see memcg_update_list_lru_node).
	 */
	lockdep_assert_held(&nlru->lock);
	if (nlru->memcg_lrus && idx >= 0)
		return nlru->memcg_lrus->lru[idx];

	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	struct mem_cgroup *memcg;

	if (!nlru->memcg_lrus)
		return &nlru->lru;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		return &nlru->lru;

	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

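/*
 * Add @item to the LRU it belongs on, keyed by the node (and kmem cgroup)
 * of the page backing it. Returns true if the item was added, false if it
 * was already on a list. A minimal usage sketch (hypothetical cache_object
 * type, assuming obj->lru_node starts out list_empty()):
 *
 *	struct cache_object *obj = ...;
 *
 *	if (list_lru_add(&cache_lru, &obj->lru_node))
 *		atomic_long_inc(&nr_cached);
 */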
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	l = list_lru_from_kmem(nlru, item);
	WARN_ON_ONCE(l->nr_items < 0);
	if (list_empty(item)) {
		list_add_tail(item, &l->list);
		l->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

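/*
 * Remove @item from its LRU. Returns true if the item was on a list and has
 * been removed, false if it was already off-list. The node is derived from
 * the item's backing page exactly as in list_lru_add(), so an item must be
 * deleted through the same list_lru it was added to.
 */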
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	l = list_lru_from_kmem(nlru, item);
	if (!list_empty(item)) {
		list_del_init(item);
		l->nr_items--;
		WARN_ON_ONCE(l->nr_items < 0);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

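/*
 * The two helpers below are for use from a list_lru_walk_cb callback only:
 * they expect the per-node lock to be held and the item to be on the list
 * currently being walked, which is why they can update nr_items without
 * taking any locks themselves.
 */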
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

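/*
 * Count the items on one (node, memcg index) sub-list; an index of -1
 * counts the node's global list. The per-node lock is taken so the lookup
 * cannot race with a concurrent resize of the per-memcg array.
 */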
static unsigned long __list_lru_count_one(struct list_lru *lru,
					  int nid, int memcg_idx)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
	WARN_ON_ONCE(l->nr_items < 0);
	count = l->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

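/*
 * Count everything on @nid: the node's global list plus, for a memcg-aware
 * lru, every per-memcg list.
 */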
unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	long count = 0;
	int memcg_idx;

	count += __list_lru_count_one(lru, nid, -1);
	if (list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx)
			count += __list_lru_count_one(lru, nid, memcg_idx);
	}
	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

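/*
 * Walk one (node, memcg index) sub-list, calling @isolate on each item
 * until *nr_to_walk items have been scanned. The callback runs with the
 * per-node lock held and reports what it did via the returned lru_status:
 * LRU_REMOVED and LRU_REMOVED_RETRY count the item as isolated, LRU_ROTATE
 * moves it to the tail, LRU_SKIP leaves it alone, and the *_RETRY variants
 * mean the lock was dropped and re-taken, so the traversal must restart.
 * A minimal callback sketch (hypothetical cache_object type, fields
 * assumed for illustration):
 *
 *	static enum lru_status cache_isolate(struct list_head *item,
 *			struct list_lru_one *l, spinlock_t *lock, void *arg)
 *	{
 *		struct cache_object *obj =
 *			container_of(item, struct cache_object, lru_node);
 *		struct list_head *dispose = arg;
 *
 *		if (atomic_read(&obj->refcount))
 *			return LRU_SKIP;
 *		list_lru_isolate(l, item);
 *		list_add(&obj->lru_node, dispose);
 *		return LRU_REMOVED;
 *	}
 */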
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
		case LRU_REMOVED:
			isolated++;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

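/*
 * Walk all lists on @nid: first the node's global list, then, while the
 * walk budget lasts, every per-memcg list. Returns the total number of
 * items isolated.
 */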
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

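/*
 * The memcg machinery below manages the per-node arrays of per-memcg
 * lists: allocating them at init time, freeing them at destroy time, and
 * growing them in every registered list_lru when the memcg subsystem hands
 * out a cache id that does not fit the current arrays (see
 * memcg_update_all_list_lrus).
 */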
#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	/* Free the lists allocated so far, i.e. slots begin..i-1. */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	int size = memcg_nr_cache_ids;

	nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
	if (!nlru->memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
		kfree(nlru->memcg_lrus);
		return -ENOMEM;
	}

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
	kfree(nlru->memcg_lrus);
}

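/*
 * Grow one node's per-memcg array from old_size to new_size entries: the
 * new array is fully populated first and only then swapped in under the
 * node's lock.
 */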
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = nlru->memcg_lrus;
	new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kfree(new);
		return -ENOMEM;
	}

	memcpy(new, old, old_size * sizeof(void *));

	/*
	 * The lock guarantees that we won't race with a reader
	 * (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	nlru->memcg_lrus = new;
	spin_unlock_irq(&nlru->lock);

	kfree(old);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	/*
	 * Do not bother shrinking the array back to the old size, because
	 * we cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	for (i = 0; i < nr_node_ids; i++) {
		if (!memcg_aware)
			lru->node[i].memcg_lrus = NULL;
		else if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--)
		memcg_destroy_list_lru_node(&lru->node[i]);
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for (i = 0; i < nr_node_ids; i++)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for (i = 0; i < nr_node_ids; i++) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for (i = 0; i < nr_node_ids; i++)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

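/*
 * Called by the memcg code when memcg_nr_cache_ids grows: resize the
 * per-memcg arrays of every registered list_lru, rolling all of them back
 * if any allocation fails so the update is all-or-nothing.
 */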
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

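/*
 * Initialise @lru with one list per node (plus the per-memcg arrays when
 * @memcg_aware). memcg_get_cache_ids() pins memcg_nr_cache_ids across the
 * allocation so the arrays cannot be resized underneath us before the lru
 * is registered. Callers normally reach this through the list_lru_init() /
 * list_lru_init_memcg() wrappers in <linux/list_lru.h> rather than calling
 * it directly.
 */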
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for (i = 0; i < nr_node_ids; i++) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);