/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#ifdef CONFIG_MEMCG_KMEM
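/*
 * All list_lrus are kept on this list so that memcg_update_all_list_lrus()
 * can resize the per-memcg arrays of every memcg-aware lru when a new
 * kmem cache id is allocated.
 */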
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	/*
	 * This relies on node 0 always being present, even on
	 * systems that support sparse NUMA ids.
	 */
	return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	/*
	 * The lock protects the array of per-cgroup lists from relocation
	 * (see memcg_update_list_lru_node).
	 */
	lockdep_assert_held(&nlru->lock);
	if (nlru->memcg_lrus && idx >= 0)
		return nlru->memcg_lrus->lru[idx];

	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	struct mem_cgroup *memcg;

	if (!nlru->memcg_lrus)
		return &nlru->lru;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		return &nlru->lru;

	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

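/*
 * list_lru_add - add an item to the lru list
 *
 * The item goes on the list of the NUMA node its memory belongs to and,
 * for a memcg-aware lru, on the list of the memcg the object is accounted
 * to.  Returns true if the item was added, false if it was already on a
 * list (its list_head was non-empty).
 */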
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_add_tail(item, &l->list);
		l->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

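/*
 * list_lru_del - delete an item from the lru list
 *
 * Removes the item from the per-node (and per-memcg) list that
 * list_lru_add() put it on.  Returns true if the item was on a list and
 * has been removed, false if it was not on any list.
 */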
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_del_init(item);
		l->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

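/*
 * list_lru_isolate - remove an item during a walk
 *
 * Meant to be called from a list_lru_walk_cb callback, which runs with
 * the per-node lru lock held, so no extra locking is needed here.
 */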
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

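/*
 * list_lru_isolate_move - like list_lru_isolate(), but moves the item to
 * a caller-supplied list instead of leaving it unlinked.
 */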
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

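/*
 * A negative memcg_idx selects the node's global (non-memcg) list;
 * otherwise the list of the memcg with that cache id is counted.
 */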
static unsigned long __list_lru_count_one(struct list_lru *lru,
					  int nid, int memcg_idx)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
	count = l->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}

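/*
 * Return the number of items accounted to @memcg on node @nid.  The count
 * is read under the node lock and is therefore only a snapshot.
 */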
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	long count = 0;
	int memcg_idx;

	count += __list_lru_count_one(lru, nid, -1);
	if (list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx)
			count += __list_lru_count_one(lru, nid, memcg_idx);
	}
	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

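/*
 * Walk the list, calling @isolate on each item with the node lock held.
 * The callback decides the item's fate via its enum lru_status return
 * value; if it drops the lock it must return LRU_REMOVED_RETRY or
 * LRU_RETRY so the traversal is restarted.  A minimal sketch of a
 * callback (the "demo" structure and helper are hypothetical, not part
 * of this file):
 *
 *	static enum lru_status demo_isolate(struct list_head *item,
 *			struct list_lru_one *list, spinlock_t *lock,
 *			void *cb_arg)
 *	{
 *		struct demo_obj *obj = container_of(item, struct demo_obj, lru);
 *
 *		if (demo_obj_busy(obj))
 *			return LRU_SKIP;
 *		list_lru_isolate(list, item);
 *		return LRU_REMOVED;
 *	}
 */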
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * Decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items.
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

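/*
 * Walk the node's global list first, then, while the walk budget
 * (*nr_to_walk) lasts, each of its per-memcg lists.
 */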
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
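/*
 * For a memcg-aware lru, each node carries an array of list_lru_one
 * pointers (nlru->memcg_lrus), indexed by memcg cache id.  The array is
 * memcg_nr_cache_ids entries long and is grown by
 * memcg_update_list_lru_node() when new ids are allocated.
 */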
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	/*
	 * Free everything allocated so far, i.e. entries begin .. i - 1
	 * (__memcg_destroy_list_lru_node's end is exclusive, so passing
	 * i - 1 here would leak the last allocated entry).
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	int size = memcg_nr_cache_ids;

	nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
	if (!nlru->memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
		kfree(nlru->memcg_lrus);
		return -ENOMEM;
	}

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
	kfree(nlru->memcg_lrus);
}

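/*
 * Grow nlru->memcg_lrus from old_size to new_size entries.  Existing
 * pointers are copied over, so items stay on their lists; the new slots
 * get freshly allocated empty lists.
 */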
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = nlru->memcg_lrus;
	new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kfree(new);
		return -ENOMEM;
	}

	memcpy(new, old, old_size * sizeof(void *));

	/*
	 * The lock guarantees that we won't race with a reader
	 * (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	nlru->memcg_lrus = new;
	spin_unlock_irq(&nlru->lock);

	kfree(old);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	/*
	 * Do not bother shrinking the array back to the old size, because
	 * we cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

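/*
 * Grow the per-memcg arrays of every registered lru so that they can
 * hold new_size cache ids, rolling back the lrus already grown if any
 * allocation fails.
 */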
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

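/*
 * Splice all items accounted to the memcg with cache id src_idx onto the
 * list of dst_idx; used to reparent a cgroup's lru entries when the
 * cgroup goes offline.
 */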
static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
				      int src_idx, int dst_idx)
{
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	dst->nr_items += src->nr_items;
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, int dst_idx)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}

void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_idx);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

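/*
 * Callers normally reach this through the wrappers in <linux/list_lru.h>
 * rather than calling it directly.  A minimal usage sketch (error
 * handling elided; "my_lru" is a hypothetical name):
 *
 *	static struct list_lru my_lru;
 *
 *	err = list_lru_init(&my_lru);		// not memcg aware
 *	err = list_lru_init_memcg(&my_lru);	// memcg aware
 *	...
 *	list_lru_destroy(&my_lru);
 */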
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);