/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either lock or RCU protects the array of per cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!nlru->memcg_lrus)
		goto out;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

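/*
 * list_lru_add - add an item to an lru list
 * @lru:  the lru to add to
 * @item: the list_head embedded in the object
 *
 * The NUMA node is derived from the object's address and, if the lru is
 * memcg aware, the memcg from the object's page, so the item is queued
 * on the per-node (and per-memcg) list it belongs to. Adding the first
 * item to a per-memcg list also sets the memcg's shrinker bit. Returns
 * true if the item was added, false if it was already on a list.
 */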
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			memcg_set_shrinker_bit(memcg, nid,
					       lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

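/*
 * list_lru_del - delete an item from an lru list
 * @lru:  the lru the item is on
 * @item: the list_head embedded in the object
 *
 * The inverse of list_lru_add(): the item is looked up on the same
 * per-node (and per-memcg) list and removed. Returns true if the item
 * was removed, false if it was not on a list.
 */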
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

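/*
 * list_lru_isolate - remove an item from its per-node/per-memcg list
 * list_lru_isolate_move - move an item from its list onto @head
 *
 * Both are meant to be called from a walk callback with the node lock
 * held; they adjust only the list and its nr_items, while the walker
 * updates nlru->nr_items when the callback returns LRU_REMOVED*.
 */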
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = l->nr_items;
	rcu_read_unlock();

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

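/*
 * __list_lru_walk_one - walk one per-node (or per-memcg) list
 *
 * Called with nlru->lock held. The @isolate callback is invoked on each
 * item and its return value drives the walk: LRU_REMOVED (and
 * LRU_REMOVED_RETRY) count the item as isolated, LRU_ROTATE moves it to
 * the list tail, LRU_SKIP leaves it alone, and LRU_RETRY (or
 * LRU_REMOVED_RETRY) restarts the walk because the callback dropped and
 * reacquired the lock, invalidating the traversal. *nr_to_walk is
 * decremented for every item visited and the walk stops when it
 * reaches zero.
 */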
static unsigned long
__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

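/*
 * Same as list_lru_walk_one() except that the node lock is taken with
 * spin_lock_irq(), for callers that must exclude interrupts while the
 * lock is held.
 */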
unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

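/*
 * list_lru_walk_node - walk every list on one node
 *
 * Walks the node-local list first and then, while the walk budget
 * lasts, every per-memcg list on that node (when the lru is memcg
 * aware), taking and dropping the node lock around each list.
 */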
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(nlru, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}

static void kvfree_rcu(struct rcu_head *head)
{
	struct list_lru_memcg *mlru;

	mlru = container_of(head, struct list_lru_memcg, rcu);
	kvfree(mlru);
}

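/*
 * memcg_update_list_lru_node - grow a node's per-memcg array
 *
 * Allocates a larger array, copies the old pointers across, populates
 * the new slots, then publishes it with rcu_assign_pointer() under the
 * node lock and frees the old array after a grace period. Readers thus
 * see either the old or the new array, never a partially copied one.
 */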
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock to avoid
	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	call_rcu(&old->rcu, kvfree_rcu);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because
	 * we cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	lru->memcg_aware = memcg_aware;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

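/*
 * memcg_drain_list_lru_node - reparent the items at src_idx to dst_memcg
 *
 * Splices the source list into the destination list under the node
 * lock, carries the item count across, and sets the destination
 * memcg's shrinker bit if the splice made an empty list non-empty.
 */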
static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;
	bool set;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	set = (!dst->nr_items && src->nr_items);
	dst->nr_items += src->nr_items;
	if (set)
		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}

void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_memcg);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

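/*
 * __list_lru_init - initialize a list_lru structure
 *
 * Callers normally reach this through the list_lru_init*() wrappers in
 * <linux/list_lru.h> rather than invoking it directly. A minimal sketch
 * of direct use (names are illustrative, not from this file):
 *
 *	static struct list_lru example_lru;
 *
 *	err = __list_lru_init(&example_lru, false, NULL, NULL);
 *	if (err)
 *		return err;
 *	...
 *	list_lru_destroy(&example_lru);
 *
 * Returns 0 on success or -ENOMEM if the per-node array (or the
 * per-memcg arrays) cannot be allocated.
 */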
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

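/*
 * list_lru_destroy - undo __list_lru_init()
 *
 * Safe to call on an lru that was never initialized, or twice on the
 * same one: lru->node is NULL in both cases and we bail out early.
 */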
void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);