/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

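/*
 * When kmem accounting is enabled, every list_lru is kept on a global
 * registry protected by list_lrus_mutex, so that the per-memcg arrays
 * below can be resized (memcg_update_all_list_lrus) or drained
 * (memcg_drain_all_list_lrus) as cgroups come and go.
 */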
#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either lock or RCU protects the array of per cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}

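/*
 * Map an accounted kmem object back to the memcg that owns it, via the
 * head page backing the allocation.  Returns NULL when kmem accounting
 * is disabled, in which case the item lands on the global (per-node)
 * list.
 */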
static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!nlru->memcg_lrus)
		goto out;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			memcg_set_shrinker_bit(memcg, nid,
					       lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
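/*
 * Example (a minimal sketch, not part of this file; 'my_object' and
 * 'my_lru' are hypothetical): a cache typically parks an object on the
 * LRU when its last reference goes away, keeping it cached but
 * reclaimable.
 *
 *	struct my_object {
 *		refcount_t ref;
 *		struct list_head lru;
 *	};
 *
 *	static void my_object_put(struct my_object *obj)
 *	{
 *		if (refcount_dec_and_test(&obj->ref))
 *			list_lru_add(&my_lru, &obj->lru);
 *	}
 *
 * The false return when the item is already on a list means callers
 * need not track list membership themselves.
 */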

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
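/*
 * Example (a hedged sketch; 'my_object' and 'object_is_busy' are
 * hypothetical): a walk callback matching list_lru_walk_cb that frees
 * idle objects and leaves busy ones in place.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *lru,
 *					  spinlock_t *lock, void *cb_arg)
 *	{
 *		struct my_object *obj;
 *
 *		obj = container_of(item, struct my_object, lru);
 *		if (object_is_busy(obj))
 *			return LRU_SKIP;
 *		list_lru_isolate(lru, item);
 *		return LRU_REMOVED;
 *	}
 */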

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = l->nr_items;
	rcu_read_unlock();

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

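/*
 * Core of all the walk variants below.  The caller must hold
 * nlru->lock; the isolate callback may drop it (signalled by
 * LRU_REMOVED_RETRY or LRU_RETRY), in which case the traversal is
 * restarted from the head of the list.
 */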
static unsigned long
__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

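/*
 * Identical to list_lru_walk_one() except that the per-node lock is
 * taken and released with interrupts disabled, for callers whose LRU
 * lock must be IRQ-safe.
 */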
unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(nlru, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
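/*
 * Example (a hedged sketch): shrinkers usually drive these walks
 * through the list_lru_shrink_count()/list_lru_shrink_walk() wrappers
 * in <linux/list_lru.h>, which pass sc->nid and sc->memcg through.
 * 'my_lru' and 'my_isolate' are hypothetical.
 *
 *	static unsigned long my_scan(struct shrinker *shrink,
 *				     struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_walk(&my_lru, sc, my_isolate, NULL);
 *	}
 */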

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}

static void kvfree_rcu(struct rcu_head *head)
{
	struct list_lru_memcg *mlru;

	mlru = container_of(head, struct list_lru_memcg, rcu);
	kvfree(mlru);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock avoid taking
	 * rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	call_rcu(&old->rcu, kvfree_rcu);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	lru->memcg_aware = memcg_aware;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

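/*
 * Called from the memcg side when the cache id space grows: resize the
 * per-memcg array of every registered LRU, rolling the already-updated
 * ones back on failure.
 */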
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

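/*
 * When a memcg is taken offline, its items are reparented: for every
 * registered LRU, the dying cgroup's lists are spliced into those of
 * dst_memcg (in practice the parent), and the shrinker bit is set on
 * the destination so the moved items remain reclaimable.
 */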
static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;
	bool set;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	set = (!dst->nr_items && src->nr_items);
	dst->nr_items += src->nr_items;
	if (set)
		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}

void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_memcg);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
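/*
 * Example (a hedged sketch): users normally go through the wrappers in
 * <linux/list_lru.h> rather than calling __list_lru_init() directly;
 * list_lru_init_memcg() passes memcg_aware = true so the per-cgroup
 * lists above get allocated.  'my_lru' and 'my_shrinker' are
 * hypothetical.
 *
 *	static struct list_lru my_lru;
 *
 *	err = list_lru_init_memcg(&my_lru, &my_shrinker);
 *	if (err)
 *		return err;
 *	...
 *	list_lru_destroy(&my_lru);
 */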

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);