/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>

struct mem_cgroup;

/* list_lru_walk_cb must always return one of these values */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_REMOVED_RETRY,	/* item removed, but lock has been
				   dropped and reacquired */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
};

struct list_lru_one {
	struct list_head	list;
	/* may become negative during memcg reparenting */
	long			nr_items;
};

struct list_lru_memcg {
	/* array of per cgroup lists, indexed by memcg_cache_id */
	struct list_lru_one	*lru[0];
};

struct list_lru_node {
	/* protects all lists on the node, including per cgroup */
	spinlock_t		lock;
	/* global list, used for the root cgroup in cgroup aware lrus */
	struct list_lru_one	lru;
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
	/* for cgroup aware lrus, points to per cgroup lists; otherwise NULL */
	struct list_lru_memcg	*memcg_lrus;
#endif
	long nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
	struct list_lru_node	*node;
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
	struct list_head	list;
	bool			memcg_aware;
#endif
};

void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key);

#define list_lru_init(lru)		__list_lru_init((lru), false, NULL)
#define list_lru_init_key(lru, key)	__list_lru_init((lru), false, (key))
#define list_lru_init_memcg(lru)	__list_lru_init((lru), true, NULL)

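/*
 * Usage sketch (illustrative only, not part of the original header): a
 * subsystem wanting per-memcg LRU lists would initialize and tear down its
 * list_lru as below. The names my_cache_lru, my_cache_init and
 * my_cache_exit are assumptions made for the example.
 *
 *	static struct list_lru my_cache_lru;
 *
 *	static int __init my_cache_init(void)
 *	{
 *		return list_lru_init_memcg(&my_cache_lru);
 *	}
 *
 *	static void __exit my_cache_exit(void)
 *	{
 *		list_lru_destroy(&my_cache_lru);
 *	}
 */
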
int memcg_update_all_list_lrus(int num_memcgs);
void memcg_drain_all_list_lrus(int src_idx, int dst_idx);

/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. The caller therefore does not need to track whether the
 * element is already on the list and may update it lazily. Note, however,
 * that this holds for *a* list, not *this* list: if the caller organizes
 * its elements so that they can be on more than one type of list, it is up
 * to the caller to fully remove the item from the previous list (with
 * list_lru_del(), for instance) before moving it to @lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments about an element already belonging to
 * a list also apply to list_lru_del().
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);

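/*
 * Usage sketch (illustrative only): because list_lru_add() and
 * list_lru_del() are no-ops when the element is already in the desired
 * state, a cache can call them lazily on every state change without
 * tracking list membership itself. struct my_object and my_lru are
 * hypothetical.
 *
 *	struct my_object {
 *		struct list_head lru;
 *	};
 *
 *	static void my_object_unused(struct my_object *obj)
 *	{
 *		list_lru_add(&my_lru, &obj->lru);
 *	}
 *
 *	static void my_object_pinned(struct my_object *obj)
 *	{
 *		list_lru_del(&my_lru, &obj->lru);
 *	}
 */
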
/**
 * list_lru_count_one: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 * @memcg: the cgroup to count from.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);

static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
						  struct shrink_control *sc)
{
	return list_lru_count_one(lru, sc->nid, sc->memcg);
}

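/*
 * Usage sketch (illustrative only): a shrinker's count_objects callback
 * would typically just forward to list_lru_shrink_count(), which picks the
 * right node and memcg list from the shrink_control. my_lru is a
 * hypothetical list_lru instance.
 *
 *	static unsigned long my_shrink_count(struct shrinker *shrink,
 *					     struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&my_lru, sc);
 *	}
 */
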
static inline unsigned long list_lru_count(struct list_lru *lru)
{
	long count = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);

	return count;
}

void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head);

typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);

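/*
 * Sketch of an isolate callback (illustrative only): each return value of
 * enum lru_status above maps to a decision the callback can take under the
 * lru lock. The object layout (struct my_object) and its locking helpers
 * are assumptions; the overall shape follows the pattern used by existing
 * callers such as the dcache shrinker.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *list,
 *					  spinlock_t *lock, void *cb_arg)
 *	{
 *		struct list_head *freeable = cb_arg;
 *		struct my_object *obj =
 *			container_of(item, struct my_object, lru);
 *
 *		if (!my_object_trylock(obj))
 *			return LRU_SKIP;
 *
 *		if (my_object_in_use(obj)) {
 *			my_object_unlock(obj);
 *			return LRU_ROTATE;
 *		}
 *
 *		list_lru_isolate_move(list, item, freeable);
 *		my_object_unlock(obj);
 *		return LRU_REMOVED;
 *	}
 */
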
/**
 * list_lru_walk_one: walk a list_lru, isolating and disposing of freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do
 *	     with the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque pointer. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback returns an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_one(struct list_lru *lru,
				int nid, struct mem_cgroup *memcg,
				list_lru_walk_cb isolate, void *cb_arg,
				unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk);

static inline unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
		     list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
				 &sc->nr_to_scan);
}

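/*
 * Usage sketch (illustrative only): a shrinker's scan_objects callback
 * pairing with the hypothetical my_isolate() above. Items are moved onto a
 * private list under the lru lock and disposed of afterwards, outside the
 * lock; my_lru and my_dispose_list() are assumptions.
 *
 *	static unsigned long my_shrink_scan(struct shrinker *shrink,
 *					    struct shrink_control *sc)
 *	{
 *		LIST_HEAD(freeable);
 *		unsigned long freed;
 *
 *		freed = list_lru_shrink_walk(&my_lru, sc, my_isolate,
 *					     &freeable);
 *		my_dispose_list(&freeable);
 *		return freed;
 *	}
 */
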
static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
	      void *cb_arg, unsigned long nr_to_walk)
{
	long isolated = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		isolated += list_lru_walk_node(lru, nid, isolate,
					       cb_arg, &nr_to_walk);
		if (nr_to_walk <= 0)
			break;
	}
	return isolated;
}
#endif /* _LRU_LIST_H */