blob: 1efe4ecc02b1f6d14738a3dc7e6ea2e9255f540b [file] [log] [blame]
Dave Chinnera38e4082013-08-28 10:17:58 +10001/*
2 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
3 * Authors: David Chinner and Glauber Costa
4 *
5 * Generic LRU infrastructure
6 */
7#include <linux/kernel.h>
8#include <linux/module.h>
Dave Chinner3b1d58a2013-08-28 10:18:00 +10009#include <linux/mm.h>
Dave Chinnera38e4082013-08-28 10:17:58 +100010#include <linux/list_lru.h>
11
12bool list_lru_add(struct list_lru *lru, struct list_head *item)
13{
Dave Chinner3b1d58a2013-08-28 10:18:00 +100014 int nid = page_to_nid(virt_to_page(item));
15 struct list_lru_node *nlru = &lru->node[nid];
16
17 spin_lock(&nlru->lock);
18 WARN_ON_ONCE(nlru->nr_items < 0);
Dave Chinnera38e4082013-08-28 10:17:58 +100019 if (list_empty(item)) {
Dave Chinner3b1d58a2013-08-28 10:18:00 +100020 list_add_tail(item, &nlru->list);
21 if (nlru->nr_items++ == 0)
22 node_set(nid, lru->active_nodes);
23 spin_unlock(&nlru->lock);
Dave Chinnera38e4082013-08-28 10:17:58 +100024 return true;
25 }
Dave Chinner3b1d58a2013-08-28 10:18:00 +100026 spin_unlock(&nlru->lock);
Dave Chinnera38e4082013-08-28 10:17:58 +100027 return false;
28}
29EXPORT_SYMBOL_GPL(list_lru_add);
30
31bool list_lru_del(struct list_lru *lru, struct list_head *item)
32{
Dave Chinner3b1d58a2013-08-28 10:18:00 +100033 int nid = page_to_nid(virt_to_page(item));
34 struct list_lru_node *nlru = &lru->node[nid];
35
36 spin_lock(&nlru->lock);
Dave Chinnera38e4082013-08-28 10:17:58 +100037 if (!list_empty(item)) {
38 list_del_init(item);
Dave Chinner3b1d58a2013-08-28 10:18:00 +100039 if (--nlru->nr_items == 0)
40 node_clear(nid, lru->active_nodes);
41 WARN_ON_ONCE(nlru->nr_items < 0);
42 spin_unlock(&nlru->lock);
Dave Chinnera38e4082013-08-28 10:17:58 +100043 return true;
44 }
Dave Chinner3b1d58a2013-08-28 10:18:00 +100045 spin_unlock(&nlru->lock);
Dave Chinnera38e4082013-08-28 10:17:58 +100046 return false;
47}
48EXPORT_SYMBOL_GPL(list_lru_del);
49
Dave Chinner3b1d58a2013-08-28 10:18:00 +100050unsigned long list_lru_count(struct list_lru *lru)
Dave Chinnera38e4082013-08-28 10:17:58 +100051{
Dave Chinner3b1d58a2013-08-28 10:18:00 +100052 unsigned long count = 0;
53 int nid;
54
55 for_each_node_mask(nid, lru->active_nodes) {
56 struct list_lru_node *nlru = &lru->node[nid];
57
58 spin_lock(&nlru->lock);
59 WARN_ON_ONCE(nlru->nr_items < 0);
60 count += nlru->nr_items;
61 spin_unlock(&nlru->lock);
62 }
63
64 return count;
65}
66EXPORT_SYMBOL_GPL(list_lru_count);
67
68static unsigned long
69list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
70 void *cb_arg, unsigned long *nr_to_walk)
71{
72
73 struct list_lru_node *nlru = &lru->node[nid];
Dave Chinnera38e4082013-08-28 10:17:58 +100074 struct list_head *item, *n;
Dave Chinner3b1d58a2013-08-28 10:18:00 +100075 unsigned long isolated = 0;
Dave Chinnera38e4082013-08-28 10:17:58 +100076 /*
77 * If we don't keep state of at which pass we are, we can loop at
78 * LRU_RETRY, since we have no guarantees that the caller will be able
79 * to do something other than retry on the next pass. We handle this by
80 * allowing at most one retry per object. This should not be altered
81 * by any condition other than LRU_RETRY.
82 */
83 bool first_pass = true;
84
Dave Chinner3b1d58a2013-08-28 10:18:00 +100085 spin_lock(&nlru->lock);
Dave Chinnera38e4082013-08-28 10:17:58 +100086restart:
Dave Chinner3b1d58a2013-08-28 10:18:00 +100087 list_for_each_safe(item, n, &nlru->list) {
Dave Chinnera38e4082013-08-28 10:17:58 +100088 enum lru_status ret;
Dave Chinner3b1d58a2013-08-28 10:18:00 +100089 ret = isolate(item, &nlru->lock, cb_arg);
Dave Chinnera38e4082013-08-28 10:17:58 +100090 switch (ret) {
91 case LRU_REMOVED:
Dave Chinner3b1d58a2013-08-28 10:18:00 +100092 if (--nlru->nr_items == 0)
93 node_clear(nid, lru->active_nodes);
94 WARN_ON_ONCE(nlru->nr_items < 0);
95 isolated++;
Dave Chinnera38e4082013-08-28 10:17:58 +100096 break;
97 case LRU_ROTATE:
Dave Chinner3b1d58a2013-08-28 10:18:00 +100098 list_move_tail(item, &nlru->list);
Dave Chinnera38e4082013-08-28 10:17:58 +100099 break;
100 case LRU_SKIP:
101 break;
102 case LRU_RETRY:
103 if (!first_pass) {
104 first_pass = true;
105 break;
106 }
107 first_pass = false;
108 goto restart;
109 default:
110 BUG();
111 }
112
Dave Chinner3b1d58a2013-08-28 10:18:00 +1000113 if ((*nr_to_walk)-- == 0)
Dave Chinnera38e4082013-08-28 10:17:58 +1000114 break;
115
116 }
Dave Chinner3b1d58a2013-08-28 10:18:00 +1000117
118 spin_unlock(&nlru->lock);
119 return isolated;
120}
121EXPORT_SYMBOL_GPL(list_lru_walk_node);
122
123unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
124 void *cb_arg, unsigned long nr_to_walk)
125{
126 unsigned long isolated = 0;
127 int nid;
128
129 for_each_node_mask(nid, lru->active_nodes) {
130 isolated += list_lru_walk_node(lru, nid, isolate,
131 cb_arg, &nr_to_walk);
132 if (nr_to_walk <= 0)
133 break;
134 }
135 return isolated;
Dave Chinnera38e4082013-08-28 10:17:58 +1000136}
137EXPORT_SYMBOL_GPL(list_lru_walk);
138
/*
 * Splice all objects off node @nid's LRU list and hand them to
 * @dispose in batches.
 *
 * The node lock cannot be held across the dispose callback, so the
 * list is emptied in one splice under the lock, the lock is dropped
 * for the callback, and the loop re-checks under the lock afterwards
 * — objects added concurrently while the lock was dropped are picked
 * up by the next iteration.  Returns the number of objects disposed.
 */
static unsigned long list_lru_dispose_all_node(struct list_lru *lru, int nid,
					       list_lru_dispose_cb dispose)
{
	struct list_lru_node *nlru = &lru->node[nid];
	LIST_HEAD(dispose_list);
	unsigned long disposed = 0;

	spin_lock(&nlru->lock);
	while (!list_empty(&nlru->list)) {
		/* take the whole list in one go; the counter goes with it */
		list_splice_init(&nlru->list, &dispose_list);
		disposed += nlru->nr_items;
		nlru->nr_items = 0;
		node_clear(nid, lru->active_nodes);
		/* drop the lock for the callback — see function comment */
		spin_unlock(&nlru->lock);

		dispose(&dispose_list);

		spin_lock(&nlru->lock);
	}
	spin_unlock(&nlru->lock);
	return disposed;
}
161
Dave Chinnera38e4082013-08-28 10:17:58 +1000162unsigned long list_lru_dispose_all(struct list_lru *lru,
163 list_lru_dispose_cb dispose)
164{
Dave Chinner3b1d58a2013-08-28 10:18:00 +1000165 unsigned long disposed;
166 unsigned long total = 0;
167 int nid;
Dave Chinnera38e4082013-08-28 10:17:58 +1000168
Dave Chinner3b1d58a2013-08-28 10:18:00 +1000169 do {
170 disposed = 0;
171 for_each_node_mask(nid, lru->active_nodes) {
172 disposed += list_lru_dispose_all_node(lru, nid,
173 dispose);
174 }
175 total += disposed;
176 } while (disposed != 0);
Dave Chinnera38e4082013-08-28 10:17:58 +1000177
Dave Chinner3b1d58a2013-08-28 10:18:00 +1000178 return total;
Dave Chinnera38e4082013-08-28 10:17:58 +1000179}
180
181int list_lru_init(struct list_lru *lru)
182{
Dave Chinner3b1d58a2013-08-28 10:18:00 +1000183 int i;
Dave Chinnera38e4082013-08-28 10:17:58 +1000184
Dave Chinner3b1d58a2013-08-28 10:18:00 +1000185 nodes_clear(lru->active_nodes);
186 for (i = 0; i < MAX_NUMNODES; i++) {
187 spin_lock_init(&lru->node[i].lock);
188 INIT_LIST_HEAD(&lru->node[i].list);
189 lru->node[i].nr_items = 0;
190 }
Dave Chinnera38e4082013-08-28 10:17:58 +1000191 return 0;
192}
193EXPORT_SYMBOL_GPL(list_lru_init);