/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>

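/*
 * Add @item to the LRU list of the node its memory lives on.  Returns
 * true if the item was not already on a list and has been added; the
 * per-node item count and the active_nodes mask are updated under the
 * node's lock.
 */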
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	if (list_empty(item)) {
		list_add_tail(item, &nlru->list);
		if (nlru->nr_items++ == 0)
			node_set(nid, lru->active_nodes);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

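/*
 * Remove @item from the LRU list it is linked on.  Returns true if the
 * item was queued and has been removed, false if it was not on a list.
 */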
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		list_del_init(item);
		if (--nlru->nr_items == 0)
			node_clear(nid, lru->active_nodes);
		WARN_ON_ONCE(nlru->nr_items < 0);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

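/* Return the number of items currently queued on node @nid's list. */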
unsigned long
list_lru_count_node(struct list_lru *lru, int nid)
{
	unsigned long count = 0;
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	count += nlru->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

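/*
 * Walk node @nid's list, calling @isolate on each item under the node's
 * lock and decrementing *nr_to_walk as it goes; the walk stops when
 * *nr_to_walk reaches zero.  The callback reports whether it removed the
 * item, wants it rotated to the tail, skipped it, or dropped the lock
 * (LRU_RETRY), in which case the traversal restarts from the head.
 * Returns the number of items isolated.
 */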
unsigned long
list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
		   void *cb_arg, unsigned long *nr_to_walk)
{

	struct list_lru_node *nlru = &lru->node[nid];
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
restart:
	list_for_each_safe(item, n, &nlru->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (--(*nr_to_walk) == 0)
			break;

		ret = isolate(item, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED:
			if (--nlru->nr_items == 0)
				node_clear(nid, lru->active_nodes);
			WARN_ON_ONCE(nlru->nr_items < 0);
			isolated++;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &nlru->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

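/*
 * Allocate and initialise the per-node lists, locks and counters.
 * Returns 0 on success or -ENOMEM if the node array cannot be allocated.
 */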
int list_lru_init(struct list_lru *lru)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	nodes_clear(lru->active_nodes);
	for (i = 0; i < nr_node_ids; i++) {
		spin_lock_init(&lru->node[i].lock);
		INIT_LIST_HEAD(&lru->node[i].list);
		lru->node[i].nr_items = 0;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(list_lru_init);

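/* Free the per-node array allocated by list_lru_init(). */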
void list_lru_destroy(struct list_lru *lru)
{
	kfree(lru->node);
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
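
/*
 * Illustrative usage sketch, kept compiled out: one way a cache could
 * drive this API.  "struct demo_obj", demo_isolate() and demo_shrink()
 * are hypothetical names invented purely for illustration.
 */
#if 0
struct demo_obj {
	struct list_head lru;		/* linked into a struct list_lru */
	/* ... object payload ... */
};

/* Callback matching list_lru_walk_cb: called with the node lock held. */
static enum lru_status demo_isolate(struct list_head *item,
				    spinlock_t *lru_lock, void *cb_arg)
{
	struct demo_obj *obj = container_of(item, struct demo_obj, lru);
	struct list_head *dispose = cb_arg;

	/*
	 * Move the object off the LRU while the lock is held and queue it
	 * for later disposal; the walker updates nr_items for us.
	 */
	list_move(&obj->lru, dispose);
	return LRU_REMOVED;
}

static unsigned long demo_shrink(struct list_lru *lru, int nid,
				 unsigned long nr_to_walk)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	freed = list_lru_walk_node(lru, nid, demo_isolate, &dispose,
				   &nr_to_walk);
	/* ... free everything left on the dispose list here ... */
	return freed;
}
#endif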