list_lru: per-node API
This patch adapts the list_lru API to accept an optional node argument, to
be used by NUMA-aware shrinking functions. Code that does not care about
the NUMA placement of objects can still call the very same functions as
before; they will simply iterate over all active nodes.
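As an illustration of how node-agnostic callers keep working, the old entry
points can become thin wrappers that loop over lru->active_nodes and delegate
to the new per-node helpers. The sketch below assumes they end up as static
inlines in include/linux/list_lru.h; that part of the change is not shown in
the hunks below:

	static inline unsigned long list_lru_count(struct list_lru *lru)
	{
		unsigned long count = 0;
		int nid;

		/* visit only the nodes that currently hold items */
		for_each_node_mask(nid, lru->active_nodes)
			count += list_lru_count_node(lru, nid);

		return count;
	}

	static inline unsigned long
	list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
		      void *cb_arg, unsigned long nr_to_walk)
	{
		unsigned long isolated = 0;
		int nid;

		for_each_node_mask(nid, lru->active_nodes) {
			isolated += list_lru_walk_node(lru, nid, isolate,
						       cb_arg, &nr_to_walk);
			/* each per-node walk decrements nr_to_walk */
			if (nr_to_walk <= 0)
				break;
		}
		return isolated;
	}

A NUMA-aware shrinker, by contrast, would call list_lru_count_node() and
list_lru_walk_node() directly for the node it has been asked to scan.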
Signed-off-by: Glauber Costa <glommer@openvz.org>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
diff --git a/mm/list_lru.c b/mm/list_lru.c
index e77c29f..86cb554 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -47,25 +47,22 @@
 }
 EXPORT_SYMBOL_GPL(list_lru_del);
 
-unsigned long list_lru_count(struct list_lru *lru)
+unsigned long
+list_lru_count_node(struct list_lru *lru, int nid)
 {
 	unsigned long count = 0;
-	int nid;
+	struct list_lru_node *nlru = &lru->node[nid];
 
-	for_each_node_mask(nid, lru->active_nodes) {
-		struct list_lru_node *nlru = &lru->node[nid];
-
-		spin_lock(&nlru->lock);
-		WARN_ON_ONCE(nlru->nr_items < 0);
-		count += nlru->nr_items;
-		spin_unlock(&nlru->lock);
-	}
+	spin_lock(&nlru->lock);
+	WARN_ON_ONCE(nlru->nr_items < 0);
+	count += nlru->nr_items;
+	spin_unlock(&nlru->lock);
 
 	return count;
 }
-EXPORT_SYMBOL_GPL(list_lru_count);
+EXPORT_SYMBOL_GPL(list_lru_count_node);
 
-static unsigned long
+unsigned long
 list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
 		   void *cb_arg, unsigned long *nr_to_walk)
 {
@@ -115,22 +112,6 @@
 }
 EXPORT_SYMBOL_GPL(list_lru_walk_node);
 
-unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
-			    void *cb_arg, unsigned long nr_to_walk)
-{
-	unsigned long isolated = 0;
-	int nid;
-
-	for_each_node_mask(nid, lru->active_nodes) {
-		isolated += list_lru_walk_node(lru, nid, isolate,
-					       cb_arg, &nr_to_walk);
-		if (nr_to_walk <= 0)
-			break;
-	}
-	return isolated;
-}
-EXPORT_SYMBOL_GPL(list_lru_walk);
-
 static unsigned long list_lru_dispose_all_node(struct list_lru *lru, int nid,
 					       list_lru_dispose_cb dispose)
 {