/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>

/* list_lru_walk_cb must always return one of these */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_REMOVED_RETRY,	/* item removed, but lock has been
				   dropped and reacquired */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
};
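
/*
 * Example (illustrative sketch only): a typical walk callback maps the state
 * of the object onto one of the statuses above. "struct my_object" is a
 * hypothetical cache object embedding a list_head named "lru", protected by
 * its own "lock" and "refcount":
 *
 *	static enum lru_status my_lru_isolate(struct list_head *item,
 *					      spinlock_t *lock, void *cb_arg)
 *	{
 *		struct my_object *obj = container_of(item, struct my_object, lru);
 *
 *		if (!spin_trylock(&obj->lock))
 *			return LRU_SKIP;	// cannot lock it now, skip it
 *		if (obj->refcount) {
 *			spin_unlock(&obj->lock);
 *			return LRU_ROTATE;	// still in use, give another pass
 *		}
 *		list_del_init(item);		// take it off the LRU
 *		spin_unlock(&obj->lock);
 *		return LRU_REMOVED;
 *	}
 */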

struct list_lru_node {
	spinlock_t		lock;
	struct list_head	list;
	/* kept as signed so we can catch imbalance bugs */
	long			nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
	struct list_lru_node	*node;
	nodemask_t		active_nodes;
};

void list_lru_destroy(struct list_lru *lru);
int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key);
static inline int list_lru_init(struct list_lru *lru)
{
	return list_lru_init_key(lru, NULL);
}
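
/*
 * Example (illustrative sketch; "my_cache_lru" and the error label are
 * hypothetical): a cache embeds or declares a struct list_lru, initializes
 * it at setup time and pairs that with list_lru_destroy() on teardown.
 * list_lru_init() returns 0 on success or a negative errno:
 *
 *	static struct list_lru my_cache_lru;
 *
 *	err = list_lru_init(&my_cache_lru);
 *	if (err)
 *		goto fail;
 *	...
 *	list_lru_destroy(&my_cache_lru);
 */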

/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function does nothing.
 * The caller therefore does not need to track whether the element is
 * already on the list and may update it lazily. Note, however, that this
 * holds for *a* list, not *this* list: if the caller keeps elements on
 * more than one type of list, it is up to the caller to fully remove the
 * item from the previous list (with list_lru_del(), for instance) before
 * moving it to @lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);

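/*
 * Example (illustrative sketch; "obj", its fields and "my_cache_lru" from the
 * init sketch above are hypothetical): a cache can add an object lazily when
 * its last reference goes away, relying on list_lru_add() doing nothing if
 * the object is already on the list:
 *
 *	if (!--obj->refcount)
 *		list_lru_add(&my_cache_lru, &obj->lru);
 */
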
/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments about an element already belonging to
 * a list apply to list_lru_del() as well.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);
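
/*
 * Example (illustrative sketch, mirroring the add example above): when a
 * cached object is looked up and referenced again, it can be taken back off
 * the LRU; list_lru_del() is a no-op if the object was never added:
 *
 *	if (obj->refcount++ == 0)
 *		list_lru_del(&my_cache_lru, &obj->lru);
 */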

/**
 * list_lru_count_node: return the number of objects held by @lru on node @nid
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_node(struct list_lru *lru, int nid);
static inline unsigned long list_lru_count(struct list_lru *lru)
{
	long count = 0;
	int nid;

	for_each_node_mask(nid, lru->active_nodes)
		count += list_lru_count_node(lru, nid);

	return count;
}
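
/*
 * Example (illustrative sketch; the shrinker wiring is hypothetical and
 * simplified): the per-node count is a natural fit for a NUMA-aware
 * shrinker's ->count_objects() callback, which is invoked per node:
 *
 *	static unsigned long my_cache_count(struct shrinker *s,
 *					    struct shrink_control *sc)
 *	{
 *		return list_lru_count_node(&my_cache_lru, sc->nid);
 *	}
 */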

typedef enum lru_status
(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
/**
 * list_lru_walk_node: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque argument that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and the caller-provided opaque argument. The @isolate callback may
 * drop the lock internally, but *must* return with the lock held. The callback
 * returns an enum lru_status telling the list_lru infrastructure what to
 * do with the object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk);

static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
	      void *cb_arg, unsigned long nr_to_walk)
{
	long isolated = 0;
	int nid;

	for_each_node_mask(nid, lru->active_nodes) {
		isolated += list_lru_walk_node(lru, nid, isolate,
					       cb_arg, &nr_to_walk);
		if (nr_to_walk <= 0)
			break;
	}
	return isolated;
}
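
/*
 * Example (illustrative sketch; "my_cache_scan" and "my_lru_isolate" are
 * hypothetical, and a real shrinker would also dispose of the isolated
 * objects after the walk): a NUMA-aware shrinker's ->scan_objects() can
 * drive the per-node walk with the scan budget it was given:
 *
 *	static unsigned long my_cache_scan(struct shrinker *s,
 *					   struct shrink_control *sc)
 *	{
 *		unsigned long nr = sc->nr_to_scan;
 *
 *		return list_lru_walk_node(&my_cache_lru, sc->nid,
 *					  my_lru_isolate, NULL, &nr);
 *	}
 */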
#endif /* _LRU_LIST_H */