#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

4/**
5 * page_is_file_cache - should the page be on a file LRU or anon LRU?
6 * @page: the page to test
7 *
Rik van Riel4f98a2f2008-10-18 20:26:32 -07008 * Returns LRU_FILE if @page is page cache page backed by a regular filesystem,
Rik van Rielb2e18532008-10-18 20:26:30 -07009 * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
10 * Used by functions that manipulate the LRU lists, to sort a page
11 * onto the right LRU list.
12 *
13 * We would like to get this info without a page flag, but the state
14 * needs to survive until the page is last deleted from the LRU, which
15 * could be as far down as __page_cache_release.
16 */
17static inline int page_is_file_cache(struct page *page)
18{
19 if (PageSwapBacked(page))
20 return 0;
21
22 /* The page is page cache backed by a normal filesystem. */
Rik van Riel4f98a2f2008-10-18 20:26:32 -070023 return LRU_FILE;
Rik van Rielb2e18532008-10-18 20:26:30 -070024}
25
/*
 * add_page_to_lru_list - link @page onto LRU list @l of @zone.
 *
 * Bumps the corresponding per-zone LRU vmstat counter and tells the
 * memory cgroup layer so its per-cgroup LRU bookkeeping stays in sync
 * with the zone list.
 * NOTE(review): presumably called with zone->lru_lock held -- confirm
 * against callers; this header cannot show the locking context.
 */
static inline void
add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
	list_add(&page->lru, &zone->lru[l].list);
	__inc_zone_state(zone, NR_LRU_BASE + l);
	mem_cgroup_add_lru_list(page, l);
}
33
/*
 * del_page_from_lru_list - unlink @page from LRU list @l of @zone.
 *
 * Mirror of add_page_to_lru_list(): drops the per-zone LRU vmstat
 * counter and notifies the memory cgroup layer.  @l must be the list
 * the page is actually on, or the accounting goes wrong.
 * NOTE(review): presumably called with zone->lru_lock held -- confirm
 * against callers; this header cannot show the locking context.
 */
static inline void
del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
	list_del(&page->lru);
	__dec_zone_state(zone, NR_LRU_BASE + l);
	mem_cgroup_del_lru_list(page, l);
}
41
42static inline void
Linus Torvalds1da177e2005-04-16 15:20:36 -070043del_page_from_lru(struct zone *zone, struct page *page)
44{
Rik van Riel4f98a2f2008-10-18 20:26:32 -070045 enum lru_list l = LRU_BASE;
Christoph Lameterb69408e2008-10-18 20:26:14 -070046
Linus Torvalds1da177e2005-04-16 15:20:36 -070047 list_del(&page->lru);
Lee Schermerhorn894bc312008-10-18 20:26:39 -070048 if (PageUnevictable(page)) {
49 __ClearPageUnevictable(page);
50 l = LRU_UNEVICTABLE;
51 } else {
52 if (PageActive(page)) {
53 __ClearPageActive(page);
54 l += LRU_ACTIVE;
55 }
56 l += page_is_file_cache(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -070057 }
Christoph Lameterb69408e2008-10-18 20:26:14 -070058 __dec_zone_state(zone, NR_LRU_BASE + l);
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -080059 mem_cgroup_del_lru_list(page, l);
Linus Torvalds1da177e2005-04-16 15:20:36 -070060}
Christoph Lameter21eac812006-01-08 01:00:45 -080061
Christoph Lameterb69408e2008-10-18 20:26:14 -070062/**
63 * page_lru - which LRU list should a page be on?
64 * @page: the page to test
65 *
66 * Returns the LRU list a page should be on, as an index
67 * into the array of LRU lists.
68 */
69static inline enum lru_list page_lru(struct page *page)
70{
71 enum lru_list lru = LRU_BASE;
72
Lee Schermerhorn894bc312008-10-18 20:26:39 -070073 if (PageUnevictable(page))
74 lru = LRU_UNEVICTABLE;
75 else {
76 if (PageActive(page))
77 lru += LRU_ACTIVE;
78 lru += page_is_file_cache(page);
79 }
Christoph Lameterb69408e2008-10-18 20:26:14 -070080
81 return lru;
82}

#endif