#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

/**
 * page_is_file_cache - should the page be on a file LRU or anon LRU?
 * @page: the page to test
 *
 * Returns LRU_FILE if @page is a page cache page backed by a regular
 * filesystem, or 0 if @page is anonymous, tmpfs or otherwise ram or swap
 * backed.  Used by functions that manipulate the LRU lists, to sort a page
 * onto the right LRU list.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the page is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 */
static inline int page_is_file_cache(struct page *page)
{
	if (PageSwapBacked(page))
		return 0;

	/* The page is page cache backed by a normal filesystem. */
	return LRU_FILE;
}
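
/*
 * Note (illustrative, not part of the original header): the LRU_FILE value
 * returned above is an offset into enum lru_list, so callers add it to
 * LRU_BASE (plus LRU_ACTIVE for active pages) to land on the right list.
 * Assuming the enum layout in mmzone.h, that gives for example:
 *
 *	LRU_BASE                          == LRU_INACTIVE_ANON
 *	LRU_BASE + LRU_FILE               == LRU_INACTIVE_FILE
 *	LRU_BASE + LRU_ACTIVE + LRU_FILE  == LRU_ACTIVE_FILE
 */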

static inline void
add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
	list_add(&page->lru, &zone->lru[l].list);
	__inc_zone_state(zone, NR_LRU_BASE + l);
}

static inline void
del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
	list_del(&page->lru);
	__dec_zone_state(zone, NR_LRU_BASE + l);
}
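
/*
 * Illustrative sketch (not part of the original header): these two helpers
 * are meant to be used as a symmetric pair while the caller holds the
 * zone's LRU lock.  Deactivating an anonymous page, for instance, could
 * look roughly like:
 *
 *	spin_lock_irq(&zone->lru_lock);
 *	del_page_from_lru_list(zone, page, LRU_ACTIVE_ANON);
 *	ClearPageActive(page);
 *	add_page_to_lru_list(zone, page, LRU_INACTIVE_ANON);
 *	spin_unlock_irq(&zone->lru_lock);
 *
 * The locking shown is an assumption about the caller's context; the
 * helpers themselves only touch the list and the per-zone counters.
 */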

static inline void
del_page_from_lru(struct zone *zone, struct page *page)
{
	enum lru_list l = LRU_BASE;

	list_del(&page->lru);
	if (PageUnevictable(page)) {
		__ClearPageUnevictable(page);
		l = LRU_UNEVICTABLE;
	} else {
		if (PageActive(page)) {
			__ClearPageActive(page);
			l += LRU_ACTIVE;
		}
		l += page_is_file_cache(page);
	}
	__dec_zone_state(zone, NR_LRU_BASE + l);
}
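
/*
 * Example (an illustration, not in the original source): for a page that
 * has PG_active set and is not swap backed, the walk above ends up with
 * l = LRU_BASE + LRU_ACTIVE + LRU_FILE, i.e. LRU_ACTIVE_FILE, so it is
 * the NR_ACTIVE_FILE counter that gets decremented.  The Active and
 * Unevictable flags are cleared here because, unlike in
 * del_page_from_lru_list(), the page is leaving the LRU entirely.
 */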

/**
 * page_lru - which LRU list should a page be on?
 * @page: the page to test
 *
 * Returns the LRU list a page should be on, as an index
 * into the array of LRU lists.
 */
static inline enum lru_list page_lru(struct page *page)
{
	enum lru_list lru = LRU_BASE;

	if (PageUnevictable(page))
		lru = LRU_UNEVICTABLE;
	else {
		if (PageActive(page))
			lru += LRU_ACTIVE;
		lru += page_is_file_cache(page);
	}

	return lru;
}
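
/*
 * Sketch (illustrative only, assuming the caller already holds the zone's
 * LRU lock): page_lru() pairs naturally with the helpers above, e.g. when
 * putting a page back on whichever list its flags say it belongs on:
 *
 *	enum lru_list lru = page_lru(page);
 *
 *	SetPageLRU(page);
 *	add_page_to_lru_list(zone, page, lru);
 *
 * The SetPageLRU() call reflects the convention that pages on an LRU list
 * have PG_lru set; whether it belongs at this point is up to the caller.
 */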
Rik van Rielb2e18532008-10-18 20:26:30 -070080
Rik van Riel556adec2008-10-18 20:26:34 -070081/**
82 * inactive_anon_is_low - check if anonymous pages need to be deactivated
83 * @zone: zone to check
84 *
85 * Returns true if the zone does not have enough inactive anon pages,
86 * meaning some active anon pages need to be deactivated.
87 */
88static inline int inactive_anon_is_low(struct zone *zone)
89{
90 unsigned long active, inactive;
91
92 active = zone_page_state(zone, NR_ACTIVE_ANON);
93 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
94
95 if (inactive * zone->inactive_ratio < active)
96 return 1;
97
98 return 0;
99}
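
/*
 * Worked example (hypothetical numbers): with zone->inactive_ratio == 3,
 * 600 active anon pages and 150 inactive anon pages give
 * 150 * 3 = 450 < 600, so the function returns 1 and some active anon
 * pages should be deactivated.  inactive_ratio itself is derived from the
 * zone size elsewhere; bigger zones tolerate a proportionally larger
 * active list.
 */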
#endif