Rik van Riel | b2e1853 | 2008-10-18 20:26:30 -0700 | [diff] [blame] | 1 | #ifndef LINUX_MM_INLINE_H |
| 2 | #define LINUX_MM_INLINE_H |
| 3 | |
Rik van Riel | 2c888cf | 2011-01-13 15:47:13 -0800 | [diff] [blame] | 4 | #include <linux/huge_mm.h> |
Lisa Du | 6e543d5 | 2013-09-11 14:22:36 -0700 | [diff] [blame] | 5 | #include <linux/swap.h> |
Rik van Riel | 2c888cf | 2011-01-13 15:47:13 -0800 | [diff] [blame] | 6 | |
#ifdef CONFIG_HIGHMEM
/* Running count of file-backed pages resident in highmem zones. */
extern atomic_t highmem_file_pages;

/*
 * acct_highmem_file_pages - track file pages entering/leaving highmem zones
 * @zid: zone index the pages belong to
 * @lru: LRU list being modified
 * @nr_pages: number of pages added (positive) or removed (negative)
 *
 * Only pages that are both in a highmem zone and on a file LRU are
 * counted; everything else is ignored.
 */
static inline void acct_highmem_file_pages(int zid, enum lru_list lru,
					int nr_pages)
{
	if (is_highmem_idx(zid) && is_file_lru(lru))
		atomic_add(nr_pages, &highmem_file_pages);
}
#else
/* !CONFIG_HIGHMEM: no highmem zones exist, so there is nothing to count. */
static inline void acct_highmem_file_pages(int zid, enum lru_list lru,
					int nr_pages)
{
}
#endif
| 22 | |
/**
 * page_is_file_cache - should the page be on a file LRU or anon LRU?
 * @page: the page to test
 *
 * Returns 1 if @page is page cache page backed by a regular filesystem,
 * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
 * Used by functions that manipulate the LRU lists, to sort a page
 * onto the right LRU list.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the page is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 */
static inline int page_is_file_cache(struct page *page)
{
	/* Swap-backed pages (anon, tmpfs, ...) belong on the anon LRUs. */
	if (PageSwapBacked(page))
		return 0;
	return 1;
}
| 40 | |
Hugh Dickins | 9d5e6a9 | 2016-05-19 17:12:38 -0700 | [diff] [blame] | 41 | static __always_inline void __update_lru_size(struct lruvec *lruvec, |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 42 | enum lru_list lru, enum zone_type zid, |
| 43 | int nr_pages) |
Hugh Dickins | 9d5e6a9 | 2016-05-19 17:12:38 -0700 | [diff] [blame] | 44 | { |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 45 | struct pglist_data *pgdat = lruvec_pgdat(lruvec); |
| 46 | |
| 47 | __mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages); |
Minchan Kim | 71c799f | 2016-07-28 15:47:26 -0700 | [diff] [blame^] | 48 | __mod_zone_page_state(&pgdat->node_zones[zid], |
| 49 | NR_ZONE_LRU_BASE + lru, nr_pages); |
Mel Gorman | bca6759 | 2016-07-28 15:47:05 -0700 | [diff] [blame] | 50 | acct_highmem_file_pages(zid, lru, nr_pages); |
Hugh Dickins | 9d5e6a9 | 2016-05-19 17:12:38 -0700 | [diff] [blame] | 51 | } |
| 52 | |
/*
 * update_lru_size - adjust LRU accounting, including memcg bookkeeping
 * @lruvec: the lruvec the pages are on
 * @lru: the LRU list affected
 * @zid: zone index of the pages
 * @nr_pages: pages added (positive) or removed (negative)
 *
 * Wraps __update_lru_size() and, when memory cgroups are configured,
 * also updates the per-memcg LRU size for @lruvec.
 */
static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				int nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
#endif
}
| 62 | |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 63 | static __always_inline void add_page_to_lru_list(struct page *page, |
| 64 | struct lruvec *lruvec, enum lru_list lru) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 65 | { |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 66 | update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); |
Hugh Dickins | 4111304 | 2012-01-12 17:20:01 -0800 | [diff] [blame] | 67 | list_add(&page->lru, &lruvec->lists[lru]); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 68 | } |
| 69 | |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 70 | static __always_inline void del_page_from_lru_list(struct page *page, |
| 71 | struct lruvec *lruvec, enum lru_list lru) |
Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 72 | { |
| 73 | list_del(&page->lru); |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 74 | update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page)); |
Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 75 | } |
| 76 | |
Johannes Weiner | 401a8e1 | 2009-09-21 17:02:58 -0700 | [diff] [blame] | 77 | /** |
| 78 | * page_lru_base_type - which LRU list type should a page be on? |
| 79 | * @page: the page to test |
| 80 | * |
| 81 | * Used for LRU list index arithmetic. |
| 82 | * |
| 83 | * Returns the base LRU type - file or anon - @page should be on. |
| 84 | */ |
| 85 | static inline enum lru_list page_lru_base_type(struct page *page) |
| 86 | { |
| 87 | if (page_is_file_cache(page)) |
| 88 | return LRU_INACTIVE_FILE; |
| 89 | return LRU_INACTIVE_ANON; |
| 90 | } |
| 91 | |
Hugh Dickins | 1c1c53d | 2012-01-12 17:20:04 -0800 | [diff] [blame] | 92 | /** |
| 93 | * page_off_lru - which LRU list was page on? clearing its lru flags. |
| 94 | * @page: the page to test |
| 95 | * |
| 96 | * Returns the LRU list a page was on, as an index into the array of LRU |
| 97 | * lists; and clears its Unevictable or Active flags, ready for freeing. |
| 98 | */ |
Konstantin Khlebnikov | 014483b | 2012-05-29 15:06:53 -0700 | [diff] [blame] | 99 | static __always_inline enum lru_list page_off_lru(struct page *page) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 100 | { |
Hugh Dickins | 4111304 | 2012-01-12 17:20:01 -0800 | [diff] [blame] | 101 | enum lru_list lru; |
Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 102 | |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 103 | if (PageUnevictable(page)) { |
| 104 | __ClearPageUnevictable(page); |
Hugh Dickins | 4111304 | 2012-01-12 17:20:01 -0800 | [diff] [blame] | 105 | lru = LRU_UNEVICTABLE; |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 106 | } else { |
Hugh Dickins | 4111304 | 2012-01-12 17:20:01 -0800 | [diff] [blame] | 107 | lru = page_lru_base_type(page); |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 108 | if (PageActive(page)) { |
| 109 | __ClearPageActive(page); |
Hugh Dickins | 4111304 | 2012-01-12 17:20:01 -0800 | [diff] [blame] | 110 | lru += LRU_ACTIVE; |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 111 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 112 | } |
Hugh Dickins | 1c1c53d | 2012-01-12 17:20:04 -0800 | [diff] [blame] | 113 | return lru; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 114 | } |
Christoph Lameter | 21eac81 | 2006-01-08 01:00:45 -0800 | [diff] [blame] | 115 | |
Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 116 | /** |
| 117 | * page_lru - which LRU list should a page be on? |
| 118 | * @page: the page to test |
| 119 | * |
| 120 | * Returns the LRU list a page should be on, as an index |
| 121 | * into the array of LRU lists. |
| 122 | */ |
Konstantin Khlebnikov | 014483b | 2012-05-29 15:06:53 -0700 | [diff] [blame] | 123 | static __always_inline enum lru_list page_lru(struct page *page) |
Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 124 | { |
Johannes Weiner | 401a8e1 | 2009-09-21 17:02:58 -0700 | [diff] [blame] | 125 | enum lru_list lru; |
Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 126 | |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 127 | if (PageUnevictable(page)) |
| 128 | lru = LRU_UNEVICTABLE; |
| 129 | else { |
Johannes Weiner | 401a8e1 | 2009-09-21 17:02:58 -0700 | [diff] [blame] | 130 | lru = page_lru_base_type(page); |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 131 | if (PageActive(page)) |
| 132 | lru += LRU_ACTIVE; |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 133 | } |
Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 134 | return lru; |
| 135 | } |
Rik van Riel | b2e1853 | 2008-10-18 20:26:30 -0700 | [diff] [blame] | 136 | |
/* The page at the tail (->prev) of an LRU list head. */
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
| 138 | |
Rik van Riel | b2e1853 | 2008-10-18 20:26:30 -0700 | [diff] [blame] | 139 | #endif |