Rik van Riel | b2e1853 | 2008-10-18 20:26:30 -0700 | [diff] [blame] | 1 | #ifndef LINUX_MM_INLINE_H |
| 2 | #define LINUX_MM_INLINE_H |
| 3 | |
Rik van Riel | 2c888cf | 2011-01-13 15:47:13 -0800 | [diff] [blame] | 4 | #include <linux/huge_mm.h> |
Lisa Du | 6e543d5 | 2013-09-11 14:22:36 -0700 | [diff] [blame] | 5 | #include <linux/swap.h> |
Rik van Riel | 2c888cf | 2011-01-13 15:47:13 -0800 | [diff] [blame] | 6 | |
/**
 * page_is_file_cache - should the page be on a file LRU or anon LRU?
 * @page: the page to test
 *
 * Returns 1 if @page is page cache page backed by a regular filesystem,
 * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
 * Used by functions that manipulate the LRU lists, to sort a page
 * onto the right LRU list.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the page is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 */
static inline int page_is_file_cache(struct page *page)
{
	/* Swap-backed pages (anon, shmem/tmpfs) go on the anon LRUs. */
	if (PageSwapBacked(page))
		return 0;
	return 1;
}
| 24 | |
Hugh Dickins | 9d5e6a9 | 2016-05-19 17:12:38 -0700 | [diff] [blame] | 25 | static __always_inline void __update_lru_size(struct lruvec *lruvec, |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 26 | enum lru_list lru, enum zone_type zid, |
| 27 | int nr_pages) |
Hugh Dickins | 9d5e6a9 | 2016-05-19 17:12:38 -0700 | [diff] [blame] | 28 | { |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 29 | struct pglist_data *pgdat = lruvec_pgdat(lruvec); |
| 30 | |
| 31 | __mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages); |
Minchan Kim | 71c799f | 2016-07-28 15:47:26 -0700 | [diff] [blame] | 32 | __mod_zone_page_state(&pgdat->node_zones[zid], |
| 33 | NR_ZONE_LRU_BASE + lru, nr_pages); |
Hugh Dickins | 9d5e6a9 | 2016-05-19 17:12:38 -0700 | [diff] [blame] | 34 | } |
| 35 | |
| 36 | static __always_inline void update_lru_size(struct lruvec *lruvec, |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 37 | enum lru_list lru, enum zone_type zid, |
| 38 | int nr_pages) |
Hugh Dickins | 9d5e6a9 | 2016-05-19 17:12:38 -0700 | [diff] [blame] | 39 | { |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 40 | __update_lru_size(lruvec, lru, zid, nr_pages); |
Mel Gorman | 7ee36a1 | 2016-07-28 15:47:17 -0700 | [diff] [blame] | 41 | #ifdef CONFIG_MEMCG |
Michal Hocko | b4536f0c8 | 2017-01-10 16:58:04 -0800 | [diff] [blame] | 42 | mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); |
Hugh Dickins | 9d5e6a9 | 2016-05-19 17:12:38 -0700 | [diff] [blame] | 43 | #endif |
| 44 | } |
| 45 | |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 46 | static __always_inline void add_page_to_lru_list(struct page *page, |
| 47 | struct lruvec *lruvec, enum lru_list lru) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 48 | { |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 49 | update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); |
Hugh Dickins | 4111304 | 2012-01-12 17:20:01 -0800 | [diff] [blame] | 50 | list_add(&page->lru, &lruvec->lists[lru]); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 51 | } |
| 52 | |
Johannes Weiner | c55e8d0 | 2017-02-24 14:56:23 -0800 | [diff] [blame] | 53 | static __always_inline void add_page_to_lru_list_tail(struct page *page, |
| 54 | struct lruvec *lruvec, enum lru_list lru) |
| 55 | { |
| 56 | update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); |
| 57 | list_add_tail(&page->lru, &lruvec->lists[lru]); |
| 58 | } |
| 59 | |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 60 | static __always_inline void del_page_from_lru_list(struct page *page, |
| 61 | struct lruvec *lruvec, enum lru_list lru) |
Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 62 | { |
| 63 | list_del(&page->lru); |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 64 | update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page)); |
Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 65 | } |
| 66 | |
Johannes Weiner | 401a8e1 | 2009-09-21 17:02:58 -0700 | [diff] [blame] | 67 | /** |
| 68 | * page_lru_base_type - which LRU list type should a page be on? |
| 69 | * @page: the page to test |
| 70 | * |
| 71 | * Used for LRU list index arithmetic. |
| 72 | * |
| 73 | * Returns the base LRU type - file or anon - @page should be on. |
| 74 | */ |
| 75 | static inline enum lru_list page_lru_base_type(struct page *page) |
| 76 | { |
| 77 | if (page_is_file_cache(page)) |
| 78 | return LRU_INACTIVE_FILE; |
| 79 | return LRU_INACTIVE_ANON; |
| 80 | } |
| 81 | |
Hugh Dickins | 1c1c53d | 2012-01-12 17:20:04 -0800 | [diff] [blame] | 82 | /** |
| 83 | * page_off_lru - which LRU list was page on? clearing its lru flags. |
| 84 | * @page: the page to test |
| 85 | * |
| 86 | * Returns the LRU list a page was on, as an index into the array of LRU |
| 87 | * lists; and clears its Unevictable or Active flags, ready for freeing. |
| 88 | */ |
Konstantin Khlebnikov | 014483b | 2012-05-29 15:06:53 -0700 | [diff] [blame] | 89 | static __always_inline enum lru_list page_off_lru(struct page *page) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 90 | { |
Hugh Dickins | 4111304 | 2012-01-12 17:20:01 -0800 | [diff] [blame] | 91 | enum lru_list lru; |
Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 92 | |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 93 | if (PageUnevictable(page)) { |
| 94 | __ClearPageUnevictable(page); |
Hugh Dickins | 4111304 | 2012-01-12 17:20:01 -0800 | [diff] [blame] | 95 | lru = LRU_UNEVICTABLE; |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 96 | } else { |
Hugh Dickins | 4111304 | 2012-01-12 17:20:01 -0800 | [diff] [blame] | 97 | lru = page_lru_base_type(page); |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 98 | if (PageActive(page)) { |
| 99 | __ClearPageActive(page); |
Hugh Dickins | 4111304 | 2012-01-12 17:20:01 -0800 | [diff] [blame] | 100 | lru += LRU_ACTIVE; |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 101 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 102 | } |
Hugh Dickins | 1c1c53d | 2012-01-12 17:20:04 -0800 | [diff] [blame] | 103 | return lru; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 104 | } |
Christoph Lameter | 21eac81 | 2006-01-08 01:00:45 -0800 | [diff] [blame] | 105 | |
Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 106 | /** |
| 107 | * page_lru - which LRU list should a page be on? |
| 108 | * @page: the page to test |
| 109 | * |
| 110 | * Returns the LRU list a page should be on, as an index |
| 111 | * into the array of LRU lists. |
| 112 | */ |
Konstantin Khlebnikov | 014483b | 2012-05-29 15:06:53 -0700 | [diff] [blame] | 113 | static __always_inline enum lru_list page_lru(struct page *page) |
Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 114 | { |
Johannes Weiner | 401a8e1 | 2009-09-21 17:02:58 -0700 | [diff] [blame] | 115 | enum lru_list lru; |
Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 116 | |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 117 | if (PageUnevictable(page)) |
| 118 | lru = LRU_UNEVICTABLE; |
| 119 | else { |
Johannes Weiner | 401a8e1 | 2009-09-21 17:02:58 -0700 | [diff] [blame] | 120 | lru = page_lru_base_type(page); |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 121 | if (PageActive(page)) |
| 122 | lru += LRU_ACTIVE; |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 123 | } |
Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 124 | return lru; |
| 125 | } |
Rik van Riel | b2e1853 | 2008-10-18 20:26:30 -0700 | [diff] [blame] | 126 | |
/* Return the page linked at the tail (->prev) of an LRU list head. */
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
| 128 | |
Rik van Riel | b2e1853 | 2008-10-18 20:26:30 -0700 | [diff] [blame] | 129 | #endif |