/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist (e.g. empty_bad_page)...
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual address
 * space; they need to be kmapped separately for doing IO on the pages. The
 * struct page itself (which carries these flag bits) is always mapped into
 * the kernel address space, however...
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */
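
/*
 * Illustrative sketch (not part of this header) of how these bits cooperate
 * on a typical buffered read; read_one_page() is a hypothetical stand-in for
 * the real I/O paths in mm/filemap.c:
 *
 *	lock_page(page);		(sets PG_locked, pins the page)
 *	if (!PageUptodate(page))
 *		read_one_page(page);	(I/O completion sets PG_uptodate)
 *	unlock_page(page);		(wakes page_waitqueue(page) waiters)
 */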

/*
 * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
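
/*
 * Illustrative sketch (assumed layout; the real shifts and masks live in
 * <linux/mm.h>, built from the generated bounds.h): a field such as the zone
 * id is read from the upper bits roughly as
 *
 *	zone = (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 */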
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_error,
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_swapcache,		/* Swap page: swp_entry_t in private */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state. These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_private_2,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,
};

#ifndef __GENERATING_BOUNDS_H

struct page;	/* forward declaration */

static inline struct page *compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return (struct page *) (head - 1);
	return page;
}

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}
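
/*
 * Illustrative note (derived from set_compound_head() further down): a tail
 * page encodes its head in page->compound_head with bit 0 set, so the
 * helpers above round-trip as
 *
 *	set_compound_head(tail, head);	(stores (unsigned long)head + 1)
 *	PageTail(tail)      == 1
 *	compound_head(tail) == head
 *	compound_head(head) == head	(bit 0 clear: identity)
 */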

/*
 * Page flags policies wrt compound pages
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 */
#define PF_ANY(page, enforce)	page
#define PF_HEAD(page, enforce)	compound_head(page)
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		compound_head(page);})
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		page;})

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline int Page##uname(struct page *page)		\
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void SetPage##uname(struct page *page)		\
	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void ClearPage##uname(struct page *page)	\
	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void __SetPage##uname(struct page *page)	\
	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)
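
/*
 * For example, PAGEFLAG(Dirty, dirty, PF_HEAD) below expands (roughly) to:
 *
 *	static __always_inline int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void ClearPageDirty(struct page *page)
 *	{ clear_bit(PG_dirty, &compound_head(page)->flags); }
 */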

#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
	__CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set operations exist for PG_writeback. The unconditional
 * operators are risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND)
	TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
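
/*
 * Note that the Readahead and Reclaim helpers above alias the same
 * PG_reclaim bit; this works because a page is never under readahead and
 * being written back for reclaim at the same time.
 */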

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
PAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page. See ksm.h.
 *
 * PAGE_MAPPING_MOVABLE without PAGE_MAPPING_ANON is used for a non-lru
 * movable page; page->mapping then points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
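
/*
 * Summary of the low two bits of page->mapping, as tested by the helpers
 * below (MOVABLE bit, then ANON bit):
 *
 *	00	pagecache page (or unmapped): an address_space, or NULL
 *	01	PageAnon: an anon_vma
 *	10	__PageMovable: the driver's address_space
 *	11	PageKsm: KSM's private stable-tree structure
 */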

static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret;

	page = compound_head(page);
	ret = test_bit(PG_uptodate, &(page)->flags);
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &page->flags);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
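
/*
 * Illustrative sketch (not part of this header) of the barrier pairing
 * documented above; use_data() is hypothetical. Writer side, e.g. an I/O
 * completion path:
 *
 *	memcpy(page_address(page), src, PAGE_SIZE);
 *	SetPageUptodate(page);		(smp_wmb() orders the stores)
 *
 * Reader side:
 *
 *	if (PageUptodate(page))		(smp_rmb() when the bit is set)
 *		use_data(page_address(page));	(guaranteed to see src's data)
 */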

int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return false;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is only safe to call while
 * split_huge_pmd() cannot run from under us, e.g. when protected by the
 * MMU notifier; otherwise it may result in page->_mapcount < 0 false
 * positives.
 */
static inline int PageTransCompoundMap(struct page *page)
{
	return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per small page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * For such pages, PageDoubleMap means ->_mapcount in all sub-pages is offset
 * up by one. This reference will go away with the last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
static inline int PageDoubleMap(struct page *page)
{
	return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
}

static inline void SetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	set_bit(PG_double_map, &page[1].flags);
}

static inline void ClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	clear_bit(PG_double_map, &page[1].flags);
}

static inline int TestSetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_set_bit(PG_double_map, &page[1].flags);
}

static inline int TestClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_clear_bit(PG_double_map, &page[1].flags);
}
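
/*
 * Note that all of the DoubleMap helpers above keep the bit in
 * page[1].flags, i.e. in the first tail page, matching "Stored in first
 * tail page's flags" next to PG_double_map in enum pageflags.
 */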

#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
	TESTSETFLAG_FALSE(DoubleMap)
	TESTCLEARFLAG_FALSE(DoubleMap)
#endif

/*
 * For pages that are never mapped to userspace, page->_mapcount may be
 * used for storing extra information about page type. Any value used
 * for this purpose must be <= -2, but it's better not to start too close
 * to -2 so that an underflow of page_mapcount() won't be mistaken
 * for a special page.
 */
#define PAGE_MAPCOUNT_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return atomic_read(&page->_mapcount) ==				\
				PAGE_##lname##_MAPCOUNT_VALUE;		\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);	\
	atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE);	\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	atomic_set(&page->_mapcount, -1);				\
}
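
/*
 * For example, PAGE_MAPCOUNT_OPS(Buddy, BUDDY) below generates PageBuddy(),
 * __SetPageBuddy() and __ClearPageBuddy(), which compare and store
 * PAGE_BUDDY_MAPCOUNT_VALUE (-128) in page->_mapcount.
 */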

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE		(-128)
PAGE_MAPCOUNT_OPS(Buddy, BUDDY)

/*
 * PageBalloon() is set on pages that are on the balloon page list
 * (see mm/balloon_compaction.c).
 */
#define PAGE_BALLOON_MAPCOUNT_VALUE		(-256)
PAGE_MAPCOUNT_OPS(Balloon, BALLOON)

/*
 * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
 * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
 */
#define PAGE_KMEMCG_MAPCOUNT_VALUE		(-512)
PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG)

extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY);

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves. PG_active is reused for this,
 * which is safe because slab pages are never on the LRU lists.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed. Pages being freed should not have
 * these flags set. If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE \
	(1UL << PG_lru	   | 1UL << PG_locked    | \
	 1UL << PG_private | 1UL << PG_private_2 | \
	 1UL << PG_writeback | 1UL << PG_reserved | \
	 1UL << PG_slab	   | 1UL << PG_swapcache | 1UL << PG_active | \
	 1UL << PG_unevictable | __PG_MLOCKED)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set. If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}
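
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * reclaim-style code asks the filesystem to release its private data before
 * freeing a page, e.g.
 *
 *	if (page_has_private(page) &&
 *	    !try_to_release_page(page, GFP_KERNEL))
 *		goto keep_page;		(the fs refused; leave the page alone)
 */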

#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */