#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

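/*
 * The tracepoint structs are declared by hand, with only
 * <linux/tracepoint-defs.h> included, so that this header does not have
 * to pull in <linux/tracepoint.h>; see the comment above
 * page_ref_tracepoint_active() below.
 */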
extern struct tracepoint __tracepoint_page_ref_set;
extern struct tracepoint __tracepoint_page_ref_mod;
extern struct tracepoint __tracepoint_page_ref_mod_and_test;
extern struct tracepoint __tracepoint_page_ref_mod_and_return;
extern struct tracepoint __tracepoint_page_ref_mod_unless;
extern struct tracepoint __tracepoint_page_ref_freeze;
extern struct tracepoint __tracepoint_page_ref_unfreeze;

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would use the trace_<tracepoint>_enabled() helpers here,
 * but header include dependencies make that infeasible, so the static
 * key check is open coded instead.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) static_key_false(&(t).key)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

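/*
 * With CONFIG_DEBUG_PAGE_REF disabled the check is constant-false, so
 * the stub hooks below compile away entirely.
 */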
#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif /* CONFIG_DEBUG_PAGE_REF */

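/*
 * page_ref_count() reads this page's own _refcount, while page_count()
 * reports the refcount of the compound head, which is where references
 * to tail pages are accounted.
 */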
static inline int page_ref_count(struct page *page)
{
        return atomic_read(&page->_refcount);
}

static inline int page_count(struct page *page)
{
        return atomic_read(&compound_head(page)->_refcount);
}

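/*
 * set_page_count() stores the value directly instead of performing an
 * atomic read-modify-write, so it is only safe while no one else can
 * take or drop a reference to the page.
 */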
static inline void set_page_count(struct page *page, int v)
{
        atomic_set(&page->_refcount, v);
        if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
                __page_ref_set(page, v);
}

/*
 * Set up the page count before the page is freed into the page
 * allocator for the first time (boot or memory hotplug).
 */
static inline void init_page_count(struct page *page)
{
        set_page_count(page, 1);
}

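/*
 * The helpers below wrap the corresponding atomic ops on _refcount and,
 * when CONFIG_DEBUG_PAGE_REF is enabled, report the signed delta via
 * the page_ref_mod tracepoint.
 */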
static inline void page_ref_add(struct page *page, int nr)
{
        atomic_add(nr, &page->_refcount);
        if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
                __page_ref_mod(page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
        atomic_sub(nr, &page->_refcount);
        if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
                __page_ref_mod(page, -nr);
}

static inline void page_ref_inc(struct page *page)
{
        atomic_inc(&page->_refcount);
        if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
                __page_ref_mod(page, 1);
}

static inline void page_ref_dec(struct page *page)
{
        atomic_dec(&page->_refcount);
        if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
                __page_ref_mod(page, -1);
}

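/*
 * The _and_test and _return variants also hand the result of the atomic
 * op back to the caller, so the final put can detect the count hitting
 * zero and free the page; the tracepoints record both the delta and
 * that result.
 */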
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
        int ret = atomic_sub_and_test(nr, &page->_refcount);

        if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
                __page_ref_mod_and_test(page, -nr, ret);
        return ret;
}

static inline int page_ref_dec_and_test(struct page *page)
{
        int ret = atomic_dec_and_test(&page->_refcount);

        if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
                __page_ref_mod_and_test(page, -1, ret);
        return ret;
}

static inline int page_ref_dec_return(struct page *page)
{
        int ret = atomic_dec_return(&page->_refcount);

        if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
                __page_ref_mod_and_return(page, -1, ret);
        return ret;
}

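/*
 * page_ref_add_unless() adds @nr only if the count is not @u, and
 * returns non-zero iff the add happened. The canonical caller is
 * get_page_unless_zero() in <linux/mm.h>, i.e.
 * page_ref_add_unless(page, 1, 0): take a speculative reference, but
 * refuse to resurrect a page whose count has already dropped to zero.
 */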
static inline int page_ref_add_unless(struct page *page, int nr, int u)
{
        int ret = atomic_add_unless(&page->_refcount, nr, u);

        if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
                __page_ref_mod_unless(page, nr, ret);
        return ret;
}

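/*
 * page_ref_freeze() atomically replaces an expected refcount of @count
 * with zero and returns 1 on success, giving the caller exclusive
 * access to the page; it returns 0 if the count did not match. Use
 * page_ref_unfreeze() to publish the page again.
 */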
static inline int page_ref_freeze(struct page *page, int count)
{
        int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

        if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
                __page_ref_freeze(page, count, ret);
        return ret;
}

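/*
 * Unfreezing asserts that the page really is frozen (refcount zero) and
 * that the restored count is non-zero; once _refcount is set,
 * speculative lookups can take references again.
 */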
static inline void page_ref_unfreeze(struct page *page, int count)
{
        VM_BUG_ON_PAGE(page_count(page) != 0, page);
        VM_BUG_ON(count == 0);

        atomic_set(&page->_refcount, count);
        if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
                __page_ref_unfreeze(page, count);
}

#endif /* _LINUX_PAGE_REF_H */