#ifndef LINUX_KMEMCHECK_H
#define LINUX_KMEMCHECK_H

#include <linux/mm_types.h>
#include <linux/types.h>

#ifdef CONFIG_KMEMCHECK
extern int kmemcheck_enabled;

/* The slab-related functions. */
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
void kmemcheck_free_shadow(struct page *page, int order);
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size);
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);

void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
			       gfp_t gfpflags);

void kmemcheck_show_pages(struct page *p, unsigned int n);
void kmemcheck_hide_pages(struct page *p, unsigned int n);

bool kmemcheck_page_is_tracked(struct page *p);

void kmemcheck_mark_unallocated(void *address, unsigned int n);
void kmemcheck_mark_uninitialized(void *address, unsigned int n);
void kmemcheck_mark_initialized(void *address, unsigned int n);
void kmemcheck_mark_freed(void *address, unsigned int n);

void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);

int kmemcheck_show_addr(unsigned long address);
int kmemcheck_hide_addr(unsigned long address);

bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);

#else
#define kmemcheck_enabled 0

static inline void
kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
}

static inline void
kmemcheck_free_shadow(struct page *page, int order)
{
}

static inline void
kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
		     size_t size)
{
}

static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
				       size_t size)
{
}

static inline void kmemcheck_pagealloc_alloc(struct page *p,
					     unsigned int order, gfp_t gfpflags)
{
}

static inline bool kmemcheck_page_is_tracked(struct page *p)
{
	return false;
}

static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_freed(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_unallocated_pages(struct page *p,
						    unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
						      unsigned int n)
{
}

static inline void kmemcheck_mark_initialized_pages(struct page *p,
						    unsigned int n)
{
}

static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
	return true;
}

#endif /* CONFIG_KMEMCHECK */

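/*
 * Usage sketch (illustrative only, not taken from an in-tree caller): the
 * kmemcheck_mark_*() helpers above set the shadow state of a byte range by
 * hand, which is useful when memory changes state in a way kmemcheck cannot
 * observe by itself -- for instance a buffer filled by a device rather than
 * by the CPU. A driver could then suppress the resulting false positives
 * with something like (the buffer and length names are hypothetical):
 *
 *	kmemcheck_mark_initialized(rx_buf, rx_len);
 *
 * Note that the !CONFIG_KMEMCHECK variants above are empty inline stubs, so
 * such calls need no #ifdef at the call site.
 */
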
/*
 * Bitfield annotations
 *
 * How to use: if you have a struct that uses bitfields, for example
 *
 *	struct a {
 *		int x:8, y:8;
 *	};
 *
 * then it should be rewritten as
 *
 *	struct a {
 *		kmemcheck_bitfield_begin(flags);
 *		int x:8, y:8;
 *		kmemcheck_bitfield_end(flags);
 *	};
 *
 * Now the "flags_begin" and "flags_end" members may be used to refer to the
 * beginning and end, respectively, of the bitfield (and expressions like
 * &x.flags_begin are allowed). As soon as the struct is allocated, the
 * bitfields should be annotated:
 *
 *	struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
 *	kmemcheck_annotate_bitfield(a, flags);
 *
 * Note: We provide the same definitions for both kmemcheck and non-kmemcheck
 * kernels. This makes it harder to introduce accidental errors. It is also
 * allowed to pass NULL pointers to kmemcheck_annotate_bitfield().
 */
#define kmemcheck_bitfield_begin(name)	\
	int name##_begin[0];

#define kmemcheck_bitfield_end(name)	\
	int name##_end[0];

#define kmemcheck_annotate_bitfield(ptr, name)				\
	do {								\
		int _n;							\
									\
		if (!ptr)						\
			break;						\
									\
		_n = (long) &((ptr)->name##_end)			\
			- (long) &((ptr)->name##_begin);		\
		MAYBE_BUILD_BUG_ON(_n < 0);				\
									\
		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
	} while (0)

/* Mark the storage of an entire variable as initialized in one go. */
#define kmemcheck_annotate_variable(var)				\
	do {								\
		kmemcheck_mark_initialized(&(var), sizeof(var));	\
	} while (0)
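
/*
 * Putting it together -- an illustrative sketch only (the struct, its fields
 * and the allocation helper below are made-up names, not an in-tree user):
 *
 *	struct foo {
 *		unsigned long state;
 *		kmemcheck_bitfield_begin(flags);
 *		unsigned int active:1,
 *			     pending:1;
 *		kmemcheck_bitfield_end(flags);
 *	};
 *
 *	static struct foo *foo_alloc(gfp_t gfp)
 *	{
 *		struct foo *f = kmalloc(sizeof(*f), gfp);
 *
 *		kmemcheck_annotate_bitfield(f, flags);
 *		return f;
 *	}
 *
 * kmemcheck_annotate_bitfield() accepts a NULL pointer, so the allocation
 * failure case needs no separate check before the call. Without the
 * annotation, the read-modify-write that a bitfield store compiles to may
 * read neighbouring, not-yet-written bits and trigger a spurious warning.
 */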

#endif /* LINUX_KMEMCHECK_H */