#ifndef LINUX_KMEMCHECK_H
#define LINUX_KMEMCHECK_H

#include <linux/mm_types.h>
#include <linux/types.h>

#ifdef CONFIG_KMEMCHECK
extern int kmemcheck_enabled;

/* The slab-related functions. */
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
void kmemcheck_free_shadow(struct page *page, int order);
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size);
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);

void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
			       gfp_t gfpflags);

void kmemcheck_show_pages(struct page *p, unsigned int n);
void kmemcheck_hide_pages(struct page *p, unsigned int n);

bool kmemcheck_page_is_tracked(struct page *p);

void kmemcheck_mark_unallocated(void *address, unsigned int n);
void kmemcheck_mark_uninitialized(void *address, unsigned int n);
void kmemcheck_mark_initialized(void *address, unsigned int n);
void kmemcheck_mark_freed(void *address, unsigned int n);

void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);

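/*
 * The kmemcheck_mark_*() helpers above can also be used to annotate memory
 * whose contents kmemcheck cannot see being written, e.g. a buffer that is
 * filled in by hardware or firmware. A hypothetical example, where
 * read_from_device() stands in for whatever fills the buffer and "buf"/"len"
 * are the caller's own buffer and length:
 *
 *	read_from_device(buf, len);
 *	kmemcheck_mark_initialized(buf, len);
 *
 * After the annotation, reads of the buffer are no longer reported as uses
 * of uninitialized memory.
 */
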
int kmemcheck_show_addr(unsigned long address);
int kmemcheck_hide_addr(unsigned long address);

#else
#define kmemcheck_enabled 0

static inline void
kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
}

static inline void
kmemcheck_free_shadow(struct page *page, int order)
{
}

static inline void
kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
		     size_t size)
{
}

static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
				       size_t size)
{
}

static inline void kmemcheck_pagealloc_alloc(struct page *p,
					     unsigned int order, gfp_t gfpflags)
{
}

static inline bool kmemcheck_page_is_tracked(struct page *p)
{
	return false;
}

static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_freed(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_unallocated_pages(struct page *p,
						    unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
						      unsigned int n)
{
}

static inline void kmemcheck_mark_initialized_pages(struct page *p,
						    unsigned int n)
{
}

#endif /* CONFIG_KMEMCHECK */

/*
 * Bitfield annotations
 *
 * How to use: If you have a struct using bitfields, for example
 *
 *	struct a {
 *		int x:8, y:8;
 *	};
 *
 * then this should be rewritten as
 *
 *	struct a {
 *		kmemcheck_bitfield_begin(flags);
 *		int x:8, y:8;
 *		kmemcheck_bitfield_end(flags);
 *	};
 *
 * Now the "flags_begin" and "flags_end" members may be used to refer to the
 * beginning and end, respectively, of the bitfield (and things like
 * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
 * fields should be annotated:
 *
 *	struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
 *	kmemcheck_annotate_bitfield(a, flags);
 *
 * Note: We provide the same definitions for both kmemcheck and non-
 * kmemcheck kernels. This makes it harder to introduce accidental errors. It
 * is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield().
 */
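/*
 * The begin/end markers defined below are zero-sized arrays: they take up
 * no space of their own inside the struct, but provide addressable
 * positions from which the size of the bitfield region can be computed.
 */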
#define kmemcheck_bitfield_begin(name)	\
	int name##_begin[0];

#define kmemcheck_bitfield_end(name)	\
	int name##_end[0];

#define kmemcheck_annotate_bitfield(ptr, name)				\
	do if (ptr) {							\
		int _n = (long) &((ptr)->name##_end)			\
			- (long) &((ptr)->name##_begin);		\
		BUILD_BUG_ON(_n < 0);					\
									\
		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
	} while (0)

#define kmemcheck_annotate_variable(var)				\
	do {								\
		kmemcheck_mark_initialized(&(var), sizeof(var));	\
	} while (0)

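/*
 * kmemcheck_annotate_variable() marks an entire variable as initialized.
 * A hypothetical use, reusing "struct a" from the example above:
 *
 *	struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
 *	kmemcheck_annotate_variable(*a);
 *
 * which marks all sizeof(struct a) bytes of the object as initialized.
 */
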
#endif /* LINUX_KMEMCHECK_H */