blob: 7b1d7bead7d93ceb32dfdd43a4bb3c771ac69551 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Vegard Nossumdfec0722008-04-04 00:51:41 +02002#ifndef LINUX_KMEMCHECK_H
3#define LINUX_KMEMCHECK_H
4
5#include <linux/mm_types.h>
6#include <linux/types.h>
7
8#ifdef CONFIG_KMEMCHECK
9extern int kmemcheck_enabled;
10
/* The slab-related functions. */

/*
 * Allocate/release the shadow pages that track the initialization state of
 * the 2^order pages starting at @page.
 * NOTE(review): exact shadow semantics live in the kmemcheck implementation;
 * confirm against mm/kmemcheck.c.
 */
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
void kmemcheck_free_shadow(struct page *page, int order);

/* Hooks called on slab object allocation/free to update shadow state. */
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size);
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);

/* Hook called by the page allocator for freshly allocated pages. */
void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
			       gfp_t gfpflags);

/* Make @n pages (in)visible to the tracking machinery. */
void kmemcheck_show_pages(struct page *p, unsigned int n);
void kmemcheck_hide_pages(struct page *p, unsigned int n);

/* Returns true iff @p currently has kmemcheck shadow tracking. */
bool kmemcheck_page_is_tracked(struct page *p);

/*
 * Set the shadow state of @n bytes at @address. The four states mirror an
 * object's lifecycle: unallocated -> uninitialized -> initialized -> freed.
 */
void kmemcheck_mark_unallocated(void *address, unsigned int n);
void kmemcheck_mark_uninitialized(void *address, unsigned int n);
void kmemcheck_mark_initialized(void *address, unsigned int n);
void kmemcheck_mark_freed(void *address, unsigned int n);

/* Page-granular variants of the byte-granular marking functions above. */
void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);

/*
 * NOTE(review): return convention (0 vs. nonzero) is not visible from this
 * header; verify against the definitions before relying on it.
 */
int kmemcheck_show_addr(unsigned long address);
int kmemcheck_hide_addr(unsigned long address);

/* Query whether the object at [@addr, @addr + @size) is fully initialized. */
bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);

/*
 * Bitfield annotations
 *
 * How to use: If you have a struct using bitfields, for example
 *
 *	struct a {
 *		int x:8, y:8;
 *	};
 *
 * then this should be rewritten as
 *
 *	struct a {
 *		kmemcheck_bitfield_begin(flags);
 *		int x:8, y:8;
 *		kmemcheck_bitfield_end(flags);
 *	};
 *
 * Now the "flags_begin" and "flags_end" members may be used to refer to the
 * beginning and end, respectively, of the bitfield (and things like
 * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
 * fields should be annotated:
 *
 *	struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
 *	kmemcheck_annotate_bitfield(a, flags);
 */
/*
 * Zero-length arrays (a GNU C extension) occupy no storage, so these markers
 * pin down the byte offsets bracketing the bitfield without changing the
 * struct's layout or size.
 */
#define kmemcheck_bitfield_begin(name)	\
	int name##_begin[0];

#define kmemcheck_bitfield_end(name)	\
	int name##_end[0];

70
/*
 * Mark the bitfield bracketed by name##_begin/name##_end in *(ptr) as
 * initialized. A NULL @ptr is tolerated and does nothing. The BUILD_BUG_ON
 * rejects a begin/end pair declared in the wrong order (negative span).
 *
 * Note: @ptr must be parenthesized before applying '!' -- a macro argument
 * such as "a ? b : c" would otherwise parse as "(!a) ? b : c".
 */
#define kmemcheck_annotate_bitfield(ptr, name)				\
	do {								\
		int _n;							\
									\
		if (!(ptr))						\
			break;						\
									\
		_n = (long) &((ptr)->name##_end)			\
			- (long) &((ptr)->name##_begin);		\
		BUILD_BUG_ON(_n < 0);					\
									\
		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
	} while (0)
84
/*
 * Mark an entire variable (e.g. a stack variable containing bitfields) as
 * initialized.
 *
 * Fix: the original definition ended with "} while (0) \" -- the stray
 * trailing backslash spliced the *next* source line into the macro during
 * translation phase 2. Harmless while the next line was blank, but a latent
 * trap for any future edit. The continuation is removed.
 */
#define kmemcheck_annotate_variable(var)				\
	do {								\
		kmemcheck_mark_initialized(&(var), sizeof(var));	\
	} while (0)
89
Vegard Nossumdfec0722008-04-04 00:51:41 +020090#else
#define kmemcheck_enabled 0

/*
 * CONFIG_KMEMCHECK is disabled: every hook compiles away to an empty static
 * inline function, so call sites need no #ifdefs of their own and the
 * compiler discards the calls entirely.
 */
static inline void
kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
}

static inline void
kmemcheck_free_shadow(struct page *page, int order)
{
}

static inline void
kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
		     size_t size)
{
}

static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
				       size_t size)
{
}

static inline void kmemcheck_pagealloc_alloc(struct page *p,
					     unsigned int order, gfp_t gfpflags)
{
}

118
/* Without kmemcheck there is no shadow state: no page is ever tracked. */
static inline bool kmemcheck_page_is_tracked(struct page *p)
{
	return false;
}
Vegard Nossumd7002852008-07-20 10:44:54 +0200123
/* Shadow-state transitions are no-ops when kmemcheck is compiled out. */
static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_freed(void *address, unsigned int n)
{
}
Vegard Nossumb1eeab62008-11-25 16:55:53 +0100139
/* Page-granular marking: likewise no-ops without CONFIG_KMEMCHECK. */
static inline void kmemcheck_mark_unallocated_pages(struct page *p,
						    unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
						      unsigned int n)
{
}

static inline void kmemcheck_mark_initialized_pages(struct page *p,
						    unsigned int n)
{
}

154
/*
 * With kmemcheck compiled out nothing is ever flagged uninitialized, so any
 * object unconditionally counts as fully initialized.
 */
static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
	(void)addr;	/* unused without tracking */
	(void)size;	/* unused without tracking */

	return true;
}
159
/*
 * The bitfield annotation API compiles to nothing: the begin/end markers
 * vanish from the struct and the annotate macros are empty statements.
 * Note the annotate macros deliberately do not evaluate their arguments.
 */
#define kmemcheck_bitfield_begin(name)
#define kmemcheck_bitfield_end(name)
#define kmemcheck_annotate_bitfield(ptr, name)	\
	do {					\
	} while (0)

#define kmemcheck_annotate_variable(var)	\
	do {					\
	} while (0)

169
170#endif /* CONFIG_KMEMCHECK */
Vegard Nossumfc7d0c92008-08-30 12:16:05 +0200171
Vegard Nossumdfec0722008-04-04 00:51:41 +0200172#endif /* LINUX_KMEMCHECK_H */