#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/kmemcheck.h>

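/*
 * Allocate one shadow page for each page in an order-@order block and
 * link them up via page->shadow. The shadow pages hold kmemcheck's
 * tracking state for the data pages. Once the shadow is in place, the
 * data pages are hidden (marked non-present) so that accesses to them
 * fault into kmemcheck for checking.
 */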
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
	struct page *shadow;
	int pages;
	int i;

	pages = 1 << order;

	/*
	 * With kmemcheck enabled, we need to allocate a memory area for the
	 * shadow bits as well.
	 */
	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
	if (!shadow) {
		if (printk_ratelimit())
			printk(KERN_ERR "kmemcheck: failed to allocate "
				"shadow bitmap\n");
		return;
	}

	for (i = 0; i < pages; ++i)
		page[i].shadow = page_address(&shadow[i]);

	/*
	 * Mark it as non-present for the MMU so that our accesses to
	 * this memory will trigger a page fault and let us analyze
	 * the memory accesses.
	 */
	kmemcheck_hide_pages(page, pages);
}

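/*
 * Tear down the shadow for an order-@order block: make the data pages
 * visible to the MMU again, clear the page->shadow links and free the
 * shadow pages themselves. Pages that were never tracked are left
 * alone.
 */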
void kmemcheck_free_shadow(struct page *page, int order)
{
	struct page *shadow;
	int pages;
	int i;

	if (!kmemcheck_page_is_tracked(page))
		return;

	pages = 1 << order;

	kmemcheck_show_pages(page, pages);

	shadow = virt_to_page(page[0].shadow);

	for (i = 0; i < pages; ++i)
		page[i].shadow = NULL;

	__free_pages(shadow, order);
}

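/*
 * Set the initial shadow state for a freshly allocated slab object:
 * zeroed allocations are already initialized, non-tracked allocations
 * from tracked caches are force-marked initialized so they never
 * trigger warnings, and everything else starts out uninitialized.
 * Objects with a constructor are left as-is; they keep whatever state
 * the constructor established.
 */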
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size)
{
	/*
	 * Has already been memset(), which initializes the shadow for us
	 * as well.
	 */
	if (gfpflags & __GFP_ZERO)
		return;

	/* No need to initialize the shadow of a non-tracked slab. */
	if (s->flags & SLAB_NOTRACK)
		return;

	if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
		/*
		 * Allow notracked objects to be allocated from
		 * tracked caches. Note however that these objects
		 * will still get page faults on access, they just
		 * won't ever be flagged as uninitialized. If page
		 * faults are not acceptable, the slab cache itself
		 * should be marked NOTRACK.
		 */
		kmemcheck_mark_initialized(object, size);
	} else if (!s->ctor) {
		/*
		 * New objects should be marked uninitialized before
		 * they're returned to the caller.
		 */
		kmemcheck_mark_uninitialized(object, size);
	}
}

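/*
 * Mark a slab object as freed in the shadow so that later reads of it
 * can be reported as use-after-free. Constructed objects are skipped
 * (they are presumed to stay in their constructed state while on the
 * freelist), as are objects from SLAB_DESTROY_BY_RCU caches, where
 * reads after kmem_cache_free() can be legal; marking either as freed
 * would produce false positives.
 */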
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
	/* TODO: RCU freeing is unsupported for now; hide false positives. */
	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
		kmemcheck_mark_freed(object, size);
}

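/*
 * Page allocator hook: set up shadow pages for a freshly allocated
 * block and mark them initialized or uninitialized depending on
 * whether __GFP_ZERO was requested. Highmem allocations and those
 * marked __GFP_NOTRACK are not tracked at all.
 */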
void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
			       gfp_t gfpflags)
{
	int pages;

	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
		return;

	pages = 1 << order;

	/*
	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
	 * can become uninitialized by copying uninitialized memory
	 * into them.
	 */

	/* XXX: Can use zone->node for node? */
	kmemcheck_alloc_shadow(page, order, gfpflags, -1);

	if (gfpflags & __GFP_ZERO)
		kmemcheck_mark_initialized_pages(page, pages);
	else
		kmemcheck_mark_uninitialized_pages(page, pages);
}