#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/kmemcheck.h>

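/*
 * Allocate one shadow page for each page of the allocation, remember the
 * shadow mapping in page->shadow, and then hide the real pages from the MMU
 * so that every access to them faults into kmemcheck for analysis.
 */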
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
	struct page *shadow;
	int pages;
	int i;

	pages = 1 << order;

	/*
	 * With kmemcheck enabled, we need to allocate a memory area for the
	 * shadow bits as well.
	 */
	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
	if (!shadow) {
		if (printk_ratelimit())
			pr_err("kmemcheck: failed to allocate shadow bitmap\n");
		return;
	}

	for (i = 0; i < pages; ++i)
		page[i].shadow = page_address(&shadow[i]);

	/*
	 * Mark it as non-present for the MMU so that our accesses to
	 * this memory will trigger a page fault and let us analyze
	 * the memory accesses.
	 */
	kmemcheck_hide_pages(page, pages);
}

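/*
 * Undo kmemcheck_alloc_shadow(): make the pages present again, clear the
 * page->shadow pointers, and free the shadow pages themselves.
 */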
void kmemcheck_free_shadow(struct page *page, int order)
{
	struct page *shadow;
	int pages;
	int i;

	if (!kmemcheck_page_is_tracked(page))
		return;

	pages = 1 << order;

	kmemcheck_show_pages(page, pages);

	shadow = virt_to_page(page[0].shadow);

	for (i = 0; i < pages; ++i)
		page[i].shadow = NULL;

	__free_pages(shadow, order);
}

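/*
 * Slab allocation hook: decide whether a freshly allocated object should
 * have its shadow marked as initialized (nothing will be reported) or as
 * uninitialized (so that reads of it can be flagged).
 */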
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size)
{
	if (unlikely(!object)) /* Skip object if allocation failed */
		return;

	/*
	 * Has already been memset(), which initializes the shadow for us
	 * as well.
	 */
	if (gfpflags & __GFP_ZERO)
		return;

	/* No need to initialize the shadow of a non-tracked slab. */
	if (s->flags & SLAB_NOTRACK)
		return;

	if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
		/*
		 * Allow notracked objects to be allocated from
		 * tracked caches. Note however that these objects
		 * will still get page faults on access, they just
		 * won't ever be flagged as uninitialized. If page
		 * faults are not acceptable, the slab cache itself
		 * should be marked NOTRACK.
		 */
		kmemcheck_mark_initialized(object, size);
	} else if (!s->ctor) {
		/*
		 * New objects should be marked uninitialized before
		 * they're returned to the caller.
		 */
		kmemcheck_mark_uninitialized(object, size);
	}
}

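/*
 * Slab free hook: mark the object as freed in the shadow, unless a
 * constructor or RCU-delayed freeing means the memory may legitimately
 * be touched again after this point.
 */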
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
	/* TODO: RCU freeing is unsupported for now; hide false positives. */
	if (!s->ctor && !(s->flags & SLAB_TYPESAFE_BY_RCU))
		kmemcheck_mark_freed(object, size);
}

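/*
 * Page allocator hook: set up shadow tracking for a freshly allocated
 * (non-highmem, tracked) block of pages and mark it initialized or
 * uninitialized depending on __GFP_ZERO.
 */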
void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
			       gfp_t gfpflags)
{
	int pages;

	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
		return;

	pages = 1 << order;

	/*
	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
	 * can become uninitialized by copying uninitialized memory
	 * into them.
	 */

	/* XXX: Can use zone->node for node? */
	kmemcheck_alloc_shadow(page, order, gfpflags, -1);

	if (gfpflags & __GFP_ZERO)
		kmemcheck_mark_initialized_pages(page, pages);
	else
		kmemcheck_mark_uninitialized_pages(page, pages);
}
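
/*
 * Usage sketch (illustrative, not part of the original file): a caller in
 * the page allocator would invoke the hook above once a fresh block of
 * pages has been handed out, roughly along the lines of:
 *
 *	page = alloc_pages_node(nid, gfp_flags, order);
 *	if (page)
 *		kmemcheck_pagealloc_alloc(page, order, gfp_flags);
 *
 * The hook itself skips highmem and __GFP_NOTRACK allocations, so callers
 * do not need to filter those cases first.
 */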