blob: 0abd75e49b403745aedcc91ef2e9a03a74a6ff2c [file] [log] [blame]
Akinobu Mita6a11f752009-03-31 15:23:17 -07001#include <linux/kernel.h>
Akinobu Mita8c5fb8e2011-10-31 17:08:10 -07002#include <linux/string.h>
Akinobu Mita6a11f752009-03-31 15:23:17 -07003#include <linux/mm.h>
Akinobu Mita64212ec2011-10-31 17:08:38 -07004#include <linux/highmem.h>
Joonsoo Kime30825f2014-12-12 16:55:49 -08005#include <linux/page_ext.h>
Akinobu Mita6a11f752009-03-31 15:23:17 -07006#include <linux/poison.h>
Akinobu Mita77311132011-10-31 17:08:05 -07007#include <linux/ratelimit.h>
Akinobu Mita6a11f752009-03-31 15:23:17 -07008
/* True once init_page_poisoning() decides poisoning should actually run. */
static bool __page_poisoning_enabled __read_mostly;
/* Request flag set by the "page_poison=" early boot parameter. */
static bool want_page_poisoning __read_mostly;
11
12static int early_page_poison_param(char *buf)
13{
14 if (!buf)
15 return -EINVAL;
Minfei Huang2a138dc2016-05-20 16:58:13 -070016 return strtobool(buf, &want_page_poisoning);
Laura Abbott8823b1d2016-03-15 14:56:27 -070017}
18early_param("page_poison", early_page_poison_param);
19
/* Report whether init_page_poisoning() armed the poisoning hooks. */
bool page_poisoning_enabled(void)
{
	return __page_poisoning_enabled;
}
Joonsoo Kime30825f2014-12-12 16:55:49 -080024
/* page_ext ->need hook: allocate page_ext data iff poisoning was requested. */
static bool need_page_poisoning(void)
{
	return want_page_poisoning;
}
29
30static void init_page_poisoning(void)
31{
Laura Abbott8823b1d2016-03-15 14:56:27 -070032 /*
33 * page poisoning is debug page alloc for some arches. If either
34 * of those options are enabled, enable poisoning
35 */
36 if (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC)) {
37 if (!want_page_poisoning && !debug_pagealloc_enabled())
38 return;
39 } else {
40 if (!want_page_poisoning)
41 return;
42 }
Joonsoo Kim031bc572014-12-12 16:55:52 -080043
Laura Abbott8823b1d2016-03-15 14:56:27 -070044 __page_poisoning_enabled = true;
Joonsoo Kime30825f2014-12-12 16:55:49 -080045}
46
/* Registers the poisoning hooks with the page_ext framework. */
struct page_ext_operations page_poisoning_ops = {
	.need = need_page_poisoning,
	.init = init_page_poisoning,
};
51
Akinobu Mita6a11f752009-03-31 15:23:17 -070052static inline void set_page_poison(struct page *page)
53{
Joonsoo Kime30825f2014-12-12 16:55:49 -080054 struct page_ext *page_ext;
55
56 page_ext = lookup_page_ext(page);
Yang Shif86e4272016-06-03 14:55:38 -070057 if (unlikely(!page_ext))
58 return;
59
Joonsoo Kime30825f2014-12-12 16:55:49 -080060 __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
Akinobu Mita6a11f752009-03-31 15:23:17 -070061}
62
63static inline void clear_page_poison(struct page *page)
64{
Joonsoo Kime30825f2014-12-12 16:55:49 -080065 struct page_ext *page_ext;
66
67 page_ext = lookup_page_ext(page);
Yang Shif86e4272016-06-03 14:55:38 -070068 if (unlikely(!page_ext))
69 return;
70
Joonsoo Kime30825f2014-12-12 16:55:49 -080071 __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
Akinobu Mita6a11f752009-03-31 15:23:17 -070072}
73
Laura Abbott1414c7f2016-03-15 14:56:30 -070074bool page_is_poisoned(struct page *page)
Akinobu Mita6a11f752009-03-31 15:23:17 -070075{
Joonsoo Kime30825f2014-12-12 16:55:49 -080076 struct page_ext *page_ext;
77
78 page_ext = lookup_page_ext(page);
Yang Shif86e4272016-06-03 14:55:38 -070079 if (unlikely(!page_ext))
Laura Abbott1414c7f2016-03-15 14:56:30 -070080 return false;
81
Joonsoo Kime30825f2014-12-12 16:55:49 -080082 return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
Akinobu Mita6a11f752009-03-31 15:23:17 -070083}
84
Akinobu Mita6a11f752009-03-31 15:23:17 -070085static void poison_page(struct page *page)
86{
Akinobu Mita64212ec2011-10-31 17:08:38 -070087 void *addr = kmap_atomic(page);
Akinobu Mita6a11f752009-03-31 15:23:17 -070088
Akinobu Mita6a11f752009-03-31 15:23:17 -070089 set_page_poison(page);
Akinobu Mita6a11f752009-03-31 15:23:17 -070090 memset(addr, PAGE_POISON, PAGE_SIZE);
Akinobu Mita64212ec2011-10-31 17:08:38 -070091 kunmap_atomic(addr);
Akinobu Mita6a11f752009-03-31 15:23:17 -070092}
93
94static void poison_pages(struct page *page, int n)
95{
96 int i;
97
98 for (i = 0; i < n; i++)
99 poison_page(page + i);
100}
101
/*
 * True iff @a and @b differ in exactly one bit, i.e. their XOR is a
 * nonzero power of two.
 */
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char diff = a ^ b;

	return diff != 0 && (diff & (diff - 1)) == 0;
}
108
Prasad Sodagudi87f42022016-02-25 13:01:18 +0530109static void check_poison_mem(struct page *page,
110 unsigned char *mem, size_t bytes)
Akinobu Mita6a11f752009-03-31 15:23:17 -0700111{
Akinobu Mita77311132011-10-31 17:08:05 -0700112 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
Akinobu Mita6a11f752009-03-31 15:23:17 -0700113 unsigned char *start;
114 unsigned char *end;
115
Laura Abbott8823b1d2016-03-15 14:56:27 -0700116 if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
117 return;
118
Akinobu Mita8c5fb8e2011-10-31 17:08:10 -0700119 start = memchr_inv(mem, PAGE_POISON, bytes);
120 if (!start)
Akinobu Mita6a11f752009-03-31 15:23:17 -0700121 return;
122
123 for (end = mem + bytes - 1; end > start; end--) {
124 if (*end != PAGE_POISON)
125 break;
126 }
127
Akinobu Mita77311132011-10-31 17:08:05 -0700128 if (!__ratelimit(&ratelimit))
Akinobu Mita6a11f752009-03-31 15:23:17 -0700129 return;
130 else if (start == end && single_bit_flip(*start, PAGE_POISON))
Prasad Sodagudi87f42022016-02-25 13:01:18 +0530131 pr_err("pagealloc: single bit error on page with phys start 0x%lx\n",
132 (unsigned long)page_to_phys(page));
Akinobu Mita6a11f752009-03-31 15:23:17 -0700133 else
Prasad Sodagudi87f42022016-02-25 13:01:18 +0530134 pr_err("pagealloc: memory corruption on page with phys start 0x%lx\n",
135 (unsigned long)page_to_phys(page));
Akinobu Mita6a11f752009-03-31 15:23:17 -0700136
137 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
138 end - start + 1, 1);
Prasad Sodagudi6ba925c2016-02-25 12:57:06 +0530139 BUG_ON(PANIC_CORRUPTION);
Akinobu Mita6a11f752009-03-31 15:23:17 -0700140 dump_stack();
141}
142
Akinobu Mita6a11f752009-03-31 15:23:17 -0700143static void unpoison_page(struct page *page)
144{
Akinobu Mita64212ec2011-10-31 17:08:38 -0700145 void *addr;
Akinobu Mita6a11f752009-03-31 15:23:17 -0700146
Laura Abbott1414c7f2016-03-15 14:56:30 -0700147 if (!page_is_poisoned(page))
Akinobu Mita64212ec2011-10-31 17:08:38 -0700148 return;
149
150 addr = kmap_atomic(page);
Prasad Sodagudi87f42022016-02-25 13:01:18 +0530151 check_poison_mem(page, addr, PAGE_SIZE);
Akinobu Mita64212ec2011-10-31 17:08:38 -0700152 clear_page_poison(page);
153 kunmap_atomic(addr);
Akinobu Mita6a11f752009-03-31 15:23:17 -0700154}
155
156static void unpoison_pages(struct page *page, int n)
157{
158 int i;
159
160 for (i = 0; i < n; i++)
161 unpoison_page(page + i);
162}
163
/*
 * Page allocator hook: poison pages on free (@enable == 0) and check/clear
 * the poison on allocation (@enable != 0). Does nothing unless poisoning
 * was enabled at boot.
 */
void kernel_poison_pages(struct page *page, int numpages, int enable)
{
	if (page_poisoning_enabled()) {
		if (enable)
			unpoison_pages(page, numpages);
		else
			poison_pages(page, numpages);
	}
}
Laura Abbott8823b1d2016-03-15 14:56:27 -0700174
#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
/*
 * Stub for arches without native debug_pagealloc map/unmap support:
 * poisoning (driven from kernel_poison_pages()) stands in for actual
 * mapping changes, so there is nothing to do here.
 */
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing, all work is done via poison pages */
}
#endif