blob: a2f6a4e0b2dd6040687023509d778331ff3fdaa4 [file] [log] [blame]
Akinobu Mita6a11f752009-03-31 15:23:17 -07001#include <linux/kernel.h>
Akinobu Mita8c5fb8e2011-10-31 17:08:10 -07002#include <linux/string.h>
Akinobu Mita6a11f752009-03-31 15:23:17 -07003#include <linux/mm.h>
Akinobu Mita64212ec2011-10-31 17:08:38 -07004#include <linux/highmem.h>
Joonsoo Kime30825f2014-12-12 16:55:49 -08005#include <linux/page_ext.h>
Akinobu Mita6a11f752009-03-31 15:23:17 -07006#include <linux/poison.h>
Akinobu Mita77311132011-10-31 17:08:05 -07007#include <linux/ratelimit.h>
Akinobu Mita6a11f752009-03-31 15:23:17 -07008
/*
 * Runtime switch for page poisoning.  Defaults to the Kconfig choice
 * (CONFIG_PAGE_POISONING_ENABLE_DEFAULT) and can be overridden at boot
 * via the "page_poison" kernel parameter parsed below.
 */
static bool want_page_poisoning __read_mostly
	= IS_ENABLED(CONFIG_PAGE_POISONING_ENABLE_DEFAULT);

/*
 * Early boot-parameter handler for "page_poison=".  Accepts any value
 * understood by strtobool() ("on"/"off"/"1"/"0"/...); returns -EINVAL
 * for a missing value, otherwise strtobool()'s result.
 */
static int early_page_poison_param(char *buf)
{
	if (!buf)
		return -EINVAL;
	return strtobool(buf, &want_page_poisoning);
}
early_param("page_poison", early_page_poison_param);
19
20bool page_poisoning_enabled(void)
21{
Laura Abbott8823b1d2016-03-15 14:56:27 -070022 /*
Vinayak Menon92821682017-03-31 11:13:06 +110023 * Assumes that debug_pagealloc_enabled is set before
24 * free_all_bootmem.
25 * Page poisoning is debug page alloc for some arches. If
26 * either of those options are enabled, enable poisoning.
Laura Abbott8823b1d2016-03-15 14:56:27 -070027 */
Vinayak Menon92821682017-03-31 11:13:06 +110028 return (want_page_poisoning ||
29 (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
30 debug_pagealloc_enabled()));
Akinobu Mita6a11f752009-03-31 15:23:17 -070031}
32
Akinobu Mita6a11f752009-03-31 15:23:17 -070033static void poison_page(struct page *page)
34{
Akinobu Mita64212ec2011-10-31 17:08:38 -070035 void *addr = kmap_atomic(page);
Akinobu Mita6a11f752009-03-31 15:23:17 -070036
Akinobu Mita6a11f752009-03-31 15:23:17 -070037 memset(addr, PAGE_POISON, PAGE_SIZE);
Akinobu Mita64212ec2011-10-31 17:08:38 -070038 kunmap_atomic(addr);
Akinobu Mita6a11f752009-03-31 15:23:17 -070039}
40
41static void poison_pages(struct page *page, int n)
42{
43 int i;
44
45 for (i = 0; i < n; i++)
46 poison_page(page + i);
47}
48
/*
 * Return true when @a and @b differ in exactly one bit, i.e. the
 * corruption looks like a single flipped bit rather than wider damage.
 */
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char diff = a ^ b;

	/* Non-zero and a power of two <=> exactly one bit set. */
	return diff != 0 && (diff & (diff - 1)) == 0;
}
55
/*
 * Verify that @bytes bytes at @mem (a mapping of @page) still hold the
 * PAGE_POISON pattern.  On a mismatch, report the corrupted span
 * (rate-limited), hex-dump it, optionally panic, and dump the stack.
 */
static void check_poison_mem(struct page *page,
			unsigned char *mem, size_t bytes)
{
	/* Cap reports to 10 per 5-second window to avoid log flooding. */
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	/* Sanity checking deliberately compiled out in this config. */
	if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
		return;

	/* First byte that deviates from the poison pattern, if any. */
	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	/* Walk back from the tail to find the last corrupted byte. */
	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		/* Exactly one byte off by one bit: likely a bit flip. */
		pr_err("pagealloc: single bit error on page with phys start 0x%lx\n",
			(unsigned long)page_to_phys(page));
	else
		pr_err("pagealloc: memory corruption on page with phys start 0x%lx\n",
			(unsigned long)page_to_phys(page));

	/* Dump the inclusive corrupted range [start, end]. */
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	/* Vendor knob: treat detected corruption as fatal when set. */
	BUG_ON(PANIC_CORRUPTION);
	dump_stack();
}
89
Akinobu Mita6a11f752009-03-31 15:23:17 -070090static void unpoison_page(struct page *page)
91{
Akinobu Mita64212ec2011-10-31 17:08:38 -070092 void *addr;
Akinobu Mita6a11f752009-03-31 15:23:17 -070093
Akinobu Mita64212ec2011-10-31 17:08:38 -070094 addr = kmap_atomic(page);
Vinayak Menon92821682017-03-31 11:13:06 +110095 /*
96 * Page poisoning when enabled poisons each and every page
97 * that is freed to buddy. Thus no extra check is done to
98 * see if a page was posioned.
99 */
Prasad Sodagudi87f42022016-02-25 13:01:18 +0530100 check_poison_mem(page, addr, PAGE_SIZE);
Akinobu Mita64212ec2011-10-31 17:08:38 -0700101 kunmap_atomic(addr);
Akinobu Mita6a11f752009-03-31 15:23:17 -0700102}
103
104static void unpoison_pages(struct page *page, int n)
105{
106 int i;
107
108 for (i = 0; i < n; i++)
109 unpoison_page(page + i);
110}
111
/*
 * Hook called from the page allocator: poison @numpages pages on free
 * (@enable == 0) or verify their pattern on allocation (@enable != 0).
 * No-op when poisoning is disabled.
 */
void kernel_poison_pages(struct page *page, int numpages, int enable)
{
	if (!page_poisoning_enabled())
		return;

	if (!enable)
		poison_pages(page, numpages);
	else
		unpoison_pages(page, numpages);
}
Laura Abbott8823b1d2016-03-15 14:56:27 -0700122
#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
/*
 * Stub for arches without native debug_pagealloc support: the mapping
 * state never changes here; poisoning above provides the debug checks.
 */
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing, all work is done via poison pages */
}
#endif