Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Heiko Carstens | e76e82d | 2012-10-04 14:46:12 +0200 | [diff] [blame] | 2 | #include <linux/seq_file.h> |
| 3 | #include <linux/debugfs.h> |
Heiko Carstens | 549f2bf | 2017-02-13 15:20:18 +0100 | [diff] [blame] | 4 | #include <linux/sched.h> |
Heiko Carstens | e76e82d | 2012-10-04 14:46:12 +0200 | [diff] [blame] | 5 | #include <linux/mm.h> |
| 6 | #include <asm/sections.h> |
| 7 | #include <asm/pgtable.h> |
| 8 | |
/* Highest virtual address reachable via the kernel ASCE; set in pt_dump_init(). */
static unsigned long max_addr;
| 10 | |
/* One named boundary in the kernel address space, printed as a section header. */
struct addr_marker {
	unsigned long start_address;	/* first address of the region */
	const char *name;		/* label printed as "---[ name ]---" */
};
| 15 | |
/*
 * Indices into address_markers[]; must stay in ascending address order
 * since note_page() advances through the array monotonically.
 */
enum address_markers_idx {
	IDENTITY_NR = 0,
	KERNEL_START_NR,
	KERNEL_END_NR,
	VMEMMAP_NR,
	VMALLOC_NR,
	MODULES_NR,
};
| 24 | |
/*
 * Region table for the dump. Entries with start_address 0 here are
 * filled in at runtime by pt_dump_init(). The trailing { -1, NULL }
 * sentinel (start_address == ULONG_MAX) stops marker advancement.
 */
static struct addr_marker address_markers[] = {
	[IDENTITY_NR] = {0, "Identity Mapping"},
	[KERNEL_START_NR] = {(unsigned long)&_stext, "Kernel Image Start"},
	[KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"},
	[VMEMMAP_NR] = {0, "vmemmap Area"},
	[VMALLOC_NR] = {0, "vmalloc Area"},
	[MODULES_NR] = {0, "Modules Area"},
	{ -1, NULL }
};
| 34 | |
/* Walker state: tracks the current run of identically-mapped pages. */
struct pg_state {
	int level;			/* table level of the run; 0 = no run started */
	unsigned int current_prot;	/* protection flags of the current run */
	unsigned long start_address;	/* first address of the current run */
	unsigned long current_address;	/* address currently being examined */
	const struct addr_marker *marker;	/* next region header to print */
};
| 42 | |
| 43 | static void print_prot(struct seq_file *m, unsigned int pr, int level) |
| 44 | { |
| 45 | static const char * const level_name[] = |
| 46 | { "ASCE", "PGD", "PUD", "PMD", "PTE" }; |
| 47 | |
| 48 | seq_printf(m, "%s ", level_name[level]); |
Heiko Carstens | 1819ed1 | 2013-02-16 11:47:27 +0100 | [diff] [blame] | 49 | if (pr & _PAGE_INVALID) { |
Heiko Carstens | e76e82d | 2012-10-04 14:46:12 +0200 | [diff] [blame] | 50 | seq_printf(m, "I\n"); |
Heiko Carstens | 1819ed1 | 2013-02-16 11:47:27 +0100 | [diff] [blame] | 51 | return; |
| 52 | } |
Martin Schwidefsky | 57d7f93 | 2016-03-22 10:54:24 +0100 | [diff] [blame] | 53 | seq_puts(m, (pr & _PAGE_PROTECT) ? "RO " : "RW "); |
| 54 | seq_puts(m, (pr & _PAGE_NOEXEC) ? "NX\n" : "X\n"); |
Heiko Carstens | e76e82d | 2012-10-04 14:46:12 +0200 | [diff] [blame] | 55 | } |
| 56 | |
/*
 * Fold one entry into the walk state; when the run of identical mappings
 * ends (protection, level or region boundary changes), print one summary
 * line for the finished range and start a new run.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
		      unsigned int new_prot, int level)
{
	static const char units[] = "KMGTPE";
	int width = sizeof(unsigned long) * 2;	/* hex digits per address */
	const char *unit = units;
	unsigned int prot, cur;
	unsigned long delta;

	/*
	 * If we have a "break" in the series, we need to flush the state
	 * that we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	prot = new_prot;
	cur = st->current_prot;

	if (!st->level) {
		/* First entry: initialize state and print the first header. */
		st->current_prot = new_prot;
		st->level = level;
		st->marker = address_markers;
		seq_printf(m, "---[ %s ]---\n", st->marker->name);
	} else if (prot != cur || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		/* Print the actual finished series */
		seq_printf(m, "0x%0*lx-0x%0*lx",
			   width, st->start_address,
			   width, st->current_address);
		delta = (st->current_address - st->start_address) >> 10;
		/* Scale the size to the largest unit that divides it evenly. */
		while (!(delta & 0x3ff) && unit[1]) {
			delta >>= 10;
			unit++;
		}
		seq_printf(m, "%9lu%c ", delta, *unit);
		print_prot(m, st->current_prot, st->level);
		/* Crossed a region boundary: advance and print its header. */
		if (st->current_address >= st->marker[1].start_address) {
			st->marker++;
			seq_printf(m, "---[ %s ]---\n", st->marker->name);
		}
		/* Begin the next run at the current address. */
		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->level = level;
	}
}
| 102 | |
| 103 | /* |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 104 | * The actual page table walker functions. In order to keep the |
| 105 | * implementation of print_prot() short, we only check and pass |
| 106 | * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region, |
| 107 | * segment or page table entry is invalid or read-only. |
| 108 | * After all it's just a hint that the current level being walked |
| 109 | * contains an invalid or read-only entry. |
Heiko Carstens | e76e82d | 2012-10-04 14:46:12 +0200 | [diff] [blame] | 110 | */ |
| 111 | static void walk_pte_level(struct seq_file *m, struct pg_state *st, |
| 112 | pmd_t *pmd, unsigned long addr) |
| 113 | { |
| 114 | unsigned int prot; |
| 115 | pte_t *pte; |
| 116 | int i; |
| 117 | |
| 118 | for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) { |
| 119 | st->current_address = addr; |
| 120 | pte = pte_offset_kernel(pmd, addr); |
Martin Schwidefsky | 57d7f93 | 2016-03-22 10:54:24 +0100 | [diff] [blame] | 121 | prot = pte_val(*pte) & |
| 122 | (_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC); |
Heiko Carstens | e76e82d | 2012-10-04 14:46:12 +0200 | [diff] [blame] | 123 | note_page(m, st, prot, 4); |
| 124 | addr += PAGE_SIZE; |
| 125 | } |
| 126 | } |
| 127 | |
| 128 | static void walk_pmd_level(struct seq_file *m, struct pg_state *st, |
| 129 | pud_t *pud, unsigned long addr) |
| 130 | { |
| 131 | unsigned int prot; |
| 132 | pmd_t *pmd; |
| 133 | int i; |
| 134 | |
| 135 | for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) { |
| 136 | st->current_address = addr; |
| 137 | pmd = pmd_offset(pud, addr); |
| 138 | if (!pmd_none(*pmd)) { |
| 139 | if (pmd_large(*pmd)) { |
Martin Schwidefsky | 57d7f93 | 2016-03-22 10:54:24 +0100 | [diff] [blame] | 140 | prot = pmd_val(*pmd) & |
| 141 | (_SEGMENT_ENTRY_PROTECT | |
| 142 | _SEGMENT_ENTRY_NOEXEC); |
Heiko Carstens | e76e82d | 2012-10-04 14:46:12 +0200 | [diff] [blame] | 143 | note_page(m, st, prot, 3); |
| 144 | } else |
| 145 | walk_pte_level(m, st, pmd, addr); |
| 146 | } else |
| 147 | note_page(m, st, _PAGE_INVALID, 3); |
| 148 | addr += PMD_SIZE; |
| 149 | } |
| 150 | } |
| 151 | |
| 152 | static void walk_pud_level(struct seq_file *m, struct pg_state *st, |
Martin Schwidefsky | 1aea9b3 | 2017-04-24 18:19:10 +0200 | [diff] [blame] | 153 | p4d_t *p4d, unsigned long addr) |
Heiko Carstens | e76e82d | 2012-10-04 14:46:12 +0200 | [diff] [blame] | 154 | { |
Heiko Carstens | 18da236 | 2012-10-08 09:18:26 +0200 | [diff] [blame] | 155 | unsigned int prot; |
Heiko Carstens | e76e82d | 2012-10-04 14:46:12 +0200 | [diff] [blame] | 156 | pud_t *pud; |
| 157 | int i; |
| 158 | |
| 159 | for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) { |
| 160 | st->current_address = addr; |
Martin Schwidefsky | 1aea9b3 | 2017-04-24 18:19:10 +0200 | [diff] [blame] | 161 | pud = pud_offset(p4d, addr); |
Heiko Carstens | e76e82d | 2012-10-04 14:46:12 +0200 | [diff] [blame] | 162 | if (!pud_none(*pud)) |
Heiko Carstens | 18da236 | 2012-10-08 09:18:26 +0200 | [diff] [blame] | 163 | if (pud_large(*pud)) { |
Martin Schwidefsky | 57d7f93 | 2016-03-22 10:54:24 +0100 | [diff] [blame] | 164 | prot = pud_val(*pud) & |
| 165 | (_REGION_ENTRY_PROTECT | |
| 166 | _REGION_ENTRY_NOEXEC); |
Heiko Carstens | 18da236 | 2012-10-08 09:18:26 +0200 | [diff] [blame] | 167 | note_page(m, st, prot, 2); |
| 168 | } else |
| 169 | walk_pmd_level(m, st, pud, addr); |
Heiko Carstens | e76e82d | 2012-10-04 14:46:12 +0200 | [diff] [blame] | 170 | else |
| 171 | note_page(m, st, _PAGE_INVALID, 2); |
| 172 | addr += PUD_SIZE; |
| 173 | } |
| 174 | } |
| 175 | |
Martin Schwidefsky | 1aea9b3 | 2017-04-24 18:19:10 +0200 | [diff] [blame] | 176 | static void walk_p4d_level(struct seq_file *m, struct pg_state *st, |
| 177 | pgd_t *pgd, unsigned long addr) |
| 178 | { |
| 179 | p4d_t *p4d; |
| 180 | int i; |
| 181 | |
| 182 | for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) { |
| 183 | st->current_address = addr; |
| 184 | p4d = p4d_offset(pgd, addr); |
| 185 | if (!p4d_none(*p4d)) |
| 186 | walk_pud_level(m, st, p4d, addr); |
| 187 | else |
| 188 | note_page(m, st, _PAGE_INVALID, 2); |
| 189 | addr += P4D_SIZE; |
| 190 | } |
| 191 | } |
| 192 | |
Heiko Carstens | e76e82d | 2012-10-04 14:46:12 +0200 | [diff] [blame] | 193 | static void walk_pgd_level(struct seq_file *m) |
| 194 | { |
| 195 | unsigned long addr = 0; |
| 196 | struct pg_state st; |
| 197 | pgd_t *pgd; |
| 198 | int i; |
| 199 | |
| 200 | memset(&st, 0, sizeof(st)); |
| 201 | for (i = 0; i < PTRS_PER_PGD && addr < max_addr; i++) { |
| 202 | st.current_address = addr; |
| 203 | pgd = pgd_offset_k(addr); |
| 204 | if (!pgd_none(*pgd)) |
Martin Schwidefsky | 1aea9b3 | 2017-04-24 18:19:10 +0200 | [diff] [blame] | 205 | walk_p4d_level(m, &st, pgd, addr); |
Heiko Carstens | e76e82d | 2012-10-04 14:46:12 +0200 | [diff] [blame] | 206 | else |
| 207 | note_page(m, &st, _PAGE_INVALID, 1); |
| 208 | addr += PGDIR_SIZE; |
Heiko Carstens | 549f2bf | 2017-02-13 15:20:18 +0100 | [diff] [blame] | 209 | cond_resched(); |
Heiko Carstens | e76e82d | 2012-10-04 14:46:12 +0200 | [diff] [blame] | 210 | } |
| 211 | /* Flush out the last page */ |
| 212 | st.current_address = max_addr; |
| 213 | note_page(m, &st, 0, 0); |
| 214 | } |
| 215 | |
/* seq_file show callback: emit the whole kernel page table dump. */
static int ptdump_show(struct seq_file *m, void *v)
{
	walk_pgd_level(m);
	return 0;
}
| 221 | |
| 222 | static int ptdump_open(struct inode *inode, struct file *filp) |
| 223 | { |
| 224 | return single_open(filp, ptdump_show, NULL); |
| 225 | } |
| 226 | |
/* File operations for the debugfs entry; standard single_open boilerplate. */
static const struct file_operations ptdump_fops = {
	.open		= ptdump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
| 233 | |
static int pt_dump_init(void)
{
	/*
	 * Figure out the maximum virtual address being accessible with the
	 * kernel ASCE. We need this to keep the page table walker functions
	 * from accessing non-existent entries.
	 */
	/*
	 * NOTE(review): the designation-type bits of the kernel ASCE appear
	 * to select the top-level table type, and each additional region
	 * level adds 11 address bits on top of the 31-bit base — confirm
	 * against the s390 architecture documentation.
	 */
	max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
	max_addr = 1UL << (max_addr * 11 + 31);
	/* These region starts are only known at runtime. */
	address_markers[MODULES_NR].start_address = MODULES_VADDR;
	address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
	address_markers[VMALLOC_NR].start_address = VMALLOC_START;
	/* Root-readable dump at <debugfs>/kernel_page_tables. */
	debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
	return 0;
}
device_initcall(pt_dump_init);