/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>

#include <trace/events/thp.h>

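/*
 * Point the second doubleword of partition table entry 0 at the host
 * process table. This is the bare-metal path; under a hypervisor the
 * registration is expected to go through firmware instead.
 */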
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					  unsigned long table_size)
{
	unsigned long patb1 = base | table_size | PATB_GR;

	partition_tb->patb1 = cpu_to_be64(patb1);
	return 0;
}

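/*
 * Early boot allocator: grab a naturally aligned, zeroed block
 * directly from memblock, for use before the slab allocator is up.
 */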
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
	memset(pt, 0, size);

	return pt;
}

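/*
 * Set up a kernel mapping of @map_page_size bytes (PAGE_SIZE, PMD_SIZE
 * or PUD_SIZE) from effective address @ea to physical address @pa,
 * using the regular page table allocators once slab is available and
 * the early memblock-backed allocator before that.
 *
 * Illustrative call only (addresses are hypothetical), e.g. a 2M
 * executable mapping:
 *
 *	rc = radix__map_kernel_page(vaddr, paddr, PAGE_KERNEL_X, PMD_SIZE);
 */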
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure the task size is correct with respect to the maximum address
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
		pudp = pud_offset(pgdp, ea);
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
	smp_wmb();
	return 0;
}

static inline void __meminit print_mapping(unsigned long start,
					   unsigned long end,
					   unsigned long size)
{
	if (end <= start)
		return;

	pr_info("Mapped range 0x%lx - 0x%lx with 0x%lx\n", start, end, size);
}

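/*
 * Create the linear mapping for a physical range, greedily using the
 * largest page size (1G, then 2M, then base pages) that the current
 * alignment, the remaining gap and the MMU's supported page sizes
 * allow. Ranges overlapping kernel text are mapped executable.
 */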
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end)
{
	unsigned long vaddr, addr, mapping_size = 0;
	pgprot_t prot;

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = end - addr;
		previous_size = mapping_size;

		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift)
			mapping_size = PUD_SIZE;
		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			 mmu_psize_defs[MMU_PAGE_2M].shift)
			mapping_size = PMD_SIZE;
		else
			mapping_size = PAGE_SIZE;

		if (mapping_size != previous_size) {
			print_mapping(start, addr, previous_size);
			start = addr;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size))
			prot = PAGE_KERNEL_X;
		else
			prot = PAGE_KERNEL;

		rc = radix__map_kernel_page(vaddr, addr, prot, mapping_size);
		if (rc)
			return rc;
	}

	print_mapping(start, addr, mapping_size);
	return 0;
}

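/*
 * Build the kernel's radix page tables: linear-map all of memory,
 * allocate and fill the process table, register it in the partition
 * table, and invalidate any translations the hardware may have cached.
 */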
static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	for_each_memblock(memory, reg)
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size));
	/*
	 * Allocate the partition table and process table for the
	 * host.
	 */
	BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 36), "Process table size too large.");
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * enables us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
}

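/*
 * Fill in partition table entry 0 for the host: radix tree size, the
 * kernel page table root and the host-radix (HR) bit. Bare metal only;
 * a guest has no direct access to the partition table.
 */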
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}

void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}

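/*
 * Translate a page size shift from the device tree into an
 * mmu_psize_defs index: 0xc (4K), 0x10 (64K), 0x15 (2M), 0x1e (1G).
 */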
static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

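/*
 * Flat device tree scanner: look in the cpu nodes for the
 * "ibm,processor-radix-AP-encodings" property and record each
 * advertised page size shift and its AP encoding in mmu_psize_defs.
 */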
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0) /* Found */
		goto found;
	/*
	 * Let's assume we have 4K and 64K page support
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * Map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}

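/*
 * POWER9 DD1 only: flush all hypervisor- and process-scoped
 * translations, then set HID0[RADIX] and spin until the new mode is
 * visible.
 */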
static void update_hid_for_radix(void)
{
	unsigned long hid0;
	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */

	asm volatile("ptesync": : :"memory");
	/* prs = 0, ric = 2, rs = 0, r = 1, is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
	/* prs = 1, ric = 2, rs = 0, r = 1, is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
	trace_tlbie(0, 0, rb, 0, 2, 0, 1);
	trace_tlbie(0, 0, rb, 0, 2, 1, 1);

	/*
	 * Now switch the HID
	 */
	hid0 = mfspr(SPRN_HID0);
	hid0 |= HID0_POWER9_RADIX;
	mtspr(SPRN_HID0, hid0);
	asm volatile("isync": : :"memory");

	/* Wait for it to happen */
	while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
		cpu_relax();
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so
	 * that the hypervisor and guest can set up IAMR (Instruction
	 * Authority Mask Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

static void radix_init_iamr(void)
{
	unsigned long iamr;

	/*
	 * The IAMR should be set to 0 on DD1.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		iamr = 0;
	else
		iamr = (1ul << 62);

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, iamr);
}

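/*
 * Boot-CPU MMU setup: pick the kernel and vmemmap page sizes, publish
 * the radix geometry through the generic page table variables, lay out
 * the kernel virtual address space, then either set up the partition
 * table ourselves (bare metal) or register with the hypervisor
 * (pseries), and finally build the kernel page tables.
 */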
void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * Initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif

	/*
	 * For now radix also uses the same frag size
	 */
	__pte_frag_nr = H_PTE_FRAG_NR;
	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_iamr();
	radix_init_pgtable();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * Update the partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {

		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();

		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}
	radix_init_iamr();
}

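/*
 * Undo MMU state on shutdown paths (e.g. before kexec'ing into another
 * kernel): drop UPRT, clear the partition table pointer for both the
 * core and nest MMU, and flush the TLB. Bare metal only.
 */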
void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping
	 * physical address 0 on these processors.
	 */
	BUG_ON(first_memblock_base != 0);
	/*
	 * We limit the allocations that depend on ppc64_rma_size
	 * to first_memblock_size. We also clamp it to 1GB to
	 * avoid some funky things such as RTAS bugs.
	 *
	 * On a radix config we really don't have a limitation
	 * on real mode access. But keeping it as above works
	 * well enough.
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
	/*
	 * Finally limit subsequent allocations. We really don't want
	 * to limit the memblock allocations to rma_size. FIXME!! should
	 * we even limit at all ?
	 */
	memblock_set_current_limit(first_memblock_base + first_memblock_size);
}

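/*
 * Memory hot-unplug support: the remove_*_table() helpers below walk
 * the kernel page tables top-down, clear mappings in the given range
 * and free any page table pages that end up completely empty.
 */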
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_huge(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_huge(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

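/*
 * Top-level unmap of a kernel address range, performed under
 * init_mm.page_table_lock and followed by a TLB flush of the range.
 */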
static void remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
			    !IS_ALIGNED(next, PGDIR_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __ref radix__create_section_mapping(unsigned long start, unsigned long end)
{
	return create_physical_mapping(start, end);
}

int radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

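/*
 * vmemmap support: back the struct page array with kernel mappings of
 * the page size chosen in radix__early_init_devtree(). Failure to map
 * is fatal here, hence the BUG_ON().
 */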
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit radix__vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

	BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

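/*
 * Clear and/or set bits in a huge PMD, returning the old value. The
 * pte-level update routine does the actual work; the update is traced
 * for debugging.
 */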
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				 pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	/*
	 * khugepaged calls this for a normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);
	/* FIXME!! Verify whether we need the kick below */
	kick_all_cpus_sync();
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_linux_pte_or_hugepte which does lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_linux_pte_or_hugepte to finish.
	 */
	kick_all_cpus_sync();
	return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
	/* For radix, 2M at the PMD level means THP */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */