/*
 * PowerPC version
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0       /* XXX for now */
#define CPU_FTR_NOEXECUTE       0
#endif

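/* Optional cap on usable RAM; typically set early from the mem= command line. */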
unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
#define TOP_ZONE ZONE_HIGHMEM

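/* Walk the kernel page tables to find the pte that maps a kernel virtual address. */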
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}
#else
#define TOP_ZONE ZONE_NORMAL
#endif

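/*
 * Report whether a pfn is backed by system RAM: 32-bit kernels just compare
 * against max_pfn, 64-bit kernels walk the memblock memory regions.
 */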
int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64    /* XXX for now */
        return pfn < max_pfn;
#else
        unsigned long paddr = (pfn << PAGE_SHIFT);
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                if (paddr >= reg->base && paddr < (reg->base + reg->size))
                        return 1;
        return 0;
#endif
}

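/*
 * Decide the page protection used when physical memory is mapped to user
 * space (e.g. via /dev/mem): platforms may override this via ppc_md, and
 * anything that is not RAM is mapped non-cacheable.
 */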
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

        if (!page_is_ram(pfn))
                vma_prot = pgprot_noncached(vma_prot);

        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
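/* Resolve which NUMA node a hot-added physical address belongs to. */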
int memory_add_physaddr_to_nid(u64 start)
{
        return hot_add_scn_to_nid(start);
}
#endif

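/*
 * Weak defaults for configurations that cannot map hot-added memory;
 * MMU-specific code overrides these where hotplug is actually supported.
 */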
int __weak create_section_mapping(unsigned long start, unsigned long end)
{
        return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
        return -ENODEV;
}

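/*
 * Arch hook for memory hotplug: grow the hash page table if needed, map the
 * new range in the kernel linear mapping, then hand the pages to core mm.
 */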
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int rc;

        resize_hpt_for_hotplug(memblock_phys_mem_size());

        start = (unsigned long)__va(start);
        rc = create_section_mapping(start, start + size);
        if (rc) {
                pr_warning(
                        "Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
                        start, start + size, rc);
                return -EFAULT;
        }

        return __add_pages(nid, start_pfn, nr_pages, want_memblock);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
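/*
 * Arch hook for memory hot-remove: tear down the struct pages (skipping any
 * altmap-reserved PFNs), remove the linear mapping and shrink the hash page
 * table if possible.
 */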
int arch_remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct vmem_altmap *altmap;
        struct page *page;
        int ret;

        /*
         * If we have an altmap then we need to skip over any reserved PFNs
         * when querying the zone.
         */
        page = pfn_to_page(start_pfn);
        altmap = to_vmem_altmap((unsigned long) page);
        if (altmap)
                page += vmem_altmap_offset(altmap);

        ret = __remove_pages(page_zone(page), start_pfn, nr_pages);
        if (ret)
                return ret;

        /* Remove htab bolted mappings for this section of memory */
        start = (unsigned long)__va(start);
        ret = remove_section_mapping(start, start + size);

        /* Ensure all vmalloc mappings are flushed in case they also
         * hit that section of memory
         */
        vm_unmap_aliases();

        resize_hpt_for_hotplug(memblock_phys_mem_size());

        return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a given
 * memory range. PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in memblock.memory structures. Walk through the
 * memory regions, find holes and call the callback for contiguous regions.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
                void *arg, int (*func)(unsigned long, unsigned long, void *))
{
        struct memblock_region *reg;
        unsigned long end_pfn = start_pfn + nr_pages;
        unsigned long tstart, tend;
        int ret = -1;

        for_each_memblock(memory, reg) {
                tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
                tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
                if (tstart >= tend)
                        continue;
                ret = (*func)(tstart, tend - tstart, arg);
                if (ret)
                        break;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);

#ifndef CONFIG_NEED_MULTIPLE_NODES
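/*
 * Non-NUMA initialisation: derive the pfn limits from memblock, put all
 * memory in node 0 and set up sparsemem.
 */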
void __init initmem_init(void)
{
        max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

        /* Place all memblock_regions in the same node and merge contiguous
         * memblock_regions
         */
        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);

        /* XXX need to clip this if using highmem? */
        sparse_memory_present_with_active_regions(0);
        sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
        struct memblock_region *reg, *prev = NULL;

        for_each_memblock(memory, reg) {
                if (prev &&
                    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
                        register_nosave_region(memblock_region_memory_end_pfn(prev),
                                               memblock_region_memory_base_pfn(reg));
                prev = reg;
        }
        return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
        return 0;
}
#endif

static bool zone_limits_final;

/*
 * The memory zones past TOP_ZONE are managed by generic mm code.
 * These should be set to zero since that's what every other
 * architecture does.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
        [0 ... TOP_ZONE] = ~0UL,
        [TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
        int i;

        if (WARN_ON(zone_limits_final))
                return;

        for (i = zone; i >= 0; i--) {
                if (max_zone_pfns[i] > pfn_limit)
                        max_zone_pfns[i] = pfn_limit;
        }
}

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
        int i;

        for (i = TOP_ZONE; i >= 0; i--) {
                if (max_zone_pfns[i] <= pfn_limit)
                        return i;
        }

        return -EPERM;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long long total_ram = memblock_phys_mem_size();
        phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
        unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
        unsigned long end = __fix_to_virt(FIX_HOLE);

        for (; v < end; v += PAGE_SIZE)
                map_kernel_page(v, 0, 0);       /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
        map_kernel_page(PKMAP_BASE, 0, 0);      /* XXX gross */
        pkmap_page_table = virt_to_kpte(PKMAP_BASE);

        kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
        kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

        printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
               (unsigned long long)top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
        limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#endif
        limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
        zone_limits_final = true;
        free_area_init_nodes(max_zone_pfns);

        mark_nonram_nosave();
}

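/*
 * mem_init(): hand boot memory over to the buddy allocator, release
 * non-reserved highmem pages and (on 32-bit) print the virtual memory layout.
 */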
void __init mem_init(void)
{
        /*
         * book3s is limited to 16 page sizes due to encoding this in
         * a 4-bit field for slices.
         */
        BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
        swiotlb_init(0);
#endif

        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
        set_max_mapnr(max_pfn);
        free_all_bootmem();

#ifdef CONFIG_HIGHMEM
        {
                unsigned long pfn, highmem_mapnr;

                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
                        struct page *page = pfn_to_page(pfn);
                        if (!memblock_is_reserved(paddr))
                                free_highmem_page(page);
                }
        }
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
        /*
         * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
         * functions.... do it here for the non-smp case.
         */
        per_cpu(next_tlbcam_idx, smp_processor_id()) =
                (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

        mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
        pr_info("Kernel virtual memory layout:\n");
        pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
        pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
                PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
        pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
                IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
        pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
                ioremap_bot, IOREMAP_TOP);
        pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
                VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

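/*
 * Switch ppc_md.progress to the printk-based helper, then poison and free
 * the kernel's init sections.
 */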
void free_initmem(void)
{
        ppc_md.progress = ppc_printk_progress;
        free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

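/*
 * Flush both caches for a page, choosing how to reach it first: a direct
 * page_address() on 8xx and 64-bit, kmap_atomic() where highmem pages are
 * possible, or a flush by physical address otherwise.
 */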
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
        if (PageCompound(page)) {
                flush_dcache_icache_hugepage(page);
                return;
        }
#endif
#if defined(CONFIG_8xx) || defined(CONFIG_PPC64)
        /* On 8xx there is no need to kmap since highmem is not supported */
        __flush_dcache_icache(page_address(page));
#else
        if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
                void *start = kmap_atomic(page);
                __flush_dcache_icache(start);
                kunmap_atomic(start);
        } else {
                __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
        }
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

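/* Zero a page destined for user space; flush_dcache_page() marks it as not i-cache clean. */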
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero filled pages are icache clean)
         * - Anton
         */
        flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation, however
         * there are two problems.
         * Firstly a bug in some versions of binutils meant PLT sections
         * were not marked executable.
         * Secondly the first word in the GOT section is blrl, used
         * to establish the GOT address. Until recently the GOT was
         * not marked executable.
         * - Anton
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                return;
#endif

        flush_dcache_page(pg);
}

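/* Flush the icache for a range of a user page the kernel has just written instructions into. */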
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
        /*
         * We don't need to worry about _PAGE_PRESENT here because we are
         * called with either mm->page_table_lock held or ptl lock held
         */
        unsigned long access, trap;

        if (radix_enabled())
                return;

        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(*ptep) || address >= TASK_SIZE)
                return;

        /* We try to figure out if we are coming from an instruction
         * access fault and pass that down to __hash_page so we avoid
         * double-faulting on execution of fresh text. We have to test
         * for regs NULL since init will get here first thing at boot
         *
         * We also avoid filling the hash if not coming from a fault
         */

        trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
        switch (trap) {
        case 0x300:
                access = 0UL;
                break;
        case 0x400:
                access = _PAGE_EXEC;
                break;
        default:
                return;
        }

        hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
        && defined(CONFIG_HUGETLB_PAGE)
        if (is_vm_hugetlb_page(vma))
                book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                struct resource *res;
                unsigned long base = reg->base;
                unsigned long size = reg->size;

                res = kzalloc(sizeof(struct resource), GFP_KERNEL);
                WARN_ON(!res);

                if (res) {
                        res->name = "System RAM";
                        res->start = base;
                        res->end = base + size - 1;
                        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
                        WARN_ON(request_resource(&iomem_resource, res) < 0);
                }
        }

        return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
        if (page_is_rtas_user_buf(pfn))
                return 1;
        if (iomem_is_exclusive(PFN_PHYS(pfn)))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
        return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */