/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e., SW loaded TLBs or Book3E compliant processors;
 * this does -not- include the 603, however, which shares the implementation
 * with hash based processors)
 *
 *  -- BenH
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                     IBM Corp.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_BOOK3E
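/* For each supported base page size this gives its shift (log2 of the size
 * in bytes), the TSIZE encoding programmed into the MAS registers (.enc),
 * and, where applicable, the shift of the indirect (page table) page size
 * associated with it (.ind) -- setup_page_sizes() refines .ind from EPTCFG
 * on parts that support indirect entries.
 */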
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.ind	= 20,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.ind	= 28,
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.ind	= 36,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif

/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_enabled;		/* Is HW tablewalk enabled ? */
unsigned long linear_map_top;	/* Top of linear mapping */

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
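/*
 * Illustrative only (callers live in generic mm code, not in this file):
 * the usual sequence is to clear or update the PTE first and then call
 * flush_tlb_page(vma, addr) for a single mapping, or flush_tlb_mm(mm) when
 * an entire address space is being torn down.
 */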

/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);

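/* True when the mm is only in use by threads of the current core (its CPU
 * mask is a subset of the current CPU's thread siblings), in which case a
 * purely local invalidate is sufficient and no IPI or broadcast tlbivax is
 * needed.
 */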
static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_thread_cpumask(smp_processor_id()));
}

struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}


/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_47x
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since some
 * implementations can stack multiple tlbivax instructions before a tlbsync,
 * but for now we keep it that way.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}

/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_enabled) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal; ideally we would factor out
		 * the whole preempt & CPU mask mucking around, or even the
		 * IPI, but it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

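		/* Without HW tablewalk the software TLB miss handlers go
		 * through a virtual linear page table, so compute the
		 * address within that table of the PTE mapping 'address':
		 * keep the top region nibble with the 0x1 bit ORed in, and
		 * scale the offset by the size of a PTE slot (hence
		 * PAGE_SHIFT - 3, or PAGE_SHIFT - 4 with 64K pages).  A
		 * single non-indirect invalidate of that entry is then
		 * enough.  (Layout inferred from the masks used here.)
		 */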
#ifdef CONFIG_PPC_64K_PAGES
		vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
#else
		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
#endif
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}

static void setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);

	if (((mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) &&
	    (mmu_has_feature(MMU_FTR_TYPE_FSL_E))) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			if (shift == 0)
				continue;

			/* adjust to be in terms of 4^shift Kb */
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto no_indirect;
	}
#endif

	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported ? */
	if ((tlb0cfg & TLBnCFG_IND) == 0)
		goto no_indirect;

	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future.
	 */
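	/* Each iteration pulls two 5-bit fields out of EPTCFG: first the
	 * sub-page (direct) size, then the indirect page size, both encoded
	 * as log2 of the size in KB; a value of 0 marks the slot as unused.
	 * This field layout is assumed from how the loop below consumes the
	 * register.
	 */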
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;
		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}
 no_indirect:

	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}

static void __patch_exception(int exc, unsigned long addr)
{
	extern unsigned int interrupt_base_book3e;
	unsigned int *ibase = &interrupt_base_book3e;

	/* Our exception vectors start with a NOP and -then- a branch
	 * to deal with single stepping from userspace, which stops on
	 * the second instruction. Thus we need to patch the second
	 * instruction of the exception, not the first one.
	 */

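	/* 'exc' is the byte offset of the vector from interrupt_base_book3e;
	 * dividing by 4 turns it into an instruction index, and the + 1
	 * skips the leading nop described above.
	 */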
	patch_branch(ibase + (exc / 4) + 1, addr, 0);
}

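/* Thin wrapper so callers can pass the bare handler symbol name; the extern
 * declaration for that symbol lives here instead of at every call site.
 */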
#define patch_exception(exc, name) do { \
	extern unsigned int name; \
	__patch_exception((exc), (unsigned long)&name); \
} while (0)

static void setup_mmu_htw(void)
{
	/* Check if HW tablewalk is present, and if yes, enable it by:
	 *
	 * - patching the TLB miss handlers to branch to the
	 *   ones dedicated to it
	 *
	 * - setting the global book3e_htw_enabled
	 */
	unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);

	if ((tlb0cfg & TLBnCFG_IND) &&
	    (tlb0cfg & TLBnCFG_PT)) {
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		book3e_htw_enabled = 1;
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_enabled ? "enabled" : "not supported");
}

/*
 * Early initialization of the MMU TLB code
 */
static void __early_init_mmu(int boot_cpu)
{
	unsigned int mas4;

	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard wires it. Ideally
	 * we should find out a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;

	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 */
	mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 *     check what page size combos are supported by the HW. It
	 *     also doesn't handle the case where a separate array holds
	 *     the IND entries from the array loaded by the PT.
	 */
	if (boot_cpu) {
		/* Look for supported page sizes */
		setup_page_sizes();

		/* Look for HW tablewalk support */
		setup_mmu_htw();
	}

	/* Set MAS4 based on page table setting */

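	/* MAS4 supplies the default values loaded into the MAS registers on
	 * a TLB miss.  With HW tablewalk enabled the defaults describe
	 * indirect entries (MAS4_INDD) sized for PTE pages; otherwise the
	 * default TSIZE is simply the base page size.
	 */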
	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	if (book3e_htw_enabled) {
		mas4 |= MAS4_INDD;
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |=	BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_256M;
#else
		mas4 |=	BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
#endif
	} else {
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |=	BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
#else
		mas4 |=	BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
#endif
		mmu_pte_psize = mmu_virtual_psize;
	}
	mtspr(SPRN_MAS4, mas4);

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
		linear_map_top = map_mem_in_cams(linear_map_top, num_cams);

		/* limit memory so we don't have linear faults */
		memblock_enforce_memory_limit(linear_map_top);
		memblock_analyze();

		patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();

	memblock_set_current_limit(linear_map_top);
}

void __init early_init_mmu(void)
{
	__early_init_mmu(1);
}

void __cpuinit early_init_mmu_secondary(void)
{
	__early_init_mmu(0);
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change. We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif
}
#endif /* CONFIG_PPC64 */