/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_BOOK3S_64
#if H_PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
#endif /* CONFIG_PPC_BOOK3S_64 */

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
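
/*
 * A worked example of the arithmetic above.  The concrete numbers are
 * illustrative only and assume a 64-byte struct page and
 * PAGES_PER_SECTION == 1 << 16; both depend on the configuration:
 *
 *	page	= vmemmap + 0x12345 * sizeof(struct page) + 7
 *	offset / sizeof(struct page)	== 0x12345
 *	... & PAGE_SECTION_MASK		== 0x10000
 *
 * i.e. the stray +7 and the low pfn bits are both masked away, leaving
 * the first pfn of the section containing the address.
 */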

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;
	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}

/*
 * vmemmap virtual address space management does not have a traditional
 * page table to track which virtual struct pages are backed by a
 * physical mapping.  The virtual-to-physical mappings are tracked in a
 * simple linked list format.  'vmemmap_list' maintains the entire
 * vmemmap physical mapping at all times, whereas the 'next' list
 * maintains the available vmemmap_backing structures which have been
 * deleted from the 'vmemmap_list' during system runtime (memory hotplug
 * remove operation).  The freed 'vmemmap_backing' structures are reused
 * later when new requests come in, without allocating fresh memory.
 * This pointer also tracks the allocated 'vmemmap_backing' structures,
 * as we allocate one full page of memory at a time when we don't have
 * any.
 */
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;

/*
 * The same pointer 'next' tracks individual chunks inside the allocated
 * full page during boot, and again tracks the freed nodes during
 * runtime.  This is racy in principle, but the race never occurs
 * because the two uses are separated by the boot process.  It would
 * become a problem if we somehow had a memory hotplug operation during
 * boot!
 */
static int num_left;
static int num_freed;
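
/*
 * An illustrative state of the bookkeeping above (a sketch, not a
 * snapshot of any real configuration):
 *
 *	vmemmap_list -> [backing C] -> [backing B] -> [backing A] -> NULL
 *	next --------> [freed X] -> [freed Y] -> NULL	(num_freed == 2)
 *
 * or, while chunks are still being handed out of a freshly allocated
 * page (num_left > 0):
 *
 *	next --------> &page_of_backings[i]
 *
 * Both chains run through the same 'list' member of struct
 * vmemmap_backing.
 */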

static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;
	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}
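
/*
 * On the chunking arithmetic above: struct vmemmap_backing is three
 * unsigned-long-sized fields (list, phys, virt_addr), i.e. 24 bytes on
 * 64-bit, so a 64K page would yield ~2730 entries and a 4K page 170.
 * These figures are illustrative; the exact count is whatever
 * PAGE_SIZE / sizeof(struct vmemmap_backing) evaluates to.
 */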

static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		p = __vmemmap_alloc_block_buf(page_size, node, altmap);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warning("vmemmap_populate: Unable to create vmemmap mapping: %d\n",
				   rc);
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it with prev pointer recorded */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* make 'next' point at this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long page_order = get_order(page_size);

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct vmem_altmap *altmap;
		struct page *section_base;
		struct page *page;

		/*
		 * The section has already been marked as invalid, so
		 * vmemmap_populated() returning true means some other
		 * section still maps into this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (!addr)
			continue;

		page = pfn_to_page(addr >> PAGE_SHIFT);
		section_base = pfn_to_page(vmemmap_section_start(start));
		nr_pages = 1 << page_order;

		altmap = to_vmem_altmap((unsigned long) section_base);
		if (altmap) {
			vmem_altmap_free(altmap, nr_pages);
		} else if (PageReserved(page)) {
			/* allocated from bootmem */
			if (page_size < PAGE_SIZE) {
				/*
				 * this shouldn't happen, but if it is
				 * the case, leave the memory there
				 */
				WARN_ON_ONCE(1);
			} else {
				while (nr_pages--)
					free_reserved_page(page++);
			}
		} else {
			free_pages((unsigned long)(__va(addr)), page_order);
		}

		vmemmap_remove_mapping(start, page_size);
	}
}
#endif

void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

/*
 * We do not have access to the sparsemem vmemmap, so we fall back to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump.  In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM contiguously (they
 * are in virtual address space, which is not available in real mode),
 * the requested page struct can be split between blocks, so
 * get_page/put_page may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct vmemmap_backing *vmem_back;
	struct page *page;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
		if (pg_va < vmem_back->virt_addr)
			continue;

		/* vmemmap_list entries can be freed, so check them all */
		if ((pg_va + sizeof(struct page)) <=
		    (vmem_back->virt_addr + page_size)) {
			page = (struct page *) (vmem_back->phys + pg_va -
				vmem_back->virt_addr);
			return page;
		}
	}

	/* The page struct is probably split between real pages */
	return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
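
/*
 * A minimal usage sketch (illustrative; H_TOO_HARD is the hcall return
 * code asking for the operation to be retried in virtual mode).  A
 * real-mode caller must tolerate NULL, since the struct page may
 * straddle two vmemmap blocks:
 *
 *	struct page *page = realmode_pfn_to_page(pfn);
 *
 *	if (!page)
 *		return H_TOO_HARD;	(redo the work in virtual mode)
 */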

#else

struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_PPC_BOOK3S_64
static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);

static int __init parse_disable_radix(char *p)
{
	bool val;

	if (strlen(p) == 0)
		val = true;
	else if (kstrtobool(p, &val))
		return -EINVAL;

	disable_radix = val;

	return 0;
}
early_param("disable_radix", parse_disable_radix);
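
/*
 * Command-line behaviour, as implemented above (kstrtobool accepts the
 * usual y/n/1/0/on/off spellings):
 *
 *	disable_radix		-> bare flag, radix disabled
 *	disable_radix=1		-> radix disabled
 *	disable_radix=0		-> radix left enabled
 */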

/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
 * radix.  If not, we clear the radix feature bit so we fall back to hash.
 */
static void __init early_check_vec5(void)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;
	u8 mmu_supported;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}

	/* Check for supported configuration */
	mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
			OV5_FEAT(OV5_MMU_SUPPORT);
	if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
		/* Hypervisor only supports radix - check enabled && GTSE */
		if (!early_radix_enabled()) {
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
		}
		if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
						OV5_FEAT(OV5_RADIX_GTSE))) {
			pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
		}
		/* Do radix anyway - the hypervisor said we had to */
		cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
		/* Hypervisor only supports hash - disable radix */
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
	}
}
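
/*
 * A summary of the decision logic above (the OV5_MMU_SUPPORT field of
 * /chosen/ibm,architecture-vec-5):
 *
 *	property missing/short	-> clear MMU_FTR_TYPE_RADIX (fall back to hash)
 *	radix only		-> force MMU_FTR_TYPE_RADIX on, warning if
 *				   disable_radix was given or GTSE is missing
 *	hash only		-> clear MMU_FTR_TYPE_RADIX
 *	anything else		-> leave the feature bit as previously set
 */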

void __init mmu_early_init_devtree(void)
{
	/* Disable radix mode based on kernel command line. */
	if (disable_radix)
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

	/*
	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
	 * When running bare-metal, we can use radix if we like
	 * even though the ibm,architecture-vec-5 property created by
	 * skiboot doesn't have the necessary bits set.
	 */
	if (!(mfmsr() & MSR_HV))
		early_check_vec5();

	if (early_radix_enabled())
		radix__early_init_devtree();
	else
		hash__early_init_devtree();
}
#endif /* CONFIG_PPC_BOOK3S_64 */
Michael Ellerman4e003742017-10-19 15:08:43 +1100453#endif /* CONFIG_PPC_BOOK3S_64 */