/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

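/*
 * Illustrative use of the two macros above (a sketch, not code from this
 * file): build a PTE for a struct page, then translate a PTE back to its
 * page.  'page' and 'vma' are hypothetical locals of a caller:
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *	struct page *p = pte_page(pte);
 */
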
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

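/*
 * Example (sketch only): generic MM code typically services a read fault on
 * never-written anonymous memory by mapping ZERO_PAGE() read-only, roughly:
 *
 *	entry = pte_mkspecial(pfn_pte(page_to_pfn(ZERO_PAGE(address)),
 *				      vma->vm_page_prot));
 *
 * where 'entry', 'address' and 'vma' are hypothetical locals of such a
 * fault handler.
 */
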
extern pgd_t swapper_pg_dir[];

void limit_zone_pfn(enum zone_type zone, unsigned long max_pfn);
int dma_pfn_limit_to_zone(u64 pfn_limit);
extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

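/*
 * A 64-bit arch that does perform a test typically checks the address
 * against the kernel address space or walks the kernel page tables.  A
 * minimal range-check sketch, purely illustrative and not what powerpc uses:
 *
 *	#define kern_addr_valid(addr)	((addr) >= PAGE_OFFSET)
 */
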
#include <asm-generic/pgtable.h>


/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

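/*
 * Sketch of the typical update_mmu_cache() call site (generic fault-handling
 * code, not this file): once the new PTE has been installed with
 * set_pte_at(), the MM core calls
 *
 *	update_mmu_cache(vma, address, ptep);
 *
 * giving hash-MMU machines the chance to preload the matching HPTE.
 */
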
extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write,
		       struct page **pages, int *nr);
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#endif

/* Can we use this in KVM? */
unsigned long vmalloc_to_phys(void *vmalloc_addr);

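/*
 * Example (sketch): translating a vmalloc'ed buffer into a physical address,
 * e.g. so MMU-off (real-mode) code can reach it.  'buf' is a hypothetical
 * pointer obtained from vmalloc():
 *
 *	unsigned long pa = vmalloc_to_phys(buf);
 */
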
void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);

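/*
 * mark_initmem_nx() is expected to revoke execute permission from the
 * kernel's init memory once it has been freed; configurations that enforce
 * neither strict kernel RWX nor run 32-bit get the empty stub below.
 */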
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */