#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
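/*
 * Illustrative only, not part of this header's API: a minimal sketch of
 * how these two macros round-trip between a struct page and a PTE,
 * assuming the caller already has a valid page and protection value:
 *
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 *	struct page *p = pte_page(pte);	/+ back to the original page +/
 */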

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
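/*
 * Illustrative only: a read fault on an anonymous mapping can be
 * satisfied by mapping the zero page read-only. A sketch, assuming
 * 'vma' and 'address' come from a fault handler:
 *
 *	pte_t pte = mk_pte(ZERO_PAGE(address), vma->vm_page_prot);
 *	pte = pte_wrprotect(pte);
 */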

extern pgd_t swapper_pg_dir[];

void limit_zone_pfn(enum zone_type zone, unsigned long max_pfn);
int dma_pfn_limit_to_zone(u64 pfn_limit);
extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>


/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
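/*
 * Illustrative only: a typical caller installs the PTE first, then lets
 * update_mmu_cache() do the cache/hash maintenance described above. A
 * sketch, assuming 'mm', 'vma', 'address', 'ptep' and 'pte' come from a
 * fault path:
 *
 *	set_pte_at(mm, address, ptep, pte);
 *	update_mmu_cache(vma, address, ptep);
 */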

extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write,
		       struct page **pages, int *nr);
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#endif
pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				   bool *is_thp, unsigned *shift);
static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
					       bool *is_thp, unsigned *shift)
{
	VM_WARN(!arch_irqs_disabled(),
		"%s called with irq enabled\n", __func__);
	return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift);
}
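/*
 * Illustrative only: the walk is safe only with interrupts disabled,
 * which is exactly what the VM_WARN above checks. A sketch, assuming a
 * caller that has an mm and an effective address 'ea':
 *
 *	unsigned long flags;
 *	unsigned int shift;
 *	bool is_thp;
 *	pte_t *ptep;
 *
 *	local_irq_save(flags);
 *	ptep = find_linux_pte_or_hugepte(mm->pgd, ea, &is_thp, &shift);
 *	if (ptep)
 *		... use *ptep while irqs remain disabled ...
 *	local_irq_restore(flags);
 */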

unsigned long vmalloc_to_phys(void *vmalloc_addr);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */