#ifndef _ASM_POWERPC_BOOK3S_32_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_32_PGALLOC_H

#include <linux/threads.h>
#include <linux/slab.h>

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  PTE pages (index_size == 0) are linked to a
 * struct page for now and drawn from the main get_free_pages() pool;
 * for all other page tables the allocation size is
 * (2^index_size * sizeof(pointer)) and allocations are drawn from
 * the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf
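
/*
 * Editorial illustration, not part of the original header: with
 * MAX_PGTABLE_INDEX_SIZE = 0xf, every page table must be at least
 * 16-byte aligned, so the low four bits of any table pointer are
 * zero.  The SMP pgtable_free_tlb()/__tlb_remove_table() pair below
 * uses exactly those bits to carry the index size alongside the
 * pointer:
 *
 *	tagged = (unsigned long)table | index_size;
 *	table  = (void *)(tagged & ~MAX_PGTABLE_INDEX_SIZE);
 *	index  = tagged & MAX_PGTABLE_INDEX_SIZE;
 */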

extern void __bad_pte(pmd_t *pmd);

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({				\
			BUG_ON(!(shift));		\
			pgtable_cache[(shift) - 1];	\
		})
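
/*
 * Editorial note: pgtable_cache[] is indexed by (shift - 1), so a
 * shift of 0 is invalid (hence the BUG_ON) and slot 0 holds the
 * cache for shift 1.  A minimal lookup sketch, assuming the caches
 * have already been created during boot:
 *
 *	struct kmem_cache *c = PGT_CACHE(PGD_INDEX_SIZE);
 *	pgd_t *pgd = kmem_cache_alloc(c, GFP_KERNEL);
 */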

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

/*
 * We don't have any real PMDs: 32-bit Book3S uses a two-level page
 * table, so the PMD level is folded and this code never triggers
 * because the pgd will always be present.
 */
/* #define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); }) */
#define pmd_free(mm, x)			do { } while (0)
#define __pmd_free_tlb(tlb, x, a)	do { } while (0)
/* #define pgd_populate(mm, pmd, pte)	BUG() */

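/*
 * Editorial note: the two pmd_populate() variants below differ only
 * in what a PMD entry stores.  The classic (non-BOOKE) branch stores
 * the physical address of the PTE page (via __pa()/pfn), while the
 * BOOKE branch stores its kernel virtual address.  Both OR in
 * _PMD_PRESENT to mark the entry valid.
 */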
#ifndef CONFIG_BOOKE

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
				       pte_t *pte)
{
	*pmdp = __pmd(__pa(pte) | _PMD_PRESENT);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pte_page)
{
	*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT);
}

#define pmd_pgtable(pmd) pmd_page(pmd)
#else

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
				       pte_t *pte)
{
	*pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pte_page)
{
	*pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
}

#define pmd_pgtable(pmd) pmd_page(pmd)
#endif

extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}
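
/*
 * Editorial note: pte_free() differs from pte_free_kernel() because
 * user PTE pages go through pgtable_page_ctor() at allocation time
 * (which, with split page table locks, sets up the per-page lock),
 * so the matching pgtable_page_dtor() must run before the page is
 * returned to the allocator.
 */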

static inline void pgtable_free(void *table, unsigned index_size)
{
	if (!index_size) {
		free_page((unsigned long)table);
	} else {
		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(index_size), table);
	}
}

#define check_pgt_cache()	do { } while (0)

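/*
 * Editorial note: under CONFIG_SMP, freeing is deferred through
 * tlb_remove_table() so that concurrent, lockless page table walkers
 * cannot see a table after it has been reused.  Since only a pointer
 * can be queued, the index size is packed into its low bits (see the
 * MAX_PGTABLE_INDEX_SIZE comment above) and unpacked again in
 * __tlb_remove_table().  PTE pages pass a shift of 0, which routes
 * them back to free_page() in pgtable_free().
 */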
#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

static inline void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	pgtable_free(table, shift);
}
#else
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
	pgtable_free(table, shift);
}
#endif

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	pgtable_page_dtor(table);
	pgtable_free_tlb(tlb, page_address(table), 0);
}
#endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */