#ifndef _ASM_POWERPC_PGALLOC_64_H
#define _ASM_POWERPC_PGALLOC_64_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

extern struct kmem_cache *pgtable_cache[];

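/*
 * Indices into pgtable_cache[] selecting the kmem_cache that backs each
 * level of the page tables.  PUD and PMD tables share one cache, while
 * PTE pages are not kmem_cache-backed at all: they come straight from
 * the page allocator and are tagged PTE_NONCACHE_NUM when handed to
 * pgtable_free().
 */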
#define PGD_CACHE_NUM		0
#define PUD_CACHE_NUM		1
#define PMD_CACHE_NUM		1
#define HUGEPTE_CACHE_NUM	2
#define PTE_NONCACHE_NUM	3	/* from GFP rather than kmem_cache */

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
}

static inline void pgd_free(pgd_t *pgd)
{
	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
}

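/*
 * The helpers for the middle levels differ between the 4K and 64K base
 * page size configurations: only the 4K layout allocates and frees pud
 * tables here, while the 64K layout needs just the populate helpers.
 * pmd_alloc_one() and pmd_free() further down are shared by both.
 */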
#ifndef CONFIG_PPC_64K_PAGES

#define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, PUD)

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(pud_t *pud)
{
	kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, (unsigned long)pmd);
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))


#else /* CONFIG_PPC_64K_PAGES */

#define pud_populate(mm, pud, pmd)	pud_set(pud, (unsigned long)pmd)

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, (unsigned long)pte);
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))

#endif /* CONFIG_PPC_64K_PAGES */

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(pmd_t *pmd)
{
	kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
}

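/*
 * PTE pages come straight from the page allocator (pre-zeroed), not
 * from a kmem_cache; that is why PTE pages are freed through the
 * PTE_NONCACHE_NUM path in pgtable_free() below.
 */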
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}

static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long address)
{
	pte_t *pte = pte_alloc_one_kernel(mm, address);
	return pte ? virt_to_page(pte) : NULL;
}

static inline void pte_free_kernel(pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct page *ptepage)
{
	__free_page(ptepage);
}

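/*
 * pgtable_free_t packs a table's address and its cache index into one
 * word.  The encoding relies on tables being aligned to their own size,
 * so the low bits cleared by 'mask' are zero to begin with and can
 * carry the cache number (which must fit in PGF_CACHENUM_MASK).
 * pgtable_free() undoes the encoding and releases the table to the
 * right allocator.
 */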
#define PGF_CACHENUM_MASK	0x3

typedef struct pgtable_free {
	unsigned long val;
} pgtable_free_t;

static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
						unsigned long mask)
{
	BUG_ON(cachenum > PGF_CACHENUM_MASK);

	return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
}

static inline void pgtable_free(pgtable_free_t pgf)
{
	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
	int cachenum = pgf.val & PGF_CACHENUM_MASK;

	if (cachenum == PTE_NONCACHE_NUM)
		free_page((unsigned long)p);
	else
		kmem_cache_free(pgtable_cache[cachenum], p);
}

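/*
 * Page tables torn down under an mmu_gather are handed to
 * pgtable_free_tlb() in encoded form; it defers the real free while
 * other CPUs may still be walking the old tables.
 */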
extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);

#define __pte_free_tlb(tlb, ptepage)	\
	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1))
#define __pmd_free_tlb(tlb, pmd)	\
	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud)	\
	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
#endif /* CONFIG_PPC_64K_PAGES */

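/* Nothing to do: no cache of preconstructed page tables is kept to trim. */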
#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_POWERPC_PGALLOC_64_H */