#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/spitfire.h>
#include <asm/cpudata.h>
#include <asm/cacheflush.h>
#include <asm/page.h>

/* Page table allocation/freeing. */

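/*
 * One kmem_cache backs both the pgd and pmd tables; see pgd_alloc()
 * and pmd_alloc_one() below.
 */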
extern struct kmem_cache *pgtable_cache;

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(pgtable_cache, pgd);
}

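/* Installing a pmd table into a pud entry is just a pointer store. */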
#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)

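/*
 * __GFP_REPEAT asks the page allocator to retry harder before failing;
 * presumably worthwhile here because a pmd allocation failure is
 * awkward to unwind in the middle of a fault.
 */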
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(pgtable_cache,
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(pgtable_cache, pmd);
}

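/* Kernel pte tables are full, pre-zeroed pages from the page allocator. */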
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}

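/*
 * The user variant wraps the kernel one and additionally runs
 * pgtable_page_ctor(), which prepares the page for the split page
 * table lock and page-table accounting.
 */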
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	struct page *page;
	pte_t *pte;

	pte = pte_alloc_one_kernel(mm, address);
	if (!pte)
		return NULL;
	page = virt_to_page(pte);
	pgtable_page_ctor(page);
	return page;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}

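/*
 * pmd_populate_kernel() takes the pte table's kernel virtual address,
 * pmd_populate() takes its struct page, and pmd_pgtable() recovers the
 * struct page from a pmd entry.  Illustrative pairing only (the real
 * call sites live in the generic mm code):
 *
 *	pgtable_t new = pte_alloc_one(mm, addr);
 *	if (new)
 *		pmd_populate(mm, pmd, new);
 */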
#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
#define pmd_populate(MM,PMD,PTE_PAGE)		\
	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
#define pmd_pgtable(pmd)			pmd_page(pmd)

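/* Page tables come from the slab and page allocators; nothing to trim. */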
#define check_pgt_cache()	do { } while (0)

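/*
 * Common free path: the is_page flag records which allocator a table
 * came from, mirroring the two allocation styles above.
 */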
static inline void pgtable_free(void *table, bool is_page)
{
	if (is_page)
		free_page((unsigned long)table);
	else
		kmem_cache_free(pgtable_cache, table);
}

#ifdef CONFIG_SMP

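/*
 * On SMP the actual free is deferred through tlb_remove_table() so
 * that page table walkers still running on other CPUs cannot observe
 * a table after its memory has been reused.
 */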
struct mmu_gather;
extern void tlb_remove_table(struct mmu_gather *, void *);

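/*
 * tlb_remove_table() carries a single void * per entry, so the is_page
 * flag is smuggled in bit 0 of the table address; both slab and page
 * allocations are aligned far beyond that bit.  __tlb_remove_table()
 * decodes it on the other side of the TLB flush.
 */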
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
{
	unsigned long pgf = (unsigned long)table;
	if (is_page)
		pgf |= 0x1UL;
	tlb_remove_table(tlb, (void *)pgf);
}

static inline void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~0x1UL);
	bool is_page = false;

	if ((unsigned long)_table & 0x1UL)
		is_page = true;
	pgtable_free(table, is_page);
}
#else /* CONFIG_SMP */
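/* UP: no concurrent walkers, so the table can be freed immediately. */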
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
{
	pgtable_free(table, is_page);
}
#endif /* !CONFIG_SMP */

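/*
 * Tear-down hooks for the generic mmu_gather code: undo the ctor, then
 * hand each table to the deferred-free path with the matching flag.
 */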
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
				  unsigned long address)
{
	pgtable_page_dtor(ptepage);
	pgtable_free_tlb(tlb, page_address(ptepage), true);
}

#define __pmd_free_tlb(tlb, pmd, addr)		      \
	pgtable_free_tlb(tlb, pmd, false)

#endif /* _SPARC64_PGALLOC_H */