/* $Id: pgalloc.h,v 1.30 2001/12/21 04:56:17 davem Exp $ */
#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/spitfire.h>
#include <asm/cpudata.h>
#include <asm/cacheflush.h>
#include <asm/page.h>

/* Page table allocation/freeing. */
#ifdef CONFIG_SMP
/* Sliiiicck */
#define pgt_quicklists	local_cpu_data()
#else
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache;
	unsigned int pgcache_size;
} pgt_quicklists;
#endif
#define pgd_quicklist		(pgt_quicklists.pgd_cache)
#define pte_quicklist		(pgt_quicklists.pte_cache)
#define pgtable_cache_size	(pgt_quicklists.pgcache_size)

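/*
 * The quicklists are simple freelists of page-table pages: a freed
 * table is pushed by storing the current list head in its first word
 * and making the page the new head, so no extra bookkeeping memory is
 * needed.  pgtable_cache_size counts how many pages are cached.  On
 * SMP the lists live in the per-cpu data (local_cpu_data()), so the
 * helpers only need preemption disabled, not a lock.  The *_fast
 * variants use the quicklists; the *_slow variants hand pages back to
 * the page allocator.
 */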
static inline void free_pgd_fast(pgd_t *pgd)
{
	preempt_disable();
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
	preempt_enable();
}

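/*
 * Pop a pgd from the quicklist if one is cached; the first word is
 * cleared because it held the freelist link.  Otherwise fall back to
 * a freshly zeroed page from the page allocator.
 */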
static inline pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	preempt_disable();
	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
		preempt_enable();
	} else {
		preempt_enable();
		ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
		if (ret)
			memset(ret, 0, PAGE_SIZE);
	}
	return (pgd_t *)ret;
}

static inline void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)

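/*
 * PMD and PTE tables are both a single page here, so they share the
 * pte_quicklist: pmd_alloc_one_fast() and free_pmd_fast() pop and
 * push the same list that the pte helpers further down use.
 */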
static inline pmd_t *pmd_alloc_one_fast(void)
{
	unsigned long *ret;

	preempt_disable();
	ret = (unsigned long *) pte_quicklist;
	if (likely(ret)) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	preempt_enable();

	return (pmd_t *) ret;
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd;

	pmd = pmd_alloc_one_fast();
	if (unlikely(!pmd)) {
		pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
		if (pmd)
			memset(pmd, 0, PAGE_SIZE);
	}
	return pmd;
}

static inline void free_pmd_fast(pmd_t *pmd)
{
	preempt_disable();
	*(unsigned long *)pmd = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pmd;
	pgtable_cache_size++;
	preempt_enable();
}

static inline void free_pmd_slow(pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

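/*
 * pmd_populate_kernel() takes the pte table as a kernel pointer,
 * while pmd_populate() takes the struct page of a user pte table and
 * converts it with page_address() before installing it in the pmd.
 */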
#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
#define pmd_populate(MM,PMD,PTE_PAGE)		\
	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))

static inline pte_t *pte_alloc_one_fast(void)
{
	unsigned long *ret;

	preempt_disable();
	ret = (unsigned long *) pte_quicklist;
	if (likely(ret)) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	preempt_enable();

	return (pte_t *) ret;
}

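/*
 * Kernel pte tables are returned as pointers, user pte tables as
 * struct pages, matching the two pmd_populate variants above.  Both
 * paths try the quicklist first and fall back to a zeroed page.
 */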
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *ptep = pte_alloc_one_fast();

	if (likely(ptep))
		return ptep;

	return (pte_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte = pte_alloc_one_fast();

	if (likely(pte))
		return virt_to_page(pte);

	return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
}

static inline void free_pte_fast(pte_t *pte)
{
	preempt_disable();
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
	preempt_enable();
}

static inline void free_pte_slow(pte_t *pte)
{
	free_page((unsigned long) pte);
}

static inline void pte_free_kernel(pte_t *pte)
{
	free_pte_fast(pte);
}

static inline void pte_free(struct page *ptepage)
{
	free_pte_fast(page_address(ptepage));
}

#define pmd_free(pmd)		free_pmd_fast(pmd)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()
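
/*
 * Illustrative sketch only, not part of this header: the generic mm
 * code is what normally drives these helpers, roughly in this order
 * when building a mapping (pud here stands for an already-looked-up
 * upper directory entry):
 *
 *	pgd_t *pgd = pgd_alloc(mm);
 *	pmd_t *pmd = pmd_alloc_one(mm, addr);
 *	pud_populate(mm, pud, pmd);
 *	struct page *pte_page = pte_alloc_one(mm, addr);
 *	pmd_populate(mm, pmd, pte_page);
 *
 * On teardown the matching pte_free()/pmd_free()/pgd_free() calls
 * return the tables to the quicklists.
 */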

#endif /* _SPARC64_PGALLOC_H */