Michal Simek | 6a3cece | 2009-03-27 14:25:37 +0100 | [diff] [blame] | 1 | /* |
Michal Simek | 1f84e1e | 2009-05-26 16:30:17 +0200 | [diff] [blame] | 2 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> |
| 3 | * Copyright (C) 2008-2009 PetaLogix |
Michal Simek | 6a3cece | 2009-03-27 14:25:37 +0100 | [diff] [blame] | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
| 5 | * |
| 6 | * This file is subject to the terms and conditions of the GNU General Public |
| 7 | * License. See the file "COPYING" in the main directory of this archive |
| 8 | * for more details. |
| 9 | */ |
| 10 | |
| 11 | #ifndef _ASM_MICROBLAZE_PGALLOC_H |
| 12 | #define _ASM_MICROBLAZE_PGALLOC_H |
| 13 | |
Michal Simek | 1f84e1e | 2009-05-26 16:30:17 +0200 | [diff] [blame] | 14 | #ifdef CONFIG_MMU |
| 15 | |
| 16 | #include <linux/kernel.h> /* For min/max macros */ |
| 17 | #include <linux/highmem.h> |
| 18 | #include <asm/setup.h> |
| 19 | #include <asm/io.h> |
| 20 | #include <asm/page.h> |
| 21 | #include <asm/cache.h> |
Michal Simek | 79bf3a1 | 2010-01-20 15:17:08 +0100 | [diff] [blame] | 22 | #include <asm/pgtable.h> |
Michal Simek | 1f84e1e | 2009-05-26 16:30:17 +0200 | [diff] [blame] | 23 | |
/* A pgd occupies a single page (order-0 allocation). */
#define PGDIR_ORDER 0

/*
 * This is handled very differently on MicroBlaze since our page tables
 * are all 0's and I want to be able to use these zero'd pages elsewhere
 * as well - it gives us quite a speedup.
 * -- Cort
 */
/*
 * Free-list caches of page-table pages, singly linked through the first
 * word of each cached page.  NOTE(review): no locking is visible here;
 * presumably callers serialize access - confirm against the mm code.
 */
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;	/* head of the free pgd-page list */
	unsigned long *pte_cache;	/* head of the free pte-page list */
	unsigned long pgtable_cache_sz;	/* pages held across both lists */
} quicklists;

#define pgd_quicklist (quicklists.pgd_cache)
#define pmd_quicklist ((unsigned long *)0)	/* no real pmd level */
#define pte_quicklist (quicklists.pte_cache)
#define pgtable_cache_size (quicklists.pgtable_cache_sz)

/* Pre-zeroed page pool and its statistics counters. */
extern unsigned long *zero_cache; /* head linked list of pre-zero'd pages */
extern atomic_t zero_sz; /* # currently pre-zero'd pages */
extern atomic_t zeropage_hits; /* # zero'd pages request that we've done */
extern atomic_t zeropage_calls; /* # zero'd pages request that've been made */
extern atomic_t zerototal; /* # pages zero'd over time */

/* Legacy alias names for the zero-page pool above. */
#define zero_quicklist (zero_cache)
#define zero_cache_sz (zero_sz)
#define zero_cache_calls (zeropage_calls)
#define zero_cache_hits (zeropage_hits)
#define zero_cache_total (zerototal)

/*
 * return a pre-zero'd page from the list,
 * return NULL if none available -- Cort
 */
extern unsigned long get_zero_page_fast(void);

/* Out-of-line report/recovery path for a corrupt pmd entry. */
extern void __bad_pte(pmd_t *pmd);
| 62 | |
Michal Simek | b6db0a5 | 2014-12-18 15:51:30 +0100 | [diff] [blame] | 63 | static inline pgd_t *get_pgd_slow(void) |
Michal Simek | 1f84e1e | 2009-05-26 16:30:17 +0200 | [diff] [blame] | 64 | { |
| 65 | pgd_t *ret; |
| 66 | |
| 67 | ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER); |
| 68 | if (ret != NULL) |
| 69 | clear_page(ret); |
| 70 | return ret; |
| 71 | } |
| 72 | |
Michal Simek | b6db0a5 | 2014-12-18 15:51:30 +0100 | [diff] [blame] | 73 | static inline pgd_t *get_pgd_fast(void) |
Michal Simek | 1f84e1e | 2009-05-26 16:30:17 +0200 | [diff] [blame] | 74 | { |
| 75 | unsigned long *ret; |
| 76 | |
| 77 | ret = pgd_quicklist; |
| 78 | if (ret != NULL) { |
| 79 | pgd_quicklist = (unsigned long *)(*ret); |
| 80 | ret[0] = 0; |
| 81 | pgtable_cache_size--; |
| 82 | } else |
| 83 | ret = (unsigned long *)get_pgd_slow(); |
| 84 | return (pgd_t *)ret; |
| 85 | } |
| 86 | |
Michal Simek | b6db0a5 | 2014-12-18 15:51:30 +0100 | [diff] [blame] | 87 | static inline void free_pgd_fast(pgd_t *pgd) |
Michal Simek | 1f84e1e | 2009-05-26 16:30:17 +0200 | [diff] [blame] | 88 | { |
| 89 | *(unsigned long **)pgd = pgd_quicklist; |
| 90 | pgd_quicklist = (unsigned long *) pgd; |
| 91 | pgtable_cache_size++; |
| 92 | } |
| 93 | |
Michal Simek | b6db0a5 | 2014-12-18 15:51:30 +0100 | [diff] [blame] | 94 | static inline void free_pgd_slow(pgd_t *pgd) |
Michal Simek | 1f84e1e | 2009-05-26 16:30:17 +0200 | [diff] [blame] | 95 | { |
| 96 | free_page((unsigned long)pgd); |
| 97 | } |
| 98 | |
/* pgd alloc/free route through the quicklist fast paths; (mm) is unused. */
#define pgd_free(mm, pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()

/* On this two-level layout a pmd entry maps directly to a pte page. */
#define pmd_pgtable(pmd) pmd_page(pmd)

/*
 * We don't have any real pmd's, and this code never triggers because
 * the pgd will always be present..
 */
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })

/* Slow-path kernel pte-page allocation, implemented out of line. */
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
Michal Simek | 1f84e1e | 2009-05-26 16:30:17 +0200 | [diff] [blame] | 112 | |
| 113 | static inline struct page *pte_alloc_one(struct mm_struct *mm, |
| 114 | unsigned long address) |
| 115 | { |
| 116 | struct page *ptepage; |
| 117 | |
| 118 | #ifdef CONFIG_HIGHPTE |
| 119 | int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT; |
| 120 | #else |
| 121 | int flags = GFP_KERNEL | __GFP_REPEAT; |
| 122 | #endif |
| 123 | |
| 124 | ptepage = alloc_pages(flags, 0); |
Kirill A. Shutemov | 8abe734 | 2013-11-14 14:31:21 -0800 | [diff] [blame] | 125 | if (!ptepage) |
| 126 | return NULL; |
| 127 | clear_highpage(ptepage); |
| 128 | if (!pgtable_page_ctor(ptepage)) { |
| 129 | __free_page(ptepage); |
| 130 | return NULL; |
| 131 | } |
Michal Simek | 1f84e1e | 2009-05-26 16:30:17 +0200 | [diff] [blame] | 132 | return ptepage; |
| 133 | } |
| 134 | |
| 135 | static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, |
| 136 | unsigned long address) |
| 137 | { |
| 138 | unsigned long *ret; |
| 139 | |
| 140 | ret = pte_quicklist; |
| 141 | if (ret != NULL) { |
| 142 | pte_quicklist = (unsigned long *)(*ret); |
| 143 | ret[0] = 0; |
| 144 | pgtable_cache_size--; |
| 145 | } |
| 146 | return (pte_t *)ret; |
| 147 | } |
| 148 | |
Michal Simek | b6db0a5 | 2014-12-18 15:51:30 +0100 | [diff] [blame] | 149 | static inline void pte_free_fast(pte_t *pte) |
Michal Simek | 1f84e1e | 2009-05-26 16:30:17 +0200 | [diff] [blame] | 150 | { |
| 151 | *(unsigned long **)pte = pte_quicklist; |
| 152 | pte_quicklist = (unsigned long *) pte; |
| 153 | pgtable_cache_size++; |
| 154 | } |
| 155 | |
Michal Simek | b6db0a5 | 2014-12-18 15:51:30 +0100 | [diff] [blame] | 156 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) |
Michal Simek | 1f84e1e | 2009-05-26 16:30:17 +0200 | [diff] [blame] | 157 | { |
| 158 | free_page((unsigned long)pte); |
| 159 | } |
| 160 | |
/* Release a pte page's backing struct page directly. */
static inline void pte_free_slow(struct page *ptepage)
{
	__free_page(ptepage);
}
| 165 | |
/* Tear down page-table page state, then free the page itself.
 * The dtor must run before the page is released. */
static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}
| 171 | |
#define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, (pte))

/* Install a user pte page (struct page) into a pmd slot. */
#define pmd_populate(mm, pmd, pte) \
			(pmd_val(*(pmd)) = (unsigned long)page_address(pte))

/* Install a kernel pte (direct pointer) into a pmd slot. */
#define pmd_populate_kernel(mm, pmd, pte) \
		(pmd_val(*(pmd)) = (unsigned long) (pte))

/*
 * We don't have any real pmd's, and this code never triggers because
 * the pgd will always be present..
 * (pmd_alloc_one() is defined once, earlier in this file; the duplicate
 * definition that used to live here has been dropped.)
 */
#define pmd_free(mm, x)			do { } while (0)
#define __pmd_free_tlb(tlb, x, addr)	pmd_free((tlb)->mm, x)
#define pgd_populate(mm, pmd, pte)	BUG()

/* Quicklist trimming hook, implemented out of line.
 * NOTE(review): argument/return semantics not visible here - confirm
 * against the mm-side definition. */
extern int do_check_pgt_cache(int, int);
| 190 | |
| 191 | #endif /* CONFIG_MMU */ |
| 192 | |
/* No generic page-table cache work to do from this hook on MicroBlaze. */
#define check_pgt_cache() do { } while (0)
Michal Simek | 6a3cece | 2009-03-27 14:25:37 +0100 | [diff] [blame] | 194 | |
| 195 | #endif /* _ASM_MICROBLAZE_PGALLOC_H */ |