Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #ifndef __ASM_SH_PGALLOC_H |
| 2 | #define __ASM_SH_PGALLOC_H |
| 3 | |
Paul Mundt | 5f8c990 | 2007-05-08 11:55:21 +0900 | [diff] [blame] | 4 | #include <linux/quicklist.h> |
| 5 | #include <asm/page.h> |
| 6 | |
| 7 | #define QUICK_PGD 0 /* We preserve special mappings over free */ |
| 8 | #define QUICK_PT 1 /* Other page table pages that are zero on free */ |
| 9 | |
Stuart Menefy | 99a596f | 2006-11-21 15:38:05 +0900 | [diff] [blame] | 10 | static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, |
| 11 | pte_t *pte) |
| 12 | { |
| 13 | set_pmd(pmd, __pmd((unsigned long)pte)); |
| 14 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 15 | |
| 16 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, |
Martin Schwidefsky | 2f569af | 2008-02-08 04:22:04 -0800 | [diff] [blame] | 17 | pgtable_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 18 | { |
Stuart Menefy | 99a596f | 2006-11-21 15:38:05 +0900 | [diff] [blame] | 19 | set_pmd(pmd, __pmd((unsigned long)page_address(pte))); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 20 | } |
Martin Schwidefsky | 2f569af | 2008-02-08 04:22:04 -0800 | [diff] [blame] | 21 | #define pmd_pgtable(pmd) pmd_page(pmd) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 22 | |
Paul Mundt | 5f8c990 | 2007-05-08 11:55:21 +0900 | [diff] [blame] | 23 | static inline void pgd_ctor(void *x) |
| 24 | { |
| 25 | pgd_t *pgd = x; |
| 26 | |
| 27 | memcpy(pgd + USER_PTRS_PER_PGD, |
| 28 | swapper_pg_dir + USER_PTRS_PER_PGD, |
| 29 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); |
| 30 | } |
| 31 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 32 | /* |
| 33 | * Allocate and free page tables. |
| 34 | */ |
| 35 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) |
| 36 | { |
Paul Mundt | 5f8c990 | 2007-05-08 11:55:21 +0900 | [diff] [blame] | 37 | return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 38 | } |
| 39 | |
Benjamin Herrenschmidt | 5e54197 | 2008-02-04 22:29:14 -0800 | [diff] [blame] | 40 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 41 | { |
Paul Mundt | 5f8c990 | 2007-05-08 11:55:21 +0900 | [diff] [blame] | 42 | quicklist_free(QUICK_PGD, NULL, pgd); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 43 | } |
| 44 | |
| 45 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
| 46 | unsigned long address) |
| 47 | { |
Paul Mundt | 1039b9a | 2007-05-08 12:07:21 +0900 | [diff] [blame] | 48 | return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 49 | } |
| 50 | |
Martin Schwidefsky | 2f569af | 2008-02-08 04:22:04 -0800 | [diff] [blame] | 51 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm, |
| 52 | unsigned long address) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 53 | { |
Martin Schwidefsky | 2f569af | 2008-02-08 04:22:04 -0800 | [diff] [blame] | 54 | struct page *page; |
| 55 | void *pg; |
| 56 | |
| 57 | pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL); |
| 58 | if (!pg) |
| 59 | return NULL; |
| 60 | page = virt_to_page(pg); |
| 61 | pgtable_page_ctor(page); |
| 62 | return page; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 63 | } |
| 64 | |
/* Free a kernel PTE page back to the QUICK_PT quicklist (zeroed on free). */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	quicklist_free(QUICK_PT, NULL, pte);
}
| 69 | |
/* Free a user PTE page back to the QUICK_PT quicklist. */
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	/* The page destructor must run before the page is recycled. */
	pgtable_page_dtor(pte);
	quicklist_free_page(QUICK_PT, NULL, pte);
}
| 75 | |
/*
 * Free a user PTE page via the mmu_gather path. The page destructor has
 * to run here, because tlb_remove_page() releases the page without going
 * through pte_free().
 */
#define __pte_free_tlb(tlb,pte) \
do { \
	pgtable_page_dtor(pte); \
	tlb_remove_page((tlb), (pte)); \
} while (0)
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 81 | |
| 82 | /* |
| 83 | * allocating and freeing a pmd is trivial: the 1-entry pmd is |
| 84 | * inside the pgd, so has no extra memory associated with it. |
| 85 | */ |
| 86 | |
Benjamin Herrenschmidt | 5e54197 | 2008-02-04 22:29:14 -0800 | [diff] [blame] | 87 | #define pmd_free(mm, x) do { } while (0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 88 | #define __pmd_free_tlb(tlb,x) do { } while (0) |
Paul Mundt | 5f8c990 | 2007-05-08 11:55:21 +0900 | [diff] [blame] | 89 | |
/*
 * Trim both quicklists back toward a sane size.
 * NOTE(review): 25/16 appear to be quicklist_trim()'s min-pages-to-keep
 * and max-pages-to-free-per-call tuning values — confirm against
 * <linux/quicklist.h>.
 */
static inline void check_pgt_cache(void)
{
	quicklist_trim(QUICK_PGD, NULL, 25, 16);
	quicklist_trim(QUICK_PT, NULL, 25, 16);
}
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 95 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 96 | #endif /* __ASM_SH_PGALLOC_H */ |