#ifndef __ASM_SH_PGALLOC_H
#define __ASM_SH_PGALLOC_H

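/*
 * Populate a pmd entry with a freshly allocated pte page for a kernel
 * mapping.  On sh the pmd entry holds the kernel virtual address of
 * the pte page directly, so this is a single set_pmd().
 */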
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
                                       pte_t *pte)
{
        set_pmd(pmd, __pmd((unsigned long)pte));
}

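/*
 * Same as above for a userspace pte page, which arrives as a
 * struct page; page_address() recovers its kernel virtual address
 * before it is written into the pmd.
 */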
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                struct page *pte)
{
        set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}

/*
 * Allocate and free page tables.
 */
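/*
 * pgd_alloc() grabs one page for the new pgd, clears the userspace
 * slots, and copies the kernel slots from swapper_pg_dir so that
 * every process shares the kernel's mappings.
 */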
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);

        if (pgd) {
                memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
                memcpy(pgd + USER_PTRS_PER_PGD,
                       swapper_pg_dir + USER_PTRS_PER_PGD,
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }

        return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}

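/*
 * pte tables are one page each.  __GFP_ZERO hands back an already
 * cleared page, and __GFP_REPEAT asks the allocator to try harder
 * before failing (later kernels renamed this flag
 * __GFP_RETRY_MAYFAIL).
 */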
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
{
        return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}

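/*
 * Same allocation as above, but returned as a struct page for
 * mapping into a user address space via pmd_populate().
 */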
static inline struct page *pte_alloc_one(struct mm_struct *mm,
                                         unsigned long address)
{
        return alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}

static inline void pte_free_kernel(pte_t *pte)
{
        free_page((unsigned long)pte);
}

static inline void pte_free(struct page *pte)
{
        __free_page(pte);
}

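/*
 * When tearing down page tables under an mmu_gather, the pte page
 * must not be freed until the relevant TLB entries have been
 * flushed; tlb_remove_page() defers the free accordingly.
 */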
#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))

/*
 * Allocating and freeing a pmd is trivial: the one-entry pmd lives
 * inside the pgd, so it has no extra memory associated with it.
 */

#define pmd_free(x) do { } while (0)
#define __pmd_free_tlb(tlb,x) do { } while (0)
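/* sh keeps no page-table quicklist cache, so there is nothing to trim. */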
#define check_pgt_cache() do { } while (0)

#endif /* __ASM_SH_PGALLOC_H */