/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: June 2011
 *  -"/proc/meminfo | grep PageTables" kept on increasing
 *   Recently added pgtable dtor was not getting called.
 *
 * vineetg: May 2011
 *  -Variable pg-sz means that Page Tables could be variable sized themselves
 *    So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
 *  -Page Table size capped to max 1 page to save memory - hence verified.
 *  -Since these deal with constants, gcc compile-time optimizes them.
 *
 * vineetg: Nov 2010
 *  -Added pgtable ctor/dtor used for pgtable mem accounting
 *
 * vineetg: April 2010
 *  -Switched pgtable_t from being struct page * to unsigned long
 *  =Needed so that Page Table allocator (pte_alloc_one) is not forced to
 *   deal with struct page. That way in future we can make it allocate
 *   multiple PG Tbls in one Page Frame
 *  =sweet side effect is avoiding calls to ugly page_address() from the
 *   pg-tlb allocator sub-sys (pte_alloc_one, pte_free, pmd_populate)
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGALLOC_H
#define _ASM_ARC_PGALLOC_H

#include <linux/mm.h>
#include <linux/log2.h>

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
	pmd_set(pmd, pte);
}

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
{
	pmd_set(pmd, (pte_t *) ptep);
}

static inline int __get_order_pgd(void)
{
	return get_order(PTRS_PER_PGD * sizeof(pgd_t));
}

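/*
 * Allocating a PGD for a new mm: the user portion (plus the user<->kernel
 * gutter) is zeroed, the entries covering the vmalloc region are copied
 * from swapper_pg_dir so vmalloc mappings are shared with the kernel's
 * master table, and any remaining entries are zeroed as well.
 */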
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int num, num2;
	pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());

	if (ret) {
		num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
		memzero(ret, num * sizeof(pgd_t));

		num2 = VMALLOC_SIZE / PGDIR_SIZE;
		memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));

		memzero(ret + num + num2,
			(PTRS_PER_PGD - num - num2) * sizeof(pgd_t));

	}
	return ret;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_pages((unsigned long)pgd, __get_order_pgd());
}


/*
 * With software-only page-tables, addr-split for traversal is tweakable and
 * that directly governs how big tables would be at each level.
 * Further, the MMU page size is configurable.
 * Thus we need to programmatically assert the size constraint.
 * All of this is const math, allowing gcc to do constant folding/propagation.
 */
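/*
 * Illustrative arithmetic only: the figures below assume a hypothetical 8K
 * page, a [11 pgd : 8 pte : 13 offset] address split and 4-byte pte_t/pgd_t;
 * the real values depend on Kconfig.
 *
 *	PTRS_PER_PTE = 1 << 8  = 256;  256 * sizeof(pte_t)  = 1KB -> order 0
 *	PTRS_PER_PGD = 1 << 11 = 2048; 2048 * sizeof(pgd_t) = 8KB -> order 0
 *
 * Since every operand is a compile-time constant, gcc folds
 * __get_order_pte()/__get_order_pgd() down to the literal order.
 */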

static inline int __get_order_pte(void)
{
	return get_order(PTRS_PER_PTE * sizeof(pte_t));
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					unsigned long address)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
					 __get_order_pte());

	return pte;
}

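/*
 * Userspace variant: besides allocating and zeroing the table, the backing
 * page is run through pgtable_page_ctor() so the generic mm code can do
 * page-table accounting (see the Nov 2010 / June 2011 notes above);
 * pte_free() undoes this via pgtable_page_dtor().
 */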
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pgtable_t pte_pg;
	struct page *page;

	pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
	if (!pte_pg)
		return 0;
	memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
	page = virt_to_page(pte_pg);
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return 0;
	}

	return pte_pg;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_pages((unsigned long)pte, __get_order_pte());	/* takes a kernel virtual addr */
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
	pgtable_page_dtor(virt_to_page(ptep));
	free_pages((unsigned long)ptep, __get_order_pte());
}

#define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)

#define check_pgt_cache()   do { } while (0)
#define pmd_pgtable(pmd)	((pgtable_t) pmd_page_vaddr(pmd))

#endif /* _ASM_ARC_PGALLOC_H */
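
/*
 * pgtable_t on ARC is an unsigned long holding the table's kernel vaddr
 * (see the April 2010 note above), so pmd_pgtable() hands back that vaddr
 * rather than a struct page.
 */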