/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

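/*
 * A memory_segment describes one physically contiguous range that has
 * been added to the identity mapping. All segments are kept on the
 * mem_segs list, which is protected by vmem_mutex.
 */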
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

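/*
 * Allocate pages for a page table: from the buddy allocator once slab
 * is up, from bootmem before that. Marked __ref because the reference
 * to the __init bootmem allocator is only taken early during boot.
 */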
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

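/*
 * A region-third table on 64-bit s390 has 2048 eight-byte entries,
 * i.e. four pages, hence the order-2 allocation below. On 31-bit the
 * upper translation levels are folded and no pud table exists, so
 * NULL is returned there.
 */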
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

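/*
 * Segment tables have the same geometry as region tables: 2048
 * entries in four pages, cleared to the empty segment entry.
 */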
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

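/*
 * A page table holds 256 entries (2KB). Once slab is available,
 * page_table_alloc() hands out properly aligned 2KB fragments; before
 * that, a suitably aligned chunk is taken from bootmem.
 */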
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
					  PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
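		/*
		 * With the EDAT2 facility a 2GB range can be mapped by a
		 * single large region-third entry, provided the address
		 * is 2GB aligned and the whole range fits. Address 0 is
		 * excluded, so the very first chunk always keeps the
		 * finer-grained mappings. Large mappings are also skipped
		 * with CONFIG_DEBUG_PAGEALLOC, which wants per-page
		 * granularity.
		 */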
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
			pud_val(*pu_dir) = __pa(address) |
				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
				(ro ? _REGION_ENTRY_PROTECT : 0);
			address += PUD_SIZE;
			continue;
		}
#endif
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
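		/* Likewise, EDAT1 allows 1MB mappings at segment level. */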
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pmd_val(*pm_dir) = __pa(address) |
				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
				_SEGMENT_ENTRY_YOUNG |
				(ro ? _SEGMENT_ENTRY_PROTECT : 0);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = __pa(address) |
			pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently this only invalidates the entries; the page tables backing
 * the range are not freed.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID;
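	/*
	 * Walk the tables top-down: skip holes at each level, clear large
	 * pud/pmd mappings with a single operation, and invalidate
	 * everything else page by page.
	 */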
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
#ifdef CONFIG_64BIT
			/*
			 * Use 1MB frames for vmemmap if available. We
			 * always use large frames, even if they are only
			 * partially used; otherwise we would also end up
			 * with page tables, since vmemmap_populate gets
			 * called for each section separately.
			 */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
#endif
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) =
				__pa(new_page) | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

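/*
 * Freeing of the vmemmap backing store is not implemented here; the
 * mapping set up by vmemmap_populate() simply stays around.
 */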
void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

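	/*
	 * Reject segments that extend beyond VMEM_MAX_PHYS; the second
	 * check catches start + size wrapping around zero.
	 */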
	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

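/*
 * Entry point for external users (for instance the DCSS segment
 * driver): register the range and create its 1:1 mapping.
 */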
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole of physical memory into virtual memory (identity
 * mapping). Enough space is reserved in the vmalloc area for the
 * vmemmap to allow hotplugging additional memory segments later.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	struct memblock_region *reg;
	phys_addr_t start, end;

	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
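	/*
	 * Kernel text and read-only data, from _stext up to _eshared,
	 * are mapped read-only. Each memblock region is split as needed
	 * around that window and mapped with the matching protection.
	 */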
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = reg->base + reg->size - 1;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);