/*
 * Provide common bits of early_ioremap() support for architectures needing
 * temporary mappings during boot before ioremap() is available.
 *
 * This is mostly a direct copy of the x86 early_ioremap implementation.
 *
 * (C) Copyright 1995, 1996, 2014 Linus Torvalds
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/fixmap.h>

#ifdef CONFIG_MMU
static int early_ioremap_debug __initdata;

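/*
 * Enabled by passing "early_ioremap_debug" on the kernel command line;
 * when set, each early mapping and unmapping is reported via WARN(),
 * backtrace included.
 */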
static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static int after_paging_init __initdata;

void __init __weak early_ioremap_shutdown(void)
{
}

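/*
 * Arch code calls this once paging_init() has run; from then on, any
 * remaining early mappings are created and torn down through the
 * __late_{set,clear}_fixmap() hooks below.
 */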
void __init early_ioremap_reset(void)
{
	early_ioremap_shutdown();
	after_paging_init = 1;
}

/*
 * Generally, ioremap() is available after paging_init() has been called.
 * Architectures wanting to allow early_ioremap after paging_init() can
 * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
 */
#ifndef __late_set_fixmap
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
					    phys_addr_t phys, pgprot_t prot)
{
	BUG();
}
#endif

#ifndef __late_clear_fixmap
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
	BUG();
}
#endif
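
/*
 * A minimal sketch of what an architecture might provide, assuming it
 * already has a generic __set_fixmap() that works once paging_init() is
 * done (exact names and prototypes are per-arch; treat this as
 * illustrative only):
 *
 *	#define __late_set_fixmap(idx, phys, prot) __set_fixmap(idx, phys, prot)
 *	#define __late_clear_fixmap(idx) __set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR)
 */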

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

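/*
 * Called once from arch setup code before the first early_ioremap();
 * precomputes the fixed virtual address backing each BTMAP slot.
 */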
void __init early_ioremap_setup(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (WARN_ON(prev_map[i]))
			break;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
}

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (WARN(count, KERN_WARNING
		 "Debug warning: early ioremap leak of %d areas detected.\n"
		 "Please boot with early_ioremap_debug and report the dmesg.\n",
		 count))
		return 1;
	return 0;
}
late_initcall(check_early_ioremap_leak);

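/*
 * Grab a free BTMAP slot and wire @size bytes at @phys_addr into its
 * fixmap entries, returning the slot's virtual address plus the sub-page
 * offset of the original physical address.
 */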
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

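	/* Find the first unused slot. */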
	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%08llx, %08lx): no free slot found\n",
		 __func__, (u64)phys_addr, size))
		return NULL;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (WARN_ON(!size || last_addr < phys_addr))
		return NULL;

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (WARN_ON(nrpages > NR_FIX_BTMAPS))
		return NULL;

	/*
	 * Ok, go for it. Report the mapping before the loop below advances
	 * phys_addr past the end of the range.
	 */
	WARN(early_ioremap_debug, "%s(%08llx, %08lx) [%d] => %08lx + %08lx\n",
	     __func__, (u64)phys_addr, size, slot, offset, slot_virt[slot]);

	/* Decreasing fixmap indices map to increasing virtual addresses. */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_set_fixmap(idx, phys_addr, prot);
		else
			__early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

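/*
 * Tear down a mapping returned by __early_ioremap(); @addr and @size must
 * match the values passed to the original mapping call.
 */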
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

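	/* Find the slot this mapping came from. */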
	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "early_iounmap(%p, %08lx): slot not found\n",
		 addr, size))
		return;

	if (WARN(prev_size[slot] != size,
		 "early_iounmap(%p, %08lx) [%d]: size mismatch, mapped %08lx\n",
		 addr, size, slot, prev_size[slot]))
		return;

	WARN(early_ioremap_debug, "early_iounmap(%p, %08lx) [%d]\n",
	     addr, size, slot);

	virt_addr = (unsigned long)addr;
	if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
		return;

	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_clear_fixmap(idx);
		else
			__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void *)__early_ioremap(phys_addr, size,
					       FIXMAP_PAGE_NORMAL);
}
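
/*
 * A hypothetical caller, pulling a firmware table out of a physical
 * address discovered during boot (the names here are illustrative only):
 *
 *	struct fw_table *tbl;
 *
 *	tbl = early_memremap(fw_table_phys, sizeof(*tbl));
 *	if (tbl) {
 *		parse_fw_table(tbl);
 *		early_memunmap(tbl, sizeof(*tbl));
 *	}
 *
 * Every early mapping must be handed back via early_iounmap() or
 * early_memunmap() before the late initcall leak check runs.
 */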

#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)

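/*
 * Copy a physical range into an already-mapped destination buffer using a
 * sequence of temporary early mappings, at most MAX_MAP_CHUNK bytes (one
 * full BTMAP slot) at a time; @src does not need to be page-aligned.
 */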
void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
{
	unsigned long slop, clen;
	char *p;

	while (size) {
		slop = src & ~PAGE_MASK;
		clen = size;
		if (clen > MAX_MAP_CHUNK - slop)
			clen = MAX_MAP_CHUNK - slop;
		p = early_memremap(src & PAGE_MASK, clen + slop);
		memcpy(dest, p + slop, clen);
		early_memunmap(p, clen + slop);
		dest += clen;
		src += clen;
		size -= clen;
	}
}
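
/*
 * For example, an architecture might use this to copy boot data such as
 * an initrd out of memory the linear mapping does not cover yet (the
 * names below are illustrative only):
 *
 *	copy_from_early_mem(initrd_buf, initrd_start_phys, initrd_size);
 */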

#else /* CONFIG_MMU */

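/*
 * Without an MMU there is nothing to map: physical addresses are usable
 * directly, so the "mappings" below are plain casts and the unmap is a
 * no-op.
 */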
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void __iomem *)phys_addr;
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}

#endif /* CONFIG_MMU */

void __init early_memunmap(void *addr, unsigned long size)
{
	early_iounmap((__force void __iomem *)addr, size);
}