/*
 * linux/arch/m68k/mm/kmap.c
 *
 * Copyright (C) 1997 Roman Hodek
 *
 * 10/01/99 cleaned up the code and changed to the same interface
 *	    used by other architectures		/Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>

#undef DEBUG

#define PTRTREESIZE (256*1024)

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */

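/*
 * On the 020/030 an early termination page descriptor placed directly in
 * a pointer-table slot maps a whole 256 KiB (PTRTREESIZE) region with a
 * single entry, instead of pointing at a table of page descriptors.
 */
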
#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
        return get_vm_area(size, VM_IOREMAP);
}

static inline void free_io_area(void *addr)
{
        vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE (256*1024)

/* sorted list of areas handed out from the KMAP_START..KMAP_END window */
static struct vm_struct *iolist;

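/*
 * Carve a virtual range out of the fixed KMAP_START..KMAP_END window.
 * iolist is kept sorted by address, and the new area is slotted into the
 * first hole big enough for size plus an IO_SIZE guard gap.
 */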
static struct vm_struct *get_io_area(unsigned long size)
{
        unsigned long addr;
        struct vm_struct **p, *tmp, *area;

        area = kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;
        addr = KMAP_START;
        for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
                if (size + addr < (unsigned long)tmp->addr)
                        break;
                if (addr > KMAP_END-size) {
                        kfree(area);
                        return NULL;
                }
                addr = tmp->size + (unsigned long)tmp->addr;
        }
        area->addr = (void *)addr;
        area->size = size + IO_SIZE;    /* leave an IO_SIZE gap behind the area */
        area->next = *p;
        *p = area;
        return area;
}

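/*
 * Return an area obtained with get_io_area() to the pool and unmap it,
 * minus the IO_SIZE guard gap that was added at allocation time.
 */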
static inline void free_io_area(void *addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        addr = (void *)((unsigned long)addr & -IO_SIZE);
        for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
                        /* remove gap added in get_io_area() */
                        __iounmap(tmp->addr, tmp->size - IO_SIZE);
                        kfree(tmp);
                        return;
                }
        }
}

#endif

/*
 * Map some physical address range into the kernel address space.
 * Rewritten by Andreas Schwab to remove all races.
 */

void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
        struct vm_struct *area;
        unsigned long virtaddr, retaddr;
        long offset;
        pgd_t *pgd_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        /*
         * Don't allow mappings that wrap..
         */
        if (!size || physaddr > (unsigned long)(-size))
                return NULL;

#ifdef CONFIG_AMIGA
        if (MACH_IS_AMIGA) {
                /*
                 * This range is already mapped 1:1 and uncached on Amiga,
                 * so the physical address can be handed back directly.
                 */
                if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
                    && (cacheflag == IOMAP_NOCACHE_SER))
                        return (void __iomem *)physaddr;
        }
#endif

#ifdef DEBUG
        printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
        /*
         * Mappings have to be aligned
         */
        offset = physaddr & (IO_SIZE - 1);
        physaddr &= -IO_SIZE;
        size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

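        /*
         * Worked example (020/030, IO_SIZE = 0x40000): physaddr 0x00f02100
         * with size 0x300 becomes physaddr 0x00f00000, offset 0x2100 and
         * size 0x40000; the caller gets the mapped base plus offset back.
         */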
        /*
         * Ok, go for it..
         */
        area = get_io_area(size);
        if (!area)
                return NULL;

        virtaddr = (unsigned long)area->addr;
        retaddr = virtaddr + offset;
#ifdef DEBUG
        printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

        /*
         * add cache and table flags to physical address
         */
        if (CPU_IS_040_OR_060) {
                physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
                             _PAGE_ACCESSED | _PAGE_DIRTY);
                switch (cacheflag) {
                case IOMAP_FULL_CACHING:
                        physaddr |= _PAGE_CACHE040;
                        break;
                case IOMAP_NOCACHE_SER:
                default:
                        physaddr |= _PAGE_NOCACHE_S;
                        break;
                case IOMAP_NOCACHE_NONSER:
                        physaddr |= _PAGE_NOCACHE;
                        break;
                case IOMAP_WRITETHROUGH:
                        physaddr |= _PAGE_CACHE040W;
                        break;
                }
        } else {
                physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
                             _PAGE_DIRTY | _PAGE_READWRITE);
                switch (cacheflag) {
                case IOMAP_NOCACHE_SER:
                case IOMAP_NOCACHE_NONSER:
                default:
                        physaddr |= _PAGE_NOCACHE030;
                        break;
                case IOMAP_FULL_CACHING:
                case IOMAP_WRITETHROUGH:
                        break;
                }
        }

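        /*
         * Map the range: on 020/030 in 256 KiB early termination chunks,
         * on 040/060 one page at a time through real page tables.
         */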
        while ((long)size > 0) {
#ifdef DEBUG
                if (!(virtaddr & (PTRTREESIZE-1)))
                        printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
                pgd_dir = pgd_offset_k(virtaddr);
                pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
                if (!pmd_dir) {
                        printk("ioremap: no mem for pmd_dir\n");
                        return NULL;
                }

                if (CPU_IS_020_OR_030) {
                        /* install an early termination page descriptor */
                        pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
                        physaddr += PTRTREESIZE;
                        virtaddr += PTRTREESIZE;
                        size -= PTRTREESIZE;
                } else {
                        pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
                        if (!pte_dir) {
                                printk("ioremap: no mem for pte_dir\n");
                                return NULL;
                        }

                        pte_val(*pte_dir) = physaddr;
                        virtaddr += PAGE_SIZE;
                        physaddr += PAGE_SIZE;
                        size -= PAGE_SIZE;
                }
        }
#ifdef DEBUG
        printk("\n");
#endif
        flush_tlb_all();

        return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);

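/*
 * Usage sketch (hypothetical device base and size; most drivers go
 * through the ioremap() wrappers in <asm/io.h> instead):
 *
 *	void __iomem *regs = __ioremap(0x00f00000, 0x10000, IOMAP_NOCACHE_SER);
 *
 *	if (regs) {
 *		... access the registers through regs ...
 *		iounmap(regs);
 *	}
 */
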
/*
 * Unmap an ioremap()ed region again
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
        /* leave the transparently mapped Amiga range alone */
        if ((!MACH_IS_AMIGA) ||
            (((unsigned long)addr < 0x40000000) ||
             ((unsigned long)addr > 0x60000000)))
                free_io_area((__force void *)addr);
#else
        free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);

/*
 * __iounmap unmaps nearly everything, so be careful.
 * Currently it doesn't free pointer/page tables anymore, but that
 * wasn't used anyway and might be added later.
 */
void __iounmap(void *addr, unsigned long size)
{
        unsigned long virtaddr = (unsigned long)addr;
        pgd_t *pgd_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        while ((long)size > 0) {
                pgd_dir = pgd_offset_k(virtaddr);
                if (pgd_bad(*pgd_dir)) {
                        printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
                        pgd_clear(pgd_dir);
                        return;
                }
                pmd_dir = pmd_offset(pgd_dir, virtaddr);

                if (CPU_IS_020_OR_030) {
                        int pmd_off = (virtaddr/PTRTREESIZE) & 15;
                        int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;

                        if (pmd_type == _PAGE_PRESENT) {
                                /* early termination descriptor: clear 256 KiB at once */
                                pmd_dir->pmd[pmd_off] = 0;
                                virtaddr += PTRTREESIZE;
                                size -= PTRTREESIZE;
                                continue;
                        } else if (pmd_type == 0) {
                                /* invalid descriptor: nothing mapped here, skip it */
                                virtaddr += PTRTREESIZE;
                                size -= PTRTREESIZE;
                                continue;
                        }
                }

                if (pmd_bad(*pmd_dir)) {
                        printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
                        pmd_clear(pmd_dir);
                        return;
                }
                pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

                pte_val(*pte_dir) = 0;
                virtaddr += PAGE_SIZE;
                size -= PAGE_SIZE;
        }

        flush_tlb_all();
}

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
        unsigned long virtaddr = (unsigned long)addr;
        pgd_t *pgd_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        if (CPU_IS_040_OR_060) {
                switch (cmode) {
                case IOMAP_FULL_CACHING:
                        cmode = _PAGE_CACHE040;
                        break;
                case IOMAP_NOCACHE_SER:
                default:
                        cmode = _PAGE_NOCACHE_S;
                        break;
                case IOMAP_NOCACHE_NONSER:
                        cmode = _PAGE_NOCACHE;
                        break;
                case IOMAP_WRITETHROUGH:
                        cmode = _PAGE_CACHE040W;
                        break;
                }
        } else {
                switch (cmode) {
                case IOMAP_NOCACHE_SER:
                case IOMAP_NOCACHE_NONSER:
                default:
                        cmode = _PAGE_NOCACHE030;
                        break;
                case IOMAP_FULL_CACHING:
                case IOMAP_WRITETHROUGH:
                        cmode = 0;
                }
        }

        while ((long)size > 0) {
                pgd_dir = pgd_offset_k(virtaddr);
                if (pgd_bad(*pgd_dir)) {
                        printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
                        pgd_clear(pgd_dir);
                        return;
                }
                pmd_dir = pmd_offset(pgd_dir, virtaddr);

                if (CPU_IS_020_OR_030) {
                        int pmd_off = (virtaddr/PTRTREESIZE) & 15;

                        if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
                                /* update the early termination descriptor in place */
                                pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
                                                         _CACHEMASK040) | cmode;
                                virtaddr += PTRTREESIZE;
                                size -= PTRTREESIZE;
                                continue;
                        }
                }

                if (pmd_bad(*pmd_dir)) {
                        printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
                        pmd_clear(pmd_dir);
                        return;
                }
                pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

                pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
                virtaddr += PAGE_SIZE;
                size -= PAGE_SIZE;
        }

        flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);
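
/*
 * Usage sketch (hypothetical buffer): switch one page of an existing
 * kernel mapping to writethrough caching, pushing dirty data first as
 * the comment above requires.
 *
 *	cache_push(virt_to_phys(buf), PAGE_SIZE);
 *	kernel_set_cachemode(buf, PAGE_SIZE, IOMAP_WRITETHROUGH);
 */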