/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/mm/ioremap.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly
 * derived from arch/i386/mm/ioremap.c .
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
16#include <linux/kernel.h>
17#include <linux/slab.h>
18#include <linux/vmalloc.h>
19#include <linux/sched.h>
20#include <linux/string.h>
Haavard Skinnemoen10731b82006-12-08 02:38:08 -080021#include <linux/io.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <asm/pgalloc.h>
23#include <asm/tlbflush.h>
24#include <linux/ioport.h>
25#include <linux/bootmem.h>
26#include <linux/proc_fs.h>
27
28static void shmedia_mapioaddr(unsigned long, unsigned long);
29static unsigned long shmedia_ioremap(struct resource *, u32, int);
30
Linus Torvalds1da177e2005-04-16 15:20:36 -070031/*
32 * Generic mapping function (not visible outside):
33 */
34
35/*
36 * Remap an arbitrary physical address space into the kernel virtual
37 * address space. Needed when the kernel wants to access high addresses
38 * directly.
39 *
40 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
41 * have to convert them into an offset in a page-aligned mapping, but the
42 * caller shouldn't need to know that small detail.
43 */
44void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
45{
46 void * addr;
47 struct vm_struct * area;
48 unsigned long offset, last_addr;
Haavard Skinnemoen10731b82006-12-08 02:38:08 -080049 pgprot_t pgprot;
Linus Torvalds1da177e2005-04-16 15:20:36 -070050
51 /* Don't allow wraparound or zero size */
52 last_addr = phys_addr + size - 1;
53 if (!size || last_addr < phys_addr)
54 return NULL;
55
Haavard Skinnemoen10731b82006-12-08 02:38:08 -080056 pgprot = __pgprot(_PAGE_PRESENT | _PAGE_READ |
57 _PAGE_WRITE | _PAGE_DIRTY |
58 _PAGE_ACCESSED | _PAGE_SHARED | flags);
59
Linus Torvalds1da177e2005-04-16 15:20:36 -070060 /*
61 * Mappings have to be page-aligned
62 */
63 offset = phys_addr & ~PAGE_MASK;
64 phys_addr &= PAGE_MASK;
65 size = PAGE_ALIGN(last_addr + 1) - phys_addr;
66
67 /*
68 * Ok, go for it..
69 */
70 area = get_vm_area(size, VM_IOREMAP);
71 pr_debug("Get vm_area returns %p addr %p\n",area,area->addr);
72 if (!area)
73 return NULL;
74 area->phys_addr = phys_addr;
75 addr = area->addr;
Haavard Skinnemoen10731b82006-12-08 02:38:08 -080076 if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
77 phys_addr, pgprot)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070078 vunmap(addr);
79 return NULL;
80 }
81 return (void *) (offset + (char *)addr);
82}
83
84void iounmap(void *addr)
85{
86 struct vm_struct *area;
87
88 vfree((void *) (PAGE_MASK & (unsigned long) addr));
89 area = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr));
90 if (!area) {
91 printk(KERN_ERR "iounmap: bad address %p\n", addr);
92 return;
93 }
94
95 kfree(area);
96}
97
/*
 * Resource tree covering the on-chip I/O window.  The mini-allocator
 * below (shmedia_alloc_io/shmedia_ioremap) carves page-aligned ranges
 * out of [IOBASE_VADDR + PAGE_SIZE, IOBASE_END) for early peripherals.
 */
static struct resource shmedia_iomap = {
	.name = "shmedia_iomap",
	.start = IOBASE_VADDR + PAGE_SIZE,
	.end = IOBASE_END - 1,
};
103
104static void shmedia_mapioaddr(unsigned long pa, unsigned long va);
105static void shmedia_unmapioaddr(unsigned long vaddr);
106static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz);
107
108/*
109 * We have the same problem as the SPARC, so lets have the same comment:
110 * Our mini-allocator...
111 * Boy this is gross! We need it because we must map I/O for
112 * timers and interrupt controller before the kmalloc is available.
113 */
114
#define XNMLN 15	/* max stored resource-name length (excl. NUL) */
#define XNRES 10	/* number of static bootstrap slots */

/*
 * One entry of the static bootstrap pool: a resource plus inline
 * storage for its name, usable before kmalloc is up.
 */
struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

/* The static pool itself; see xres_alloc()/xres_free(). */
static struct xresource xresv[XNRES];
125
126static struct xresource *xres_alloc(void)
127{
128 struct xresource *xrp;
129 int n;
130
131 xrp = xresv;
132 for (n = 0; n < XNRES; n++) {
133 if (xrp->xflag == 0) {
134 xrp->xflag = 1;
135 return xrp;
136 }
137 xrp++;
138 }
139 return NULL;
140}
141
/* Return a pool entry to the free state so xres_alloc() can reuse it. */
static void xres_free(struct xresource *xrp)
{
	xrp->xflag = 0;
}
146
147static struct resource *shmedia_find_resource(struct resource *root,
148 unsigned long vaddr)
149{
150 struct resource *res;
151
152 for (res = root->child; res; res = res->sibling)
153 if (res->start <= vaddr && res->end >= vaddr)
154 return res;
155
156 return NULL;
157}
158
159static unsigned long shmedia_alloc_io(unsigned long phys, unsigned long size,
160 const char *name)
161{
162 static int printed_full = 0;
163 struct xresource *xres;
164 struct resource *res;
165 char *tack;
166 int tlen;
167
168 if (name == NULL) name = "???";
169
170 if ((xres = xres_alloc()) != 0) {
171 tack = xres->xname;
172 res = &xres->xres;
173 } else {
174 if (!printed_full) {
175 printk("%s: done with statics, switching to kmalloc\n",
176 __FUNCTION__);
177 printed_full = 1;
178 }
179 tlen = strlen(name);
180 tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
181 if (!tack)
182 return -ENOMEM;
183 memset(tack, 0, sizeof(struct resource));
184 res = (struct resource *) tack;
185 tack += sizeof (struct resource);
186 }
187
188 strncpy(tack, name, XNMLN);
189 tack[XNMLN] = 0;
190 res->name = tack;
191
192 return shmedia_ioremap(res, phys, size);
193}
194
/*
 * Carve a page-aligned range for @res out of the shmedia_iomap window
 * and map @sz bytes starting at physical @pa into it, one page at a
 * time, via shmedia_mapioaddr().  Returns the virtual address
 * corresponding to @pa (including its sub-page offset).  Panics if the
 * window cannot satisfy the request — this runs early, for devices the
 * kernel cannot do without.
 */
static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz)
{
	/* Sub-page offset of the physical address, re-applied at the end. */
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
	/* Request size rounded up to whole pages (offset included). */
	unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	unsigned int psz;

	if (allocate_resource(&shmedia_iomap, res, round_sz,
			      shmedia_iomap.start, shmedia_iomap.end,
			      PAGE_SIZE, NULL, NULL) != 0) {
		panic("alloc_io_res(%s): cannot occupy\n",
		    (res->name != NULL)? res->name: "???");
	}

	va = res->start;
	pa &= PAGE_MASK;

	/* Page count, for the boot-time log line only. */
	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;

	/* log at boot time ... */
	printk("mapioaddr: %6s [%2d page%s] va 0x%08lx pa 0x%08x\n",
	       ((res->name != NULL) ? res->name : "???"),
	       psz, psz == 1 ? " " : "s", va, pa);

	/* Map each page of the rounded range; psz is reused as a byte count. */
	for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
		shmedia_mapioaddr(pa, va);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	}

	/* Narrow the recorded resource back to the caller's exact range. */
	res->start += offset;
	res->end = res->start + sz - 1;	/* not strictly necessary.. */

	return res->start;
}
230
231static void shmedia_free_io(struct resource *res)
232{
233 unsigned long len = res->end - res->start + 1;
234
235 BUG_ON((len & (PAGE_SIZE - 1)) != 0);
236
237 while (len) {
238 len -= PAGE_SIZE;
239 shmedia_unmapioaddr(res->start + len);
240 }
241
242 release_resource(res);
243}
244
Paul Mundtfad9e7d2007-07-20 17:46:42 +0900245static __init_refok void *sh64_get_page(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700246{
247 extern int after_bootmem;
248 void *page;
249
250 if (after_bootmem) {
251 page = (void *)get_zeroed_page(GFP_ATOMIC);
252 } else {
253 page = alloc_bootmem_pages(PAGE_SIZE);
254 }
255
256 if (!page || ((unsigned long)page & ~PAGE_MASK))
257 panic("sh64_get_page: Out of memory already?\n");
258
259 return page;
260}
261
262static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
263{
264 pgd_t *pgdp;
265 pmd_t *pmdp;
266 pte_t *ptep, pte;
267 pgprot_t prot;
268 unsigned long flags = 1; /* 1 = CB0-1 device */
269
270 pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va);
271
272 pgdp = pgd_offset_k(va);
273 if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
274 pmdp = (pmd_t *)sh64_get_page();
275 set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
276 }
277
278 pmdp = pmd_offset(pgdp, va);
279 if (pmd_none(*pmdp) || !pmd_present(*pmdp) ) {
280 ptep = (pte_t *)sh64_get_page();
281 set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
282 }
283
284 prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
285 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags);
286
287 pte = pfn_pte(pa >> PAGE_SHIFT, prot);
288 ptep = pte_offset_kernel(pmdp, va);
289
290 if (!pte_none(*ptep) &&
291 pte_val(*ptep) != pte_val(pte))
292 pte_ERROR(*ptep);
293
294 set_pte(ptep, pte);
295
296 flush_tlb_kernel_range(va, PAGE_SIZE);
297}
298
299static void shmedia_unmapioaddr(unsigned long vaddr)
300{
301 pgd_t *pgdp;
302 pmd_t *pmdp;
303 pte_t *ptep;
304
305 pgdp = pgd_offset_k(vaddr);
306 pmdp = pmd_offset(pgdp, vaddr);
307
308 if (pmd_none(*pmdp) || pmd_bad(*pmdp))
309 return;
310
311 ptep = pte_offset_kernel(pmdp, vaddr);
312
313 if (pte_none(*ptep) || !pte_present(*ptep))
314 return;
315
316 clear_page((void *)ptep);
317 pte_clear(&init_mm, vaddr, ptep);
318}
319
320unsigned long onchip_remap(unsigned long phys, unsigned long size, const char *name)
321{
322 if (size < PAGE_SIZE)
323 size = PAGE_SIZE;
324
325 return shmedia_alloc_io(phys, size, name);
326}
327
328void onchip_unmap(unsigned long vaddr)
329{
330 struct resource *res;
331 unsigned int psz;
332
333 res = shmedia_find_resource(&shmedia_iomap, vaddr);
334 if (!res) {
335 printk(KERN_ERR "%s: Failed to free 0x%08lx\n",
336 __FUNCTION__, vaddr);
337 return;
338 }
339
340 psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
341
342 printk(KERN_DEBUG "unmapioaddr: %6s [%2d page%s] freed\n",
343 res->name, psz, psz == 1 ? " " : "s");
344
345 shmedia_free_io(res);
346
347 if ((char *)res >= (char *)xresv &&
348 (char *)res < (char *)&xresv[XNRES]) {
349 xres_free((struct xresource *)res);
350 } else {
351 kfree(res);
352 }
353}
354
355#ifdef CONFIG_PROC_FS
356static int
357ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
358 void *data)
359{
360 char *p = buf, *e = buf + length;
361 struct resource *r;
362 const char *nm;
363
364 for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
365 if (p + 32 >= e) /* Better than nothing */
366 break;
367 if ((nm = r->name) == 0) nm = "???";
Paul Mundt21264132006-09-12 14:36:46 +0900368 p += sprintf(p, "%08lx-%08lx: %s\n",
369 (unsigned long)r->start,
370 (unsigned long)r->end, nm);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700371 }
372
373 return p-buf;
374}
375#endif /* CONFIG_PROC_FS */
376
377static int __init register_proc_onchip(void)
378{
379#ifdef CONFIG_PROC_FS
380 create_proc_read_entry("io_map",0,0, ioremap_proc_info, &shmedia_iomap);
381#endif
382 return 0;
383}
384
385__initcall(register_proc_onchip);