/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/ppcdebug.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>
#include <asm/imalloc.h>

unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;

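/*
 * On iSeries there is nothing for ioremap() to do: I/O access is mediated
 * by the hypervisor rather than through kernel page table mappings, so
 * the address is simply handed back unchanged and iounmap() is a no-op.
 */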
#ifdef CONFIG_PPC_ISERIES

void __iomem *ioremap(unsigned long addr, unsigned long size)
{
	return (void __iomem *)addr;
}

extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
			       unsigned long flags)
{
	return (void __iomem *)addr;
}

void iounmap(volatile void __iomem *addr)
{
	return;
}

#else

/*
 * map_io_page currently only called by __ioremap
 * map_io_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
static int map_io_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long vsid;

	if (mem_init_done) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
		unsigned long va, vpn, hash, hpteg;

		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		vsid = get_kernel_vsid(ea);
		va = (vsid << 28) | (ea & 0xFFFFFFF);
		vpn = va >> PAGE_SHIFT;

		hash = hpt_hash(vpn, 0);

		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		/* Panic if a pte group is full */
		if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
				       HPTE_V_BOLTED,
				       _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
		    == -1) {
			panic("map_io_page: could not insert mapping");
		}
	}
	return 0;
}

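/*
 * Map the physical range [pa, pa + size) at virtual address ea, one page
 * at a time.  Returns the virtual address corresponding to addr (i.e. ea
 * plus addr's offset within its page), or NULL if any page fails to map.
 */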
static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
				    unsigned long ea, unsigned long size,
				    unsigned long flags)
{
	unsigned long i;

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_io_page(ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
}


void __iomem *
ioremap(unsigned long addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
}

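/*
 * Typical driver usage (illustrative sketch only; bar, len, REG_OFFSET
 * are placeholders, not names from this file):
 *
 *	void __iomem *regs = ioremap(bar, len);
 *	if (regs == NULL)
 *		return -ENOMEM;
 *	writel(0x1, regs + REG_OFFSET);
 *	...
 *	iounmap(regs);
 */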
void __iomem * __ioremap(unsigned long addr, unsigned long size,
			 unsigned long flags)
{
	unsigned long pa, ea;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.
	 * Once the imalloc system is running, we use it.
	 * Before that, we map using addresses going
	 * up from ioremap_bot.  imalloc will use
	 * the addresses from ioremap_bot through
	 * IMALLOC_END
	 *
	 */
	pa = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - pa;

	if (size == 0)
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;
		area = im_get_free_area(size);
		if (area == NULL)
			return NULL;
		ea = (unsigned long)(area->addr);
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (!ret)
			im_free(area->addr);
	} else {
		ea = ioremap_bot;
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (ret)
			ioremap_bot += size;
	}
	return ret;
}

#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))

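/*
 * Map the physical range [pa, pa + size) at the caller-chosen virtual
 * address ea (pa, ea and size must all be page aligned).  Unlike
 * __ioremap(), no virtual address is allocated here; judging by the
 * comments below this is intended for callers such as the PHB DLPAR path.
 * Returns 0 on success, 1 on failure.
 */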
int __ioremap_explicit(unsigned long pa, unsigned long ea,
		       unsigned long size, unsigned long flags)
{
	struct vm_struct *area;
	void __iomem *ret;

	/* For now, require page-aligned values for pa, ea, and size */
	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
	    !IS_PAGE_ALIGNED(size)) {
		printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
		return 1;
	}

	if (!mem_init_done) {
		/* Two things to consider in this case:
		 * 1) No records will be kept (imalloc, etc) that the region
		 *    has been remapped
		 * 2) It won't be easy to iounmap() the region later (because
		 *    of 1)
		 */
		;
	} else {
		area = im_get_area(ea, size,
			IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
		if (area == NULL) {
			/* Expected when PHB-dlpar is in play */
			return 1;
		}
		if (ea != (unsigned long) area->addr) {
			printk(KERN_ERR "unexpected addr return from "
			       "im_get_area\n");
			return 1;
		}
	}

	ret = __ioremap_com(pa, pa, ea, size, flags);
	if (ret == NULL) {
		printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
		return 1;
	}
	if (ret != (void *) ea) {
		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
		return 1;
	}

	return 0;
}

/*
 * Unmap an IO region and remove it from imalloc'd list.
 * Access to IO memory should be serialized by driver.
 * This code is modeled after vmalloc code - unmap_vm_area()
 *
 * XXX	what about calls before mem_init_done (ie python_countermeasures())
 */
void iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!mem_init_done)
		return;

	addr = (void *) ((unsigned long __force) token & PAGE_MASK);

	im_free(addr);
}

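/*
 * Unmap every imalloc region that lies entirely within [addr, addr + size).
 * Returns 0 if at least one such sub-region was found and unmapped,
 * 1 if none exist.
 */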
static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
	struct vm_struct *area;

	/* Check whether subsets of this region exist */
	area = im_get_area(addr, size, IM_REGION_SUPERSET);
	if (area == NULL)
		return 1;

	while (area) {
		iounmap((void __iomem *) area->addr);
		area = im_get_area(addr, size,
				   IM_REGION_SUPERSET);
	}

	return 0;
}

int iounmap_explicit(volatile void __iomem *start, unsigned long size)
{
	struct vm_struct *area;
	unsigned long addr;
	int rc;

	addr = (unsigned long __force) start & PAGE_MASK;

	/* Verify that the region either exists or is a subset of an existing
	 * region.  In the latter case, split the parent region to create
	 * the exact region
	 */
	area = im_get_area(addr, size,
			   IM_REGION_EXISTS | IM_REGION_SUBSET);
	if (area == NULL) {
		/* Determine whether subset regions exist.  If so, unmap */
		rc = iounmap_subset_regions(addr, size);
		if (rc) {
			printk(KERN_ERR
			       "%s() cannot unmap nonexistent range 0x%lx\n",
			       __FUNCTION__, addr);
			return 1;
		}
	} else {
		iounmap((void __iomem *) area->addr);
	}
	/*
	 * FIXME! This can't be right:
	iounmap(area->addr);
	 * Maybe it should be "iounmap(area);"
	 */
	return 0;
}

#endif

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
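/*
 * Reserve virtual IO space for a PCI host bridge.  This is a simple bump
 * allocator: the cursor starts at PHBS_IO_BASE and must stay below
 * IMALLOC_BASE.  No page table entries are created here; the range is
 * presumably mapped later (e.g. via __ioremap_explicit()).
 */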

void __iomem * reserve_phb_iospace(unsigned long size)
{
	void __iomem *virt_addr;

	if (phbs_io_bot >= IMALLOC_BASE)
		panic("reserve_phb_iospace(): phb io space overflow\n");

	virt_addr = (void __iomem *) phbs_io_bot;
	phbs_io_bot += size;

	return virt_addr;
}