/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>
#include <asm/firmware.h>

#include "mmu_decl.h"

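/*
 * ioremap_bot hands out virtual addresses for early ioremaps, before
 * the imalloc allocator is up; phbs_io_bot hands out PHB I/O space
 * below IMALLOC_BASE (see reserve_phb_iospace() below).
 */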
unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;

/*
 * map_io_page is currently only called by __ioremap.  It adds an entry
 * to the ioremap page table and an entry to the HPT, possibly bolting it.
 */
static int map_io_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (mem_init_done) {
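		/*
		 * The MM is up: install a normal kernel page table entry;
		 * the hash page table entry is then faulted in on first
		 * access.
		 */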
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * Linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_virtual_psize)) {
			printk(KERN_ERR "Failed to do bolted mapping of IO "
			       "memory at %016lx!\n", pa);
			return -ENOMEM;
		}
	}
	return 0;
}

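/*
 * Worker for ioremap()/__ioremap(): maps the physical range starting
 * at pa to the virtual range starting at ea, one page at a time,
 * defaulting to a normal kernel mapping if no protection flags are
 * supplied.  Returns the virtual address corresponding to addr
 * (i.e. ea plus addr's offset within its page), or NULL on failure.
 */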
static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
				    unsigned long ea, unsigned long size,
				    unsigned long flags)
{
	unsigned long i;

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_io_page(ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
}

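/*
 * ioremap() maps a physical range into kernel virtual space as
 * guarded, cache-inhibited memory, which is what memory-mapped I/O
 * registers normally require.
 */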
void __iomem *
ioremap(unsigned long addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
}

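/*
 * __ioremap() is the flag-taking variant: callers supply the page
 * protection bits themselves, e.g. to get a cacheable mapping.
 */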
void __iomem * __ioremap(unsigned long addr, unsigned long size,
			 unsigned long flags)
{
	unsigned long pa, ea;
	void __iomem *ret;

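	/*
	 * Legacy iSeries does not use this remapping path: the token is
	 * handed back unchanged and I/O accesses are routed through
	 * hypervisor calls instead.
	 */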
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return (void __iomem *)addr;

	/*
	 * Choose an address to map it to.
	 * Once the imalloc system is running, we use it.
	 * Before that, we map using addresses going
	 * up from ioremap_bot.  imalloc will use
	 * the addresses from ioremap_bot through
	 * IMALLOC_END.
	 */
	pa = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - pa;

	if ((size == 0) || (pa == 0))
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;
		area = im_get_free_area(size);
		if (area == NULL)
			return NULL;
		ea = (unsigned long)(area->addr);
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (!ret)
			im_free(area->addr);
	} else {
		ea = ioremap_bot;
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (ret)
			ioremap_bot += size;
	}
	return ret;
}

#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))

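/*
 * __ioremap_explicit() maps a physical range at a caller-chosen
 * virtual address rather than allocating one; it is used when a
 * region must reappear at a known address, e.g. during PHB DLPAR.
 * Returns 0 on success and 1 on failure.
 */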
int __ioremap_explicit(unsigned long pa, unsigned long ea,
		       unsigned long size, unsigned long flags)
{
	struct vm_struct *area;
	void __iomem *ret;

	/* For now, require page-aligned values for pa, ea, and size */
	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
	    !IS_PAGE_ALIGNED(size)) {
		printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
		return 1;
	}

	if (!mem_init_done) {
		/* Two things to consider in this case:
		 * 1) No records will be kept (imalloc, etc) that the region
		 *    has been remapped
		 * 2) It won't be easy to iounmap() the region later (because
		 *    of 1)
		 */
		;
	} else {
		area = im_get_area(ea, size,
			IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
		if (area == NULL) {
			/* Expected when PHB-dlpar is in play */
			return 1;
		}
		if (ea != (unsigned long) area->addr) {
			printk(KERN_ERR "unexpected addr return from "
			       "im_get_area\n");
			return 1;
		}
	}

	ret = __ioremap_com(pa, pa, ea, size, flags);
	if (ret == NULL) {
		printk(KERN_ERR "ioremap_explicit() allocation failure!\n");
		return 1;
	}
	if (ret != (void *) ea) {
		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
		return 1;
	}

	return 0;
}

/*
 * Unmap an IO region and remove it from the imalloc'd list.
 * Access to IO memory should be serialized by the driver.
 * This code is modeled after the vmalloc code - unmap_vm_area().
 *
 * XXX what about calls before mem_init_done (ie python_countermeasures())?
 */
void iounmap(volatile void __iomem *token)
{
	void *addr;

	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return;

	if (!mem_init_done)
		return;

	addr = (void *) ((unsigned long __force) token & PAGE_MASK);

	im_free(addr);
}

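/*
 * Unmap every existing imalloc region that lies within [addr, addr+size).
 * Returns 0 if at least one such subset region was found and unmapped,
 * 1 if none existed.
 */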
static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
	struct vm_struct *area;

	/* Check whether subsets of this region exist */
	area = im_get_area(addr, size, IM_REGION_SUPERSET);
	if (area == NULL)
		return 1;

	while (area) {
		iounmap((void __iomem *) area->addr);
		area = im_get_area(addr, size, IM_REGION_SUPERSET);
	}

	return 0;
}

int iounmap_explicit(volatile void __iomem *start, unsigned long size)
{
	struct vm_struct *area;
	unsigned long addr;
	int rc;

	addr = (unsigned long __force) start & PAGE_MASK;

	/* Verify that the region either exists or is a subset of an existing
	 * region.  In the latter case, split the parent region to create
	 * the exact region
	 */
	area = im_get_area(addr, size,
			   IM_REGION_EXISTS | IM_REGION_SUBSET);
	if (area == NULL) {
		/* Determine whether subset regions exist.  If so, unmap */
		rc = iounmap_subset_regions(addr, size);
		if (rc) {
			printk(KERN_ERR
			       "%s() cannot unmap nonexistent range 0x%lx\n",
			       __FUNCTION__, addr);
			return 1;
		}
	} else {
		iounmap((void __iomem *) area->addr);
	}
	/*
	 * FIXME! This can't be right:
	iounmap(area->addr);
	 * Maybe it should be "iounmap(area);"
	 */
	return 0;
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);

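/*
 * Carve size bytes of virtual space out of the fixed window reserved
 * for PHB I/O (PHBS_IO_BASE up to IMALLOC_BASE).  This is a simple
 * bump allocator: there is no way to return the space, and running
 * past IMALLOC_BASE is fatal.
 */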
void __iomem * reserve_phb_iospace(unsigned long size)
{
	void __iomem *virt_addr;

	if (phbs_io_bot >= IMALLOC_BASE)
		panic("reserve_phb_iospace(): phb io space overflow\n");

	virt_addr = (void __iomem *) phbs_io_bot;
	phbs_io_bot += size;

	return virt_addr;
}