/*
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * These functions should only be used when it is necessary to map a
 * physical address space into the kernel address space before ioremap()
 * can be used, e.g. early in boot before paging_init().
 *
 * Copyright (C) 2009 Matt Fleming
 */

#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

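/*
 * One bookkeeping entry per fixed ioremap slot: the virtual address
 * handed back to the caller, the size of the mapping, and the fixmap
 * virtual address assigned to the slot at init time.
 */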
struct ioremap_map {
	void __iomem *addr;
	unsigned long size;
	unsigned long fixmap_addr;
};

static struct ioremap_map ioremap_maps[FIX_N_IOREMAPS];

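/*
 * Precompute the fixmap virtual address for each slot so that
 * ioremap_fixed() only has to pick a free slot at mapping time.
 */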
void __init ioremap_fixed_init(void)
{
	struct ioremap_map *map;
	int i;

	for (i = 0; i < FIX_N_IOREMAPS; i++) {
		map = &ioremap_maps[i];
		map->fixmap_addr = __fix_to_virt(FIX_IOREMAP_BEGIN + i);
	}
}

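/*
 * Map a physical range into one of the fixed ioremap slots with the
 * given protection bits. Returns the virtual address on success, or
 * NULL if no slot is free or the page-aligned range does not fit.
 *
 * Hypothetical usage sketch, for illustration only (the register
 * address below is made up):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_fixed(0xffe80000, PAGE_SIZE, PAGE_KERNEL_NOCACHE);
 *	if (regs)
 *		... early register accesses via readl()/writel() ...
 */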
void __init __iomem *
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
	enum fixed_addresses idx0, idx;
	struct ioremap_map *map;
	unsigned int nrpages;
	unsigned long offset;
	int i, slot;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(phys_addr + size) - phys_addr;

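	/*
	 * Grab the first slot in the bookkeeping table that is not
	 * already backing a mapping.
	 */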
	slot = -1;
	for (i = 0; i < FIX_N_IOREMAPS; i++) {
		map = &ioremap_maps[i];
		if (!map->addr) {
			map->size = size;
			slot = i;
			break;
		}
	}

	if (slot < 0)
		return NULL;

	/*
	 * Mappings have to fit in the FIX_IOREMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > FIX_N_IOREMAPS)
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_IOREMAP_BEGIN + slot;
	idx = idx0;
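	/*
	 * Populate successive fixmap entries for the range; _PAGE_WIRED
	 * requests a wired TLB entry for each page so the mapping can be
	 * used this early in boot.
	 */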
	while (nrpages > 0) {
		pgprot_val(prot) |= _PAGE_WIRED;
		__set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		idx++;
		--nrpages;
	}

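	/*
	 * The caller gets the fixmap virtual address of the slot plus
	 * the sub-page offset of the original physical address.
	 */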
	map->addr = (void __iomem *)(offset + map->fixmap_addr);
	return map->addr;
}

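/*
 * Tear down a mapping created by ioremap_fixed(). Returns 0 on success
 * and -EINVAL if @addr does not belong to any of the fixed slots, so
 * the caller can fall back to another unmap path.
 */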
int iounmap_fixed(void __iomem *addr)
{
	enum fixed_addresses idx;
	struct ioremap_map *map;
	unsigned int nrpages;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_N_IOREMAPS; i++) {
		map = &ioremap_maps[i];
		if (map->addr == addr) {
			slot = i;
			break;
		}
	}

	/*
	 * If we don't match, it's not for us.
	 */
	if (slot < 0)
		return -EINVAL;

	nrpages = map->size >> PAGE_SHIFT;

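	/*
	 * Walk the mapping from its last page back to its first,
	 * clearing each fixmap entry along the way.
	 */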
	idx = FIX_IOREMAP_BEGIN + slot + nrpages - 1;
	while (nrpages > 0) {
		__clear_fixmap(idx, __pgprot(_PAGE_WIRED));
		--idx;
		--nrpages;
	}

	map->size = 0;
	map->addr = NULL;

	return 0;
}