/*
2 * Trapped io support
3 *
4 * Copyright (C) 2008 Magnus Damm
5 *
6 * Intercept io operations by trapping.
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/kernel.h>
13#include <linux/mm.h>
14#include <linux/bitops.h>
15#include <linux/vmalloc.h>
16#include <asm/system.h>
17#include <asm/mmu_context.h>
18#include <asm/uaccess.h>
19#include <asm/io.h>
20#include <asm/io_trapped.h>
21
/* Upper bound on the number of pages a single trapped io window may span. */
#define TRAPPED_PAGES_MAX 16

#ifdef CONFIG_HAS_IOPORT
/* Registered trapped windows backed by IORESOURCE_IO resources. */
LIST_HEAD(trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
/* Registered trapped windows backed by IORESOURCE_MEM resources. */
LIST_HEAD(trapped_mem);
#endif
/* Serializes additions to and lookups over both lists above. */
static DEFINE_SPINLOCK(trapped_lock);
31
/*
 * register_trapped_io - install a trapped io window described by @tiop
 *
 * Builds a virtual mapping over the union of @tiop's resources with
 * vmap(..., PAGE_NONE) so any access to the window faults and can be
 * emulated, then links @tiop onto the io or mem list according to the
 * resource type.  Every page of the mapping aliases the page holding
 * @tiop itself so the fault handler can recover the descriptor from the
 * pte (see lookup_tiop()).
 *
 * Returns 0 on success, -1 on any validation or mapping failure.
 */
int __init register_trapped_io(struct trapped_io *tiop)
{
	struct resource *res;
	unsigned long len = 0, flags = 0;
	struct page *pages[TRAPPED_PAGES_MAX];
	int k, n;

	/* structure must be page aligned */
	if ((unsigned long)tiop & (PAGE_SIZE - 1))
		goto bad;

	/* total window size: each resource rounded up to a whole page */
	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		len += roundup((res->end - res->start) + 1, PAGE_SIZE);
		flags |= res->flags;
	}

	/* support IORESOURCE_IO _or_ MEM, not both */
	if (hweight_long(flags) != 1)
		goto bad;

	n = len >> PAGE_SHIFT;

	if (n >= TRAPPED_PAGES_MAX)
		goto bad;

	/*
	 * All pages alias the descriptor's own page; the PAGE_NONE
	 * protection means they are never read directly — accesses only
	 * serve to raise the fault that triggers emulation.
	 */
	for (k = 0; k < n; k++)
		pages[k] = virt_to_page(tiop);

	tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
	if (!tiop->virt_base)
		goto bad;

	len = 0;
	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
		       (unsigned long)(tiop->virt_base + len),
		       res->flags & IORESOURCE_IO ? "io" : "mmio",
		       (unsigned long)res->start);
		len += roundup((res->end - res->start) + 1, PAGE_SIZE);
	}

	tiop->magic = IO_TRAPPED_MAGIC;
	INIT_LIST_HEAD(&tiop->list);
	spin_lock_irq(&trapped_lock);
	if (flags & IORESOURCE_IO)
		list_add(&tiop->list, &trapped_io);
	if (flags & IORESOURCE_MEM)
		list_add(&tiop->list, &trapped_mem);
	spin_unlock_irq(&trapped_lock);

	return 0;
 bad:
	pr_warning("unable to install trapped io filter\n");
	return -1;
}
89
90void __iomem *match_trapped_io_handler(struct list_head *list,
91 unsigned long offset,
92 unsigned long size)
93{
94 unsigned long voffs;
95 struct trapped_io *tiop;
96 struct resource *res;
97 int k, len;
98
99 spin_lock_irq(&trapped_lock);
100 list_for_each_entry(tiop, list, list) {
101 voffs = 0;
102 for (k = 0; k < tiop->num_resources; k++) {
103 res = tiop->resource + k;
104 if (res->start == offset) {
105 spin_unlock_irq(&trapped_lock);
106 return tiop->virt_base + voffs;
107 }
108
109 len = (res->end - res->start) + 1;
110 voffs += roundup(len, PAGE_SIZE);
111 }
112 }
113 spin_unlock_irq(&trapped_lock);
114 return NULL;
115}
116
/*
 * lookup_tiop - recover the trapped_io descriptor covering @address
 *
 * Walks the kernel page tables (swapper_pg_dir) down to the pte for
 * @address.  register_trapped_io() maps every page of a trapped window
 * onto the page that holds the struct trapped_io itself, so converting
 * the pte's pfn back to a kernel virtual address yields the descriptor.
 * Returns NULL when any intermediate page table level is absent.
 *
 * NOTE(review): the final pte's present bit is not checked before
 * pte_pfn(); presumably a populated-but-not-present pte cannot occur
 * for these addresses — confirm.
 */
static struct trapped_io *lookup_tiop(unsigned long address)
{
	pgd_t *pgd_k;
	pud_t *pud_k;
	pmd_t *pmd_k;
	pte_t *pte_k;
	pte_t entry;

	pgd_k = swapper_pg_dir + pgd_index(address);
	if (!pgd_present(*pgd_k))
		return NULL;

	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	pte_k = pte_offset_kernel(pmd_k, address);
	entry = *pte_k;

	return pfn_to_kaddr(pte_pfn(entry));
}
142
143static unsigned long lookup_address(struct trapped_io *tiop,
144 unsigned long address)
145{
146 struct resource *res;
147 unsigned long vaddr = (unsigned long)tiop->virt_base;
148 unsigned long len;
149 int k;
150
151 for (k = 0; k < tiop->num_resources; k++) {
152 res = tiop->resource + k;
153 len = roundup((res->end - res->start) + 1, PAGE_SIZE);
154 if (address < (vaddr + len))
155 return res->start + (address - vaddr);
156 vaddr += len;
157 }
158 return 0;
159}
160
/*
 * copy_word - move one access of up to 8 bytes between two addresses
 * using the SH ctrl_in/ctrl_out accessors.
 *
 * Reads @src_len bytes from @src_addr, writes the value out as a
 * @dst_len-byte access to @dst_addr, and returns the value moved.
 * Unsupported widths read as 0 / write nothing.
 */
static unsigned long long copy_word(unsigned long src_addr, int src_len,
				    unsigned long dst_addr, int dst_len)
{
	unsigned long long value = 0;

	switch (src_len) {
	case 1:
		value = ctrl_inb(src_addr);
		break;
	case 2:
		value = ctrl_inw(src_addr);
		break;
	case 4:
		value = ctrl_inl(src_addr);
		break;
	case 8:
		value = ctrl_inq(src_addr);
		break;
	default:
		break;
	}

	switch (dst_len) {
	case 1:
		ctrl_outb(value, dst_addr);
		break;
	case 2:
		ctrl_outw(value, dst_addr);
		break;
	case 4:
		ctrl_outl(value, dst_addr);
		break;
	case 8:
		ctrl_outq(value, dst_addr);
		break;
	default:
		break;
	}

	return value;
}
198
199static unsigned long from_device(void *dst, const void *src, unsigned long cnt)
200{
201 struct trapped_io *tiop;
202 unsigned long src_addr = (unsigned long)src;
203 unsigned long long tmp;
204
205 pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt);
206 tiop = lookup_tiop(src_addr);
207 WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));
208
209 src_addr = lookup_address(tiop, src_addr);
210 if (!src_addr)
211 return cnt;
212
Paul Mundtf1cdd632008-02-09 19:10:52 +0900213 tmp = copy_word(src_addr,
214 max_t(unsigned long, cnt,
215 (tiop->minimum_bus_width / 8)),
Magnus Damme7cc9a72008-02-07 20:18:21 +0900216 (unsigned long)dst, cnt);
217
218 pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp);
219 return 0;
220}
221
222static unsigned long to_device(void *dst, const void *src, unsigned long cnt)
223{
224 struct trapped_io *tiop;
225 unsigned long dst_addr = (unsigned long)dst;
226 unsigned long long tmp;
227
228 pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt);
229 tiop = lookup_tiop(dst_addr);
230 WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));
231
232 dst_addr = lookup_address(tiop, dst_addr);
233 if (!dst_addr)
234 return cnt;
235
236 tmp = copy_word((unsigned long)src, cnt,
Paul Mundtf1cdd632008-02-09 19:10:52 +0900237 dst_addr, max_t(unsigned long, cnt,
238 (tiop->minimum_bus_width / 8)));
Magnus Damme7cc9a72008-02-07 20:18:21 +0900239
240 pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp);
241 return 0;
242}
243
/*
 * Accessor pair handed to the unaligned access emulator: device reads
 * are serviced by from_device(), device writes by to_device().
 */
static struct mem_access trapped_io_access = {
	from_device,
	to_device,
};
248
/*
 * handle_trapped_io - fault-path entry point for trapped io emulation
 * @regs: faulting cpu context
 * @address: faulting virtual address
 *
 * Returns 1 when the faulting instruction was decoded and replayed
 * against the trapped window, 0 when @address is not trapped or
 * emulation failed (the caller then handles the fault normally).
 */
int handle_trapped_io(struct pt_regs *regs, unsigned long address)
{
	mm_segment_t oldfs;
	opcode_t instruction;
	int tmp;

	if (!lookup_tiop(address))
		return 0;

	/* only kernel-mode accesses are expected to hit trapped windows */
	WARN_ON(user_mode(regs));

	/* fetch the faulting opcode from kernel text via copy_from_user() */
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	if (copy_from_user(&instruction, (void *)(regs->pc),
			   sizeof(instruction))) {
		set_fs(oldfs);
		return 0;
	}

	/* reuse the unaligned access emulator to decode and replay the op */
	tmp = handle_unaligned_access(instruction, regs, &trapped_io_access);
	set_fs(oldfs);
	return tmp == 0;
}
271}