/*
 * Basic implementation of a SW emulation of the domain manager feature in
 * ARM architecture.  Assumes single processor ARMv7 chipset.
 *
 * Requires hooks to be alerted to any runtime changes of dacr or MMU context.
 *
 * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
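
/*
 * Overview (added commentary): manager bits written to DACR are captured
 * in SW (edm_manager_bits) and masked out of the HW register.  An access
 * that a manager domain would have allowed then takes a permission fault;
 * the abort hooks below open up the faulting descriptor, remember its
 * original value in edm_save[], and restore it on the next DACR write,
 * context switch, or when its save slot is recycled.
 */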

#include <linux/module.h>
#include <linux/sched.h>

#include <asm/domain.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#define DOMAIN_MANAGER_BITS (0xAAAAAAAA)

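/*
 * Worked example (added): DACR holds sixteen 2-bit fields, one per domain,
 * and 0xAAAAAAAA is 0b10 repeated, i.e. the "manager" bit of every field.
 * With domain_val(dom, type) == ((type) << (2 * (dom))) and
 * DOMAIN_MANAGER == 3 from <asm/domain.h>:
 *
 *	domain_val(2, DOMAIN_MANAGER)	== 0x30
 *	0x30 & DOMAIN_MANAGER_BITS	== 0x20	(manager bit of domain 2)
 */
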
#define DFSR_DOMAIN(dfsr) (((dfsr) >> 4) & (16 - 1))

#define FSR_PERMISSION_FAULT(fsr) (((fsr) & 0x40D) == 0x00D)
#define FSR_PERMISSION_SECT(fsr) (((fsr) & 0x40F) == 0x00D)

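/*
 * FSR decode (added note): the fault status is FSR[10,3:0].  Masking with
 * 0x40D and comparing against 0x00D accepts both 0b01101 (section
 * permission fault) and 0b01111 (page permission fault), because bit 1 is
 * left out of the mask; the 0x40F mask accepts the section encoding only.
 */
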
/* ARMv7 MMU HW Macros.  Not conveniently defined elsewhere */
#define MMU_TTB_ADDRESS(x) ((u32 *)(((u32)(x)) & ~((1 << 14) - 1)))
#define MMU_PMD_INDEX(addr) (((u32)(addr)) >> SECTION_SHIFT)
#define MMU_TABLE_ADDRESS(x) ((u32 *)((x) & ~((1 << 10) - 1)))
#define MMU_TABLE_INDEX(x) ((((u32)(x)) >> 12) & (256 - 1))

/* Convenience Macros */
#define PMD_IS_VALID(x) (PMD_IS_TABLE(x) || PMD_IS_SECTION(x))
#define PMD_IS_TABLE(x) (((x) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
#define PMD_IS_SECTION(x) (((x) & PMD_TYPE_MASK) == PMD_TYPE_SECT)
#define PMD_IS_SUPERSECTION(x) \
	(PMD_IS_SECTION(x) && (((x) & PMD_SECT_SUPER) == PMD_SECT_SUPER))

#define PMD_GET_DOMAIN(x) \
	(PMD_IS_TABLE(x) || \
	 (PMD_IS_SECTION(x) && !PMD_IS_SUPERSECTION(x)) ? \
		(((x) >> 5) & (16 - 1)) : 0)

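/*
 * Note (added): in the VMSAv7 short-descriptor format, page-table and
 * section descriptors keep their domain in bits [8:5].  Supersections
 * have no domain field (those bits extend the physical address) and are
 * always checked against domain 0, which is what PMD_GET_DOMAIN yields
 * for them.
 */
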
#define PTE_IS_LARGE(x) (((x) & PTE_TYPE_MASK) == PTE_TYPE_LARGE)


/* At most DOMAIN_MMU_ENTRIES entries are granted manager access at a time */
#define DOMAIN_MMU_ENTRIES (8)

#define LRU_INC(lru) (((lru) + 1) >= DOMAIN_MMU_ENTRIES ? 0 : (lru) + 1)


static DEFINE_SPINLOCK(edm_lock);

static u32 edm_manager_bits;

struct domain_entry_save {
	u32 *mmu_entry;
	u32 *addr;
	u32 value;
	u16 sect;
	u16 size;
};

static struct domain_entry_save edm_save[DOMAIN_MMU_ENTRIES];

static u32 edm_lru;


/*
 * Return virtual address of pmd (level 1) entry for addr
 *
 * This routine walks the ARMv7 page tables in HW.
 */
static inline u32 *__get_pmd_v7(u32 *addr)
{
	u32 *ttb;

	__asm__ __volatile__(
		"mrc	p15, 0, %0, c2, c0, 0	@ ttbr0\n\t"
		: "=r" (ttb)
		:
	);

	return __va(MMU_TTB_ADDRESS(ttb) + MMU_PMD_INDEX(addr));
}

/*
 * Return virtual address of pte (level 2) entry for addr
 *
 * This routine walks the ARMv7 page tables in HW.
 */
static inline u32 *__get_pte_v7(u32 *addr)
{
	u32 *pmd = __get_pmd_v7(addr);
	u32 *table_pa = pmd && PMD_IS_TABLE(*pmd) ?
		MMU_TABLE_ADDRESS(*pmd) : 0;
	u32 *entry = table_pa ? __va(&table_pa[MMU_TABLE_INDEX(addr)]) : 0;

	return entry;
}

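/*
 * Note (added): *pmd holds the physical base address of the level 2
 * table, so the entry's address is formed in physical space first and
 * only converted to a virtual address with __va() at the end; the table
 * is never dereferenced through its physical address.
 */
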
/*
 * Invalidate the TLB entry for a given address in the current context
 *
 * After manipulating access permissions, the TLB must be invalidated so
 * that the changes are observed.
 */
static inline void __tlb_invalidate(u32 *addr)
{
	__asm__ __volatile__(
		"mrc	p15, 0, %%r2, c13, c0, 1	@ contextidr\n\t"
		"and	%%r2, %%r2, #0xff		@ asid\n\t"
		"mov	%%r3, %0, lsr #12		@ mva[31:12]\n\t"
		"orr	%%r2, %%r2, %%r3, lsl #12	@ tlb mva and asid\n\t"
		"mcr	p15, 0, %%r2, c8, c7, 1		@ utlbimva\n\t"
		"isb"
		:
		: "r" (addr)
		: "r2", "r3"
	);
}

/*
 * Set HW MMU entry and do required synchronization operations.
 */
static inline void __set_entry(u32 *entry, u32 *addr, u32 value, int size)
{
	int i;

	if (!entry)
		return;

	entry = (u32 *)((u32) entry & ~(size * sizeof(u32) - 1));

	for (i = 0; i < size; i++)
		entry[i] = value;

	__asm__ __volatile__(
		"mcr	p15, 0, %0, c7, c10, 1	@ flush entry\n\t"
		"dsb\n\t"
		"isb\n\t"
		:
		: "r" (entry)
	);
	__tlb_invalidate(addr);
}

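/*
 * Note (added): the MCR above is DCCMVAC, a clean of the data cache line
 * holding the descriptor to the point of coherency.  The ARMv7 table
 * walker is not guaranteed to snoop the L1 data cache in every
 * configuration, so the descriptor is cleaned before DSB/ISB order the
 * update and the stale TLB entry is dropped.
 */
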
/*
 * Return the number of duplicate entries associated with entry value.
 * Supersections and large page table entries are replicated 16x.
 */
static inline int __entry_size(int sect, int value)
{
	u32 size;

	if (sect)
		size = PMD_IS_SUPERSECTION(value) ? 16 : 1;
	else
		size = PTE_IS_LARGE(value) ? 16 : 1;

	return size;
}

/*
 * Change entry permissions to emulate domain manager access
 */
static inline int __manager_perm(int sect, int value)
{
	u32 edm_value;

	if (sect) {
		edm_value = (value & ~(PMD_SECT_APX | PMD_SECT_XN)) |
			(PMD_SECT_AP_READ | PMD_SECT_AP_WRITE);
	} else {
		edm_value = (value & ~(PTE_EXT_APX | PTE_EXT_XN)) |
			(PTE_EXT_AP1 | PTE_EXT_AP0);
	}
	return edm_value;
}

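/*
 * Permission encoding (added note): with SCTLR.AFE == 0, access rights
 * are APX:AP[1:0].  Clearing APX and setting AP[1:0] = 0b11 grants
 * read/write access at both PL1 and PL0, and clearing XN makes the
 * region executable again; together this is exactly what a manager
 * domain would permit.
 */
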
/*
 * Restore original HW MMU entry.  Cancels domain manager access.
 */
static inline void __restore_entry(int index)
{
	struct domain_entry_save *entry = &edm_save[index];
	u32 edm_value;

	if (!entry->mmu_entry)
		return;

	edm_value = __manager_perm(entry->sect, entry->value);

	if (*entry->mmu_entry == edm_value)
		__set_entry(entry->mmu_entry, entry->addr,
			entry->value, entry->size);

	entry->mmu_entry = 0;
}

/*
 * Modify HW MMU entry to grant domain manager access for a given MMU entry.
 * This adds full read, write, and exec access permissions.
 */
static inline void __set_manager(int sect, u32 *addr)
{
	u32 *entry = sect ? __get_pmd_v7(addr) : __get_pte_v7(addr);
	u32 value;
	u32 edm_value;
	u16 size;

	if (!entry)
		return;

	value = *entry;

	size = __entry_size(sect, value);
	edm_value = __manager_perm(sect, value);

	__set_entry(entry, addr, edm_value, size);

	__restore_entry(edm_lru);

	edm_save[edm_lru].mmu_entry = entry;
	edm_save[edm_lru].addr = addr;
	edm_save[edm_lru].value = value;
	edm_save[edm_lru].sect = sect;
	edm_save[edm_lru].size = size;

	edm_lru = LRU_INC(edm_lru);
}

/*
 * Restore all original HW MMU entries, cancelling any emulated manager
 * access that is still in effect.
 */
static inline void __restore(void)
{
	if (unlikely(edm_manager_bits)) {
		u32 i;

		for (i = 0; i < DOMAIN_MMU_ENTRIES; i++)
			__restore_entry(i);
	}
}

/*
 * Common abort handler code
 *
 * If the domain manager bit had actually been set in HW, the permission
 * fault would not have happened.  Open up the access permissions to
 * emulate it, saving the original settings so they can be restored later.
 * Return 1 so the caller treats the fault as if it never happened.
 */
static int __emulate_domain_manager_abort(u32 fsr, u32 far, int dabort)
{
	if (unlikely(FSR_PERMISSION_FAULT(fsr) && edm_manager_bits)) {
		int domain = dabort ? DFSR_DOMAIN(fsr) :
			PMD_GET_DOMAIN(*__get_pmd_v7((u32 *) far));
		if (edm_manager_bits & domain_val(domain, DOMAIN_MANAGER)) {
			unsigned long flags;

			spin_lock_irqsave(&edm_lock, flags);

			__set_manager(FSR_PERMISSION_SECT(fsr), (u32 *) far);

			spin_unlock_irqrestore(&edm_lock, flags);
			return 1;
		}
	}
	return 0;
}

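/*
 * Usage sketch (assumption, not part of this file): the low-level abort
 * paths are expected to consult the emulation first and retry the access
 * when it reports the fault as handled, e.g.
 *
 *	if (emulate_domain_manager_data_abort(fsr, addr))
 *		return;
 *
 * and only otherwise fall through to the normal fault handling.
 */
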
/*
 * Change domain setting.
 *
 * Lock and restore original contents.  Extract and save manager bits.  Set
 * DACR, excluding manager bits.
 */
void emulate_domain_manager_set(u32 domain)
{
	unsigned long flags;

	spin_lock_irqsave(&edm_lock, flags);

	if (edm_manager_bits != (domain & DOMAIN_MANAGER_BITS)) {
		__restore();
		edm_manager_bits = domain & DOMAIN_MANAGER_BITS;
	}

	__asm__ __volatile__(
		"mcr	p15, 0, %0, c3, c0, 0	@ set domain\n\t"
		"isb"
		:
		: "r" (domain & ~DOMAIN_MANAGER_BITS)
	);

	spin_unlock_irqrestore(&edm_lock, flags);
}
EXPORT_SYMBOL_GPL(emulate_domain_manager_set);

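/*
 * Wiring sketch (assumption): once the emulation is active, nothing may
 * write DACR directly; a set_domain()-style helper would be redirected
 * here instead, e.g.
 *
 *	#define set_domain(x)	emulate_domain_manager_set(x)
 *
 * so the manager bits are captured in SW before the register is written.
 */
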
/*
 * Switch thread context.  Restore original contents.
 */
void emulate_domain_manager_switch_mm(unsigned long pgd_phys,
	struct mm_struct *mm,
	void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *))
{
	unsigned long flags;

	spin_lock_irqsave(&edm_lock, flags);

	__restore();

	/* Call underlying kernel handler */
	switch_mm(pgd_phys, mm);

	spin_unlock_irqrestore(&edm_lock, flags);
}
EXPORT_SYMBOL_GPL(emulate_domain_manager_switch_mm);

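/*
 * Wiring sketch (assumption): the processor's switch_mm entry point is
 * expected to be wrapped so saved entries are restored before the new
 * translation table base is installed, e.g.
 *
 *	emulate_domain_manager_switch_mm(pgd_phys, mm, cpu_v7_switch_mm);
 *
 * where cpu_v7_switch_mm is the underlying ARMv7 handler.
 */
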
/*
 * Kernel data_abort hook
 */
int emulate_domain_manager_data_abort(u32 dfsr, u32 dfar)
{
	return __emulate_domain_manager_abort(dfsr, dfar, 1);
}
EXPORT_SYMBOL_GPL(emulate_domain_manager_data_abort);

/*
 * Kernel prefetch_abort hook
 */
int emulate_domain_manager_prefetch_abort(u32 ifsr, u32 ifar)
{
	return __emulate_domain_manager_abort(ifsr, ifar, 0);
}
EXPORT_SYMBOL_GPL(emulate_domain_manager_prefetch_abort);
345