/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include <mach/iommu_perfmon.h>
#include <mach/iommu_hw-v0.h>
#include <mach/msm_iommu_priv.h>
#include <mach/iommu.h>
#include <mach/msm_smem.h>
#include <mach/msm_bus.h>

#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"	\
: "=r" (reg))

#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
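
/*
 * Example (illustrative sketch only): the RCP15_* wrappers expand to a
 * single "mrc" instruction that copies a CP15 register into the given
 * lvalue:
 *
 *	unsigned int prrr, nmrr;
 *
 *	RCP15_PRRR(prrr);	(Primary Region Remap Register)
 *	RCP15_NMRR(nmrr);	(Normal Memory Remap Register)
 *
 * get_tex_class() and __program_context() below use exactly this
 * pattern to mirror the CPU's TEX-remap configuration into the IOMMU.
 */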

/* Shareability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NON_SH		0x0
#define MSM_IOMMU_ATTR_SH		0x4

/* Cacheability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NONCACHED	0x0
#define MSM_IOMMU_ATTR_CACHED_WB_WA	0x1
#define MSM_IOMMU_ATTR_CACHED_WB_NWA	0x2
#define MSM_IOMMU_ATTR_CACHED_WT	0x3

struct bus_type msm_iommu_sec_bus_type = {
	.name = "msm_iommu_sec_bus",
};

static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len);

/*
 * Write page-table entries back to memory so the IOMMU's table walker
 * sees them. No cache maintenance is needed when the tables are
 * redirected through the (snooped) L2, i.e. when redirect is set.
 */
static inline void clean_pte(unsigned long *start, unsigned long *end,
			     int redirect)
{
	if (!redirect)
		dmac_flush_range(start, end);
}

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static int msm_iommu_tex_class[4];

DEFINE_MUTEX(msm_iommu_lock);

/**
 * Remote spinlock implementation based on Peterson's algorithm, used to
 * synchronize IOMMU config port access between the CPU and the GPU.
 * This implements Process 0 of the spinlock algorithm; the GPU
 * implements Process 1. The flag and turn variables live in shared
 * memory so that the GPU can access them.
 */
struct msm_iommu_remote_lock {
	int initialized;
	struct remote_iommu_petersons_spinlock *lock;
};

static struct msm_iommu_remote_lock msm_iommu_remote_lock;

#ifdef CONFIG_MSM_IOMMU_GPU_SYNC
static void _msm_iommu_remote_spin_lock_init(void)
{
	msm_iommu_remote_lock.lock = smem_alloc(SMEM_SPINLOCK_ARRAY, 32);
	memset(msm_iommu_remote_lock.lock, 0,
	       sizeof(*msm_iommu_remote_lock.lock));
}

void msm_iommu_remote_p0_spin_lock(void)
{
	msm_iommu_remote_lock.lock->flag[PROC_APPS] = 1;
	msm_iommu_remote_lock.lock->turn = 1;

	smp_mb();

	while (msm_iommu_remote_lock.lock->flag[PROC_GPU] == 1 &&
	       msm_iommu_remote_lock.lock->turn == 1)
		cpu_relax();
}

void msm_iommu_remote_p0_spin_unlock(void)
{
	smp_mb();

	msm_iommu_remote_lock.lock->flag[PROC_APPS] = 0;
}
#endif
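
/*
 * Illustrative sketch (not part of this driver): the generic
 * two-process Peterson's algorithm the lock above instantiates. Each
 * side runs the mirror image of the other, with self/other being
 * APPS/GPU here:
 *
 *	flag[self] = 1;
 *	turn = other;
 *	smp_mb();
 *	while (flag[other] == 1 && turn == other)
 *		cpu_relax();
 *	... touch the IOMMU config port ...
 *	smp_mb();
 *	flag[self] = 0;
 *
 * The driver below always calls msm_iommu_remote_spin_lock()/unlock();
 * those names are assumed to resolve (in the mach headers, not shown
 * here) to the p0 functions above when CONFIG_MSM_IOMMU_GPU_SYNC is
 * enabled, and to no-ops otherwise.
 */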

inline void msm_iommu_mutex_lock(void)
{
	mutex_lock(&msm_iommu_lock);
}

inline void msm_iommu_mutex_unlock(void)
{
	mutex_unlock(&msm_iommu_lock);
}

void *msm_iommu_lock_initialize(void)
{
	mutex_lock(&msm_iommu_lock);
	if (!msm_iommu_remote_lock.initialized) {
		msm_iommu_remote_lock_init();
		msm_iommu_remote_lock.initialized = 1;
	}
	mutex_unlock(&msm_iommu_lock);
	return msm_iommu_remote_lock.lock;
}

static int apply_bus_vote(struct msm_iommu_drvdata *drvdata, unsigned int vote)
{
	int ret = 0;

	if (drvdata->bus_client) {
		ret = msm_bus_scale_client_update_request(drvdata->bus_client,
							  vote);
		if (ret)
			pr_err("%s: Failed to vote for bus: %d\n", __func__,
			       vote);
	}
	return ret;
}

static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_prepare_enable(drvdata->pclk);
	if (ret)
		goto fail;

	if (drvdata->clk) {
		ret = clk_prepare_enable(drvdata->clk);
		if (ret)
			clk_disable_unprepare(drvdata->pclk);
	}

	if (ret)
		goto fail;

	if (drvdata->aclk) {
		ret = clk_prepare_enable(drvdata->aclk);
		if (ret) {
			clk_disable_unprepare(drvdata->clk);
			clk_disable_unprepare(drvdata->pclk);
		}
	}

fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->aclk)
		clk_disable_unprepare(drvdata->aclk);
	if (drvdata->clk)
		clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}

static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
{
	/* No need to do anything. IOMMUv0 is always on. */
	return 0;
}

static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
{
	/* No need to do anything. IOMMUv0 is always on. */
}

static void *_iommu_lock_initialize(void)
{
	return msm_iommu_lock_initialize();
}

static void _iommu_lock_acquire(void)
{
	msm_iommu_lock();
}

static void _iommu_lock_release(void)
{
	msm_iommu_unlock();
}

struct iommu_access_ops iommu_access_ops_v0 = {
	.iommu_power_on = __enable_regulators,
	.iommu_power_off = __disable_regulators,
	.iommu_bus_vote = apply_bus_vote,
	.iommu_clk_on = __enable_clocks,
	.iommu_clk_off = __disable_clocks,
	.iommu_lock_initialize = _iommu_lock_initialize,
	.iommu_lock_acquire = _iommu_lock_acquire,
	.iommu_lock_release = _iommu_lock_release,
};
EXPORT_SYMBOL(iommu_access_ops_v0);
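
/*
 * Illustrative sketch (an assumption, not shown in this file): external
 * users such as the IOMMU performance-monitor code are expected to hold
 * a pointer to these ops so the same code can drive v0 and v1 IOMMUs:
 *
 *	struct iommu_access_ops *ops = &iommu_access_ops_v0;
 *
 *	ops->iommu_lock_acquire();
 *	if (!ops->iommu_clk_on(drvdata)) {
 *		... read or program IOMMU registers ...
 *		ops->iommu_clk_off(drvdata);
 *	}
 *	ops->iommu_lock_release();
 */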

static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		msm_iommu_remote_spin_lock();

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   asid | (va & TLBIVA_VA));
		mb();

		msm_iommu_remote_spin_unlock();

		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		msm_iommu_remote_spin_lock();

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
		mb();

		msm_iommu_remote_spin_unlock();

		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static void __reset_context(void __iomem *base, void __iomem *glb_base, int ctx)
{
	SET_BPRCOSH(glb_base, ctx, 0);
	SET_BPRCISH(glb_base, ctx, 0);
	SET_BPRCNSH(glb_base, ctx, 0);
	SET_BPSHCFG(glb_base, ctx, 0);
	SET_BPMTCFG(glb_base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();
}

static void __program_context(void __iomem *base, void __iomem *glb_base,
			      int ctx, int ncb, phys_addr_t pgtable,
			      int redirect, int ttbr_split)
{
	unsigned int prrr, nmrr;
	int i, j, found;

	msm_iommu_remote_spin_lock();

	__reset_context(base, glb_base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, ttbr_split);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));
	if (ttbr_split)
		SET_TTBR1_PA(base, ctx, (pgtable >> TTBR1_PA_SHIFT));

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_TTBR0_SH(base, ctx, 1);
		SET_TTBR1_SH(base, ctx, 1);

		SET_TTBR0_NOS(base, ctx, 1);
		SET_TTBR1_NOS(base, ctx, 1);

		SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR0_IRGNL(base, ctx, 1);

		SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR1_IRGNL(base, ctx, 1);

		SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
		SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
	}

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx,
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();

	msm_iommu_remote_spin_unlock();
}

static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_iommu_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);
	priv->pt.fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
							      get_order(SZ_16K));

	if (!priv->pt.fl_table)
		goto fail_nomem;

#ifdef CONFIG_IOMMU_PGTABLES_L2
	priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
#endif

	memset(priv->pt.fl_table, 0, SZ_16K);
	domain->priv = priv;

	clean_pte(priv->pt.fl_table, priv->pt.fl_table + NUM_FL_PTE,
		  priv->pt.redirect);

	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv;
	unsigned long *fl_table;
	int i;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pt.fl_table;

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
								FL_BASE_MASK)));

		free_pages((unsigned long)priv->pt.fl_table, get_order(SZ_16K));
		priv->pt.fl_table = NULL;
	}

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}

static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto unlock;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);

	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto unlock;
	}

	++ctx_drvdata->attach_count;

	if (ctx_drvdata->attach_count > 1)
		goto unlock;

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto unlock;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto unlock;
		}

	ret = apply_bus_vote(iommu_drvdata, 1);

	if (ret)
		goto unlock;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto unlock;

	__program_context(iommu_drvdata->base, iommu_drvdata->glb_base,
			  ctx_drvdata->num, iommu_drvdata->ncb,
			  __pa(priv->pt.fl_table), priv->pt.redirect,
			  iommu_drvdata->ttbr_split);

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);

	ctx_drvdata->attached_domain = domain;

	mutex_unlock(&msm_iommu_lock);

	msm_iommu_attached(dev->parent);
	return ret;
unlock:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	msm_iommu_detached(dev->parent);

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;

	if (!priv || !dev)
		goto unlock;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);

	if (!iommu_drvdata || !ctx_drvdata)
		goto unlock;

	--ctx_drvdata->attach_count;
	BUG_ON(ctx_drvdata->attach_count < 0);

	if (ctx_drvdata->attach_count > 0)
		goto unlock;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto unlock;

	msm_iommu_remote_spin_lock();

	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
		     GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_drvdata->num));

	__reset_context(iommu_drvdata->base, iommu_drvdata->glb_base,
			ctx_drvdata->num);

	msm_iommu_remote_spin_unlock();

	__disable_clocks(iommu_drvdata);

	apply_bus_vote(iommu_drvdata, 0);

	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;
unlock:
	mutex_unlock(&msm_iommu_lock);
}

static int __get_pgprot(int prot, int len)
{
	unsigned int pgprot;
	int tex;

	if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
		prot |= IOMMU_READ | IOMMU_WRITE;
		WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
	}

	if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
		prot |= IOMMU_READ;
		WARN_ONCE(1, "Write-only iommu mappings unsupported; falling back to RW\n");
	}

	if (prot & IOMMU_CACHE)
		tex = (pgprot_kernel >> 2) & 0x07;
	else
		tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];

	if (tex < 0 || tex > NUM_TEX_CLASS - 1)
		return 0;

	if (len == SZ_16M || len == SZ_1M) {
		pgprot = FL_SHARED;
		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
		pgprot |= FL_AP0 | FL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
	} else {
		pgprot = SL_SHARED;
		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
		pgprot |= SL_AP0 | SL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
	}

	return pgprot;
}
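
/*
 * Worked example (illustrative): a 4 KiB, read-only, uncached mapping
 * (prot = IOMMU_READ, len = SZ_4K) takes the second-level branch above
 * and yields
 *
 *	SL_SHARED | SL_AP0 | SL_AP1 | SL_AP2
 *
 * plus whatever TEX bits the noncached class carries: bit 0 of the
 * class selects SL_BUFFERABLE, bit 1 SL_CACHEABLE and bit 2 SL_TEX0.
 * SL_AP2 is what makes the mapping read-only.
 */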

static unsigned long *make_second_level(struct msm_iommu_priv *priv,
					unsigned long *fl_pte)
{
	unsigned long *sl;

	sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
						get_order(SZ_4K));

	if (!sl) {
		pr_debug("Could not allocate second level table\n");
		goto fail;
	}
	memset(sl, 0, SZ_4K);
	clean_pte(sl, sl + NUM_SL_PTE, priv->pt.redirect);

	*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);

	clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
fail:
	return sl;
}

static int sl_4k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
	int ret = 0;

	if (*sl_pte) {
		ret = -EBUSY;
		goto fail;
	}

	*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
		  | SL_TYPE_SMALL | pgprot;
fail:
	return ret;
}

static int sl_64k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
	int ret = 0;
	int i;

	for (i = 0; i < 16; i++)
		if (*(sl_pte+i)) {
			ret = -EBUSY;
			goto fail;
		}

	for (i = 0; i < 16; i++)
		*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
			      | SL_SHARED | SL_TYPE_LARGE | pgprot;

fail:
	return ret;
}

static inline int fl_1m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
	if (*fl_pte)
		return -EBUSY;

	*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
		  | pgprot;

	return 0;
}

static inline int fl_16m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
	int i;
	int ret = 0;

	for (i = 0; i < 16; i++)
		if (*(fl_pte+i)) {
			ret = -EBUSY;
			goto fail;
		}
	for (i = 0; i < 16; i++)
		*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
			      | FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
fail:
	return ret;
}

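/*
 * Worked example (illustrative): how a VA is split up by the helpers
 * above, following the ARM short-descriptor page-table format. The
 * first-level index is va[31:20] (4096 entries covering 1 MiB each);
 * the second-level index is va[19:12] (256 entries of 4 KiB). For
 * va = 0x12345000:
 *
 *	fl_offset = FL_OFFSET(va) = 0x123;	first-level slot
 *	sl_offset = SL_OFFSET(va) = 0x45;	second-level slot
 *
 * A 1 MiB mapping writes one first-level section entry (fl_1m); a
 * 16 MiB supersection writes 16 identical first-level entries (fl_16m);
 * 4 KiB and 64 KiB mappings go through a second-level table (sl_4k
 * writes one entry, sl_64k writes 16 identical entries).
 */
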
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_iommu_priv *priv;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	unsigned int pgprot;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	fl_table = priv->pt.fl_table;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad size: %zd\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	pgprot = __get_pgprot(prot, len);

	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (len == SZ_16M) {
		ret = fl_16m(fl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(fl_pte, fl_pte + 16, priv->pt.redirect);
	}

	if (len == SZ_1M) {
		ret = fl_1m(fl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
	}

	/* Need a 2nd level table */
	if (len == SZ_4K || len == SZ_64K) {

		if (*fl_pte == 0) {
			if (make_second_level(priv, fl_pte) == NULL) {
				ret = -ENOMEM;
				goto fail;
			}
		}

		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_4K) {
		ret = sl_4k(sl_pte, pa, pgprot);
		if (ret)
			goto fail;

		clean_pte(sl_pte, sl_pte + 1, priv->pt.redirect);
	}

	if (len == SZ_64K) {
		ret = sl_64k(sl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(sl_pte, sl_pte + 16, priv->pt.redirect);
	}

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			      size_t len)
{
	struct msm_iommu_priv *priv;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	int i, ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv)
		goto fail;

	fl_table = priv->pt.fl_table;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad length: %zd\n", len);
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (*fl_pte == 0) {
		pr_debug("First level PTE is 0\n");
		goto fail;
	}

	/* Unmap supersection */
	if (len == SZ_16M) {
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = 0;

		clean_pte(fl_pte, fl_pte + 16, priv->pt.redirect);
	}

	if (len == SZ_1M) {
		*fl_pte = 0;

		clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_64K) {
		for (i = 0; i < 16; i++)
			*(sl_pte+i) = 0;

		clean_pte(sl_pte, sl_pte + 16, priv->pt.redirect);
	}

	if (len == SZ_4K) {
		*sl_pte = 0;

		clean_pte(sl_pte, sl_pte + 1, priv->pt.redirect);
	}

	if (len == SZ_4K || len == SZ_64K) {
		int used = 0;

		for (i = 0; i < NUM_SL_PTE; i++)
			if (sl_table[i])
				used = 1;
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
		}
	}

	ret = __flush_iotlb_va(domain, va);

fail:
	mutex_unlock(&msm_iommu_lock);

	/* the IOMMU API requires us to return how many bytes were unmapped */
	len = ret ? 0 : len;
	return len;
}

static unsigned int get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	unsigned int pa = sg_dma_address(sg);
	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
				   int align)
{
	return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
	       && (len >= align);
}
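
/*
 * Illustrative example: with va = pa = 0x40100000 and a 2 MiB chunk,
 * is_fully_aligned(va, pa, len, SZ_16M) fails (0x40100000 is not 16 MiB
 * aligned) but is_fully_aligned(va, pa, len, SZ_1M) holds, so
 * msm_iommu_map_range() below maps the next megabyte as a 1 MiB section
 * and covers whatever remains with 64 KiB or 4 KiB pages.
 */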

static int check_range(unsigned long *fl_table, unsigned int va,
		       unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int i;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			sl_start = SL_OFFSET(va);
			sl_table = __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			for (i = sl_start; i < sl_end; i++) {
				if (sl_table[i] != 0) {
					pr_err("%08x - %08x already mapped\n",
					       va, va + SZ_4K);
					return -EBUSY;
				}
				offset += SZ_4K;
				va += SZ_4K;
			}

			sl_start = 0;
		} else {
			if (*fl_pte != 0) {
				pr_err("%08x - %08x already mapped\n",
				       va, va + SZ_1M);
				return -EBUSY;
			}
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}
	return 0;
}

static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	unsigned int pa;
	unsigned int start_va = va;
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table = NULL;
	unsigned long sl_offset, sl_start;
	unsigned int chunk_size, chunk_offset = 0;
	int ret = 0;
	struct msm_iommu_priv *priv;
	unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pt.fl_table;

	pgprot4k = __get_pgprot(prot, SZ_4K);
	pgprot64k = __get_pgprot(prot, SZ_64K);
	pgprot1m = __get_pgprot(prot, SZ_1M);
	pgprot16m = __get_pgprot(prot, SZ_16M);

	if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
		ret = -EINVAL;
		goto fail;
	}
	ret = check_range(fl_table, va, len);
	if (ret)
		goto fail;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
	pa = get_phys_addr(sg);

	while (offset < len) {
		chunk_size = SZ_4K;

		if (is_fully_aligned(va, pa, sg->length - chunk_offset,
				     SZ_16M))
			chunk_size = SZ_16M;
		else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					  SZ_1M))
			chunk_size = SZ_1M;
		/* 64k or 4k determined later */

		/* for 1M and 16M, only first level entries are required */
		if (chunk_size >= SZ_1M) {
			if (chunk_size == SZ_16M) {
				ret = fl_16m(fl_pte, pa, pgprot16m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 16,
					  priv->pt.redirect);
				fl_pte += 16;
			} else if (chunk_size == SZ_1M) {
				ret = fl_1m(fl_pte, pa, pgprot1m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 1,
					  priv->pt.redirect);
				fl_pte++;
			}

			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
			}
			continue;
		}
		/* for 4K or 64K, make sure there is a second level table */
		if (*fl_pte == 0) {
			if (!make_second_level(priv, fl_pte)) {
				ret = -ENOMEM;
				goto fail;
			}
		}
		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
		sl_table = __va(((*fl_pte) & FL_BASE_MASK));
		sl_offset = SL_OFFSET(va);
		/* Keep track of initial position so we
		 * don't clean more than we have to
		 */
		sl_start = sl_offset;

		/* Build the 2nd level page table */
		while (offset < len && sl_offset < NUM_SL_PTE) {

			/* Map a large 64K page if the chunk is large enough and
			 * the pa and va are aligned
			 */

			if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					     SZ_64K))
				chunk_size = SZ_64K;
			else
				chunk_size = SZ_4K;

			if (chunk_size == SZ_4K) {
				sl_4k(&sl_table[sl_offset], pa, pgprot4k);
				sl_offset++;
			} else {
				BUG_ON(sl_offset + 16 > NUM_SL_PTE);
				sl_64k(&sl_table[sl_offset], pa, pgprot64k);
				sl_offset += 16;
			}

			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
			}
		}

		clean_pte(sl_table + sl_start, sl_table + sl_offset,
			  priv->pt.redirect);

		fl_pte++;
		sl_offset = 0;
	}
	__flush_iotlb(domain);
fail:
	mutex_unlock(&msm_iommu_lock);
	if (ret && offset > 0)
		msm_iommu_unmap_range(domain, start_va, offset);
	return ret;
}

static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int used, i;
	struct msm_iommu_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pt.fl_table;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			sl_start = SL_OFFSET(va);
			sl_table = __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
			clean_pte(sl_table + sl_start, sl_table + sl_end,
				  priv->pt.redirect);

			offset += (sl_end - sl_start) * SZ_4K;
			va += (sl_end - sl_start) * SZ_4K;

			/* Unmap and free the 2nd level table if all mappings
			 * in it were removed. This saves memory, but the table
			 * will need to be re-allocated the next time someone
			 * tries to map these VAs.
			 */
			used = 0;

			/* If we just unmapped the whole table, don't bother
			 * seeing if there are still used entries left.
			 */
			if (sl_end - sl_start != NUM_SL_PTE)
				for (i = 0; i < NUM_SL_PTE; i++)
					if (sl_table[i]) {
						used = 1;
						break;
					}
			if (!used) {
				free_page((unsigned long)sl_table);
				*fl_pte = 0;

				clean_pte(fl_pte, fl_pte + 1,
					  priv->pt.redirect);
			}

			sl_start = 0;
		} else {
			*fl_pte = 0;
			clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}

static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	msm_iommu_remote_spin_lock();

	SET_V2PPR(base, ctx, va & V2Pxx_VA);

	mb();
	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(base, ctx))
		ret = 0;

	msm_iommu_remote_spin_unlock();

	__disable_clocks(iommu_drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static void __print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);

	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_id;
	struct msm_iommu_drvdata *drvdata;
	void __iomem *base;
	unsigned int fsr, num;
	int ret;

	mutex_lock(&msm_iommu_lock);
	BUG_ON(!ctx_drvdata);

	drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
	BUG_ON(!drvdata);

	base = drvdata->base;
	num = ctx_drvdata->num;

	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	msm_iommu_remote_spin_lock();

	fsr = GET_FSR(base, num);

	if (fsr) {
		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			ret = -ENOSYS;
		} else
			ret = report_iommu_fault(ctx_drvdata->attached_domain,
						 &ctx_drvdata->pdev->dev,
						 GET_FAR(base, num), 0);

		if (ret == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault!\n");
			pr_err("name    = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name, num);
			pr_err("Interesting registers:\n");
			__print_ctx_regs(base, num);
		}

		SET_FSR(base, num, fsr);
		/*
		 * Only resume fetches if the registered fault handler
		 * allows it
		 */
		if (ret != -EBUSY)
			SET_RESUME(base, num, 1);

		ret = IRQ_HANDLED;
	} else
		ret = IRQ_NONE;

	msm_iommu_remote_spin_unlock();

	__disable_clocks(drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;

	return __pa(priv->pt.fl_table);
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};

static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
	int i = 0;
	unsigned int prrr = 0;
	unsigned int nmrr = 0;
	int c_icp, c_ocp, c_mt, c_nos;

	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);

	for (i = 0; i < NUM_TEX_CLASS; i++) {
		c_nos = PRRR_NOS(prrr, i);
		c_mt = PRRR_MT(prrr, i);
		c_icp = NMRR_ICP(nmrr, i);
		c_ocp = NMRR_OCP(nmrr, i);

		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
			return i;
	}

	return -ENODEV;
}

static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}
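
/*
 * Illustrative note: get_tex_class() returns an index 0-7 into the
 * CPU's TEX-remap tables. __get_pgprot() above scatters that index into
 * the PTE attribute bits: bit 0 selects BUFFERABLE, bit 1 CACHEABLE and
 * bit 2 TEX0. For example, a class of 0x5 on a 4 KiB mapping yields
 * SL_BUFFERABLE | SL_TEX0.
 */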

static int __init msm_iommu_init(void)
{
	if (!msm_soc_version_supports_iommu_v0())
		return -ENODEV;

	msm_iommu_lock_initialize();

	setup_iommu_tex_classes();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");