/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include <mach/iommu_perfmon.h>
#include <mach/iommu_hw-v0.h>
#include <mach/msm_iommu_priv.h>
#include <mach/iommu.h>
#include <mach/msm_smem.h>
#include <mach/msm_bus.h>

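/*
 * Helpers for reading the CPU's CP15 TEX remap registers (PRRR and NMRR),
 * so the IOMMU contexts can be programmed with the same memory-attribute
 * remapping the CPU uses.
 */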
#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)

/* Shareability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NON_SH		0x0
#define MSM_IOMMU_ATTR_SH		0x4

/* Cacheability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NONCACHED	0x0
#define MSM_IOMMU_ATTR_CACHED_WB_WA	0x1
#define MSM_IOMMU_ATTR_CACHED_WB_NWA	0x2
#define MSM_IOMMU_ATTR_CACHED_WT	0x3

static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len);

static inline void clean_pte(unsigned long *start, unsigned long *end,
			     int redirect)
{
	if (!redirect)
		dmac_flush_range(start, end);
}

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static int msm_iommu_tex_class[4];

DEFINE_MUTEX(msm_iommu_lock);

/*
 * Remote spinlock implementation based on Peterson's algorithm, used to
 * synchronize IOMMU config port access between the CPU and the GPU.
 * This side implements Process 0 of the algorithm; the GPU implements
 * Process 1. The flag and turn variables are kept in shared memory so
 * that the GPU can access them.
 */
struct msm_iommu_remote_lock {
	int initialized;
	struct remote_iommu_petersons_spinlock *lock;
};

static struct msm_iommu_remote_lock msm_iommu_remote_lock;

#ifdef CONFIG_MSM_IOMMU_GPU_SYNC
static void _msm_iommu_remote_spin_lock_init(void)
{
	msm_iommu_remote_lock.lock = smem_alloc(SMEM_SPINLOCK_ARRAY, 32);
	memset(msm_iommu_remote_lock.lock, 0,
			sizeof(*msm_iommu_remote_lock.lock));
}

void msm_iommu_remote_p0_spin_lock(void)
{
	msm_iommu_remote_lock.lock->flag[PROC_APPS] = 1;
	msm_iommu_remote_lock.lock->turn = 1;

	smp_mb();

	while (msm_iommu_remote_lock.lock->flag[PROC_GPU] == 1 &&
	       msm_iommu_remote_lock.lock->turn == 1)
		cpu_relax();
}

void msm_iommu_remote_p0_spin_unlock(void)
{
	smp_mb();

	msm_iommu_remote_lock.lock->flag[PROC_APPS] = 0;
}
#endif

inline void msm_iommu_mutex_lock(void)
{
	mutex_lock(&msm_iommu_lock);
}

inline void msm_iommu_mutex_unlock(void)
{
	mutex_unlock(&msm_iommu_lock);
}

void *msm_iommu_lock_initialize(void)
{
	mutex_lock(&msm_iommu_lock);
	if (!msm_iommu_remote_lock.initialized) {
		msm_iommu_remote_lock_init();
		msm_iommu_remote_lock.initialized = 1;
	}
	mutex_unlock(&msm_iommu_lock);
	return msm_iommu_remote_lock.lock;
}

static int apply_bus_vote(struct msm_iommu_drvdata *drvdata, unsigned int vote)
{
	int ret = 0;

	if (drvdata->bus_client) {
		ret = msm_bus_scale_client_update_request(drvdata->bus_client,
							  vote);
		if (ret)
			pr_err("%s: Failed to vote for bus: %d\n", __func__,
				vote);
	}
	return ret;
}
148
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800149static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
150{
151 int ret;
152
Steve Mucklef132c6c2012-06-06 18:30:57 -0700153 ret = clk_prepare_enable(drvdata->pclk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800154 if (ret)
155 goto fail;
156
157 if (drvdata->clk) {
Steve Mucklef132c6c2012-06-06 18:30:57 -0700158 ret = clk_prepare_enable(drvdata->clk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800159 if (ret)
Steve Mucklef132c6c2012-06-06 18:30:57 -0700160 clk_disable_unprepare(drvdata->pclk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800161 }
Olav Haugan97ce7aa2013-04-30 13:59:41 -0700162
163 if (ret)
164 goto fail;
165
166 if (drvdata->aclk) {
167 ret = clk_prepare_enable(drvdata->aclk);
168 if (ret) {
169 clk_disable_unprepare(drvdata->clk);
170 clk_disable_unprepare(drvdata->pclk);
171 }
172 }
173
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800174fail:
175 return ret;
176}
177
178static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
179{
Olav Haugan97ce7aa2013-04-30 13:59:41 -0700180 if (drvdata->aclk)
181 clk_disable_unprepare(drvdata->aclk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800182 if (drvdata->clk)
Steve Mucklef132c6c2012-06-06 18:30:57 -0700183 clk_disable_unprepare(drvdata->clk);
184 clk_disable_unprepare(drvdata->pclk);
185}
186
Olav Hauganeece7e52013-04-02 10:22:21 -0700187static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
Olav Hauganc5993142013-02-04 13:59:39 -0800188{
Olav Hauganeece7e52013-04-02 10:22:21 -0700189 /* No need to do anything. IOMMUv0 is always on. */
190 return 0;
Olav Hauganc5993142013-02-04 13:59:39 -0800191}
192
Olav Hauganeece7e52013-04-02 10:22:21 -0700193static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
Olav Hauganc5993142013-02-04 13:59:39 -0800194{
Olav Hauganeece7e52013-04-02 10:22:21 -0700195 /* No need to do anything. IOMMUv0 is always on. */
Olav Hauganc5993142013-02-04 13:59:39 -0800196}
197
Jordan Crouse64bf39f2013-04-18 15:48:13 -0600198static void *_iommu_lock_initialize(void)
199{
200 return msm_iommu_lock_initialize();
201}
202
Olav Hauganc5993142013-02-04 13:59:39 -0800203static void _iommu_lock_acquire(void)
204{
205 msm_iommu_lock();
206}
207
208static void _iommu_lock_release(void)
209{
210 msm_iommu_unlock();
211}
212
213struct iommu_access_ops iommu_access_ops_v0 = {
Olav Hauganeece7e52013-04-02 10:22:21 -0700214 .iommu_power_on = __enable_regulators,
215 .iommu_power_off = __disable_regulators,
Olav Haugan236970a2013-05-14 17:00:02 -0700216 .iommu_bus_vote = apply_bus_vote,
Olav Hauganeece7e52013-04-02 10:22:21 -0700217 .iommu_clk_on = __enable_clocks,
218 .iommu_clk_off = __disable_clocks,
Jordan Crouse64bf39f2013-04-18 15:48:13 -0600219 .iommu_lock_initialize = _iommu_lock_initialize,
Olav Hauganc5993142013-02-04 13:59:39 -0800220 .iommu_lock_acquire = _iommu_lock_acquire,
221 .iommu_lock_release = _iommu_lock_release,
222};
Olav Hauganc5993142013-02-04 13:59:39 -0800223
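/*
 * Invalidate the TLB entry for a single virtual address in every context
 * attached to this domain, tagging the operation with the context's ASID.
 */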
static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		msm_iommu_remote_spin_lock();

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   asid | (va & TLBIVA_VA));
		mb();

		msm_iommu_remote_spin_unlock();

		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		msm_iommu_remote_spin_lock();

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
		mb();

		msm_iommu_remote_spin_unlock();

		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

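/*
 * Clear every per-context register so the context starts from a known
 * state before it is (re)programmed or after it is detached.
 */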
static void __reset_context(void __iomem *base, void __iomem *glb_base, int ctx)
{
	SET_BPRCOSH(glb_base, ctx, 0);
	SET_BPRCISH(glb_base, ctx, 0);
	SET_BPRCNSH(glb_base, ctx, 0);
	SET_BPSHCFG(glb_base, ctx, 0);
	SET_BPMTCFG(glb_base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();
}

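/*
 * Program a translation context: point it at the domain's first-level page
 * table, set up TEX remap and page-table walk attributes, pick an ASID
 * (re-using the ASID of any other context that shares this page table),
 * and finally enable the MMU for the context.
 */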
static void __program_context(void __iomem *base, void __iomem *glb_base,
			      int ctx, int ncb, phys_addr_t pgtable,
			      int redirect, int ttbr_split)
{
	unsigned int prrr, nmrr;
	int i, j, found;

	msm_iommu_remote_spin_lock();

	__reset_context(base, glb_base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, ttbr_split);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));
	if (ttbr_split)
		SET_TTBR1_PA(base, ctx, (pgtable >> TTBR1_PA_SHIFT));

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_TTBR0_SH(base, ctx, 1);
		SET_TTBR1_SH(base, ctx, 1);

		SET_TTBR0_NOS(base, ctx, 1);
		SET_TTBR1_NOS(base, ctx, 1);

		SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR0_IRGNL(base, ctx, 1);

		SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR1_IRGNL(base, ctx, 1);

		SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
		SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
	}

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx,
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();

	msm_iommu_remote_spin_unlock();
}

static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_iommu_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);
	priv->pt.fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
							      get_order(SZ_16K));

	if (!priv->pt.fl_table)
		goto fail_nomem;

#ifdef CONFIG_IOMMU_PGTABLES_L2
	priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
#endif

	memset(priv->pt.fl_table, 0, SZ_16K);
	domain->priv = priv;

	clean_pte(priv->pt.fl_table, priv->pt.fl_table + NUM_FL_PTE,
		  priv->pt.redirect);

	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv;
	unsigned long *fl_table;
	int i;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pt.fl_table;

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
								FL_BASE_MASK)));

		free_pages((unsigned long)priv->pt.fl_table, get_order(SZ_16K));
		priv->pt.fl_table = NULL;
	}

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}

static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto unlock;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);

	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto unlock;
	}

	++ctx_drvdata->attach_count;

	if (ctx_drvdata->attach_count > 1)
		goto unlock;

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto unlock;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto unlock;
		}

	ret = apply_bus_vote(iommu_drvdata, 1);

	if (ret)
		goto unlock;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto unlock;

	__program_context(iommu_drvdata->base, iommu_drvdata->glb_base,
			  ctx_drvdata->num, iommu_drvdata->ncb,
			  __pa(priv->pt.fl_table), priv->pt.redirect,
			  iommu_drvdata->ttbr_split);

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);

	ctx_drvdata->attached_domain = domain;

	mutex_unlock(&msm_iommu_lock);

	msm_iommu_attached(dev->parent);
	return ret;
unlock:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	msm_iommu_detached(dev->parent);

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;

	if (!priv || !dev)
		goto unlock;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);

	if (!iommu_drvdata || !ctx_drvdata)
		goto unlock;

	--ctx_drvdata->attach_count;
	BUG_ON(ctx_drvdata->attach_count < 0);

	if (ctx_drvdata->attach_count > 0)
		goto unlock;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto unlock;

	msm_iommu_remote_spin_lock();

	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
		     GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_drvdata->num));

	__reset_context(iommu_drvdata->base, iommu_drvdata->glb_base,
			ctx_drvdata->num);

	msm_iommu_remote_spin_unlock();

	__disable_clocks(iommu_drvdata);

	apply_bus_vote(iommu_drvdata, 0);

	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;
unlock:
	mutex_unlock(&msm_iommu_lock);
}

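/*
 * Translate IOMMU_READ/WRITE/CACHE prot flags into ARM short-descriptor
 * attribute bits: first-level bits for 1M/16M mappings, second-level bits
 * for 4K/64K. Returns 0 if the required TEX class is not available.
 */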
static int __get_pgprot(int prot, int len)
{
	unsigned int pgprot;
	int tex;

	if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
		prot |= IOMMU_READ | IOMMU_WRITE;
		WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
	}

	if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
		prot |= IOMMU_READ;
		WARN_ONCE(1, "Write-only iommu mappings unsupported; falling back to RW\n");
	}

	if (prot & IOMMU_CACHE)
		tex = (pgprot_kernel >> 2) & 0x07;
	else
		tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];

	if (tex < 0 || tex > NUM_TEX_CLASS - 1)
		return 0;

	if (len == SZ_16M || len == SZ_1M) {
		pgprot = FL_SHARED;
		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
		pgprot |= FL_AP0 | FL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
	} else {
		pgprot = SL_SHARED;
		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
		pgprot |= SL_AP0 | SL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
	}

	return pgprot;
}

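/*
 * Allocate and zero a second-level page table and install a first-level
 * table descriptor pointing at it. Returns the new table, or NULL if the
 * allocation failed.
 */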
static unsigned long *make_second_level(struct msm_iommu_priv *priv,
					unsigned long *fl_pte)
{
	unsigned long *sl;
	sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
						get_order(SZ_4K));

	if (!sl) {
		pr_debug("Could not allocate second level table\n");
		goto fail;
	}
	memset(sl, 0, SZ_4K);
	clean_pte(sl, sl + NUM_SL_PTE, priv->pt.redirect);

	*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) |
			FL_TYPE_TABLE);

	clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
fail:
	return sl;
}

static int sl_4k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
	int ret = 0;

	if (*sl_pte) {
		ret = -EBUSY;
		goto fail;
	}

	*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
		| SL_TYPE_SMALL | pgprot;
fail:
	return ret;
}

static int sl_64k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
	int ret = 0;

	int i;

	for (i = 0; i < 16; i++)
		if (*(sl_pte+i)) {
			ret = -EBUSY;
			goto fail;
		}

	for (i = 0; i < 16; i++)
		*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
				| SL_SHARED | SL_TYPE_LARGE | pgprot;

fail:
	return ret;
}

static inline int fl_1m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
	if (*fl_pte)
		return -EBUSY;

	*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
		| pgprot;

	return 0;
}

static inline int fl_16m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
	int i;
	int ret = 0;
	for (i = 0; i < 16; i++)
		if (*(fl_pte+i)) {
			ret = -EBUSY;
			goto fail;
		}
	for (i = 0; i < 16; i++)
		*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
			| FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
fail:
	return ret;
}

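/*
 * Map a single physically contiguous region of exactly 4K, 64K, 1M or 16M
 * at the given VA, allocating a second-level table when needed, and then
 * flush the TLB entry for that VA.
 */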
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_iommu_priv *priv;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	unsigned int pgprot;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	fl_table = priv->pt.fl_table;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad size: %zd\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	pgprot = __get_pgprot(prot, len);

	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (len == SZ_16M) {
		ret = fl_16m(fl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(fl_pte, fl_pte + 16, priv->pt.redirect);
	}

	if (len == SZ_1M) {
		ret = fl_1m(fl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
	}

	/* Need a 2nd level table */
	if (len == SZ_4K || len == SZ_64K) {

		if (*fl_pte == 0) {
			if (make_second_level(priv, fl_pte) == NULL) {
				ret = -ENOMEM;
				goto fail;
			}
		}

		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_4K) {
		ret = sl_4k(sl_pte, pa, pgprot);
		if (ret)
			goto fail;

		clean_pte(sl_pte, sl_pte + 1, priv->pt.redirect);
	}

	if (len == SZ_64K) {
		ret = sl_64k(sl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(sl_pte, sl_pte + 16, priv->pt.redirect);
	}

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			      size_t len)
{
	struct msm_iommu_priv *priv;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	int i, ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv)
		goto fail;

	fl_table = priv->pt.fl_table;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad length: %zd\n", len);
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (*fl_pte == 0) {
		pr_debug("First level PTE is 0\n");
		goto fail;
	}

	/* Unmap supersection */
	if (len == SZ_16M) {
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = 0;

		clean_pte(fl_pte, fl_pte + 16, priv->pt.redirect);
	}

	if (len == SZ_1M) {
		*fl_pte = 0;

		clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_64K) {
		for (i = 0; i < 16; i++)
			*(sl_pte+i) = 0;

		clean_pte(sl_pte, sl_pte + 16, priv->pt.redirect);
	}

	if (len == SZ_4K) {
		*sl_pte = 0;

		clean_pte(sl_pte, sl_pte + 1, priv->pt.redirect);
	}

	if (len == SZ_4K || len == SZ_64K) {
		int used = 0;

		for (i = 0; i < NUM_SL_PTE; i++)
			if (sl_table[i])
				used = 1;
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
		}
	}

	ret = __flush_iotlb_va(domain, va);

fail:
	mutex_unlock(&msm_iommu_lock);

	/* the IOMMU API requires us to return how many bytes were unmapped */
	len = ret ? 0 : len;
	return len;
}

static unsigned int get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	unsigned int pa = sg_dma_address(sg);
	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
				   int align)
{
	return  IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
		&& (len >= align);
}

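/*
 * Verify that no PTE in [va, va + len) is already populated; called by
 * msm_iommu_map_range() before it writes any entries.
 */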
static int check_range(unsigned long *fl_table, unsigned int va,
			 unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int i;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			sl_start = SL_OFFSET(va);
			sl_table =  __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			for (i = sl_start; i < sl_end; i++) {
				if (sl_table[i] != 0) {
					pr_err("%08x - %08x already mapped\n",
						va, va + SZ_4K);
					return -EBUSY;
				}
				offset += SZ_4K;
				va += SZ_4K;
			}

			sl_start = 0;
		} else {
			if (*fl_pte != 0) {
				pr_err("%08x - %08x already mapped\n",
					va, va + SZ_1M);
				return -EBUSY;
			}
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}
	return 0;
}

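/*
 * Map a scatterlist into the domain, picking the largest chunk size (16M,
 * 1M, 64K or 4K) that the current VA/PA alignment and remaining sg length
 * allow. On failure, any portion that was already mapped is unmapped again.
 */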
static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	unsigned int pa;
	unsigned int start_va = va;
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table = NULL;
	unsigned long sl_offset, sl_start;
	unsigned int chunk_size, chunk_offset = 0;
	int ret = 0;
	struct msm_iommu_priv *priv;
	unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pt.fl_table;

	pgprot4k = __get_pgprot(prot, SZ_4K);
	pgprot64k = __get_pgprot(prot, SZ_64K);
	pgprot1m = __get_pgprot(prot, SZ_1M);
	pgprot16m = __get_pgprot(prot, SZ_16M);

	if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
		ret = -EINVAL;
		goto fail;
	}
	ret = check_range(fl_table, va, len);
	if (ret)
		goto fail;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
	pa = get_phys_addr(sg);

	while (offset < len) {
		chunk_size = SZ_4K;

		if (is_fully_aligned(va, pa, sg->length - chunk_offset,
				     SZ_16M))
			chunk_size = SZ_16M;
		else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					  SZ_1M))
			chunk_size = SZ_1M;
		/* 64k or 4k determined later */

		/* for 1M and 16M, only first level entries are required */
		if (chunk_size >= SZ_1M) {
			if (chunk_size == SZ_16M) {
				ret = fl_16m(fl_pte, pa, pgprot16m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 16,
					  priv->pt.redirect);
				fl_pte += 16;
			} else if (chunk_size == SZ_1M) {
				ret = fl_1m(fl_pte, pa, pgprot1m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 1,
					  priv->pt.redirect);
				fl_pte++;
			}

			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
			}
			continue;
		}
		/* for 4K or 64K, make sure there is a second level table */
		if (*fl_pte == 0) {
			if (!make_second_level(priv, fl_pte)) {
				ret = -ENOMEM;
				goto fail;
			}
		}
		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
		sl_table = __va(((*fl_pte) & FL_BASE_MASK));
		sl_offset = SL_OFFSET(va);
		/* Keep track of initial position so we
		 * don't clean more than we have to
		 */
		sl_start = sl_offset;

		/* Build the 2nd level page table */
		while (offset < len && sl_offset < NUM_SL_PTE) {

			/* Map a large 64K page if the chunk is large enough and
			 * the pa and va are aligned
			 */

			if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					     SZ_64K))
				chunk_size = SZ_64K;
			else
				chunk_size = SZ_4K;

			if (chunk_size == SZ_4K) {
				sl_4k(&sl_table[sl_offset], pa, pgprot4k);
				sl_offset++;
			} else {
				BUG_ON(sl_offset + 16 > NUM_SL_PTE);
				sl_64k(&sl_table[sl_offset], pa, pgprot64k);
				sl_offset += 16;
			}

			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
			}
		}

		clean_pte(sl_table + sl_start, sl_table + sl_offset,
			  priv->pt.redirect);

		fl_pte++;
		sl_offset = 0;
	}
	__flush_iotlb(domain);
fail:
	mutex_unlock(&msm_iommu_lock);
	if (ret && offset > 0)
		msm_iommu_unmap_range(domain, start_va, offset);
	return ret;
}

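/*
 * Unmap an arbitrary 4K-aligned range, clearing second-level entries a
 * table at a time and freeing any second-level table that becomes
 * completely empty.
 */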
static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int used, i;
	struct msm_iommu_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pt.fl_table;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			sl_start = SL_OFFSET(va);
			sl_table =  __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
			clean_pte(sl_table + sl_start, sl_table + sl_end,
				  priv->pt.redirect);

			offset += (sl_end - sl_start) * SZ_4K;
			va += (sl_end - sl_start) * SZ_4K;

			/* Unmap and free the 2nd level table if all mappings
			 * in it were removed. This saves memory, but the table
			 * will need to be re-allocated the next time someone
			 * tries to map these VAs.
			 */
			used = 0;

			/* If we just unmapped the whole table, don't bother
			 * seeing if there are still used entries left.
			 */
			if (sl_end - sl_start != NUM_SL_PTE)
				for (i = 0; i < NUM_SL_PTE; i++)
					if (sl_table[i]) {
						used = 1;
						break;
					}
			if (!used) {
				free_page((unsigned long)sl_table);
				*fl_pte = 0;

				clean_pte(fl_pte, fl_pte + 1,
					  priv->pt.redirect);
			}

			sl_start = 0;
		} else {
			*fl_pte = 0;
			clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}

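/*
 * Resolve a VA to a PA using the hardware V2P translation port of the
 * first attached context, rather than walking the page table in software.
 */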
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	msm_iommu_remote_spin_lock();

	SET_V2PPR(base, ctx, va & V2Pxx_VA);

	mb();
	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(base, ctx))
		ret = 0;

	msm_iommu_remote_spin_unlock();

	__disable_clocks(iommu_drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static void __print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

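/*
 * Context fault interrupt handler: read the fault status, report the fault
 * through report_iommu_fault(), dump the context registers for unhandled
 * faults, and resume stalled transactions unless the registered handler
 * returned -EBUSY.
 */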
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_id;
	struct msm_iommu_drvdata *drvdata;
	void __iomem *base;
	unsigned int fsr, num;
	int ret;

	mutex_lock(&msm_iommu_lock);
	BUG_ON(!ctx_drvdata);

	drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
	BUG_ON(!drvdata);

	base = drvdata->base;
	num = ctx_drvdata->num;

	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	msm_iommu_remote_spin_lock();

	fsr = GET_FSR(base, num);

	if (fsr) {
		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			ret = -ENOSYS;
		} else
			ret = report_iommu_fault(ctx_drvdata->attached_domain,
				&ctx_drvdata->pdev->dev,
				GET_FAR(base, num), 0);

		if (ret == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault!\n");
			pr_err("name    = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name, num);
			pr_err("Interesting registers:\n");
			__print_ctx_regs(base, num);
		}

		SET_FSR(base, num, fsr);
		/*
		 * Only resume fetches if the registered fault handler
		 * allows it
		 */
		if (ret != -EBUSY)
			SET_RESUME(base, num, 1);

		ret = IRQ_HANDLED;
	} else
		ret = IRQ_NONE;

	msm_iommu_remote_spin_unlock();

	__disable_clocks(drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;
	return __pa(priv->pt.fl_table);
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};

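/*
 * Find the TEX remap class in the CPU's PRRR/NMRR settings that matches the
 * requested inner/outer cache policy, memory type and shareability; returns
 * the class index, or -ENODEV if no class matches.
 */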
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
	int i = 0;
	unsigned int prrr = 0;
	unsigned int nmrr = 0;
	int c_icp, c_ocp, c_mt, c_nos;

	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);

	for (i = 0; i < NUM_TEX_CLASS; i++) {
		c_nos = PRRR_NOS(prrr, i);
		c_mt = PRRR_MT(prrr, i);
		c_icp = NMRR_ICP(nmrr, i);
		c_ocp = NMRR_OCP(nmrr, i);

		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
			return i;
	}

	return -ENODEV;
}

static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}

static int __init msm_iommu_init(void)
{
	if (!msm_soc_version_supports_iommu_v0())
		return -ENODEV;

	msm_iommu_lock_initialize();

	setup_iommu_tex_classes();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");