/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include <mach/iommu_perfmon.h>
#include <mach/iommu_hw-v0.h>
#include <mach/msm_iommu_priv.h>
#include <mach/iommu.h>
#include <mach/msm_smem.h>
#include <mach/msm_bus.h>

#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   "  #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n"	\
: "=r" (reg))

#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)

/* Sharability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NON_SH		0x0
#define MSM_IOMMU_ATTR_SH		0x4

/* Cacheability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NONCACHED	0x0
#define MSM_IOMMU_ATTR_CACHED_WB_WA	0x1
#define MSM_IOMMU_ATTR_CACHED_WB_NWA	0x2
#define MSM_IOMMU_ATTR_CACHED_WT	0x3

static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len);

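/*
 * When the page tables are not marked as L2-redirectable (redirect == 0),
 * PTE updates made by the CPU have to be flushed out of the data cache so
 * that the IOMMU's hardware table walker, which fetches from memory, sees
 * them.
 */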
static inline void clean_pte(unsigned long *start, unsigned long *end,
			     int redirect)
{
	if (!redirect)
		dmac_flush_range(start, end);
}

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static int msm_iommu_tex_class[4];

DEFINE_MUTEX(msm_iommu_lock);

/**
 * Remote spinlock implementation based on Peterson's algorithm, used to
 * synchronize IOMMU config port access between the CPU and the GPU.
 * This implements Process 0 of the spinlock algorithm; the GPU implements
 * Process 1. The flag and turn are stored in shared memory so that the GPU
 * can access them.
 */
struct msm_iommu_remote_lock {
	int initialized;
	struct remote_iommu_petersons_spinlock *lock;
};

static struct msm_iommu_remote_lock msm_iommu_remote_lock;

#ifdef CONFIG_MSM_IOMMU_SYNC
static void _msm_iommu_remote_spin_lock_init(void)
{
	msm_iommu_remote_lock.lock = smem_alloc(SMEM_SPINLOCK_ARRAY, 32);
	memset(msm_iommu_remote_lock.lock, 0,
			sizeof(*msm_iommu_remote_lock.lock));
}

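/*
 * Process 0 (APPS) side of Peterson's algorithm: raise our flag, give the
 * turn to the GPU, then spin while the GPU both wants the lock and holds
 * the turn. need_lock lets callers skip the remote handshake on IOMMUs
 * that do not set needs_rem_spinlock.
 */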
void msm_iommu_remote_p0_spin_lock(unsigned int need_lock)
{
	if (!need_lock)
		return;

	msm_iommu_remote_lock.lock->flag[PROC_APPS] = 1;
	msm_iommu_remote_lock.lock->turn = 1;

	smp_mb();

	while (msm_iommu_remote_lock.lock->flag[PROC_GPU] == 1 &&
	       msm_iommu_remote_lock.lock->turn == 1)
		cpu_relax();
}

void msm_iommu_remote_p0_spin_unlock(unsigned int need_lock)
{
	if (!need_lock)
		return;

	smp_mb();

	msm_iommu_remote_lock.lock->flag[PROC_APPS] = 0;
}
#endif

inline void msm_iommu_mutex_lock(void)
{
	mutex_lock(&msm_iommu_lock);
}

inline void msm_iommu_mutex_unlock(void)
{
	mutex_unlock(&msm_iommu_lock);
}

void *msm_iommu_lock_initialize(void)
{
	mutex_lock(&msm_iommu_lock);
	if (!msm_iommu_remote_lock.initialized) {
		msm_iommu_remote_lock_init();
		msm_iommu_remote_lock.initialized = 1;
	}
	mutex_unlock(&msm_iommu_lock);
	return msm_iommu_remote_lock.lock;
}

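/*
 * Request (vote = 1) or drop (vote = 0) bus bandwidth for this IOMMU, if a
 * bus scaling client was registered for it.
 */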
static int apply_bus_vote(struct msm_iommu_drvdata *drvdata, unsigned int vote)
{
	int ret = 0;

	if (drvdata->bus_client) {
		ret = msm_bus_scale_client_update_request(drvdata->bus_client,
							  vote);
		if (ret)
			pr_err("%s: Failed to vote for bus: %d\n", __func__,
				vote);
	}
	return ret;
}

static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_prepare_enable(drvdata->pclk);
	if (ret)
		goto fail;

	if (drvdata->clk) {
		ret = clk_prepare_enable(drvdata->clk);
		if (ret)
			clk_disable_unprepare(drvdata->pclk);
	}

	if (ret)
		goto fail;

	if (drvdata->aclk) {
		ret = clk_prepare_enable(drvdata->aclk);
		if (ret) {
			clk_disable_unprepare(drvdata->clk);
			clk_disable_unprepare(drvdata->pclk);
		}
	}

fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->aclk)
		clk_disable_unprepare(drvdata->aclk);
	if (drvdata->clk)
		clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}

static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
{
	/* No need to do anything. IOMMUv0 is always on. */
	return 0;
}

static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
{
	/* No need to do anything. IOMMUv0 is always on. */
}

static void *_iommu_lock_initialize(void)
{
	return msm_iommu_lock_initialize();
}

static void _iommu_lock_acquire(unsigned int need_extra_lock)
{
	msm_iommu_mutex_lock();
	msm_iommu_remote_spin_lock(need_extra_lock);
}

static void _iommu_lock_release(unsigned int need_extra_lock)
{
	msm_iommu_remote_spin_unlock(need_extra_lock);
	msm_iommu_mutex_unlock();
}

struct iommu_access_ops iommu_access_ops_v0 = {
	.iommu_power_on = __enable_regulators,
	.iommu_power_off = __disable_regulators,
	.iommu_bus_vote = apply_bus_vote,
	.iommu_clk_on = __enable_clocks,
	.iommu_clk_off = __disable_clocks,
	.iommu_lock_initialize = _iommu_lock_initialize,
	.iommu_lock_acquire = _iommu_lock_acquire,
	.iommu_lock_release = _iommu_lock_release,
};

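/*
 * TLB maintenance helpers: __flush_iotlb_va() invalidates a single VA
 * (tagged with the context's ASID) in every IOMMU attached to the domain,
 * while __flush_iotlb() below it invalidates everything under the ASID.
 */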
static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		msm_iommu_remote_spin_lock(iommu_drvdata->needs_rem_spinlock);

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   asid | (va & TLBIVA_VA));
		mb();

		msm_iommu_remote_spin_unlock(iommu_drvdata->needs_rem_spinlock);

		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		msm_iommu_remote_spin_lock(iommu_drvdata->needs_rem_spinlock);

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
		mb();

		msm_iommu_remote_spin_unlock(iommu_drvdata->needs_rem_spinlock);

		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static void __reset_context(void __iomem *base, void __iomem *glb_base, int ctx)
{
	SET_BPRCOSH(glb_base, ctx, 0);
	SET_BPRCISH(glb_base, ctx, 0);
	SET_BPRCNSH(glb_base, ctx, 0);
	SET_BPSHCFG(glb_base, ctx, 0);
	SET_BPMTCFG(glb_base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();
}

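/*
 * Program a context bank for a newly attached domain: reset it, point
 * TTBR0/TTBR1 at the domain's page table, mirror the CPU's TEX remap
 * registers into the context, reuse the ASID of another context that
 * shares the same page table (or pick an unused one), then enable
 * translation.
 */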
static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,
			      int ctx, int ncb, phys_addr_t pgtable,
			      int redirect, int ttbr_split)
{
	void __iomem *base = iommu_drvdata->base;
	void __iomem *glb_base = iommu_drvdata->glb_base;
	unsigned int prrr, nmrr;
	int i, j, found;

	msm_iommu_remote_spin_lock(iommu_drvdata->needs_rem_spinlock);

	__reset_context(base, glb_base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, ttbr_split);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));
	if (ttbr_split)
		SET_TTBR1_PA(base, ctx, (pgtable >> TTBR1_PA_SHIFT));

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_TTBR0_SH(base, ctx, 1);
		SET_TTBR1_SH(base, ctx, 1);

		SET_TTBR0_NOS(base, ctx, 1);
		SET_TTBR1_NOS(base, ctx, 1);

		SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR0_IRGNL(base, ctx, 1);

		SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR1_IRGNL(base, ctx, 1);

		SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
		SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
	}

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx, \
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();

	msm_iommu_remote_spin_unlock(iommu_drvdata->needs_rem_spinlock);
}

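/*
 * Each domain owns a 16 KB first-level table (one entry per 1 MB of the
 * 32-bit IOVA space); second-level tables are allocated on demand by
 * make_second_level().
 */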
static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_iommu_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);
	priv->pt.fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
							      get_order(SZ_16K));

	if (!priv->pt.fl_table)
		goto fail_nomem;

#ifdef CONFIG_IOMMU_PGTABLES_L2
	priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
#endif

	memset(priv->pt.fl_table, 0, SZ_16K);
	domain->priv = priv;

	clean_pte(priv->pt.fl_table, priv->pt.fl_table + NUM_FL_PTE,
		  priv->pt.redirect);

	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv;
	unsigned long *fl_table;
	int i;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pt.fl_table;

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
						FL_BASE_MASK)));

		free_pages((unsigned long)priv->pt.fl_table, get_order(SZ_16K));
		priv->pt.fl_table = NULL;
	}

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}

static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto unlock;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);

	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto unlock;
	}

	++ctx_drvdata->attach_count;

	if (ctx_drvdata->attach_count > 1)
		goto unlock;

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto unlock;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto unlock;
		}

	ret = apply_bus_vote(iommu_drvdata, 1);

	if (ret)
		goto unlock;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto unlock;

	__program_context(iommu_drvdata,
			  ctx_drvdata->num, iommu_drvdata->ncb,
			  __pa(priv->pt.fl_table), priv->pt.redirect,
			  iommu_drvdata->ttbr_split);

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);

	ctx_drvdata->attached_domain = domain;

	mutex_unlock(&msm_iommu_lock);

	msm_iommu_attached(dev->parent);
	return ret;
unlock:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	msm_iommu_detached(dev->parent);

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;

	if (!priv || !dev)
		goto unlock;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);

	if (!iommu_drvdata || !ctx_drvdata)
		goto unlock;

	--ctx_drvdata->attach_count;
	BUG_ON(ctx_drvdata->attach_count < 0);

	if (ctx_drvdata->attach_count > 0)
		goto unlock;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto unlock;

	msm_iommu_remote_spin_lock(iommu_drvdata->needs_rem_spinlock);

	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
		GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_drvdata->num));

	__reset_context(iommu_drvdata->base, iommu_drvdata->glb_base,
			ctx_drvdata->num);

	msm_iommu_remote_spin_unlock(iommu_drvdata->needs_rem_spinlock);

	__disable_clocks(iommu_drvdata);

	apply_bus_vote(iommu_drvdata, 0);

	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;
unlock:
	mutex_unlock(&msm_iommu_lock);
}

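/*
 * Translate IOMMU_READ/IOMMU_WRITE/IOMMU_CACHE flags into ARM
 * short-descriptor attribute bits, either for a (super)section or for a
 * small/large page depending on the mapping length. Returns 0 when no
 * usable TEX class is available.
 */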
static int __get_pgprot(int prot, int len)
{
	unsigned int pgprot;
	int tex;

	if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
		prot |= IOMMU_READ | IOMMU_WRITE;
		WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
	}

	if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
		prot |= IOMMU_READ;
		WARN_ONCE(1, "Write-only iommu mappings unsupported; falling back to RW\n");
	}

	if (prot & IOMMU_CACHE)
		tex = (pgprot_kernel >> 2) & 0x07;
	else
		tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];

	if (tex < 0 || tex > NUM_TEX_CLASS - 1)
		return 0;

	if (len == SZ_16M || len == SZ_1M) {
		pgprot = FL_SHARED;
		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
		pgprot |= FL_AP0 | FL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
	} else {
		pgprot = SL_SHARED;
		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
		pgprot |= SL_AP0 | SL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
	}

	return pgprot;
}

static unsigned long *make_second_level(struct msm_iommu_priv *priv,
					unsigned long *fl_pte)
{
	unsigned long *sl;
	sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
			get_order(SZ_4K));

	if (!sl) {
		pr_debug("Could not allocate second level table\n");
		goto fail;
	}
	memset(sl, 0, SZ_4K);
	clean_pte(sl, sl + NUM_SL_PTE, priv->pt.redirect);

	*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
			FL_TYPE_TABLE);

	clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
fail:
	return sl;
}

static int sl_4k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
	int ret = 0;

	if (*sl_pte) {
		ret = -EBUSY;
		goto fail;
	}

	*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
		| SL_TYPE_SMALL | pgprot;
fail:
	return ret;
}

static int sl_64k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
	int ret = 0;

	int i;

	for (i = 0; i < 16; i++)
		if (*(sl_pte+i)) {
			ret = -EBUSY;
			goto fail;
		}

	for (i = 0; i < 16; i++)
		*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
			| SL_SHARED | SL_TYPE_LARGE | pgprot;

fail:
	return ret;
}


static inline int fl_1m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
	if (*fl_pte)
		return -EBUSY;

	*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
		| pgprot;

	return 0;
}


static inline int fl_16m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
	int i;
	int ret = 0;
	for (i = 0; i < 16; i++)
		if (*(fl_pte+i)) {
			ret = -EBUSY;
			goto fail;
		}
	for (i = 0; i < 16; i++)
		*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
			| FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
fail:
	return ret;
}

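/*
 * Single mapping entry point, reached through msm_iommu_ops. Clients
 * normally come in through the generic IOMMU API, for example (purely
 * illustrative; the flags depend on the caller):
 *
 *	iommu_map(domain, va, pa, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 */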
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_iommu_priv *priv;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	unsigned int pgprot;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	fl_table = priv->pt.fl_table;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad size: %d\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	pgprot = __get_pgprot(prot, len);

	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (len == SZ_16M) {
		ret = fl_16m(fl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(fl_pte, fl_pte + 16, priv->pt.redirect);
	}

	if (len == SZ_1M) {
		ret = fl_1m(fl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
	}

	/* Need a 2nd level table */
	if (len == SZ_4K || len == SZ_64K) {

		if (*fl_pte == 0) {
			if (make_second_level(priv, fl_pte) == NULL) {
				ret = -ENOMEM;
				goto fail;
			}
		}

		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_4K) {
		ret = sl_4k(sl_pte, pa, pgprot);
		if (ret)
			goto fail;

		clean_pte(sl_pte, sl_pte + 1, priv->pt.redirect);
	}

	if (len == SZ_64K) {
		ret = sl_64k(sl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(sl_pte, sl_pte + 16, priv->pt.redirect);
	}

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			      size_t len)
{
	struct msm_iommu_priv *priv;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	int i, ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv)
		goto fail;

	fl_table = priv->pt.fl_table;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad length: %d\n", len);
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (*fl_pte == 0) {
		pr_debug("First level PTE is 0\n");
		goto fail;
	}

	/* Unmap supersection */
	if (len == SZ_16M) {
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = 0;

		clean_pte(fl_pte, fl_pte + 16, priv->pt.redirect);
	}

	if (len == SZ_1M) {
		*fl_pte = 0;

		clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_64K) {
		for (i = 0; i < 16; i++)
			*(sl_pte+i) = 0;

		clean_pte(sl_pte, sl_pte + 16, priv->pt.redirect);
	}

	if (len == SZ_4K) {
		*sl_pte = 0;

		clean_pte(sl_pte, sl_pte + 1, priv->pt.redirect);
	}

	if (len == SZ_4K || len == SZ_64K) {
		int used = 0;

		for (i = 0; i < NUM_SL_PTE; i++)
			if (sl_table[i])
				used = 1;
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
		}
	}

	ret = __flush_iotlb_va(domain, va);

fail:
	mutex_unlock(&msm_iommu_lock);

	/* the IOMMU API requires us to return how many bytes were unmapped */
	len = ret ? 0 : len;
	return len;
}

static unsigned int get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	unsigned int pa = sg_dma_address(sg);
	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
				   int align)
{
	return  IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
		&& (len >= align);
}

static int check_range(unsigned long *fl_table, unsigned int va,
			 unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int i;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			sl_start = SL_OFFSET(va);
			sl_table =  __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			for (i = sl_start; i < sl_end; i++) {
				if (sl_table[i] != 0) {
					pr_err("%08x - %08x already mapped\n",
						va, va + SZ_4K);
					return -EBUSY;
				}
				offset += SZ_4K;
				va += SZ_4K;
			}

			sl_start = 0;
		} else {
			if (*fl_pte != 0) {
				pr_err("%08x - %08x already mapped\n",
				       va, va + SZ_1M);
				return -EBUSY;
			}
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}
	return 0;
}

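/*
 * Map a scatterlist into the domain. At each step the largest chunk size
 * (16M/1M/64K/4K) allowed by the current VA/PA alignment and the bytes
 * remaining in the sg entry is chosen; on failure, everything mapped so
 * far is torn down again with msm_iommu_unmap_range().
 */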
static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	unsigned int pa;
	unsigned int start_va = va;
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table = NULL;
	unsigned long sl_offset, sl_start;
	unsigned int chunk_size, chunk_offset = 0;
	int ret = 0;
	struct msm_iommu_priv *priv;
	unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pt.fl_table;

	pgprot4k = __get_pgprot(prot, SZ_4K);
	pgprot64k = __get_pgprot(prot, SZ_64K);
	pgprot1m = __get_pgprot(prot, SZ_1M);
	pgprot16m = __get_pgprot(prot, SZ_16M);

	if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
		ret = -EINVAL;
		goto fail;
	}
	ret = check_range(fl_table, va, len);
	if (ret)
		goto fail;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
	pa = get_phys_addr(sg);

	while (offset < len) {
		chunk_size = SZ_4K;

		if (is_fully_aligned(va, pa, sg->length - chunk_offset,
				     SZ_16M))
			chunk_size = SZ_16M;
		else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					  SZ_1M))
			chunk_size = SZ_1M;
		/* 64k or 4k determined later */

		/* for 1M and 16M, only first level entries are required */
		if (chunk_size >= SZ_1M) {
			if (chunk_size == SZ_16M) {
				ret = fl_16m(fl_pte, pa, pgprot16m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 16,
					  priv->pt.redirect);
				fl_pte += 16;
			} else if (chunk_size == SZ_1M) {
				ret = fl_1m(fl_pte, pa, pgprot1m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 1,
					  priv->pt.redirect);
				fl_pte++;
			}

			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
			}
			continue;
		}
		/* for 4K or 64K, make sure there is a second level table */
		if (*fl_pte == 0) {
			if (!make_second_level(priv, fl_pte)) {
				ret = -ENOMEM;
				goto fail;
			}
		}
		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
		sl_table = __va(((*fl_pte) & FL_BASE_MASK));
		sl_offset = SL_OFFSET(va);
		/* Keep track of initial position so we
		 * don't clean more than we have to
		 */
		sl_start = sl_offset;

		/* Build the 2nd level page table */
		while (offset < len && sl_offset < NUM_SL_PTE) {

			/* Map a large 64K page if the chunk is large enough and
			 * the pa and va are aligned
			 */

			if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					     SZ_64K))
				chunk_size = SZ_64K;
			else
				chunk_size = SZ_4K;

			if (chunk_size == SZ_4K) {
				sl_4k(&sl_table[sl_offset], pa, pgprot4k);
				sl_offset++;
			} else {
				BUG_ON(sl_offset + 16 > NUM_SL_PTE);
				sl_64k(&sl_table[sl_offset], pa, pgprot64k);
				sl_offset += 16;
			}

			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
			}
		}

		clean_pte(sl_table + sl_start, sl_table + sl_offset,
			  priv->pt.redirect);

		fl_pte++;
		sl_offset = 0;
	}
	__flush_iotlb(domain);
fail:
	mutex_unlock(&msm_iommu_lock);
	if (ret && offset > 0)
		msm_iommu_unmap_range(domain, start_va, offset);
	return ret;
}


static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int used, i;
	struct msm_iommu_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pt.fl_table;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			sl_start = SL_OFFSET(va);
			sl_table =  __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
			clean_pte(sl_table + sl_start, sl_table + sl_end,
				  priv->pt.redirect);

			offset += (sl_end - sl_start) * SZ_4K;
			va += (sl_end - sl_start) * SZ_4K;

			/* Unmap and free the 2nd level table if all mappings
			 * in it were removed. This saves memory, but the table
			 * will need to be re-allocated the next time someone
			 * tries to map these VAs.
			 */
			used = 0;

			/* If we just unmapped the whole table, don't bother
			 * seeing if there are still used entries left.
			 */
			if (sl_end - sl_start != NUM_SL_PTE)
				for (i = 0; i < NUM_SL_PTE; i++)
					if (sl_table[i]) {
						used = 1;
						break;
					}
			if (!used) {
				free_page((unsigned long)sl_table);
				*fl_pte = 0;

				clean_pte(fl_pte, fl_pte + 1,
					  priv->pt.redirect);
			}

			sl_start = 0;
		} else {
			*fl_pte = 0;
			clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}

static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	msm_iommu_remote_spin_lock(iommu_drvdata->needs_rem_spinlock);

	SET_V2PPR(base, ctx, va & V2Pxx_VA);

	mb();
	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(base, ctx))
		ret = 0;

	msm_iommu_remote_spin_unlock(iommu_drvdata->needs_rem_spinlock);

	__disable_clocks(iommu_drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static void __print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

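/*
 * Context fault ISR. Faults are forwarded through report_iommu_fault() to
 * any handler the client registered (typically via iommu_set_fault_handler());
 * unhandled faults get the register dump above, and the stalled transaction
 * is resumed unless the handler returned -EBUSY.
 */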
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_id;
	struct msm_iommu_drvdata *drvdata;
	void __iomem *base;
	unsigned int fsr, num;
	int ret;

	mutex_lock(&msm_iommu_lock);
	BUG_ON(!ctx_drvdata);

	drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
	BUG_ON(!drvdata);

	base = drvdata->base;
	num = ctx_drvdata->num;

	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	msm_iommu_remote_spin_lock(drvdata->needs_rem_spinlock);

	fsr = GET_FSR(base, num);

	if (fsr) {
		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			ret = -ENOSYS;
		} else
			ret = report_iommu_fault(ctx_drvdata->attached_domain,
						 &ctx_drvdata->pdev->dev,
						 GET_FAR(base, num), 0);

		if (ret == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault!\n");
			pr_err("name    = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name, num);
			pr_err("Interesting registers:\n");
			__print_ctx_regs(base, num);
		}

		SET_FSR(base, num, fsr);
		/*
		 * Only resume fetches if the registered fault handler
		 * allows it
		 */
		if (ret != -EBUSY)
			SET_RESUME(base, num, 1);

		ret = IRQ_HANDLED;
	} else
		ret = IRQ_NONE;

	msm_iommu_remote_spin_unlock(drvdata->needs_rem_spinlock);

	__disable_clocks(drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;
	return __pa(priv->pt.fl_table);
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};

static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
	int i = 0;
	unsigned int prrr = 0;
	unsigned int nmrr = 0;
	int c_icp, c_ocp, c_mt, c_nos;

	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);

	for (i = 0; i < NUM_TEX_CLASS; i++) {
		c_nos = PRRR_NOS(prrr, i);
		c_mt = PRRR_MT(prrr, i);
		c_icp = NMRR_ICP(nmrr, i);
		c_ocp = NMRR_OCP(nmrr, i);

		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
			return i;
	}

	return -ENODEV;
}

static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}

static int __init msm_iommu_init(void)
{
	if (!msm_soc_version_supports_iommu_v0())
		return -ENODEV;

	msm_iommu_lock_initialize();

	setup_iommu_tex_classes();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");