blob: 89eef57a176b9ee9b8ef43b3a0cfe1c15380bc76 [file] [log] [blame]
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -08001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070011 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/errno.h>
18#include <linux/io.h>
19#include <linux/interrupt.h>
20#include <linux/list.h>
21#include <linux/spinlock.h>
22#include <linux/slab.h>
23#include <linux/iommu.h>
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080024#include <linux/clk.h>
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -070025#include <linux/scatterlist.h>
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070026
27#include <asm/cacheflush.h>
28#include <asm/sizes.h>
29
30#include <mach/iommu_hw-8xxx.h>
31#include <mach/iommu.h>
32
Stepan Moskovchenko100832c2010-11-15 18:20:08 -080033#define MRC(reg, processor, op1, crn, crm, op2) \
34__asm__ __volatile__ ( \
35" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
36: "=r" (reg))
37
38#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
39#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
40
Stepan Moskovchenko6ee3be82011-11-08 15:24:53 -080041/* Sharability attributes of MSM IOMMU mappings */
42#define MSM_IOMMU_ATTR_NON_SH 0x0
43#define MSM_IOMMU_ATTR_SH 0x4
44
45/* Cacheability attributes of MSM IOMMU mappings */
46#define MSM_IOMMU_ATTR_NONCACHED 0x0
47#define MSM_IOMMU_ATTR_CACHED_WB_WA 0x1
48#define MSM_IOMMU_ATTR_CACHED_WB_NWA 0x2
49#define MSM_IOMMU_ATTR_CACHED_WT 0x3
50
51
/*
 * Write freshly-written page-table entries in [start, end) back to
 * memory so the IOMMU hardware walker observes them.  When the domain
 * redirects its table walks through the L2 cache (redirect != 0) the
 * cache flush is unnecessary and is skipped.
 */
static inline void clean_pte(unsigned long *start, unsigned long *end,
			     int redirect)
{
	if (redirect)
		return;

	dmac_flush_range(start, end);
}
Stepan Moskovchenko094475d2011-08-03 13:38:29 -070058
Stepan Moskovchenko100832c2010-11-15 18:20:08 -080059static int msm_iommu_tex_class[4];
60
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -080061DEFINE_MUTEX(msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070062
/*
 * Per-domain private data, stored in iommu_domain::priv.
 *
 * @pgtable:	virtual address of the first-level page table
 *		(allocated as a 16K region in msm_iommu_domain_init)
 * @redirect:	non-zero when page-table walks are redirected through the
 *		L2 cache (set only under CONFIG_IOMMU_PGTABLES_L2); used by
 *		clean_pte() to decide whether PTE writes must be flushed
 * @list_attached: list of msm_iommu_ctx_drvdata attached to this domain
 */
struct msm_priv {
	unsigned long *pgtable;
	int redirect;
	struct list_head list_attached;
};
68
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080069static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
70{
71 int ret;
72
Stepan Moskovchenkobbf95e12012-02-14 15:42:27 -080073 ret = clk_prepare_enable(drvdata->pclk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080074 if (ret)
75 goto fail;
76
77 if (drvdata->clk) {
Stepan Moskovchenkobbf95e12012-02-14 15:42:27 -080078 ret = clk_prepare_enable(drvdata->clk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080079 if (ret)
Stepan Moskovchenkobbf95e12012-02-14 15:42:27 -080080 clk_disable_unprepare(drvdata->pclk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080081 }
82fail:
83 return ret;
84}
85
86static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
87{
88 if (drvdata->clk)
Stepan Moskovchenkobbf95e12012-02-14 15:42:27 -080089 clk_disable_unprepare(drvdata->clk);
90 clk_disable_unprepare(drvdata->pclk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080091}
92
/*
 * Invalidate the TLB entry for a single virtual address in every
 * context attached to @domain.  The invalidate-by-VA command (TLBIVA)
 * is tagged with the context's current ASID so only this domain's
 * translation is dropped.
 *
 * Callers in this file hold msm_iommu_lock while the attach list is
 * walked.  Returns 0 on success or a clock-enable error code; on error
 * some contexts may not have been invalidated.
 */
static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		/* An attached context must always have a parent IOMMU dev */
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		/* Clocks must be on to touch the IOMMU registers */
		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		/* Invalidate-by-VA, tagged with this context's ASID */
		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   asid | (va & TLBIVA_VA));
		mb();	/* ensure the invalidate is posted before clock gate */
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}
124
/*
 * Invalidate all TLB entries belonging to this domain, in every
 * attached context, using invalidate-by-ASID (TLBIASID) with each
 * context's current ASID.
 *
 * Callers in this file hold msm_iommu_lock while the attach list is
 * walked.  Returns 0 on success or a clock-enable error code; on error
 * some contexts may not have been invalidated.
 */
static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		/* An attached context must always have a parent IOMMU dev */
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		/* Clocks must be on to touch the IOMMU registers */
		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
		mb();	/* ensure the invalidate is posted before clock gate */
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}
155
/*
 * Clear every programmable register of context bank @ctx back to 0,
 * leaving the context disabled and in a known state.  Caller must have
 * the IOMMU clocks enabled.
 */
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();	/* make sure all register writes have landed */
}
179
/*
 * Fully program context bank @ctx of the IOMMU at @base for a domain:
 * reset the bank, point it at @pgtable (optionally split across
 * TTBR0/TTBR1 per @ttbr_split), configure fault reporting, TEX remap
 * and prefetch, pick an ASID, and finally enable translation.
 *
 * @ncb is the total number of context banks on this IOMMU instance,
 * used when searching for a shared or unused ASID.  @redirect selects
 * cacheable (L2-redirected) page-table walks.  Caller must have the
 * IOMMU clocks enabled.
 */
static void __program_context(void __iomem *base, int ctx, int ncb,
			      phys_addr_t pgtable, int redirect,
			      int ttbr_split)
{
	unsigned int prrr, nmrr;
	int i, j, found;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	/* Point both table base registers at the domain's page table */
	SET_TTBCR(base, ctx, ttbr_split);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));
	if (ttbr_split)
		SET_TTBR1_PA(base, ctx, (pgtable >> TTBR1_PA_SHIFT));

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes, mirrored from the CPU's PRRR/NMRR */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_TTBR0_SH(base, ctx, 1);
		SET_TTBR1_SH(base, ctx, 1);

		SET_TTBR0_NOS(base, ctx, 1);
		SET_TTBR1_NOS(base, ctx, 1);

		SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR0_IRGNL(base, ctx, 1);

		SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR1_IRGNL(base, ctx, 1);

		SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
		SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
	}

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx, \
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		/* Candidate ASIDs are 0..ncb-1; take the first one no other
		 * context is using.
		 */
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		/* With ncb contexts and ncb candidate ASIDs a free one
		 * must exist; anything else indicates corrupted state.
		 */
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();	/* ensure the enable is posted before returning */
}
276
Stepan Moskovchenkoff2d3662011-08-31 17:13:32 -0700277static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700278{
279 struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
280
281 if (!priv)
282 goto fail_nomem;
283
284 INIT_LIST_HEAD(&priv->list_attached);
285 priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
286 get_order(SZ_16K));
287
288 if (!priv->pgtable)
289 goto fail_nomem;
290
Stepan Moskovchenkob2438892011-08-31 17:16:19 -0700291#ifdef CONFIG_IOMMU_PGTABLES_L2
292 priv->redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
293#endif
294
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700295 memset(priv->pgtable, 0, SZ_16K);
296 domain->priv = priv;
Stepan Moskovchenko561b50d2012-05-11 17:30:08 -0700297
Stepan Moskovchenko70dddc92012-05-11 18:43:02 -0700298 clean_pte(priv->pgtable, priv->pgtable + NUM_FL_PTE, priv->redirect);
Stepan Moskovchenko561b50d2012-05-11 17:30:08 -0700299
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700300 return 0;
301
302fail_nomem:
303 kfree(priv);
304 return -ENOMEM;
305}
306
/*
 * Tear down a domain: free every second-level table still referenced by
 * the first-level table, then the 16K first-level table itself, then
 * the private data.  Safe to call when domain->priv is already NULL.
 */
static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long *fl_table;
	int i;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pgtable;

		/* Free each second-level table pointed to by a first-level
		 * "table" descriptor.
		 */
		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
						FL_BASE_MASK)));

		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
		priv->pgtable = NULL;
	}

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}
332
/*
 * Attach an IOMMU context device to a domain: program its context bank
 * with the domain's page table and link it onto the domain's attach
 * list.  @dev is the context platform device; its parent device holds
 * the msm_iommu_drvdata for the IOMMU instance.
 *
 * Returns 0 on success, -EINVAL for missing arguments/driver data,
 * -EBUSY if the context is already attached, or a clock-enable error.
 */
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* Already attached somewhere (non-empty list linkage)? */
	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	/* Already attached to this very domain? */
	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__program_context(iommu_drvdata->base, ctx_dev->num, iommu_drvdata->ncb,
			  __pa(priv->pgtable), priv->redirect,
			  iommu_drvdata->ttbr_split);

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);

	ctx_drvdata->attached_domain = domain;
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
387
/*
 * Detach a context device from a domain: invalidate the context's
 * ASID-tagged TLB entries, reset its context bank and unlink it from
 * the domain's attach list.
 *
 * Returns void, so argument/driver-data problems and clock failures
 * are silently ignored (the context is simply left untouched).
 */
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Drop this context's translations before resetting the bank */
	SET_TLBIASID(iommu_drvdata->base, ctx_dev->num,
		     GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_dev->num));

	__reset_context(iommu_drvdata->base, ctx_dev->num);
	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;
fail:
	mutex_unlock(&msm_iommu_lock);
}
424
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700425static int __get_pgprot(int prot, int len)
426{
427 unsigned int pgprot;
Stepan Moskovchenko6ee3be82011-11-08 15:24:53 -0800428 int tex;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700429
Stepan Moskovchenko3c8cdf82012-03-15 15:23:48 -0700430 if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
431 prot |= IOMMU_READ | IOMMU_WRITE;
432 WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
433 }
434
435 if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
436 prot |= IOMMU_READ;
437 WARN_ONCE(1, "Write-only iommu mappings unsupported; falling back to RW\n");
438 }
439
Stepan Moskovchenko6ee3be82011-11-08 15:24:53 -0800440 if (prot & IOMMU_CACHE)
441 tex = (pgprot_kernel >> 2) & 0x07;
442 else
443 tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700444
445 if (tex < 0 || tex > NUM_TEX_CLASS - 1)
446 return 0;
447
448 if (len == SZ_16M || len == SZ_1M) {
Stepan Moskovchenko6ee3be82011-11-08 15:24:53 -0800449 pgprot = FL_SHARED;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700450 pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
451 pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
452 pgprot |= tex & 0x04 ? FL_TEX0 : 0;
Stepan Moskovchenko3c8cdf82012-03-15 15:23:48 -0700453 pgprot |= FL_AP0 | FL_AP1;
454 pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700455 } else {
Stepan Moskovchenko6ee3be82011-11-08 15:24:53 -0800456 pgprot = SL_SHARED;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700457 pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
458 pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
459 pgprot |= tex & 0x04 ? SL_TEX0 : 0;
Stepan Moskovchenko3c8cdf82012-03-15 15:23:48 -0700460 pgprot |= SL_AP0 | SL_AP1;
461 pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700462 }
463
464 return pgprot;
465}
466
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700467static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
468 phys_addr_t pa, int order, int prot)
469{
470 struct msm_priv *priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700471 unsigned long *fl_table;
472 unsigned long *fl_pte;
473 unsigned long fl_offset;
474 unsigned long *sl_table;
475 unsigned long *sl_pte;
476 unsigned long sl_offset;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800477 unsigned int pgprot;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700478 size_t len = 0x1000UL << order;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700479 int ret = 0;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700480
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800481 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700482
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800483 priv = domain->priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700484 if (!priv) {
485 ret = -EINVAL;
486 goto fail;
487 }
488
489 fl_table = priv->pgtable;
490
491 if (len != SZ_16M && len != SZ_1M &&
492 len != SZ_64K && len != SZ_4K) {
493 pr_debug("Bad size: %d\n", len);
494 ret = -EINVAL;
495 goto fail;
496 }
497
498 if (!fl_table) {
499 pr_debug("Null page table\n");
500 ret = -EINVAL;
501 goto fail;
502 }
503
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700504 pgprot = __get_pgprot(prot, len);
505
506 if (!pgprot) {
507 ret = -EINVAL;
508 goto fail;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800509 }
510
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700511 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
512 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
513
514 if (len == SZ_16M) {
515 int i = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700516
517 for (i = 0; i < 16; i++)
518 if (*(fl_pte+i)) {
519 ret = -EBUSY;
520 goto fail;
521 }
522
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700523 for (i = 0; i < 16; i++)
Stepan Moskovchenko3c8cdf82012-03-15 15:23:48 -0700524 *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
525 | FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
Stepan Moskovchenko70dddc92012-05-11 18:43:02 -0700526 clean_pte(fl_pte, fl_pte + 16, priv->redirect);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700527 }
528
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700529 if (len == SZ_1M) {
530 if (*fl_pte) {
531 ret = -EBUSY;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700532 goto fail;
533 }
534
Stepan Moskovchenko3c8cdf82012-03-15 15:23:48 -0700535 *fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
536 | pgprot;
Stepan Moskovchenko70dddc92012-05-11 18:43:02 -0700537 clean_pte(fl_pte, fl_pte + 1, priv->redirect);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700538 }
539
540 /* Need a 2nd level table */
541 if (len == SZ_4K || len == SZ_64K) {
542
543 if (*fl_pte == 0) {
544 unsigned long *sl;
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800545 sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700546 get_order(SZ_4K));
547
548 if (!sl) {
549 pr_debug("Could not allocate second level table\n");
550 ret = -ENOMEM;
551 goto fail;
552 }
553 memset(sl, 0, SZ_4K);
Stepan Moskovchenko70dddc92012-05-11 18:43:02 -0700554 clean_pte(sl, sl + NUM_SL_PTE, priv->redirect);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700555
556 *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
557 FL_TYPE_TABLE);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700558
Stepan Moskovchenko70dddc92012-05-11 18:43:02 -0700559 clean_pte(fl_pte, fl_pte + 1, priv->redirect);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700560 }
561
562 if (!(*fl_pte & FL_TYPE_TABLE)) {
563 ret = -EBUSY;
564 goto fail;
565 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700566 }
567
568 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
569 sl_offset = SL_OFFSET(va);
570 sl_pte = sl_table + sl_offset;
571
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700572 if (len == SZ_4K) {
573 if (*sl_pte) {
574 ret = -EBUSY;
575 goto fail;
576 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700577
Stepan Moskovchenko3c8cdf82012-03-15 15:23:48 -0700578 *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
579 | SL_TYPE_SMALL | pgprot;
Stepan Moskovchenko70dddc92012-05-11 18:43:02 -0700580 clean_pte(sl_pte, sl_pte + 1, priv->redirect);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700581 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700582
583 if (len == SZ_64K) {
584 int i;
585
586 for (i = 0; i < 16; i++)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700587 if (*(sl_pte+i)) {
588 ret = -EBUSY;
589 goto fail;
590 }
591
592 for (i = 0; i < 16; i++)
Stepan Moskovchenko3c8cdf82012-03-15 15:23:48 -0700593 *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
594 | SL_SHARED | SL_TYPE_LARGE | pgprot;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700595
Stepan Moskovchenko70dddc92012-05-11 18:43:02 -0700596 clean_pte(sl_pte, sl_pte + 16, priv->redirect);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700597 }
598
Stepan Moskovchenkobd1ad612011-08-03 16:24:54 -0700599 ret = __flush_iotlb_va(domain, va);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700600fail:
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800601 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700602 return ret;
603}
604
605static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
606 int order)
607{
608 struct msm_priv *priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700609 unsigned long *fl_table;
610 unsigned long *fl_pte;
611 unsigned long fl_offset;
612 unsigned long *sl_table;
613 unsigned long *sl_pte;
614 unsigned long sl_offset;
615 size_t len = 0x1000UL << order;
616 int i, ret = 0;
617
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800618 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700619
620 priv = domain->priv;
621
622 if (!priv) {
623 ret = -ENODEV;
624 goto fail;
625 }
626
627 fl_table = priv->pgtable;
628
629 if (len != SZ_16M && len != SZ_1M &&
630 len != SZ_64K && len != SZ_4K) {
631 pr_debug("Bad length: %d\n", len);
632 ret = -EINVAL;
633 goto fail;
634 }
635
636 if (!fl_table) {
637 pr_debug("Null page table\n");
638 ret = -EINVAL;
639 goto fail;
640 }
641
642 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
643 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
644
645 if (*fl_pte == 0) {
646 pr_debug("First level PTE is 0\n");
647 ret = -ENODEV;
648 goto fail;
649 }
650
651 /* Unmap supersection */
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700652 if (len == SZ_16M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700653 for (i = 0; i < 16; i++)
654 *(fl_pte+i) = 0;
655
Stepan Moskovchenko70dddc92012-05-11 18:43:02 -0700656 clean_pte(fl_pte, fl_pte + 16, priv->redirect);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700657 }
658
659 if (len == SZ_1M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700660 *fl_pte = 0;
661
Stepan Moskovchenko70dddc92012-05-11 18:43:02 -0700662 clean_pte(fl_pte, fl_pte + 1, priv->redirect);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700663 }
664
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700665 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
666 sl_offset = SL_OFFSET(va);
667 sl_pte = sl_table + sl_offset;
668
669 if (len == SZ_64K) {
670 for (i = 0; i < 16; i++)
671 *(sl_pte+i) = 0;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700672
Stepan Moskovchenko70dddc92012-05-11 18:43:02 -0700673 clean_pte(sl_pte, sl_pte + 16, priv->redirect);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700674 }
675
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700676 if (len == SZ_4K) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700677 *sl_pte = 0;
678
Stepan Moskovchenko70dddc92012-05-11 18:43:02 -0700679 clean_pte(sl_pte, sl_pte + 1, priv->redirect);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700680 }
681
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700682 if (len == SZ_4K || len == SZ_64K) {
683 int used = 0;
684
685 for (i = 0; i < NUM_SL_PTE; i++)
686 if (sl_table[i])
687 used = 1;
688 if (!used) {
689 free_page((unsigned long)sl_table);
690 *fl_pte = 0;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700691
Stepan Moskovchenko70dddc92012-05-11 18:43:02 -0700692 clean_pte(fl_pte, fl_pte + 1, priv->redirect);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700693 }
694 }
695
Stepan Moskovchenkobd1ad612011-08-03 16:24:54 -0700696 ret = __flush_iotlb_va(domain, va);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700697fail:
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800698 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700699 return ret;
700}
701
/*
 * Return the physical/bus address for a scatterlist entry.
 *
 * sg_dma_address() is consulted first so that carveout regions that do
 * not have a struct page associated with them can still be mapped;
 * when it is zero, fall back to sg_phys().  A return value of 0 means
 * no usable address was found.
 */
static unsigned int get_phys_addr(struct scatterlist *sg)
{
	unsigned int pa = sg_dma_address(sg);

	return pa ? pa : sg_phys(sg);
}
714
/*
 * Map a scatterlist as individual 4K pages starting at @va for @len
 * bytes (len must be 4K-aligned; BUG otherwise).  Second-level tables
 * are allocated on demand, and only the PTE span actually written is
 * cleaned per table.  The whole domain's TLB entries are invalidated
 * at the end.
 *
 * Returns 0, -EINVAL (bad pgprot or an sg entry with no address), or
 * -ENOMEM.  NOTE(review): on a mid-range failure, PTEs already written
 * are left in place and no TLB flush is done — callers appear expected
 * to unmap the range on error; confirm against callers.
 */
static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	unsigned int pa;
	unsigned int offset = 0;
	unsigned int pgprot;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_offset, sl_start;
	unsigned int chunk_offset = 0;
	unsigned int chunk_pa;
	int ret = 0;
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	/* All pages use small-page (4K) second-level attributes */
	pgprot = __get_pgprot(prot, SZ_4K);

	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);

	chunk_pa = get_phys_addr(sg);
	if (chunk_pa == 0) {
		pr_debug("No dma address for sg %p\n", sg);
		ret = -EINVAL;
		goto fail;
	}

	while (offset < len) {
		/* Set up a 2nd level page table if one doesn't exist */
		if (*fl_pte == 0) {
			sl_table = (unsigned long *)
				 __get_free_pages(GFP_KERNEL, get_order(SZ_4K));

			if (!sl_table) {
				pr_debug("Could not allocate second level table\n");
				ret = -ENOMEM;
				goto fail;
			}

			memset(sl_table, 0, SZ_4K);
			clean_pte(sl_table, sl_table + NUM_SL_PTE,
				  priv->redirect);

			*fl_pte = ((((int)__pa(sl_table)) & FL_BASE_MASK) |
							    FL_TYPE_TABLE);
			clean_pte(fl_pte, fl_pte + 1, priv->redirect);
		} else
			sl_table = (unsigned long *)
					       __va(((*fl_pte) & FL_BASE_MASK));

		/* Keep track of initial position so we
		 * don't clean more than we have to
		 */
		sl_start = sl_offset;

		/* Build the 2nd level page table */
		while (offset < len && sl_offset < NUM_SL_PTE) {
			pa = chunk_pa + chunk_offset;
			sl_table[sl_offset] = (pa & SL_BASE_MASK_SMALL) |
			      pgprot | SL_NG | SL_SHARED | SL_TYPE_SMALL;
			sl_offset++;
			offset += SZ_4K;

			chunk_offset += SZ_4K;

			/* Current sg chunk exhausted: advance to the next */
			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				chunk_pa = get_phys_addr(sg);
				if (chunk_pa == 0) {
					pr_debug("No dma address for sg %p\n",
						 sg);
					ret = -EINVAL;
					goto fail;
				}
			}
		}

		/* Clean only the span of PTEs written in this table */
		clean_pte(sl_table + sl_start, sl_table + sl_offset,
			  priv->redirect);

		fl_pte++;
		sl_offset = 0;
	}
	__flush_iotlb(domain);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
821
822
/*
 * Unmap @len bytes of 4K-page mappings starting at @va (len must be
 * 4K-aligned; BUG otherwise), walking one second-level table at a time.
 * Second-level tables that become (or are known to be) empty are freed
 * and their first-level slot cleared.  The whole domain's TLB entries
 * are invalidated at the end.  Always returns 0.
 */
static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int used, i;
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	sl_start = SL_OFFSET(va);

	while (offset < len) {
		sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
		/* Clear up to the end of this table or of the range,
		 * whichever comes first.
		 */
		sl_end = ((len - offset) / SZ_4K) + sl_start;

		if (sl_end > NUM_SL_PTE)
			sl_end = NUM_SL_PTE;

		memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
		clean_pte(sl_table + sl_start, sl_table + sl_end,
			  priv->redirect);

		offset += (sl_end - sl_start) * SZ_4K;

		/* Unmap and free the 2nd level table if all mappings in it
		 * were removed. This saves memory, but the table will need
		 * to be re-allocated the next time someone tries to map these
		 * VAs.
		 */
		used = 0;

		/* If we just unmapped the whole table, don't bother
		 * seeing if there are still used entries left.
		 */
		if (sl_end - sl_start != NUM_SL_PTE)
			for (i = 0; i < NUM_SL_PTE; i++)
				if (sl_table[i]) {
					used = 1;
					break;
				}
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			clean_pte(fl_pte, fl_pte + 1, priv->redirect);
		}

		sl_start = 0;
		fl_pte++;
	}

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}
891
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700892static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
893 unsigned long va)
894{
895 struct msm_priv *priv;
896 struct msm_iommu_drvdata *iommu_drvdata;
897 struct msm_iommu_ctx_drvdata *ctx_drvdata;
898 unsigned int par;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700899 void __iomem *base;
900 phys_addr_t ret = 0;
901 int ctx;
902
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800903 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700904
905 priv = domain->priv;
906 if (list_empty(&priv->list_attached))
907 goto fail;
908
909 ctx_drvdata = list_entry(priv->list_attached.next,
910 struct msm_iommu_ctx_drvdata, attached_elm);
911 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
912
913 base = iommu_drvdata->base;
914 ctx = ctx_drvdata->num;
915
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800916 ret = __enable_clocks(iommu_drvdata);
917 if (ret)
918 goto fail;
919
Stepan Moskovchenkob0e78082011-02-28 16:04:55 -0800920 SET_V2PPR(base, ctx, va & V2Pxx_VA);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700921
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700922 mb();
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700923 par = GET_PAR(base, ctx);
924
925 /* We are dealing with a supersection */
926 if (GET_NOFAULT_SS(base, ctx))
927 ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
928 else /* Upper 20 bits from PAR, lower 12 from VA */
929 ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
930
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800931 if (GET_FAULT(base, ctx))
932 ret = 0;
933
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800934 __disable_clocks(iommu_drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700935fail:
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800936 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700937 return ret;
938}
939
/*
 * msm_iommu_domain_has_cap() - query an optional IOMMU capability.
 *
 * This driver advertises no optional capabilities, so the answer is
 * always "not supported" (0), regardless of @domain and @cap.
 */
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	(void) domain;
	(void) cap;
	return 0;
}
945
/*
 * print_ctx_regs() - dump the fault-diagnostic registers of IOMMU
 * context @ctx at @base to the kernel log.
 *
 * Decodes the FSR fault-status bits into short mnemonic strings (one
 * per set bit, per the masks below) and prints the fault/physical
 * addresses, fault syndrome, translation-table base registers and the
 * control/remap registers.  Called from the fault handler; the caller
 * is responsible for having the IOMMU clocks enabled.
 */
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR = %08x PAR = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0 = %08x TTBR1 = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR = %08x ACTLR = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR = %08x NMRR = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
972
973irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
974{
Stepan Moskovchenko73a50f62012-05-03 17:29:12 -0700975 struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_id;
976 struct msm_iommu_drvdata *drvdata;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700977 void __iomem *base;
Stepan Moskovchenko73a50f62012-05-03 17:29:12 -0700978 unsigned int fsr, num;
979 int ret;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700980
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -0800981 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko73a50f62012-05-03 17:29:12 -0700982 BUG_ON(!ctx_drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700983
Stepan Moskovchenko73a50f62012-05-03 17:29:12 -0700984 drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
985 BUG_ON(!drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700986
987 base = drvdata->base;
Stepan Moskovchenko73a50f62012-05-03 17:29:12 -0700988 num = ctx_drvdata->num;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700989
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800990 ret = __enable_clocks(drvdata);
991 if (ret)
992 goto fail;
993
Stepan Moskovchenko73a50f62012-05-03 17:29:12 -0700994 fsr = GET_FSR(base, num);
995
996 if (fsr) {
997 if (!ctx_drvdata->attached_domain) {
998 pr_err("Bad domain in interrupt handler\n");
999 ret = -ENOSYS;
1000 } else
1001 ret = report_iommu_fault(ctx_drvdata->attached_domain,
1002 &ctx_drvdata->pdev->dev,
1003 GET_FAR(base, num), 0);
1004
1005 if (ret == -ENOSYS) {
1006 pr_err("Unexpected IOMMU page fault!\n");
1007 pr_err("name = %s\n", drvdata->name);
1008 pr_err("context = %s (%d)\n", ctx_drvdata->name, num);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001009 pr_err("Interesting registers:\n");
Stepan Moskovchenko73a50f62012-05-03 17:29:12 -07001010 print_ctx_regs(base, num);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001011 }
Stepan Moskovchenko73a50f62012-05-03 17:29:12 -07001012
1013 SET_FSR(base, num, fsr);
1014 SET_RESUME(base, num, 1);
1015
1016 ret = IRQ_HANDLED;
1017 } else
1018 ret = IRQ_NONE;
1019
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -08001020 __disable_clocks(drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001021fail:
Stepan Moskovchenkoc5888f22012-02-14 15:42:05 -08001022 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko73a50f62012-05-03 17:29:12 -07001023 return ret;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001024}
1025
Shubhraprakash Das4c436f22011-12-02 18:01:57 -07001026static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
1027{
1028 struct msm_priv *priv = domain->priv;
1029 return __pa(priv->pgtable);
1030}
1031
/*
 * Callbacks exported to the generic IOMMU layer.  map_range/unmap_range
 * and get_pt_base_addr are MSM-specific extensions of struct iommu_ops
 * in this tree.
 */
static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr
};
1045
Stepan Moskovchenko100832c2010-11-15 18:20:08 -08001046static int __init get_tex_class(int icp, int ocp, int mt, int nos)
1047{
1048 int i = 0;
1049 unsigned int prrr = 0;
1050 unsigned int nmrr = 0;
1051 int c_icp, c_ocp, c_mt, c_nos;
1052
1053 RCP15_PRRR(prrr);
1054 RCP15_NMRR(nmrr);
1055
1056 for (i = 0; i < NUM_TEX_CLASS; i++) {
1057 c_nos = PRRR_NOS(prrr, i);
1058 c_mt = PRRR_MT(prrr, i);
1059 c_icp = NMRR_ICP(nmrr, i);
1060 c_ocp = NMRR_OCP(nmrr, i);
1061
1062 if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
1063 return i;
1064 }
1065
1066 return -ENODEV;
1067}
1068
1069static void __init setup_iommu_tex_classes(void)
1070{
1071 msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
1072 get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);
1073
1074 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
1075 get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);
1076
1077 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
1078 get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);
1079
1080 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
1081 get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
1082}
1083
Stepan Moskovchenko516cbc72010-11-12 19:29:53 -08001084static int __init msm_iommu_init(void)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001085{
Sathish Ambleyd1b89ed2012-02-07 21:47:47 -08001086 if (!msm_soc_version_supports_iommu_v1())
Stepan Moskovchenko15f209c2011-10-31 15:32:44 -07001087 return -ENODEV;
1088
Stepan Moskovchenko100832c2010-11-15 18:20:08 -08001089 setup_iommu_tex_classes();
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001090 register_iommu(&msm_iommu_ops);
1091 return 0;
1092}
1093
/* Register early (subsys level) so client drivers probing later can
 * find their IOMMUs already set up.
 */
subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");