/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <asm/sizes.h>

#include <mach/iommu_hw-v2.h>
#include <mach/iommu.h>

#include "msm_iommu_pagetable.h"

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_MUTEX(msm_iommu_lock);

struct msm_priv {
	struct iommu_pt pt;
	struct list_head list_attached;
};

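/*
 * Power on the IOMMU's GDSC regulator, plus the alternate GDSC when
 * one is present. On failure the primary regulator is released so no
 * partially-enabled state is left behind.
 */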
static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
{
	int ret = regulator_enable(drvdata->gdsc);
	if (ret)
		goto fail;

	if (drvdata->alt_gdsc)
		ret = regulator_enable(drvdata->alt_gdsc);

	if (ret)
		regulator_disable(drvdata->gdsc);
fail:
	return ret;
}

static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->alt_gdsc)
		regulator_disable(drvdata->alt_gdsc);

	regulator_disable(drvdata->gdsc);
}

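/*
 * Enable the interface (pclk), core (clk) and optional AXI (aclk)
 * clocks, unwinding in reverse order if any of them fails. If a
 * clock control register is mapped, bit 0 of that register is
 * cleared as well.
 */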
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_prepare_enable(drvdata->pclk);
	if (ret)
		goto fail;

	ret = clk_prepare_enable(drvdata->clk);
	if (ret) {
		clk_disable_unprepare(drvdata->pclk);
		goto fail;
	}

	if (drvdata->aclk) {
		ret = clk_prepare_enable(drvdata->aclk);
		if (ret) {
			clk_disable_unprepare(drvdata->clk);
			clk_disable_unprepare(drvdata->pclk);
			goto fail;
		}
	}

	if (drvdata->clk_reg_virt) {
		unsigned int value;

		value = readl_relaxed(drvdata->clk_reg_virt);
		value &= ~0x1;
		writel_relaxed(value, drvdata->clk_reg_virt);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->aclk)
		clk_disable_unprepare(drvdata->aclk);
	clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}

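/*
 * Wait for a previously issued TLB invalidation to complete by
 * writing TLBSYNC and polling the context's TLBSTATUS SACTIVE bit.
 */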
static void __sync_tlb(void __iomem *base, int ctx)
{
	SET_TLBSYNC(base, ctx, 0);

	/* No barrier needed due to register proximity */
	while (GET_CB_TLBSTATUS_SACTIVE(base, ctx))
		cpu_relax();

	/* No barrier needed due to read dependency */
}

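/*
 * Invalidate the TLB entry for a single virtual address, tagged with
 * the context's ASID, in every context attached to the domain.
 */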
static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		asid = GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base,
					      ctx_drvdata->num);

		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   asid | (va & CB_TLBIVA_VA));
		mb();
		__sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

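/*
 * Invalidate all TLB entries belonging to each attached context's
 * ASID, flushing the domain's entire address space.
 */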
static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		asid = GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base,
					      ctx_drvdata->num);

		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
		mb();
		__sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
		__disable_clocks(iommu_drvdata);
	}

fail:
	return ret;
}

/*
 * May only be called for non-secure iommus
 */
static void __reset_iommu(void __iomem *base)
{
	int i, smt_size;

	SET_ACR(base, 0);
	SET_CR2(base, 0);
	SET_GFAR(base, 0);
	SET_GFSRRESTORE(base, 0);
	SET_TLBIALLNSNH(base, 0);
	SET_PMCR(base, 0);
	SET_SCR1(base, 0);
	SET_SSDR_N(base, 0, 0);
	smt_size = GET_IDR0_NUMSMRG(base);

	for (i = 0; i < smt_size; i++)
		SET_SMR_VALID(base, i, 0);

	mb();
}

/*
 * May only be called for non-secure iommus
 */
static void __program_iommu(void __iomem *base,
			struct msm_iommu_bfb_settings *bfb_settings)
{
	int i;
	__reset_iommu(base);

	SET_CR0_SMCFCFG(base, 1);
	SET_CR0_USFCFG(base, 1);
	SET_CR0_STALLD(base, 1);
	SET_CR0_GCFGFIE(base, 1);
	SET_CR0_GCFGFRE(base, 1);
	SET_CR0_GFIE(base, 1);
	SET_CR0_GFRE(base, 1);
	SET_CR0_CLIENTPD(base, 0);

	if (bfb_settings)
		for (i = 0; i < bfb_settings->length; i++)
			SET_GLOBAL_REG(base, bfb_settings->regs[i],
				       bfb_settings->data[i]);

	mb(); /* Make sure writes complete before returning */
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_ACTLR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_TLBIALL(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	mb();
}

static void __release_smg(void __iomem *base, int ctx)
{
	int i, smt_size;
	smt_size = GET_IDR0_NUMSMRG(base);

	/* Invalidate any SMGs associated with this context */
	for (i = 0; i < smt_size; i++)
		if (GET_SMR_VALID(base, i) &&
		    GET_S2CR_CBNDX(base, i) == ctx)
			SET_SMR_VALID(base, i, 0);
}

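/*
 * Choose an ASID for a context. If another non-secure context on the
 * same IOMMU already uses the same page table, its ASID is reused so
 * that invalidations by ASID cover both; otherwise the lowest ASID in
 * 1..ncb not claimed by a sibling context is assigned.
 */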
static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
				  struct msm_iommu_ctx_drvdata *curr_ctx,
				  phys_addr_t pgtable)
{
	struct platform_device *pdev;
	struct device_node *child;
	struct msm_iommu_ctx_drvdata *ctx;
	unsigned int found = 0;
	void __iomem *base = iommu_drvdata->base;
	struct device_node *iommu_node = iommu_drvdata->dev->of_node;
	unsigned int asid;
	unsigned int ncb = iommu_drvdata->ncb;

	/* Find if this page table is used elsewhere, and re-use ASID */
	for_each_child_of_node(iommu_node, child) {
		pdev = of_find_device_by_node(child);
		ctx = dev_get_drvdata(&pdev->dev);

		if (ctx->secure_context) {
			of_dev_put(pdev);
			continue;
		}

		if ((ctx != curr_ctx) &&
		    (GET_CB_TTBR0_ADDR(base, ctx->num) == pgtable)) {
			SET_CB_CONTEXTIDR_ASID(base, curr_ctx->num, ctx->asid);
			curr_ctx->asid = ctx->asid;
			found = 1;
			of_dev_put(pdev);
			of_node_put(child);
			break;
		}
		of_dev_put(pdev);
	}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (asid = 1; asid < ncb + 1; ++asid) {
			found = 0;
			for_each_child_of_node(iommu_node, child) {
				pdev = of_find_device_by_node(child);
				ctx = dev_get_drvdata(&pdev->dev);

				if (ctx != curr_ctx && ctx->asid == asid) {
					found = 1;
					of_dev_put(pdev);
					of_node_put(child);
					break;
				}
				of_dev_put(pdev);
			}
			if (!found) {
				SET_CB_CONTEXTIDR_ASID(base, curr_ctx->num,
						       asid);
				curr_ctx->asid = asid;
				break;
			}
		}
		BUG_ON(found);
	}
}

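/*
 * Program a context bank for a domain: point TTBR0 at the page table,
 * enable fault interrupts, TEX remap and the private ASID namespace,
 * optionally make page table walks cacheable, program the stream
 * matching and S2CR entries on non-secure IOMMUs, assign an ASID and
 * finally enable translation.
 */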
static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,
			      struct msm_iommu_ctx_drvdata *ctx_drvdata,
			      phys_addr_t pgtable, int redirect, bool is_secure)
{
	unsigned int prrr, nmrr;
	unsigned int pn;
	int num = 0, i, smt_size;
	void __iomem *base = iommu_drvdata->base;
	unsigned int ctx = ctx_drvdata->num;
	u32 *sids = ctx_drvdata->sids;
	int len = ctx_drvdata->nsid;

	__reset_context(base, ctx);

	pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
	SET_TTBCR(base, ctx, 0);
	SET_CB_TTBR0_ADDR(base, ctx, pn);

	/* Enable context fault interrupt */
	SET_CB_SCTLR_CFIE(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_CB_ACTLR_BPRCISH(base, ctx, 1);
	SET_CB_ACTLR_BPRCOSH(base, ctx, 1);
	SET_CB_ACTLR_BPRCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_CB_SCTLR_TRE(base, ctx, 1);

	/* Enable private ASID namespace */
	SET_CB_SCTLR_ASIDPNE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_CB_TTBR0_S(base, ctx, 1);
		SET_CB_TTBR0_NOS(base, ctx, 1);
		SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */
		SET_CB_TTBR0_IRGN0(base, ctx, 1);
		SET_CB_TTBR0_RGN(base, ctx, 1);   /* WB, WA */
	}

	if (!is_secure) {
		smt_size = GET_IDR0_NUMSMRG(base);
		/* Program the M2V tables for this context */
		for (i = 0; i < len / sizeof(*sids); i++) {
			for (; num < smt_size; num++)
				if (GET_SMR_VALID(base, num) == 0)
					break;
			BUG_ON(num >= smt_size);

			SET_SMR_VALID(base, num, 1);
			SET_SMR_MASK(base, num, 0);
			SET_SMR_ID(base, num, sids[i]);

			SET_S2CR_N(base, num, 0);
			SET_S2CR_CBNDX(base, num, ctx);
			SET_S2CR_MEMATTR(base, num, 0x0A);
			/* Set security bit override to be Non-secure */
			SET_S2CR_NSCFG(base, num, 3);
		}
		SET_CBAR_N(base, ctx, 0);

		/* Stage 1 Context with Stage 2 bypass */
		SET_CBAR_TYPE(base, ctx, 1);

		/* Route page faults to the non-secure interrupt */
		SET_CBAR_IRPTNDX(base, ctx, 1);

		/* Set VMID to non-secure HLOS */
		SET_CBAR_VMID(base, ctx, 3);

		/* Bypass is treated as inner-shareable */
		SET_CBAR_BPSHCFG(base, ctx, 2);

		/* Do not downgrade memory attributes */
		SET_CBAR_MEMATTR(base, ctx, 0x0A);
	}

	msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, pn);

	/* Enable the MMU */
	SET_CB_SCTLR_M(base, ctx, 1);
	mb();
}

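/*
 * Allocate the per-domain state and its page table. When L2 page
 * table support is compiled in, MSM_IOMMU_DOMAIN_PT_CACHEABLE
 * requests cacheable page table walks for this domain.
 */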
static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

#ifdef CONFIG_IOMMU_PGTABLES_L2
	priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
#endif

	INIT_LIST_HEAD(&priv->list_attached);
	if (msm_iommu_pagetable_alloc(&priv->pt))
		goto fail_nomem;

	domain->priv = priv;
	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv)
		msm_iommu_pagetable_free(&priv->pt);

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}

static int msm_iommu_ctx_attached(struct device *dev)
{
	struct platform_device *pdev;
	struct device_node *child;
	struct msm_iommu_ctx_drvdata *ctx;

	for_each_child_of_node(dev->of_node, child) {
		pdev = of_find_device_by_node(child);

		ctx = dev_get_drvdata(&pdev->dev);
		if (ctx->attached_domain) {
			of_dev_put(pdev);
			of_node_put(child);
			return 1;
		}
		of_dev_put(pdev);
	}

	return 0;
}

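/*
 * Attach a context device to a domain: power and clock the IOMMU,
 * program the global registers on first attach (delegated to the
 * secure world for secure IOMMUs), then program the context bank for
 * the domain's page table. Clocks are dropped again after
 * programming; the regulator vote is held until detach.
 */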
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret;
	int is_secure;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto fail;
	}

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	is_secure = iommu_drvdata->sec_id != -1;

	ret = __enable_regulators(iommu_drvdata);
	if (ret)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret) {
		__disable_regulators(iommu_drvdata);
		goto fail;
	}

	if (!msm_iommu_ctx_attached(dev->parent)) {
		if (!is_secure) {
			__program_iommu(iommu_drvdata->base,
					iommu_drvdata->bfb_settings);
		} else {
			ret = msm_iommu_sec_program_iommu(
				iommu_drvdata->sec_id);
			if (ret) {
				__disable_regulators(iommu_drvdata);
				__disable_clocks(iommu_drvdata);
				goto fail;
			}
		}
	}

	__program_context(iommu_drvdata, ctx_drvdata, __pa(priv->pt.fl_table),
			  priv->pt.redirect, is_secure);

	__disable_clocks(iommu_drvdata);

	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ctx_drvdata->attached_domain = domain;

fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

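/*
 * Undo an attach: invalidate the context's TLB entries by ASID,
 * reset the context bank, release its stream matching groups on
 * non-secure IOMMUs, and drop the clock and regulator votes taken
 * at attach time.
 */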
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;
	int is_secure;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	is_secure = iommu_drvdata->sec_id != -1;

	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, ctx_drvdata->asid);
	ctx_drvdata->asid = -1;

	__reset_context(iommu_drvdata->base, ctx_drvdata->num);
	if (!is_secure)
		__release_smg(iommu_drvdata->base, ctx_drvdata->num);

	__disable_clocks(iommu_drvdata);

	__disable_regulators(iommu_drvdata);

	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;

fail:
	mutex_unlock(&msm_iommu_lock);
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_priv *priv;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	ret = msm_iommu_pagetable_map(&priv->pt, va, pa, len, prot);
	if (ret)
		goto fail;

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			      size_t len)
{
	struct msm_priv *priv;
	int ret = -ENODEV;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv)
		goto fail;

	ret = msm_iommu_pagetable_unmap(&priv->pt, va, len);
	if (ret < 0)
		goto fail;

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);

	/* the IOMMU API requires us to return how many bytes were unmapped */
	len = ret ? 0 : len;
	return len;
}

static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	int ret;
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	ret = msm_iommu_pagetable_map_range(&priv->pt, va, sg, len, prot);
	if (ret)
		goto fail;

	__flush_iotlb(domain);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	msm_iommu_pagetable_unmap_range(&priv->pt, va, len);

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}

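/*
 * Translate an IOVA by issuing a stage-1 hardware address translation
 * (ATS1PR) on the first attached context and decoding the resulting
 * PAR value. Returns 0 when the translation faults or the clocks
 * cannot be enabled.
 */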
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret) {
		ret = 0;	/* 0 indicates translation failed */
		goto fail;
	}

	SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
	mb();
	while (GET_CB_ATSR_ACTIVE(base, ctx))
		cpu_relax();

	par = GET_PAR(base, ctx);
	__disable_clocks(iommu_drvdata);

	if (par & CB_PAR_F) {
		ret = 0;
	} else {
		/* We are dealing with a supersection */
		if (par & CB_PAR_SS)
			ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
		else /* Upper 20 bits from PAR, lower 12 from VA */
			ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
	}

fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static void print_ctx_regs(void __iomem *base, int ctx, unsigned int fsr)
{
	pr_err("FAR = %08x PAR = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "PF " : "",
	       (fsr & 0x10) ? "EF " : "",
	       (fsr & 0x20) ? "TLBMCF " : "",
	       (fsr & 0x40) ? "TLBLKF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0 = %08x TTBR1 = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR = %08x ACTLR = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR = %08x NMRR = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

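/*
 * Context fault interrupt handler: read and clear the context bank's
 * FSR, report the fault via report_iommu_fault(), and dump the
 * context registers when no handler consumes it.
 */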
irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct msm_iommu_drvdata *drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int fsr;
	int ret;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(!pdev);

	drvdata = dev_get_drvdata(pdev->dev.parent);
	BUG_ON(!drvdata);

	ctx_drvdata = dev_get_drvdata(&pdev->dev);
	BUG_ON(!ctx_drvdata);

	ret = __enable_clocks(drvdata);
	if (ret) {
		ret = IRQ_NONE;
		goto fail;
	}

	fsr = GET_FSR(drvdata->base, ctx_drvdata->num);
	if (fsr) {
		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			ret = -ENOSYS;
		} else
			ret = report_iommu_fault(ctx_drvdata->attached_domain,
				&ctx_drvdata->pdev->dev,
				GET_FAR(drvdata->base, ctx_drvdata->num), 0);

		if (ret == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault!\n");
			pr_err("name = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name,
			       ctx_drvdata->num);
			pr_err("Interesting registers:\n");
			print_ctx_regs(drvdata->base, ctx_drvdata->num, fsr);
		}

		SET_FSR(drvdata->base, ctx_drvdata->num, fsr);
		ret = IRQ_HANDLED;
	} else
		ret = IRQ_NONE;

	__disable_clocks(drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	return __pa(priv->pt.fl_table);
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};

static int __init msm_iommu_init(void)
{
	msm_iommu_pagetable_init();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU v2 Driver");