/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <asm/sizes.h>

#include <mach/iommu_hw-v1.h>
#include <mach/iommu.h>
#include <mach/iommu_perfmon.h>
#include "msm_iommu_pagetable.h"

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_MUTEX(msm_iommu_lock);

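/*
 * Driver-private domain state: the domain's page table and the list of
 * context banks currently attached to it.
 */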
struct msm_priv {
	struct iommu_pt pt;
	struct list_head list_attached;
};

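/*
 * The IOMMU sits on a GDSC power rail (drvdata->gdsc); some targets add
 * a second rail (drvdata->alt_gdsc).  Callers turn the rails on before
 * enabling clocks or touching registers (see _iommu_power_on below).
 */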
static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
{
	int ret = regulator_enable(drvdata->gdsc);
	if (ret)
		goto fail;

	if (drvdata->alt_gdsc)
		ret = regulator_enable(drvdata->alt_gdsc);

	if (ret)
		regulator_disable(drvdata->gdsc);
fail:
	return ret;
}

static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->alt_gdsc)
		regulator_disable(drvdata->alt_gdsc);

	regulator_disable(drvdata->gdsc);
}

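/*
 * pclk must be enabled before clk; aclk is optional and only present on
 * some targets.  clk_reg_virt, when non-NULL, maps a clock-control
 * register in which clearing bit 0 ungates the IOMMU clock.
 */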
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_prepare_enable(drvdata->pclk);
	if (ret)
		goto fail;

	ret = clk_prepare_enable(drvdata->clk);
	if (ret) {
		clk_disable_unprepare(drvdata->pclk);
		goto fail;
	}

	if (drvdata->aclk) {
		ret = clk_prepare_enable(drvdata->aclk);
		if (ret) {
			clk_disable_unprepare(drvdata->clk);
			clk_disable_unprepare(drvdata->pclk);
			goto fail;
		}
	}

	if (drvdata->clk_reg_virt) {
		unsigned int value;

		value = readl_relaxed(drvdata->clk_reg_virt);
		value &= ~0x1;
		writel_relaxed(value, drvdata->clk_reg_virt);
		/* Ensure clock is on before continuing */
		mb();
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->aclk)
		clk_disable_unprepare(drvdata->aclk);
	clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}

static int _iommu_power_off(void *data)
{
	struct msm_iommu_drvdata *drvdata;

	drvdata = (struct msm_iommu_drvdata *)data;
	__disable_clocks(drvdata);
	__disable_regulators(drvdata);
	return 0;
}

static int _iommu_power_on(void *data)
{
	int ret;
	struct msm_iommu_drvdata *drvdata;

	drvdata = (struct msm_iommu_drvdata *)data;
	ret = __enable_regulators(drvdata);
	if (ret)
		goto fail;

	ret = __enable_clocks(drvdata);
	if (ret) {
		__disable_regulators(drvdata);
		goto fail;
	}
	return 0;
fail:
	return -EIO;
}

static void _iommu_lock_acquire(void)
{
	mutex_lock(&msm_iommu_lock);
}

static void _iommu_lock_release(void)
{
	mutex_unlock(&msm_iommu_lock);
}

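/*
 * Power and locking callbacks exported for users outside this file,
 * presumably consumers such as the IOMMU performance monitor
 * (mach/iommu_perfmon.h is included above).
 */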
struct iommu_access_ops iommu_access_ops_v1 = {
	.iommu_power_on = _iommu_power_on,
	.iommu_power_off = _iommu_power_off,
	.iommu_lock_acquire = _iommu_lock_acquire,
	.iommu_lock_release = _iommu_lock_release,
};
EXPORT_SYMBOL(iommu_access_ops_v1);

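/*
 * Request a halt via the MICRO_MMU_CTRL register so the IOMMU can be
 * reprogrammed safely, and spin until the hardware reports idle.  Both
 * halt and resume are no-ops unless halt_enabled is set.
 */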
void iommu_halt(const struct msm_iommu_drvdata *iommu_drvdata)
{
	if (iommu_drvdata->halt_enabled) {
		SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 1);

		while (GET_MICRO_MMU_CTRL_IDLE(iommu_drvdata->base) == 0)
			cpu_relax();
		/* Ensure device is idle before continuing */
		mb();
	}
}

void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata)
{
	if (iommu_drvdata->halt_enabled) {
		/*
		 * Ensure transactions have completed before releasing
		 * the halt
		 */
		mb();
		SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 0);
		/*
		 * Ensure write is complete before continuing to ensure
		 * we don't turn off clocks while transaction is still
		 * pending.
		 */
		mb();
	}
}

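/*
 * Issue a TLBSYNC for the given context bank and wait for pending TLB
 * maintenance operations to complete.
 */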
static void __sync_tlb(void __iomem *base, int ctx)
{
	SET_TLBSYNC(base, ctx, 0);

	/* No barrier needed due to register proximity */
	while (GET_CB_TLBSTATUS_SACTIVE(base, ctx))
		cpu_relax();

	/* No barrier needed due to read dependency */
}

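/*
 * Invalidate the TLB entry for a single virtual address (TLBIVA),
 * tagged with the ASID of each context bank attached to the domain.
 * __flush_iotlb below instead invalidates by ASID (TLBIASID), flushing
 * everything the domain has mapped.
 */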
static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		asid = GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base,
					      ctx_drvdata->num);

		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   asid | (va & CB_TLBIVA_VA));
		mb();
		__sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		asid = GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base,
					      ctx_drvdata->num);

		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
		mb();
		__sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
		__disable_clocks(iommu_drvdata);
	}

fail:
	return ret;
}

/*
 * May only be called for non-secure iommus
 */
static void __reset_iommu(void __iomem *base)
{
	int i, smt_size;

	SET_ACR(base, 0);
	SET_CR2(base, 0);
	SET_GFAR(base, 0);
	SET_GFSRRESTORE(base, 0);
	SET_TLBIALLNSNH(base, 0);
	SET_SCR1(base, 0);
	SET_SSDR_N(base, 0, 0);
	smt_size = GET_IDR0_NUMSMRG(base);

	for (i = 0; i < smt_size; i++)
		SET_SMR_VALID(base, i, 0);

	mb();
}

/*
 * May only be called for non-secure iommus
 */
static void __program_iommu(void __iomem *base)
{
	__reset_iommu(base);

	SET_CR0_SMCFCFG(base, 1);
	SET_CR0_USFCFG(base, 1);
	SET_CR0_STALLD(base, 1);
	SET_CR0_GCFGFIE(base, 1);
	SET_CR0_GCFGFRE(base, 1);
	SET_CR0_GFIE(base, 1);
	SET_CR0_GFRE(base, 1);
	SET_CR0_CLIENTPD(base, 0);

	mb(); /* Make sure writes complete before returning */
}

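/*
 * BFB settings appear to be target-specific tuning values; here they
 * are simply register/value pairs applied verbatim to the global
 * register space, if any were provided.
 */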
void program_iommu_bfb_settings(void __iomem *base,
			const struct msm_iommu_bfb_settings *bfb_settings)
{
	unsigned int i;

	if (bfb_settings)
		for (i = 0; i < bfb_settings->length; i++)
			SET_GLOBAL_REG(base, bfb_settings->regs[i],
				       bfb_settings->data[i]);

	mb(); /* Make sure writes complete before returning */
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_ACTLR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_TLBIALL(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	mb();
}

static void __release_smg(void __iomem *base, int ctx)
{
	int i, smt_size;

	smt_size = GET_IDR0_NUMSMRG(base);

	/* Invalidate any SMGs associated with this context */
	for (i = 0; i < smt_size; i++)
		if (GET_SMR_VALID(base, i) &&
		    GET_S2CR_CBNDX(base, i) == ctx)
			SET_SMR_VALID(base, i, 0);
}

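/*
 * Pick an ASID for the context being attached.  If another non-secure
 * context bank on this IOMMU already uses the same page table, reuse
 * its ASID so their TLB entries stay coherent; otherwise scan for an
 * ASID (1..ncb) not used by any sibling context and claim it.
 */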
static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
				  struct msm_iommu_ctx_drvdata *curr_ctx,
				  phys_addr_t pgtable)
{
	struct platform_device *pdev;
	struct device_node *child;
	struct msm_iommu_ctx_drvdata *ctx;
	unsigned int found = 0;
	void __iomem *base = iommu_drvdata->base;
	struct device_node *iommu_node = iommu_drvdata->dev->of_node;
	unsigned int asid;
	unsigned int ncb = iommu_drvdata->ncb;

	/* Find if this page table is used elsewhere, and re-use ASID */
	for_each_child_of_node(iommu_node, child) {
		pdev = of_find_device_by_node(child);
		ctx = dev_get_drvdata(&pdev->dev);

		if (ctx->secure_context) {
			of_dev_put(pdev);
			continue;
		}

		if ((ctx != curr_ctx) &&
		    (GET_CB_TTBR0_ADDR(base, ctx->num) == pgtable)) {
			SET_CB_CONTEXTIDR_ASID(base, curr_ctx->num, ctx->asid);
			curr_ctx->asid = ctx->asid;
			found = 1;
			of_dev_put(pdev);
			of_node_put(child);
			break;
		}
		of_dev_put(pdev);
	}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (asid = 1; asid < ncb + 1; ++asid) {
			found = 0;
			for_each_child_of_node(iommu_node, child) {
				pdev = of_find_device_by_node(child);
				ctx = dev_get_drvdata(&pdev->dev);

				if (ctx != curr_ctx && ctx->asid == asid) {
					found = 1;
					of_dev_put(pdev);
					of_node_put(child);
					break;
				}
				of_dev_put(pdev);
			}
			if (!found) {
				SET_CB_CONTEXTIDR_ASID(base, curr_ctx->num,
						       asid);
				curr_ctx->asid = asid;
				break;
			}
		}
		/* found still set here means every candidate ASID was taken */
		BUG_ON(found);
	}
}

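/*
 * Program a context bank: point TTBR0 at the page table, set up
 * TEX-remap and ASID state, and (for non-secure IOMMUs) program the
 * stream matching (M2V) tables so the context's stream IDs route to
 * this bank.
 */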
static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,
			      struct msm_iommu_ctx_drvdata *ctx_drvdata,
			      phys_addr_t pgtable, int redirect, bool is_secure)
{
	unsigned int prrr, nmrr;
	unsigned int pn;
	int num = 0, i, smt_size;
	void __iomem *base = iommu_drvdata->base;
	unsigned int ctx = ctx_drvdata->num;
	u32 *sids = ctx_drvdata->sids;
	int len = ctx_drvdata->nsid;

	__reset_context(base, ctx);

	pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
	SET_TTBCR(base, ctx, 0);
	SET_CB_TTBR0_ADDR(base, ctx, pn);

	/* Enable context fault interrupt */
	SET_CB_SCTLR_CFIE(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_CB_ACTLR_BPRCISH(base, ctx, 1);
	SET_CB_ACTLR_BPRCOSH(base, ctx, 1);
	SET_CB_ACTLR_BPRCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_CB_SCTLR_TRE(base, ctx, 1);

	/* Enable private ASID namespace */
	SET_CB_SCTLR_ASIDPNE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_CB_TTBR0_S(base, ctx, 1);
		SET_CB_TTBR0_NOS(base, ctx, 1);
		SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */
		SET_CB_TTBR0_IRGN0(base, ctx, 1);
		SET_CB_TTBR0_RGN(base, ctx, 1);   /* WB, WA */
	}

	if (!is_secure) {
		smt_size = GET_IDR0_NUMSMRG(base);
		/* Program the M2V tables for this context */
		for (i = 0; i < len / sizeof(*sids); i++) {
			for (; num < smt_size; num++)
				if (GET_SMR_VALID(base, num) == 0)
					break;
			BUG_ON(num >= smt_size);

			SET_SMR_VALID(base, num, 1);
			SET_SMR_MASK(base, num, 0);
			SET_SMR_ID(base, num, sids[i]);

			SET_S2CR_N(base, num, 0);
			SET_S2CR_CBNDX(base, num, ctx);
			SET_S2CR_MEMATTR(base, num, 0x0A);
			/* Set security bit override to be Non-secure */
			SET_S2CR_NSCFG(base, num, 3);
		}
		SET_CBAR_N(base, ctx, 0);

		/* Stage 1 Context with Stage 2 bypass */
		SET_CBAR_TYPE(base, ctx, 1);

		/* Route page faults to the non-secure interrupt */
		SET_CBAR_IRPTNDX(base, ctx, 1);

		/* Set VMID to non-secure HLOS */
		SET_CBAR_VMID(base, ctx, 3);

		/* Bypass is treated as inner-shareable */
		SET_CBAR_BPSHCFG(base, ctx, 2);

		/* Do not downgrade memory attributes */
		SET_CBAR_MEMATTR(base, ctx, 0x0A);
	}

	msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, pn);

	/* Enable the MMU */
	SET_CB_SCTLR_M(base, ctx, 1);
	mb();
}

static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

#ifdef CONFIG_IOMMU_PGTABLES_L2
	priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
#endif

	INIT_LIST_HEAD(&priv->list_attached);
	if (msm_iommu_pagetable_alloc(&priv->pt))
		goto fail_nomem;

	domain->priv = priv;
	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv)
		msm_iommu_pagetable_free(&priv->pt);

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}

static int msm_iommu_ctx_attached(struct device *dev)
{
	struct platform_device *pdev;
	struct device_node *child;
	struct msm_iommu_ctx_drvdata *ctx;

	for_each_child_of_node(dev->of_node, child) {
		pdev = of_find_device_by_node(child);

		ctx = dev_get_drvdata(&pdev->dev);
		if (ctx->attached_domain) {
			of_dev_put(pdev);
			of_node_put(child);
			return 1;
		}
		of_dev_put(pdev);
	}

	return 0;
}

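/*
 * Attach sequence: power the IOMMU (regulators, then clocks), program
 * the global registers the first time any context on this IOMMU is
 * attached (via the secure world when sec_id is set), then program and
 * enable this context bank while the IOMMU is halted.
 */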
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret;
	int is_secure;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto fail;
	}

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	is_secure = iommu_drvdata->sec_id != -1;

	ret = __enable_regulators(iommu_drvdata);
	if (ret)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret) {
		__disable_regulators(iommu_drvdata);
		goto fail;
	}

	if (!msm_iommu_ctx_attached(dev->parent)) {
		if (!is_secure) {
			iommu_halt(iommu_drvdata);
			__program_iommu(iommu_drvdata->base);
			iommu_resume(iommu_drvdata);
		} else {
			ret = msm_iommu_sec_program_iommu(
				iommu_drvdata->sec_id);
			if (ret) {
				__disable_regulators(iommu_drvdata);
				__disable_clocks(iommu_drvdata);
				goto fail;
			}
		}
		program_iommu_bfb_settings(iommu_drvdata->base,
					   iommu_drvdata->bfb_settings);
	}

	iommu_halt(iommu_drvdata);

	__program_context(iommu_drvdata, ctx_drvdata, __pa(priv->pt.fl_table),
			  priv->pt.redirect, is_secure);

	iommu_resume(iommu_drvdata);

	__disable_clocks(iommu_drvdata);

	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ctx_drvdata->attached_domain = domain;

	mutex_unlock(&msm_iommu_lock);

	msm_iommu_attached(dev->parent);
	return ret;
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;
	int is_secure;

	msm_iommu_detached(dev->parent);

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	is_secure = iommu_drvdata->sec_id != -1;

	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, ctx_drvdata->asid);
	ctx_drvdata->asid = -1;

	iommu_halt(iommu_drvdata);

	__reset_context(iommu_drvdata->base, ctx_drvdata->num);
	if (!is_secure)
		__release_smg(iommu_drvdata->base, ctx_drvdata->num);

	iommu_resume(iommu_drvdata);

	__disable_clocks(iommu_drvdata);

	__disable_regulators(iommu_drvdata);

	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;

fail:
	mutex_unlock(&msm_iommu_lock);
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_priv *priv;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	ret = msm_iommu_pagetable_map(&priv->pt, va, pa, len, prot);
	if (ret)
		goto fail;

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			      size_t len)
{
	struct msm_priv *priv;
	int ret = -ENODEV;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv)
		goto fail;

	ret = msm_iommu_pagetable_unmap(&priv->pt, va, len);
	if (ret < 0)
		goto fail;

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);

	/* the IOMMU API requires us to return how many bytes were unmapped */
	len = ret ? 0 : len;
	return len;
}

static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	int ret;
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	ret = msm_iommu_pagetable_map_range(&priv->pt, va, sg, len, prot);
	if (ret)
		goto fail;

	__flush_iotlb(domain);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	msm_iommu_pagetable_unmap_range(&priv->pt, va, len);

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}

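/*
 * Translate an IOVA by asking the hardware: write the address to the
 * ATS1PR register of the first attached context bank and read the
 * result back from PAR once the translation completes.
 */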
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret) {
		ret = 0; /* 0 indicates translation failed */
		goto fail;
	}

	SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
	mb();
	while (GET_CB_ATSR_ACTIVE(base, ctx))
		cpu_relax();

	par = GET_PAR(base, ctx);
	__disable_clocks(iommu_drvdata);

	if (par & CB_PAR_F) {
		ret = 0;
	} else {
		/* We are dealing with a supersection */
		if (par & CB_PAR_SS)
			ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
		else /* Upper 20 bits from PAR, lower 12 from VA */
			ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
	}

fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static void print_ctx_regs(void __iomem *base, int ctx, unsigned int fsr)
{
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "PF " : "",
	       (fsr & 0x10) ? "EF " : "",
	       (fsr & 0x20) ? "TLBMCF " : "",
	       (fsr & 0x40) ? "TLBLKF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

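/*
 * Context fault handler: read and clear the context bank's FSR, giving
 * the domain owner a chance to handle the fault via
 * report_iommu_fault() before the fault registers are logged.
 */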
irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct msm_iommu_drvdata *drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int fsr;
	int ret;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(!pdev);

	drvdata = dev_get_drvdata(pdev->dev.parent);
	BUG_ON(!drvdata);

	ctx_drvdata = dev_get_drvdata(&pdev->dev);
	BUG_ON(!ctx_drvdata);

	ret = __enable_clocks(drvdata);
	if (ret) {
		ret = IRQ_NONE;
		goto fail;
	}

	fsr = GET_FSR(drvdata->base, ctx_drvdata->num);
	if (fsr) {
		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			ret = -ENOSYS;
		} else
			ret = report_iommu_fault(ctx_drvdata->attached_domain,
				&ctx_drvdata->pdev->dev,
				GET_FAR(drvdata->base, ctx_drvdata->num), 0);

		if (ret == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault!\n");
			pr_err("name = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name,
			       ctx_drvdata->num);
			pr_err("Interesting registers:\n");
			print_ctx_regs(drvdata->base, ctx_drvdata->num, fsr);
		}

		SET_FSR(drvdata->base, ctx_drvdata->num, fsr);
		ret = IRQ_HANDLED;
	} else
		ret = IRQ_NONE;

	__disable_clocks(drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;

	return __pa(priv->pt.fl_table);
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};

static int __init msm_iommu_init(void)
{
	msm_iommu_pagetable_init();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU v2 Driver");