/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <asm/sizes.h>

#include <mach/iommu_hw-v1.h>
#include <mach/iommu.h>
#include <mach/msm_iommu_priv.h>
#include <mach/iommu_perfmon.h>
#include "msm_iommu_pagetable.h"

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
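
/*
 * The generic IOMMU layer reads this bitmap through ops->pgsize_bitmap
 * and splits iommu_map()/iommu_unmap() requests into chunks of these
 * sizes, so e.g. a 1MB-aligned, 1MB-long mapping is expected to become
 * one 1M block mapping rather than 256 4K mappings.
 */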

static DEFINE_MUTEX(msm_iommu_lock);

static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
{
	int ret = regulator_enable(drvdata->gdsc);
	if (ret)
		goto fail;

	if (drvdata->alt_gdsc)
		ret = regulator_enable(drvdata->alt_gdsc);

	if (ret)
		regulator_disable(drvdata->gdsc);
fail:
	return ret;
}

static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->alt_gdsc)
		regulator_disable(drvdata->alt_gdsc);

	regulator_disable(drvdata->gdsc);
}

static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_prepare_enable(drvdata->pclk);
	if (ret)
		goto fail;

	ret = clk_prepare_enable(drvdata->clk);
	if (ret) {
		clk_disable_unprepare(drvdata->pclk);
		goto fail;
	}

	if (drvdata->aclk) {
		ret = clk_prepare_enable(drvdata->aclk);
		if (ret) {
			clk_disable_unprepare(drvdata->clk);
			clk_disable_unprepare(drvdata->pclk);
			goto fail;
		}
	}

	if (drvdata->clk_reg_virt) {
		unsigned int value;

		value = readl_relaxed(drvdata->clk_reg_virt);
		value &= ~0x1;
		writel_relaxed(value, drvdata->clk_reg_virt);
		/* Ensure clock is on before continuing */
		mb();
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->aclk)
		clk_disable_unprepare(drvdata->aclk);
	clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}

static void _iommu_lock_acquire(void)
{
	mutex_lock(&msm_iommu_lock);
}

static void _iommu_lock_release(void)
{
	mutex_unlock(&msm_iommu_lock);
}

struct iommu_access_ops iommu_access_ops_v1 = {
	.iommu_power_on = __enable_regulators,
	.iommu_power_off = __disable_regulators,
	.iommu_clk_on = __enable_clocks,
	.iommu_clk_off = __disable_clocks,
	.iommu_lock_acquire = _iommu_lock_acquire,
	.iommu_lock_release = _iommu_lock_release,
};
EXPORT_SYMBOL(iommu_access_ops_v1);

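/*
 * Usage sketch (illustrative only, compiled out): a client such as the
 * perfmon code is expected to bracket IOMMU register access with these
 * ops roughly as below. The function is hypothetical and assumes the
 * iommu_access_ops member prototypes in mach/iommu.h match the functions
 * assigned above.
 */
#if 0
static int example_locked_register_access(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	iommu_access_ops_v1.iommu_lock_acquire();

	ret = iommu_access_ops_v1.iommu_power_on(drvdata);
	if (ret)
		goto unlock;

	ret = iommu_access_ops_v1.iommu_clk_on(drvdata);
	if (ret)
		goto power_off;

	/* ... safely read or write IOMMU registers here ... */

	iommu_access_ops_v1.iommu_clk_off(drvdata);
power_off:
	iommu_access_ops_v1.iommu_power_off(drvdata);
unlock:
	iommu_access_ops_v1.iommu_lock_release();
	return ret;
}
#endif
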
void iommu_halt(const struct msm_iommu_drvdata *iommu_drvdata)
{
	if (iommu_drvdata->halt_enabled) {
		SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 1);

		while (GET_MICRO_MMU_CTRL_IDLE(iommu_drvdata->base) == 0)
			cpu_relax();
		/* Ensure device is idle before continuing */
		mb();
	}
}

void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata)
{
	if (iommu_drvdata->halt_enabled) {
		/*
		 * Ensure transactions have completed before releasing
		 * the halt
		 */
		mb();
		SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 0);
		/*
		 * Ensure the write completes before continuing so that
		 * the clocks are not turned off while a transaction is
		 * still pending.
		 */
		mb();
	}
}

static void __sync_tlb(void __iomem *base, int ctx)
{
	SET_TLBSYNC(base, ctx, 0);

	/* No barrier needed due to register proximity */
	while (GET_CB_TLBSTATUS_SACTIVE(base, ctx))
		cpu_relax();

	/* No barrier needed due to read dependency */
}

static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   ctx_drvdata->asid | (va & CB_TLBIVA_VA));
		mb();
		__sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
			     ctx_drvdata->asid);
		mb();
		__sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
		__disable_clocks(iommu_drvdata);
	}

fail:
	return ret;
}

/*
 * May only be called for non-secure iommus
 */
static void __reset_iommu(void __iomem *base)
{
	int i, smt_size;

	SET_ACR(base, 0);
	SET_CR2(base, 0);
	SET_GFAR(base, 0);
	SET_GFSRRESTORE(base, 0);
	SET_TLBIALLNSNH(base, 0);
	SET_SCR1(base, 0);
	SET_SSDR_N(base, 0, 0);
	smt_size = GET_IDR0_NUMSMRG(base);

	for (i = 0; i < smt_size; i++)
		SET_SMR_VALID(base, i, 0);

	mb();
}

/*
 * May only be called for non-secure iommus
 */
static void __program_iommu(void __iomem *base)
{
	__reset_iommu(base);

	SET_CR0_SMCFCFG(base, 1);
	SET_CR0_USFCFG(base, 1);
	SET_CR0_STALLD(base, 1);
	SET_CR0_GCFGFIE(base, 1);
	SET_CR0_GCFGFRE(base, 1);
	SET_CR0_GFIE(base, 1);
	SET_CR0_GFRE(base, 1);
	SET_CR0_CLIENTPD(base, 0);

	mb(); /* Make sure writes complete before returning */
}

void program_iommu_bfb_settings(void __iomem *base,
			const struct msm_iommu_bfb_settings *bfb_settings)
{
	unsigned int i;

	if (bfb_settings)
		for (i = 0; i < bfb_settings->length; i++)
			SET_GLOBAL_REG(base, bfb_settings->regs[i],
				       bfb_settings->data[i]);

	mb(); /* Make sure writes complete before returning */
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_ACTLR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_TLBIALL(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	mb();
}

static void __release_smg(void __iomem *base, int ctx)
{
	int i, smt_size;

	smt_size = GET_IDR0_NUMSMRG(base);

	/* Invalidate any SMGs associated with this context */
	for (i = 0; i < smt_size; i++)
		if (GET_SMR_VALID(base, i) &&
		    GET_S2CR_CBNDX(base, i) == ctx)
			SET_SMR_VALID(base, i, 0);
}

static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
				  struct msm_iommu_ctx_drvdata *curr_ctx,
				  struct msm_iommu_priv *priv)
{
	unsigned int found = 0;
	void __iomem *base = iommu_drvdata->base;
	unsigned int i;
	unsigned int ncb = iommu_drvdata->ncb;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;

	/* Find if this page table is used elsewhere, and re-use ASID */
	if (!list_empty(&priv->list_attached)) {
		tmp_drvdata = list_first_entry(&priv->list_attached,
				struct msm_iommu_ctx_drvdata, attached_elm);

		++iommu_drvdata->asid[tmp_drvdata->asid - 1];
		curr_ctx->asid = tmp_drvdata->asid;

		SET_CB_CONTEXTIDR_ASID(base, curr_ctx->num, curr_ctx->asid);
		found = 1;
	}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; ++i) {
			if (iommu_drvdata->asid[i] == 0) {
				++iommu_drvdata->asid[i];
				curr_ctx->asid = i + 1;

				SET_CB_CONTEXTIDR_ASID(base, curr_ctx->num,
						       curr_ctx->asid);
				found = 1;
				break;
			}
		}
		BUG_ON(!found);
	}
}
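
/*
 * Bookkeeping invariant assumed above and in msm_iommu_detach_dev():
 * iommu_drvdata->asid[i] counts the attached contexts currently using
 * ASID value i + 1, so a slot value of 0 means that ASID is free;
 * detach decrements the slot and BUG()s if it was already 0.
 */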

static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,
			      struct msm_iommu_ctx_drvdata *ctx_drvdata,
			      struct msm_iommu_priv *priv, bool is_secure)
{
	unsigned int prrr, nmrr;
	unsigned int pn;
	int num = 0, i, smt_size;
	void __iomem *base = iommu_drvdata->base;
	unsigned int ctx = ctx_drvdata->num;
	u32 *sids = ctx_drvdata->sids;
	int len = ctx_drvdata->nsid;
	phys_addr_t pgtable = __pa(priv->pt.fl_table);

	__reset_context(base, ctx);

	pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
	SET_TTBCR(base, ctx, 0);
	SET_CB_TTBR0_ADDR(base, ctx, pn);

	/* Enable context fault interrupt */
	SET_CB_SCTLR_CFIE(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_CB_ACTLR_BPRCISH(base, ctx, 1);
	SET_CB_ACTLR_BPRCOSH(base, ctx, 1);
	SET_CB_ACTLR_BPRCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_CB_SCTLR_TRE(base, ctx, 1);

	/* Enable private ASID namespace */
	SET_CB_SCTLR_ASIDPNE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (priv->pt.redirect) {
		SET_CB_TTBR0_S(base, ctx, 1);
		SET_CB_TTBR0_NOS(base, ctx, 1);
		SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */
		SET_CB_TTBR0_IRGN0(base, ctx, 1);
		SET_CB_TTBR0_RGN(base, ctx, 1);   /* WB, WA */
	}

	if (!is_secure) {
		smt_size = GET_IDR0_NUMSMRG(base);
		/* Program the M2V tables for this context */
		for (i = 0; i < len / sizeof(*sids); i++) {
			for (; num < smt_size; num++)
				if (GET_SMR_VALID(base, num) == 0)
					break;
			BUG_ON(num >= smt_size);

			SET_SMR_VALID(base, num, 1);
			SET_SMR_MASK(base, num, 0);
			SET_SMR_ID(base, num, sids[i]);

			SET_S2CR_N(base, num, 0);
			SET_S2CR_CBNDX(base, num, ctx);
			SET_S2CR_MEMATTR(base, num, 0x0A);
			/* Set security bit override to be Non-secure */
			SET_S2CR_NSCFG(base, num, 3);
		}
		SET_CBAR_N(base, ctx, 0);

		/* Stage 1 Context with Stage 2 bypass */
		SET_CBAR_TYPE(base, ctx, 1);

		/* Route page faults to the non-secure interrupt */
		SET_CBAR_IRPTNDX(base, ctx, 1);

		/* Set VMID to non-secure HLOS */
		SET_CBAR_VMID(base, ctx, 3);

		/* Bypass is treated as inner-shareable */
		SET_CBAR_BPSHCFG(base, ctx, 2);

		/* Do not downgrade memory attributes */
		SET_CBAR_MEMATTR(base, ctx, 0x0A);
	}

	msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, priv);

	/* Enable the MMU */
	SET_CB_SCTLR_M(base, ctx, 1);
	mb();
}

static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_iommu_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

#ifdef CONFIG_IOMMU_PGTABLES_L2
	priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
#endif

	INIT_LIST_HEAD(&priv->list_attached);
	if (msm_iommu_pagetable_alloc(&priv->pt))
		goto fail_nomem;

	domain->priv = priv;
	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv)
		msm_iommu_pagetable_free(&priv->pt);

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}

static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret;
	int is_secure;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto fail;
	}

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	is_secure = iommu_drvdata->sec_id != -1;

	ret = __enable_regulators(iommu_drvdata);
	if (ret)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret) {
		__disable_regulators(iommu_drvdata);
		goto fail;
	}

	/* Global IOMMU programming may only be done once */
	if (!iommu_drvdata->ctx_attach_count) {
		if (!is_secure) {
			iommu_halt(iommu_drvdata);
			__program_iommu(iommu_drvdata->base);
			iommu_resume(iommu_drvdata);
		} else {
			ret = msm_iommu_sec_program_iommu(
						iommu_drvdata->sec_id);
			if (ret) {
				__disable_regulators(iommu_drvdata);
				__disable_clocks(iommu_drvdata);
				goto fail;
			}
		}
		program_iommu_bfb_settings(iommu_drvdata->base,
					   iommu_drvdata->bfb_settings);
	}

	iommu_halt(iommu_drvdata);

	__program_context(iommu_drvdata, ctx_drvdata, priv, is_secure);

	iommu_resume(iommu_drvdata);

	__disable_clocks(iommu_drvdata);

	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ctx_drvdata->attached_domain = domain;
	++iommu_drvdata->ctx_attach_count;

	mutex_unlock(&msm_iommu_lock);

	msm_iommu_attached(dev->parent);
	return ret;
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;
	int is_secure;

	msm_iommu_detached(dev->parent);

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	is_secure = iommu_drvdata->sec_id != -1;

	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, ctx_drvdata->asid);

	BUG_ON(iommu_drvdata->asid[ctx_drvdata->asid - 1] == 0);
	iommu_drvdata->asid[ctx_drvdata->asid - 1]--;
	ctx_drvdata->asid = -1;

	iommu_halt(iommu_drvdata);

	__reset_context(iommu_drvdata->base, ctx_drvdata->num);
	if (!is_secure)
		__release_smg(iommu_drvdata->base, ctx_drvdata->num);

	iommu_resume(iommu_drvdata);

	__disable_clocks(iommu_drvdata);

	__disable_regulators(iommu_drvdata);

	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;
	BUG_ON(iommu_drvdata->ctx_attach_count == 0);
	--iommu_drvdata->ctx_attach_count;
fail:
	mutex_unlock(&msm_iommu_lock);
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_iommu_priv *priv;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	ret = msm_iommu_pagetable_map(&priv->pt, va, pa, len, prot);
	if (ret)
		goto fail;

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			      size_t len)
{
	struct msm_iommu_priv *priv;
	int ret = -ENODEV;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv)
		goto fail;

	ret = msm_iommu_pagetable_unmap(&priv->pt, va, len);
	if (ret < 0)
		goto fail;

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);

	/* the IOMMU API requires us to return how many bytes were unmapped */
	len = ret ? 0 : len;
	return len;
}

static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	int ret;
	struct msm_iommu_priv *priv;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	ret = msm_iommu_pagetable_map_range(&priv->pt, va, sg, len, prot);
	if (ret)
		goto fail;

	__flush_iotlb(domain);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	struct msm_iommu_priv *priv;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	msm_iommu_pagetable_unmap_range(&priv->pt, va, len);

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}

static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret) {
		ret = 0;	/* 0 indicates translation failed */
		goto fail;
	}

	SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
	mb();
	while (GET_CB_ATSR_ACTIVE(base, ctx))
		cpu_relax();

	par = GET_PAR(base, ctx);
	__disable_clocks(iommu_drvdata);

	if (par & CB_PAR_F) {
		ret = 0;
	} else {
		/* Check whether we are dealing with a supersection */
		if (par & CB_PAR_SS)
			/* Upper 8 bits from PAR, lower 24 from VA */
			ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
		else
			/* Upper 20 bits from PAR, lower 12 from VA */
			ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
	}

fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static void print_ctx_regs(void __iomem *base, int ctx, unsigned int fsr)
{
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "PF " : "",
	       (fsr & 0x10) ? "EF " : "",
	       (fsr & 0x20) ? "TLBMCF " : "",
	       (fsr & 0x40) ? "TLBLKF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct msm_iommu_drvdata *drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int fsr;
	int ret;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(!pdev);

	drvdata = dev_get_drvdata(pdev->dev.parent);
	BUG_ON(!drvdata);

	ctx_drvdata = dev_get_drvdata(&pdev->dev);
	BUG_ON(!ctx_drvdata);

	ret = __enable_clocks(drvdata);
	if (ret) {
		ret = IRQ_NONE;
		goto fail;
	}

	fsr = GET_FSR(drvdata->base, ctx_drvdata->num);
	if (fsr) {
		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			ret = -ENOSYS;
		} else
			ret = report_iommu_fault(ctx_drvdata->attached_domain,
				&ctx_drvdata->pdev->dev,
				GET_FAR(drvdata->base, ctx_drvdata->num), 0);

		if (ret == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault!\n");
			pr_err("name = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name,
			       ctx_drvdata->num);
			pr_err("Interesting registers:\n");
			print_ctx_regs(drvdata->base, ctx_drvdata->num, fsr);
		}

		SET_FSR(drvdata->base, ctx_drvdata->num, fsr);
		ret = IRQ_HANDLED;
	} else
		ret = IRQ_NONE;

	__disable_clocks(drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;

	return __pa(priv->pt.fl_table);
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};
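
/*
 * Usage sketch (illustrative only, compiled out): once msm_iommu_init()
 * below registers msm_iommu_ops on the platform bus, clients drive this
 * driver through the generic IOMMU API roughly as follows. The device
 * pointer and addresses are hypothetical, and this tree's
 * iommu_domain_alloc() may take extra domain flags that are simply
 * assumed to be absent here.
 */
#if 0
static int example_map_one_page(struct device *client_dev)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	/* Ends up in msm_iommu_attach_dev() */
	ret = iommu_attach_device(domain, client_dev);
	if (ret)
		goto free_domain;

	/* Map one 4K page: IOVA 0x10000000 -> PA 0x80000000 */
	ret = iommu_map(domain, 0x10000000, 0x80000000, SZ_4K,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto detach;

	/* ... device DMA through the IOVA happens here ... */

	iommu_unmap(domain, 0x10000000, SZ_4K);
detach:
	iommu_detach_device(domain, client_dev);
free_domain:
	iommu_domain_free(domain);
	return ret;
}
#endif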

static int __init msm_iommu_init(void)
{
	msm_iommu_pagetable_init();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU v2 Driver");