/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <asm/sizes.h>

#include <mach/iommu_hw-v1.h>
#include <mach/iommu.h>
#include <mach/msm_iommu_priv.h>
#include <mach/iommu_perfmon.h>
#include <mach/msm_bus.h>
#include "msm_iommu_pagetable.h"

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_MUTEX(msm_iommu_lock);
struct dump_regs_tbl dump_regs_tbl[MAX_DUMP_REGS];

static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
{
	int ret = 0;

	if (drvdata->gdsc) {
		ret = regulator_enable(drvdata->gdsc);
		if (ret)
			goto fail;

		if (drvdata->alt_gdsc)
			ret = regulator_enable(drvdata->alt_gdsc);

		if (ret)
			regulator_disable(drvdata->gdsc);
	}
fail:
	return ret;
}

static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->alt_gdsc)
		regulator_disable(drvdata->alt_gdsc);

	if (drvdata->gdsc)
		regulator_disable(drvdata->gdsc);
}

static int apply_bus_vote(struct msm_iommu_drvdata *drvdata, unsigned int vote)
{
	int ret = 0;

	if (drvdata->bus_client) {
		ret = msm_bus_scale_client_update_request(drvdata->bus_client,
							  vote);
		if (ret)
			pr_err("%s: Failed to vote for bus: %d\n", __func__,
			       vote);
	}
	return ret;
}

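/*
 * Clock bring-up order: the interface clock (pclk) comes up before the
 * core clock (clk), with the optional AXI clock (aclk) last. On targets
 * that expose a clock control register (clk_reg_virt), bit 0 appears to
 * gate the IOMMU clock; it is cleared here, and the mb() ensures the
 * write has reached the hardware before any further register access.
 */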
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_prepare_enable(drvdata->pclk);
	if (ret)
		goto fail;

	ret = clk_prepare_enable(drvdata->clk);
	if (ret)
		clk_disable_unprepare(drvdata->pclk);

	if (drvdata->aclk) {
		ret = clk_prepare_enable(drvdata->aclk);
		if (ret) {
			clk_disable_unprepare(drvdata->clk);
			clk_disable_unprepare(drvdata->pclk);
		}
	}

	if (drvdata->clk_reg_virt) {
		unsigned int value;

		value = readl_relaxed(drvdata->clk_reg_virt);
		value &= ~0x1;
		writel_relaxed(value, drvdata->clk_reg_virt);
		/* Ensure clock is on before continuing */
		mb();
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->aclk)
		clk_disable_unprepare(drvdata->aclk);
	clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}

static void _iommu_lock_acquire(unsigned int need_extra_lock)
{
	mutex_lock(&msm_iommu_lock);
}

static void _iommu_lock_release(unsigned int need_extra_lock)
{
	mutex_unlock(&msm_iommu_lock);
}

struct iommu_access_ops iommu_access_ops_v1 = {
	.iommu_power_on = __enable_regulators,
	.iommu_power_off = __disable_regulators,
	.iommu_bus_vote = apply_bus_vote,
	.iommu_clk_on = __enable_clocks,
	.iommu_clk_off = __disable_clocks,
	.iommu_lock_acquire = _iommu_lock_acquire,
	.iommu_lock_release = _iommu_lock_release,
};

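/*
 * iommu_halt()/iommu_resume() bracket sequences that reprogram IOMMU
 * state: halt stalls new transactions and spins until the hardware
 * reports idle, the caller reprograms registers, and resume releases
 * the halt once pending transactions have drained. Both are no-ops
 * when halt_enabled is not set for this IOMMU.
 */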
void iommu_halt(const struct msm_iommu_drvdata *iommu_drvdata)
{
	if (iommu_drvdata->halt_enabled) {
		SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 1);

		while (GET_MICRO_MMU_CTRL_IDLE(iommu_drvdata->base) == 0)
			cpu_relax();
		/* Ensure device is idle before continuing */
		mb();
	}
}

void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata)
{
	if (iommu_drvdata->halt_enabled) {
		/*
		 * Ensure transactions have completed before releasing
		 * the halt
		 */
		mb();
		SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 0);
		/*
		 * Ensure write is complete before continuing to ensure
		 * we don't turn off clocks while transaction is still
		 * pending.
		 */
		mb();
	}
}

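/*
 * After a TLB invalidate has been posted, __sync_tlb() writes TLBSYNC
 * and polls TLBSTATUS until SACTIVE clears, guaranteeing the
 * invalidation has completed before the caller proceeds (and before
 * clocks are turned back off).
 */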
static void __sync_tlb(void __iomem *base, int ctx)
{
	SET_TLBSYNC(base, ctx, 0);

	/* No barrier needed due to register proximity */
	while (GET_CB_TLBSTATUS_SACTIVE(base, ctx))
		cpu_relax();

	/* No barrier needed due to read dependency */
}

static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   ctx_drvdata->asid | (va & CB_TLBIVA_VA));
		mb();
		__sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
			     ctx_drvdata->asid);
		mb();
		__sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
		__disable_clocks(iommu_drvdata);
	}

fail:
	return ret;
}

/*
 * May only be called for non-secure iommus
 */
static void __reset_iommu(void __iomem *base)
{
	int i, smt_size;

	SET_ACR(base, 0);
	SET_CR2(base, 0);
	SET_GFAR(base, 0);
	SET_GFSRRESTORE(base, 0);
	SET_TLBIALLNSNH(base, 0);
	smt_size = GET_IDR0_NUMSMRG(base);

	for (i = 0; i < smt_size; i++)
		SET_SMR_VALID(base, i, 0);

	mb();
}

#ifdef CONFIG_IOMMU_NON_SECURE
static void __reset_iommu_secure(void __iomem *base)
{
	SET_NSACR(base, 0);
	SET_NSCR2(base, 0);
	SET_NSGFAR(base, 0);
	SET_NSGFSRRESTORE(base, 0);
	mb();
}

static void __program_iommu_secure(void __iomem *base)
{
	SET_NSCR0_SMCFCFG(base, 1);
	SET_NSCR0_USFCFG(base, 1);
	SET_NSCR0_STALLD(base, 1);
	SET_NSCR0_GCFGFIE(base, 1);
	SET_NSCR0_GCFGFRE(base, 1);
	SET_NSCR0_GFIE(base, 1);
	SET_NSCR0_GFRE(base, 1);
	SET_NSCR0_CLIENTPD(base, 0);
}

#else
static inline void __reset_iommu_secure(void __iomem *base)
{
}

static inline void __program_iommu_secure(void __iomem *base)
{
}

#endif

/*
 * May only be called for non-secure iommus
 */
static void __program_iommu(void __iomem *base)
{
	__reset_iommu(base);
	__reset_iommu_secure(base);

	SET_CR0_SMCFCFG(base, 1);
	SET_CR0_USFCFG(base, 1);
	SET_CR0_STALLD(base, 1);
	SET_CR0_GCFGFIE(base, 1);
	SET_CR0_GCFGFRE(base, 1);
	SET_CR0_GFIE(base, 1);
	SET_CR0_GFRE(base, 1);
	SET_CR0_CLIENTPD(base, 0);

	__program_iommu_secure(base);

	mb(); /* Make sure writes complete before returning */
}

void program_iommu_bfb_settings(void __iomem *base,
			const struct msm_iommu_bfb_settings *bfb_settings)
{
	unsigned int i;

	if (bfb_settings)
		for (i = 0; i < bfb_settings->length; i++)
			SET_GLOBAL_REG(base, bfb_settings->regs[i],
				       bfb_settings->data[i]);

	mb(); /* Make sure writes complete before returning */
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_ACTLR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_TLBIALL(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	mb();
}

static void __release_smg(void __iomem *base, int ctx)
{
	int i, smt_size;

	smt_size = GET_IDR0_NUMSMRG(base);

	/* Invalidate any SMGs associated with this context */
	for (i = 0; i < smt_size; i++)
		if (GET_SMR_VALID(base, i) &&
		    GET_S2CR_CBNDX(base, i) == ctx)
			SET_SMR_VALID(base, i, 0);
}

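/*
 * ASID management: iommu_drvdata->asid[] holds one reference count per
 * possible ASID (ASID n is tracked at index n - 1, so a count of 0
 * means unused). Contexts that share a page table share an ASID, which
 * is what lets __flush_iotlb() invalidate by ASID and hit every
 * context bank attached to the same domain.
 */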
static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
				  struct msm_iommu_ctx_drvdata *curr_ctx,
				  struct msm_iommu_priv *priv)
{
	unsigned int found = 0;
	void __iomem *base = iommu_drvdata->base;
	unsigned int i;
	unsigned int ncb = iommu_drvdata->ncb;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;

	/* Find if this page table is used elsewhere, and re-use ASID */
	if (!list_empty(&priv->list_attached)) {
		tmp_drvdata = list_first_entry(&priv->list_attached,
				struct msm_iommu_ctx_drvdata, attached_elm);

		++iommu_drvdata->asid[tmp_drvdata->asid - 1];
		curr_ctx->asid = tmp_drvdata->asid;

		SET_CB_CONTEXTIDR_ASID(base, curr_ctx->num, curr_ctx->asid);
		found = 1;
	}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; ++i) {
			if (iommu_drvdata->asid[i] == 0) {
				++iommu_drvdata->asid[i];
				curr_ctx->asid = i + 1;

				SET_CB_CONTEXTIDR_ASID(base, curr_ctx->num,
						       curr_ctx->asid);
				found = 1;
				break;
			}
		}
		BUG_ON(!found);
	}
}

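/*
 * Program one context bank: reset it, point TTBR0 at the domain's
 * first-level page table, enable fault interrupts and TEX remap,
 * optionally mark page table walks cacheable, program the
 * stream-mapping (M2V) tables for non-secure IOMMUs, assign an ASID,
 * and finally enable translation.
 */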
static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,
			      struct msm_iommu_ctx_drvdata *ctx_drvdata,
			      struct msm_iommu_priv *priv, bool is_secure)
{
	unsigned int prrr, nmrr;
	unsigned int pn;
	int num = 0, i, smt_size;
	void __iomem *base = iommu_drvdata->base;
	unsigned int ctx = ctx_drvdata->num;
	u32 *sids = ctx_drvdata->sids;
	int len = ctx_drvdata->nsid;
	phys_addr_t pgtable = __pa(priv->pt.fl_table);

	__reset_context(base, ctx);

	pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
	SET_TTBCR(base, ctx, 0);
	SET_CB_TTBR0_ADDR(base, ctx, pn);

	/* Enable context fault interrupt */
	SET_CB_SCTLR_CFIE(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_CB_ACTLR_BPRCISH(base, ctx, 1);
	SET_CB_ACTLR_BPRCOSH(base, ctx, 1);
	SET_CB_ACTLR_BPRCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_CB_SCTLR_TRE(base, ctx, 1);

	/* Enable private ASID namespace */
	SET_CB_SCTLR_ASIDPNE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (priv->pt.redirect) {
		SET_CB_TTBR0_S(base, ctx, 1);
		SET_CB_TTBR0_NOS(base, ctx, 1);
		SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */
		SET_CB_TTBR0_IRGN0(base, ctx, 1);
		SET_CB_TTBR0_RGN(base, ctx, 1);   /* WB, WA */
	}

	if (!is_secure) {
		smt_size = GET_IDR0_NUMSMRG(base);
		/* Program the M2V tables for this context */
		for (i = 0; i < len / sizeof(*sids); i++) {
			for (; num < smt_size; num++)
				if (GET_SMR_VALID(base, num) == 0)
					break;
			BUG_ON(num >= smt_size);

			SET_SMR_VALID(base, num, 1);
			SET_SMR_MASK(base, num, 0);
			SET_SMR_ID(base, num, sids[i]);

			SET_S2CR_N(base, num, 0);
			SET_S2CR_CBNDX(base, num, ctx);
			SET_S2CR_MEMATTR(base, num, 0x0A);
			/* Set security bit override to be Non-secure */
			SET_S2CR_NSCFG(base, num, 3);
		}
		SET_CBAR_N(base, ctx, 0);

		/* Stage 1 Context with Stage 2 bypass */
		SET_CBAR_TYPE(base, ctx, 1);

		/* Route page faults to the non-secure interrupt */
		SET_CBAR_IRPTNDX(base, ctx, 1);

		/* Set VMID to non-secure HLOS */
		SET_CBAR_VMID(base, ctx, 3);

		/* Bypass is treated as inner-shareable */
		SET_CBAR_BPSHCFG(base, ctx, 2);

		/* Do not downgrade memory attributes */
		SET_CBAR_MEMATTR(base, ctx, 0x0A);
	}

	msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, priv);

	/* Enable the MMU */
	SET_CB_SCTLR_M(base, ctx, 1);
	mb();
}

static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_iommu_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

#ifdef CONFIG_IOMMU_PGTABLES_L2
	priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
#endif

	INIT_LIST_HEAD(&priv->list_attached);
	if (msm_iommu_pagetable_alloc(&priv->pt))
		goto fail_nomem;

	domain->priv = priv;
	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv)
		msm_iommu_pagetable_free(&priv->pt);

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}

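/*
 * Attach sequence: validate the domain/device pair, then power up in
 * order (regulators -> bus vote -> clocks). Global programming via
 * __program_iommu() or msm_iommu_sec_program_iommu() happens only on
 * the first attach for a given IOMMU (ctx_attach_count == 0); every
 * attach then programs its own context bank under an
 * iommu_halt()/iommu_resume() bracket.
 */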
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret;
	int is_secure;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto fail;
	}

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	is_secure = iommu_drvdata->sec_id != -1;

	ret = __enable_regulators(iommu_drvdata);
	if (ret)
		goto fail;

	ret = apply_bus_vote(iommu_drvdata, 1);
	if (ret)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret) {
		__disable_regulators(iommu_drvdata);
		goto fail;
	}

	/* We can only do this once */
	if (!iommu_drvdata->ctx_attach_count) {
		if (!is_secure) {
			iommu_halt(iommu_drvdata);
			__program_iommu(iommu_drvdata->base);
			iommu_resume(iommu_drvdata);
		} else {
			ret = msm_iommu_sec_program_iommu(
				iommu_drvdata->sec_id);
			if (ret) {
				__disable_regulators(iommu_drvdata);
				__disable_clocks(iommu_drvdata);
				goto fail;
			}
		}
		program_iommu_bfb_settings(iommu_drvdata->base,
					   iommu_drvdata->bfb_settings);
	}

	iommu_halt(iommu_drvdata);

	__program_context(iommu_drvdata, ctx_drvdata, priv, is_secure);

	iommu_resume(iommu_drvdata);

	__disable_clocks(iommu_drvdata);

	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ctx_drvdata->attached_domain = domain;
	++iommu_drvdata->ctx_attach_count;

	mutex_unlock(&msm_iommu_lock);

	msm_iommu_attached(dev->parent);
	return ret;
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;
	int is_secure;

	msm_iommu_detached(dev->parent);

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	is_secure = iommu_drvdata->sec_id != -1;

	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, ctx_drvdata->asid);

	BUG_ON(iommu_drvdata->asid[ctx_drvdata->asid - 1] == 0);
	iommu_drvdata->asid[ctx_drvdata->asid - 1]--;
	ctx_drvdata->asid = -1;

	iommu_halt(iommu_drvdata);

	__reset_context(iommu_drvdata->base, ctx_drvdata->num);
	if (!is_secure)
		__release_smg(iommu_drvdata->base, ctx_drvdata->num);

	iommu_resume(iommu_drvdata);

	__disable_clocks(iommu_drvdata);

	apply_bus_vote(iommu_drvdata, 0);

	__disable_regulators(iommu_drvdata);

	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;
	BUG_ON(iommu_drvdata->ctx_attach_count == 0);
	--iommu_drvdata->ctx_attach_count;
fail:
	mutex_unlock(&msm_iommu_lock);
}

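/*
 * Map/unmap update the software page tables and then invalidate the
 * TLB: the single-mapping paths below flush by VA, while the
 * scatterlist range variants further down flush the whole ASID.
 */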
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_iommu_priv *priv;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	ret = msm_iommu_pagetable_map(&priv->pt, va, pa, len, prot);
	if (ret)
		goto fail;

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			      size_t len)
{
	struct msm_iommu_priv *priv;
	int ret = -ENODEV;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv)
		goto fail;

	ret = msm_iommu_pagetable_unmap(&priv->pt, va, len);
	if (ret < 0)
		goto fail;

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);

	/* the IOMMU API requires us to return how many bytes were unmapped */
	len = ret ? 0 : len;
	return len;
}

static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	int ret;
	struct msm_iommu_priv *priv;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	ret = msm_iommu_pagetable_map_range(&priv->pt, va, sg, len, prot);
	if (ret)
		goto fail;

	__flush_iotlb(domain);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	struct msm_iommu_priv *priv;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	msm_iommu_pagetable_unmap_range(&priv->pt, va, len);

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}

static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	u64 par;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret) {
		ret = 0; /* 0 indicates translation failed */
		goto fail;
	}

	SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
	mb();
	while (GET_CB_ATSR_ACTIVE(base, ctx))
		cpu_relax();

	par = GET_PAR(base, ctx);
	__disable_clocks(iommu_drvdata);

	if (par & CB_PAR_F) {
		unsigned int level = (par & CB_PAR_PLVL) >> CB_PAR_PLVL_SHIFT;

		pr_err("IOMMU translation fault!\n");
		pr_err("name = %s\n", iommu_drvdata->name);
		pr_err("context = %s (%d)\n", ctx_drvdata->name,
						ctx_drvdata->num);
		pr_err("Interesting registers:\n");
		pr_err("PAR = %16llx [%s%s%s%s%s%s%s%sPLVL%u %s]\n", par,
			(par & CB_PAR_F) ? "F " : "",
			(par & CB_PAR_TF) ? "TF " : "",
			(par & CB_PAR_AFF) ? "AFF " : "",
			(par & CB_PAR_PF) ? "PF " : "",
			(par & CB_PAR_EF) ? "EF " : "",
			(par & CB_PAR_TLBMCF) ? "TLBMCF " : "",
			(par & CB_PAR_TLBLKF) ? "TLBLKF " : "",
			(par & CB_PAR_ATOT) ? "ATOT " : "",
			level,
			(par & CB_PAR_STAGE) ? "S2 " : "S1 ");
		ret = 0;
	} else {
		/* We are dealing with a supersection */
		if (par & CB_PAR_SS)
			ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
		else /* Upper 20 bits from PAR, lower 12 from VA */
			ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
	}

fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

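/*
 * Dump the context bank registers captured in regs[]. The 64-bit
 * values (FAR, PAR, TTBRn) are printed by combining their high and low
 * 32-bit halves with COMBINE_DUMP_REG; a TTBR whose upper half is not
 * marked valid is flagged as 32-bit.
 */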
void print_ctx_regs(struct msm_iommu_context_reg regs[])
{
	uint32_t fsr = regs[DUMP_REG_FSR].val;
	u64 ttbr;

	pr_err("FAR = %016llx\n",
		COMBINE_DUMP_REG(
			regs[DUMP_REG_FAR1].val,
			regs[DUMP_REG_FAR0].val));
	pr_err("PAR = %016llx\n",
		COMBINE_DUMP_REG(
			regs[DUMP_REG_PAR1].val,
			regs[DUMP_REG_PAR0].val));
	pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "PF " : "",
			(fsr & 0x10) ? "EF " : "",
			(fsr & 0x20) ? "TLBMCF " : "",
			(fsr & 0x40) ? "TLBLKF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
		regs[DUMP_REG_FSYNR0].val, regs[DUMP_REG_FSYNR1].val);

	ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR0_1].val,
				regs[DUMP_REG_TTBR0_0].val);
	if (regs[DUMP_REG_TTBR0_1].valid)
		pr_err("TTBR0 = %016llx\n", ttbr);
	else
		pr_err("TTBR0 = %016llx (32b)\n", ttbr);

	ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR1_1].val,
				regs[DUMP_REG_TTBR1_0].val);

	if (regs[DUMP_REG_TTBR1_1].valid)
		pr_err("TTBR1 = %016llx\n", ttbr);
	else
		pr_err("TTBR1 = %016llx (32b)\n", ttbr);

	pr_err("SCTLR = %08x ACTLR = %08x\n",
		regs[DUMP_REG_SCTLR].val, regs[DUMP_REG_ACTLR].val);
	pr_err("PRRR = %08x NMRR = %08x\n",
		regs[DUMP_REG_PRRR].val, regs[DUMP_REG_NMRR].val);
}

static void __print_ctx_regs(void __iomem *base, int ctx, unsigned int fsr)
{
	struct msm_iommu_context_reg regs[MAX_DUMP_REGS];
	unsigned int i;

	for (i = DUMP_REG_FIRST; i < MAX_DUMP_REGS; ++i) {
		regs[i].val = GET_CTX_REG(dump_regs_tbl[i].key, base, ctx);
		regs[i].valid = 1;
	}
	print_ctx_regs(regs);
}

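/*
 * Context fault ISR. It takes a mutex, so it must run in a context
 * that may sleep (i.e. a threaded handler). It reads FSR to confirm a
 * fault occurred and gives the attached domain's fault handler first
 * crack via report_iommu_fault(); if nobody handles it (-ENOSYS), the
 * context registers are dumped. The fault is acknowledged by writing
 * FSR back, unless the handler returned -EBUSY.
 */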
irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct msm_iommu_drvdata *drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int fsr;
	int ret;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(!pdev);

	drvdata = dev_get_drvdata(pdev->dev.parent);
	BUG_ON(!drvdata);

	ctx_drvdata = dev_get_drvdata(&pdev->dev);
	BUG_ON(!ctx_drvdata);

	if (!drvdata->ctx_attach_count) {
		pr_err("Unexpected IOMMU page fault!\n");
		pr_err("name = %s\n", drvdata->name);
		pr_err("Power is OFF. Unable to read page fault information\n");
		/*
		 * We cannot determine which context bank caused the issue so
		 * we just return handled here to ensure IRQ handler code is
		 * happy
		 */
		ret = IRQ_HANDLED;
		goto fail;
	}

	ret = __enable_clocks(drvdata);
	if (ret) {
		ret = IRQ_NONE;
		goto fail;
	}

	fsr = GET_FSR(drvdata->base, ctx_drvdata->num);
	if (fsr) {
		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			ret = -ENOSYS;
		} else
			ret = report_iommu_fault(ctx_drvdata->attached_domain,
				&ctx_drvdata->pdev->dev,
				GET_FAR(drvdata->base, ctx_drvdata->num), 0);

		if (ret == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault!\n");
			pr_err("name = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name,
							ctx_drvdata->num);
			pr_err("Interesting registers:\n");
			__print_ctx_regs(drvdata->base, ctx_drvdata->num, fsr);
		}

		if (ret != -EBUSY)
			SET_FSR(drvdata->base, ctx_drvdata->num, fsr);
		ret = IRQ_HANDLED;
	} else
		ret = IRQ_NONE;

	__disable_clocks(drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;

	return __pa(priv->pt.fl_table);
}

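/*
 * Build the table that drives __print_ctx_regs()/print_ctx_regs():
 * each entry records a context-bank register offset, its name, and
 * whether the register must be present on all targets (the upper
 * halves of the 64-bit TTBRs are optional, hence mbp = 0).
 */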
#define DUMP_REG_INIT(dump_reg, cb_reg, mbp)			\
	do {							\
		dump_regs_tbl[dump_reg].key = cb_reg;		\
		dump_regs_tbl[dump_reg].name = #cb_reg;		\
		dump_regs_tbl[dump_reg].must_be_present = mbp;	\
	} while (0)

static void msm_iommu_build_dump_regs_table(void)
{
	DUMP_REG_INIT(DUMP_REG_FAR0, CB_FAR, 1);
	DUMP_REG_INIT(DUMP_REG_FAR1, CB_FAR + 4, 1);
	DUMP_REG_INIT(DUMP_REG_PAR0, CB_PAR, 1);
	DUMP_REG_INIT(DUMP_REG_PAR1, CB_PAR + 4, 1);
	DUMP_REG_INIT(DUMP_REG_FSR, CB_FSR, 1);
	DUMP_REG_INIT(DUMP_REG_FSYNR0, CB_FSYNR0, 1);
	DUMP_REG_INIT(DUMP_REG_FSYNR1, CB_FSYNR1, 1);
	DUMP_REG_INIT(DUMP_REG_TTBR0_0, CB_TTBR0, 1);
	DUMP_REG_INIT(DUMP_REG_TTBR0_1, CB_TTBR0 + 4, 0);
	DUMP_REG_INIT(DUMP_REG_TTBR1_0, CB_TTBR1, 1);
	DUMP_REG_INIT(DUMP_REG_TTBR1_1, CB_TTBR1 + 4, 0);
	DUMP_REG_INIT(DUMP_REG_SCTLR, CB_SCTLR, 1);
	DUMP_REG_INIT(DUMP_REG_ACTLR, CB_ACTLR, 1);
	DUMP_REG_INIT(DUMP_REG_PRRR, CB_PRRR, 1);
	DUMP_REG_INIT(DUMP_REG_NMRR, CB_NMRR, 1);
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};

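/*
 * Usage sketch (illustrative only, not part of this driver): clients
 * reach these callbacks through the generic IOMMU API. Exact
 * signatures vary with this kernel's IOMMU core (e.g. this tree
 * extends domain_init with a flags argument), so treat the outline
 * below as a hypothetical call sequence, not verified code:
 *
 *	struct iommu_domain *domain;
 *	int ret;
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = iommu_attach_device(domain, dev);      // msm_iommu_attach_dev
 *	if (!ret) {
 *		iommu_map(domain, iova, paddr, SZ_4K,
 *			  IOMMU_READ | IOMMU_WRITE); // msm_iommu_map
 *		...
 *		iommu_unmap(domain, iova, SZ_4K);    // msm_iommu_unmap
 *		iommu_detach_device(domain, dev);    // msm_iommu_detach_dev
 *	}
 *	iommu_domain_free(domain);
 */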
static int __init msm_iommu_init(void)
{
	msm_iommu_pagetable_init();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	msm_iommu_build_dump_regs_table();

	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU v2 Driver");