blob: 53c7c3000166eaff58f5e438bee18cfce86d1b0a [file] [log] [blame]
Olav Haugan3c7fb382013-01-02 17:32:25 -08001/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Steve Mucklef132c6c2012-06-06 18:30:57 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/errno.h>
18#include <linux/io.h>
19#include <linux/interrupt.h>
20#include <linux/list.h>
21#include <linux/mutex.h>
22#include <linux/slab.h>
23#include <linux/iommu.h>
24#include <linux/clk.h>
25#include <linux/scatterlist.h>
Sathish Ambleycf045e62012-06-07 12:56:50 -070026#include <linux/of.h>
27#include <linux/of_device.h>
Stepan Moskovchenko6751acc2012-06-21 17:36:47 -070028#include <linux/regulator/consumer.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070029#include <asm/sizes.h>
30
Olav Haugane6d01ef2013-01-25 16:55:44 -080031#include <mach/iommu_hw-v1.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070032#include <mach/iommu.h>
Olav Haugan090614f2013-03-22 12:14:18 -070033#include <mach/msm_iommu_priv.h>
Olav Haugan5ebfbc62013-01-07 17:49:10 -080034#include <mach/iommu_perfmon.h>
Olav Haugan236970a2013-05-14 17:00:02 -070035#include <mach/msm_bus.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070036#include "msm_iommu_pagetable.h"
37
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)

/* Serializes all IOMMU state changes in this file (attach/detach/map) */
static DEFINE_MUTEX(msm_iommu_lock);
/* NOTE(review): register-dump table, presumably filled by the fault path elsewhere — confirm */
struct dump_regs_tbl dump_regs_tbl[MAX_DUMP_REGS];
Steve Mucklef132c6c2012-06-06 18:30:57 -070043
Olav Haugan2648d972013-01-07 17:32:31 -080044static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
45{
Olav Haugan00082d92013-05-21 09:21:10 -070046 int ret = 0;
47 if (drvdata->gdsc) {
48 ret = regulator_enable(drvdata->gdsc);
49 if (ret)
50 goto fail;
Olav Haugan2648d972013-01-07 17:32:31 -080051
Olav Haugan00082d92013-05-21 09:21:10 -070052 if (drvdata->alt_gdsc)
53 ret = regulator_enable(drvdata->alt_gdsc);
Olav Haugan2648d972013-01-07 17:32:31 -080054
Olav Haugan00082d92013-05-21 09:21:10 -070055 if (ret)
56 regulator_disable(drvdata->gdsc);
57 }
Olav Haugan2648d972013-01-07 17:32:31 -080058fail:
59 return ret;
60}
61
62static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
63{
64 if (drvdata->alt_gdsc)
65 regulator_disable(drvdata->alt_gdsc);
66
Olav Haugan00082d92013-05-21 09:21:10 -070067 if (drvdata->gdsc)
68 regulator_disable(drvdata->gdsc);
Olav Haugan2648d972013-01-07 17:32:31 -080069}
70
Olav Haugan236970a2013-05-14 17:00:02 -070071static int apply_bus_vote(struct msm_iommu_drvdata *drvdata, unsigned int vote)
72{
73 int ret = 0;
74
75 if (drvdata->bus_client) {
76 ret = msm_bus_scale_client_update_request(drvdata->bus_client,
77 vote);
78 if (ret)
79 pr_err("%s: Failed to vote for bus: %d\n", __func__,
80 vote);
81 }
82 return ret;
83}
84
Steve Mucklef132c6c2012-06-06 18:30:57 -070085static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
86{
87 int ret;
88
89 ret = clk_prepare_enable(drvdata->pclk);
90 if (ret)
91 goto fail;
92
Stepan Moskovchenko17ae71e2012-07-24 19:24:14 -070093 ret = clk_prepare_enable(drvdata->clk);
94 if (ret)
95 clk_disable_unprepare(drvdata->pclk);
96
97 if (drvdata->aclk) {
98 ret = clk_prepare_enable(drvdata->aclk);
99 if (ret) {
100 clk_disable_unprepare(drvdata->clk);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700101 clk_disable_unprepare(drvdata->pclk);
Stepan Moskovchenko17ae71e2012-07-24 19:24:14 -0700102 }
Steve Mucklef132c6c2012-06-06 18:30:57 -0700103 }
Olav Haugan3c7fb382013-01-02 17:32:25 -0800104
105 if (drvdata->clk_reg_virt) {
106 unsigned int value;
107
108 value = readl_relaxed(drvdata->clk_reg_virt);
109 value &= ~0x1;
110 writel_relaxed(value, drvdata->clk_reg_virt);
Olav Hauganaf4eb0b2013-02-06 09:51:48 -0800111 /* Ensure clock is on before continuing */
112 mb();
Olav Haugan3c7fb382013-01-02 17:32:25 -0800113 }
Steve Mucklef132c6c2012-06-06 18:30:57 -0700114fail:
115 return ret;
116}
117
118static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
119{
Stepan Moskovchenko17ae71e2012-07-24 19:24:14 -0700120 if (drvdata->aclk)
121 clk_disable_unprepare(drvdata->aclk);
122 clk_disable_unprepare(drvdata->clk);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700123 clk_disable_unprepare(drvdata->pclk);
124}
125
/* Take the driver-wide IOMMU lock on behalf of external users (see
 * iommu_access_ops_v1); pairs with _iommu_lock_release(). */
static void _iommu_lock_acquire(void)
{
	mutex_lock(&msm_iommu_lock);
}
130
/* Release the driver-wide IOMMU lock taken by _iommu_lock_acquire(). */
static void _iommu_lock_release(void)
{
	mutex_unlock(&msm_iommu_lock);
}
135
/*
 * Callback table exported to common code (e.g. the IOMMU perfmon layer,
 * see iommu_perfmon.h) so it can manage power, clocks, bus votes and
 * locking for a v1 IOMMU without knowing this file's internals.
 */
struct iommu_access_ops iommu_access_ops_v1 = {
	.iommu_power_on = __enable_regulators,
	.iommu_power_off = __disable_regulators,
	.iommu_bus_vote = apply_bus_vote,
	.iommu_clk_on = __enable_clocks,
	.iommu_clk_off = __disable_clocks,
	.iommu_lock_acquire = _iommu_lock_acquire,
	.iommu_lock_release = _iommu_lock_release,
};
145
/*
 * Ask the IOMMU to halt new transactions and spin until the hardware
 * reports idle.  No-op unless halt support is enabled for this IOMMU.
 * Caller must have clocks enabled; pairs with iommu_resume().
 */
void iommu_halt(const struct msm_iommu_drvdata *iommu_drvdata)
{
	if (iommu_drvdata->halt_enabled) {
		/* Raise the halt request in the micro MMU control register */
		SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 1);

		/* Busy-wait for the hardware to drain and go idle */
		while (GET_MICRO_MMU_CTRL_IDLE(iommu_drvdata->base) == 0)
			cpu_relax();
		/* Ensure device is idle before continuing */
		mb();
	}
}
157
/*
 * Release a halt previously requested via iommu_halt(), allowing the
 * IOMMU to accept transactions again.  No-op unless halt support is
 * enabled for this IOMMU.
 */
void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata)
{
	if (iommu_drvdata->halt_enabled) {
		/*
		 * Ensure transactions have completed before releasing
		 * the halt
		 */
		mb();
		SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 0);
		/*
		 * Ensure write is complete before continuing to ensure
		 * we don't turn off clocks while transaction is still
		 * pending.
		 */
		mb();
	}
}
175
/*
 * Wait for a previously issued TLB invalidation on context bank @ctx to
 * complete: write TLBSYNC and poll TLBSTATUS.SACTIVE until clear.
 */
static void __sync_tlb(void __iomem *base, int ctx)
{
	SET_TLBSYNC(base, ctx, 0);

	/* No barrier needed due to register proximity */
	while (GET_CB_TLBSTATUS_SACTIVE(base, ctx))
		cpu_relax();

	/* No barrier needed due to read dependency */
}
186
/*
 * Invalidate the TLB entry for a single virtual address in every IOMMU
 * context attached to @domain (TLBIVA keyed by the context's ASID and
 * the VA), then wait for the invalidation to drain via __sync_tlb().
 * Called with msm_iommu_lock held.  Returns 0 or a clock-enable error;
 * on error, contexts later in the list are left un-flushed.
 */
static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);


		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		/* Invalidate by ASID + VA; low bits of the VA field carry the ASID */
		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   ctx_drvdata->asid | (va & CB_TLBIVA_VA));
		mb();
		__sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}
214
/*
 * Invalidate all TLB entries tagged with each attached context's ASID
 * (TLBIASID) in every IOMMU context attached to @domain, waiting for
 * each invalidation to drain.  Called with msm_iommu_lock held.
 * Returns 0 or a clock-enable error.
 */
static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
			     ctx_drvdata->asid);
		mb();
		__sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
		__disable_clocks(iommu_drvdata);
	}

fail:
	return ret;
}
242
/*
 * May only be called for non-secure iommus
 */
static void __reset_iommu(void __iomem *base)
{
	int i, smt_size;

	/* Clear global configuration and fault state, flush all TLBs */
	SET_ACR(base, 0);
	SET_CR2(base, 0);
	SET_GFAR(base, 0);
	SET_GFSRRESTORE(base, 0);
	SET_TLBIALLNSNH(base, 0);
	smt_size = GET_IDR0_NUMSMRG(base);

	/* Invalidate every stream-mapping table entry */
	for (i = 0; i < smt_size; i++)
		SET_SMR_VALID(base, i, 0);

	mb();
}
262
#ifdef CONFIG_IOMMU_NON_SECURE
/*
 * NOTE(review): these NS-register variants only run when
 * CONFIG_IOMMU_NON_SECURE is set, i.e. when this kernel is expected to
 * own the NS banked global registers as well — confirm ownership model.
 */

/* Clear the NS banked global configuration and fault state. */
static void __reset_iommu_secure(void __iomem *base)
{
	SET_NSACR(base, 0);
	SET_NSCR2(base, 0);
	SET_NSGFAR(base, 0);
	SET_NSGFSRRESTORE(base, 0);
	mb();
}

/* Mirror __program_iommu()'s global setup into the NS register bank. */
static void __program_iommu_secure(void __iomem *base)
{
	SET_NSCR0_SMCFCFG(base, 1);
	SET_NSCR0_USFCFG(base, 1);
	SET_NSCR0_STALLD(base, 1);
	SET_NSCR0_GCFGFIE(base, 1);
	SET_NSCR0_GCFGFRE(base, 1);
	SET_NSCR0_GFIE(base, 1);
	SET_NSCR0_GFRE(base, 1);
	SET_NSCR0_CLIENTPD(base, 0);
}

#else
/* NS registers are managed elsewhere: nothing to reset. */
static inline void __reset_iommu_secure(void __iomem *base)
{
}

/* NS registers are managed elsewhere: nothing to program. */
static inline void __program_iommu_secure(void __iomem *base)
{
}

#endif
295
/*
 * May only be called for non-secure iommus
 */
/*
 * Reset the IOMMU's global state and program the global configuration
 * register: fault reporting/interrupts enabled, stalling and stream
 * match conflict/unmatched-stream faulting on, client port enabled
 * (CLIENTPD = 0).  The NS-bank equivalent is applied when built with
 * CONFIG_IOMMU_NON_SECURE.
 */
static void __program_iommu(void __iomem *base)
{
	__reset_iommu(base);
	__reset_iommu_secure(base);

	SET_CR0_SMCFCFG(base, 1);
	SET_CR0_USFCFG(base, 1);
	SET_CR0_STALLD(base, 1);
	SET_CR0_GCFGFIE(base, 1);
	SET_CR0_GCFGFRE(base, 1);
	SET_CR0_GFIE(base, 1);
	SET_CR0_GFRE(base, 1);
	SET_CR0_CLIENTPD(base, 0);

	__program_iommu_secure(base);

	mb(); /* Make sure writes complete before returning */
}
317
318void program_iommu_bfb_settings(void __iomem *base,
319 const struct msm_iommu_bfb_settings *bfb_settings)
320{
321 unsigned int i;
Stepan Moskovchenko880a3182012-10-01 12:35:24 -0700322 if (bfb_settings)
323 for (i = 0; i < bfb_settings->length; i++)
324 SET_GLOBAL_REG(base, bfb_settings->regs[i],
325 bfb_settings->data[i]);
326
Olav Hauganf3782732013-01-11 11:23:30 -0800327 mb(); /* Make sure writes complete before returning */
Sathish Ambleycf045e62012-06-07 12:56:50 -0700328}
329
/*
 * Zero out all programmable state of context bank @ctx — translation
 * table pointers, fault state, memory attributes and control — and
 * invalidate its TLB entries (TLBIALL).
 */
static void __reset_context(void __iomem *base, int ctx)
{
	SET_ACTLR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_TLBIALL(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	mb();
}
345
Stepan Moskovchenko00f0cac2012-10-05 23:56:05 -0700346static void __release_smg(void __iomem *base, int ctx)
Stepan Moskovchenkoce749352012-10-04 19:02:03 -0700347{
Stepan Moskovchenko00f0cac2012-10-05 23:56:05 -0700348 int i, smt_size;
349 smt_size = GET_IDR0_NUMSMRG(base);
Stepan Moskovchenkoce749352012-10-04 19:02:03 -0700350
351 /* Invalidate any SMGs associated with this context */
352 for (i = 0; i < smt_size; i++)
353 if (GET_SMR_VALID(base, i) &&
354 GET_S2CR_CBNDX(base, i) == ctx)
355 SET_SMR_VALID(base, i, 0);
356}
357
/*
 * Assign a hardware ASID to @curr_ctx and program it into the context's
 * CONTEXTIDR register.  If another context is already attached to the
 * same domain (and thus shares its page table), the existing ASID is
 * reused and its reference count bumped; otherwise the first free slot
 * is claimed.  iommu_drvdata->asid[] holds per-ASID reference counts
 * indexed by (asid - 1); valid ASIDs run from 1 to ncb.  BUG()s if all
 * ncb ASIDs are in use.
 */
static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
				  struct msm_iommu_ctx_drvdata *curr_ctx,
				  struct msm_iommu_priv *priv)
{
	unsigned int found = 0;
	void __iomem *base = iommu_drvdata->base;
	unsigned int i;
	unsigned int ncb = iommu_drvdata->ncb;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;

	/* Find if this page table is used elsewhere, and re-use ASID */
	if (!list_empty(&priv->list_attached)) {
		tmp_drvdata = list_first_entry(&priv->list_attached,
				struct msm_iommu_ctx_drvdata, attached_elm);

		/* Share the first attached context's ASID; count the reference */
		++iommu_drvdata->asid[tmp_drvdata->asid - 1];
		curr_ctx->asid = tmp_drvdata->asid;

		SET_CB_CONTEXTIDR_ASID(base, curr_ctx->num, curr_ctx->asid);
		found = 1;
	}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; ++i) {
			if (iommu_drvdata->asid[i] == 0) {
				++iommu_drvdata->asid[i];
				/* ASIDs are 1-based; slot i maps to ASID i+1 */
				curr_ctx->asid = i + 1;

				SET_CB_CONTEXTIDR_ASID(base, curr_ctx->num,
						       curr_ctx->asid);
				found = 1;
				break;
			}
		}
		BUG_ON(!found);
	}
}
396
/*
 * Program context bank ctx_drvdata->num for the given domain: install
 * the first-level page table, enable fault interrupts and TEX remap,
 * optionally mark the walk as cacheable/shareable, claim stream-match
 * entries for the context's SIDs (non-secure only), assign an ASID and
 * finally turn the MMU on.  Caller holds msm_iommu_lock and has clocks
 * enabled; the IOMMU is expected to be halted around this call (see
 * msm_iommu_attach_dev()).
 */
static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,
			      struct msm_iommu_ctx_drvdata *ctx_drvdata,
			      struct msm_iommu_priv *priv, bool is_secure)
{
	unsigned int prrr, nmrr;
	unsigned int pn;
	int num = 0, i, smt_size;
	void __iomem *base = iommu_drvdata->base;
	unsigned int ctx = ctx_drvdata->num;
	u32 *sids = ctx_drvdata->sids;
	int len = ctx_drvdata->nsid;
	phys_addr_t pgtable = __pa(priv->pt.fl_table);

	__reset_context(base, ctx);

	/* Point TTBR0 at the domain's first-level table (single-TTBR layout) */
	pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
	SET_TTBCR(base, ctx, 0);
	SET_CB_TTBR0_ADDR(base, ctx, pn);

	/* Enable context fault interrupt */
	SET_CB_SCTLR_CFIE(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_CB_ACTLR_BPRCISH(base, ctx, 1);
	SET_CB_ACTLR_BPRCOSH(base, ctx, 1);
	SET_CB_ACTLR_BPRCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_CB_SCTLR_TRE(base, ctx, 1);

	/* Enable private ASID namespace */
	SET_CB_SCTLR_ASIDPNE(base, ctx, 1);

	/* Set TEX remap attributes (copied from the CPU's PRRR/NMRR) */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (priv->pt.redirect) {
		SET_CB_TTBR0_S(base, ctx, 1);
		SET_CB_TTBR0_NOS(base, ctx, 1);
		SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */
		SET_CB_TTBR0_IRGN0(base, ctx, 1);
		SET_CB_TTBR0_RGN(base, ctx, 1); /* WB, WA */
	}

	if (!is_secure) {
		smt_size = GET_IDR0_NUMSMRG(base);
		/* Program the M2V tables for this context */
		for (i = 0; i < len / sizeof(*sids); i++) {
			/* Claim the next free stream-mapping slot for this SID */
			for (; num < smt_size; num++)
				if (GET_SMR_VALID(base, num) == 0)
					break;
			BUG_ON(num >= smt_size);

			SET_SMR_VALID(base, num, 1);
			SET_SMR_MASK(base, num, 0);
			SET_SMR_ID(base, num, sids[i]);

			SET_S2CR_N(base, num, 0);
			SET_S2CR_CBNDX(base, num, ctx);
			SET_S2CR_MEMATTR(base, num, 0x0A);
			/* Set security bit override to be Non-secure */
			SET_S2CR_NSCFG(base, num, 3);
		}
		SET_CBAR_N(base, ctx, 0);

		/* Stage 1 Context with Stage 2 bypass */
		SET_CBAR_TYPE(base, ctx, 1);

		/* Route page faults to the non-secure interrupt */
		SET_CBAR_IRPTNDX(base, ctx, 1);

		/* Set VMID to non-secure HLOS */
		SET_CBAR_VMID(base, ctx, 3);

		/* Bypass is treated as inner-shareable */
		SET_CBAR_BPSHCFG(base, ctx, 2);

		/* Do not downgrade memory attributes */
		SET_CBAR_MEMATTR(base, ctx, 0x0A);

	}

	msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, priv);

	/* Enable the MMU */
	SET_CB_SCTLR_M(base, ctx, 1);
	mb();
}
491
492static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
493{
Olav Haugan090614f2013-03-22 12:14:18 -0700494 struct msm_iommu_priv *priv;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700495
496 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
497 if (!priv)
498 goto fail_nomem;
499
500#ifdef CONFIG_IOMMU_PGTABLES_L2
501 priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
502#endif
503
504 INIT_LIST_HEAD(&priv->list_attached);
505 if (msm_iommu_pagetable_alloc(&priv->pt))
506 goto fail_nomem;
507
508 domain->priv = priv;
509 return 0;
510
511fail_nomem:
512 kfree(priv);
513 return -ENOMEM;
514}
515
516static void msm_iommu_domain_destroy(struct iommu_domain *domain)
517{
Olav Haugan090614f2013-03-22 12:14:18 -0700518 struct msm_iommu_priv *priv;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700519
520 mutex_lock(&msm_iommu_lock);
521 priv = domain->priv;
522 domain->priv = NULL;
523
524 if (priv)
525 msm_iommu_pagetable_free(&priv->pt);
526
527 kfree(priv);
528 mutex_unlock(&msm_iommu_lock);
529}
530
531static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
532{
Olav Haugan090614f2013-03-22 12:14:18 -0700533 struct msm_iommu_priv *priv;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700534 struct msm_iommu_drvdata *iommu_drvdata;
535 struct msm_iommu_ctx_drvdata *ctx_drvdata;
536 struct msm_iommu_ctx_drvdata *tmp_drvdata;
Stepan Moskovchenko4575bdd2012-06-28 14:59:00 -0700537 int ret;
Laura Abbottf4daa692012-10-10 19:31:53 -0700538 int is_secure;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700539
540 mutex_lock(&msm_iommu_lock);
541
542 priv = domain->priv;
543 if (!priv || !dev) {
544 ret = -EINVAL;
545 goto fail;
546 }
547
548 iommu_drvdata = dev_get_drvdata(dev->parent);
549 ctx_drvdata = dev_get_drvdata(dev);
550 if (!iommu_drvdata || !ctx_drvdata) {
551 ret = -EINVAL;
552 goto fail;
553 }
554
555 if (!list_empty(&ctx_drvdata->attached_elm)) {
556 ret = -EBUSY;
557 goto fail;
558 }
559
560 list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
561 if (tmp_drvdata == ctx_drvdata) {
562 ret = -EBUSY;
563 goto fail;
564 }
565
Laura Abbottf4daa692012-10-10 19:31:53 -0700566 is_secure = iommu_drvdata->sec_id != -1;
567
Olav Haugan2648d972013-01-07 17:32:31 -0800568 ret = __enable_regulators(iommu_drvdata);
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700569 if (ret)
570 goto fail;
571
Olav Haugan236970a2013-05-14 17:00:02 -0700572 ret = apply_bus_vote(iommu_drvdata, 1);
573 if (ret)
574 goto fail;
575
Stepan Moskovchenko6751acc2012-06-21 17:36:47 -0700576 ret = __enable_clocks(iommu_drvdata);
577 if (ret) {
Olav Haugan2648d972013-01-07 17:32:31 -0800578 __disable_regulators(iommu_drvdata);
Stepan Moskovchenko6751acc2012-06-21 17:36:47 -0700579 goto fail;
580 }
581
Olav Haugane3885392013-03-06 16:22:53 -0800582 /* We can only do this once */
583 if (!iommu_drvdata->ctx_attach_count) {
Laura Abbottf4daa692012-10-10 19:31:53 -0700584 if (!is_secure) {
Olav Haugance2eab92013-02-07 12:59:18 -0800585 iommu_halt(iommu_drvdata);
Olav Hauganf3782732013-01-11 11:23:30 -0800586 __program_iommu(iommu_drvdata->base);
Olav Haugance2eab92013-02-07 12:59:18 -0800587 iommu_resume(iommu_drvdata);
Laura Abbottf4daa692012-10-10 19:31:53 -0700588 } else {
589 ret = msm_iommu_sec_program_iommu(
590 iommu_drvdata->sec_id);
591 if (ret) {
Olav Haugan2648d972013-01-07 17:32:31 -0800592 __disable_regulators(iommu_drvdata);
Laura Abbottf4daa692012-10-10 19:31:53 -0700593 __disable_clocks(iommu_drvdata);
594 goto fail;
595 }
596 }
Olav Hauganf3782732013-01-11 11:23:30 -0800597 program_iommu_bfb_settings(iommu_drvdata->base,
598 iommu_drvdata->bfb_settings);
Laura Abbottf4daa692012-10-10 19:31:53 -0700599 }
Steve Mucklef132c6c2012-06-06 18:30:57 -0700600
Olav Haugance2eab92013-02-07 12:59:18 -0800601 iommu_halt(iommu_drvdata);
602
Olav Haugan4e315c42013-03-06 10:14:28 -0800603 __program_context(iommu_drvdata, ctx_drvdata, priv, is_secure);
Olav Haugan26ddd432012-12-07 11:39:21 -0800604
Olav Haugancd932192013-01-31 18:30:15 -0800605 iommu_resume(iommu_drvdata);
606
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700607 __disable_clocks(iommu_drvdata);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700608
Steve Mucklef132c6c2012-06-06 18:30:57 -0700609 list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
610 ctx_drvdata->attached_domain = domain;
Olav Haugane3885392013-03-06 16:22:53 -0800611 ++iommu_drvdata->ctx_attach_count;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700612
Olav Haugan5ebfbc62013-01-07 17:49:10 -0800613 mutex_unlock(&msm_iommu_lock);
614
615 msm_iommu_attached(dev->parent);
616 return ret;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700617fail:
618 mutex_unlock(&msm_iommu_lock);
619 return ret;
620}
621
/*
 * Detach a context device from its domain: invalidate the context's
 * TLB entries, drop its ASID reference, reset the context bank (and
 * release its stream-mapping entries when non-secure) under an IOMMU
 * halt, then remove the bus and regulator votes taken at attach time.
 * Silently returns on invalid arguments or clock-enable failure.
 */
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;
	int is_secure;

	msm_iommu_detached(dev->parent);

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	is_secure = iommu_drvdata->sec_id != -1;

	/* Flush all TLB entries tagged with this context's ASID */
	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, ctx_drvdata->asid);

	/* Release this context's reference on the (possibly shared) ASID */
	BUG_ON(iommu_drvdata->asid[ctx_drvdata->asid - 1] == 0);
	iommu_drvdata->asid[ctx_drvdata->asid - 1]--;
	ctx_drvdata->asid = -1;

	iommu_halt(iommu_drvdata);

	__reset_context(iommu_drvdata->base, ctx_drvdata->num);
	if (!is_secure)
		__release_smg(iommu_drvdata->base, ctx_drvdata->num);

	iommu_resume(iommu_drvdata);

	__disable_clocks(iommu_drvdata);

	/* Undo the bus and regulator votes taken in msm_iommu_attach_dev() */
	apply_bus_vote(iommu_drvdata, 0);

	__disable_regulators(iommu_drvdata);

	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;
	BUG_ON(iommu_drvdata->ctx_attach_count == 0);
	--iommu_drvdata->ctx_attach_count;
fail:
	mutex_unlock(&msm_iommu_lock);
}
676
677static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
678 phys_addr_t pa, size_t len, int prot)
679{
Olav Haugan090614f2013-03-22 12:14:18 -0700680 struct msm_iommu_priv *priv;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700681 int ret = 0;
682
683 mutex_lock(&msm_iommu_lock);
684
685 priv = domain->priv;
686 if (!priv) {
687 ret = -EINVAL;
688 goto fail;
689 }
690
691 ret = msm_iommu_pagetable_map(&priv->pt, va, pa, len, prot);
692 if (ret)
693 goto fail;
694
Steve Mucklef132c6c2012-06-06 18:30:57 -0700695fail:
696 mutex_unlock(&msm_iommu_lock);
697 return ret;
698}
699
700static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
701 size_t len)
702{
Olav Haugan090614f2013-03-22 12:14:18 -0700703 struct msm_iommu_priv *priv;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700704 int ret = -ENODEV;
705
706 mutex_lock(&msm_iommu_lock);
707
708 priv = domain->priv;
709 if (!priv)
710 goto fail;
711
712 ret = msm_iommu_pagetable_unmap(&priv->pt, va, len);
713 if (ret < 0)
714 goto fail;
715
716 ret = __flush_iotlb_va(domain, va);
717fail:
718 mutex_unlock(&msm_iommu_lock);
719
720 /* the IOMMU API requires us to return how many bytes were unmapped */
721 len = ret ? 0 : len;
722 return len;
723}
724
725static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
726 struct scatterlist *sg, unsigned int len,
727 int prot)
728{
729 int ret;
Olav Haugan090614f2013-03-22 12:14:18 -0700730 struct msm_iommu_priv *priv;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700731
732 mutex_lock(&msm_iommu_lock);
733
734 priv = domain->priv;
735 if (!priv) {
736 ret = -EINVAL;
737 goto fail;
738 }
739
740 ret = msm_iommu_pagetable_map_range(&priv->pt, va, sg, len, prot);
741 if (ret)
742 goto fail;
743
Steve Mucklef132c6c2012-06-06 18:30:57 -0700744fail:
745 mutex_unlock(&msm_iommu_lock);
746 return ret;
747}
748
749
750static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
751 unsigned int len)
752{
Olav Haugan090614f2013-03-22 12:14:18 -0700753 struct msm_iommu_priv *priv;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700754
755 mutex_lock(&msm_iommu_lock);
756
757 priv = domain->priv;
758 msm_iommu_pagetable_unmap_range(&priv->pt, va, len);
759
760 __flush_iotlb(domain);
761 mutex_unlock(&msm_iommu_lock);
762 return 0;
763}
764
765static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
766 unsigned long va)
767{
Olav Haugan090614f2013-03-22 12:14:18 -0700768 struct msm_iommu_priv *priv;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700769 struct msm_iommu_drvdata *iommu_drvdata;
770 struct msm_iommu_ctx_drvdata *ctx_drvdata;
Olav Haugandf96ffa2013-06-10 10:32:45 -0700771 u64 par;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700772 void __iomem *base;
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700773 phys_addr_t ret = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700774 int ctx;
775
776 mutex_lock(&msm_iommu_lock);
777
778 priv = domain->priv;
779 if (list_empty(&priv->list_attached))
780 goto fail;
781
782 ctx_drvdata = list_entry(priv->list_attached.next,
783 struct msm_iommu_ctx_drvdata, attached_elm);
784 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
785
786 base = iommu_drvdata->base;
787 ctx = ctx_drvdata->num;
788
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700789 ret = __enable_clocks(iommu_drvdata);
790 if (ret) {
791 ret = 0; /* 0 indicates translation failed */
792 goto fail;
793 }
794
Steve Mucklef132c6c2012-06-06 18:30:57 -0700795 SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
796 mb();
797 while (GET_CB_ATSR_ACTIVE(base, ctx))
798 cpu_relax();
799
800 par = GET_PAR(base, ctx);
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700801 __disable_clocks(iommu_drvdata);
802
Steve Mucklef132c6c2012-06-06 18:30:57 -0700803 if (par & CB_PAR_F) {
Olav Haugandf96ffa2013-06-10 10:32:45 -0700804 unsigned int level = (par & CB_PAR_PLVL) >> CB_PAR_PLVL_SHIFT;
805 pr_err("IOMMU translation fault!\n");
806 pr_err("name = %s\n", iommu_drvdata->name);
807 pr_err("context = %s (%d)\n", ctx_drvdata->name,
808 ctx_drvdata->num);
809 pr_err("Interesting registers:\n");
810 pr_err("PAR = %16llx [%s%s%s%s%s%s%s%sPLVL%u %s]\n", par,
811 (par & CB_PAR_F) ? "F " : "",
812 (par & CB_PAR_TF) ? "TF " : "",
813 (par & CB_PAR_AFF) ? "AFF " : "",
814 (par & CB_PAR_PF) ? "PF " : "",
815 (par & CB_PAR_EF) ? "EF " : "",
816 (par & CB_PAR_TLBMCF) ? "TLBMCF " : "",
817 (par & CB_PAR_TLBLKF) ? "TLBLKF " : "",
818 (par & CB_PAR_ATOT) ? "ATOT " : "",
819 level,
820 (par & CB_PAR_STAGE) ? "S2 " : "S1 ");
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700821 ret = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700822 } else {
823 /* We are dealing with a supersection */
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700824 if (ret & CB_PAR_SS)
825 ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700826 else /* Upper 20 bits from PAR, lower 12 from VA */
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700827 ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700828 }
829
830fail:
831 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0bab7482012-06-21 17:15:01 -0700832 return ret;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700833}
834
/* IOMMU API capability query: this driver advertises no capabilities. */
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}
840
Mitchel Humpherys2b6e5c52013-06-05 16:01:54 -0700841void print_ctx_regs(struct msm_iommu_context_reg regs[])
Steve Mucklef132c6c2012-06-06 18:30:57 -0700842{
Mitchel Humpherys2b6e5c52013-06-05 16:01:54 -0700843 uint32_t fsr = regs[DUMP_REG_FSR].val;
Olav Haugancdb13112013-06-21 17:45:34 -0700844 u64 ttbr;
Mitchel Humpherys9e90db32013-05-21 17:37:22 -0700845
Mitchel Humpherys2b6e5c52013-06-05 16:01:54 -0700846 pr_err("FAR = %016llx\n",
847 COMBINE_DUMP_REG(
848 regs[DUMP_REG_FAR1].val,
849 regs[DUMP_REG_FAR0].val));
850 pr_err("PAR = %016llx\n",
851 COMBINE_DUMP_REG(
852 regs[DUMP_REG_PAR1].val,
853 regs[DUMP_REG_PAR0].val));
Steve Mucklef132c6c2012-06-06 18:30:57 -0700854 pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
855 (fsr & 0x02) ? "TF " : "",
856 (fsr & 0x04) ? "AFF " : "",
857 (fsr & 0x08) ? "PF " : "",
858 (fsr & 0x10) ? "EF " : "",
859 (fsr & 0x20) ? "TLBMCF " : "",
860 (fsr & 0x40) ? "TLBLKF " : "",
861 (fsr & 0x80) ? "MHF " : "",
862 (fsr & 0x40000000) ? "SS " : "",
863 (fsr & 0x80000000) ? "MULTI " : "");
864
865 pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
Mitchel Humpherys2b6e5c52013-06-05 16:01:54 -0700866 regs[DUMP_REG_FSYNR0].val, regs[DUMP_REG_FSYNR1].val);
Olav Haugancdb13112013-06-21 17:45:34 -0700867
868 ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR0_1].val,
869 regs[DUMP_REG_TTBR0_0].val);
870 if (regs[DUMP_REG_TTBR0_1].valid)
871 pr_err("TTBR0 = %016llx\n", ttbr);
872 else
873 pr_err("TTBR0 = %016llx (32b)\n", ttbr);
874
875 ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR1_1].val,
876 regs[DUMP_REG_TTBR1_0].val);
877
878 if (regs[DUMP_REG_TTBR1_1].valid)
879 pr_err("TTBR1 = %016llx\n", ttbr);
880 else
881 pr_err("TTBR1 = %016llx (32b)\n", ttbr);
882
Steve Mucklef132c6c2012-06-06 18:30:57 -0700883 pr_err("SCTLR = %08x ACTLR = %08x\n",
Mitchel Humpherys2b6e5c52013-06-05 16:01:54 -0700884 regs[DUMP_REG_SCTLR].val, regs[DUMP_REG_ACTLR].val);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700885 pr_err("PRRR = %08x NMRR = %08x\n",
Mitchel Humpherys2b6e5c52013-06-05 16:01:54 -0700886 regs[DUMP_REG_PRRR].val, regs[DUMP_REG_NMRR].val);
Mitchel Humpherys9e90db32013-05-21 17:37:22 -0700887}
888
889static void __print_ctx_regs(void __iomem *base, int ctx, unsigned int fsr)
890{
Olav Hauganbab41422013-06-17 15:06:01 -0700891 struct msm_iommu_context_reg regs[MAX_DUMP_REGS];
892 unsigned int i;
893
894 for (i = DUMP_REG_FIRST; i < MAX_DUMP_REGS; ++i) {
895 regs[i].val = GET_CTX_REG(dump_regs_tbl[i].key, base, ctx);
896 regs[i].valid = 1;
897 }
Mitchel Humpherys2b6e5c52013-06-05 16:01:54 -0700898 print_ctx_regs(regs);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700899}
900
/*
 * msm_iommu_fault_handler_v2() - threaded IRQ handler for IOMMU context
 * faults.
 *
 * @irq:    interrupt number (unused).
 * @dev_id: the context bank's platform_device, registered as the IRQ
 *          cookie.
 *
 * Resolves the IOMMU driver data (from the parent device) and the context
 * driver data, then reads and reports the fault status.  Returns
 * IRQ_HANDLED when a fault was processed (or when nothing can be read
 * because the IOMMU is powered off), IRQ_NONE otherwise.
 *
 * Uses a mutex and therefore must run in thread context, not hard-IRQ
 * context.
 */
irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct msm_iommu_drvdata *drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int fsr;
	int ret;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(!pdev);

	/* The IOMMU drvdata lives on the parent; the ctx drvdata on us. */
	drvdata = dev_get_drvdata(pdev->dev.parent);
	BUG_ON(!drvdata);

	ctx_drvdata = dev_get_drvdata(&pdev->dev);
	BUG_ON(!ctx_drvdata);

	/* No context attached implies the IOMMU is not powered. */
	if (!drvdata->ctx_attach_count) {
		pr_err("Unexpected IOMMU page fault!\n");
		pr_err("name = %s\n", drvdata->name);
		pr_err("Power is OFF. Unable to read page fault information\n");
		/*
		 * We cannot determine which context bank caused the issue so
		 * we just return handled here to ensure IRQ handler code is
		 * happy
		 */
		ret = IRQ_HANDLED;
		goto fail;
	}

	/* Clocks must be on before any register access below. */
	ret = __enable_clocks(drvdata);
	if (ret) {
		ret = IRQ_NONE;
		goto fail;
	}

	fsr = GET_FSR(drvdata->base, ctx_drvdata->num);
	if (fsr) {
		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			ret = -ENOSYS;
		} else
			/* Let the domain's registered fault handler decide. */
			ret = report_iommu_fault(ctx_drvdata->attached_domain,
				&ctx_drvdata->pdev->dev,
				GET_FAR(drvdata->base, ctx_drvdata->num), 0);

		/* -ENOSYS: no handler installed — dump diagnostics here. */
		if (ret == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault!\n");
			pr_err("name = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name,
							ctx_drvdata->num);
			pr_err("Interesting registers:\n");
			__print_ctx_regs(drvdata->base, ctx_drvdata->num, fsr);
		}

		/*
		 * Write the sampled status back to FSR — presumably
		 * write-1-to-clear, acknowledging the fault. TODO confirm
		 * against the SMMU register spec.
		 */
		SET_FSR(drvdata->base, ctx_drvdata->num, fsr);
		ret = IRQ_HANDLED;
	} else
		ret = IRQ_NONE;

	__disable_clocks(drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
967
968static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
969{
Olav Haugan090614f2013-03-22 12:14:18 -0700970 struct msm_iommu_priv *priv = domain->priv;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700971 return __pa(priv->pt.fl_table);
972}
973
Olav Haugancdb13112013-06-21 17:45:34 -0700974#define DUMP_REG_INIT(dump_reg, cb_reg, mbp) \
Olav Hauganbab41422013-06-17 15:06:01 -0700975 do { \
976 dump_regs_tbl[dump_reg].key = cb_reg; \
977 dump_regs_tbl[dump_reg].name = #cb_reg; \
Olav Haugancdb13112013-06-21 17:45:34 -0700978 dump_regs_tbl[dump_reg].must_be_present = mbp; \
Olav Hauganbab41422013-06-17 15:06:01 -0700979 } while (0)
980
/*
 * Populate dump_regs_tbl[] with the context-bank registers captured on a
 * fault.  "+ 4" entries are the upper 32-bit words of 64-bit registers.
 * The TTBR upper words are the only entries whose must-be-present flag is
 * 0 (they may be absent on configurations without the wider TTBRs).
 */
static void msm_iommu_build_dump_regs_table(void)
{
	DUMP_REG_INIT(DUMP_REG_FAR0, CB_FAR, 1);
	DUMP_REG_INIT(DUMP_REG_FAR1, CB_FAR + 4, 1);
	DUMP_REG_INIT(DUMP_REG_PAR0, CB_PAR, 1);
	DUMP_REG_INIT(DUMP_REG_PAR1, CB_PAR + 4, 1);
	DUMP_REG_INIT(DUMP_REG_FSR, CB_FSR, 1);
	DUMP_REG_INIT(DUMP_REG_FSYNR0, CB_FSYNR0, 1);
	DUMP_REG_INIT(DUMP_REG_FSYNR1, CB_FSYNR1, 1);
	DUMP_REG_INIT(DUMP_REG_TTBR0_0, CB_TTBR0, 1);
	DUMP_REG_INIT(DUMP_REG_TTBR0_1, CB_TTBR0 + 4, 0);
	DUMP_REG_INIT(DUMP_REG_TTBR1_0, CB_TTBR1, 1);
	DUMP_REG_INIT(DUMP_REG_TTBR1_1, CB_TTBR1 + 4, 0);
	DUMP_REG_INIT(DUMP_REG_SCTLR, CB_SCTLR, 1);
	DUMP_REG_INIT(DUMP_REG_ACTLR, CB_ACTLR, 1);
	DUMP_REG_INIT(DUMP_REG_PRRR, CB_PRRR, 1);
	DUMP_REG_INIT(DUMP_REG_NMRR, CB_NMRR, 1);
}
999
Steve Mucklef132c6c2012-06-06 18:30:57 -07001000static struct iommu_ops msm_iommu_ops = {
1001 .domain_init = msm_iommu_domain_init,
1002 .domain_destroy = msm_iommu_domain_destroy,
1003 .attach_dev = msm_iommu_attach_dev,
1004 .detach_dev = msm_iommu_detach_dev,
1005 .map = msm_iommu_map,
1006 .unmap = msm_iommu_unmap,
1007 .map_range = msm_iommu_map_range,
1008 .unmap_range = msm_iommu_unmap_range,
1009 .iova_to_phys = msm_iommu_iova_to_phys,
1010 .domain_has_cap = msm_iommu_domain_has_cap,
1011 .get_pt_base_addr = msm_iommu_get_pt_base_addr,
1012 .pgsize_bitmap = MSM_IOMMU_PGSIZES,
1013};
1014
/*
 * Driver init: prepare the pagetable allocator, register our ops with the
 * platform bus, then build the fault register-dump table.  Always returns
 * 0 (the helpers called here report nothing to check).
 */
static int __init msm_iommu_init(void)
{
	msm_iommu_pagetable_init();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	msm_iommu_build_dump_regs_table();

	return 0;
}
1023
/* Register at subsys_initcall level (before ordinary device drivers). */
subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU v2 Driver");