/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include <mach/iommu_perfmon.h>
#include <mach/iommu_hw-v0.h>
#include <mach/msm_iommu_priv.h>
#include <mach/iommu.h>
#include <mach/msm_smem.h>

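/*
 * Helper for reading an ARM CP15 register into 'reg'. Used below to sample
 * the CPU's PRRR/NMRR TEX-remap registers so IOMMU mappings can reuse the
 * same memory-attribute encodings as the CPU.
 */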
#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"	\
: "=r" (reg))

#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)

/* Sharability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NON_SH		0x0
#define MSM_IOMMU_ATTR_SH		0x4

/* Cacheability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NONCACHED	0x0
#define MSM_IOMMU_ATTR_CACHED_WB_WA	0x1
#define MSM_IOMMU_ATTR_CACHED_WB_NWA	0x2
#define MSM_IOMMU_ATTR_CACHED_WT	0x3

struct bus_type msm_iommu_sec_bus_type = {
	.name = "msm_iommu_sec_bus",
};

static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len);

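/*
 * Write back a range of page table entries to memory. This is only needed
 * when the tables are not walked through the L2 cache (redirect == 0), so
 * that the hardware table walker sees the update.
 */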
static inline void clean_pte(unsigned long *start, unsigned long *end,
			     int redirect)
{
	if (!redirect)
		dmac_flush_range(start, end);
}

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static int msm_iommu_tex_class[4];

DEFINE_MUTEX(msm_iommu_lock);

/**
 * Remote spinlock implementation based on Peterson's algorithm, used to
 * synchronize IOMMU config port access between the CPU and the GPU.
 * This side implements Process 0 of the algorithm; the GPU implements
 * Process 1. The flag and turn variables are kept in shared memory so the
 * GPU can access them.
 */
struct msm_iommu_remote_lock {
	int initialized;
	struct remote_iommu_petersons_spinlock *lock;
};

static struct msm_iommu_remote_lock msm_iommu_remote_lock;

#ifdef CONFIG_MSM_IOMMU_GPU_SYNC
static void _msm_iommu_remote_spin_lock_init(void)
{
	msm_iommu_remote_lock.lock = smem_alloc(SMEM_SPINLOCK_ARRAY, 32);
	memset(msm_iommu_remote_lock.lock, 0,
			sizeof(*msm_iommu_remote_lock.lock));
}

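/*
 * Acquire the APPS (process 0) side of the Peterson lock: raise our flag,
 * hand the turn to the GPU and spin until the GPU is either not contending
 * or has given the turn back.
 */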
void msm_iommu_remote_p0_spin_lock(void)
{
	msm_iommu_remote_lock.lock->flag[PROC_APPS] = 1;
	msm_iommu_remote_lock.lock->turn = 1;

	smp_mb();

	while (msm_iommu_remote_lock.lock->flag[PROC_GPU] == 1 &&
	       msm_iommu_remote_lock.lock->turn == 1)
		cpu_relax();
}

void msm_iommu_remote_p0_spin_unlock(void)
{
	smp_mb();

	msm_iommu_remote_lock.lock->flag[PROC_APPS] = 0;
}
#endif

inline void msm_iommu_mutex_lock(void)
{
	mutex_lock(&msm_iommu_lock);
}

inline void msm_iommu_mutex_unlock(void)
{
	mutex_unlock(&msm_iommu_lock);
}

void *msm_iommu_lock_initialize(void)
{
	mutex_lock(&msm_iommu_lock);
	if (!msm_iommu_remote_lock.initialized) {
		msm_iommu_remote_lock_init();
		msm_iommu_remote_lock.initialized = 1;
	}
	mutex_unlock(&msm_iommu_lock);
	return msm_iommu_remote_lock.lock;
}

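/*
 * Enable the IOMMU clocks in dependency order: pclk first, then the optional
 * core clock and aclk. Anything already enabled is unwound before returning
 * an error.
 */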
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_prepare_enable(drvdata->pclk);
	if (ret)
		goto fail;

	if (drvdata->clk) {
		ret = clk_prepare_enable(drvdata->clk);
		if (ret)
			clk_disable_unprepare(drvdata->pclk);
	}

	if (ret)
		goto fail;

	if (drvdata->aclk) {
		ret = clk_prepare_enable(drvdata->aclk);
		if (ret) {
			clk_disable_unprepare(drvdata->clk);
			clk_disable_unprepare(drvdata->pclk);
		}
	}

fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->aclk)
		clk_disable_unprepare(drvdata->aclk);
	if (drvdata->clk)
		clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}

static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
{
	/* No need to do anything. IOMMUv0 is always on. */
	return 0;
}

static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
{
	/* No need to do anything. IOMMUv0 is always on. */
}

static void *_iommu_lock_initialize(void)
{
	return msm_iommu_lock_initialize();
}

static void _iommu_lock_acquire(void)
{
	msm_iommu_lock();
}

static void _iommu_lock_release(void)
{
	msm_iommu_unlock();
}

struct iommu_access_ops iommu_access_ops_v0 = {
	.iommu_power_on = __enable_regulators,
	.iommu_power_off = __disable_regulators,
	.iommu_clk_on = __enable_clocks,
	.iommu_clk_off = __disable_clocks,
	.iommu_lock_initialize = _iommu_lock_initialize,
	.iommu_lock_acquire = _iommu_lock_acquire,
	.iommu_lock_release = _iommu_lock_release,
};
EXPORT_SYMBOL(iommu_access_ops_v0);

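/*
 * Invalidate a single VA, tagged with the context's ASID, in the TLB of every
 * context attached to this domain. Clocks are enabled around the register
 * writes and the remote (GPU) lock is held while touching the config port.
 */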
static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		msm_iommu_remote_spin_lock();

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   asid | (va & TLBIVA_VA));
		mb();

		msm_iommu_remote_spin_unlock();

		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		msm_iommu_remote_spin_lock();

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
		mb();

		msm_iommu_remote_spin_unlock();

		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static void __reset_context(void __iomem *base, void __iomem *glb_base, int ctx)
{
	SET_BPRCOSH(glb_base, ctx, 0);
	SET_BPRCISH(glb_base, ctx, 0);
	SET_BPRCNSH(glb_base, ctx, 0);
	SET_BPSHCFG(glb_base, ctx, 0);
	SET_BPMTCFG(glb_base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();
}

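/*
 * Program a context bank: point it at the domain's page table, set up TEX
 * remapping and (optionally) cacheable table walks, pick an ASID (reusing the
 * ASID of any other context sharing the same page table), and enable the MMU.
 */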
static void __program_context(void __iomem *base, void __iomem *glb_base,
			      int ctx, int ncb, phys_addr_t pgtable,
			      int redirect, int ttbr_split)
{
	unsigned int prrr, nmrr;
	int i, j, found;
	msm_iommu_remote_spin_lock();

	__reset_context(base, glb_base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, ttbr_split);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));
	if (ttbr_split)
		SET_TTBR1_PA(base, ctx, (pgtable >> TTBR1_PA_SHIFT));

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_TTBR0_SH(base, ctx, 1);
		SET_TTBR1_SH(base, ctx, 1);

		SET_TTBR0_NOS(base, ctx, 1);
		SET_TTBR1_NOS(base, ctx, 1);

		SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR0_IRGNL(base, ctx, 1);

		SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR1_IRGNL(base, ctx, 1);

		SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
		SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
	}

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx,
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();

	msm_iommu_remote_spin_unlock();
}

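/*
 * Allocate the per-domain private data and the 16 KB first-level page table
 * (4096 entries, each covering 1 MB of IOVA space).
 */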
static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_iommu_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);
	priv->pt.fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
							      get_order(SZ_16K));

	if (!priv->pt.fl_table)
		goto fail_nomem;

#ifdef CONFIG_IOMMU_PGTABLES_L2
	priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
#endif

	memset(priv->pt.fl_table, 0, SZ_16K);
	domain->priv = priv;

	clean_pte(priv->pt.fl_table, priv->pt.fl_table + NUM_FL_PTE,
		  priv->pt.redirect);

	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv;
	unsigned long *fl_table;
	int i;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pt.fl_table;

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
								FL_BASE_MASK)));

		free_pages((unsigned long)priv->pt.fl_table, get_order(SZ_16K));
		priv->pt.fl_table = NULL;
	}

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}

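/*
 * Attach a context device to a domain. The first attach programs the context
 * bank; later attaches only bump the reference count.
 */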
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto unlock;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);

	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto unlock;
	}

	++ctx_drvdata->attach_count;

	if (ctx_drvdata->attach_count > 1)
		goto unlock;

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto unlock;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto unlock;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto unlock;

	__program_context(iommu_drvdata->base, iommu_drvdata->glb_base,
			  ctx_drvdata->num, iommu_drvdata->ncb,
			  __pa(priv->pt.fl_table), priv->pt.redirect,
			  iommu_drvdata->ttbr_split);

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);

	ctx_drvdata->attached_domain = domain;

	mutex_unlock(&msm_iommu_lock);

	msm_iommu_attached(dev->parent);
	return ret;
unlock:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	msm_iommu_detached(dev->parent);

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;

	if (!priv || !dev)
		goto unlock;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);

	if (!iommu_drvdata || !ctx_drvdata)
		goto unlock;

	--ctx_drvdata->attach_count;
	BUG_ON(ctx_drvdata->attach_count < 0);

	if (ctx_drvdata->attach_count > 0)
		goto unlock;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto unlock;

	msm_iommu_remote_spin_lock();

	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
		     GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_drvdata->num));

	__reset_context(iommu_drvdata->base, iommu_drvdata->glb_base,
			ctx_drvdata->num);

	msm_iommu_remote_spin_unlock();

	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;
unlock:
	mutex_unlock(&msm_iommu_lock);
}

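/*
 * Translate IOMMU_* prot flags into ARM first- or second-level descriptor
 * attribute bits (AP, TEX/C/B, shareability) for the requested mapping size.
 * Returns 0 if the resulting TEX class is out of range.
 */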
static int __get_pgprot(int prot, int len)
{
	unsigned int pgprot;
	int tex;

	if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
		prot |= IOMMU_READ | IOMMU_WRITE;
		WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
	}

	if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
		prot |= IOMMU_READ;
		WARN_ONCE(1, "Write-only iommu mappings unsupported; falling back to RW\n");
	}

	if (prot & IOMMU_CACHE)
		tex = (pgprot_kernel >> 2) & 0x07;
	else
		tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];

	if (tex < 0 || tex > NUM_TEX_CLASS - 1)
		return 0;

	if (len == SZ_16M || len == SZ_1M) {
		pgprot = FL_SHARED;
		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
		pgprot |= FL_AP0 | FL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
	} else {
		pgprot = SL_SHARED;
		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
		pgprot |= SL_AP0 | SL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
	}

	return pgprot;
}

static unsigned long *make_second_level(struct msm_iommu_priv *priv,
					unsigned long *fl_pte)
{
	unsigned long *sl;
	sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
			get_order(SZ_4K));

	if (!sl) {
		pr_debug("Could not allocate second level table\n");
		goto fail;
	}
	memset(sl, 0, SZ_4K);
	clean_pte(sl, sl + NUM_SL_PTE, priv->pt.redirect);

	*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
			FL_TYPE_TABLE);

	clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
fail:
	return sl;
}

static int sl_4k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
	int ret = 0;

	if (*sl_pte) {
		ret = -EBUSY;
		goto fail;
	}

	*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
		| SL_TYPE_SMALL | pgprot;
fail:
	return ret;
}

static int sl_64k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
	int ret = 0;

	int i;

	for (i = 0; i < 16; i++)
		if (*(sl_pte+i)) {
			ret = -EBUSY;
			goto fail;
		}

	for (i = 0; i < 16; i++)
		*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
			| SL_SHARED | SL_TYPE_LARGE | pgprot;

fail:
	return ret;
}


static inline int fl_1m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
	if (*fl_pte)
		return -EBUSY;

	*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
		| pgprot;

	return 0;
}


static inline int fl_16m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
	int i;
	int ret = 0;
	for (i = 0; i < 16; i++)
		if (*(fl_pte+i)) {
			ret = -EBUSY;
			goto fail;
		}
	for (i = 0; i < 16; i++)
		*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
			| FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
fail:
	return ret;
}

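/*
 * Map a single 4 KB, 64 KB, 1 MB or 16 MB chunk. Sections and supersections
 * live in the first-level table; the smaller sizes allocate a second-level
 * table on demand. The TLB entry for the VA is invalidated afterwards.
 */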
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_iommu_priv *priv;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	unsigned int pgprot;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	fl_table = priv->pt.fl_table;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad size: %d\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	pgprot = __get_pgprot(prot, len);

	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (len == SZ_16M) {
		ret = fl_16m(fl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(fl_pte, fl_pte + 16, priv->pt.redirect);
	}

	if (len == SZ_1M) {
		ret = fl_1m(fl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
	}

	/* Need a 2nd level table */
	if (len == SZ_4K || len == SZ_64K) {

		if (*fl_pte == 0) {
			if (make_second_level(priv, fl_pte) == NULL) {
				ret = -ENOMEM;
				goto fail;
			}
		}

		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_4K) {
		ret = sl_4k(sl_pte, pa, pgprot);
		if (ret)
			goto fail;

		clean_pte(sl_pte, sl_pte + 1, priv->pt.redirect);
	}

	if (len == SZ_64K) {
		ret = sl_64k(sl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(sl_pte, sl_pte + 16, priv->pt.redirect);
	}

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

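/*
 * Tear down one mapping of the given size, freeing the second-level table
 * when it no longer contains any entries. Returns the number of bytes
 * unmapped (0 on failure), as the IOMMU API requires.
 */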
static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			      size_t len)
{
	struct msm_iommu_priv *priv;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	int i, ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv)
		goto fail;

	fl_table = priv->pt.fl_table;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad length: %d\n", len);
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (*fl_pte == 0) {
		pr_debug("First level PTE is 0\n");
		goto fail;
	}

	/* Unmap supersection */
	if (len == SZ_16M) {
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = 0;

		clean_pte(fl_pte, fl_pte + 16, priv->pt.redirect);
	}

	if (len == SZ_1M) {
		*fl_pte = 0;

		clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_64K) {
		for (i = 0; i < 16; i++)
			*(sl_pte+i) = 0;

		clean_pte(sl_pte, sl_pte + 16, priv->pt.redirect);
	}

	if (len == SZ_4K) {
		*sl_pte = 0;

		clean_pte(sl_pte, sl_pte + 1, priv->pt.redirect);
	}

	if (len == SZ_4K || len == SZ_64K) {
		int used = 0;

		for (i = 0; i < NUM_SL_PTE; i++)
			if (sl_table[i])
				used = 1;
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
		}
	}

	ret = __flush_iotlb_va(domain, va);

fail:
	mutex_unlock(&msm_iommu_lock);

	/* the IOMMU API requires us to return how many bytes were unmapped */
	len = ret ? 0 : len;
	return len;
}

static unsigned int get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	unsigned int pa = sg_dma_address(sg);
	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
				   int align)
{
	return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
		&& (len >= align);
}

static int check_range(unsigned long *fl_table, unsigned int va,
		       unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int i;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			sl_start = SL_OFFSET(va);
			sl_table = __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			for (i = sl_start; i < sl_end; i++) {
				if (sl_table[i] != 0) {
					pr_err("%08x - %08x already mapped\n",
						va, va + SZ_4K);
					return -EBUSY;
				}
				offset += SZ_4K;
				va += SZ_4K;
			}

			sl_start = 0;
		} else {
			if (*fl_pte != 0) {
				pr_err("%08x - %08x already mapped\n",
					va, va + SZ_1M);
				return -EBUSY;
			}
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}
	return 0;
}

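/*
 * Map a scatterlist into a contiguous IOVA range, opportunistically using
 * 16 MB, 1 MB or 64 KB chunks whenever the VA, PA and remaining length are
 * suitably aligned. Partially-created mappings are rolled back on error.
 */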
static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	unsigned int pa;
	unsigned int start_va = va;
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table = NULL;
	unsigned long sl_offset, sl_start;
	unsigned int chunk_size, chunk_offset = 0;
	int ret = 0;
	struct msm_iommu_priv *priv;
	unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pt.fl_table;

	pgprot4k = __get_pgprot(prot, SZ_4K);
	pgprot64k = __get_pgprot(prot, SZ_64K);
	pgprot1m = __get_pgprot(prot, SZ_1M);
	pgprot16m = __get_pgprot(prot, SZ_16M);

	if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
		ret = -EINVAL;
		goto fail;
	}
	ret = check_range(fl_table, va, len);
	if (ret)
		goto fail;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
	pa = get_phys_addr(sg);

	while (offset < len) {
		chunk_size = SZ_4K;

		if (is_fully_aligned(va, pa, sg->length - chunk_offset,
				     SZ_16M))
			chunk_size = SZ_16M;
		else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					  SZ_1M))
			chunk_size = SZ_1M;
		/* 64k or 4k determined later */

		/* for 1M and 16M, only first level entries are required */
		if (chunk_size >= SZ_1M) {
			if (chunk_size == SZ_16M) {
				ret = fl_16m(fl_pte, pa, pgprot16m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 16,
					  priv->pt.redirect);
				fl_pte += 16;
			} else if (chunk_size == SZ_1M) {
				ret = fl_1m(fl_pte, pa, pgprot1m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 1,
					  priv->pt.redirect);
				fl_pte++;
			}

			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
			}
			continue;
		}
		/* for 4K or 64K, make sure there is a second level table */
		if (*fl_pte == 0) {
			if (!make_second_level(priv, fl_pte)) {
				ret = -ENOMEM;
				goto fail;
			}
		}
		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
		sl_table = __va(((*fl_pte) & FL_BASE_MASK));
		sl_offset = SL_OFFSET(va);
		/* Keep track of initial position so we
		 * don't clean more than we have to
		 */
		sl_start = sl_offset;

		/* Build the 2nd level page table */
		while (offset < len && sl_offset < NUM_SL_PTE) {

			/* Map a large 64K page if the chunk is large enough and
			 * the pa and va are aligned
			 */

			if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					     SZ_64K))
				chunk_size = SZ_64K;
			else
				chunk_size = SZ_4K;

			if (chunk_size == SZ_4K) {
				sl_4k(&sl_table[sl_offset], pa, pgprot4k);
				sl_offset++;
			} else {
				BUG_ON(sl_offset + 16 > NUM_SL_PTE);
				sl_64k(&sl_table[sl_offset], pa, pgprot64k);
				sl_offset += 16;
			}

			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
			}
		}

		clean_pte(sl_table + sl_start, sl_table + sl_offset,
			  priv->pt.redirect);

		fl_pte++;
		sl_offset = 0;
	}
	__flush_iotlb(domain);
fail:
	mutex_unlock(&msm_iommu_lock);
	if (ret && offset > 0)
		msm_iommu_unmap_range(domain, start_va, offset);
	return ret;
}


static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int used, i;
	struct msm_iommu_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pt.fl_table;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			sl_start = SL_OFFSET(va);
			sl_table = __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
			clean_pte(sl_table + sl_start, sl_table + sl_end,
				  priv->pt.redirect);

			offset += (sl_end - sl_start) * SZ_4K;
			va += (sl_end - sl_start) * SZ_4K;

			/* Unmap and free the 2nd level table if all mappings
			 * in it were removed. This saves memory, but the table
			 * will need to be re-allocated the next time someone
			 * tries to map these VAs.
			 */
			used = 0;

			/* If we just unmapped the whole table, don't bother
			 * seeing if there are still used entries left.
			 */
			if (sl_end - sl_start != NUM_SL_PTE)
				for (i = 0; i < NUM_SL_PTE; i++)
					if (sl_table[i]) {
						used = 1;
						break;
					}
			if (!used) {
				free_page((unsigned long)sl_table);
				*fl_pte = 0;

				clean_pte(fl_pte, fl_pte + 1,
					  priv->pt.redirect);
			}

			sl_start = 0;
		} else {
			*fl_pte = 0;
			clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}

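/*
 * Resolve an IOVA using the hardware V2P (virtual-to-physical) engine of the
 * first attached context rather than walking the page tables in software.
 */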
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	msm_iommu_remote_spin_lock();

	SET_V2PPR(base, ctx, va & V2Pxx_VA);

	mb();
	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(base, ctx))
		ret = 0;

	msm_iommu_remote_spin_unlock();

	__disable_clocks(iommu_drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR = %08x PAR = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "APF " : "",
			(fsr & 0x10) ? "TLBMF " : "",
			(fsr & 0x20) ? "HTWDEEF " : "",
			(fsr & 0x40) ? "HTWSEEF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x10000) ? "SL " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0 = %08x TTBR1 = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR = %08x ACTLR = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR = %08x NMRR = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

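/*
 * Context fault ISR: read the fault status, report it through the IOMMU
 * fault-handler framework (dumping registers if nobody handles it), clear
 * the fault and resume transactions unless the handler returned -EBUSY.
 */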
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_id;
	struct msm_iommu_drvdata *drvdata;
	void __iomem *base;
	unsigned int fsr, num;
	int ret;

	mutex_lock(&msm_iommu_lock);
	BUG_ON(!ctx_drvdata);

	drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
	BUG_ON(!drvdata);

	base = drvdata->base;
	num = ctx_drvdata->num;

	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	msm_iommu_remote_spin_lock();

	fsr = GET_FSR(base, num);

	if (fsr) {
		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			ret = -ENOSYS;
		} else
			ret = report_iommu_fault(ctx_drvdata->attached_domain,
						 &ctx_drvdata->pdev->dev,
						 GET_FAR(base, num), 0);

		if (ret == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault!\n");
			pr_err("name = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name, num);
			pr_err("Interesting registers:\n");
			print_ctx_regs(base, num);
		}

		SET_FSR(base, num, fsr);
		/*
		 * Only resume fetches if the registered fault handler
		 * allows it
		 */
		if (ret != -EBUSY)
			SET_RESUME(base, num, 1);

		ret = IRQ_HANDLED;
	} else
		ret = IRQ_NONE;

	msm_iommu_remote_spin_unlock();

	__disable_clocks(drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;
	return __pa(priv->pt.fl_table);
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};

static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
	int i = 0;
	unsigned int prrr = 0;
	unsigned int nmrr = 0;
	int c_icp, c_ocp, c_mt, c_nos;

	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);

	for (i = 0; i < NUM_TEX_CLASS; i++) {
		c_nos = PRRR_NOS(prrr, i);
		c_mt = PRRR_MT(prrr, i);
		c_icp = NMRR_ICP(nmrr, i);
		c_ocp = NMRR_OCP(nmrr, i);

		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
			return i;
	}

	return -ENODEV;
}

static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}

static int __init msm_iommu_init(void)
{
	if (!msm_soc_version_supports_iommu_v0())
		return -ENODEV;

	msm_iommu_lock_initialize();

	setup_iommu_tex_classes();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");