blob: c0a47205e83dec8e9ad006eccd6eb5ac5393f712 [file] [log] [blame]
Olav Haugane6d01ef2013-01-25 16:55:44 -08001/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070011 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/errno.h>
18#include <linux/io.h>
19#include <linux/interrupt.h>
20#include <linux/list.h>
21#include <linux/spinlock.h>
22#include <linux/slab.h>
23#include <linux/iommu.h>
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080024#include <linux/clk.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070025#include <linux/scatterlist.h>
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070026
27#include <asm/cacheflush.h>
28#include <asm/sizes.h>
29
Olav Hauganc5993142013-02-04 13:59:39 -080030#include <mach/iommu_perfmon.h>
Olav Haugane6d01ef2013-01-25 16:55:44 -080031#include <mach/iommu_hw-v0.h>
Olav Haugan090614f2013-03-22 12:14:18 -070032#include <mach/msm_iommu_priv.h>
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070033#include <mach/iommu.h>
Olav Haugan65209cd2012-11-07 15:02:56 -080034#include <mach/msm_smsm.h>
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070035
Stepan Moskovchenko100832c2010-11-15 18:20:08 -080036#define MRC(reg, processor, op1, crn, crm, op2) \
37__asm__ __volatile__ ( \
38" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
39: "=r" (reg))
40
41#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
42#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
43
Steve Mucklef132c6c2012-06-06 18:30:57 -070044/* Sharability attributes of MSM IOMMU mappings */
45#define MSM_IOMMU_ATTR_NON_SH 0x0
46#define MSM_IOMMU_ATTR_SH 0x4
47
48/* Cacheability attributes of MSM IOMMU mappings */
49#define MSM_IOMMU_ATTR_NONCACHED 0x0
50#define MSM_IOMMU_ATTR_CACHED_WB_WA 0x1
51#define MSM_IOMMU_ATTR_CACHED_WB_NWA 0x2
52#define MSM_IOMMU_ATTR_CACHED_WT 0x3
53
/* Dummy bus type used to register secure IOMMU devices with the IOMMU core. */
struct bus_type msm_iommu_sec_bus_type = {
	.name = "msm_iommu_sec_bus",
};
Steve Mucklef132c6c2012-06-06 18:30:57 -070057
/*
 * Flush a range of page-table entries from the CPU data cache so the
 * IOMMU's hardware table walker sees them.  When the page tables are
 * L2-redirected (walker-coherent), no cache maintenance is required.
 */
static inline void clean_pte(unsigned long *start, unsigned long *end,
			     int redirect)
{
	if (redirect)
		return;

	dmac_flush_range(start, end);
}
64
Ohad Ben-Cohen83427272011-11-10 11:32:28 +020065/* bitmap of the page sizes currently supported */
66#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
67
Stepan Moskovchenko100832c2010-11-15 18:20:08 -080068static int msm_iommu_tex_class[4];
69
Steve Mucklef132c6c2012-06-06 18:30:57 -070070DEFINE_MUTEX(msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070071
Olav Haugan65209cd2012-11-07 15:02:56 -080072/**
73 * Remote spinlock implementation based on Peterson's algorithm to be used
74 * to synchronize IOMMU config port access between CPU and GPU.
75 * This implements Process 0 of the spin lock algorithm. GPU implements
76 * Process 1. Flag and turn is stored in shared memory to allow GPU to
77 * access these.
78 */
struct msm_iommu_remote_lock {
	int initialized;	/* non-zero once the shared lock has been set up */
	/* Lock lives in SMEM so the GPU (process 1) can access it too. */
	struct remote_iommu_petersons_spinlock *lock;
};

static struct msm_iommu_remote_lock msm_iommu_remote_lock;
85
86#ifdef CONFIG_MSM_IOMMU_GPU_SYNC
87static void _msm_iommu_remote_spin_lock_init(void)
88{
89 msm_iommu_remote_lock.lock = smem_alloc(SMEM_SPINLOCK_ARRAY, 32);
90 memset(msm_iommu_remote_lock.lock, 0,
91 sizeof(*msm_iommu_remote_lock.lock));
92}
93
/*
 * Acquire the CPU (process 0) side of the Peterson's-algorithm lock shared
 * with the GPU (process 1).  The flag/turn store order and the barrier are
 * required by the algorithm; do not reorder these statements.
 */
void msm_iommu_remote_p0_spin_lock(void)
{
	/* Declare interest, then yield the turn to the GPU. */
	msm_iommu_remote_lock.lock->flag[PROC_APPS] = 1;
	msm_iommu_remote_lock.lock->turn = 1;

	/* Make our stores visible before polling the GPU's state. */
	smp_mb();

	/* Spin while the GPU both wants the lock and holds the turn. */
	while (msm_iommu_remote_lock.lock->flag[PROC_GPU] == 1 &&
	       msm_iommu_remote_lock.lock->turn == 1)
		cpu_relax();
}
105
/*
 * Release the CPU side of the remote lock.  The barrier orders all
 * critical-section accesses before the flag is cleared.
 */
void msm_iommu_remote_p0_spin_unlock(void)
{
	smp_mb();

	msm_iommu_remote_lock.lock->flag[PROC_APPS] = 0;
}
112#endif
113
/* Exported wrapper taking the driver-wide IOMMU mutex. */
inline void msm_iommu_mutex_lock(void)
{
	mutex_lock(&msm_iommu_lock);
}
118
/* Exported wrapper releasing the driver-wide IOMMU mutex. */
inline void msm_iommu_mutex_unlock(void)
{
	mutex_unlock(&msm_iommu_lock);
}
123
/*
 * Lazily initialize the remote CPU/GPU lock exactly once (guarded by the
 * driver mutex) and return a pointer to the shared lock structure.  May
 * return NULL when GPU sync is disabled or SMEM allocation failed.
 */
void *msm_iommu_lock_initialize(void)
{
	mutex_lock(&msm_iommu_lock);
	if (!msm_iommu_remote_lock.initialized) {
		msm_iommu_remote_lock_init();
		msm_iommu_remote_lock.initialized = 1;
	}
	mutex_unlock(&msm_iommu_lock);
	return msm_iommu_remote_lock.lock;
}
134
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800135static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
136{
137 int ret;
138
Steve Mucklef132c6c2012-06-06 18:30:57 -0700139 ret = clk_prepare_enable(drvdata->pclk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800140 if (ret)
141 goto fail;
142
143 if (drvdata->clk) {
Steve Mucklef132c6c2012-06-06 18:30:57 -0700144 ret = clk_prepare_enable(drvdata->clk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800145 if (ret)
Steve Mucklef132c6c2012-06-06 18:30:57 -0700146 clk_disable_unprepare(drvdata->pclk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800147 }
148fail:
149 return ret;
150}
151
/* Disable clocks in the reverse order of __enable_clocks(). */
static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->clk)
		clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}
158
Olav Hauganeece7e52013-04-02 10:22:21 -0700159static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
Olav Hauganc5993142013-02-04 13:59:39 -0800160{
Olav Hauganeece7e52013-04-02 10:22:21 -0700161 /* No need to do anything. IOMMUv0 is always on. */
162 return 0;
Olav Hauganc5993142013-02-04 13:59:39 -0800163}
164
Olav Hauganeece7e52013-04-02 10:22:21 -0700165static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
Olav Hauganc5993142013-02-04 13:59:39 -0800166{
Olav Hauganeece7e52013-04-02 10:22:21 -0700167 /* No need to do anything. IOMMUv0 is always on. */
Olav Hauganc5993142013-02-04 13:59:39 -0800168}
169
/* Lock-acquire callback for the iommu_access_ops vtable. */
static void _iommu_lock_acquire(void)
{
	msm_iommu_lock();
}
174
/* Lock-release callback for the iommu_access_ops vtable. */
static void _iommu_lock_release(void)
{
	msm_iommu_unlock();
}
179
/*
 * v0 access callbacks handed to common MSM IOMMU code (e.g. the
 * performance-monitor driver) for power, clock and lock management.
 */
struct iommu_access_ops iommu_access_ops_v0 = {
	.iommu_power_on = __enable_regulators,
	.iommu_power_off = __disable_regulators,
	.iommu_clk_on = __enable_clocks,
	.iommu_clk_off = __disable_clocks,
	.iommu_lock_acquire = _iommu_lock_acquire,
	.iommu_lock_release = _iommu_lock_release,
};
EXPORT_SYMBOL(iommu_access_ops_v0);
189
Steve Mucklef132c6c2012-06-06 18:30:57 -0700190static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
191{
Olav Haugan090614f2013-03-22 12:14:18 -0700192 struct msm_iommu_priv *priv = domain->priv;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700193 struct msm_iommu_drvdata *iommu_drvdata;
194 struct msm_iommu_ctx_drvdata *ctx_drvdata;
195 int ret = 0;
196 int asid;
197
198 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
199 if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
200 BUG();
201
202 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
203 if (!iommu_drvdata)
204 BUG();
205
206 ret = __enable_clocks(iommu_drvdata);
207 if (ret)
208 goto fail;
209
Olav Haugan65209cd2012-11-07 15:02:56 -0800210 msm_iommu_remote_spin_lock();
211
Steve Mucklef132c6c2012-06-06 18:30:57 -0700212 asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
213 ctx_drvdata->num);
214
215 SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
216 asid | (va & TLBIVA_VA));
217 mb();
Olav Haugan65209cd2012-11-07 15:02:56 -0800218
219 msm_iommu_remote_spin_unlock();
220
Steve Mucklef132c6c2012-06-06 18:30:57 -0700221 __disable_clocks(iommu_drvdata);
222 }
223fail:
224 return ret;
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800225}
226
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800227static int __flush_iotlb(struct iommu_domain *domain)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700228{
Olav Haugan090614f2013-03-22 12:14:18 -0700229 struct msm_iommu_priv *priv = domain->priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700230 struct msm_iommu_drvdata *iommu_drvdata;
231 struct msm_iommu_ctx_drvdata *ctx_drvdata;
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800232 int ret = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700233 int asid;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700234
235 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
236 if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
237 BUG();
238
239 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700240 if (!iommu_drvdata)
241 BUG();
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800242
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800243 ret = __enable_clocks(iommu_drvdata);
244 if (ret)
245 goto fail;
246
Olav Haugan65209cd2012-11-07 15:02:56 -0800247 msm_iommu_remote_spin_lock();
248
Steve Mucklef132c6c2012-06-06 18:30:57 -0700249 asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
250 ctx_drvdata->num);
251
252 SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
253 mb();
Olav Haugan65209cd2012-11-07 15:02:56 -0800254
255 msm_iommu_remote_spin_unlock();
256
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800257 __disable_clocks(iommu_drvdata);
258 }
259fail:
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800260 return ret;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700261}
262
/*
 * Return context bank @ctx to its power-on state: clear the barrier/
 * shareability config in the global register space and zero all
 * per-context registers (SCTLR = 0 disables the context's MMU).
 * Caller must have clocks enabled; ordering vs. the GPU is the
 * caller's responsibility (remote lock).
 */
static void __reset_context(void __iomem *base, void __iomem *glb_base, int ctx)
{
	SET_BPRCOSH(glb_base, ctx, 0);
	SET_BPRCISH(glb_base, ctx, 0);
	SET_BPRCNSH(glb_base, ctx, 0);
	SET_BPSHCFG(glb_base, ctx, 0);
	SET_BPMTCFG(glb_base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();	/* ensure all register writes complete before returning */
}
286
/*
 * Fully program context bank @ctx for a domain whose first-level page
 * table is at physical address @pgtable, then enable its MMU.
 *
 * base:       per-instance context register space
 * glb_base:   global register space (used by __reset_context)
 * ncb:        number of context banks on this IOMMU instance
 * redirect:   non-zero when page-table walks are redirected to the
 *             (coherent) L2 slave port
 * ttbr_split: TTBCR split value; when non-zero, TTBR1 is also programmed
 *
 * Caller must have clocks enabled.  The whole sequence is performed
 * under the remote (CPU/GPU) lock.
 */
static void __program_context(void __iomem *base, void __iomem *glb_base,
			      int ctx, int ncb, phys_addr_t pgtable,
			      int redirect, int ttbr_split)
{
	unsigned int prrr, nmrr;
	int i, j, found;
	msm_iommu_remote_spin_lock();

	/* Start from a known-clean context state. */
	__reset_context(base, glb_base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, ttbr_split);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));
	if (ttbr_split)
		SET_TTBR1_PA(base, ctx, (pgtable >> TTBR1_PA_SHIFT));

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes: mirror the CPU's PRRR/NMRR so memory
	 * attributes match between CPU and IOMMU mappings. */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_TTBR0_SH(base, ctx, 1);
		SET_TTBR1_SH(base, ctx, 1);

		SET_TTBR0_NOS(base, ctx, 1);
		SET_TTBR1_NOS(base, ctx, 1);

		SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR0_IRGNL(base, ctx, 1);

		SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR1_IRGNL(base, ctx, 1);

		SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
		SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
	}

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx, \
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		/* Try each candidate ASID i; it is free when no other
		 * context bank already uses it. */
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		/* With ncb contexts and ncb candidate ASIDs, one must be
		 * free; otherwise the ASID bookkeeping is corrupt. */
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();

	msm_iommu_remote_spin_unlock();
}
387
Steve Mucklef132c6c2012-06-06 18:30:57 -0700388static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700389{
Olav Haugan090614f2013-03-22 12:14:18 -0700390 struct msm_iommu_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700391
392 if (!priv)
393 goto fail_nomem;
394
395 INIT_LIST_HEAD(&priv->list_attached);
Olav Haugan090614f2013-03-22 12:14:18 -0700396 priv->pt.fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700397 get_order(SZ_16K));
398
Olav Haugan090614f2013-03-22 12:14:18 -0700399 if (!priv->pt.fl_table)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700400 goto fail_nomem;
401
Steve Mucklef132c6c2012-06-06 18:30:57 -0700402#ifdef CONFIG_IOMMU_PGTABLES_L2
Olav Haugan090614f2013-03-22 12:14:18 -0700403 priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700404#endif
405
Olav Haugan090614f2013-03-22 12:14:18 -0700406 memset(priv->pt.fl_table, 0, SZ_16K);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700407 domain->priv = priv;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700408
Olav Haugan090614f2013-03-22 12:14:18 -0700409 clean_pte(priv->pt.fl_table, priv->pt.fl_table + NUM_FL_PTE,
410 priv->pt.redirect);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700411
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700412 return 0;
413
414fail_nomem:
415 kfree(priv);
416 return -ENOMEM;
417}
418
419static void msm_iommu_domain_destroy(struct iommu_domain *domain)
420{
Olav Haugan090614f2013-03-22 12:14:18 -0700421 struct msm_iommu_priv *priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700422 unsigned long *fl_table;
423 int i;
424
Steve Mucklef132c6c2012-06-06 18:30:57 -0700425 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700426 priv = domain->priv;
427 domain->priv = NULL;
428
429 if (priv) {
Olav Haugan090614f2013-03-22 12:14:18 -0700430 fl_table = priv->pt.fl_table;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700431
432 for (i = 0; i < NUM_FL_PTE; i++)
433 if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
434 free_page((unsigned long) __va(((fl_table[i]) &
435 FL_BASE_MASK)));
436
Olav Haugan090614f2013-03-22 12:14:18 -0700437 free_pages((unsigned long)priv->pt.fl_table, get_order(SZ_16K));
438 priv->pt.fl_table = NULL;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700439 }
440
441 kfree(priv);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700442 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700443}
444
445static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
446{
Olav Haugan090614f2013-03-22 12:14:18 -0700447 struct msm_iommu_priv *priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700448 struct msm_iommu_drvdata *iommu_drvdata;
449 struct msm_iommu_ctx_drvdata *ctx_drvdata;
450 struct msm_iommu_ctx_drvdata *tmp_drvdata;
451 int ret = 0;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700452
Steve Mucklef132c6c2012-06-06 18:30:57 -0700453 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700454
455 priv = domain->priv;
456
457 if (!priv || !dev) {
458 ret = -EINVAL;
Olav Haugane99ee7e2012-12-11 15:02:02 -0800459 goto unlock;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700460 }
461
462 iommu_drvdata = dev_get_drvdata(dev->parent);
463 ctx_drvdata = dev_get_drvdata(dev);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700464
Olav Haugan95d24162012-12-05 14:47:47 -0800465 if (!iommu_drvdata || !ctx_drvdata) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700466 ret = -EINVAL;
Olav Haugane99ee7e2012-12-11 15:02:02 -0800467 goto unlock;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700468 }
469
Olav Haugane99ee7e2012-12-11 15:02:02 -0800470 ++ctx_drvdata->attach_count;
471
472 if (ctx_drvdata->attach_count > 1)
473 goto unlock;
474
Stepan Moskovchenko00d4b2b2010-11-12 19:29:56 -0800475 if (!list_empty(&ctx_drvdata->attached_elm)) {
476 ret = -EBUSY;
Olav Haugane99ee7e2012-12-11 15:02:02 -0800477 goto unlock;
Stepan Moskovchenko00d4b2b2010-11-12 19:29:56 -0800478 }
479
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700480 list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
481 if (tmp_drvdata == ctx_drvdata) {
482 ret = -EBUSY;
Olav Haugane99ee7e2012-12-11 15:02:02 -0800483 goto unlock;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700484 }
485
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800486 ret = __enable_clocks(iommu_drvdata);
487 if (ret)
Olav Haugane99ee7e2012-12-11 15:02:02 -0800488 goto unlock;
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800489
Olav Haugan95d24162012-12-05 14:47:47 -0800490 __program_context(iommu_drvdata->base, iommu_drvdata->glb_base,
491 ctx_drvdata->num, iommu_drvdata->ncb,
Olav Haugan090614f2013-03-22 12:14:18 -0700492 __pa(priv->pt.fl_table), priv->pt.redirect,
Steve Mucklef132c6c2012-06-06 18:30:57 -0700493 iommu_drvdata->ttbr_split);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700494
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800495 __disable_clocks(iommu_drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700496 list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700497
Steve Mucklef132c6c2012-06-06 18:30:57 -0700498 ctx_drvdata->attached_domain = domain;
Olav Hauganc5993142013-02-04 13:59:39 -0800499
500 mutex_unlock(&msm_iommu_lock);
501
502 msm_iommu_attached(dev->parent);
503 return ret;
Olav Haugane99ee7e2012-12-11 15:02:02 -0800504unlock:
Steve Mucklef132c6c2012-06-06 18:30:57 -0700505 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700506 return ret;
507}
508
/*
 * Detach @dev's context from @domain.  attach_count reference-counts
 * attaches, so the hardware context is flushed and reset only on the
 * last detach.
 *
 * NOTE(review): msm_iommu_detached() fires before any argument
 * validation, so it runs even for a NULL/invalid domain or device —
 * confirm this asymmetry with msm_iommu_attach_dev() is intended.
 */
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	msm_iommu_detached(dev->parent);

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;

	if (!priv || !dev)
		goto unlock;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);

	if (!iommu_drvdata || !ctx_drvdata)
		goto unlock;

	--ctx_drvdata->attach_count;
	BUG_ON(ctx_drvdata->attach_count < 0);

	/* Other attachments remain; keep the context programmed. */
	if (ctx_drvdata->attach_count > 0)
		goto unlock;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto unlock;

	msm_iommu_remote_spin_lock();

	/* Invalidate this context's ASID before tearing it down. */
	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
		GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_drvdata->num));

	__reset_context(iommu_drvdata->base, iommu_drvdata->glb_base,
			ctx_drvdata->num);

	msm_iommu_remote_spin_unlock();

	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;
unlock:
	mutex_unlock(&msm_iommu_lock);
}
557
558static int __get_pgprot(int prot, int len)
559{
560 unsigned int pgprot;
561 int tex;
562
563 if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
564 prot |= IOMMU_READ | IOMMU_WRITE;
565 WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
566 }
567
568 if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
569 prot |= IOMMU_READ;
570 WARN_ONCE(1, "Write-only iommu mappings unsupported; falling back to RW\n");
571 }
572
573 if (prot & IOMMU_CACHE)
574 tex = (pgprot_kernel >> 2) & 0x07;
575 else
576 tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];
577
578 if (tex < 0 || tex > NUM_TEX_CLASS - 1)
579 return 0;
580
581 if (len == SZ_16M || len == SZ_1M) {
582 pgprot = FL_SHARED;
583 pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
584 pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
585 pgprot |= tex & 0x04 ? FL_TEX0 : 0;
586 pgprot |= FL_AP0 | FL_AP1;
587 pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
588 } else {
589 pgprot = SL_SHARED;
590 pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
591 pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
592 pgprot |= tex & 0x04 ? SL_TEX0 : 0;
593 pgprot |= SL_AP0 | SL_AP1;
594 pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
595 }
596
597 return pgprot;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700598}
599
Olav Haugan090614f2013-03-22 12:14:18 -0700600static unsigned long *make_second_level(struct msm_iommu_priv *priv,
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600601 unsigned long *fl_pte)
602{
603 unsigned long *sl;
604 sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
605 get_order(SZ_4K));
606
607 if (!sl) {
608 pr_debug("Could not allocate second level table\n");
609 goto fail;
610 }
611 memset(sl, 0, SZ_4K);
Olav Haugan090614f2013-03-22 12:14:18 -0700612 clean_pte(sl, sl + NUM_SL_PTE, priv->pt.redirect);
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600613
614 *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
615 FL_TYPE_TABLE);
616
Olav Haugan090614f2013-03-22 12:14:18 -0700617 clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600618fail:
619 return sl;
620}
621
622static int sl_4k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
623{
624 int ret = 0;
625
626 if (*sl_pte) {
627 ret = -EBUSY;
628 goto fail;
629 }
630
631 *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
632 | SL_TYPE_SMALL | pgprot;
633fail:
634 return ret;
635}
636
637static int sl_64k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
638{
639 int ret = 0;
640
641 int i;
642
643 for (i = 0; i < 16; i++)
644 if (*(sl_pte+i)) {
645 ret = -EBUSY;
646 goto fail;
647 }
648
649 for (i = 0; i < 16; i++)
650 *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
651 | SL_SHARED | SL_TYPE_LARGE | pgprot;
652
653fail:
654 return ret;
655}
656
657
658static inline int fl_1m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
659{
660 if (*fl_pte)
661 return -EBUSY;
662
663 *fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
664 | pgprot;
665
666 return 0;
667}
668
669
670static inline int fl_16m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
671{
672 int i;
673 int ret = 0;
674 for (i = 0; i < 16; i++)
675 if (*(fl_pte+i)) {
676 ret = -EBUSY;
677 goto fail;
678 }
679 for (i = 0; i < 16; i++)
680 *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
681 | FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
682fail:
683 return ret;
684}
685
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700686static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +0200687 phys_addr_t pa, size_t len, int prot)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700688{
Olav Haugan090614f2013-03-22 12:14:18 -0700689 struct msm_iommu_priv *priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700690 unsigned long *fl_table;
691 unsigned long *fl_pte;
692 unsigned long fl_offset;
693 unsigned long *sl_table;
694 unsigned long *sl_pte;
695 unsigned long sl_offset;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800696 unsigned int pgprot;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700697 int ret = 0;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700698
Steve Mucklef132c6c2012-06-06 18:30:57 -0700699 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800700
701 priv = domain->priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700702 if (!priv) {
703 ret = -EINVAL;
704 goto fail;
705 }
706
Olav Haugan090614f2013-03-22 12:14:18 -0700707 fl_table = priv->pt.fl_table;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700708
709 if (len != SZ_16M && len != SZ_1M &&
710 len != SZ_64K && len != SZ_4K) {
711 pr_debug("Bad size: %d\n", len);
712 ret = -EINVAL;
713 goto fail;
714 }
715
716 if (!fl_table) {
717 pr_debug("Null page table\n");
718 ret = -EINVAL;
719 goto fail;
720 }
721
Steve Mucklef132c6c2012-06-06 18:30:57 -0700722 pgprot = __get_pgprot(prot, len);
723
724 if (!pgprot) {
725 ret = -EINVAL;
726 goto fail;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800727 }
728
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700729 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
730 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
731
732 if (len == SZ_16M) {
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600733 ret = fl_16m(fl_pte, pa, pgprot);
734 if (ret)
735 goto fail;
Olav Haugan090614f2013-03-22 12:14:18 -0700736 clean_pte(fl_pte, fl_pte + 16, priv->pt.redirect);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700737 }
738
Steve Mucklef132c6c2012-06-06 18:30:57 -0700739 if (len == SZ_1M) {
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600740 ret = fl_1m(fl_pte, pa, pgprot);
741 if (ret)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700742 goto fail;
Olav Haugan090614f2013-03-22 12:14:18 -0700743 clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700744 }
745
746 /* Need a 2nd level table */
747 if (len == SZ_4K || len == SZ_64K) {
748
749 if (*fl_pte == 0) {
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600750 if (make_second_level(priv, fl_pte) == NULL) {
Steve Mucklef132c6c2012-06-06 18:30:57 -0700751 ret = -ENOMEM;
752 goto fail;
753 }
Steve Mucklef132c6c2012-06-06 18:30:57 -0700754 }
755
756 if (!(*fl_pte & FL_TYPE_TABLE)) {
757 ret = -EBUSY;
758 goto fail;
759 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700760 }
761
762 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
763 sl_offset = SL_OFFSET(va);
764 sl_pte = sl_table + sl_offset;
765
Steve Mucklef132c6c2012-06-06 18:30:57 -0700766 if (len == SZ_4K) {
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600767 ret = sl_4k(sl_pte, pa, pgprot);
768 if (ret)
Steve Mucklef132c6c2012-06-06 18:30:57 -0700769 goto fail;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700770
Olav Haugan090614f2013-03-22 12:14:18 -0700771 clean_pte(sl_pte, sl_pte + 1, priv->pt.redirect);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700772 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700773
774 if (len == SZ_64K) {
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600775 ret = sl_64k(sl_pte, pa, pgprot);
776 if (ret)
777 goto fail;
Olav Haugan090614f2013-03-22 12:14:18 -0700778 clean_pte(sl_pte, sl_pte + 16, priv->pt.redirect);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700779 }
780
Steve Mucklef132c6c2012-06-06 18:30:57 -0700781 ret = __flush_iotlb_va(domain, va);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700782fail:
Steve Mucklef132c6c2012-06-06 18:30:57 -0700783 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700784 return ret;
785}
786
Ohad Ben-Cohen50090652011-11-10 11:32:25 +0200787static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
788 size_t len)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700789{
Olav Haugan090614f2013-03-22 12:14:18 -0700790 struct msm_iommu_priv *priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700791 unsigned long *fl_table;
792 unsigned long *fl_pte;
793 unsigned long fl_offset;
794 unsigned long *sl_table;
795 unsigned long *sl_pte;
796 unsigned long sl_offset;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700797 int i, ret = 0;
798
Steve Mucklef132c6c2012-06-06 18:30:57 -0700799 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700800
801 priv = domain->priv;
802
Joerg Roedel05df1f32012-01-26 18:25:37 +0100803 if (!priv)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700804 goto fail;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700805
Olav Haugan090614f2013-03-22 12:14:18 -0700806 fl_table = priv->pt.fl_table;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700807
808 if (len != SZ_16M && len != SZ_1M &&
809 len != SZ_64K && len != SZ_4K) {
810 pr_debug("Bad length: %d\n", len);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700811 goto fail;
812 }
813
814 if (!fl_table) {
815 pr_debug("Null page table\n");
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700816 goto fail;
817 }
818
819 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
820 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
821
822 if (*fl_pte == 0) {
823 pr_debug("First level PTE is 0\n");
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700824 goto fail;
825 }
826
827 /* Unmap supersection */
Steve Mucklef132c6c2012-06-06 18:30:57 -0700828 if (len == SZ_16M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700829 for (i = 0; i < 16; i++)
830 *(fl_pte+i) = 0;
831
Olav Haugan090614f2013-03-22 12:14:18 -0700832 clean_pte(fl_pte, fl_pte + 16, priv->pt.redirect);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700833 }
834
835 if (len == SZ_1M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700836 *fl_pte = 0;
837
Olav Haugan090614f2013-03-22 12:14:18 -0700838 clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700839 }
840
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700841 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
842 sl_offset = SL_OFFSET(va);
843 sl_pte = sl_table + sl_offset;
844
845 if (len == SZ_64K) {
846 for (i = 0; i < 16; i++)
847 *(sl_pte+i) = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700848
Olav Haugan090614f2013-03-22 12:14:18 -0700849 clean_pte(sl_pte, sl_pte + 16, priv->pt.redirect);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700850 }
851
Steve Mucklef132c6c2012-06-06 18:30:57 -0700852 if (len == SZ_4K) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700853 *sl_pte = 0;
854
Olav Haugan090614f2013-03-22 12:14:18 -0700855 clean_pte(sl_pte, sl_pte + 1, priv->pt.redirect);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700856 }
857
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700858 if (len == SZ_4K || len == SZ_64K) {
859 int used = 0;
860
861 for (i = 0; i < NUM_SL_PTE; i++)
862 if (sl_table[i])
863 used = 1;
864 if (!used) {
865 free_page((unsigned long)sl_table);
866 *fl_pte = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700867
Olav Haugan090614f2013-03-22 12:14:18 -0700868 clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700869 }
870 }
871
Steve Mucklef132c6c2012-06-06 18:30:57 -0700872 ret = __flush_iotlb_va(domain, va);
Ohad Ben-Cohen9e285472011-09-02 13:32:34 -0400873
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700874fail:
Steve Mucklef132c6c2012-06-06 18:30:57 -0700875 mutex_unlock(&msm_iommu_lock);
Ohad Ben-Cohen50090652011-11-10 11:32:25 +0200876
877 /* the IOMMU API requires us to return how many bytes were unmapped */
878 len = ret ? 0 : len;
879 return len;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700880}
881
/*
 * Resolve the physical address of a scatterlist entry.
 *
 * Prefer the DMA address so carveout regions without a backing
 * struct page can still be mapped; fall back to the page-derived
 * physical address otherwise.  Returns 0 when neither is set.
 */
static unsigned int get_phys_addr(struct scatterlist *sg)
{
	unsigned int dma_addr = sg_dma_address(sg);

	return dma_addr ? dma_addr : sg_phys(sg);
}
894
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600895static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
896 int align)
897{
898 return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
899 && (len >= align);
900}
901
/*
 * Verify that the virtual range [va, va + len) is entirely unmapped in
 * the given first-level page table.  Returns 0 when the range is free,
 * -EBUSY if any first- or second-level entry in the range is already
 * populated.  Caller must hold msm_iommu_lock and pass a 4K-aligned
 * va/len (callers BUG_ON this).
 */
static int check_range(unsigned long *fl_table, unsigned int va,
		       unsigned int len)
{
	unsigned int offset = 0;	/* bytes of the range checked so far */
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int i;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	/* Walk one first-level entry (1M of VA space) per iteration */
	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			/* Entry points at a second-level table: scan the
			 * 4K slots the range covers inside this 1M region.
			 */
			sl_start = SL_OFFSET(va);
			sl_table = __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			for (i = sl_start; i < sl_end; i++) {
				if (sl_table[i] != 0) {
					pr_err("%08x - %08x already mapped\n",
						va, va + SZ_4K);
					return -EBUSY;
				}
				offset += SZ_4K;
				va += SZ_4K;
			}


			/* NOTE(review): this assignment is dead — sl_start is
			 * recomputed from va on the next iteration. */
			sl_start = 0;
		} else {
			/* Section/supersection slot: any non-zero entry means
			 * this whole 1M region is occupied. */
			if (*fl_pte != 0) {
				pr_err("%08x - %08x already mapped\n",
				       va, va + SZ_1M);
				return -EBUSY;
			}
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}
	return 0;
}
950
Steve Mucklef132c6c2012-06-06 18:30:57 -0700951static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
952 struct scatterlist *sg, unsigned int len,
953 int prot)
954{
955 unsigned int pa;
956 unsigned int offset = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700957 unsigned long *fl_table;
958 unsigned long *fl_pte;
959 unsigned long fl_offset;
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600960 unsigned long *sl_table = NULL;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700961 unsigned long sl_offset, sl_start;
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600962 unsigned int chunk_size, chunk_offset = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700963 int ret = 0;
Olav Haugan090614f2013-03-22 12:14:18 -0700964 struct msm_iommu_priv *priv;
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600965 unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700966
967 mutex_lock(&msm_iommu_lock);
968
969 BUG_ON(len & (SZ_4K - 1));
970
971 priv = domain->priv;
Olav Haugan090614f2013-03-22 12:14:18 -0700972 fl_table = priv->pt.fl_table;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700973
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600974 pgprot4k = __get_pgprot(prot, SZ_4K);
975 pgprot64k = __get_pgprot(prot, SZ_64K);
976 pgprot1m = __get_pgprot(prot, SZ_1M);
977 pgprot16m = __get_pgprot(prot, SZ_16M);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700978
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600979 if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
Steve Mucklef132c6c2012-06-06 18:30:57 -0700980 ret = -EINVAL;
981 goto fail;
982 }
Jeremy Gebben8c5e2f72012-10-05 14:03:45 -0600983 ret = check_range(fl_table, va, len);
984 if (ret)
985 goto fail;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700986
987 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
988 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600989 pa = get_phys_addr(sg);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700990
991 while (offset < len) {
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600992 chunk_size = SZ_4K;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700993
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600994 if (is_fully_aligned(va, pa, sg->length - chunk_offset,
995 SZ_16M))
996 chunk_size = SZ_16M;
997 else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
998 SZ_1M))
999 chunk_size = SZ_1M;
1000 /* 64k or 4k determined later */
1001
1002 /* for 1M and 16M, only first level entries are required */
1003 if (chunk_size >= SZ_1M) {
1004 if (chunk_size == SZ_16M) {
1005 ret = fl_16m(fl_pte, pa, pgprot16m);
1006 if (ret)
1007 goto fail;
Olav Haugan090614f2013-03-22 12:14:18 -07001008 clean_pte(fl_pte, fl_pte + 16,
1009 priv->pt.redirect);
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -06001010 fl_pte += 16;
1011 } else if (chunk_size == SZ_1M) {
1012 ret = fl_1m(fl_pte, pa, pgprot1m);
1013 if (ret)
1014 goto fail;
Olav Haugan090614f2013-03-22 12:14:18 -07001015 clean_pte(fl_pte, fl_pte + 1,
1016 priv->pt.redirect);
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -06001017 fl_pte++;
1018 }
1019
1020 offset += chunk_size;
1021 chunk_offset += chunk_size;
1022 va += chunk_size;
1023 pa += chunk_size;
1024
1025 if (chunk_offset >= sg->length && offset < len) {
1026 chunk_offset = 0;
1027 sg = sg_next(sg);
1028 pa = get_phys_addr(sg);
1029 if (pa == 0) {
1030 pr_debug("No dma address for sg %p\n",
1031 sg);
1032 ret = -EINVAL;
1033 goto fail;
1034 }
1035 }
1036 continue;
1037 }
1038 /* for 4K or 64K, make sure there is a second level table */
1039 if (*fl_pte == 0) {
1040 if (!make_second_level(priv, fl_pte)) {
Steve Mucklef132c6c2012-06-06 18:30:57 -07001041 ret = -ENOMEM;
1042 goto fail;
1043 }
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -06001044 }
1045 if (!(*fl_pte & FL_TYPE_TABLE)) {
1046 ret = -EBUSY;
1047 goto fail;
1048 }
1049 sl_table = __va(((*fl_pte) & FL_BASE_MASK));
1050 sl_offset = SL_OFFSET(va);
Steve Mucklef132c6c2012-06-06 18:30:57 -07001051 /* Keep track of initial position so we
1052 * don't clean more than we have to
1053 */
1054 sl_start = sl_offset;
1055
1056 /* Build the 2nd level page table */
1057 while (offset < len && sl_offset < NUM_SL_PTE) {
Steve Mucklef132c6c2012-06-06 18:30:57 -07001058
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -06001059 /* Map a large 64K page if the chunk is large enough and
1060 * the pa and va are aligned
1061 */
1062
1063 if (is_fully_aligned(va, pa, sg->length - chunk_offset,
1064 SZ_64K))
1065 chunk_size = SZ_64K;
1066 else
1067 chunk_size = SZ_4K;
1068
1069 if (chunk_size == SZ_4K) {
1070 sl_4k(&sl_table[sl_offset], pa, pgprot4k);
1071 sl_offset++;
1072 } else {
1073 BUG_ON(sl_offset + 16 > NUM_SL_PTE);
1074 sl_64k(&sl_table[sl_offset], pa, pgprot64k);
1075 sl_offset += 16;
1076 }
1077
1078
1079 offset += chunk_size;
1080 chunk_offset += chunk_size;
1081 va += chunk_size;
1082 pa += chunk_size;
Steve Mucklef132c6c2012-06-06 18:30:57 -07001083
1084 if (chunk_offset >= sg->length && offset < len) {
1085 chunk_offset = 0;
1086 sg = sg_next(sg);
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -06001087 pa = get_phys_addr(sg);
1088 if (pa == 0) {
Steve Mucklef132c6c2012-06-06 18:30:57 -07001089 pr_debug("No dma address for sg %p\n",
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -06001090 sg);
Steve Mucklef132c6c2012-06-06 18:30:57 -07001091 ret = -EINVAL;
1092 goto fail;
1093 }
1094 }
1095 }
1096
1097 clean_pte(sl_table + sl_start, sl_table + sl_offset,
Olav Haugan090614f2013-03-22 12:14:18 -07001098 priv->pt.redirect);
Steve Mucklef132c6c2012-06-06 18:30:57 -07001099
1100 fl_pte++;
1101 sl_offset = 0;
1102 }
1103 __flush_iotlb(domain);
1104fail:
1105 mutex_unlock(&msm_iommu_lock);
1106 return ret;
1107}
1108
1109
/*
 * Unmap the virtual range [va, va + len) from the domain, one
 * first-level (1M) region per iteration.  @va and @len must be
 * 4K-aligned (BUG_ON otherwise).  Always returns 0.
 *
 * NOTE(review): the section branch clears a single first-level entry
 * and advances 1M regardless of whether the entry was part of a 16M
 * supersection — presumably callers only unmap ranges they mapped with
 * matching granularity; confirm.
 */
static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	unsigned int offset = 0;	/* bytes unmapped so far */
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int used, i;
	struct msm_iommu_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pt.fl_table;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			/* Second-level table: zero the covered 4K slots
			 * in one memset (4 bytes per entry). */
			sl_start = SL_OFFSET(va);
			sl_table = __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
			clean_pte(sl_table + sl_start, sl_table + sl_end,
				  priv->pt.redirect);

			offset += (sl_end - sl_start) * SZ_4K;
			va += (sl_end - sl_start) * SZ_4K;

			/* Unmap and free the 2nd level table if all mappings
			 * in it were removed. This saves memory, but the table
			 * will need to be re-allocated the next time someone
			 * tries to map these VAs.
			 */
			used = 0;

			/* If we just unmapped the whole table, don't bother
			 * seeing if there are still used entries left.
			 */
			if (sl_end - sl_start != NUM_SL_PTE)
				for (i = 0; i < NUM_SL_PTE; i++)
					if (sl_table[i]) {
						used = 1;
						break;
					}
			if (!used) {
				free_page((unsigned long)sl_table);
				*fl_pte = 0;	/* drop the FL reference */

				clean_pte(fl_pte, fl_pte + 1,
					  priv->pt.redirect);
			}

			sl_start = 0;
		} else {
			/* Section mapping (or already empty): clear the
			 * first-level entry and skip the whole 1M region. */
			*fl_pte = 0;
			clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}
1187
/*
 * Translate an IOVA to a physical address by asking the IOMMU hardware
 * itself: write @va to the V2P (virtual-to-physical) probe register of
 * the first attached context and read back the PAR result register.
 *
 * Returns the physical address, or 0 if no context is attached, the
 * clocks cannot be enabled, or the hardware reports a translation
 * fault.
 */
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	/* Any attached context shares the domain's page table; use the
	 * first one for the hardware lookup. */
	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	/* Registers are only accessible with the IOMMU clocks on */
	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	msm_iommu_remote_spin_lock();

	/* Kick off the hardware VA-to-PA translation */
	SET_V2PPR(base, ctx, va & V2Pxx_VA);

	/* Ensure the probe write lands before reading the result */
	mb();
	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	/* A faulting lookup yields no valid address */
	if (GET_FAULT(base, ctx))
		ret = 0;

	msm_iommu_remote_spin_unlock();

	__disable_clocks(iommu_drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
1239
/* This driver advertises no optional IOMMU capabilities. */
static int msm_iommu_domain_has_cap(struct iommu_domain *dom,
				    unsigned long capability)
{
	return 0;
}
1245
/*
 * Dump the fault-related registers of one IOMMU context bank to the
 * kernel log; called from the fault handler when no registered handler
 * consumes the fault.  The FSR bits are decoded into short mnemonics
 * (TF = translation fault, AFF = access flag fault, etc.).
 */
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "APF " : "",
			(fsr & 0x10) ? "TLBMF " : "",
			(fsr & 0x20) ? "HTWDEEF " : "",
			(fsr & 0x40) ? "HTWSEEF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x10000) ? "SL " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
1272
/*
 * IRQ handler for IOMMU context faults.  Reads the fault status
 * register, forwards the fault to the domain's registered handler via
 * report_iommu_fault(), dumps diagnostics if nobody handles it, then
 * acks the fault and (unless the handler returned -EBUSY) resumes
 * stalled transactions.
 *
 * Returns IRQ_HANDLED when a fault was pending, IRQ_NONE for a
 * spurious interrupt, or the error from __enable_clocks().
 */
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_id;
	struct msm_iommu_drvdata *drvdata;
	void __iomem *base;
	unsigned int fsr, num;
	int ret;

	mutex_lock(&msm_iommu_lock);
	BUG_ON(!ctx_drvdata);

	/* Parent platform device carries the per-IOMMU driver data */
	drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
	BUG_ON(!drvdata);

	base = drvdata->base;
	num = ctx_drvdata->num;

	/* Registers are only accessible with the IOMMU clocks on */
	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	msm_iommu_remote_spin_lock();

	fsr = GET_FSR(base, num);

	if (fsr) {
		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			ret = -ENOSYS;
		} else
			/* Let the domain's fault handler decide */
			ret = report_iommu_fault(ctx_drvdata->attached_domain,
						 &ctx_drvdata->pdev->dev,
						 GET_FAR(base, num), 0);

		/* -ENOSYS: no handler registered — log everything we know */
		if (ret == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault!\n");
			pr_err("name    = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name, num);
			pr_err("Interesting registers:\n");
			print_ctx_regs(base, num);
		}

		/* Ack the fault by writing the status bits back */
		SET_FSR(base, num, fsr);
		/*
		 * Only resume fetches if the registered fault handler
		 * allows it
		 */
		if (ret != -EBUSY)
			SET_RESUME(base, num, 1);

		ret = IRQ_HANDLED;
	} else
		ret = IRQ_NONE;

	msm_iommu_remote_spin_unlock();

	__disable_clocks(drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
1334
1335static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
1336{
Olav Haugan090614f2013-03-22 12:14:18 -07001337 struct msm_iommu_priv *priv = domain->priv;
1338 return __pa(priv->pt.fl_table);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001339}
1340
/* IOMMU API callbacks for the MSM IOMMU v0 hardware; registered with
 * the platform bus in msm_iommu_init(). */
static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,	/* 4K | 64K | 1M | 16M */
};
1355
Stepan Moskovchenko100832c2010-11-15 18:20:08 -08001356static int __init get_tex_class(int icp, int ocp, int mt, int nos)
1357{
1358 int i = 0;
1359 unsigned int prrr = 0;
1360 unsigned int nmrr = 0;
1361 int c_icp, c_ocp, c_mt, c_nos;
1362
1363 RCP15_PRRR(prrr);
1364 RCP15_NMRR(nmrr);
1365
1366 for (i = 0; i < NUM_TEX_CLASS; i++) {
1367 c_nos = PRRR_NOS(prrr, i);
1368 c_mt = PRRR_MT(prrr, i);
1369 c_icp = NMRR_ICP(nmrr, i);
1370 c_ocp = NMRR_OCP(nmrr, i);
1371
1372 if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
1373 return i;
1374 }
1375
1376 return -ENODEV;
1377}
1378
/*
 * Resolve each MSM IOMMU cache attribute to the TEX class index the
 * running CPU uses for it; the results feed the page-table protection
 * bits.  Any attribute with no matching class stores -ENODEV.
 */
static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}
1393
/*
 * Driver init: bail out on SoCs without v0 IOMMU hardware, set up the
 * cross-processor lock and TEX class table, then register the IOMMU
 * callbacks for all platform-bus devices.
 */
static int __init msm_iommu_init(void)
{
	if (!msm_soc_version_supports_iommu_v0())
		return -ENODEV;

	msm_iommu_lock_initialize();

	setup_iommu_tex_classes();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	return 0;
}
1405
/* Initialize early (subsys level) so client drivers probing later can
 * already attach to their IOMMUs. */
subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");