/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include <mach/iommu_perfmon.h>
#include <mach/iommu_hw-v0.h>
#include <mach/iommu.h>
#include <mach/msm_smsm.h>

#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"	\
: "=r" (reg))

#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
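/*
 * PRRR and NMRR are the ARMv7 TEX-remap registers (CP15 c10).  They are
 * read from the CPU here so that the same memory-attribute remapping can
 * later be programmed into each IOMMU context (see __program_context())
 * and consulted when selecting TEX classes for mappings.
 */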

/* Shareability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NON_SH		0x0
#define MSM_IOMMU_ATTR_SH		0x4

/* Cacheability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NONCACHED	0x0
#define MSM_IOMMU_ATTR_CACHED_WB_WA	0x1
#define MSM_IOMMU_ATTR_CACHED_WB_NWA	0x2
#define MSM_IOMMU_ATTR_CACHED_WT	0x3

struct bus_type msm_iommu_sec_bus_type = {
	.name = "msm_iommu_sec_bus",
};

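/*
 * Flush updated page-table entries out of the CPU data cache when the
 * domain's tables are not set up as cacheable for the IOMMU's hardware
 * table walker (redirect == 0), so that the walker, which reads from
 * memory, observes the update.  With redirect set, the walker fetches
 * through the cache and no flush is needed.
 */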
static inline void clean_pte(unsigned long *start, unsigned long *end,
			     int redirect)
{
	if (!redirect)
		dmac_flush_range(start, end);
}

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static int msm_iommu_tex_class[4];

DEFINE_MUTEX(msm_iommu_lock);

/*
 * Remote spinlock, based on Peterson's algorithm, used to synchronize
 * IOMMU config port access between the CPU and the GPU.  This implements
 * process 0 of the algorithm; the GPU implements process 1.  The flag and
 * turn variables are kept in shared memory so that the GPU can access
 * them.
 */
struct msm_iommu_remote_lock {
	int initialized;
	struct remote_iommu_petersons_spinlock *lock;
};

static struct msm_iommu_remote_lock msm_iommu_remote_lock;

#ifdef CONFIG_MSM_IOMMU_GPU_SYNC
static void _msm_iommu_remote_spin_lock_init(void)
{
	msm_iommu_remote_lock.lock = smem_alloc(SMEM_SPINLOCK_ARRAY, 32);
	memset(msm_iommu_remote_lock.lock, 0,
			sizeof(*msm_iommu_remote_lock.lock));
}

void msm_iommu_remote_p0_spin_lock(void)
{
	msm_iommu_remote_lock.lock->flag[PROC_APPS] = 1;
	msm_iommu_remote_lock.lock->turn = 1;

	smp_mb();

	while (msm_iommu_remote_lock.lock->flag[PROC_GPU] == 1 &&
	       msm_iommu_remote_lock.lock->turn == 1)
		cpu_relax();
}

void msm_iommu_remote_p0_spin_unlock(void)
{
	smp_mb();

	msm_iommu_remote_lock.lock->flag[PROC_APPS] = 0;
}
#endif
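/*
 * Peterson's algorithm in brief: the APPS side announces interest by
 * setting flag[PROC_APPS] and gives the turn away, then may enter the
 * critical section only once the GPU is either not interested or holds
 * the turn.  The GPU side is expected to mirror this with flag[PROC_GPU]
 * and turn = 0.  When CONFIG_MSM_IOMMU_GPU_SYNC is not set, the
 * msm_iommu_remote_spin_lock()/unlock() calls used below are presumably
 * stubbed out in the header.
 */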

inline void msm_iommu_mutex_lock(void)
{
	mutex_lock(&msm_iommu_lock);
}

inline void msm_iommu_mutex_unlock(void)
{
	mutex_unlock(&msm_iommu_lock);
}

void *msm_iommu_lock_initialize(void)
{
	mutex_lock(&msm_iommu_lock);
	if (!msm_iommu_remote_lock.initialized) {
		msm_iommu_remote_lock_init();
		msm_iommu_remote_lock.initialized = 1;
	}
	mutex_unlock(&msm_iommu_lock);
	return msm_iommu_remote_lock.lock;
}

struct msm_priv {
	unsigned long *pgtable;
	int redirect;
	struct list_head list_attached;
};

static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_prepare_enable(drvdata->pclk);
	if (ret)
		goto fail;

	if (drvdata->clk) {
		ret = clk_prepare_enable(drvdata->clk);
		if (ret)
			clk_disable_unprepare(drvdata->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->clk)
		clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}

static int _iommu_power_on(void *data)
{
	struct msm_iommu_drvdata *drvdata;

	drvdata = (struct msm_iommu_drvdata *)data;
	return __enable_clocks(drvdata);
}

static int _iommu_power_off(void *data)
{
	struct msm_iommu_drvdata *drvdata;

	drvdata = (struct msm_iommu_drvdata *)data;
	__disable_clocks(drvdata);
	return 0;
}

static void _iommu_lock_acquire(void)
{
	msm_iommu_lock();
}

static void _iommu_lock_release(void)
{
	msm_iommu_unlock();
}

struct iommu_access_ops iommu_access_ops_v0 = {
	.iommu_power_on = _iommu_power_on,
	.iommu_power_off = _iommu_power_off,
	.iommu_lock_acquire = _iommu_lock_acquire,
	.iommu_lock_release = _iommu_lock_release,
};
EXPORT_SYMBOL(iommu_access_ops_v0);

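/*
 * TLB maintenance helpers: __flush_iotlb_va() invalidates a single VA
 * within each attached context's ASID (used after mapping or unmapping a
 * single page), while __flush_iotlb() below invalidates every TLB entry
 * tagged with the context's ASID (used after range operations).
 */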
static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		msm_iommu_remote_spin_lock();

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   asid | (va & TLBIVA_VA));
		mb();

		msm_iommu_remote_spin_unlock();

		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		msm_iommu_remote_spin_lock();

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
		mb();

		msm_iommu_remote_spin_unlock();

		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static void __reset_context(void __iomem *base, void __iomem *glb_base, int ctx)
{
	SET_BPRCOSH(glb_base, ctx, 0);
	SET_BPRCISH(glb_base, ctx, 0);
	SET_BPRCNSH(glb_base, ctx, 0);
	SET_BPSHCFG(glb_base, ctx, 0);
	SET_BPMTCFG(glb_base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();
}

static void __program_context(void __iomem *base, void __iomem *glb_base,
			      int ctx, int ncb, phys_addr_t pgtable,
			      int redirect, int ttbr_split)
{
	unsigned int prrr, nmrr;
	int i, j, found;
	msm_iommu_remote_spin_lock();

	__reset_context(base, glb_base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, ttbr_split);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));
	if (ttbr_split)
		SET_TTBR1_PA(base, ctx, (pgtable >> TTBR1_PA_SHIFT));

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_TTBR0_SH(base, ctx, 1);
		SET_TTBR1_SH(base, ctx, 1);

		SET_TTBR0_NOS(base, ctx, 1);
		SET_TTBR1_NOS(base, ctx, 1);

		SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR0_IRGNL(base, ctx, 1);

		SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR1_IRGNL(base, ctx, 1);

		SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
		SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
	}

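	/*
	 * ASID policy: contexts that point at the same page table share an
	 * ASID, so their TLB entries can be shared and a single
	 * invalidate-by-ASID covers all of them.  A new page table gets the
	 * lowest ASID not already claimed by another context; since there
	 * are as many candidate ASIDs as contexts, the search below cannot
	 * fail (hence the BUG_ON).
	 */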
	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx,
				GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();

	msm_iommu_remote_spin_unlock();
}

static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);
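	/*
	 * The first-level table of the ARMv7 short-descriptor format is
	 * 16 KB: 4096 (NUM_FL_PTE) word-sized entries, each describing
	 * 1 MB of the 4 GB IOVA space.
	 */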
	priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
							  get_order(SZ_16K));

	if (!priv->pgtable)
		goto fail_nomem;

#ifdef CONFIG_IOMMU_PGTABLES_L2
	priv->redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
#endif

	memset(priv->pgtable, 0, SZ_16K);
	domain->priv = priv;

	clean_pte(priv->pgtable, priv->pgtable + NUM_FL_PTE, priv->redirect);

	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long *fl_table;
	int i;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pgtable;

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
						FL_BASE_MASK)));

		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
		priv->pgtable = NULL;
	}

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}

static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto unlock;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);

	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto unlock;
	}

	++ctx_drvdata->attach_count;

	if (ctx_drvdata->attach_count > 1)
		goto unlock;

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto unlock;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto unlock;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto unlock;

	__program_context(iommu_drvdata->base, iommu_drvdata->glb_base,
			  ctx_drvdata->num, iommu_drvdata->ncb,
			  __pa(priv->pgtable), priv->redirect,
			  iommu_drvdata->ttbr_split);

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);

	ctx_drvdata->attached_domain = domain;

	mutex_unlock(&msm_iommu_lock);

	msm_iommu_attached(dev->parent);
	return ret;
unlock:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	msm_iommu_detached(dev->parent);

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;

	if (!priv || !dev)
		goto unlock;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);

	if (!iommu_drvdata || !ctx_drvdata)
		goto unlock;

	--ctx_drvdata->attach_count;
	BUG_ON(ctx_drvdata->attach_count < 0);

	if (ctx_drvdata->attach_count > 0)
		goto unlock;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto unlock;

	msm_iommu_remote_spin_lock();

	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
		     GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_drvdata->num));

	__reset_context(iommu_drvdata->base, iommu_drvdata->glb_base,
			ctx_drvdata->num);

	msm_iommu_remote_spin_unlock();

	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;
unlock:
	mutex_unlock(&msm_iommu_lock);
}

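/*
 * Translate IOMMU_* protection flags into ARM short-descriptor attribute
 * bits.  1M/16M mappings get first-level section bits (FL_*), 4K/64K get
 * second-level bits (SL_*).  The low three bits of the selected TEX class
 * map onto the B, C and TEX[0] bits, AP0/AP1 grant access and AP2 makes
 * the mapping read-only when IOMMU_WRITE is absent.  A return value of 0
 * means no suitable TEX class was found and callers treat it as -EINVAL.
 */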
static int __get_pgprot(int prot, int len)
{
	unsigned int pgprot;
	int tex;

	if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
		prot |= IOMMU_READ | IOMMU_WRITE;
		WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
	}

	if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
		prot |= IOMMU_READ;
		WARN_ONCE(1, "Write-only iommu mappings unsupported; falling back to RW\n");
	}

	if (prot & IOMMU_CACHE)
		tex = (pgprot_kernel >> 2) & 0x07;
	else
		tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];

	if (tex < 0 || tex > NUM_TEX_CLASS - 1)
		return 0;

	if (len == SZ_16M || len == SZ_1M) {
		pgprot = FL_SHARED;
		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
		pgprot |= FL_AP0 | FL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
	} else {
		pgprot = SL_SHARED;
		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
		pgprot |= SL_AP0 | SL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
	}

	return pgprot;
}

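/*
 * Raw page-table writers.  make_second_level() allocates and hooks up a
 * second-level table; fl_1m()/fl_16m() and sl_4k()/sl_64k() write first-
 * and second-level descriptors.  In the ARM short-descriptor format,
 * 16 MB supersections and 64 KB large pages are represented by 16
 * identical consecutive entries, which is why fl_16m() and sl_64k()
 * populate 16 slots for a single mapping.
 */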
static unsigned long *make_second_level(struct msm_priv *priv,
					unsigned long *fl_pte)
{
	unsigned long *sl;
	sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
			get_order(SZ_4K));

	if (!sl) {
		pr_debug("Could not allocate second level table\n");
		goto fail;
	}
	memset(sl, 0, SZ_4K);
	clean_pte(sl, sl + NUM_SL_PTE, priv->redirect);

	*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) |
			FL_TYPE_TABLE);

	clean_pte(fl_pte, fl_pte + 1, priv->redirect);
fail:
	return sl;
}

static int sl_4k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
	int ret = 0;

	if (*sl_pte) {
		ret = -EBUSY;
		goto fail;
	}

	*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
		| SL_TYPE_SMALL | pgprot;
fail:
	return ret;
}

static int sl_64k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
	int ret = 0;

	int i;

	for (i = 0; i < 16; i++)
		if (*(sl_pte+i)) {
			ret = -EBUSY;
			goto fail;
		}

	for (i = 0; i < 16; i++)
		*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
			| SL_SHARED | SL_TYPE_LARGE | pgprot;

fail:
	return ret;
}


static inline int fl_1m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
	if (*fl_pte)
		return -EBUSY;

	*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
		| pgprot;

	return 0;
}


static inline int fl_16m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
	int i;
	int ret = 0;
	for (i = 0; i < 16; i++)
		if (*(fl_pte+i)) {
			ret = -EBUSY;
			goto fail;
		}
	for (i = 0; i < 16; i++)
		*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
			| FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
fail:
	return ret;
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_priv *priv;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	unsigned int pgprot;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad size: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	pgprot = __get_pgprot(prot, len);

	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (len == SZ_16M) {
		ret = fl_16m(fl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(fl_pte, fl_pte + 16, priv->redirect);
	}

	if (len == SZ_1M) {
		ret = fl_1m(fl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(fl_pte, fl_pte + 1, priv->redirect);
	}

	/* Need a 2nd level table */
	if (len == SZ_4K || len == SZ_64K) {

		if (*fl_pte == 0) {
			if (make_second_level(priv, fl_pte) == NULL) {
				ret = -ENOMEM;
				goto fail;
			}
		}

		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_4K) {
		ret = sl_4k(sl_pte, pa, pgprot);
		if (ret)
			goto fail;

		clean_pte(sl_pte, sl_pte + 1, priv->redirect);
	}

	if (len == SZ_64K) {
		ret = sl_64k(sl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(sl_pte, sl_pte + 16, priv->redirect);
	}

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			    size_t len)
{
	struct msm_priv *priv;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	int i, ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv)
		goto fail;

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad length: %zu\n", len);
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (*fl_pte == 0) {
		pr_debug("First level PTE is 0\n");
		goto fail;
	}

	/* Unmap supersection */
	if (len == SZ_16M) {
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = 0;

		clean_pte(fl_pte, fl_pte + 16, priv->redirect);
	}

	if (len == SZ_1M) {
		*fl_pte = 0;

		clean_pte(fl_pte, fl_pte + 1, priv->redirect);
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_64K) {
		for (i = 0; i < 16; i++)
			*(sl_pte+i) = 0;

		clean_pte(sl_pte, sl_pte + 16, priv->redirect);
	}

	if (len == SZ_4K) {
		*sl_pte = 0;

		clean_pte(sl_pte, sl_pte + 1, priv->redirect);
	}

	if (len == SZ_4K || len == SZ_64K) {
		int used = 0;

		for (i = 0; i < NUM_SL_PTE; i++)
			if (sl_table[i])
				used = 1;
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			clean_pte(fl_pte, fl_pte + 1, priv->redirect);
		}
	}

	ret = __flush_iotlb_va(domain, va);

fail:
	mutex_unlock(&msm_iommu_lock);

	/* the IOMMU API requires us to return how many bytes were unmapped */
	len = ret ? 0 : len;
	return len;
}

static unsigned int get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	unsigned int pa = sg_dma_address(sg);
	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
				   int align)
{
	return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
		&& (len >= align);
}

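/*
 * Pre-check an IOVA range for existing mappings.  msm_iommu_map_range()
 * calls this before writing any PTEs so that a conflicting mapping makes
 * the whole request fail with -EBUSY instead of leaving the range
 * partially populated.
 */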
static int check_range(unsigned long *fl_table, unsigned int va,
		       unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int i;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			sl_start = SL_OFFSET(va);
			sl_table = __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			for (i = sl_start; i < sl_end; i++) {
				if (sl_table[i] != 0) {
					pr_err("%08x - %08x already mapped\n",
						va, va + SZ_4K);
					return -EBUSY;
				}
				offset += SZ_4K;
				va += SZ_4K;
			}


			sl_start = 0;
		} else {
			if (*fl_pte != 0) {
				pr_err("%08x - %08x already mapped\n",
					va, va + SZ_1M);
				return -EBUSY;
			}
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}
	return 0;
}

static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	unsigned int pa;
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table = NULL;
	unsigned long sl_offset, sl_start;
	unsigned int chunk_size, chunk_offset = 0;
	int ret = 0;
	struct msm_priv *priv;
	unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	pgprot4k = __get_pgprot(prot, SZ_4K);
	pgprot64k = __get_pgprot(prot, SZ_64K);
	pgprot1m = __get_pgprot(prot, SZ_1M);
	pgprot16m = __get_pgprot(prot, SZ_16M);

	if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
		ret = -EINVAL;
		goto fail;
	}
	ret = check_range(fl_table, va, len);
	if (ret)
		goto fail;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
	pa = get_phys_addr(sg);

	while (offset < len) {
		chunk_size = SZ_4K;

		if (is_fully_aligned(va, pa, sg->length - chunk_offset,
				     SZ_16M))
			chunk_size = SZ_16M;
		else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
				     SZ_1M))
			chunk_size = SZ_1M;
		/* 64k or 4k determined later */

		/* for 1M and 16M, only first level entries are required */
		if (chunk_size >= SZ_1M) {
			if (chunk_size == SZ_16M) {
				ret = fl_16m(fl_pte, pa, pgprot16m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 16, priv->redirect);
				fl_pte += 16;
			} else if (chunk_size == SZ_1M) {
				ret = fl_1m(fl_pte, pa, pgprot1m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 1, priv->redirect);
				fl_pte++;
			}

			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
				if (pa == 0) {
					pr_debug("No dma address for sg %p\n",
						sg);
					ret = -EINVAL;
					goto fail;
				}
			}
			continue;
		}
		/* for 4K or 64K, make sure there is a second level table */
		if (*fl_pte == 0) {
			if (!make_second_level(priv, fl_pte)) {
				ret = -ENOMEM;
				goto fail;
			}
		}
		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
		sl_table = __va(((*fl_pte) & FL_BASE_MASK));
		sl_offset = SL_OFFSET(va);
		/* Keep track of initial position so we
		 * don't clean more than we have to
		 */
		sl_start = sl_offset;

		/* Build the 2nd level page table */
		while (offset < len && sl_offset < NUM_SL_PTE) {

			/* Map a large 64K page if the chunk is large enough and
			 * the pa and va are aligned
			 */

			if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					     SZ_64K))
				chunk_size = SZ_64K;
			else
				chunk_size = SZ_4K;

			if (chunk_size == SZ_4K) {
				sl_4k(&sl_table[sl_offset], pa, pgprot4k);
				sl_offset++;
			} else {
				BUG_ON(sl_offset + 16 > NUM_SL_PTE);
				sl_64k(&sl_table[sl_offset], pa, pgprot64k);
				sl_offset += 16;
			}


			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
				if (pa == 0) {
					pr_debug("No dma address for sg %p\n",
						sg);
					ret = -EINVAL;
					goto fail;
				}
			}
		}

		clean_pte(sl_table + sl_start, sl_table + sl_offset,
				priv->redirect);

		fl_pte++;
		sl_offset = 0;
	}
	__flush_iotlb(domain);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}


static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int used, i;
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			sl_start = SL_OFFSET(va);
			sl_table = __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
			clean_pte(sl_table + sl_start, sl_table + sl_end,
					priv->redirect);

			offset += (sl_end - sl_start) * SZ_4K;
			va += (sl_end - sl_start) * SZ_4K;

			/* Unmap and free the 2nd level table if all mappings
			 * in it were removed. This saves memory, but the table
			 * will need to be re-allocated the next time someone
			 * tries to map these VAs.
			 */
			used = 0;

			/* If we just unmapped the whole table, don't bother
			 * seeing if there are still used entries left.
			 */
			if (sl_end - sl_start != NUM_SL_PTE)
				for (i = 0; i < NUM_SL_PTE; i++)
					if (sl_table[i]) {
						used = 1;
						break;
					}
			if (!used) {
				free_page((unsigned long)sl_table);
				*fl_pte = 0;

				clean_pte(fl_pte, fl_pte + 1, priv->redirect);
			}

			sl_start = 0;
		} else {
			*fl_pte = 0;
			clean_pte(fl_pte, fl_pte + 1, priv->redirect);
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}

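/*
 * Translate an IOVA by asking the hardware rather than walking the
 * software page tables: the VA is written to the V2P translation request
 * register of the first attached context and the result is read back
 * from PAR, with the supersection and fault cases handled explicitly.
 */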
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	msm_iommu_remote_spin_lock();

	SET_V2PPR(base, ctx, va & V2Pxx_VA);

	mb();
	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(base, ctx))
		ret = 0;

	msm_iommu_remote_spin_unlock();

	__disable_clocks(iommu_drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "APF " : "",
			(fsr & 0x10) ? "TLBMF " : "",
			(fsr & 0x20) ? "HTWDEEF " : "",
			(fsr & 0x40) ? "HTWSEEF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x10000) ? "SL " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_id;
	struct msm_iommu_drvdata *drvdata;
	void __iomem *base;
	unsigned int fsr, num;
	int ret;

	mutex_lock(&msm_iommu_lock);
	BUG_ON(!ctx_drvdata);

	drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
	BUG_ON(!drvdata);

	base = drvdata->base;
	num = ctx_drvdata->num;

	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	msm_iommu_remote_spin_lock();

	fsr = GET_FSR(base, num);

	if (fsr) {
		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			ret = -ENOSYS;
		} else
			ret = report_iommu_fault(ctx_drvdata->attached_domain,
						&ctx_drvdata->pdev->dev,
						GET_FAR(base, num), 0);

		if (ret == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault!\n");
			pr_err("name    = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name, num);
			pr_err("Interesting registers:\n");
			print_ctx_regs(base, num);
		}

		SET_FSR(base, num, fsr);
		/*
		 * Only resume fetches if the registered fault handler
		 * allows it
		 */
		if (ret != -EBUSY)
			SET_RESUME(base, num, 1);

		ret = IRQ_HANDLED;
	} else
		ret = IRQ_NONE;

	msm_iommu_remote_spin_unlock();

	__disable_clocks(drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	return __pa(priv->pgtable);
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};
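
/*
 * Example usage (a minimal sketch, not part of this driver): once
 * msm_iommu_init() below has registered msm_iommu_ops for the platform
 * bus, clients reach these callbacks through the generic IOMMU API.
 * "ctx_dev" stands for one of the IOMMU context platform devices and is
 * purely illustrative:
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	ret = iommu_attach_device(domain, ctx_dev);	// msm_iommu_attach_dev
 *	ret = iommu_map(domain, iova, paddr, SZ_4K,	// msm_iommu_map
 *			IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);		// msm_iommu_unmap
 *	iommu_detach_device(domain, ctx_dev);		// msm_iommu_detach_dev
 *	iommu_domain_free(domain);
 */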

static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
	int i = 0;
	unsigned int prrr = 0;
	unsigned int nmrr = 0;
	int c_icp, c_ocp, c_mt, c_nos;

	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);

	for (i = 0; i < NUM_TEX_CLASS; i++) {
		c_nos = PRRR_NOS(prrr, i);
		c_mt = PRRR_MT(prrr, i);
		c_icp = NMRR_ICP(nmrr, i);
		c_ocp = NMRR_OCP(nmrr, i);

		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
			return i;
	}

	return -ENODEV;
}

static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}
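
/*
 * get_tex_class() above scans the CPU's PRRR/NMRR TEX-remap entries for an
 * index whose memory type, shareability and inner/outer cache policies
 * match the requested combination; setup_iommu_tex_classes() caches the
 * results so that __get_pgprot() can encode the same class into IOMMU
 * PTEs.  A negative (not-found) result is rejected by the range check in
 * __get_pgprot().
 */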

static int __init msm_iommu_init(void)
{
	if (!msm_soc_version_supports_iommu_v0())
		return -ENODEV;

	msm_iommu_lock_initialize();

	setup_iommu_tex_classes();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");