blob: a17a4e8872f8205af34310ecfac3f2686afaebbc [file] [log] [blame]
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/errno.h>
18#include <linux/io.h>
19#include <linux/interrupt.h>
20#include <linux/list.h>
21#include <linux/mutex.h>
22#include <linux/slab.h>
23#include <linux/iommu.h>
24#include <linux/clk.h>
25#include <linux/scatterlist.h>
26#include <linux/of.h>
27#include <linux/of_device.h>
Michael Bohan1834e7f2013-01-18 17:16:38 -080028#include <linux/kmemleak.h>
Laura Abbott0d135652012-10-04 12:59:03 -070029
30#include <asm/sizes.h>
31
Olav Haugan64ffdf32013-01-24 17:20:24 -080032#include <mach/iommu_perfmon.h>
Olav Haugane6d01ef2013-01-25 16:55:44 -080033#include <mach/iommu_hw-v1.h>
Olav Haugan090614f2013-03-22 12:14:18 -070034#include <mach/msm_iommu_priv.h>
Laura Abbott0d135652012-10-04 12:59:03 -070035#include <mach/iommu.h>
36#include <mach/scm.h>
37
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)

/* commands for SCM_SVC_MP (secure map/unmap and page-table services) */
#define IOMMU_SECURE_CFG 2
#define IOMMU_SECURE_PTBL_SIZE 3
#define IOMMU_SECURE_PTBL_INIT 4
#define IOMMU_SECURE_MAP 6
#define IOMMU_SECURE_UNMAP 7
#define IOMMU_SECURE_MAP2 0x0B
#define IOMMU_SECURE_UNMAP2 0x0C
/* flag in msm_scm_{map2,unmap2}_req.flags: ask TZ to invalidate the TLB */
#define IOMMU_TLBINVAL_FLAG 0x00000001

/* commands for SCM_SVC_UTIL */
#define IOMMU_DUMP_SMMU_FAULT_REGS 0X0C
53
/*
 * Lock/power/clock callbacks shared with the main MSM IOMMU driver;
 * installed via msm_iommu_sec_set_access_ops() before any use.
 */
static struct iommu_access_ops *iommu_access_ops;
Laura Abbott0d135652012-10-04 12:59:03 -070055
/* Physical-address list descriptor passed to the secure world via SCM. */
struct msm_scm_paddr_list {
	unsigned int list;	/* physical address of the address array */
	unsigned int list_size;	/* number of entries in that array */
	unsigned int size;	/* size of each chunk, in bytes */
};
61
/* Identifies the target IOMMU instance, context bank and IOVA range. */
struct msm_scm_mapping_info {
	unsigned int id;	/* secure IOMMU instance id (sec_id) */
	unsigned int ctx_id;	/* context bank number */
	unsigned int va;	/* start of the IOVA range */
	unsigned int size;	/* length of the range, in bytes */
};
68
/* Request payload for the IOMMU_SECURE_MAP2 SCM call. */
struct msm_scm_map2_req {
	struct msm_scm_paddr_list plist;	/* what to map */
	struct msm_scm_mapping_info info;	/* where to map it */
	unsigned int flags;			/* e.g. IOMMU_TLBINVAL_FLAG */
};
74
/* Request payload for the IOMMU_SECURE_UNMAP2 SCM call. */
struct msm_scm_unmap2_req {
	struct msm_scm_mapping_info info;	/* range to unmap */
	unsigned int flags;			/* e.g. IOMMU_TLBINVAL_FLAG */
};
79
/*
 * Register dump returned by the IOMMU_DUMP_SMMU_FAULT_REGS SCM call.
 * Each register value is paired with the address it was read from
 * (presumably so the dump is self-describing -- TODO confirm against
 * the secure-world interface).
 */
struct msm_scm_fault_regs_dump {
	uint32_t dump_size;	/* NOTE(review): size written by TZ? confirm */
	uint32_t fsr_addr;
	uint32_t fsr;		/* fault status */
	uint32_t far0_addr;
	uint32_t far0;		/* fault address */
	uint32_t far1_addr;
	uint32_t far1;
	uint32_t par0_addr;
	uint32_t par0;		/* physical address from translation */
	uint32_t par1_addr;
	uint32_t par1;
	uint32_t fsyn0_addr;
	uint32_t fsyn0;		/* fault syndrome */
	uint32_t fsyn1_addr;
	uint32_t fsyn1;
	uint32_t ttbr0_addr;
	uint32_t ttbr0;
	uint32_t ttbr1_addr;
	uint32_t ttbr1;
	uint32_t ttbcr_addr;
	uint32_t ttbcr;
	uint32_t sctlr_addr;
	uint32_t sctlr;
	uint32_t actlr_addr;
	uint32_t actlr;
	uint32_t prrr_addr;
	uint32_t prrr;
	uint32_t nmrr_addr;
	uint32_t nmrr;
};
111
/*
 * Install the shared lock/power/clock callbacks. Must be called before
 * any of the iommu_ops below run, since they dereference the pointer
 * unconditionally.
 */
void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops)
{
	iommu_access_ops = access_ops;
}
116
Mitchel Humpherysf3b50912013-05-21 17:46:04 -0700117static int msm_iommu_dump_fault_regs(int smmu_id, int cb_num,
118 struct msm_scm_fault_regs_dump *regs)
119{
120 int ret;
121
122 struct msm_scm_fault_regs_dump_req {
123 uint32_t id;
124 uint32_t cb_num;
125 phys_addr_t buff;
126 uint32_t len;
127 } req_info;
128 int resp;
129
130 req_info.id = smmu_id;
131 req_info.cb_num = cb_num;
132 req_info.buff = virt_to_phys(regs);
133 req_info.len = sizeof(*regs);
134
135 ret = scm_call(SCM_SVC_UTIL, IOMMU_DUMP_SMMU_FAULT_REGS,
136 &req_info, sizeof(req_info), &resp, 1);
137
138 return ret;
139}
140
/*
 * IRQ handler for page faults from a secure context bank.
 *
 * HLOS cannot read the fault registers of a secure CB directly, so they
 * are fetched through an SCM call into a scratch buffer and reported via
 * report_iommu_fault() / print_ctx_regs().
 *
 * NOTE(review): kmalloc(GFP_KERNEL) and the sleeping scm_call() imply
 * this must run as a threaded IRQ handler -- confirm at the
 * request_irq()/request_threaded_irq() site.
 */
irqreturn_t msm_iommu_secure_fault_handler_v2(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct msm_iommu_drvdata *drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_scm_fault_regs_dump *regs;
	int tmp, ret = IRQ_HANDLED;

	iommu_access_ops->iommu_lock_acquire();

	BUG_ON(!pdev);

	/* parent platform device holds the IOMMU instance data */
	drvdata = dev_get_drvdata(pdev->dev.parent);
	BUG_ON(!drvdata);

	ctx_drvdata = dev_get_drvdata(&pdev->dev);
	BUG_ON(!ctx_drvdata);

	regs = kmalloc(sizeof(*regs), GFP_KERNEL);
	if (!regs) {
		pr_err("%s: Couldn't allocate memory\n", __func__);
		goto lock_release;
	}

	/* No context attached means the IOMMU is powered down */
	if (!drvdata->ctx_attach_count) {
		pr_err("Unexpected IOMMU page fault from secure context bank!\n");
		pr_err("name = %s\n", drvdata->name);
		pr_err("Power is OFF. Unable to read page fault information\n");
		/*
		 * We cannot determine which context bank caused the issue so
		 * we just return handled here to ensure IRQ handler code is
		 * happy
		 */
		goto free_regs;
	}

	/* Clocks must be on while the secure world reads the SMMU regs */
	iommu_access_ops->iommu_clk_on(drvdata);
	tmp = msm_iommu_dump_fault_regs(drvdata->sec_id,
					ctx_drvdata->num, regs);
	iommu_access_ops->iommu_clk_off(drvdata);

	if (tmp) {
		pr_err("%s: Couldn't dump fault registers!\n", __func__);
		goto free_regs;
	} else if (regs->fsr) {
		/* Non-zero FSR: this context bank really faulted */
		struct msm_iommu_context_regs ctx_regs = {
			.far = regs->far0,
			.par = regs->par0,
			.fsr = regs->fsr,
			.fsynr0 = regs->fsyn0,
			.fsynr1 = regs->fsyn1,
			.ttbr0 = regs->ttbr0,
			.ttbr1 = regs->ttbr1,
			.sctlr = regs->sctlr,
			.actlr = regs->actlr,
			.prrr = regs->prrr,
			.nmrr = regs->nmrr,
		};

		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			tmp = -ENOSYS;
		} else {
			/* Give the domain's fault callback first crack */
			tmp = report_iommu_fault(ctx_drvdata->attached_domain,
				&ctx_drvdata->pdev->dev,
				regs->far0, 0);
		}

		/* if the fault wasn't handled by someone else: */
		if (tmp == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault from secure context bank!\n");
			pr_err("name = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name,
							ctx_drvdata->num);
			pr_err("Interesting registers:\n");
			print_ctx_regs(&ctx_regs);
		}
	} else {
		/* FSR clear: fault was not ours */
		ret = IRQ_NONE;
	}
free_regs:
	kfree(regs);
lock_release:
	iommu_access_ops->iommu_lock_release();
	return ret;
}
227
Laura Abbott0d135652012-10-04 12:59:03 -0700228static int msm_iommu_sec_ptbl_init(void)
229{
230 struct device_node *np;
231 struct msm_scm_ptbl_init {
232 unsigned int paddr;
233 unsigned int size;
234 unsigned int spare;
235 } pinit;
236 unsigned int *buf;
Mitchel Humpherys637cc532012-12-12 16:50:58 -0800237 int psize[2] = {0, 0};
Laura Abbott0d135652012-10-04 12:59:03 -0700238 unsigned int spare;
Mitchel Humpherys637cc532012-12-12 16:50:58 -0800239 int ret, ptbl_ret = 0;
Laura Abbott0d135652012-10-04 12:59:03 -0700240
Olav Haugan0e22c482013-01-28 17:39:36 -0800241 for_each_compatible_node(np, NULL, "qcom,msm-smmu-v1")
Laura Abbott0d135652012-10-04 12:59:03 -0700242 if (of_find_property(np, "qcom,iommu-secure-id", NULL))
243 break;
244
245 if (!np)
246 return 0;
247
248 of_node_put(np);
Syed Rameez Mustafa6ab6af32013-03-18 12:53:11 -0700249 ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_SIZE, &spare,
Laura Abbott0d135652012-10-04 12:59:03 -0700250 sizeof(spare), psize, sizeof(psize));
251 if (ret) {
252 pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
253 goto fail;
254 }
255
256 if (psize[1]) {
257 pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
258 goto fail;
259 }
260
261 buf = kmalloc(psize[0], GFP_KERNEL);
262 if (!buf) {
263 pr_err("%s: Failed to allocate %d bytes for PTBL\n",
264 __func__, psize[0]);
265 ret = -ENOMEM;
266 goto fail;
267 }
268
269 pinit.paddr = virt_to_phys(buf);
270 pinit.size = psize[0];
271
Syed Rameez Mustafa6ab6af32013-03-18 12:53:11 -0700272 ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_INIT, &pinit,
Laura Abbott0d135652012-10-04 12:59:03 -0700273 sizeof(pinit), &ptbl_ret, sizeof(ptbl_ret));
274 if (ret) {
275 pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
276 goto fail_mem;
277 }
278 if (ptbl_ret) {
279 pr_err("scm call IOMMU_SECURE_PTBL_INIT extended ret fail\n");
280 goto fail_mem;
281 }
282
Michael Bohan1834e7f2013-01-18 17:16:38 -0800283 kmemleak_not_leak(buf);
284
Laura Abbott0d135652012-10-04 12:59:03 -0700285 return 0;
286
287fail_mem:
288 kfree(buf);
289fail:
290 return ret;
291}
292
Laura Abbottf4daa692012-10-10 19:31:53 -0700293int msm_iommu_sec_program_iommu(int sec_id)
Laura Abbott0d135652012-10-04 12:59:03 -0700294{
295 struct msm_scm_sec_cfg {
296 unsigned int id;
297 unsigned int spare;
298 } cfg;
Mitchel Humpherys637cc532012-12-12 16:50:58 -0800299 int ret, scm_ret = 0;
Laura Abbott0d135652012-10-04 12:59:03 -0700300
301 cfg.id = sec_id;
302
Syed Rameez Mustafa6ab6af32013-03-18 12:53:11 -0700303 ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_CFG, &cfg, sizeof(cfg),
Laura Abbott0d135652012-10-04 12:59:03 -0700304 &scm_ret, sizeof(scm_ret));
305 if (ret || scm_ret) {
306 pr_err("scm call IOMMU_SECURE_CFG failed\n");
307 return ret ? ret : -EINVAL;
308 }
309
310 return ret;
311}
312
313static int msm_iommu_sec_ptbl_map(struct msm_iommu_drvdata *iommu_drvdata,
314 struct msm_iommu_ctx_drvdata *ctx_drvdata,
315 unsigned long va, phys_addr_t pa, size_t len)
316{
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700317 struct msm_scm_map2_req map;
Laura Abbott0d135652012-10-04 12:59:03 -0700318 int ret = 0;
319
320 map.plist.list = virt_to_phys(&pa);
321 map.plist.list_size = 1;
322 map.plist.size = len;
323 map.info.id = iommu_drvdata->sec_id;
324 map.info.ctx_id = ctx_drvdata->num;
325 map.info.va = va;
326 map.info.size = len;
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700327 map.flags = IOMMU_TLBINVAL_FLAG;
Laura Abbott0d135652012-10-04 12:59:03 -0700328
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700329 if (scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP2, &map, sizeof(map), &ret,
Laura Abbott0d135652012-10-04 12:59:03 -0700330 sizeof(ret)))
331 return -EINVAL;
332 if (ret)
333 return -EINVAL;
334
335 return 0;
336}
337
/*
 * Physical address of a scatterlist entry.
 *
 * sg_dma_address() is preferred so that carveout regions with no
 * backing struct page can still be mapped; fall back to sg_phys()
 * when no DMA address was set.
 */
static unsigned int get_phys_addr(struct scatterlist *sg)
{
	unsigned int pa = sg_dma_address(sg);

	return pa ? pa : sg_phys(sg);
}
350
351static int msm_iommu_sec_ptbl_map_range(struct msm_iommu_drvdata *iommu_drvdata,
352 struct msm_iommu_ctx_drvdata *ctx_drvdata,
353 unsigned long va, struct scatterlist *sg, size_t len)
354{
355 struct scatterlist *sgiter;
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700356 struct msm_scm_map2_req map;
Laura Abbott0d135652012-10-04 12:59:03 -0700357 unsigned int *pa_list = 0;
358 unsigned int pa, cnt;
359 unsigned int offset = 0, chunk_offset = 0;
360 int ret, scm_ret;
361
362 map.info.id = iommu_drvdata->sec_id;
363 map.info.ctx_id = ctx_drvdata->num;
364 map.info.va = va;
365 map.info.size = len;
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700366 map.flags = IOMMU_TLBINVAL_FLAG;
Laura Abbott0d135652012-10-04 12:59:03 -0700367
368 if (sg->length == len) {
369 pa = get_phys_addr(sg);
370 map.plist.list = virt_to_phys(&pa);
371 map.plist.list_size = 1;
372 map.plist.size = len;
373 } else {
374 sgiter = sg;
375 cnt = sg->length / SZ_1M;
376 while ((sgiter = sg_next(sgiter)))
377 cnt += sgiter->length / SZ_1M;
378
379 pa_list = kmalloc(cnt * sizeof(*pa_list), GFP_KERNEL);
380 if (!pa_list)
381 return -ENOMEM;
382
383 sgiter = sg;
384 cnt = 0;
385 pa = get_phys_addr(sgiter);
386 while (offset < len) {
387 pa += chunk_offset;
388 pa_list[cnt] = pa;
389 chunk_offset += SZ_1M;
390 offset += SZ_1M;
391 cnt++;
392
393 if (chunk_offset >= sgiter->length && offset < len) {
394 chunk_offset = 0;
395 sgiter = sg_next(sgiter);
396 pa = get_phys_addr(sgiter);
397 }
398 }
399
400 map.plist.list = virt_to_phys(pa_list);
401 map.plist.list_size = cnt;
402 map.plist.size = SZ_1M;
403 }
404
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700405 ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP2, &map, sizeof(map),
Laura Abbott0d135652012-10-04 12:59:03 -0700406 &scm_ret, sizeof(scm_ret));
407 kfree(pa_list);
408 return ret;
409}
410
411static int msm_iommu_sec_ptbl_unmap(struct msm_iommu_drvdata *iommu_drvdata,
412 struct msm_iommu_ctx_drvdata *ctx_drvdata,
413 unsigned long va, size_t len)
414{
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700415 struct msm_scm_unmap2_req unmap;
Laura Abbott0d135652012-10-04 12:59:03 -0700416 int ret, scm_ret;
417
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700418 unmap.info.id = iommu_drvdata->sec_id;
419 unmap.info.ctx_id = ctx_drvdata->num;
420 unmap.info.va = va;
421 unmap.info.size = len;
422 unmap.flags = IOMMU_TLBINVAL_FLAG;
Laura Abbott0d135652012-10-04 12:59:03 -0700423
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700424 ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_UNMAP2, &unmap, sizeof(unmap),
Laura Abbott0d135652012-10-04 12:59:03 -0700425 &scm_ret, sizeof(scm_ret));
426 return ret;
427}
428
Laura Abbott0d135652012-10-04 12:59:03 -0700429static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
430{
Olav Haugan090614f2013-03-22 12:14:18 -0700431 struct msm_iommu_priv *priv;
Laura Abbott0d135652012-10-04 12:59:03 -0700432
433 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
434 if (!priv)
435 return -ENOMEM;
436
437 INIT_LIST_HEAD(&priv->list_attached);
438 domain->priv = priv;
439 return 0;
440}
441
442static void msm_iommu_domain_destroy(struct iommu_domain *domain)
443{
Olav Haugan090614f2013-03-22 12:14:18 -0700444 struct msm_iommu_priv *priv;
Laura Abbott0d135652012-10-04 12:59:03 -0700445
Olav Hauganeece7e52013-04-02 10:22:21 -0700446 iommu_access_ops->iommu_lock_acquire();
Laura Abbott0d135652012-10-04 12:59:03 -0700447 priv = domain->priv;
448 domain->priv = NULL;
449
450 kfree(priv);
Olav Hauganeece7e52013-04-02 10:22:21 -0700451 iommu_access_ops->iommu_lock_release();
Laura Abbott0d135652012-10-04 12:59:03 -0700452}
453
/*
 * Attach a context-bank device to a secure domain.
 *
 * On the first attach of an IOMMU instance (ctx_attach_count == 0) the
 * secure world programs the SMMU and HLOS writes the BFB performance
 * settings; later attaches only link the context into the domain.
 *
 * Returns 0 on success or a negative errno.
 */
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	iommu_access_ops->iommu_lock_acquire();

	priv = domain->priv;
	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto fail;
	}

	/* Context already attached to some domain? */
	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	/* ...or already on this domain's own list? */
	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = iommu_access_ops->iommu_power_on(iommu_drvdata);
	if (ret)
		goto fail;

	/* We can only do this once */
	if (!iommu_drvdata->ctx_attach_count) {
		ret = iommu_access_ops->iommu_clk_on(iommu_drvdata);
		if (ret) {
			iommu_access_ops->iommu_power_off(iommu_drvdata);
			goto fail;
		}

		ret = msm_iommu_sec_program_iommu(iommu_drvdata->sec_id);

		/* bfb settings are always programmed by HLOS */
		program_iommu_bfb_settings(iommu_drvdata->base,
					   iommu_drvdata->bfb_settings);

		/*
		 * NOTE(review): clocks are dropped before ret from
		 * msm_iommu_sec_program_iommu() is checked, so the BFB
		 * writes above run even when that call failed -- confirm
		 * this ordering is intentional.
		 */
		iommu_access_ops->iommu_clk_off(iommu_drvdata);
		if (ret) {
			iommu_access_ops->iommu_power_off(iommu_drvdata);
			goto fail;
		}
	}

	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ctx_drvdata->attached_domain = domain;
	++iommu_drvdata->ctx_attach_count;

	iommu_access_ops->iommu_lock_release();

	/* Notify the perf-monitor layer outside the driver lock */
	msm_iommu_attached(dev->parent);
	return ret;
fail:
	iommu_access_ops->iommu_lock_release();
	return ret;
}
525
526static void msm_iommu_detach_dev(struct iommu_domain *domain,
527 struct device *dev)
528{
529 struct msm_iommu_drvdata *iommu_drvdata;
530 struct msm_iommu_ctx_drvdata *ctx_drvdata;
531
Olav Haugan64ffdf32013-01-24 17:20:24 -0800532 msm_iommu_detached(dev->parent);
533
Olav Hauganeece7e52013-04-02 10:22:21 -0700534 iommu_access_ops->iommu_lock_acquire();
Laura Abbott0d135652012-10-04 12:59:03 -0700535 if (!dev)
536 goto fail;
537
538 iommu_drvdata = dev_get_drvdata(dev->parent);
539 ctx_drvdata = dev_get_drvdata(dev);
540 if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
541 goto fail;
542
543 list_del_init(&ctx_drvdata->attached_elm);
544 ctx_drvdata->attached_domain = NULL;
545
Olav Hauganeece7e52013-04-02 10:22:21 -0700546 iommu_access_ops->iommu_power_off(iommu_drvdata);
Olav Haugane3885392013-03-06 16:22:53 -0800547 BUG_ON(iommu_drvdata->ctx_attach_count == 0);
548 --iommu_drvdata->ctx_attach_count;
Laura Abbott0d135652012-10-04 12:59:03 -0700549fail:
Olav Hauganeece7e52013-04-02 10:22:21 -0700550 iommu_access_ops->iommu_lock_release();
Laura Abbott0d135652012-10-04 12:59:03 -0700551}
552
553static int get_drvdata(struct iommu_domain *domain,
554 struct msm_iommu_drvdata **iommu_drvdata,
555 struct msm_iommu_ctx_drvdata **ctx_drvdata)
556{
Olav Haugan090614f2013-03-22 12:14:18 -0700557 struct msm_iommu_priv *priv = domain->priv;
Laura Abbott0d135652012-10-04 12:59:03 -0700558 struct msm_iommu_ctx_drvdata *ctx;
559
560 list_for_each_entry(ctx, &priv->list_attached, attached_elm) {
561 if (ctx->attached_domain == domain)
562 break;
563 }
564
565 if (ctx->attached_domain != domain)
566 return -EINVAL;
567
568 *ctx_drvdata = ctx;
569 *iommu_drvdata = dev_get_drvdata(ctx->pdev->dev.parent);
570 return 0;
571}
572
573static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
574 phys_addr_t pa, size_t len, int prot)
575{
576 struct msm_iommu_drvdata *iommu_drvdata;
577 struct msm_iommu_ctx_drvdata *ctx_drvdata;
578 int ret = 0;
579
Olav Hauganeece7e52013-04-02 10:22:21 -0700580 iommu_access_ops->iommu_lock_acquire();
Laura Abbott0d135652012-10-04 12:59:03 -0700581
582 ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
583 if (ret)
584 goto fail;
585
Olav Hauganeece7e52013-04-02 10:22:21 -0700586 iommu_access_ops->iommu_clk_on(iommu_drvdata);
Laura Abbott0d135652012-10-04 12:59:03 -0700587 ret = msm_iommu_sec_ptbl_map(iommu_drvdata, ctx_drvdata,
588 va, pa, len);
Olav Hauganeece7e52013-04-02 10:22:21 -0700589 iommu_access_ops->iommu_clk_off(iommu_drvdata);
Laura Abbott0d135652012-10-04 12:59:03 -0700590fail:
Olav Hauganeece7e52013-04-02 10:22:21 -0700591 iommu_access_ops->iommu_lock_release();
Laura Abbott0d135652012-10-04 12:59:03 -0700592 return ret;
593}
594
595static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
596 size_t len)
597{
598 struct msm_iommu_drvdata *iommu_drvdata;
599 struct msm_iommu_ctx_drvdata *ctx_drvdata;
600 int ret = -ENODEV;
601
Olav Hauganeece7e52013-04-02 10:22:21 -0700602 iommu_access_ops->iommu_lock_acquire();
Laura Abbott0d135652012-10-04 12:59:03 -0700603
604 ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
605 if (ret)
606 goto fail;
607
Olav Hauganeece7e52013-04-02 10:22:21 -0700608 iommu_access_ops->iommu_clk_on(iommu_drvdata);
Laura Abbott0d135652012-10-04 12:59:03 -0700609 ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata,
610 va, len);
Olav Hauganeece7e52013-04-02 10:22:21 -0700611 iommu_access_ops->iommu_clk_off(iommu_drvdata);
Laura Abbott0d135652012-10-04 12:59:03 -0700612fail:
Olav Hauganeece7e52013-04-02 10:22:21 -0700613 iommu_access_ops->iommu_lock_release();
Laura Abbott0d135652012-10-04 12:59:03 -0700614
615 /* the IOMMU API requires us to return how many bytes were unmapped */
616 len = ret ? 0 : len;
617 return len;
618}
619
620static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
621 struct scatterlist *sg, unsigned int len,
622 int prot)
623{
624 int ret;
625 struct msm_iommu_drvdata *iommu_drvdata;
626 struct msm_iommu_ctx_drvdata *ctx_drvdata;
627
Olav Hauganeece7e52013-04-02 10:22:21 -0700628 iommu_access_ops->iommu_lock_acquire();
Laura Abbott0d135652012-10-04 12:59:03 -0700629
630 ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
631 if (ret)
632 goto fail;
Olav Hauganeece7e52013-04-02 10:22:21 -0700633 iommu_access_ops->iommu_clk_on(iommu_drvdata);
Laura Abbott0d135652012-10-04 12:59:03 -0700634 ret = msm_iommu_sec_ptbl_map_range(iommu_drvdata, ctx_drvdata,
635 va, sg, len);
Olav Hauganeece7e52013-04-02 10:22:21 -0700636 iommu_access_ops->iommu_clk_off(iommu_drvdata);
Laura Abbott0d135652012-10-04 12:59:03 -0700637fail:
Olav Hauganeece7e52013-04-02 10:22:21 -0700638 iommu_access_ops->iommu_lock_release();
Laura Abbott0d135652012-10-04 12:59:03 -0700639 return ret;
640}
641
642
643static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
644 unsigned int len)
645{
646 struct msm_iommu_drvdata *iommu_drvdata;
647 struct msm_iommu_ctx_drvdata *ctx_drvdata;
648 int ret;
649
Olav Hauganeece7e52013-04-02 10:22:21 -0700650 iommu_access_ops->iommu_lock_acquire();
Laura Abbott0d135652012-10-04 12:59:03 -0700651
652 ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
653 if (ret)
654 goto fail;
655
Olav Hauganeece7e52013-04-02 10:22:21 -0700656 iommu_access_ops->iommu_clk_on(iommu_drvdata);
Laura Abbott0d135652012-10-04 12:59:03 -0700657 ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata, va, len);
Olav Hauganeece7e52013-04-02 10:22:21 -0700658 iommu_access_ops->iommu_clk_off(iommu_drvdata);
Laura Abbott0d135652012-10-04 12:59:03 -0700659
660fail:
Olav Hauganeece7e52013-04-02 10:22:21 -0700661 iommu_access_ops->iommu_lock_release();
Laura Abbott0d135652012-10-04 12:59:03 -0700662 return 0;
663}
664
/*
 * iova_to_phys is not supported for secure domains: the page tables
 * live in (and are managed by) the secure world, so always report
 * "no translation".
 */
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	return 0;
}
670
/* No optional IOMMU capabilities are advertised for secure domains. */
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}
676
/*
 * The page-table base is owned by the secure world and is not visible
 * to HLOS; report 0.
 */
static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	return 0;
}
681
/*
 * iommu_ops for secure SMMU domains; registered on
 * msm_iommu_sec_bus_type in msm_iommu_sec_init().
 */
static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};
696
697static int __init msm_iommu_sec_init(void)
698{
699 int ret;
700
701 ret = bus_register(&msm_iommu_sec_bus_type);
702 if (ret)
703 goto fail;
704
705 bus_set_iommu(&msm_iommu_sec_bus_type, &msm_iommu_ops);
706 ret = msm_iommu_sec_ptbl_init();
707fail:
708 return ret;
709}
710
/* subsys_initcall: run before ordinary device/module initcalls so the
 * IOMMU is ready when client drivers probe. */
subsys_initcall(msm_iommu_sec_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU Secure Driver");