/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/kmemleak.h>

#include <asm/sizes.h>

#include <mach/iommu_perfmon.h>
#include <mach/iommu_hw-v1.h>
#include <mach/msm_iommu_priv.h>
#include <mach/iommu.h>
#include <mach/scm.h>
#include <mach/memory.h>

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
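/*
 * Note: the IOMMU core consults this bitmap when splitting iommu_map()
 * requests, so e.g. a well-aligned 2M request arrives here as two 1M
 * map calls; msm_iommu_map_range() below bypasses that splitting and
 * chunks scatterlists itself.
 */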

/* commands for SCM_SVC_MP */
#define IOMMU_SECURE_CFG	2
#define IOMMU_SECURE_PTBL_SIZE	3
#define IOMMU_SECURE_PTBL_INIT	4
#define IOMMU_SET_CP_POOL_SIZE	5
#define IOMMU_SECURE_MAP	6
#define IOMMU_SECURE_UNMAP	7
#define IOMMU_SECURE_MAP2	0x0B
#define IOMMU_SECURE_UNMAP2	0x0C
#define IOMMU_TLBINVAL_FLAG	0x00000001

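/*
 * All secure-environment traffic below goes through scm_call(), which
 * in this tree copies a request struct in and an optional response
 * word back out:
 *
 *	scm_call(svc_id, cmd_id, &req, sizeof(req), &resp, sizeof(resp));
 *
 * The msm_scm_* structs below are the wire format the secure side
 * expects, so their layout and field order must be preserved.
 */
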
/* commands for SCM_SVC_UTIL */
#define IOMMU_DUMP_SMMU_FAULT_REGS	0x0C
#define MAXIMUM_VIRT_SIZE	(300 * SZ_1M)


#define MAKE_CP_VERSION(major, minor, patch) \
	((((major) & 0x3FF) << 22) | (((minor) & 0x3FF) << 12) | ((patch) & 0xFFF))
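/*
 * Worked example: MAKE_CP_VERSION(1, 1, 1) packs to
 * (1 << 22) | (1 << 12) | 1 == 0x00401001; msm_iommu_sec_ptbl_init()
 * compares scm_get_feat_version(SCM_SVC_MP) against this to decide
 * whether the IOMMU_SET_CP_POOL_SIZE call is supported.
 */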

static struct iommu_access_ops *iommu_access_ops;

static const struct of_device_id msm_smmu_list[] = {
	{ .compatible = "qcom,msm-smmu-v1", },
	{ .compatible = "qcom,msm-smmu-v2", },
	{ }
};

struct msm_scm_paddr_list {
	unsigned int list;
	unsigned int list_size;
	unsigned int size;
};

struct msm_scm_mapping_info {
	unsigned int id;
	unsigned int ctx_id;
	unsigned int va;
	unsigned int size;
};

struct msm_scm_map2_req {
	struct msm_scm_paddr_list plist;
	struct msm_scm_mapping_info info;
	unsigned int flags;
};

struct msm_scm_unmap2_req {
	struct msm_scm_mapping_info info;
	unsigned int flags;
};

struct msm_cp_pool_size {
	uint32_t size;
	uint32_t spare;
};

#define NUM_DUMP_REGS 14
/*
 * some space to allow the number of registers returned by the secure
 * environment to grow
 */
#define WIGGLE_ROOM (NUM_DUMP_REGS * 2)
/* Each entry is a (reg_addr, reg_val) pair, hence the * 2 */
#define SEC_DUMP_SIZE ((NUM_DUMP_REGS * 2) + WIGGLE_ROOM)
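/*
 * Worked size check: 14 registers give 28 (reg_addr, reg_val) words,
 * plus 28 words of wiggle room, so SEC_DUMP_SIZE is 56 uint32_t
 * entries and msm_scm_fault_regs_dump below is 228 bytes including
 * dump_size.
 */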

struct msm_scm_fault_regs_dump {
	uint32_t dump_size;
	uint32_t dump_data[SEC_DUMP_SIZE];
} __packed;

void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops)
{
	iommu_access_ops = access_ops;
}
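
/*
 * Every other entry point in this file dereferences iommu_access_ops
 * without a NULL check, so the bus/platform code that owns the secure
 * SMMUs must install its ops here before any secure domain is used.
 */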

static int msm_iommu_dump_fault_regs(int smmu_id, int cb_num,
				struct msm_scm_fault_regs_dump *regs)
{
	int ret;

	struct msm_scm_fault_regs_dump_req {
		uint32_t id;
		uint32_t cb_num;
		phys_addr_t buff;
		uint32_t len;
	} req_info;
	int resp;

	req_info.id = smmu_id;
	req_info.cb_num = cb_num;
	req_info.buff = virt_to_phys(regs);
	req_info.len = sizeof(*regs);

	ret = scm_call(SCM_SVC_UTIL, IOMMU_DUMP_SMMU_FAULT_REGS,
		&req_info, sizeof(req_info), &resp, 1);

	invalidate_caches((unsigned long) regs, sizeof(*regs),
		(unsigned long)virt_to_phys(regs));

	return ret;
}

#define EXTRACT_DUMP_REG_KEY(addr, ctx) ((addr) & ((1 << CTX_SHIFT) - 1))
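/*
 * Hypothetical example, assuming CTX_SHIFT is 12 (4K of register space
 * per context bank): a dumped address of 0x2058 in context bank 2
 * masks down to the key 0x058, the bank-relative offset that
 * dump_regs_tbl[] is keyed on. The ctx argument is currently unused.
 */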

static int msm_iommu_reg_dump_to_regs(
	struct msm_iommu_context_reg ctx_regs[],
	struct msm_scm_fault_regs_dump *dump, int cb_num)
{
	int i, j, ret = 0;
	const uint32_t nvals = (dump->dump_size / sizeof(uint32_t));
	uint32_t *it = (uint32_t *) dump->dump_data;
	const uint32_t * const end = ((uint32_t *) dump) + nvals;

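	/*
	 * dump_data is a flat run of (reg_addr, reg_val) word pairs,
	 * e.g. { 0x2058, 0x40000002, 0x2060, 0x1000, ... } (values made
	 * up for illustration); walk it two words at a time and file
	 * each value under the matching dump_regs_tbl[] entry.
	 */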
	for (i = 1; it < end; it += 2, i += 2) {
		uint32_t addr = *it;
		uint32_t val = *(it + 1);
		struct msm_iommu_context_reg *reg = NULL;

		for (j = 0; j < MAX_DUMP_REGS; ++j) {
			if (dump_regs_tbl[j].key ==
				EXTRACT_DUMP_REG_KEY(addr, cb_num)) {
				reg = &ctx_regs[j];
				break;
			}
		}

		if (reg == NULL) {
			pr_debug("Unknown register in secure CB dump: %x (%x)\n",
				addr, EXTRACT_DUMP_REG_KEY(addr, cb_num));
			continue;
		}

		if (reg->valid) {
			WARN(1, "Invalid (repeated?) register in CB dump: %x\n",
				addr);
			continue;
		}

		reg->val = val;
		reg->valid = true;
	}

	if (i != nvals) {
		pr_err("Invalid dump! %d != %d\n", i, nvals);
		ret = 1;
		goto out;
	}

	for (i = 0; i < MAX_DUMP_REGS; ++i) {
		if (!ctx_regs[i].valid) {
			if (dump_regs_tbl[i].must_be_present) {
				pr_err("Register missing from dump: %s, %lx\n",
					dump_regs_tbl[i].name,
					dump_regs_tbl[i].key);
				ret = 1;
			}
			ctx_regs[i].val = 0;
		}
	}

out:
	return ret;
}

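/*
 * Presumably wired up as the threaded half of the context-bank fault
 * interrupt: the iommu_access_ops lock can sleep and the kmalloc below
 * uses GFP_KERNEL, neither of which is safe in hard-IRQ context.
 */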
irqreturn_t msm_iommu_secure_fault_handler_v2(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct msm_iommu_drvdata *drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_scm_fault_regs_dump *regs;
	int tmp;
	irqreturn_t ret = IRQ_HANDLED;

	iommu_access_ops->iommu_lock_acquire(0);

	BUG_ON(!pdev);

	drvdata = dev_get_drvdata(pdev->dev.parent);
	BUG_ON(!drvdata);

	ctx_drvdata = dev_get_drvdata(&pdev->dev);
	BUG_ON(!ctx_drvdata);

	regs = kmalloc(sizeof(*regs), GFP_KERNEL);
	if (!regs) {
		pr_err("%s: Couldn't allocate memory\n", __func__);
		goto lock_release;
	}

	if (!drvdata->ctx_attach_count) {
		pr_err("Unexpected IOMMU page fault from secure context bank!\n");
		pr_err("name = %s\n", drvdata->name);
		pr_err("Power is OFF. Unable to read page fault information\n");
		/*
		 * We cannot determine which context bank caused the issue so
		 * we just return handled here to ensure IRQ handler code is
		 * happy
		 */
		goto free_regs;
	}

	iommu_access_ops->iommu_clk_on(drvdata);
	tmp = msm_iommu_dump_fault_regs(drvdata->sec_id,
					ctx_drvdata->num, regs);
	iommu_access_ops->iommu_clk_off(drvdata);

	if (tmp) {
		pr_err("%s: Couldn't dump fault registers (%d) %s, ctx: %d\n",
			__func__, tmp, drvdata->name, ctx_drvdata->num);
		goto free_regs;
	} else {
		struct msm_iommu_context_reg ctx_regs[MAX_DUMP_REGS];

		memset(ctx_regs, 0, sizeof(ctx_regs));
		tmp = msm_iommu_reg_dump_to_regs(ctx_regs, regs,
						ctx_drvdata->num);
		if (!tmp && ctx_regs[DUMP_REG_FSR].val) {
			if (!ctx_drvdata->attached_domain) {
				pr_err("Bad domain in interrupt handler\n");
				tmp = -ENOSYS;
			} else {
				tmp = report_iommu_fault(
					ctx_drvdata->attached_domain,
					&ctx_drvdata->pdev->dev,
					COMBINE_DUMP_REG(
						ctx_regs[DUMP_REG_FAR1].val,
						ctx_regs[DUMP_REG_FAR0].val),
					0);
			}

			/* if the fault wasn't handled by someone else: */
			if (tmp == -ENOSYS) {
				pr_err("Unexpected IOMMU page fault from secure context bank!\n");
				pr_err("name = %s\n", drvdata->name);
				pr_err("context = %s (%d)\n", ctx_drvdata->name,
					ctx_drvdata->num);
				pr_err("Interesting registers:\n");
				print_ctx_regs(ctx_regs);
			}
		} else {
			ret = IRQ_NONE;
		}
	}
free_regs:
	kfree(regs);
lock_release:
	iommu_access_ops->iommu_lock_release(0);
	return ret;
}

static int msm_iommu_sec_ptbl_init(void)
{
	struct device_node *np;
	struct msm_scm_ptbl_init {
		unsigned int paddr;
		unsigned int size;
		unsigned int spare;
	} pinit;
	unsigned int *buf;
	int psize[2] = {0, 0};
	unsigned int spare;
	int ret, ptbl_ret = 0;
	int version;

	for_each_matching_node(np, msm_smmu_list)
		if (of_find_property(np, "qcom,iommu-secure-id", NULL) &&
				of_device_is_available(np))
			break;

	if (!np)
		return 0;

	of_node_put(np);

	version = scm_get_feat_version(SCM_SVC_MP);

	if (version >= MAKE_CP_VERSION(1, 1, 1)) {
		struct msm_cp_pool_size pool_size;
		int retval;

		pool_size.size = MAXIMUM_VIRT_SIZE;
		pool_size.spare = 0;

		ret = scm_call(SCM_SVC_MP, IOMMU_SET_CP_POOL_SIZE, &pool_size,
				sizeof(pool_size), &retval, sizeof(retval));
		if (ret) {
			pr_err("scm call IOMMU_SET_CP_POOL_SIZE failed\n");
			goto fail;
		}
	}

	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_SIZE, &spare,
			sizeof(spare), psize, sizeof(psize));
	if (ret) {
		pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
		goto fail;
	}

	if (psize[1]) {
		pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
		ret = -EINVAL;
		goto fail;
	}

	buf = kmalloc(psize[0], GFP_KERNEL);
	if (!buf) {
		pr_err("%s: Failed to allocate %d bytes for PTBL\n",
			__func__, psize[0]);
		ret = -ENOMEM;
		goto fail;
	}

	pinit.paddr = virt_to_phys(buf);
	pinit.size = psize[0];
	pinit.spare = 0;

	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_INIT, &pinit,
			sizeof(pinit), &ptbl_ret, sizeof(ptbl_ret));
	if (ret) {
		pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
		goto fail_mem;
	}
	if (ptbl_ret) {
		pr_err("scm call IOMMU_SECURE_PTBL_INIT extended ret fail\n");
		ret = -EINVAL;
		goto fail_mem;
	}

	kmemleak_not_leak(buf);

	return 0;

fail_mem:
	kfree(buf);
fail:
	return ret;
}

int msm_iommu_sec_program_iommu(int sec_id)
{
	struct msm_scm_sec_cfg {
		unsigned int id;
		unsigned int spare;
	} cfg;
	int ret, scm_ret = 0;

	cfg.id = sec_id;
	cfg.spare = 0;

	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_CFG, &cfg, sizeof(cfg),
			&scm_ret, sizeof(scm_ret));
	if (ret || scm_ret) {
		pr_err("scm call IOMMU_SECURE_CFG failed\n");
		return ret ? ret : -EINVAL;
	}

	return ret;
}

static int msm_iommu_sec_ptbl_map(struct msm_iommu_drvdata *iommu_drvdata,
			struct msm_iommu_ctx_drvdata *ctx_drvdata,
			unsigned long va, phys_addr_t pa, size_t len)
{
	struct msm_scm_map2_req map;
	void *flush_va;
	phys_addr_t flush_pa;
	int ret = 0;

	map.plist.list = virt_to_phys(&pa);
	map.plist.list_size = 1;
	map.plist.size = len;
	map.info.id = iommu_drvdata->sec_id;
	map.info.ctx_id = ctx_drvdata->num;
	map.info.va = va;
	map.info.size = len;
	map.flags = IOMMU_TLBINVAL_FLAG;
	flush_va = &pa;
	flush_pa = virt_to_phys(&pa);

	/*
	 * Ensure that the buffer is in RAM by the time it gets to TZ;
	 * only the single address word TZ will read needs flushing, not
	 * len bytes of stack.
	 */
	clean_caches((unsigned long) flush_va, sizeof(pa), flush_pa);

	if (scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP2, &map, sizeof(map), &ret,
			sizeof(ret)))
		return -EINVAL;
	if (ret)
		return -EINVAL;

	/* Invalidate cache since TZ touched this address range */
	invalidate_caches((unsigned long) flush_va, sizeof(pa), flush_pa);

	return 0;
}

static unsigned int get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	unsigned int pa = sg_dma_address(sg);

	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

static int msm_iommu_sec_ptbl_map_range(struct msm_iommu_drvdata *iommu_drvdata,
			struct msm_iommu_ctx_drvdata *ctx_drvdata,
			unsigned long va, struct scatterlist *sg, size_t len)
{
	struct scatterlist *sgiter;
	struct msm_scm_map2_req map;
	unsigned int *pa_list = NULL;
	unsigned int pa, cnt;
	void *flush_va;
	unsigned int offset = 0, chunk_offset = 0;
	int ret, scm_ret;

	map.info.id = iommu_drvdata->sec_id;
	map.info.ctx_id = ctx_drvdata->num;
	map.info.va = va;
	map.info.size = len;
	map.flags = IOMMU_TLBINVAL_FLAG;

	if (sg->length == len) {
		pa = get_phys_addr(sg);
		map.plist.list = virt_to_phys(&pa);
		map.plist.list_size = 1;
		map.plist.size = len;
		flush_va = &pa;
	} else {
		sgiter = sg;
		cnt = sg->length / SZ_1M;
		while ((sgiter = sg_next(sgiter)))
			cnt += sgiter->length / SZ_1M;

		pa_list = kmalloc(cnt * sizeof(*pa_list), GFP_KERNEL);
		if (!pa_list)
			return -ENOMEM;

		sgiter = sg;
		cnt = 0;
		pa = get_phys_addr(sgiter);
		while (offset < len) {
			pa_list[cnt] = pa + chunk_offset;
			chunk_offset += SZ_1M;
			offset += SZ_1M;
			cnt++;

			if (chunk_offset >= sgiter->length && offset < len) {
				chunk_offset = 0;
				sgiter = sg_next(sgiter);
				pa = get_phys_addr(sgiter);
			}
		}

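		/*
		 * Worked example: for a 3M mapping backed by a 1M entry
		 * followed by a 2M entry, the loop above produces
		 * cnt == 3 with
		 *	pa_list[0] = phys(entry0)
		 *	pa_list[1] = phys(entry1)
		 *	pa_list[2] = phys(entry1) + SZ_1M
		 * i.e. one descriptor per 1M chunk, which is why
		 * plist.size below is SZ_1M.
		 */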
		map.plist.list = virt_to_phys(pa_list);
		map.plist.list_size = cnt;
		map.plist.size = SZ_1M;
		flush_va = pa_list;
	}

	/*
	 * Ensure that the buffer is in RAM by the time it gets to TZ
	 */
	clean_caches((unsigned long) flush_va,
		sizeof(unsigned long) * map.plist.list_size,
		virt_to_phys(flush_va));

	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP2, &map, sizeof(map),
			&scm_ret, sizeof(scm_ret));
	kfree(pa_list);
	return ret;
}

static int msm_iommu_sec_ptbl_unmap(struct msm_iommu_drvdata *iommu_drvdata,
			struct msm_iommu_ctx_drvdata *ctx_drvdata,
			unsigned long va, size_t len)
{
	struct msm_scm_unmap2_req unmap;
	int ret, scm_ret;

	unmap.info.id = iommu_drvdata->sec_id;
	unmap.info.ctx_id = ctx_drvdata->num;
	unmap.info.va = va;
	unmap.info.size = len;
	unmap.flags = IOMMU_TLBINVAL_FLAG;

	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_UNMAP2, &unmap, sizeof(unmap),
			&scm_ret, sizeof(scm_ret));
	return ret;
}

static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_iommu_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->list_attached);
	domain->priv = priv;
	return 0;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv;

	iommu_access_ops->iommu_lock_acquire(0);
	priv = domain->priv;
	domain->priv = NULL;

	kfree(priv);
	iommu_access_ops->iommu_lock_release(0);
}

static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	iommu_access_ops->iommu_lock_acquire(0);

	priv = domain->priv;
	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto fail;
	}

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = iommu_access_ops->iommu_power_on(iommu_drvdata);
	if (ret)
		goto fail;

	/* We can only do this once */
	if (!iommu_drvdata->ctx_attach_count) {
		ret = iommu_access_ops->iommu_clk_on(iommu_drvdata);
		if (ret) {
			iommu_access_ops->iommu_power_off(iommu_drvdata);
			goto fail;
		}

		ret = msm_iommu_sec_program_iommu(iommu_drvdata->sec_id);

		/* bfb settings are always programmed by HLOS */
		program_iommu_bfb_settings(iommu_drvdata->base,
					iommu_drvdata->bfb_settings);

		iommu_access_ops->iommu_clk_off(iommu_drvdata);
		if (ret) {
			iommu_access_ops->iommu_power_off(iommu_drvdata);
			goto fail;
		}
	}

	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ctx_drvdata->attached_domain = domain;
	++iommu_drvdata->ctx_attach_count;

	iommu_access_ops->iommu_lock_release(0);

	msm_iommu_attached(dev->parent);
	return ret;
fail:
	iommu_access_ops->iommu_lock_release(0);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				struct device *dev)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;

	if (!dev)
		return;

	msm_iommu_detached(dev->parent);

	iommu_access_ops->iommu_lock_acquire(0);

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
		goto fail;

	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;

	iommu_access_ops->iommu_power_off(iommu_drvdata);
	BUG_ON(iommu_drvdata->ctx_attach_count == 0);
	--iommu_drvdata->ctx_attach_count;
fail:
	iommu_access_ops->iommu_lock_release(0);
}

static int get_drvdata(struct iommu_domain *domain,
			struct msm_iommu_drvdata **iommu_drvdata,
			struct msm_iommu_ctx_drvdata **ctx_drvdata)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_ctx_drvdata *ctx;

	list_for_each_entry(ctx, &priv->list_attached, attached_elm) {
		if (ctx->attached_domain == domain) {
			*ctx_drvdata = ctx;
			*iommu_drvdata = dev_get_drvdata(ctx->pdev->dev.parent);
			return 0;
		}
	}

	return -EINVAL;
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			phys_addr_t pa, size_t len, int prot)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;

	iommu_access_ops->iommu_lock_acquire(0);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;

	iommu_access_ops->iommu_clk_on(iommu_drvdata);
	ret = msm_iommu_sec_ptbl_map(iommu_drvdata, ctx_drvdata,
				va, pa, len);
	iommu_access_ops->iommu_clk_off(iommu_drvdata);
fail:
	iommu_access_ops->iommu_lock_release(0);
	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			size_t len)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = -ENODEV;

	iommu_access_ops->iommu_lock_acquire(0);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;

	iommu_access_ops->iommu_clk_on(iommu_drvdata);
	ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata,
					va, len);
	iommu_access_ops->iommu_clk_off(iommu_drvdata);
fail:
	iommu_access_ops->iommu_lock_release(0);

	/* the IOMMU API requires us to return how many bytes were unmapped */
	len = ret ? 0 : len;
	return len;
}

static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			struct scatterlist *sg, unsigned int len,
			int prot)
{
	int ret;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;

	iommu_access_ops->iommu_lock_acquire(0);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;
	iommu_access_ops->iommu_clk_on(iommu_drvdata);
	ret = msm_iommu_sec_ptbl_map_range(iommu_drvdata, ctx_drvdata,
					va, sg, len);
	iommu_access_ops->iommu_clk_off(iommu_drvdata);
fail:
	iommu_access_ops->iommu_lock_release(0);
	return ret;
}


static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
			unsigned int len)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	iommu_access_ops->iommu_lock_acquire(0);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;

	iommu_access_ops->iommu_clk_on(iommu_drvdata);
	ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata, va, len);
	iommu_access_ops->iommu_clk_off(iommu_drvdata);

fail:
	iommu_access_ops->iommu_lock_release(0);
	return 0;
}

static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
			unsigned long va)
{
	return 0;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
			unsigned long cap)
{
	return 0;
}

static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};

static int __init msm_iommu_sec_init(void)
{
	int ret;

	ret = bus_register(&msm_iommu_sec_bus_type);
	if (ret)
		goto fail;

	bus_set_iommu(&msm_iommu_sec_bus_type, &msm_iommu_ops);
	ret = msm_iommu_sec_ptbl_init();
fail:
	return ret;
}

subsys_initcall(msm_iommu_sec_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU Secure Driver");