/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/kmemleak.h>

#include <asm/sizes.h>

#include <mach/iommu_perfmon.h>
#include <mach/iommu_hw-v1.h>
#include <mach/iommu.h>
#include <mach/scm.h>

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

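/*
 * Command IDs understood by the secure world, invoked via scm_call()
 * under the SCM_SVC_CP service.  All page-table management for these
 * IOMMUs happens on the secure side; HLOS only issues these commands.
 */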
#define IOMMU_SECURE_CFG	2
#define IOMMU_SECURE_PTBL_SIZE	3
#define IOMMU_SECURE_PTBL_INIT	4
#define IOMMU_SECURE_MAP	6
#define IOMMU_SECURE_UNMAP	7

static DEFINE_MUTEX(msm_iommu_lock);

struct msm_priv {
	struct list_head list_attached;
};

struct msm_scm_paddr_list {
	unsigned int list;
	unsigned int list_size;
	unsigned int size;
};

struct msm_scm_mapping_info {
	unsigned int id;
	unsigned int ctx_id;
	unsigned int va;
	unsigned int size;
};

struct msm_scm_map_req {
	struct msm_scm_paddr_list plist;
	struct msm_scm_mapping_info info;
};

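/*
 * Ask the secure world how much memory it needs for its IOMMU page
 * tables, allocate the buffer and hand its physical address over for
 * initialization.  If no "qcom,msm-smmu-v1" node carries a
 * "qcom,iommu-secure-id" property there is nothing to do.  On success
 * the buffer is never freed: ownership passes to the secure
 * environment, hence kmemleak_not_leak().
 */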
static int msm_iommu_sec_ptbl_init(void)
{
	struct device_node *np;
	struct msm_scm_ptbl_init {
		unsigned int paddr;
		unsigned int size;
		unsigned int spare;
	} pinit;
	unsigned int *buf;
	int psize[2] = {0, 0};
	unsigned int spare;
	int ret, ptbl_ret = 0;

	for_each_compatible_node(np, NULL, "qcom,msm-smmu-v1")
		if (of_find_property(np, "qcom,iommu-secure-id", NULL))
			break;

	if (!np)
		return 0;

	of_node_put(np);
	ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_PTBL_SIZE, &spare,
			sizeof(spare), psize, sizeof(psize));
	if (ret) {
		pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
		goto fail;
	}

	if (psize[1]) {
		pr_err("scm call IOMMU_SECURE_PTBL_SIZE extended ret fail\n");
		ret = -EINVAL;
		goto fail;
	}

	buf = kmalloc(psize[0], GFP_KERNEL);
	if (!buf) {
		pr_err("%s: Failed to allocate %d bytes for PTBL\n",
			__func__, psize[0]);
		ret = -ENOMEM;
		goto fail;
	}

	pinit.paddr = virt_to_phys(buf);
	pinit.size = psize[0];

	ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_PTBL_INIT, &pinit,
			sizeof(pinit), &ptbl_ret, sizeof(ptbl_ret));
	if (ret) {
		pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
		goto fail_mem;
	}
	if (ptbl_ret) {
		pr_err("scm call IOMMU_SECURE_PTBL_INIT extended ret fail\n");
		ret = -EINVAL;
		goto fail_mem;
	}

	kmemleak_not_leak(buf);

	return 0;

fail_mem:
	kfree(buf);
fail:
	return ret;
}

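/*
 * Have the secure world program the global configuration of the IOMMU
 * identified by sec_id.  Both the scm_call() transport result and the
 * extended return value from the secure side must indicate success.
 */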
int msm_iommu_sec_program_iommu(int sec_id)
{
	struct msm_scm_sec_cfg {
		unsigned int id;
		unsigned int spare;
	} cfg;
	int ret, scm_ret = 0;

	cfg.id = sec_id;

	ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_CFG, &cfg, sizeof(cfg),
			&scm_ret, sizeof(scm_ret));
	if (ret || scm_ret) {
		pr_err("scm call IOMMU_SECURE_CFG failed\n");
		return ret ? ret : -EINVAL;
	}

	return 0;
}

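/*
 * Map one physically contiguous region in the secure page tables.  The
 * secure side expects the physical address of a list of physical
 * addresses, so even this single entry is passed via virt_to_phys() of
 * a local variable.
 */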
static int msm_iommu_sec_ptbl_map(struct msm_iommu_drvdata *iommu_drvdata,
			struct msm_iommu_ctx_drvdata *ctx_drvdata,
			unsigned long va, phys_addr_t pa, size_t len)
{
	struct msm_scm_map_req map;
	int ret = 0;

	map.plist.list = virt_to_phys(&pa);
	map.plist.list_size = 1;
	map.plist.size = len;
	map.info.id = iommu_drvdata->sec_id;
	map.info.ctx_id = ctx_drvdata->num;
	map.info.va = va;
	map.info.size = len;

	if (scm_call(SCM_SVC_CP, IOMMU_SECURE_MAP, &map, sizeof(map), &ret,
			sizeof(ret)))
		return -EINVAL;
	if (ret)
		return -EINVAL;

	return 0;
}

static unsigned int get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	unsigned int pa = sg_dma_address(sg);
	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

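/*
 * Map a scatterlist in the secure page tables.  If the first entry
 * covers the whole request it is passed as a single-element list;
 * otherwise the region is split into SZ_1M chunks and a physical
 * address list with one entry per chunk is handed over
 * (map.plist.size then carries the chunk size, not the total).  This
 * assumes every scatterlist entry is a multiple of SZ_1M long.
 */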
static int msm_iommu_sec_ptbl_map_range(struct msm_iommu_drvdata *iommu_drvdata,
			struct msm_iommu_ctx_drvdata *ctx_drvdata,
			unsigned long va, struct scatterlist *sg, size_t len)
{
	struct scatterlist *sgiter;
	struct msm_scm_map_req map;
	unsigned int *pa_list = NULL;
	unsigned int pa, cnt;
	unsigned int offset = 0, chunk_offset = 0;
	int ret, scm_ret;

	map.info.id = iommu_drvdata->sec_id;
	map.info.ctx_id = ctx_drvdata->num;
	map.info.va = va;
	map.info.size = len;

	if (sg->length == len) {
		pa = get_phys_addr(sg);
		map.plist.list = virt_to_phys(&pa);
		map.plist.list_size = 1;
		map.plist.size = len;
	} else {
		sgiter = sg;
		cnt = sg->length / SZ_1M;
		while ((sgiter = sg_next(sgiter)))
			cnt += sgiter->length / SZ_1M;

		pa_list = kmalloc(cnt * sizeof(*pa_list), GFP_KERNEL);
		if (!pa_list)
			return -ENOMEM;

		sgiter = sg;
		cnt = 0;
		pa = get_phys_addr(sgiter);
		while (offset < len) {
			pa_list[cnt] = pa + chunk_offset;
			chunk_offset += SZ_1M;
			offset += SZ_1M;
			cnt++;

			if (chunk_offset >= sgiter->length && offset < len) {
				chunk_offset = 0;
				sgiter = sg_next(sgiter);
				pa = get_phys_addr(sgiter);
			}
		}

		map.plist.list = virt_to_phys(pa_list);
		map.plist.list_size = cnt;
		map.plist.size = SZ_1M;
	}

	ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_MAP, &map, sizeof(map),
			&scm_ret, sizeof(scm_ret));
	if (!ret && scm_ret)
		ret = -EINVAL;
	kfree(pa_list);
	return ret;
}

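/*
 * Tear down a secure-world mapping made by one of the map calls above.
 * Only the scm_call() transport result is propagated; the extended
 * return value is ignored here.
 */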
static int msm_iommu_sec_ptbl_unmap(struct msm_iommu_drvdata *iommu_drvdata,
			struct msm_iommu_ctx_drvdata *ctx_drvdata,
			unsigned long va, size_t len)
{
	struct msm_scm_mapping_info mi;
	int ret, scm_ret;

	mi.id = iommu_drvdata->sec_id;
	mi.ctx_id = ctx_drvdata->num;
	mi.va = va;
	mi.size = len;

	ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_UNMAP, &mi, sizeof(mi),
			&scm_ret, sizeof(scm_ret));
	return ret;
}

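/*
 * Clock helpers: pclk is enabled before clk; aclk is optional and only
 * touched when the device provides one.  On failure everything already
 * enabled is unwound again so the helpers stay balanced.
 */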
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_prepare_enable(drvdata->pclk);
	if (ret)
		goto fail;

	ret = clk_prepare_enable(drvdata->clk);
	if (ret) {
		clk_disable_unprepare(drvdata->pclk);
		goto fail;
	}

	if (drvdata->aclk) {
		ret = clk_prepare_enable(drvdata->aclk);
		if (ret) {
			clk_disable_unprepare(drvdata->clk);
			clk_disable_unprepare(drvdata->pclk);
		}
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->aclk)
		clk_disable_unprepare(drvdata->aclk);
	clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}

static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->list_attached);
	domain->priv = priv;
	return 0;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}

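/*
 * Attaching powers the IOMMU on (GDSC regulator, then clocks), has the
 * secure world program the instance, and programs the BFB settings,
 * which stay HLOS-owned even for secure IOMMUs.  The clocks are held
 * only while programming; the regulator stays on until detach.
 */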
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto fail;
	}

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = regulator_enable(iommu_drvdata->gdsc);
	if (ret)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret) {
		regulator_disable(iommu_drvdata->gdsc);
		goto fail;
	}

	ret = msm_iommu_sec_program_iommu(iommu_drvdata->sec_id);

	/* bfb settings are always programmed by HLOS */
	program_iommu_bfb_settings(iommu_drvdata->base,
			iommu_drvdata->bfb_settings);

	__disable_clocks(iommu_drvdata);
	if (ret) {
		regulator_disable(iommu_drvdata->gdsc);
		goto fail;
	}

	list_add(&ctx_drvdata->attached_elm, &priv->list_attached);
	ctx_drvdata->attached_domain = domain;

	mutex_unlock(&msm_iommu_lock);

	msm_iommu_attached(dev->parent);
	return ret;
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;

	if (!dev)
		return;

	msm_iommu_detached(dev->parent);

	mutex_lock(&msm_iommu_lock);

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
		goto fail;

	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;

	regulator_disable(iommu_drvdata->gdsc);

fail:
	mutex_unlock(&msm_iommu_lock);
}

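/*
 * Find the context attached to @domain and return its driver data.
 * Callers must hold msm_iommu_lock so the attached list cannot change
 * under us.
 */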
static int get_drvdata(struct iommu_domain *domain,
			struct msm_iommu_drvdata **iommu_drvdata,
			struct msm_iommu_ctx_drvdata **ctx_drvdata)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_ctx_drvdata *ctx;

	list_for_each_entry(ctx, &priv->list_attached, attached_elm) {
		if (ctx->attached_domain == domain) {
			*ctx_drvdata = ctx;
			*iommu_drvdata = dev_get_drvdata(ctx->pdev->dev.parent);
			return 0;
		}
	}

	return -EINVAL;
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;

	ret = msm_iommu_sec_ptbl_map(iommu_drvdata, ctx_drvdata,
					va, pa, len);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			      size_t len)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	mutex_lock(&msm_iommu_lock);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;

	ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata,
					va, len);
fail:
	mutex_unlock(&msm_iommu_lock);

	/* the IOMMU API requires us to return how many bytes were unmapped */
	len = ret ? 0 : len;
	return len;
}

static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	int ret;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;

	mutex_lock(&msm_iommu_lock);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;
	ret = msm_iommu_sec_ptbl_map_range(iommu_drvdata, ctx_drvdata,
						va, sg, len);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	mutex_lock(&msm_iommu_lock);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;

	ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata, va, len);

fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

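/*
 * The page tables live in (and are walked by) the secure environment,
 * so there is nothing HLOS can report for the lookups below.
 */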
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	return 0;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};
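
/*
 * Clients drive this driver through the generic IOMMU API against
 * msm_iommu_sec_bus_type.  A minimal sketch (hypothetical client code;
 * error handling elided, "dev" assumed to be a context device on the
 * secure bus):
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(&msm_iommu_sec_bus_type);
 *	if (domain && !iommu_attach_device(domain, dev))
 *		iommu_map(domain, iova, paddr, SZ_4K,
 *			  IOMMU_READ | IOMMU_WRITE);
 */
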
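/*
 * Register the secure bus type and hook up the ops, then set up the
 * secure page tables.  This runs at subsys_initcall time so the IOMMU
 * is ready before client drivers probe.
 */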
static int __init msm_iommu_sec_init(void)
{
	int ret;

	ret = bus_register(&msm_iommu_sec_bus_type);
	if (ret)
		goto fail;

	bus_set_iommu(&msm_iommu_sec_bus_type, &msm_iommu_ops);
	ret = msm_iommu_sec_ptbl_init();
fail:
	return ret;
}

subsys_initcall(msm_iommu_sec_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU Secure Driver");