/* Copyright (c) 2012 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/sizes.h>

#include <mach/iommu_hw-v2.h>
#include <mach/iommu.h>
#include <mach/scm.h>

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

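/*
 * Command IDs for the secure environment's IOMMU service, invoked
 * through scm_call() under SCM_SVC_CP.
 */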
#define IOMMU_SECURE_CFG	2
#define IOMMU_SECURE_PTBL_SIZE	3
#define IOMMU_SECURE_PTBL_INIT	4
#define IOMMU_SECURE_MAP	6
#define IOMMU_SECURE_UNMAP	7

static DEFINE_MUTEX(msm_iommu_lock);

struct msm_priv {
	struct list_head list_attached;
};

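/*
 * Argument blocks handed to the secure world by physical address.
 * Their layout presumably mirrors the secure-side definitions, so
 * field order and sizes must not change.
 */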
struct msm_scm_paddr_list {
	unsigned int list;
	unsigned int list_size;
	unsigned int size;
};

struct msm_scm_mapping_info {
	unsigned int id;
	unsigned int ctx_id;
	unsigned int va;
	unsigned int size;
};

struct msm_scm_map_req {
	struct msm_scm_paddr_list plist;
	struct msm_scm_mapping_info info;
};

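/*
 * If the device tree describes a secure SMMU instance, ask the secure
 * environment how much page-table memory it needs, allocate it, and
 * hand it over with IOMMU_SECURE_PTBL_INIT. Returns 0 if no secure
 * IOMMU is present.
 */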
static int msm_iommu_sec_ptbl_init(void)
{
	struct device_node *np;
	struct msm_scm_ptbl_init {
		unsigned int paddr;
		unsigned int size;
		unsigned int spare;
	} pinit;
	unsigned int *buf;
	int psize[2] = {0, 0};
	unsigned int spare;
	int ret, ptbl_ret = 0;

	for_each_compatible_node(np, NULL, "qcom,msm-smmu-v2")
		if (of_find_property(np, "qcom,iommu-secure-id", NULL))
			break;

	if (!np)
		return 0;

	of_node_put(np);
	ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_PTBL_SIZE, &spare,
			sizeof(spare), psize, sizeof(psize));
	if (ret) {
		pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
		goto fail;
	}

	if (psize[1]) {
		pr_err("scm call IOMMU_SECURE_PTBL_SIZE extended ret fail\n");
		ret = -EINVAL;
		goto fail;
	}

	buf = kmalloc(psize[0], GFP_KERNEL);
	if (!buf) {
		pr_err("%s: Failed to allocate %d bytes for PTBL\n",
			__func__, psize[0]);
		ret = -ENOMEM;
		goto fail;
	}

	pinit.paddr = virt_to_phys(buf);
	pinit.size = psize[0];

	ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_PTBL_INIT, &pinit,
			sizeof(pinit), &ptbl_ret, sizeof(ptbl_ret));
	if (ret) {
		pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
		goto fail_mem;
	}
	if (ptbl_ret) {
		pr_err("scm call IOMMU_SECURE_PTBL_INIT extended ret fail\n");
		ret = -EINVAL;
		goto fail_mem;
	}

	return 0;

fail_mem:
	kfree(buf);
fail:
	return ret;
}

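/*
 * Ask the secure environment to (re)program the IOMMU identified by
 * sec_id. The IOMMU's power and clocks must be on when this is called.
 */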
int msm_iommu_sec_program_iommu(int sec_id)
{
	struct msm_scm_sec_cfg {
		unsigned int id;
		unsigned int spare;
	} cfg;
	int ret, scm_ret = 0;

	cfg.id = sec_id;

	ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_CFG, &cfg, sizeof(cfg),
			&scm_ret, sizeof(scm_ret));
	if (ret || scm_ret) {
		pr_err("scm call IOMMU_SECURE_CFG failed\n");
		return ret ? ret : -EINVAL;
	}

	return ret;
}

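/*
 * Map a single physically contiguous region. The physical address is
 * passed to the secure world as a one-entry list; scm_call() is
 * expected to make the buffer visible to the secure side.
 */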
static int msm_iommu_sec_ptbl_map(struct msm_iommu_drvdata *iommu_drvdata,
			struct msm_iommu_ctx_drvdata *ctx_drvdata,
			unsigned long va, phys_addr_t pa, size_t len)
{
	struct msm_scm_map_req map;
	int ret = 0;

	map.plist.list = virt_to_phys(&pa);
	map.plist.list_size = 1;
	map.plist.size = len;
	map.info.id = iommu_drvdata->sec_id;
	map.info.ctx_id = ctx_drvdata->num;
	map.info.va = va;
	map.info.size = len;

	if (scm_call(SCM_SVC_CP, IOMMU_SECURE_MAP, &map, sizeof(map), &ret,
			sizeof(ret)))
		return -EINVAL;
	if (ret)
		return -EINVAL;

	return 0;
}

static unsigned int get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	unsigned int pa = sg_dma_address(sg);

	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

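/*
 * Map a scatterlist. A fully contiguous buffer is passed as a single
 * list entry; otherwise the list is flattened into SZ_1M chunks,
 * which assumes every segment's length is a multiple of 1MB.
 */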
static int msm_iommu_sec_ptbl_map_range(struct msm_iommu_drvdata *iommu_drvdata,
			struct msm_iommu_ctx_drvdata *ctx_drvdata,
			unsigned long va, struct scatterlist *sg, size_t len)
{
	struct scatterlist *sgiter;
	struct msm_scm_map_req map;
	unsigned int *pa_list = NULL;
	unsigned int pa, cnt;
	unsigned int offset = 0, chunk_offset = 0;
	int ret, scm_ret;

	map.info.id = iommu_drvdata->sec_id;
	map.info.ctx_id = ctx_drvdata->num;
	map.info.va = va;
	map.info.size = len;

	if (sg->length == len) {
		pa = get_phys_addr(sg);
		map.plist.list = virt_to_phys(&pa);
		map.plist.list_size = 1;
		map.plist.size = len;
	} else {
		sgiter = sg;
		cnt = sg->length / SZ_1M;
		while ((sgiter = sg_next(sgiter)))
			cnt += sgiter->length / SZ_1M;

		pa_list = kmalloc(cnt * sizeof(*pa_list), GFP_KERNEL);
		if (!pa_list)
			return -ENOMEM;

		sgiter = sg;
		cnt = 0;
		pa = get_phys_addr(sgiter);
		while (offset < len) {
			/* address of the current 1MB chunk within the segment */
			pa_list[cnt] = pa + chunk_offset;
			chunk_offset += SZ_1M;
			offset += SZ_1M;
			cnt++;

			if (chunk_offset >= sgiter->length && offset < len) {
				chunk_offset = 0;
				sgiter = sg_next(sgiter);
				pa = get_phys_addr(sgiter);
			}
		}

		map.plist.list = virt_to_phys(pa_list);
		map.plist.list_size = cnt;
		map.plist.size = SZ_1M;
	}

	ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_MAP, &map, sizeof(map),
			&scm_ret, sizeof(scm_ret));
	kfree(pa_list);
	return ret;
}

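/*
 * Tear down a mapping previously established with IOMMU_SECURE_MAP.
 * Only the transport result of scm_call() is returned; the secure
 * side's extended return code is not examined.
 */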
static int msm_iommu_sec_ptbl_unmap(struct msm_iommu_drvdata *iommu_drvdata,
			struct msm_iommu_ctx_drvdata *ctx_drvdata,
			unsigned long va, size_t len)
{
	struct msm_scm_mapping_info mi;
	int ret, scm_ret;

	mi.id = iommu_drvdata->sec_id;
	mi.ctx_id = ctx_drvdata->num;
	mi.va = va;
	mi.size = len;

	ret = scm_call(SCM_SVC_CP, IOMMU_SECURE_UNMAP, &mi, sizeof(mi),
			&scm_ret, sizeof(scm_ret));
	return ret;
}

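/* Enable pclk, then clk, then the optional aclk, unwinding on failure. */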
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_prepare_enable(drvdata->pclk);
	if (ret)
		goto fail;

	ret = clk_prepare_enable(drvdata->clk);
	if (ret) {
		clk_disable_unprepare(drvdata->pclk);
		goto fail;
	}

	if (drvdata->aclk) {
		ret = clk_prepare_enable(drvdata->aclk);
		if (ret) {
			clk_disable_unprepare(drvdata->clk);
			clk_disable_unprepare(drvdata->pclk);
		}
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->aclk)
		clk_disable_unprepare(drvdata->aclk);
	clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}

static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->list_attached);
	domain->priv = priv;
	return 0;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}

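/*
 * Attach a context to the domain: power the IOMMU through its GDSC
 * regulator, enable clocks long enough for the secure environment to
 * program the hardware, then record the attachment.
 */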
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto fail;
	}

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = regulator_enable(iommu_drvdata->gdsc);
	if (ret)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret) {
		regulator_disable(iommu_drvdata->gdsc);
		goto fail;
	}

	ret = msm_iommu_sec_program_iommu(iommu_drvdata->sec_id);
	__disable_clocks(iommu_drvdata);
	if (ret) {
		regulator_disable(iommu_drvdata->gdsc);
		goto fail;
	}

	list_add(&ctx_drvdata->attached_elm, &priv->list_attached);
	ctx_drvdata->attached_domain = domain;

fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;

	mutex_lock(&msm_iommu_lock);
	if (!dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
		goto fail;

	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;

	regulator_disable(iommu_drvdata->gdsc);

fail:
	mutex_unlock(&msm_iommu_lock);
}

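/*
 * Look up the context attached to @domain and return its driver data
 * along with that of the parent IOMMU. Callers hold msm_iommu_lock.
 */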
static int get_drvdata(struct iommu_domain *domain,
			struct msm_iommu_drvdata **iommu_drvdata,
			struct msm_iommu_ctx_drvdata **ctx_drvdata)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_ctx_drvdata *ctx;

	list_for_each_entry(ctx, &priv->list_attached, attached_elm) {
		if (ctx->attached_domain == domain) {
			*ctx_drvdata = ctx;
			*iommu_drvdata = dev_get_drvdata(ctx->pdev->dev.parent);
			return 0;
		}
	}

	return -EINVAL;
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;

	ret = msm_iommu_sec_ptbl_map(iommu_drvdata, ctx_drvdata,
					va, pa, len);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			      size_t len)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	mutex_lock(&msm_iommu_lock);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;

	ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata,
					va, len);
fail:
	mutex_unlock(&msm_iommu_lock);

	/* the IOMMU API requires us to return how many bytes were unmapped */
	len = ret ? 0 : len;
	return len;
}

static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	int ret;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;

	mutex_lock(&msm_iommu_lock);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;
	ret = msm_iommu_sec_ptbl_map_range(iommu_drvdata, ctx_drvdata,
					va, sg, len);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	mutex_lock(&msm_iommu_lock);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;

	ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata, va, len);

fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

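/*
 * The page tables are owned and walked by the secure environment, so
 * there is nothing meaningful to report for these queries.
 */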
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	return 0;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};

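/*
 * Register the secure IOMMU bus and hand over the page-table memory.
 * Runs at subsys_initcall time so the IOMMU is available before client
 * drivers probe.
 */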
static int __init msm_iommu_sec_init(void)
{
	int ret;

	ret = bus_register(&msm_iommu_sec_bus_type);
	if (ret)
		goto fail;

	bus_set_iommu(&msm_iommu_sec_bus_type, &msm_iommu_ops);
	ret = msm_iommu_sec_ptbl_init();
fail:
	return ret;
}

subsys_initcall(msm_iommu_sec_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU Secure Driver");