blob: 74d8b48a23bd80627e6b3d754bc055876ceb1c67 [file] [log] [blame]
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/kmemleak.h>

#include <asm/sizes.h>

#include <mach/iommu_perfmon.h>
#include <mach/iommu_hw-v1.h>
#include <mach/msm_iommu_priv.h>
#include <mach/iommu.h>
#include <mach/scm.h>
37
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

/* SCM command IDs understood by the secure-world IOMMU service */
#define IOMMU_SECURE_CFG	2
#define IOMMU_SECURE_PTBL_SIZE	3
#define IOMMU_SECURE_PTBL_INIT	4
#define IOMMU_SECURE_MAP	6
#define IOMMU_SECURE_UNMAP	7
#define IOMMU_SECURE_MAP2	0x0B
#define IOMMU_SECURE_UNMAP2	0x0C
/* Request flag: ask the secure world to invalidate the TLB as well */
#define IOMMU_TLBINVAL_FLAG	0x00000001

/*
 * Platform hooks (locking, power, clocks) installed via
 * msm_iommu_sec_set_access_ops() before any domain is used.
 */
static struct iommu_access_ops *iommu_access_ops;
Laura Abbott0d135652012-10-04 12:59:03 -070051
/*
 * Marshalling structures passed by-value to scm_call(); field layout is
 * part of the secure-world ABI -- do not reorder or resize.
 */

/* Describes the list of physical chunk addresses for a map request. */
struct msm_scm_paddr_list {
	unsigned int list;	/* phys addr of the array of chunk PAs */
	unsigned int list_size;	/* number of entries in that array */
	unsigned int size;	/* size of each chunk in bytes */
};

/* Identifies the IOMMU/context and the VA range a request targets. */
struct msm_scm_mapping_info {
	unsigned int id;	/* secure IOMMU id (drvdata->sec_id) */
	unsigned int ctx_id;	/* context bank number (ctx_drvdata->num) */
	unsigned int va;	/* start virtual address */
	unsigned int size;	/* length of the range in bytes */
};

/* IOMMU_SECURE_MAP2 request payload. */
struct msm_scm_map2_req {
	struct msm_scm_paddr_list plist;
	struct msm_scm_mapping_info info;
	unsigned int flags;	/* e.g. IOMMU_TLBINVAL_FLAG */
};

/* IOMMU_SECURE_UNMAP2 request payload. */
struct msm_scm_unmap2_req {
	struct msm_scm_mapping_info info;
	unsigned int flags;	/* e.g. IOMMU_TLBINVAL_FLAG */
};
75
/* Install the platform hooks (locking, power, clocks) used by this driver. */
void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops)
{
	iommu_access_ops = access_ops;
}
80
Laura Abbott0d135652012-10-04 12:59:03 -070081static int msm_iommu_sec_ptbl_init(void)
82{
83 struct device_node *np;
84 struct msm_scm_ptbl_init {
85 unsigned int paddr;
86 unsigned int size;
87 unsigned int spare;
88 } pinit;
89 unsigned int *buf;
Mitchel Humpherys637cc532012-12-12 16:50:58 -080090 int psize[2] = {0, 0};
Laura Abbott0d135652012-10-04 12:59:03 -070091 unsigned int spare;
Mitchel Humpherys637cc532012-12-12 16:50:58 -080092 int ret, ptbl_ret = 0;
Laura Abbott0d135652012-10-04 12:59:03 -070093
Olav Haugan0e22c482013-01-28 17:39:36 -080094 for_each_compatible_node(np, NULL, "qcom,msm-smmu-v1")
Laura Abbott0d135652012-10-04 12:59:03 -070095 if (of_find_property(np, "qcom,iommu-secure-id", NULL))
96 break;
97
98 if (!np)
99 return 0;
100
101 of_node_put(np);
Syed Rameez Mustafa6ab6af32013-03-18 12:53:11 -0700102 ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_SIZE, &spare,
Laura Abbott0d135652012-10-04 12:59:03 -0700103 sizeof(spare), psize, sizeof(psize));
104 if (ret) {
105 pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
106 goto fail;
107 }
108
109 if (psize[1]) {
110 pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
111 goto fail;
112 }
113
114 buf = kmalloc(psize[0], GFP_KERNEL);
115 if (!buf) {
116 pr_err("%s: Failed to allocate %d bytes for PTBL\n",
117 __func__, psize[0]);
118 ret = -ENOMEM;
119 goto fail;
120 }
121
122 pinit.paddr = virt_to_phys(buf);
123 pinit.size = psize[0];
124
Syed Rameez Mustafa6ab6af32013-03-18 12:53:11 -0700125 ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_INIT, &pinit,
Laura Abbott0d135652012-10-04 12:59:03 -0700126 sizeof(pinit), &ptbl_ret, sizeof(ptbl_ret));
127 if (ret) {
128 pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
129 goto fail_mem;
130 }
131 if (ptbl_ret) {
132 pr_err("scm call IOMMU_SECURE_PTBL_INIT extended ret fail\n");
133 goto fail_mem;
134 }
135
Michael Bohan1834e7f2013-01-18 17:16:38 -0800136 kmemleak_not_leak(buf);
137
Laura Abbott0d135652012-10-04 12:59:03 -0700138 return 0;
139
140fail_mem:
141 kfree(buf);
142fail:
143 return ret;
144}
145
Laura Abbottf4daa692012-10-10 19:31:53 -0700146int msm_iommu_sec_program_iommu(int sec_id)
Laura Abbott0d135652012-10-04 12:59:03 -0700147{
148 struct msm_scm_sec_cfg {
149 unsigned int id;
150 unsigned int spare;
151 } cfg;
Mitchel Humpherys637cc532012-12-12 16:50:58 -0800152 int ret, scm_ret = 0;
Laura Abbott0d135652012-10-04 12:59:03 -0700153
154 cfg.id = sec_id;
155
Syed Rameez Mustafa6ab6af32013-03-18 12:53:11 -0700156 ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_CFG, &cfg, sizeof(cfg),
Laura Abbott0d135652012-10-04 12:59:03 -0700157 &scm_ret, sizeof(scm_ret));
158 if (ret || scm_ret) {
159 pr_err("scm call IOMMU_SECURE_CFG failed\n");
160 return ret ? ret : -EINVAL;
161 }
162
163 return ret;
164}
165
166static int msm_iommu_sec_ptbl_map(struct msm_iommu_drvdata *iommu_drvdata,
167 struct msm_iommu_ctx_drvdata *ctx_drvdata,
168 unsigned long va, phys_addr_t pa, size_t len)
169{
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700170 struct msm_scm_map2_req map;
Laura Abbott0d135652012-10-04 12:59:03 -0700171 int ret = 0;
172
173 map.plist.list = virt_to_phys(&pa);
174 map.plist.list_size = 1;
175 map.plist.size = len;
176 map.info.id = iommu_drvdata->sec_id;
177 map.info.ctx_id = ctx_drvdata->num;
178 map.info.va = va;
179 map.info.size = len;
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700180 map.flags = IOMMU_TLBINVAL_FLAG;
Laura Abbott0d135652012-10-04 12:59:03 -0700181
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700182 if (scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP2, &map, sizeof(map), &ret,
Laura Abbott0d135652012-10-04 12:59:03 -0700183 sizeof(ret)))
184 return -EINVAL;
185 if (ret)
186 return -EINVAL;
187
188 return 0;
189}
190
/*
 * Resolve the physical address of a scatterlist entry.  The DMA address
 * is preferred so that carveout regions without a backing struct page
 * can be mapped; fall back to sg_phys() when no DMA address is set.
 */
static unsigned int get_phys_addr(struct scatterlist *sg)
{
	unsigned int paddr = sg_dma_address(sg);

	return paddr ? paddr : sg_phys(sg);
}
203
204static int msm_iommu_sec_ptbl_map_range(struct msm_iommu_drvdata *iommu_drvdata,
205 struct msm_iommu_ctx_drvdata *ctx_drvdata,
206 unsigned long va, struct scatterlist *sg, size_t len)
207{
208 struct scatterlist *sgiter;
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700209 struct msm_scm_map2_req map;
Laura Abbott0d135652012-10-04 12:59:03 -0700210 unsigned int *pa_list = 0;
211 unsigned int pa, cnt;
212 unsigned int offset = 0, chunk_offset = 0;
213 int ret, scm_ret;
214
215 map.info.id = iommu_drvdata->sec_id;
216 map.info.ctx_id = ctx_drvdata->num;
217 map.info.va = va;
218 map.info.size = len;
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700219 map.flags = IOMMU_TLBINVAL_FLAG;
Laura Abbott0d135652012-10-04 12:59:03 -0700220
221 if (sg->length == len) {
222 pa = get_phys_addr(sg);
223 map.plist.list = virt_to_phys(&pa);
224 map.plist.list_size = 1;
225 map.plist.size = len;
226 } else {
227 sgiter = sg;
228 cnt = sg->length / SZ_1M;
229 while ((sgiter = sg_next(sgiter)))
230 cnt += sgiter->length / SZ_1M;
231
232 pa_list = kmalloc(cnt * sizeof(*pa_list), GFP_KERNEL);
233 if (!pa_list)
234 return -ENOMEM;
235
236 sgiter = sg;
237 cnt = 0;
238 pa = get_phys_addr(sgiter);
239 while (offset < len) {
240 pa += chunk_offset;
241 pa_list[cnt] = pa;
242 chunk_offset += SZ_1M;
243 offset += SZ_1M;
244 cnt++;
245
246 if (chunk_offset >= sgiter->length && offset < len) {
247 chunk_offset = 0;
248 sgiter = sg_next(sgiter);
249 pa = get_phys_addr(sgiter);
250 }
251 }
252
253 map.plist.list = virt_to_phys(pa_list);
254 map.plist.list_size = cnt;
255 map.plist.size = SZ_1M;
256 }
257
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700258 ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP2, &map, sizeof(map),
Laura Abbott0d135652012-10-04 12:59:03 -0700259 &scm_ret, sizeof(scm_ret));
260 kfree(pa_list);
261 return ret;
262}
263
264static int msm_iommu_sec_ptbl_unmap(struct msm_iommu_drvdata *iommu_drvdata,
265 struct msm_iommu_ctx_drvdata *ctx_drvdata,
266 unsigned long va, size_t len)
267{
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700268 struct msm_scm_unmap2_req unmap;
Laura Abbott0d135652012-10-04 12:59:03 -0700269 int ret, scm_ret;
270
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700271 unmap.info.id = iommu_drvdata->sec_id;
272 unmap.info.ctx_id = ctx_drvdata->num;
273 unmap.info.va = va;
274 unmap.info.size = len;
275 unmap.flags = IOMMU_TLBINVAL_FLAG;
Laura Abbott0d135652012-10-04 12:59:03 -0700276
Adrian Alexeibfe7c462013-04-01 14:36:24 -0700277 ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_UNMAP2, &unmap, sizeof(unmap),
Laura Abbott0d135652012-10-04 12:59:03 -0700278 &scm_ret, sizeof(scm_ret));
279 return ret;
280}
281
Laura Abbott0d135652012-10-04 12:59:03 -0700282static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
283{
Olav Haugan090614f2013-03-22 12:14:18 -0700284 struct msm_iommu_priv *priv;
Laura Abbott0d135652012-10-04 12:59:03 -0700285
286 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
287 if (!priv)
288 return -ENOMEM;
289
290 INIT_LIST_HEAD(&priv->list_attached);
291 domain->priv = priv;
292 return 0;
293}
294
295static void msm_iommu_domain_destroy(struct iommu_domain *domain)
296{
Olav Haugan090614f2013-03-22 12:14:18 -0700297 struct msm_iommu_priv *priv;
Laura Abbott0d135652012-10-04 12:59:03 -0700298
Olav Hauganeece7e52013-04-02 10:22:21 -0700299 iommu_access_ops->iommu_lock_acquire();
Laura Abbott0d135652012-10-04 12:59:03 -0700300 priv = domain->priv;
301 domain->priv = NULL;
302
303 kfree(priv);
Olav Hauganeece7e52013-04-02 10:22:21 -0700304 iommu_access_ops->iommu_lock_release();
Laura Abbott0d135652012-10-04 12:59:03 -0700305}
306
/*
 * Attach a context-bank device to a domain.
 *
 * Rejects a context that is already attached (non-empty attached_elm or
 * already present on this domain's list).  Powers the IOMMU on for every
 * attach; the secure programming and BFB setup run only on the first
 * attach of the physical IOMMU (ctx_attach_count == 0).  On any failure
 * the power reference taken here is dropped before returning.
 *
 * Returns 0 on success or a negative errno.
 */
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	iommu_access_ops->iommu_lock_acquire();

	priv = domain->priv;
	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* parent platform device holds the IOMMU drvdata, child the ctx */
	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto fail;
	}

	/* already attached to some domain? */
	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	/* already attached to this domain? */
	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = iommu_access_ops->iommu_power_on(iommu_drvdata);
	if (ret)
		goto fail;

	/* We can only do this once */
	if (!iommu_drvdata->ctx_attach_count) {
		ret = iommu_access_ops->iommu_clk_on(iommu_drvdata);
		if (ret) {
			iommu_access_ops->iommu_power_off(iommu_drvdata);
			goto fail;
		}

		ret = msm_iommu_sec_program_iommu(iommu_drvdata->sec_id);

		/* bfb settings are always programmed by HLOS */
		program_iommu_bfb_settings(iommu_drvdata->base,
					   iommu_drvdata->bfb_settings);

		/* clocks off first; ret below is the programming result */
		iommu_access_ops->iommu_clk_off(iommu_drvdata);
		if (ret) {
			iommu_access_ops->iommu_power_off(iommu_drvdata);
			goto fail;
		}
	}

	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ctx_drvdata->attached_domain = domain;
	++iommu_drvdata->ctx_attach_count;

	iommu_access_ops->iommu_lock_release();

	/* perfmon notification runs outside the driver lock */
	msm_iommu_attached(dev->parent);
	return ret;
fail:
	iommu_access_ops->iommu_lock_release();
	return ret;
}
378
379static void msm_iommu_detach_dev(struct iommu_domain *domain,
380 struct device *dev)
381{
382 struct msm_iommu_drvdata *iommu_drvdata;
383 struct msm_iommu_ctx_drvdata *ctx_drvdata;
384
Olav Haugan64ffdf32013-01-24 17:20:24 -0800385 msm_iommu_detached(dev->parent);
386
Olav Hauganeece7e52013-04-02 10:22:21 -0700387 iommu_access_ops->iommu_lock_acquire();
Laura Abbott0d135652012-10-04 12:59:03 -0700388 if (!dev)
389 goto fail;
390
391 iommu_drvdata = dev_get_drvdata(dev->parent);
392 ctx_drvdata = dev_get_drvdata(dev);
393 if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
394 goto fail;
395
396 list_del_init(&ctx_drvdata->attached_elm);
397 ctx_drvdata->attached_domain = NULL;
398
Olav Hauganeece7e52013-04-02 10:22:21 -0700399 iommu_access_ops->iommu_power_off(iommu_drvdata);
Olav Haugane3885392013-03-06 16:22:53 -0800400 BUG_ON(iommu_drvdata->ctx_attach_count == 0);
401 --iommu_drvdata->ctx_attach_count;
Laura Abbott0d135652012-10-04 12:59:03 -0700402fail:
Olav Hauganeece7e52013-04-02 10:22:21 -0700403 iommu_access_ops->iommu_lock_release();
Laura Abbott0d135652012-10-04 12:59:03 -0700404}
405
406static int get_drvdata(struct iommu_domain *domain,
407 struct msm_iommu_drvdata **iommu_drvdata,
408 struct msm_iommu_ctx_drvdata **ctx_drvdata)
409{
Olav Haugan090614f2013-03-22 12:14:18 -0700410 struct msm_iommu_priv *priv = domain->priv;
Laura Abbott0d135652012-10-04 12:59:03 -0700411 struct msm_iommu_ctx_drvdata *ctx;
412
413 list_for_each_entry(ctx, &priv->list_attached, attached_elm) {
414 if (ctx->attached_domain == domain)
415 break;
416 }
417
418 if (ctx->attached_domain != domain)
419 return -EINVAL;
420
421 *ctx_drvdata = ctx;
422 *iommu_drvdata = dev_get_drvdata(ctx->pdev->dev.parent);
423 return 0;
424}
425
426static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
427 phys_addr_t pa, size_t len, int prot)
428{
429 struct msm_iommu_drvdata *iommu_drvdata;
430 struct msm_iommu_ctx_drvdata *ctx_drvdata;
431 int ret = 0;
432
Olav Hauganeece7e52013-04-02 10:22:21 -0700433 iommu_access_ops->iommu_lock_acquire();
Laura Abbott0d135652012-10-04 12:59:03 -0700434
435 ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
436 if (ret)
437 goto fail;
438
Olav Hauganeece7e52013-04-02 10:22:21 -0700439 iommu_access_ops->iommu_clk_on(iommu_drvdata);
Laura Abbott0d135652012-10-04 12:59:03 -0700440 ret = msm_iommu_sec_ptbl_map(iommu_drvdata, ctx_drvdata,
441 va, pa, len);
Olav Hauganeece7e52013-04-02 10:22:21 -0700442 iommu_access_ops->iommu_clk_off(iommu_drvdata);
Laura Abbott0d135652012-10-04 12:59:03 -0700443fail:
Olav Hauganeece7e52013-04-02 10:22:21 -0700444 iommu_access_ops->iommu_lock_release();
Laura Abbott0d135652012-10-04 12:59:03 -0700445 return ret;
446}
447
448static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
449 size_t len)
450{
451 struct msm_iommu_drvdata *iommu_drvdata;
452 struct msm_iommu_ctx_drvdata *ctx_drvdata;
453 int ret = -ENODEV;
454
Olav Hauganeece7e52013-04-02 10:22:21 -0700455 iommu_access_ops->iommu_lock_acquire();
Laura Abbott0d135652012-10-04 12:59:03 -0700456
457 ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
458 if (ret)
459 goto fail;
460
Olav Hauganeece7e52013-04-02 10:22:21 -0700461 iommu_access_ops->iommu_clk_on(iommu_drvdata);
Laura Abbott0d135652012-10-04 12:59:03 -0700462 ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata,
463 va, len);
Olav Hauganeece7e52013-04-02 10:22:21 -0700464 iommu_access_ops->iommu_clk_off(iommu_drvdata);
Laura Abbott0d135652012-10-04 12:59:03 -0700465fail:
Olav Hauganeece7e52013-04-02 10:22:21 -0700466 iommu_access_ops->iommu_lock_release();
Laura Abbott0d135652012-10-04 12:59:03 -0700467
468 /* the IOMMU API requires us to return how many bytes were unmapped */
469 len = ret ? 0 : len;
470 return len;
471}
472
473static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
474 struct scatterlist *sg, unsigned int len,
475 int prot)
476{
477 int ret;
478 struct msm_iommu_drvdata *iommu_drvdata;
479 struct msm_iommu_ctx_drvdata *ctx_drvdata;
480
Olav Hauganeece7e52013-04-02 10:22:21 -0700481 iommu_access_ops->iommu_lock_acquire();
Laura Abbott0d135652012-10-04 12:59:03 -0700482
483 ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
484 if (ret)
485 goto fail;
Olav Hauganeece7e52013-04-02 10:22:21 -0700486 iommu_access_ops->iommu_clk_on(iommu_drvdata);
Laura Abbott0d135652012-10-04 12:59:03 -0700487 ret = msm_iommu_sec_ptbl_map_range(iommu_drvdata, ctx_drvdata,
488 va, sg, len);
Olav Hauganeece7e52013-04-02 10:22:21 -0700489 iommu_access_ops->iommu_clk_off(iommu_drvdata);
Laura Abbott0d135652012-10-04 12:59:03 -0700490fail:
Olav Hauganeece7e52013-04-02 10:22:21 -0700491 iommu_access_ops->iommu_lock_release();
Laura Abbott0d135652012-10-04 12:59:03 -0700492 return ret;
493}
494
495
/*
 * iommu_ops.unmap_range: remove a range via the secure world.
 *
 * NOTE(review): the result of get_drvdata()/ptbl_unmap is discarded and
 * 0 is always returned -- callers cannot observe unmap failures here.
 * Verify whether this is intentional best-effort behavior.
 */
static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	iommu_access_ops->iommu_lock_acquire();

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;

	iommu_access_ops->iommu_clk_on(iommu_drvdata);
	ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata, va, len);
	iommu_access_ops->iommu_clk_off(iommu_drvdata);

fail:
	iommu_access_ops->iommu_lock_release();
	return 0;
}
517
/*
 * iova-to-phys lookup is not implemented for secure domains (the page
 * tables are owned by the secure world); always returns 0.
 */
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	return 0;
}
523
/* No IOMMU capabilities are advertised for secure domains. */
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}
529
/* The secure page-table base is not exposed to HLOS; always returns 0. */
static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	return 0;
}
534
/* iommu_ops implementation backed by secure-world (SCM) page tables. */
static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};
549
/*
 * Register the secure IOMMU bus, hook up the iommu_ops, and set up the
 * secure page-table pool.
 *
 * NOTE(review): if msm_iommu_sec_ptbl_init() fails the bus remains
 * registered -- confirm whether unregistering on failure is desired.
 */
static int __init msm_iommu_sec_init(void)
{
	int ret;

	ret = bus_register(&msm_iommu_sec_bus_type);
	if (ret)
		goto fail;

	bus_set_iommu(&msm_iommu_sec_bus_type, &msm_iommu_ops);
	ret = msm_iommu_sec_ptbl_init();
fail:
	return ret;
}
563
/* Initialize at subsys initcall time, ahead of ordinary driver probes. */
subsys_initcall(msm_iommu_sec_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU Secure Driver");