blob: e0314f9ebc3bcd8f7d6dec5676f9d74ad66039c1 [file] [log] [blame]
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12
13#define pr_fmt(fmt) "io-pgtable-msm-secure: " fmt
14
15#include <linux/iommu.h>
16#include <linux/kernel.h>
17#include <linux/scatterlist.h>
18#include <linux/sizes.h>
19#include <linux/slab.h>
20#include <linux/types.h>
21#include <soc/qcom/scm.h>
22#include <linux/dma-mapping.h>
23#include <asm/cacheflush.h>
24
25#include "io-pgtable.h"
26
/* SCM (secure monitor call) command IDs used for secure page-table ops */
#define IOMMU_SECURE_PTBL_SIZE 3
#define IOMMU_SECURE_PTBL_INIT 4
#define IOMMU_SECURE_MAP2_FLAT 0x12
#define IOMMU_SECURE_UNMAP2_FLAT 0x13
/* Passed in args[4] of IOMMU_SECURE_UNMAP2_FLAT; presumably requests a
 * TLB invalidation along with the unmap — confirm against TZ interface.
 */
#define IOMMU_TLBINVAL_FLAG 0x00000001

/* Convert an embedded struct io_pgtable back to its containing data */
#define io_pgtable_to_data(x)						\
	container_of((x), struct msm_secure_io_pgtable, iop)

/* Convert a struct io_pgtable_ops to its enclosing struct io_pgtable */
#define io_pgtable_ops_to_pgtable(x)					\
	container_of((x), struct io_pgtable, ops)

/* Convert a struct io_pgtable_ops straight to the msm_secure data */
#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
/*
 * Per-instance state for the MSM secure page-table format. The actual
 * page tables live in TZ-owned memory; HLOS only tracks the io_pgtable
 * handle and serializes the SCM calls that manipulate them.
 */
struct msm_secure_io_pgtable {
	struct io_pgtable iop;
	/* lock required while operating on page tables */
	struct mutex pgtbl_lock;
};
47
48int msm_iommu_sec_pgtbl_init(void)
49{
Ramesh Yadav Javadi9a59a472018-05-09 16:51:31 +053050 struct msm_scm_ptbl_init {
51 unsigned int paddr;
52 unsigned int size;
53 unsigned int spare;
54 } pinit = {0};
Charan Teja Reddy35144b02017-09-05 16:20:46 +053055 int psize[2] = {0, 0};
56 unsigned int spare = 0;
57 int ret, ptbl_ret = 0;
58 struct device dev = {0};
59 void *cpu_addr;
60 dma_addr_t paddr;
61 unsigned long attrs = 0;
62
Ramesh Yadav Javadi9a59a472018-05-09 16:51:31 +053063 struct scm_desc desc = {0};
64
65 if (!is_scm_armv8()) {
66 ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_SIZE, &spare,
67 sizeof(spare), psize, sizeof(psize));
68 } else {
Charan Teja Reddy35144b02017-09-05 16:20:46 +053069 struct scm_desc desc = {0};
70
71 desc.args[0] = spare;
72 desc.arginfo = SCM_ARGS(1);
73 ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
74 IOMMU_SECURE_PTBL_SIZE), &desc);
75 psize[0] = desc.ret[0];
76 psize[1] = desc.ret[1];
Charan Teja Reddy35144b02017-09-05 16:20:46 +053077 }
Ramesh Yadav Javadi9a59a472018-05-09 16:51:31 +053078 if (ret || psize[1]) {
79 pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
80 goto fail;
81 }
Charan Teja Reddy35144b02017-09-05 16:20:46 +053082 /* Now allocate memory for the secure page tables */
83 attrs = DMA_ATTR_NO_KERNEL_MAPPING;
84 dev.coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
Zhenhua Huang60b347d2018-02-26 14:35:56 +080085 arch_setup_dma_ops(&dev, 0, 0, NULL, 0);
Charan Teja Reddy35144b02017-09-05 16:20:46 +053086 cpu_addr = dma_alloc_attrs(&dev, psize[0], &paddr, GFP_KERNEL, attrs);
87 if (!cpu_addr) {
88 pr_err("%s: Failed to allocate %d bytes for PTBL\n",
89 __func__, psize[0]);
Ramesh Yadav Javadi9a59a472018-05-09 16:51:31 +053090 ret = -ENOMEM;
91 goto fail;
Charan Teja Reddy35144b02017-09-05 16:20:46 +053092 }
93
Ramesh Yadav Javadi9a59a472018-05-09 16:51:31 +053094 pinit.paddr = (unsigned int)paddr;
95 /* paddr may be a physical address > 4GB */
96 desc.args[0] = paddr;
97 desc.args[1] = pinit.size = psize[0];
98 desc.args[2] = pinit.spare;
99 desc.arginfo = SCM_ARGS(3, SCM_RW, SCM_VAL, SCM_VAL);
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530100
Ramesh Yadav Javadi9a59a472018-05-09 16:51:31 +0530101 if (!is_scm_armv8()) {
102 ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_INIT, &pinit,
103 sizeof(pinit), &ptbl_ret, sizeof(ptbl_ret));
104 } else {
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530105 ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
106 IOMMU_SECURE_PTBL_INIT), &desc);
107 ptbl_ret = desc.ret[0];
Ramesh Yadav Javadi9a59a472018-05-09 16:51:31 +0530108 }
109 if (ret) {
110 pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
111 goto fail_mem;
112 }
113 if (ptbl_ret) {
114 pr_err("scm call IOMMU_SECURE_PTBL_INIT extended ret fail\n");
115 goto fail_mem;
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530116 }
117
118 return 0;
Ramesh Yadav Javadi9a59a472018-05-09 16:51:31 +0530119
120fail_mem:
121 dma_free_attrs(&dev, psize[0], cpu_addr, paddr, attrs);
122fail:
123 return ret;
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530124}
Ramesh Yadav Javadi9a59a472018-05-09 16:51:31 +0530125
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530126EXPORT_SYMBOL(msm_iommu_sec_pgtbl_init);
127
128static int msm_secure_map(struct io_pgtable_ops *ops, unsigned long iova,
129 phys_addr_t paddr, size_t size, int iommu_prot)
130{
131 struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
132 struct io_pgtable_cfg *cfg = &data->iop.cfg;
133 void *flush_va, *flush_va_end;
134 struct scm_desc desc = {0};
135 int ret = -EINVAL;
136 u32 resp;
137
138 if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(paddr, SZ_1M) ||
139 !IS_ALIGNED(size, SZ_1M))
140 return -EINVAL;
141
142 desc.args[0] = virt_to_phys(&paddr);
143 desc.args[1] = 1;
144 desc.args[2] = size;
145 desc.args[3] = cfg->arm_msm_secure_cfg.sec_id;
146 desc.args[4] = cfg->arm_msm_secure_cfg.cbndx;
147 desc.args[5] = iova;
148 desc.args[6] = size;
149 desc.args[7] = 0;
150
151 flush_va = &paddr;
152 flush_va_end = (void *)
153 (((unsigned long) flush_va) + sizeof(phys_addr_t));
154
Charan Teja Reddy313991e2018-03-12 12:19:31 +0530155 mutex_lock(&data->pgtbl_lock);
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530156 /*
157 * Ensure that the buffer is in RAM by the time it gets to TZ
158 */
159 dmac_clean_range(flush_va, flush_va_end);
160
161 desc.arginfo = SCM_ARGS(8, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
162 SCM_VAL, SCM_VAL, SCM_VAL);
163
164 if (is_scm_armv8()) {
Charan Teja Reddy313991e2018-03-12 12:19:31 +0530165 ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530166 IOMMU_SECURE_MAP2_FLAT), &desc);
167 resp = desc.ret[0];
168 }
Charan Teja Reddy313991e2018-03-12 12:19:31 +0530169 mutex_unlock(&data->pgtbl_lock);
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530170
171 if (ret || resp)
172 return -EINVAL;
173
174 return 0;
175}
176
177static dma_addr_t msm_secure_get_phys_addr(struct scatterlist *sg)
178{
179 /*
180 * Try sg_dma_address first so that we can
181 * map carveout regions that do not have a
182 * struct page associated with them.
183 */
184 dma_addr_t pa = sg_dma_address(sg);
185
186 if (pa == 0)
187 pa = sg_phys(sg);
188 return pa;
189}
190
/*
 * msm_secure_map_sg - map a scatter-gather list via TZ.
 * @ops:	io_pgtable ops embedded in our msm_secure_io_pgtable
 * @iova:	IO virtual address, must be 1MB aligned
 * @sg:		scatterlist to map; every entry must be 1MB aligned
 * @nents:	number of entries in @sg
 * @iommu_prot:	ignored — TZ applies its own attributes
 * @size:	unused here
 *
 * Builds a flat list of 1MB-chunk physical addresses, cleans it to RAM,
 * and hands it to TZ with IOMMU_SECURE_MAP2_FLAT.
 *
 * Return: total mapped length on success, -ENOMEM if the PA list cannot
 * be allocated, -EINVAL on bad alignment, SCM failure, or when ARMv8 SCM
 * is unavailable.
 */
static int msm_secure_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents,
			   int iommu_prot, size_t *size)
{
	struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = -EINVAL;
	struct scatterlist *tmp, *sgiter;
	dma_addr_t *pa_list = 0;
	unsigned int cnt, offset = 0, chunk_offset = 0;
	dma_addr_t pa;
	void *flush_va, *flush_va_end;
	unsigned long len = 0;
	struct scm_desc desc = {0};
	int i;
	u32 resp;

	/* Total mapping length is the sum of all entry lengths */
	for_each_sg(sg, tmp, nents, i)
		len += tmp->length;

	if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
		return -EINVAL;

	if (sg->length == len) {
		/* Single contiguous entry: pass its PA as a 1-entry list */
		cnt = 1;
		pa = msm_secure_get_phys_addr(sg);
		if (!IS_ALIGNED(pa, SZ_1M))
			return -EINVAL;

		desc.args[0] = virt_to_phys(&pa);
		desc.args[1] = cnt;
		desc.args[2] = len;
		flush_va = &pa;
	} else {
		/*
		 * Multiple entries: first count the 1MB chunks, rejecting
		 * any entry whose length is not a 1MB multiple.
		 */
		sgiter = sg;
		if (!IS_ALIGNED(sgiter->length, SZ_1M))
			return -EINVAL;
		cnt = sg->length / SZ_1M;
		while ((sgiter = sg_next(sgiter))) {
			if (!IS_ALIGNED(sgiter->length, SZ_1M))
				return -EINVAL;
			cnt += sgiter->length / SZ_1M;
		}

		pa_list = kmalloc_array(cnt, sizeof(*pa_list), GFP_KERNEL);
		if (!pa_list)
			return -ENOMEM;

		/* Second pass: emit one PA per 1MB chunk into pa_list */
		sgiter = sg;
		cnt = 0;
		pa = msm_secure_get_phys_addr(sgiter);
		while (offset < len) {

			if (!IS_ALIGNED(pa, SZ_1M)) {
				kfree(pa_list);
				return -EINVAL;
			}

			pa_list[cnt] = pa + chunk_offset;
			chunk_offset += SZ_1M;
			offset += SZ_1M;
			cnt++;

			/* Entry exhausted: advance to the next sg entry */
			if (chunk_offset >= sgiter->length && offset < len) {
				chunk_offset = 0;
				sgiter = sg_next(sgiter);
				pa = msm_secure_get_phys_addr(sgiter);
			}
		}

		desc.args[0] = virt_to_phys(pa_list);
		desc.args[1] = cnt;
		desc.args[2] = SZ_1M;	/* size of each chunk */
		flush_va = pa_list;
	}

	desc.args[3] = cfg->arm_msm_secure_cfg.sec_id;
	desc.args[4] = cfg->arm_msm_secure_cfg.cbndx;
	desc.args[5] = iova;
	desc.args[6] = len;
	desc.args[7] = 0;

	desc.arginfo = SCM_ARGS(8, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
				SCM_VAL, SCM_VAL, SCM_VAL);

	/*
	 * Ensure that the buffer is in RAM by the time it gets to TZ
	 */

	flush_va_end = (void *) (((unsigned long) flush_va) +
			(cnt * sizeof(*pa_list)));

	mutex_lock(&data->pgtbl_lock);
	dmac_clean_range(flush_va, flush_va_end);

	if (is_scm_armv8()) {
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_MAP2_FLAT), &desc);
		resp = desc.ret[0];

		if (ret || resp)
			ret = -EINVAL;
		else
			ret = len;
	}
	mutex_unlock(&data->pgtbl_lock);

	/* kfree(NULL) is a no-op for the single-entry path */
	kfree(pa_list);
	return ret;
}
301
302static size_t msm_secure_unmap(struct io_pgtable_ops *ops, unsigned long iova,
303 size_t len)
304{
305 struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
306 struct io_pgtable_cfg *cfg = &data->iop.cfg;
307 int ret = -EINVAL;
308 struct scm_desc desc = {0};
309
310 if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
311 return ret;
312
313 desc.args[0] = cfg->arm_msm_secure_cfg.sec_id;
314 desc.args[1] = cfg->arm_msm_secure_cfg.cbndx;
315 desc.args[2] = iova;
316 desc.args[3] = len;
317 desc.args[4] = IOMMU_TLBINVAL_FLAG;
318 desc.arginfo = SCM_ARGS(5);
319
Charan Teja Reddy313991e2018-03-12 12:19:31 +0530320 mutex_lock(&data->pgtbl_lock);
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530321 if (is_scm_armv8()) {
Charan Teja Reddy313991e2018-03-12 12:19:31 +0530322 ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
323 IOMMU_SECURE_UNMAP2_FLAT), &desc);
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530324
325 if (!ret)
326 ret = len;
327 }
Charan Teja Reddy313991e2018-03-12 12:19:31 +0530328 mutex_unlock(&data->pgtbl_lock);
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530329 return ret;
330}
331
/*
 * HLOS cannot walk the TZ-owned page tables, so iova-to-phys translation
 * is not supported and always fails.
 * NOTE(review): returns -EINVAL converted to (unsigned) phys_addr_t, not
 * 0 — callers are expected to handle this; confirm against the framework.
 */
static phys_addr_t msm_secure_iova_to_phys(struct io_pgtable_ops *ops,
					unsigned long iova)
{
	return -EINVAL;
}
337
338static struct msm_secure_io_pgtable *
339msm_secure_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
340{
341 struct msm_secure_io_pgtable *data;
342
343 data = kmalloc(sizeof(*data), GFP_KERNEL);
344 if (!data)
345 return NULL;
346
347 data->iop.ops = (struct io_pgtable_ops) {
348 .map = msm_secure_map,
349 .map_sg = msm_secure_map_sg,
350 .unmap = msm_secure_unmap,
351 .iova_to_phys = msm_secure_iova_to_phys,
352 };
Charan Teja Reddy313991e2018-03-12 12:19:31 +0530353 mutex_init(&data->pgtbl_lock);
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530354
355 return data;
356}
357
358static struct io_pgtable *
359msm_secure_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
360{
361 struct msm_secure_io_pgtable *data =
362 msm_secure_alloc_pgtable_data(cfg);
363
364 return &data->iop;
365}
366
/*
 * io_pgtable_init_fns.free hook. The io_pgtable is embedded in the
 * msm_secure_io_pgtable, so freeing the container releases both.
 */
static void msm_secure_free_pgtable(struct io_pgtable *iop)
{
	kfree(io_pgtable_to_data(iop));
}
373
/* Constructor/destructor pair registered with the io-pgtable core for
 * the ARM MSM secure page-table format.
 */
struct io_pgtable_init_fns io_pgtable_arm_msm_secure_init_fns = {
	.alloc = msm_secure_alloc_pgtable,
	.free = msm_secure_free_pgtable,
};