/*
 * Copyright (C) 2011 Google, Inc
 * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>

DEFINE_MUTEX(secure_buffer_mutex);

struct cp2_mem_chunks {
	u32 chunk_list;
	u32 chunk_list_size;
	u32 chunk_size;
} __attribute__ ((__packed__));

struct cp2_lock_req {
	struct cp2_mem_chunks chunks;
	u32 mem_usage;
	u32 lock;
} __attribute__ ((__packed__));

struct mem_prot_info {
	phys_addr_t addr;
	u64 size;
};

#define MEM_PROT_ASSIGN_ID		0x16
#define MEM_PROTECT_LOCK_ID2		0x0A
#define MEM_PROTECT_LOCK_ID2_FLAT	0x11
#define V2_CHUNK_SIZE			SZ_1M
#define FEATURE_ID_CP			12

struct dest_vm_and_perm_info {
	u32 vm;
	u32 perm;
	u64 ctx;
	u32 ctx_size;
};

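/*
 * Scratch buffer shared with the secure environment for the mem_prot_info
 * list built by get_info_list_from_table(). Allocated once at init by
 * alloc_secure_shared_memory() and guarded by secure_buffer_mutex.
 */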
static void *qcom_secure_mem;
#define QCOM_SECURE_MEM_SIZE (512*1024)

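/*
 * Issue a single MEM_PROTECT_LOCK call for one chunk list. The legacy SCM
 * interface takes the packed cp2_lock_req structure by reference; the ARMv8
 * SCM interface takes the same values flattened into an scm_desc.
 */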
static int secure_buffer_change_chunk(u32 chunks,
				u32 nchunks,
				u32 chunk_size,
				int lock)
{
	struct cp2_lock_req request;
	u32 resp;
	int ret;
	struct scm_desc desc = {0};

	desc.args[0] = request.chunks.chunk_list = chunks;
	desc.args[1] = request.chunks.chunk_list_size = nchunks;
	desc.args[2] = request.chunks.chunk_size = chunk_size;
	/* Usage is now always 0 */
	desc.args[3] = request.mem_usage = 0;
	desc.args[4] = request.lock = lock;
	desc.args[5] = 0;
	desc.arginfo = SCM_ARGS(6, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
				SCM_VAL);

	kmap_flush_unused();
	kmap_atomic_flush_unused();

	if (!is_scm_armv8()) {
		ret = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
				&request, sizeof(request), &resp, sizeof(resp));
	} else {
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				MEM_PROTECT_LOCK_ID2_FLAT), &desc);
		resp = desc.ret[0];
	}

	return ret;
}

static int secure_buffer_change_table(struct sg_table *table, int lock)
{
	int i, j;
	int ret = -EINVAL;
	u32 *chunk_list;
	struct scatterlist *sg;

	for_each_sg(table->sgl, sg, table->nents, i) {
		int nchunks;
		int size = sg->length;
		int chunk_list_len;
		phys_addr_t chunk_list_phys;

		/*
		 * This should theoretically be a phys_addr_t but the protocol
		 * indicates this should be a u32.
		 */
		u32 base;
		u64 tmp = sg_dma_address(sg);

		WARN((tmp >> 32) & 0xffffffff,
			"%s: there are ones in the upper 32 bits of the sg at %p! They will be truncated! Address: 0x%llx\n",
			__func__, sg, tmp);
		if (unlikely(!size || (size % V2_CHUNK_SIZE))) {
			WARN(1,
				"%s: chunk %d has invalid size: 0x%x. Must be a multiple of 0x%x\n",
				__func__, i, size, V2_CHUNK_SIZE);
			return -EINVAL;
		}

		base = (u32)tmp;

		nchunks = size / V2_CHUNK_SIZE;
		chunk_list_len = sizeof(u32)*nchunks;

		chunk_list = kzalloc(chunk_list_len, GFP_KERNEL);

		if (!chunk_list)
			return -ENOMEM;

		chunk_list_phys = virt_to_phys(chunk_list);
		for (j = 0; j < nchunks; j++)
			chunk_list[j] = base + j * V2_CHUNK_SIZE;

		/*
		 * Flush the chunk list before sending the memory to the
		 * secure environment to ensure the data is actually present
		 * in RAM
		 */
		dmac_flush_range(chunk_list,
				 (void *)chunk_list + chunk_list_len);

		ret = secure_buffer_change_chunk(virt_to_phys(chunk_list),
				nchunks, V2_CHUNK_SIZE, lock);

		if (!ret) {
			/*
			 * Set or clear the private page flag to communicate
			 * the status of the chunk to other entities
			 */
			if (lock)
				SetPagePrivate(sg_page(sg));
			else
				ClearPagePrivate(sg_page(sg));
		}

		kfree(chunk_list);
	}

	return ret;
}

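/*
 * msm_secure_table() - lock down every chunk described by @table through the
 * MEM_PROTECT_LOCK SCM call and mark the backing pages as private.
 * Serialized with other secure-buffer operations by secure_buffer_mutex.
 */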
int msm_secure_table(struct sg_table *table)
{
	int ret;

	mutex_lock(&secure_buffer_mutex);
	ret = secure_buffer_change_table(table, 1);
	mutex_unlock(&secure_buffer_mutex);

	return ret;
}

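/*
 * msm_unsecure_table() - reverse of msm_secure_table(); unlock the chunks
 * described by @table and clear the private flag on the backing pages.
 */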
int msm_unsecure_table(struct sg_table *table)
{
	int ret;

	mutex_lock(&secure_buffer_mutex);
	ret = secure_buffer_change_table(table, 0);
	mutex_unlock(&secure_buffer_mutex);
	return ret;
}

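/*
 * Build the dest_vm_and_perm_info array consumed by the MEM_PROT_ASSIGN SCM
 * call. Returns a kzalloc'd buffer (caller must kfree()) or NULL if the
 * request would exceed a single page.
 */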
static struct dest_vm_and_perm_info *
populate_dest_info(int *dest_vmids, int nelements, int *dest_perms,
		   size_t *size_in_bytes)
{
	struct dest_vm_and_perm_info *dest_info;
	int i;
	size_t size;

	/* Ensure allocated size is less than PAGE_ALLOC_COSTLY_ORDER */
	size = nelements * sizeof(*dest_info);
	if (size > PAGE_SIZE)
		return NULL;

	dest_info = kzalloc(size, GFP_KERNEL);
	if (!dest_info)
		return NULL;

	for (i = 0; i < nelements; i++) {
		dest_info[i].vm = dest_vmids[i];
		dest_info[i].perm = dest_perms[i];
		dest_info[i].ctx = 0x0;
		dest_info[i].ctx_size = 0;
	}

	*size_in_bytes = size;
	return dest_info;
}

/* Must hold secure_buffer_mutex while allocated buffer is in use */
static struct mem_prot_info *get_info_list_from_table(struct sg_table *table,
						size_t *size_in_bytes)
{
	int i;
	struct scatterlist *sg;
	struct mem_prot_info *info;
	size_t size;

	size = table->nents * sizeof(*info);

	if (size >= QCOM_SECURE_MEM_SIZE) {
		pr_err("%s: Not enough memory allocated. Required size %zd\n",
			__func__, size);
		return NULL;
	}

	if (!qcom_secure_mem) {
		pr_err("%s is not functional as qcom_secure_mem is not allocated.\n",
			__func__);
		return NULL;
	}

	/* "Allocate" it */
	info = qcom_secure_mem;

	for_each_sg(table->sgl, sg, table->nents, i) {
		info[i].addr = page_to_phys(sg_page(sg));
		info[i].size = sg->length;
	}

	*size_in_bytes = size;
	return info;
}

#define BATCH_MAX_SIZE SZ_2M
#define BATCH_MAX_SECTIONS 32

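/*
 * hyp_assign_table() - reassign the memory described by @table from the VMs
 * in @source_vm_list to the VMs in @dest_vmids with the matching permissions
 * in @dest_perms, using the MEM_PROT_ASSIGN SCM call.
 *
 * The scatterlist is flattened into a mem_prot_info array inside
 * qcom_secure_mem and submitted in batches bounded by BATCH_MAX_SECTIONS
 * entries and (roughly) BATCH_MAX_SIZE bytes per call, so a failure can
 * leave earlier batches already reassigned.
 */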
int hyp_assign_table(struct sg_table *table,
			u32 *source_vm_list, int source_nelems,
			int *dest_vmids, int *dest_perms,
			int dest_nelems)
{
	int ret = 0;
	struct scm_desc desc = {0};
	u32 *source_vm_copy;
	size_t source_vm_copy_size;
	struct dest_vm_and_perm_info *dest_vm_copy;
	size_t dest_vm_copy_size;
	struct mem_prot_info *sg_table_copy;
	size_t sg_table_copy_size;

	int batch_start, batch_end;
	u64 batch_size;

	/*
	 * We can only pass cache-aligned sizes to hypervisor, so we need
	 * to kmalloc and memcpy the source_vm_list here.
	 */
	source_vm_copy_size = sizeof(*source_vm_copy) * source_nelems;
	source_vm_copy = kzalloc(source_vm_copy_size, GFP_KERNEL);
	if (!source_vm_copy)
		return -ENOMEM;

	memcpy(source_vm_copy, source_vm_list, source_vm_copy_size);

	dest_vm_copy = populate_dest_info(dest_vmids, dest_nelems, dest_perms,
					  &dest_vm_copy_size);
	if (!dest_vm_copy) {
		ret = -ENOMEM;
		goto out_free;
	}

	mutex_lock(&secure_buffer_mutex);

	sg_table_copy = get_info_list_from_table(table, &sg_table_copy_size);
	if (!sg_table_copy) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	desc.args[0] = virt_to_phys(sg_table_copy);
	desc.args[1] = sg_table_copy_size;
	desc.args[2] = virt_to_phys(source_vm_copy);
	desc.args[3] = source_vm_copy_size;
	desc.args[4] = virt_to_phys(dest_vm_copy);
	desc.args[5] = dest_vm_copy_size;
	desc.args[6] = 0;

	desc.arginfo = SCM_ARGS(7, SCM_RO, SCM_VAL, SCM_RO, SCM_VAL, SCM_RO,
				SCM_VAL, SCM_VAL);

	dmac_flush_range(source_vm_copy,
			 (void *)source_vm_copy + source_vm_copy_size);
	dmac_flush_range(sg_table_copy,
			 (void *)sg_table_copy + sg_table_copy_size);
	dmac_flush_range(dest_vm_copy,
			 (void *)dest_vm_copy + dest_vm_copy_size);

	batch_start = 0;
	while (batch_start < table->nents) {
		/* Ensure no size zero batches */
		batch_size = sg_table_copy[batch_start].size;
		batch_end = batch_start + 1;
		while (1) {
			u64 size;

			if (batch_end >= table->nents)
				break;
			if (batch_end - batch_start >= BATCH_MAX_SECTIONS)
				break;

			size = sg_table_copy[batch_end].size;
			if (size + batch_size >= BATCH_MAX_SIZE)
				break;

			batch_size += size;
			batch_end++;
		}

		desc.args[0] = virt_to_phys(&sg_table_copy[batch_start]);
		desc.args[1] = (batch_end - batch_start) *
				sizeof(sg_table_copy[0]);

		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				MEM_PROT_ASSIGN_ID), &desc);
		if (ret) {
			pr_info("%s: Failed to assign memory protection, ret = %d\n",
				__func__, ret);
			break;
		}
		batch_start = batch_end;
	}

out_unlock:
	mutex_unlock(&secure_buffer_mutex);
	kfree(dest_vm_copy);
out_free:
	kfree(source_vm_copy);
	return ret;
}

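/*
 * hyp_assign_phys() - single-range convenience wrapper around
 * hyp_assign_table(). A minimal usage sketch, assuming the VMID_* and PERM_*
 * values below are defined for the target in soc/qcom/secure_buffer.h (they
 * are illustrative, not a statement of what a given platform supports):
 *
 *	u32 src_vms[] = { VMID_HLOS };
 *	int dst_vms[] = { VMID_CP_PIXEL };
 *	int dst_perms[] = { PERM_READ | PERM_WRITE };
 *	int ret;
 *
 *	ret = hyp_assign_phys(paddr, size, src_vms, 1, dst_vms, dst_perms, 1);
 */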
int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vm_list,
			int source_nelems, int *dest_vmids,
			int *dest_perms, int dest_nelems)
{
	struct sg_table table;
	int ret;

	ret = sg_alloc_table(&table, 1, GFP_KERNEL);
	if (ret)
		return ret;

	sg_set_page(table.sgl, phys_to_page(addr), size, 0);

	ret = hyp_assign_table(&table, source_vm_list, source_nelems,
			       dest_vmids, dest_perms, dest_nelems);

	sg_free_table(&table);
	return ret;
}
EXPORT_SYMBOL(hyp_assign_phys);

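/* Map a secure VMID to a printable name, for logging only. */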
const char *msm_secure_vmid_to_string(int secure_vmid)
{
	switch (secure_vmid) {
	case VMID_HLOS:
		return "VMID_HLOS";
	case VMID_CP_TOUCH:
		return "VMID_CP_TOUCH";
	case VMID_CP_BITSTREAM:
		return "VMID_CP_BITSTREAM";
	case VMID_CP_PIXEL:
		return "VMID_CP_PIXEL";
	case VMID_CP_NON_PIXEL:
		return "VMID_CP_NON_PIXEL";
	case VMID_CP_CAMERA:
		return "VMID_CP_CAMERA";
	case VMID_HLOS_FREE:
		return "VMID_HLOS_FREE";
	case VMID_MSS_MSA:
		return "VMID_MSS_MSA";
	case VMID_MSS_NONMSA:
		return "VMID_MSS_NONMSA";
	case VMID_CP_SEC_DISPLAY:
		return "VMID_CP_SEC_DISPLAY";
	case VMID_CP_APP:
		return "VMID_CP_APP";
	case VMID_WLAN:
		return "VMID_WLAN";
	case VMID_WLAN_CE:
		return "VMID_WLAN_CE";
	case VMID_CP_CAMERA_PREVIEW:
		return "VMID_CP_CAMERA_PREVIEW";
	case VMID_CP_SPSS_SP:
		return "VMID_CP_SPSS_SP";
	case VMID_CP_SPSS_SP_SHARED:
		return "VMID_CP_SPSS_SP_SHARED";
	case VMID_CP_SPSS_HLOS_SHARED:
		return "VMID_CP_SPSS_HLOS_SHARED";
	case VMID_INVAL:
		return "VMID_INVAL";
	default:
		return "Unknown VMID";
	}
}

#define MAKE_CP_VERSION(major, minor, patch) \
	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
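/*
 * Packs major:10 | minor:10 | patch:12 bits,
 * e.g. MAKE_CP_VERSION(1, 1, 0) == 0x00401000.
 */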

bool msm_secure_v2_is_supported(void)
{
	int version = scm_get_feat_version(FEATURE_ID_CP);

	/*
	 * if the version is < 1.1.0 then dynamic buffer allocation is
	 * not supported
	 */
	return version >= MAKE_CP_VERSION(1, 1, 0);
}

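/*
 * Set aside the qcom_secure_mem staging buffer at boot, falling back to
 * dma_alloc_coherent() if the kzalloc() of QCOM_SECURE_MEM_SIZE fails.
 */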
static int __init alloc_secure_shared_memory(void)
{
	int ret = 0;
	dma_addr_t dma_handle;

	qcom_secure_mem = kzalloc(QCOM_SECURE_MEM_SIZE, GFP_KERNEL);
	if (!qcom_secure_mem) {
		/* Fallback to CMA-DMA memory */
		qcom_secure_mem = dma_alloc_coherent(NULL, QCOM_SECURE_MEM_SIZE,
						     &dma_handle, GFP_KERNEL);
		if (!qcom_secure_mem) {
			pr_err("Couldn't allocate memory for secure use-cases. hyp_assign_table will not work\n");
			return -ENOMEM;
		}
	}

	return ret;
}
pure_initcall(alloc_secure_shared_memory);