/*
 * Copyright (C) 2011 Google, Inc
 * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>

DEFINE_MUTEX(secure_buffer_mutex);

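/*
 * Legacy "content protection v2" chunk-list descriptors. chunk_list holds
 * the physical address of an array of u32 chunk base addresses,
 * chunk_list_size the number of entries in that array, and chunk_size the
 * size of each chunk in bytes (V2_CHUNK_SIZE below). The layout is part of
 * the SCM interface, hence the packed attributes.
 */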
struct cp2_mem_chunks {
	u32 chunk_list;
	u32 chunk_list_size;
	u32 chunk_size;
} __attribute__ ((__packed__));

struct cp2_lock_req {
	struct cp2_mem_chunks chunks;
	u32 mem_usage;
	u32 lock;
} __attribute__ ((__packed__));

struct mem_prot_info {
	phys_addr_t addr;
	u64 size;
};

#define MEM_PROT_ASSIGN_ID 0x16
#define MEM_PROTECT_LOCK_ID2 0x0A
#define MEM_PROTECT_LOCK_ID2_FLAT 0x11
#define V2_CHUNK_SIZE SZ_1M
#define FEATURE_ID_CP 12

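/*
 * Destination VM descriptor consumed by the MEM_PROT_ASSIGN_ID call: the
 * target VMID, the permissions it receives, and a context blob (unused
 * here, always zero).
 */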
struct dest_vm_and_perm_info {
	u32 vm;
	u32 perm;
	u64 ctx;
	u32 ctx_size;
};

static void *qcom_secure_mem;
#define QCOM_SECURE_MEM_SIZE (512*1024)

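/*
 * Issue the MEM_PROTECT_LOCK_ID2 SCM call to lock or unlock a chunk list.
 * The pre-ARMv8 path passes the packed cp2_lock_req structure directly;
 * the ARMv8 path passes the same values as flat SCM arguments. The kmap
 * flushes drop any lingering kernel aliases of highmem pages before the
 * memory changes ownership.
 */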
static int secure_buffer_change_chunk(u32 chunks,
				u32 nchunks,
				u32 chunk_size,
				int lock)
{
	struct cp2_lock_req request;
	u32 resp;
	int ret;
	struct scm_desc desc = {0};

	desc.args[0] = request.chunks.chunk_list = chunks;
	desc.args[1] = request.chunks.chunk_list_size = nchunks;
	desc.args[2] = request.chunks.chunk_size = chunk_size;
	/* Usage is now always 0 */
	desc.args[3] = request.mem_usage = 0;
	desc.args[4] = request.lock = lock;
	desc.args[5] = 0;
	desc.arginfo = SCM_ARGS(6, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
				SCM_VAL);

	kmap_flush_unused();
	kmap_atomic_flush_unused();

	if (!is_scm_armv8()) {
		ret = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
				&request, sizeof(request), &resp, sizeof(resp));
	} else {
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				MEM_PROTECT_LOCK_ID2_FLAT), &desc);
		resp = desc.ret[0];
	}

	return ret;
}

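/*
 * Walk the scatterlist, build a chunk list for each segment and hand it to
 * the secure environment. On success, PG_private on the backing pages
 * records whether the chunk is currently locked away from HLOS.
 */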
static int secure_buffer_change_table(struct sg_table *table, int lock)
{
	int i, j;
	int ret = -EINVAL;
	u32 *chunk_list;
	struct scatterlist *sg;

	for_each_sg(table->sgl, sg, table->nents, i) {
		int nchunks;
		int size = sg->length;
		int chunk_list_len;
		phys_addr_t chunk_list_phys;

		/*
		 * This should theoretically be a phys_addr_t but the protocol
		 * indicates this should be a u32.
		 */
		u32 base;
		u64 tmp = sg_dma_address(sg);

		WARN((tmp >> 32) & 0xffffffff,
			"%s: there are ones in the upper 32 bits of the sg at %p! They will be truncated! Address: 0x%llx\n",
			__func__, sg, tmp);
		if (unlikely(!size || (size % V2_CHUNK_SIZE))) {
			WARN(1,
				"%s: chunk %d has invalid size: 0x%x. Must be a multiple of 0x%x\n",
				__func__, i, size, V2_CHUNK_SIZE);
			return -EINVAL;
		}

		base = (u32)tmp;

		nchunks = size / V2_CHUNK_SIZE;
		chunk_list_len = sizeof(u32) * nchunks;

		chunk_list = kzalloc(chunk_list_len, GFP_KERNEL);
		if (!chunk_list)
			return -ENOMEM;

		chunk_list_phys = virt_to_phys(chunk_list);
		for (j = 0; j < nchunks; j++)
			chunk_list[j] = base + j * V2_CHUNK_SIZE;

		/*
		 * Flush the chunk list before sending the memory to the
		 * secure environment to ensure the data is actually present
		 * in RAM. chunk_list_len is in bytes, so cast to void *
		 * before the pointer arithmetic; adding it to a u32 *
		 * would flush four times the intended range.
		 */
		dmac_flush_range(chunk_list,
				(void *)chunk_list + chunk_list_len);

		ret = secure_buffer_change_chunk(chunk_list_phys,
				nchunks, V2_CHUNK_SIZE, lock);

		if (!ret) {
			/*
			 * Set or clear the private page flag to communicate
			 * the status of the chunk to other entities
			 */
			if (lock)
				SetPagePrivate(sg_page(sg));
			else
				ClearPagePrivate(sg_page(sg));
		}

		kfree(chunk_list);
	}

	return ret;
}

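/*
 * msm_secure_table() - lock every segment of @table into the secure
 * environment. Each scatterlist entry must be a multiple of V2_CHUNK_SIZE.
 */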
int msm_secure_table(struct sg_table *table)
{
	int ret;

	mutex_lock(&secure_buffer_mutex);
	ret = secure_buffer_change_table(table, 1);
	mutex_unlock(&secure_buffer_mutex);

	return ret;
}

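/*
 * msm_unsecure_table() - return every segment of @table to normal
 * (non-secure) ownership.
 */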
int msm_unsecure_table(struct sg_table *table)
{
	int ret;

	mutex_lock(&secure_buffer_mutex);
	ret = secure_buffer_change_table(table, 0);
	mutex_unlock(&secure_buffer_mutex);

	return ret;
}

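/*
 * Build the dest_vm_and_perm_info array describing which VMs receive the
 * memory and with what permissions. The caller owns the returned buffer
 * and must kfree() it.
 */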
static struct dest_vm_and_perm_info *
populate_dest_info(int *dest_vmids, int nelements, int *dest_perms,
		   size_t *size_in_bytes)
{
	struct dest_vm_and_perm_info *dest_info;
	int i;
	size_t size;

	/* Cap the allocation at one page to avoid costly high-order allocations */
	size = nelements * sizeof(*dest_info);
	if (size > PAGE_SIZE)
		return NULL;

	dest_info = kzalloc(size, GFP_KERNEL);
	if (!dest_info)
		return NULL;

	for (i = 0; i < nelements; i++) {
		dest_info[i].vm = dest_vmids[i];
		dest_info[i].perm = dest_perms[i];
		dest_info[i].ctx = 0x0;
		dest_info[i].ctx_size = 0;
	}

	*size_in_bytes = size;
	return dest_info;
}

/* Must hold secure_buffer_mutex while allocated buffer is in use */
static struct mem_prot_info *get_info_list_from_table(struct sg_table *table,
						      size_t *size_in_bytes)
{
	int i;
	struct scatterlist *sg;
	struct mem_prot_info *info;
	size_t size;

	size = table->nents * sizeof(*info);

	if (size >= QCOM_SECURE_MEM_SIZE) {
		pr_err("%s: Not enough memory allocated. Required size %zu\n",
			__func__, size);
		return NULL;
	}

	if (!qcom_secure_mem) {
		pr_err("%s is not functional as qcom_secure_mem is not allocated.\n",
			__func__);
		return NULL;
	}

	/* "Allocate" it */
	info = qcom_secure_mem;

	for_each_sg(table->sgl, sg, table->nents, i) {
		info[i].addr = page_to_phys(sg_page(sg));
		info[i].size = sg->length;
	}

	*size_in_bytes = size;
	return info;
}

#define BATCH_MAX_SIZE SZ_2M
#define BATCH_MAX_SECTIONS 32

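/*
 * hyp_assign_table() - reassign ownership of the memory described by
 * @table from the VMs in @source_vm_list to the VMs in @dest_vmids with
 * the corresponding @dest_perms. The scatterlist entries are sent to the
 * hypervisor in batches of at most BATCH_MAX_SECTIONS entries and roughly
 * BATCH_MAX_SIZE bytes per SCM call.
 */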
int hyp_assign_table(struct sg_table *table,
			u32 *source_vm_list, int source_nelems,
			int *dest_vmids, int *dest_perms,
			int dest_nelems)
{
	int ret = 0;
	struct scm_desc desc = {0};
	u32 *source_vm_copy;
	size_t source_vm_copy_size;
	struct dest_vm_and_perm_info *dest_vm_copy;
	size_t dest_vm_copy_size;
	struct mem_prot_info *sg_table_copy;
	size_t sg_table_copy_size;

	int batch_start, batch_end;
	u64 batch_size;

	/*
	 * We can only pass cache-aligned sizes to the hypervisor, so we need
	 * to kmalloc and memcpy the source_vm_list here.
	 */
	source_vm_copy_size = sizeof(*source_vm_copy) * source_nelems;
	source_vm_copy = kzalloc(source_vm_copy_size, GFP_KERNEL);
	if (!source_vm_copy)
		return -ENOMEM;

	memcpy(source_vm_copy, source_vm_list, source_vm_copy_size);

	dest_vm_copy = populate_dest_info(dest_vmids, dest_nelems, dest_perms,
					  &dest_vm_copy_size);
	if (!dest_vm_copy) {
		ret = -ENOMEM;
		goto out_free;
	}

	mutex_lock(&secure_buffer_mutex);

	sg_table_copy = get_info_list_from_table(table, &sg_table_copy_size);
	if (!sg_table_copy) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	desc.args[0] = virt_to_phys(sg_table_copy);
	desc.args[1] = sg_table_copy_size;
	desc.args[2] = virt_to_phys(source_vm_copy);
	desc.args[3] = source_vm_copy_size;
	desc.args[4] = virt_to_phys(dest_vm_copy);
	desc.args[5] = dest_vm_copy_size;
	desc.args[6] = 0;

	desc.arginfo = SCM_ARGS(7, SCM_RO, SCM_VAL, SCM_RO, SCM_VAL, SCM_RO,
				SCM_VAL, SCM_VAL);

	dmac_flush_range(source_vm_copy,
			 (void *)source_vm_copy + source_vm_copy_size);
	dmac_flush_range(sg_table_copy,
			 (void *)sg_table_copy + sg_table_copy_size);
	dmac_flush_range(dest_vm_copy,
			 (void *)dest_vm_copy + dest_vm_copy_size);

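	/*
	 * Assign the memory in batches so that a single SCM call never
	 * covers more than BATCH_MAX_SECTIONS scatterlist entries or
	 * (approximately) BATCH_MAX_SIZE bytes. desc.args[0]/[1] are
	 * re-pointed at the current batch on each iteration.
	 */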
	batch_start = 0;
	while (batch_start < table->nents) {
		/* Ensure no size-zero batches */
		batch_size = sg_table_copy[batch_start].size;
		batch_end = batch_start + 1;
		while (1) {
			u64 size;

			if (batch_end >= table->nents)
				break;
			if (batch_end - batch_start >= BATCH_MAX_SECTIONS)
				break;

			size = sg_table_copy[batch_end].size;
			if (size + batch_size >= BATCH_MAX_SIZE)
				break;

			batch_size += size;
			batch_end++;
		}

		desc.args[0] = virt_to_phys(&sg_table_copy[batch_start]);
		desc.args[1] = (batch_end - batch_start) *
				sizeof(sg_table_copy[0]);

		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				MEM_PROT_ASSIGN_ID), &desc);
		if (ret) {
			pr_err("%s: Failed to assign memory protection, ret = %d\n",
				__func__, ret);
			break;
		}
		batch_start = batch_end;
	}

out_unlock:
	mutex_unlock(&secure_buffer_mutex);
	kfree(dest_vm_copy);
out_free:
	kfree(source_vm_copy);
	return ret;
}

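/*
 * hyp_assign_phys() - convenience wrapper around hyp_assign_table() for a
 * single physically contiguous region.
 *
 * Illustrative (hypothetical) usage, reassigning a 1MB region from HLOS to
 * the secure pixel VM; the VMID and PERM_* values shown are examples and
 * depend on the use case:
 *
 *	u32 src_vms[] = { VMID_HLOS };
 *	int dst_vms[] = { VMID_CP_PIXEL };
 *	int dst_perms[] = { PERM_READ | PERM_WRITE };
 *	int ret = hyp_assign_phys(paddr, SZ_1M, src_vms, 1,
 *				  dst_vms, dst_perms, 1);
 */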
int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vm_list,
			int source_nelems, int *dest_vmids,
			int *dest_perms, int dest_nelems)
{
	struct sg_table table;
	int ret;

	ret = sg_alloc_table(&table, 1, GFP_KERNEL);
	if (ret)
		return ret;

	sg_set_page(table.sgl, phys_to_page(addr), size, 0);

	ret = hyp_assign_table(&table, source_vm_list, source_nelems,
			       dest_vmids, dest_perms, dest_nelems);

	sg_free_table(&table);
	return ret;
}
EXPORT_SYMBOL(hyp_assign_phys);

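/* Map a VMID to a human-readable name for log messages. */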
const char *msm_secure_vmid_to_string(int secure_vmid)
{
	switch (secure_vmid) {
	case VMID_HLOS:
		return "VMID_HLOS";
	case VMID_CP_TOUCH:
		return "VMID_CP_TOUCH";
	case VMID_CP_BITSTREAM:
		return "VMID_CP_BITSTREAM";
	case VMID_CP_PIXEL:
		return "VMID_CP_PIXEL";
	case VMID_CP_NON_PIXEL:
		return "VMID_CP_NON_PIXEL";
	case VMID_CP_CAMERA:
		return "VMID_CP_CAMERA";
	case VMID_HLOS_FREE:
		return "VMID_HLOS_FREE";
	case VMID_MSS_MSA:
		return "VMID_MSS_MSA";
	case VMID_MSS_NONMSA:
		return "VMID_MSS_NONMSA";
	case VMID_CP_SEC_DISPLAY:
		return "VMID_CP_SEC_DISPLAY";
	case VMID_CP_APP:
		return "VMID_CP_APP";
	case VMID_WLAN:
		return "VMID_WLAN";
	case VMID_WLAN_CE:
		return "VMID_WLAN_CE";
	case VMID_CP_CAMERA_PREVIEW:
		return "VMID_CP_CAMERA_PREVIEW";
	case VMID_CP_SPSS_SP:
		return "VMID_CP_SPSS_SP";
	case VMID_CP_SPSS_SP_SHARED:
		return "VMID_CP_SPSS_SP_SHARED";
	case VMID_CP_SPSS_HLOS_SHARED:
		return "VMID_CP_SPSS_HLOS_SHARED";
	case VMID_INVAL:
		return "VMID_INVAL";
	default:
		return "Unknown VMID";
	}
}

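/*
 * Pack a content-protection version as 10 bits of major, 10 bits of minor
 * and 12 bits of patch level, which is how the value returned by
 * scm_get_feat_version(FEATURE_ID_CP) is interpreted below.
 */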
#define MAKE_CP_VERSION(major, minor, patch) \
	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))

bool msm_secure_v2_is_supported(void)
{
	int version = scm_get_feat_version(FEATURE_ID_CP);

	/*
	 * If the version is < 1.1.0 then dynamic buffer allocation is
	 * not supported.
	 */
	return version >= MAKE_CP_VERSION(1, 1, 0);
}

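/*
 * Allocate the shared buffer handed to the secure environment by
 * get_info_list_from_table(). Runs once at init; falls back to CMA/DMA
 * coherent memory if the 512KB kzalloc() fails (e.g. under memory
 * fragmentation).
 */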
static int __init alloc_secure_shared_memory(void)
{
	int ret = 0;
	dma_addr_t dma_handle;

	qcom_secure_mem = kzalloc(QCOM_SECURE_MEM_SIZE, GFP_KERNEL);
	if (!qcom_secure_mem) {
		/* Fall back to CMA-DMA memory */
		qcom_secure_mem = dma_alloc_coherent(NULL, QCOM_SECURE_MEM_SIZE,
						     &dma_handle, GFP_KERNEL);
		if (!qcom_secure_mem) {
			pr_err("Couldn't allocate memory for secure use-cases. hyp_assign_table will not work\n");
			return -ENOMEM;
		}
	}

	return ret;
}
pure_initcall(alloc_secure_shared_memory);