/*
 * Copyright (C) 2011 Google, Inc
 * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>

DEFINE_MUTEX(secure_buffer_mutex);

/* Chunk list passed to the legacy (pre-ARMv8) memory protect call */
struct cp2_mem_chunks {
	u32 chunk_list;
	u32 chunk_list_size;
	u32 chunk_size;
} __attribute__ ((__packed__));

/* Request layout for MEM_PROTECT_LOCK_ID2; must match the secure world */
struct cp2_lock_req {
	struct cp2_mem_chunks chunks;
	u32 mem_usage;
	u32 lock;
} __attribute__ ((__packed__));

struct mem_prot_info {
	phys_addr_t addr;
	u64 size;
};

struct info_list {
	struct mem_prot_info *list_head;
	u64 list_size;
};

/* SCM command IDs and parameters for the memory protection calls */
#define MEM_PROT_ASSIGN_ID	0x16
#define MEM_PROTECT_LOCK_ID2	0x0A
#define MEM_PROTECT_LOCK_ID2_FLAT 0x11
#define V2_CHUNK_SIZE		SZ_1M
#define FEATURE_ID_CP		12

struct dest_vm_and_perm_info {
	u32 vm;
	u32 perm;
	u32 *ctx;
	u32 ctx_size;
};

struct dest_info_list {
	struct dest_vm_and_perm_info *dest_info;
	u64 list_size;
};

static int secure_buffer_change_chunk(u32 chunks,
				u32 nchunks,
				u32 chunk_size,
				int lock)
{
	struct cp2_lock_req request;
	u32 resp;
	int ret;
	struct scm_desc desc = {0};

	desc.args[0] = request.chunks.chunk_list = chunks;
	desc.args[1] = request.chunks.chunk_list_size = nchunks;
	desc.args[2] = request.chunks.chunk_size = chunk_size;
	/* Usage is now always 0 */
	desc.args[3] = request.mem_usage = 0;
	desc.args[4] = request.lock = lock;
	desc.args[5] = 0;
	desc.arginfo = SCM_ARGS(6, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
				SCM_VAL);

	kmap_flush_unused();
	kmap_atomic_flush_unused();

	if (!is_scm_armv8()) {
		ret = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
				&request, sizeof(request), &resp, sizeof(resp));
	} else {
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				MEM_PROTECT_LOCK_ID2_FLAT), &desc);
		resp = desc.ret[0];
	}

	return ret;
}

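/*
 * For illustration (values derived from the packing above, not an extra
 * ABI): locking two 1MB chunks whose physical chunk list lives at P
 * fills the legacy request and the ARMv8 descriptor identically as
 * { chunk_list = P, chunk_list_size = 2, chunk_size = SZ_1M,
 *   mem_usage = 0, lock = 1 }, i.e. desc.args = { P, 2, SZ_1M, 0, 1, 0 }.
 */
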
static int secure_buffer_change_table(struct sg_table *table, int lock)
{
	int i, j;
	int ret = -EINVAL;
	u32 *chunk_list;
	struct scatterlist *sg;

	for_each_sg(table->sgl, sg, table->nents, i) {
		int nchunks;
		int size = sg->length;
		int chunk_list_len;
		phys_addr_t chunk_list_phys;

		/*
		 * This should theoretically be a phys_addr_t but the protocol
		 * indicates this should be a u32.
		 */
		u32 base;
		u64 tmp = sg_dma_address(sg);

		WARN(tmp >> 32,
			"%s: there are ones in the upper 32 bits of the sg at %p! They will be truncated! Address: 0x%llx\n",
			__func__, sg, tmp);
		if (unlikely(!size || (size % V2_CHUNK_SIZE))) {
			WARN(1,
				"%s: chunk %d has invalid size: 0x%x. Must be a multiple of 0x%x\n",
				__func__, i, size, V2_CHUNK_SIZE);
			return -EINVAL;
		}

		base = (u32)tmp;

		nchunks = size / V2_CHUNK_SIZE;
		chunk_list_len = sizeof(u32) * nchunks;

		chunk_list = kzalloc(chunk_list_len, GFP_KERNEL);
		if (!chunk_list)
			return -ENOMEM;

		chunk_list_phys = virt_to_phys(chunk_list);
		for (j = 0; j < nchunks; j++)
			chunk_list[j] = base + j * V2_CHUNK_SIZE;

		/*
		 * Flush the chunk list before sending the memory to the
		 * secure environment to ensure the data is actually present
		 * in RAM. The flush bound is in bytes, so cast to void *
		 * rather than letting u32 pointer arithmetic quadruple the
		 * flushed range.
		 */
		dmac_flush_range(chunk_list,
				(void *)chunk_list + chunk_list_len);

		ret = secure_buffer_change_chunk(chunk_list_phys,
				nchunks, V2_CHUNK_SIZE, lock);

		if (!ret) {
			/*
			 * Set or clear the private page flag to communicate
			 * the status of the chunk to other entities
			 */
			if (lock)
				SetPagePrivate(sg_page(sg));
			else
				ClearPagePrivate(sg_page(sg));
		}

		kfree(chunk_list);
	}

	return ret;
}

int msm_secure_table(struct sg_table *table)
{
	int ret;

	mutex_lock(&secure_buffer_mutex);
	ret = secure_buffer_change_table(table, 1);
	mutex_unlock(&secure_buffer_mutex);

	return ret;
}

int msm_unsecure_table(struct sg_table *table)
{
	int ret;

	mutex_lock(&secure_buffer_mutex);
	ret = secure_buffer_change_table(table, 0);
	mutex_unlock(&secure_buffer_mutex);

	return ret;
}

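/*
 * A minimal usage sketch (hypothetical caller, not part of this driver):
 * lock a buffer down, let the secure environment use it, then return it
 * to the kernel. Assumes the caller already built an sg_table whose
 * segments are multiples of V2_CHUNK_SIZE, as the chunk path requires.
 */
static int __maybe_unused example_secure_buffer_use(struct sg_table *table)
{
	int ret;

	ret = msm_secure_table(table);
	if (ret) {
		pr_err("example: failed to secure table: %d\n", ret);
		return ret;
	}

	/* ... hand the buffer to the secure environment here ... */

	return msm_unsecure_table(table);
}
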
static struct dest_info_list *populate_dest_info(int *dest_vmids, int nelements,
						int *dest_perms)
{
	struct dest_vm_and_perm_info *dest_info;
	struct dest_info_list *list;
	int i;

	dest_info = kmalloc_array(nelements,
				sizeof(struct dest_vm_and_perm_info),
				GFP_KERNEL | __GFP_ZERO);
	if (!dest_info)
		return NULL;

	for (i = 0; i < nelements; i++) {
		dest_info[i].vm = dest_vmids[i];
		dest_info[i].perm = dest_perms[i];
		dest_info[i].ctx = NULL;
		dest_info[i].ctx_size = 0;
	}

	list = kzalloc(sizeof(struct dest_info_list), GFP_KERNEL);
	if (!list) {
		kfree(dest_info);
		return NULL;
	}

	list->dest_info = dest_info;
	list->list_size = nelements * sizeof(struct dest_vm_and_perm_info);

	return list;
}

static struct info_list *get_info_list_from_table(struct sg_table *table)
{
	int i;
	struct scatterlist *sg;
	struct mem_prot_info *info;
	struct info_list *list;

	info = kmalloc_array(table->nents, sizeof(struct mem_prot_info),
				GFP_KERNEL | __GFP_ZERO);
	if (!info)
		return NULL;

	for_each_sg(table->sgl, sg, table->nents, i) {
		info[i].addr = page_to_phys(sg_page(sg));
		info[i].size = sg->length;
	}

	list = kzalloc(sizeof(struct info_list), GFP_KERNEL);
	if (!list) {
		kfree(info);
		return NULL;
	}

	list->list_head = info;
	list->list_size = table->nents * sizeof(struct mem_prot_info);
	return list;
}

static void destroy_info_list(struct info_list *info_list)
{
	kfree(info_list->list_head);
	kfree(info_list);
}

static void destroy_dest_info_list(struct dest_info_list *dest_list)
{
	kfree(dest_list->dest_info);
	kfree(dest_list);
}

/*
 * Reassign ownership of the memory described by @table from the VMs in
 * @source_vm_list to the VMs in @dest_vmids, granting each destination
 * the matching permissions in @dest_perms, via the MEM_PROT_ASSIGN_ID
 * SCM call.
 */
int hyp_assign_table(struct sg_table *table,
			u32 *source_vm_list, int source_nelems,
			int *dest_vmids, int *dest_perms,
			int dest_nelems)
{
	int ret;
	struct info_list *info_list = NULL;
	struct dest_info_list *dest_info_list = NULL;
	struct scm_desc desc = {0};

	info_list = get_info_list_from_table(table);
	if (!info_list)
		return -ENOMEM;

	dest_info_list = populate_dest_info(dest_vmids, dest_nelems,
					dest_perms);
	if (!dest_info_list) {
		ret = -ENOMEM;
		goto err1;
	}

	desc.args[0] = virt_to_phys(info_list->list_head);
	desc.args[1] = info_list->list_size;
	desc.args[2] = virt_to_phys(source_vm_list);
	desc.args[3] = sizeof(*source_vm_list) * source_nelems;
	desc.args[4] = virt_to_phys(dest_info_list->dest_info);
	desc.args[5] = dest_info_list->list_size;
	desc.args[6] = 0;

	desc.arginfo = SCM_ARGS(7, SCM_RO, SCM_VAL, SCM_RO, SCM_VAL, SCM_RO,
				SCM_VAL, SCM_VAL);

	/*
	 * Flush everything the secure world will read by physical address
	 * so the data is actually present in RAM.
	 */
	dmac_flush_range(source_vm_list, source_vm_list + source_nelems);
	dmac_flush_range(info_list->list_head, info_list->list_head +
		(info_list->list_size / sizeof(*info_list->list_head)));
	dmac_flush_range(dest_info_list->dest_info, dest_info_list->dest_info +
		(dest_info_list->list_size /
		sizeof(*dest_info_list->dest_info)));

	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
			MEM_PROT_ASSIGN_ID), &desc);
	if (ret)
		pr_err("%s: Failed to assign memory protection, ret = %d\n",
			__func__, ret);

	destroy_dest_info_list(dest_info_list);

err1:
	destroy_info_list(info_list);
	return ret;
}

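/*
 * A minimal sketch of a hyp_assign_table() caller (illustrative only):
 * hand a buffer from HLOS to two destination VMs with different
 * permissions. The VMID_* and PERM_* values are assumed to come from
 * soc/qcom/secure_buffer.h; exact names vary by tree.
 */
static int __maybe_unused example_hyp_assign(struct sg_table *table)
{
	u32 source_vm[] = { VMID_HLOS };
	int dest_vms[] = { VMID_HLOS, VMID_CP_PIXEL };
	int dest_perms[] = { PERM_READ | PERM_WRITE, PERM_READ };

	return hyp_assign_table(table, source_vm, ARRAY_SIZE(source_vm),
				dest_vms, dest_perms, ARRAY_SIZE(dest_vms));
}
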
int hyp_assign_phys(phys_addr_t addr, u64 size,
			int *dest_vmids, int *dest_perms,
			int dest_nelems)
{
	struct sg_table *table;
	u32 source_vm;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto err1;

	sg_set_page(table->sgl, phys_to_page(addr), size, 0);

	source_vm = VMID_HLOS;

	ret = hyp_assign_table(table, &source_vm, 1, dest_vmids,
				dest_perms, dest_nelems);

	/*
	 * The table is only a transport for the SCM call; free it on
	 * success as well as on failure so it is not leaked.
	 */
	sg_free_table(table);
err1:
	kfree(table);
	return ret;
}

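/*
 * A minimal sketch of a hyp_assign_phys() caller (illustrative only):
 * reassign one physically contiguous region, e.g. a 1MB carveout, to a
 * single secure destination VM. As above, the VMID_* and PERM_* values
 * are assumed from soc/qcom/secure_buffer.h.
 */
static int __maybe_unused example_assign_carveout(phys_addr_t base)
{
	int dest_vm = VMID_CP_PIXEL;
	int dest_perm = PERM_READ | PERM_WRITE;

	return hyp_assign_phys(base, SZ_1M, &dest_vm, &dest_perm, 1);
}
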
#define MAKE_CP_VERSION(major, minor, patch) \
	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))

bool msm_secure_v2_is_supported(void)
{
	int version = scm_get_feat_version(FEATURE_ID_CP);

	/*
	 * If the version is < 1.1.0, dynamic buffer allocation is
	 * not supported.
	 */
	return version >= MAKE_CP_VERSION(1, 1, 0);
}
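
/*
 * Worked example of the version packing (illustrative): 1.1.0 packs to
 * (1 << 22) | (1 << 12) | 0 = 0x401000, so a hypothetical caller would
 * gate the V2 chunk path on that minimum feature version like this.
 */
static int __maybe_unused example_secure_if_supported(struct sg_table *table)
{
	if (!msm_secure_v2_is_supported())
		return -ENOTSUPP;

	return msm_secure_table(table);
}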