blob: fcfd6bd63eeb16a508c58553689bf7055451eff4 [file] [log] [blame]
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -08001/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
Deepa Dinamani904f8f82012-12-05 16:35:01 -08002 *
Shashank Mittal162244e2011-08-08 19:01:25 -07003 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are
5 * met:
Deepa Dinamani904f8f82012-12-05 16:35:01 -08006 * * Redistributions of source code must retain the above copyright
7 * notice, this list of conditions and the following disclaimer.
8 * * Redistributions in binary form must reproduce the above
9 * copyright notice, this list of conditions and the following
10 * disclaimer in the documentation and/or other materials provided
11 * with the distribution.
12 * * Neither the name of The Linux Foundation nor the names of its
13 * contributors may be used to endorse or promote products derived
14 * from this software without specific prior written permission.
Shashank Mittal162244e2011-08-08 19:01:25 -070015 *
16 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
17 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
23 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
25 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
26 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <stdlib.h>
30#include <string.h>
31#include <err.h>
Channagoud Kadabi70375042013-12-12 14:53:31 -080032#include <asm.h>
33#include <bits.h>
Neeti Desai127b9e02012-03-20 16:11:23 -070034#include <arch/ops.h>
Shashank Mittal162244e2011-08-08 19:01:25 -070035#include "scm.h"
36
/* Build this file at -O0: the register-pinned inline-asm SMC sequences below
 * must not be reordered or eliminated by the optimizer. */
#pragma GCC optimize ("O0")

/* From Linux Kernel asm/system.h */
/* Compile-time guard for inline asm: fails the build if operand %N was not
 * allocated to the expected register. */
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef offsetof
# define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif

/* Encoding for "atomic" (register-convention) SCM calls: svc/cmd packed into
 * the upper bits, SCM_CLASS_REGISTER selects the register calling class,
 * SCM_MASK_IRQS keeps IRQs masked while in TZ, and the low nibble carries the
 * argument count n. NOTE(review): bit layout presumed to match the TZ-side
 * decoder — mirrors the Linux kernel scm driver; confirm against TZ docs. */
#define SCM_CLASS_REGISTER (0x2 << 8)
#define SCM_MASK_IRQS BIT(5)
#define SCM_ATOMIC(svc, cmd, n) ((((((svc) & 0x3f) << 10)|((cmd) & 0x3ff)) << 12) | \
				SCM_CLASS_REGISTER | \
				SCM_MASK_IRQS | \
				((n) & 0xf))
53
Shashank Mittal162244e2011-08-08 19:01:25 -070054/**
55 * alloc_scm_command() - Allocate an SCM command
56 * @cmd_size: size of the command buffer
57 * @resp_size: size of the response buffer
58 *
59 * Allocate an SCM command, including enough room for the command
60 * and response headers as well as the command and response buffers.
61 *
62 * Returns a valid &scm_command on success or %NULL if the allocation fails.
63 */
64static struct scm_command *alloc_scm_command(size_t cmd_size, size_t resp_size)
65{
66 struct scm_command *cmd;
67 size_t len = sizeof(*cmd) + sizeof(struct scm_response) + cmd_size +
Ajay Dudanib01e5062011-12-03 23:23:42 -080068 resp_size;
Shashank Mittal162244e2011-08-08 19:01:25 -070069
Deepa Dinamani904f8f82012-12-05 16:35:01 -080070 cmd = memalign(CACHE_LINE, ROUNDUP(len, CACHE_LINE));
Ajay Dudanib01e5062011-12-03 23:23:42 -080071 if (cmd) {
Shashank Mittal162244e2011-08-08 19:01:25 -070072 cmd->len = len;
73 cmd->buf_offset = offsetof(struct scm_command, buf);
74 cmd->resp_hdr_offset = cmd->buf_offset + cmd_size;
75 }
76 return cmd;
77}
78
/**
 * free_scm_command() - Free an SCM command
 * @cmd: command to free (may be NULL; free() handles that)
 *
 * Releases a buffer obtained from alloc_scm_command().
 */
static inline void free_scm_command(struct scm_command *cmd)
{
	free(cmd);
}
89
90/**
91 * scm_command_to_response() - Get a pointer to a scm_response
92 * @cmd: command
93 *
94 * Returns a pointer to a response for a command.
95 */
Ajay Dudanib01e5062011-12-03 23:23:42 -080096static inline struct scm_response *scm_command_to_response(const struct
97 scm_command *cmd)
Shashank Mittal162244e2011-08-08 19:01:25 -070098{
99 return (void *)cmd + cmd->resp_hdr_offset;
100}
101
/**
 * scm_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * The command payload is the flexible buffer at the end of the header;
 * the cast discards const because callers write the payload here.
 *
 * Returns a pointer to the command buffer of a command.
 */
static inline void *scm_get_command_buffer(const struct scm_command *cmd)
{
	return (void *)cmd->buf;
}
112
113/**
114 * scm_get_response_buffer() - Get a pointer to a response buffer
115 * @rsp: response
116 *
117 * Returns a pointer to a response buffer of a response.
118 */
119static inline void *scm_get_response_buffer(const struct scm_response *rsp)
120{
121 return (void *)rsp + rsp->buf_offset;
122}
123
/*
 * smc() - Trap into the secure world with a legacy command buffer.
 * @cmd_addr: address of a flushed struct scm_command.
 *
 * r0 = 1 selects the legacy command-buffer convention, r1 points at a
 * scratch context id TZ can write, r2 carries the command address.
 * The loop re-issues the SMC while TZ returns r0 == 1 (interrupted /
 * retry indication — NOTE(review): semantics inferred from the cmp/beq
 * retry loop; confirm against the TZ interface spec).
 *
 * Returns the final r0 from the secure world (0 on success).
 */
static uint32_t smc(uint32_t cmd_addr)
{
	uint32_t context_id;
	register uint32_t r0 __asm__("r0") = 1;
	register uint32_t r1 __asm__("r1") = (uint32_t) & context_id;
	register uint32_t r2 __asm__("r2") = cmd_addr;
	__asm__("1:smc #0 @ switch to secure world\n" "cmp r0, #1 \n" "beq 1b \n": "=r"(r0): "r"(r0), "r"(r1), "r"(r2):"r3", "cc");
	return r0;
}
133
/**
 * scm_call_atomic() - Make an SCM call with one or no argument
 * @svc: service id
 * @cmd: command id
 * @arg1: argument
 *
 * Uses the register calling convention (SCM_ATOMIC encoding in r0) instead
 * of a memory command buffer, so no cache maintenance is required.
 * Returns the r0 value handed back by the secure world.
 */
static int scm_call_atomic(uint32_t svc, uint32_t cmd, uint32_t arg1)
{
	uint32_t context_id;
	/* r0: packed svc/cmd/arg-count, r1: context id scratch, r2: argument */
	register uint32_t r0 __asm__("r0") = SCM_ATOMIC(svc, cmd, 1);
	register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
	register uint32_t r2 __asm__("r2") = arg1;

	/* __asmeq makes the build fail if the compiler did not honour the
	 * register pinning above. */
	__asm__ volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
		"smc #0 @ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2)
		: "r3");
	return r0;
}
159
/**
 * scm_call() - Send an SCM command
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer (may be NULL when cmd_len is 0)
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer (may be NULL to discard the response)
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * Returns ERR_NO_MEMORY if the command buffer cannot be allocated, otherwise
 * the secure world's return code (0 on success).
 *
 * NOTE(review): the completion poll below spins forever if TZ never sets
 * is_complete — there is no timeout.
 */
int
scm_call(uint32_t svc_id, uint32_t cmd_id, const void *cmd_buf,
	 size_t cmd_len, void *resp_buf, size_t resp_len)
{
	int ret;
	struct scm_command *cmd;
	struct scm_response *rsp;
	uint8_t *resp_ptr;

	cmd = alloc_scm_command(cmd_len, resp_len);
	if (!cmd)
		return ERR_NO_MEMORY;

	cmd->id = (svc_id << 10) | cmd_id;
	if (cmd_buf)
		memcpy(scm_get_command_buffer(cmd), cmd_buf, cmd_len);

	/* Flush command to main memory for TZ */
	arch_clean_invalidate_cache_range((addr_t) cmd, cmd->len);

	ret = smc((uint32_t) cmd);
	if (ret)
		goto out;

	if (resp_len) {
		rsp = scm_command_to_response(cmd);

		do
		{
			/* Need to invalidate before each check since TZ will update
			 * the response complete flag in main memory.
			 */
			arch_invalidate_cache_range((addr_t) rsp, sizeof(*rsp));
		} while (!rsp->is_complete);


		resp_ptr = scm_get_response_buffer(rsp);

		/* Invalidate any cached response data */
		arch_invalidate_cache_range((addr_t) resp_ptr, resp_len);

		if (resp_buf)
			memcpy(resp_buf, resp_ptr, resp_len);
	}
 out:
	/* Buffer is freed on every path, including SMC failure. */
	free_scm_command(cmd);
	return ret;
}
219
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800220int restore_secure_cfg(uint32_t id)
221{
222 int ret, scm_ret = 0;
223 tz_secure_cfg secure_cfg;
224
Siddhartha Agrawald4648892013-02-17 18:16:18 -0800225 secure_cfg.id = id;
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800226 secure_cfg.spare = 0;
227
sundarajan srinivasanc2dee742013-02-21 11:31:36 -0800228 ret = scm_call(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG, &secure_cfg, sizeof(secure_cfg),
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800229 &scm_ret, sizeof(scm_ret));
230
231 if (ret || scm_ret) {
232 dprintf(CRITICAL, "Secure Config failed\n");
233 ret = 1;
234 } else
235 ret = 0;
236
237 return ret;
238
239}
240
/* SCM Encrypt Command */
/*
 * encrypt_scm() - Ask TZ to encrypt an image in place.
 * @img_ptr: in/out pointer to the image pointer — TZ may move it.
 * @img_len_ptr: in/out image length — TZ may update it.
 *
 * The request carries the addresses of the pointer and length themselves,
 * so TZ rewrites both in main memory; hence the cache maintenance on the
 * pointer/length storage as well as on the image data.
 * Returns the scm_call() result.
 */
int encrypt_scm(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
	int ret;
	img_req cmd;

	cmd.img_ptr = (uint32*) img_ptr;
	cmd.img_len_ptr = img_len_ptr;

	/* Image data is operated upon by TZ, which accesses only the main memory.
	 * It must be flushed/invalidated before and after TZ call.
	 */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	ret = scm_call(SCM_SVC_SSD, SSD_ENCRYPT_ID, &cmd, sizeof(cmd), NULL, 0);

	/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
	 * before we use them.
	 * NOTE(review): sizeof(img_ptr)/sizeof(img_len_ptr) cover one pointer's
	 * worth of bytes — adequate on this 32-bit target, but worth confirming.
	 */
	arch_clean_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
	arch_clean_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

	/* Invalidate the updated image data */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	return ret;
}
268
/* SCM Decrypt Command */
/*
 * decrypt_scm() - Ask TZ to decrypt an image in place (legacy v1 API).
 * @img_ptr: in/out pointer to the image pointer — TZ may move it.
 * @img_len_ptr: in/out image length — TZ may update it.
 *
 * Mirrors encrypt_scm(): the request carries the addresses of the pointer
 * and length, TZ rewrites both, so the pointer/length storage and the image
 * data are all flushed/invalidated around the call.
 * Returns the scm_call() result.
 */
int decrypt_scm(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
	int ret;
	img_req cmd;

	cmd.img_ptr = (uint32*) img_ptr;
	cmd.img_len_ptr = img_len_ptr;

	/* Image data is operated upon by TZ, which accesses only the main memory.
	 * It must be flushed/invalidated before and after TZ call.
	 */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	ret = scm_call(SCM_SVC_SSD, SSD_DECRYPT_ID, &cmd, sizeof(cmd), NULL, 0);

	/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
	 * before we use them.
	 */
	arch_clean_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
	arch_clean_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

	/* Invalidate the updated image data */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	return ret;
}
296
297
/*
 * ssd_image_is_encrypted() - Parse SSD metadata to see if an image is encrypted.
 * @img_ptr: in/out image pointer; on SSD_PMD_ENCRYPTED it is advanced past the
 *           metadata to the start of the encrypted payload.
 * @img_len_ptr: in/out image length; reduced by the metadata size on success.
 * @ctx_id: out parse context id, needed later by the decrypt call.
 *
 * Returns the TZ parse status (e.g. SSD_PMD_ENCRYPTED / SSD_PMD_NOT_ENCRYPTED)
 * on success; ASSERTs (never returns) if the scm_call itself fails.
 */
static int ssd_image_is_encrypted(uint32_t ** img_ptr, uint32_t * img_len_ptr, uint32 * ctx_id)
{
	int ret = 0;
	ssd_parse_md_req parse_req;
	ssd_parse_md_rsp parse_rsp;
	int prev_len = 0;

	/* Populate meta-data ptr. Here md_len is the meta-data length.
	 * The Code below follows a growing length approach. First send
	 * min(img_len_ptr,SSD_HEADER_MIN_SIZE) say 128 bytes for example.
	 * If parse_rsp.status = PARSING_INCOMPLETE we send md_len = 256.
	 * If subsequent status = PARSING_INCOMPLETE we send md_len = 512,
	 * 1024bytes and so on until we get an valid response(rsp.status) from TZ*/

	parse_req.md = (uint32*)*img_ptr;
	parse_req.md_len = ((*img_len_ptr) >= SSD_HEADER_MIN_SIZE) ? SSD_HEADER_MIN_SIZE : (*img_len_ptr);

	/* Flush the initial metadata window so TZ sees it in main memory. */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, parse_req.md_len);

	do
	{
		ret = scm_call(SCM_SVC_SSD,
			       SSD_PARSE_MD_ID,
			       &parse_req,
			       sizeof(parse_req),
			       &parse_rsp,
			       sizeof(parse_rsp));

		if(!ret && (parse_rsp.status == SSD_PMD_PARSING_INCOMPLETE))
		{
			/* Grow the window and flush only the newly exposed bytes.
			 * NOTE(review): *img_ptr is uint32_t*, so "+ prev_len"
			 * advances prev_len WORDS while md_len is in bytes —
			 * looks like a word/byte mismatch; confirm intended units.
			 */
			prev_len = parse_req.md_len;

			parse_req.md_len *= MULTIPLICATION_FACTOR;

			arch_clean_invalidate_cache_range((addr_t) (*img_ptr + prev_len),
							  (parse_req.md_len - prev_len) );

			continue;
		}
		else
			break;

	} while(true);

	if(!ret)
	{
		if(parse_rsp.status == SSD_PMD_ENCRYPTED)
		{
			/* Skip past the metadata: shrink the length by the
			 * metadata size and point at the payload. */
			*ctx_id = parse_rsp.md_ctx_id;
			*img_len_ptr = *img_len_ptr - ((uint8_t*)parse_rsp.md_end_ptr - (uint8_t*)*img_ptr);
			*img_ptr = (uint32_t*)parse_rsp.md_end_ptr;
		}

		ret = parse_rsp.status;
	}
	else
	{
		dprintf(CRITICAL,"ssd_image_is_encrypted call failed");

		/* Transport failure is treated as fatal. */
		ASSERT(ret == 0);
	}

	return ret;
}
362
/*
 * decrypt_scm_v2() - Decrypt an SSD-encrypted image (fragment-based v2 API).
 * @img_ptr: in/out image pointer; advanced past metadata by the parse step.
 * @img_len_ptr: in/out image length; shrunk by the metadata size.
 *
 * First parses the metadata via ssd_image_is_encrypted(); if the image is
 * encrypted, decrypts it in place as a single final fragment. Unencrypted
 * images (or images without metadata) are a successful no-op.
 * Returns 0 on success, otherwise a TZ status / error code.
 */
int decrypt_scm_v2(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
	int ret = 0;
	uint32 ctx_id = 0;
	ssd_decrypt_img_frag_req decrypt_req;
	ssd_decrypt_img_frag_rsp decrypt_rsp;

	ret = ssd_image_is_encrypted(img_ptr,img_len_ptr,&ctx_id);
	switch(ret)
	{
		case SSD_PMD_ENCRYPTED:
			/* Image data is operated upon by TZ, which accesses only the main memory.
			 * It must be flushed/invalidated before and after TZ call.
			 */

			arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

			/*decrypt the image here*/

			/* Whole payload as one final fragment, using the parse
			 * context returned above. */
			decrypt_req.md_ctx_id = ctx_id;
			decrypt_req.last_frag = 1;
			decrypt_req.frag_len = *img_len_ptr;
			decrypt_req.frag = *img_ptr;

			ret = scm_call(SCM_SVC_SSD,
				       SSD_DECRYPT_IMG_FRAG_ID,
				       &decrypt_req,
				       sizeof(decrypt_req),
				       &decrypt_rsp,
				       sizeof(decrypt_rsp));

			if(!ret){
				ret = decrypt_rsp.status;
			}

			/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
			 * before we use them.
			 */
			arch_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
			arch_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

			/* Invalidate the updated image data */
			arch_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

			break;

		case SSD_PMD_NOT_ENCRYPTED:
		case SSD_PMD_NO_MD_FOUND:
			/* Nothing to decrypt — report success. */
			ret = 0;
			break;

		case SSD_PMD_BUSY:
		case SSD_PMD_BAD_MD_PTR_OR_LEN:
		case SSD_PMD_PARSING_INCOMPLETE:
		case SSD_PMD_PARSING_FAILED:
		case SSD_PMD_SETUP_CIPHER_FAILED:
			dprintf(CRITICAL,"decrypt_scm_v2: failed status %d\n",ret);
			break;

		default:
			dprintf(CRITICAL,"decrypt_scm_v2: case default: failed status %d\n",ret);
			break;
	}
	return ret;
}
428
/*
 * scm_svc_version() - Query the TZ feature version for the SSD service.
 * @major: out — set to the feature version on success.
 * @minor: out — NOTE(review): never written by this function; callers must
 *         not rely on it. Confirm whether a minor-version extraction was
 *         intended here.
 *
 * Returns the scm_call() result (0 on success).
 */
int scm_svc_version(uint32 * major, uint32 * minor)
{
	feature_version_req feature_req;
	feature_version_rsp feature_rsp;
	int ret = 0;

	feature_req.feature_id = TZBSP_FVER_SSD;

	ret = scm_call(TZBSP_SVC_INFO,
		       TZ_INFO_GET_FEATURE_ID,
		       &feature_req,
		       sizeof(feature_req),
		       &feature_rsp,
		       sizeof(feature_rsp));
	if(!ret)
		*major = TZBSP_GET_FEATURE_VERSION(feature_rsp.version);

	return ret;
}
448
449int scm_protect_keystore(uint32_t * img_ptr, uint32_t img_len)
450{
451 int ret=0;
452 ssd_protect_keystore_req protect_req;
453 ssd_protect_keystore_rsp protect_rsp;
454
455 protect_req.keystore_ptr = img_ptr;
456 protect_req.keystore_len = img_len;
457
458 arch_clean_invalidate_cache_range((addr_t) img_ptr, img_len);
459
460 ret = scm_call(SCM_SVC_SSD,
461 SSD_PROTECT_KEYSTORE_ID,
462 &protect_req,
463 sizeof(protect_req),
464 &protect_rsp,
465 sizeof(protect_rsp));
466 if(!ret)
467 {
468 if(protect_rsp.status == TZBSP_SSD_PKS_SUCCESS)
469 dprintf(INFO,"Successfully loaded the keystore ");
470 else
471 {
472 dprintf(INFO,"Loading keystore failed status %d ",protect_rsp.status);
473 ret = protect_rsp.status;
474 }
475 }
476 else
477 dprintf(INFO,"scm_call failed ");
478
479 return ret;
480}
481
Shashank Mittal162244e2011-08-08 19:01:25 -0700482void set_tamper_fuse_cmd()
483{
484 uint32_t svc_id;
485 uint32_t cmd_id;
486 void *cmd_buf;
487 size_t cmd_len;
488 void *resp_buf = NULL;
489 size_t resp_len = 0;
490
491 uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
492 cmd_buf = (void *)&fuse_id;
493 cmd_len = sizeof(fuse_id);
494
Ajay Dudanib01e5062011-12-03 23:23:42 -0800495 /*no response */
Shashank Mittal162244e2011-08-08 19:01:25 -0700496 resp_buf = NULL;
497 resp_len = 0;
498
499 svc_id = SCM_SVC_FUSE;
500 cmd_id = SCM_BLOW_SW_FUSE_ID;
501
502 scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
503 return;
504}
505
506uint8_t get_tamper_fuse_cmd()
507{
508 uint32_t svc_id;
509 uint32_t cmd_id;
510 void *cmd_buf;
511 size_t cmd_len;
512 size_t resp_len = 0;
513 uint8_t resp_buf;
514
515 uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
516 cmd_buf = (void *)&fuse_id;
517 cmd_len = sizeof(fuse_id);
518
Ajay Dudanib01e5062011-12-03 23:23:42 -0800519 /*response */
Shashank Mittal162244e2011-08-08 19:01:25 -0700520 resp_len = sizeof(resp_buf);
521
522 svc_id = SCM_SVC_FUSE;
523 cmd_id = SCM_IS_SW_FUSE_BLOWN_ID;
524
525 scm_call(svc_id, cmd_id, cmd_buf, cmd_len, &resp_buf, resp_len);
526 return resp_buf;
527}
Deepa Dinamani193874e2012-02-07 14:00:04 -0800528
/* SHA-256 digest size in bytes. */
#define SHA256_DIGEST_LENGTH (256/8)
/*
 * struct qseecom_save_partition_hash_req - request payload for
 * SCM_SVC_ES/SCM_SAVE_PARTITION_HASH_ID (see save_kernel_hash_cmd()).
 * @partition_id - partition id (0 selects the kernel partition).
 * @digest[SHA256_DIGEST_LENGTH] - sha256 digest.
 */
struct qseecom_save_partition_hash_req {
	uint32_t partition_id; /* in */
	uint8_t digest[SHA256_DIGEST_LENGTH]; /* in */
};
539
540
541void save_kernel_hash_cmd(void *digest)
542{
543 uint32_t svc_id;
544 uint32_t cmd_id;
545 void *cmd_buf;
546 size_t cmd_len;
547 void *resp_buf = NULL;
548 size_t resp_len = 0;
549 struct qseecom_save_partition_hash_req req;
550
551 /*no response */
552 resp_buf = NULL;
553 resp_len = 0;
554
555 req.partition_id = 0; /* kernel */
556 memcpy(req.digest, digest, sizeof(req.digest));
557
558 svc_id = SCM_SVC_ES;
559 cmd_id = SCM_SAVE_PARTITION_HASH_ID;
560 cmd_buf = (void *)&req;
561 cmd_len = sizeof(req);
562
563 scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
564}
565
Deepa Dinamani193874e2012-02-07 14:00:04 -0800566/*
567 * Switches the CE1 channel between ADM and register usage.
568 * channel : AP_CE_REGISTER_USE, CE1 uses register interface
569 * : AP_CE_ADM_USE, CE1 uses ADM interface
570 */
571uint8_t switch_ce_chn_cmd(enum ap_ce_channel_type channel)
572{
573 uint32_t svc_id;
574 uint32_t cmd_id;
575 void *cmd_buf;
576 size_t cmd_len;
577 size_t resp_len = 0;
578 uint8_t resp_buf;
579
580 struct {
581 uint32_t resource;
582 uint32_t chn_id;
583 }__PACKED switch_ce_chn_buf;
584
585 switch_ce_chn_buf.resource = TZ_RESOURCE_CE_AP;
586 switch_ce_chn_buf.chn_id = channel;
587 cmd_buf = (void *)&switch_ce_chn_buf;
588 cmd_len = sizeof(switch_ce_chn_buf);
589
590 /*response */
591 resp_len = sizeof(resp_buf);
592
593 svc_id = SCM_SVC_CE_CHN_SWITCH_ID;
594 cmd_id = SCM_CE_CHN_SWITCH_ID;
595
596 scm_call(svc_id, cmd_id, cmd_buf, cmd_len, &resp_buf, resp_len);
597 return resp_buf;
598}
599
Channagoud Kadabi70375042013-12-12 14:53:31 -0800600int scm_halt_pmic_arbiter()
601{
602 int ret = 0;
603
604 ret = scm_call_atomic(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER, 0);
605
606 return ret;
607}
Maria Yud7826ef2014-06-30 13:05:43 +0800608
/* SCM Random Command */
/*
 * scm_random() - Fill a buffer with random bytes from the TZ PRNG.
 * @rbuf: destination buffer (written by TZ in main memory).
 * @r_len: number of random bytes requested.
 *
 * Returns the scm_call() result (0 on success).
 */
int scm_random(uint32_t * rbuf, uint32_t r_len)
{
	int ret;
	struct tz_prng_data data;

	data.out_buf = (uint8_t*) rbuf;
	data.out_buf_size = r_len;

	/*
	 * random buffer must be flushed/invalidated before and after TZ call.
	 */
	arch_clean_invalidate_cache_range((addr_t) rbuf, r_len);

	ret = scm_call(TZ_SVC_CRYPTO, PRNG_CMD_ID, &data, sizeof(data), NULL, 0);

	/* Invalidate the updated random buffer */
	arch_clean_invalidate_cache_range((addr_t) rbuf, r_len);

	return ret;
}
630
631void * get_canary()
632{
633 void * canary;
634 if(scm_random(&canary, sizeof(canary))) {
635 dprintf(CRITICAL,"scm_call for random failed !!!");
636 /*
637 * fall back to use lib rand API if scm call failed.
638 */
639 canary = (void *)rand();
640 }
641
642 return canary;
643}
/* Default to the locked-down assumption; scm_check_boot_fuses() clears each
 * flag when the corresponding fuse bit reports the feature is off. */
static bool secure_boot_enabled = true;
static bool wdog_debug_fuse_disabled = true;
646
647void scm_check_boot_fuses()
648{
649 uint32_t ret = 0;
650 uint32_t resp;
651
652 ret = scm_call(TZBSP_SVC_INFO, IS_SECURE_BOOT_ENABLED, NULL, 0, &resp, sizeof(resp));
653
654 /* Parse Bit 0 and Bit 2 of the response */
655 if(!ret) {
656 /* Bit 0 - SECBOOT_ENABLE_CHECK */
657 if(resp & 0x1)
658 secure_boot_enabled = false;
659 /* Bit 2 - DEBUG_DISABLE_CHECK */
660 if(resp & 0x4)
661 wdog_debug_fuse_disabled = false;
662 } else
663 dprintf(CRITICAL, "scm call to check secure boot fuses failed\n");
664}
665
/*
 * is_secure_boot_enable() - Report whether secure boot is fused on.
 *
 * Re-reads the fuses via scm_check_boot_fuses() on every call, then returns
 * the cached flag (which stays true if the fuse query fails).
 */
bool is_secure_boot_enable()
{
	scm_check_boot_fuses();
	return secure_boot_enabled;
}