blob: 6042fab016c4320d42e4bb53adc0747c0a45e73d [file] [log] [blame]
Amit Blayfe23ee22015-01-09 19:09:51 +02001/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
Deepa Dinamani904f8f82012-12-05 16:35:01 -08002 *
Shashank Mittal162244e2011-08-08 19:01:25 -07003 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are
5 * met:
Deepa Dinamani904f8f82012-12-05 16:35:01 -08006 * * Redistributions of source code must retain the above copyright
7 * notice, this list of conditions and the following disclaimer.
8 * * Redistributions in binary form must reproduce the above
9 * copyright notice, this list of conditions and the following
10 * disclaimer in the documentation and/or other materials provided
11 * with the distribution.
12 * * Neither the name of The Linux Foundation nor the names of its
13 * contributors may be used to endorse or promote products derived
14 * from this software without specific prior written permission.
Shashank Mittal162244e2011-08-08 19:01:25 -070015 *
16 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
17 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
23 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
25 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
26 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <stdlib.h>
30#include <string.h>
31#include <err.h>
Channagoud Kadabi179df0b2013-12-12 14:53:31 -080032#include <asm.h>
33#include <bits.h>
Neeti Desai127b9e02012-03-20 16:11:23 -070034#include <arch/ops.h>
vijay kumar4f4405f2014-08-08 11:49:53 +053035#include <rand.h>
36#include <image_verify.h>
Shashank Mittal162244e2011-08-08 19:01:25 -070037#include "scm.h"
38
/* NOTE(review): presumably disables optimization so the register/asm
 * sequences below stay exactly as written — TODO confirm intent. */
#pragma GCC optimize ("O0")

/* From Linux Kernel asm/system.h */
/* Compile-time guard: fails assembly if operand x was not placed in register y. */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef offsetof
/* Byte offset of MEMBER within TYPE (classic null-pointer idiom). */
# define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
47
#define SCM_CLASS_REGISTER	(0x2 << 8)
#define SCM_MASK_IRQS		BIT(5)
/* Pack service id (6 bits), command id (10 bits), register-class marker,
 * IRQ mask bit and argument count (4 bits) into the r0 command word used
 * by the atomic SCM call variants below.
 */
#define SCM_ATOMIC(svc, cmd, n) ((((((svc) & 0x3f) << 10)|((cmd) & 0x3ff)) << 12) | \
				SCM_CLASS_REGISTER | \
				SCM_MASK_IRQS | \
				((n) & 0xf))
54
/* SCM interface as per ARM spec present? */
/* Set once by scm_init(); when true, callers use scm_call2() (ARM SMC
 * calling convention), otherwise the legacy scm_call() path.
 */
bool scm_arm_support;
57
/*
 * Probe whether TZ exposes the ARM-spec (SMC calling convention) SCM
 * interface by issuing IS_CALL_AVAIL_CMD for (svc_id, cmd_id) through the
 * new interface. A zero return from scm_call2() means the new interface
 * works, and the global scm_arm_support flag is latched to true.
 */
static void scm_arm_support_available(uint32_t svc_id, uint32_t cmd_id)
{
	uint32_t ret;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};
	/* Make a call to check if SCM call available using new interface,
	 * if this returns 0 then scm implementation as per arm spec
	 * otherwise use the old interface for scm calls
	 */
	scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);
	scm_arg.x1 = MAKE_SCM_ARGS(0x1);
	scm_arg.x2 = MAKE_SIP_SCM_CMD(svc_id, cmd_id);

	ret = scm_call2(&scm_arg, &scm_ret);

	if (!ret)
		scm_arm_support = true;
}
76
77
/* One-time SCM setup: detect which SCM calling convention TZ supports. */
void scm_init()
{
	scm_arm_support_available(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);
}
Channagoud Kadabi179df0b2013-12-12 14:53:31 -080082
/**
 * alloc_scm_command() - Allocate an SCM command
 * @cmd_size: size of the command buffer
 * @resp_size: size of the response buffer
 *
 * Allocate an SCM command, including enough room for the command
 * and response headers as well as the command and response buffers.
 *
 * The allocation is cache-line aligned and rounded up to whole cache
 * lines because the buffer is flushed/invalidated as a unit for TZ.
 *
 * Returns a valid &scm_command on success or %NULL if the allocation fails.
 */
static struct scm_command *alloc_scm_command(size_t cmd_size, size_t resp_size)
{
	struct scm_command *cmd;
	size_t len = sizeof(*cmd) + sizeof(struct scm_response) + cmd_size +
	    resp_size;

	cmd = memalign(CACHE_LINE, ROUNDUP(len, CACHE_LINE));
	if (cmd) {
		memset(cmd, 0, len);
		/* len recorded here is the logical (unrounded) length */
		cmd->len = len;
		cmd->buf_offset = offsetof(struct scm_command, buf);
		cmd->resp_hdr_offset = cmd->buf_offset + cmd_size;
	}
	return cmd;
}
108
/**
 * free_scm_command() - Free an SCM command
 * @cmd: command to free
 *
 * Free an SCM command (a NULL @cmd is a no-op, as with free()).
 */
static inline void free_scm_command(struct scm_command *cmd)
{
	free(cmd);
}
119
120/**
121 * scm_command_to_response() - Get a pointer to a scm_response
122 * @cmd: command
123 *
124 * Returns a pointer to a response for a command.
125 */
Ajay Dudanib01e5062011-12-03 23:23:42 -0800126static inline struct scm_response *scm_command_to_response(const struct
127 scm_command *cmd)
Shashank Mittal162244e2011-08-08 19:01:25 -0700128{
129 return (void *)cmd + cmd->resp_hdr_offset;
130}
131
132/**
133 * scm_get_command_buffer() - Get a pointer to a command buffer
134 * @cmd: command
135 *
136 * Returns a pointer to the command buffer of a command.
137 */
138static inline void *scm_get_command_buffer(const struct scm_command *cmd)
139{
140 return (void *)cmd->buf;
141}
142
143/**
144 * scm_get_response_buffer() - Get a pointer to a response buffer
145 * @rsp: response
146 *
147 * Returns a pointer to a response buffer of a response.
148 */
149static inline void *scm_get_response_buffer(const struct scm_response *rsp)
150{
151 return (void *)rsp + rsp->buf_offset;
152}
153
/*
 * Legacy SCM entry: issue an smc with r0 = 1, r1 = &context_id and
 * r2 = address of the scm_command buffer. The instruction is re-issued
 * while TZ leaves 1 in r0 (see the cmp/beq loop). Returns the final r0.
 */
static uint32_t smc(uint32_t cmd_addr)
{
	uint32_t context_id;
	register uint32_t r0 __asm__("r0") = 1;
	register uint32_t r1 __asm__("r1") = (uint32_t) & context_id;
	register uint32_t r2 __asm__("r2") = cmd_addr;
	/* r3 and the flags are clobbered by the secure side / the cmp */
	__asm__("1:smc #0 @ switch to secure world\n" "cmp r0, #1 \n" "beq 1b \n": "=r"(r0): "r"(r0), "r"(r1), "r"(r2):"r3", "cc");
	return r0;
}
163
/**
* scm_call_atomic: Make scm call with one or no argument
* @svc: service id
* @cmd: command id
* @arg1: argument
*
* The command word is built with SCM_ATOMIC (arg count 1) and passed in r0;
* r1 carries &context_id, r2 the argument. Returns the value TZ leaves in r0.
*/

static int scm_call_atomic(uint32_t svc, uint32_t cmd, uint32_t arg1)
{
	uint32_t context_id;
	register uint32_t r0 __asm__("r0") = SCM_ATOMIC(svc, cmd, 1);
	register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
	register uint32_t r2 __asm__("r2") = arg1;

	/* __asmeq aborts the build if an operand was not allocated to the
	 * register the SCM ABI requires.
	 */
	__asm__ volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
		"smc #0 @ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2)
		: "r3");
	return r0;
}
189
/**
 * scm_call_atomic2() - Send an atomic SCM command with two arguments
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @arg1: first argument
 * @arg2: second argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptable, atomic and SMP safe.
 *
 * NOTE(review): unlike scm_call_atomic(), r3 is an input here (second
 * argument) rather than a clobber; no "cc" clobber is declared in either
 * variant — verify against the platform's SCM ABI if touched.
 */
int scm_call_atomic2(uint32_t svc, uint32_t cmd, uint32_t arg1, uint32_t arg2)
{
	int context_id;
	register uint32_t r0 __asm__("r0") = SCM_ATOMIC(svc, cmd, 2);
	register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
	register uint32_t r2 __asm__("r2") = arg1;
	register uint32_t r3 __asm__("r3") = arg2;

	__asm__ volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
		__asmeq("%4", "r3")
		"smc #0 @ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2), "r" (r3));
	return r0;
}
219
/**
 * scm_call() - Send an SCM command
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 *
 * TZ reads/writes main memory directly, so the command buffer is cleaned
 * to memory before the smc, and the response header/payload are invalidated
 * before the CPU reads them. Returns 0 on success, the smc error otherwise.
 */
int
scm_call(uint32_t svc_id, uint32_t cmd_id, const void *cmd_buf,
	 size_t cmd_len, void *resp_buf, size_t resp_len)
{
	int ret;
	struct scm_command *cmd;
	struct scm_response *rsp;
	uint8_t *resp_ptr;

	cmd = alloc_scm_command(cmd_len, resp_len);
	if (!cmd)
		return ERR_NO_MEMORY;

	/* Command id encodes both service and command, matching SCM_ATOMIC's
	 * (svc << 10) | cmd layout. */
	cmd->id = (svc_id << 10) | cmd_id;
	if (cmd_buf)
		memcpy(scm_get_command_buffer(cmd), cmd_buf, cmd_len);

	/* Flush command to main memory for TZ */
	arch_clean_invalidate_cache_range((addr_t) cmd, cmd->len);

	ret = smc((uint32_t) cmd);
	if (ret)
		goto out;

	if (resp_len) {
		rsp = scm_command_to_response(cmd);

		/* Busy-wait until TZ marks the response complete. */
		do
		{
			/* Need to invalidate before each check since TZ will update
			 * the response complete flag in main memory.
			 */
			arch_invalidate_cache_range((addr_t) rsp, sizeof(*rsp));
		} while (!rsp->is_complete);


		resp_ptr = scm_get_response_buffer(rsp);

		/* Invalidate any cached response data */
		arch_invalidate_cache_range((addr_t) resp_ptr, resp_len);

		if (resp_buf)
			memcpy(resp_buf, resp_ptr, resp_len);
	}
 out:
	free_scm_command(cmd);
	return ret;
}
279
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800280int restore_secure_cfg(uint32_t id)
281{
Channagoud Kadabiacaa75e2014-06-09 16:29:29 -0700282 int ret = 0;
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800283 tz_secure_cfg secure_cfg;
284
Siddhartha Agrawald4648892013-02-17 18:16:18 -0800285 secure_cfg.id = id;
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800286 secure_cfg.spare = 0;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700287 scmcall_arg scm_arg = {0};
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800288
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700289 if(!scm_arm_support)
290 {
291 ret = scm_call(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG, &secure_cfg, sizeof(secure_cfg),
292 NULL, 0);
293 }
294 else
295 {
296 scm_arg.x0 = MAKE_SIP_SCM_CMD(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG);
297 scm_arg.x1 = MAKE_SCM_ARGS(0x2);
298 scm_arg.x2 = id;
299 scm_arg.x3 = 0x0; /* Spare unused */
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800300
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700301 ret = scm_call2(&scm_arg, NULL);
302 }
303
304 if (ret)
305 {
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800306 dprintf(CRITICAL, "Secure Config failed\n");
307 ret = 1;
Channagoud Kadabiacaa75e2014-06-09 16:29:29 -0700308 }
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800309
310 return ret;
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800311}
312
/* SCM Encrypt Command */
/*
 * In-place SSD encryption of the image at **img_ptr. TZ may rewrite both
 * the image pointer and the length, so the ADDRESSES of the caller's
 * pointer/length (not just the data) are handed to TZ and invalidated
 * afterwards. Returns the scm call result (0 on success).
 */
int encrypt_scm(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
	int ret;
	img_req cmd;
	scmcall_arg scm_arg = {0};


	/* Note: stores the pointer-to-pointer, so TZ can update *img_ptr */
	cmd.img_ptr = (uint32*) img_ptr;
	cmd.img_len_ptr = img_len_ptr;

	/* Image data is operated upon by TZ, which accesses only the main memory.
	 * It must be flushed/invalidated before and after TZ call.
	 */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	if (!scm_arm_support)
	{
		ret = scm_call(SCM_SVC_SSD, SSD_ENCRYPT_ID, &cmd, sizeof(cmd), NULL, 0);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD,SSD_ENCRYPT_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2,SMC_PARAM_TYPE_BUFFER_READWRITE,SMC_PARAM_TYPE_BUFFER_READWRITE);
		scm_arg.x2 = (uint32_t) cmd.img_ptr;
		scm_arg.x3 = (uint32_t) cmd.img_len_ptr;

		ret = scm_call2(&scm_arg, NULL);
	}

	/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
	 * before we use them.
	 */
	arch_clean_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
	arch_clean_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

	/* Invalidate the updated image data */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	return ret;
}
354
/* SCM Decrypt Command */
/*
 * Legacy (pre-ARM-spec) SSD decryption of the image at **img_ptr.
 * Only available on the old SCM interface; returns -1 when the target
 * uses the ARM-spec interface. Cache handling mirrors encrypt_scm().
 */
int decrypt_scm(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
	int ret;
	img_req cmd;

	if (scm_arm_support)
	{
		dprintf(INFO, "%s:SCM call is not supported\n",__func__);
		return -1;
	}

	/* Pointer-to-pointer handed to TZ so it can rewrite *img_ptr */
	cmd.img_ptr = (uint32*) img_ptr;
	cmd.img_len_ptr = img_len_ptr;

	/* Image data is operated upon by TZ, which accesses only the main memory.
	 * It must be flushed/invalidated before and after TZ call.
	 */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	ret = scm_call(SCM_SVC_SSD, SSD_DECRYPT_ID, &cmd, sizeof(cmd), NULL, 0);

	/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
	 * before we use them.
	 */
	arch_clean_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
	arch_clean_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

	/* Invalidate the updated image data */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	return ret;
}
388
389
/*
 * Ask TZ to parse the SSD metadata at the head of *img_ptr.
 * On SSD_PMD_ENCRYPTED, advances *img_ptr past the metadata, shrinks
 * *img_len_ptr accordingly and stores the decryption context in *ctx_id.
 * Returns the final parse status (or asserts on scm failure).
 */
static int ssd_image_is_encrypted(uint32_t ** img_ptr, uint32_t * img_len_ptr, uint32 * ctx_id)
{
	int ret = 0;
	ssd_parse_md_req parse_req;
	ssd_parse_md_rsp parse_rsp;
	int prev_len = 0;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};
	/* Populate meta-data ptr. Here md_len is the meta-data length.
	 * The Code below follows a growing length approach. First send
	 * min(img_len_ptr,SSD_HEADER_MIN_SIZE) say 128 bytes for example.
	 * If parse_rsp.status = PARSING_INCOMPLETE we send md_len = 256.
	 * If subsequent status = PARSING_INCOMPLETE we send md_len = 512,
	 * 1024bytes and so on until we get an valid response(rsp.status) from TZ*/

	parse_req.md = (uint32*)*img_ptr;
	parse_req.md_len = ((*img_len_ptr) >= SSD_HEADER_MIN_SIZE) ? SSD_HEADER_MIN_SIZE : (*img_len_ptr);

	arch_clean_invalidate_cache_range((addr_t) *img_ptr, parse_req.md_len);

	do
	{
		if (!scm_arm_support)
		{
			ret = scm_call(SCM_SVC_SSD,
				       SSD_PARSE_MD_ID,
				       &parse_req,
				       sizeof(parse_req),
				       &parse_rsp,
				       sizeof(parse_rsp));
		}
		else
		{
			scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_PARSE_MD_ID);
			scm_arg.x1 = MAKE_SCM_ARGS(0x2,SMC_PARAM_TYPE_VALUE,SMC_PARAM_TYPE_BUFFER_READWRITE);
			scm_arg.x2 = parse_req.md_len;
			scm_arg.x3 = (uint32_t) parse_req.md;
			scm_arg.atomic = true;

			ret = scm_call2(&scm_arg, &scm_ret);
			parse_rsp.status = scm_ret.x1;
		}
		if(!ret && (parse_rsp.status == SSD_PMD_PARSING_INCOMPLETE))
		{
			prev_len = parse_req.md_len;

			parse_req.md_len *= MULTIPLICATION_FACTOR;

			/* NOTE(review): *img_ptr is uint32_t*, so "+ prev_len"
			 * advances prev_len*4 bytes while the length argument is
			 * in bytes — looks like a unit mismatch in the range being
			 * invalidated; verify against upstream before changing. */
			arch_clean_invalidate_cache_range((addr_t) (*img_ptr + prev_len),
					 (parse_req.md_len - prev_len) );

			continue;
		}
		else
			break;

	} while(true);

	if(!ret)
	{
		if(parse_rsp.status == SSD_PMD_ENCRYPTED)
		{
			/* Skip the metadata: image payload starts at md_end_ptr */
			*ctx_id = parse_rsp.md_ctx_id;
			*img_len_ptr = *img_len_ptr - ((uint8_t*)parse_rsp.md_end_ptr - (uint8_t*)*img_ptr);
			*img_ptr = (uint32_t*)parse_rsp.md_end_ptr;
		}

		ret = parse_rsp.status;
	}
	else
	{
		dprintf(CRITICAL,"ssd_image_is_encrypted call failed");

		ASSERT(ret == 0);
	}

	return ret;
}
468
/*
 * SSD decrypt, v2: parse the metadata first, then (if encrypted) decrypt
 * the whole image as a single final fragment in place. Returns 0 when the
 * image is not encrypted / has no metadata, 0 on successful decryption,
 * otherwise the failing SSD status code.
 */
int decrypt_scm_v2(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
	int ret = 0;
	uint32 ctx_id = 0;
	ssd_decrypt_img_frag_req decrypt_req;
	ssd_decrypt_img_frag_rsp decrypt_rsp;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};


	/* Side effect: on ENCRYPTED, img_ptr/img_len_ptr are advanced past
	 * the metadata by ssd_image_is_encrypted(). */
	ret = ssd_image_is_encrypted(img_ptr,img_len_ptr,&ctx_id);
	switch(ret)
	{
		case SSD_PMD_ENCRYPTED:
			/* Image data is operated upon by TZ, which accesses only the main memory.
			 * It must be flushed/invalidated before and after TZ call.
			 */

			arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

			/*decrypt the image here*/

			decrypt_req.md_ctx_id = ctx_id;
			decrypt_req.last_frag = 1;	/* whole image in one fragment */
			decrypt_req.frag_len = *img_len_ptr;
			decrypt_req.frag = *img_ptr;

			if (!scm_arm_support)
			{
				ret = scm_call(SCM_SVC_SSD,
					       SSD_DECRYPT_IMG_FRAG_ID,
					       &decrypt_req,
					       sizeof(decrypt_req),
					       &decrypt_rsp,
					       sizeof(decrypt_rsp));
			}
			else
			{
				scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_DECRYPT_IMG_FRAG_ID);
				scm_arg.x1 = MAKE_SCM_ARGS(0x4,SMC_PARAM_TYPE_VALUE,SMC_PARAM_TYPE_VALUE,SMC_PARAM_TYPE_VALUE,SMC_PARAM_TYPE_BUFFER_READWRITE);
				scm_arg.x2 = decrypt_req.md_ctx_id;
				scm_arg.x3 = decrypt_req.last_frag;
				scm_arg.x4 = decrypt_req.frag_len;
				scm_arg.x5[0] = (uint32_t) decrypt_req.frag;

				ret = scm_call2(&scm_arg, &scm_ret);
				decrypt_rsp.status = scm_ret.x1;
			}
			if(!ret){
				ret = decrypt_rsp.status;
			}

			/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
			 * before we use them.
			 */
			arch_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
			arch_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

			/* Invalidate the updated image data */
			arch_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

			break;

		case SSD_PMD_NOT_ENCRYPTED:
		case SSD_PMD_NO_MD_FOUND:
			/* Plain image — nothing to do */
			ret = 0;
			break;

		case SSD_PMD_BUSY:
		case SSD_PMD_BAD_MD_PTR_OR_LEN:
		case SSD_PMD_PARSING_INCOMPLETE:
		case SSD_PMD_PARSING_FAILED:
		case SSD_PMD_SETUP_CIPHER_FAILED:
			dprintf(CRITICAL,"decrypt_scm_v2: failed status %d\n",ret);
			break;

		default:
			dprintf(CRITICAL,"decrypt_scm_v2: case default: failed status %d\n",ret);
			break;
	}
	return ret;
}
551
/*
 * Query the TZ SSD feature version. On success stores the major version
 * in *major.
 * NOTE(review): @minor is never written by this function — callers must
 * not rely on it; confirm whether that is intentional.
 */
int scm_svc_version(uint32 * major, uint32 * minor)
{
	feature_version_req feature_req;
	feature_version_rsp feature_rsp;
	int ret = 0;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	feature_req.feature_id = TZBSP_FVER_SSD;

	if (!scm_arm_support)
	{
		ret = scm_call(TZBSP_SVC_INFO,
			       TZ_INFO_GET_FEATURE_ID,
			       &feature_req,
			       sizeof(feature_req),
			       &feature_rsp,
			       sizeof(feature_rsp));
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, TZ_INFO_GET_FEATURE_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x1,SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = feature_req.feature_id;

		ret = scm_call2(&scm_arg, &scm_ret);
		feature_rsp.version = scm_ret.x1;
	}

	if(!ret)
		*major = TZBSP_GET_FEATURE_VERSION(feature_rsp.version);

	return ret;
}
586
/*
 * Hand the SSD keystore image at img_ptr/img_len to TZ for protection.
 * Returns 0 on success, the TZ status code or scm error otherwise.
 */
int scm_protect_keystore(uint32_t * img_ptr, uint32_t img_len)
{
	int ret=0;
	ssd_protect_keystore_req protect_req;
	ssd_protect_keystore_rsp protect_rsp;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	protect_req.keystore_ptr = img_ptr;
	protect_req.keystore_len = img_len;

	/* Flush the keystore to main memory so TZ reads current data */
	arch_clean_invalidate_cache_range((addr_t) img_ptr, img_len);

	if (!scm_arm_support)
	{
		ret = scm_call(SCM_SVC_SSD,
			       SSD_PROTECT_KEYSTORE_ID,
			       &protect_req,
			       sizeof(protect_req),
			       &protect_rsp,
			       sizeof(protect_rsp));
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_PROTECT_KEYSTORE_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2,SMC_PARAM_TYPE_BUFFER_READWRITE,SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = (uint32_t) protect_req.keystore_ptr;
		scm_arg.x3 = protect_req.keystore_len;

		ret = scm_call2(&scm_arg, &scm_ret);
		protect_rsp.status = scm_ret.x1;
	}
	if(!ret)
	{
		if(protect_rsp.status == TZBSP_SSD_PKS_SUCCESS)
			dprintf(INFO,"Successfully loaded the keystore ");
		else
		{
			dprintf(INFO,"Loading keystore failed status %d ",protect_rsp.status);
			ret = protect_rsp.status;
		}
	}
	else
		dprintf(INFO,"scm_call failed ");

	return ret;
}
634
/*
 * Blow the HLOS image-tamper software fuse via TZ.
 * Fire-and-forget: neither path checks or returns the call result.
 */
void set_tamper_fuse_cmd()
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	void *resp_buf = NULL;
	size_t resp_len = 0;
	scmcall_arg scm_arg = {0};

	uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
	cmd_buf = (void *)&fuse_id;
	cmd_len = sizeof(fuse_id);

	if (!scm_arm_support)
	{
		/*no response */
		resp_buf = NULL;
		resp_len = 0;

		svc_id = SCM_SVC_FUSE;
		cmd_id = SCM_BLOW_SW_FUSE_ID;

		scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_BLOW_SW_FUSE_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2,SMC_PARAM_TYPE_BUFFER_READWRITE,SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = (uint32_t) cmd_buf;
		scm_arg.x3 = cmd_len;

		scm_call2(&scm_arg, NULL);

	}

}
672
/*
 * Read back the HLOS image-tamper software fuse state from TZ.
 * Returns the fuse value reported by TZ (legacy path: response buffer;
 * ARM path: low byte of scm_ret.x1).
 */
uint8_t get_tamper_fuse_cmd()
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	size_t resp_len = 0;
	uint8_t resp_buf;

	uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	cmd_buf = (void *)&fuse_id;
	cmd_len = sizeof(fuse_id);

	if (!scm_arm_support)
	{
		/*response */
		resp_len = sizeof(resp_buf);

		svc_id = SCM_SVC_FUSE;
		cmd_id = SCM_IS_SW_FUSE_BLOWN_ID;

		scm_call(svc_id, cmd_id, cmd_buf, cmd_len, &resp_buf, resp_len);
		return resp_buf;
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_IS_SW_FUSE_BLOWN_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2,SMC_PARAM_TYPE_BUFFER_READWRITE,SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = (uint32_t) cmd_buf;
		scm_arg.x3 = cmd_len;

		scm_call2(&scm_arg, &scm_ret);
		return (uint8_t)scm_ret.x1;
	}
}
Deepa Dinamani193874e2012-02-07 14:00:04 -0800711
/*
 * struct qseecom_save_partition_hash_req - request payload for
 * SCM_SAVE_PARTITION_HASH_ID.
 * @partition_id - partition id (0 = kernel, per save_kernel_hash_cmd()).
 * @digest[SHA256_DIGEST_LENGTH] - sha256 digest.
 */
struct qseecom_save_partition_hash_req {
	uint32_t partition_id; /* in */
	uint8_t digest[SHA256_DIGEST_LENGTH]; /* in */
};
721
722
/*
 * Report the kernel image's SHA-256 digest to TZ (qseecom ES service) so
 * the secure side can record/verify it. @digest must point at
 * SHA256_DIGEST_LENGTH bytes. Errors are only logged on the ARM path.
 */
void save_kernel_hash_cmd(void *digest)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	void *resp_buf = NULL;
	size_t resp_len = 0;
	struct qseecom_save_partition_hash_req req;
	scmcall_arg scm_arg = {0};

	/*no response */
	resp_buf = NULL;
	resp_len = 0;

	req.partition_id = 0; /* kernel */
	memcpy(req.digest, digest, sizeof(req.digest));

	if (!scm_arm_support)
	{
		svc_id = SCM_SVC_ES;
		cmd_id = SCM_SAVE_PARTITION_HASH_ID;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(req);

		scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID);
		/* NOTE(review): second param type is literal 0 here, unlike the
		 * SMC_PARAM_TYPE_* constants used elsewhere — verify intended. */
		scm_arg.x1 = MAKE_SCM_ARGS(0x3, 0, SMC_PARAM_TYPE_BUFFER_READWRITE);
		scm_arg.x2 = req.partition_id;
		scm_arg.x3 = (uint32_t) &req.digest;
		scm_arg.x4 = sizeof(req.digest);

		if (scm_call2(&scm_arg, NULL))
			dprintf(CRITICAL, "Failed to Save kernel hash\n");
	}
}
762
/*
 * Run the MDTP DIP cipher operation in TZ: processes in_buf into out_buf
 * in the given direction (encrypt/decrypt selector as defined by the MDTP
 * service). Returns 0 on success, -1 if the SCM call fails.
 */
int mdtp_cipher_dip_cmd(uint8_t *in_buf, uint32_t in_buf_size, uint8_t *out_buf,
						uint32_t out_buf_size, uint32_t direction)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	void *rsp_buf;
	size_t cmd_len;
	size_t rsp_len;
	mdtp_cipher_dip_req req;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	ASSERT(in_buf != NULL);
	ASSERT(out_buf != NULL);

	req.in_buf = in_buf;
	req.in_buf_size = in_buf_size;
	req.out_buf = out_buf;
	req.out_buf_size = out_buf_size;
	req.direction = direction;

	if (!scm_arm_support)
	{
		svc_id = SCM_SVC_MDTP;
		cmd_id = SCM_MDTP_CIPHER_DIP;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(req);
		rsp_buf = NULL;
		rsp_len = 0;

		if (scm_call(svc_id, cmd_id, cmd_buf, cmd_len, rsp_buf, rsp_len))
		{
			dprintf(CRITICAL, "Failed to call Cipher DIP SCM\n");
			return -1;
		}
	}
	else
	{
		/* 5 register args: in (read), in_size, out (read/write),
		 * out_size, direction */
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_MDTP, SCM_MDTP_CIPHER_DIP);
		scm_arg.x1 = MAKE_SCM_ARGS(0x5, SMC_PARAM_TYPE_BUFFER_READ, SMC_PARAM_TYPE_VALUE,
								SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = (uint32_t)req.in_buf;
		scm_arg.x3 = req.in_buf_size;
		scm_arg.x4 = (uint32_t)req.out_buf;
		scm_arg.x5[0] = req.out_buf_size;
		scm_arg.x5[1] = req.direction;

		if (scm_call2(&scm_arg, &scm_ret))
		{
			dprintf(CRITICAL, "Failed in Cipher DIP SCM call\n");
			return -1;
		}
	}

	return 0;
}
820
Deepa Dinamani193874e2012-02-07 14:00:04 -0800821/*
822 * Switches the CE1 channel between ADM and register usage.
823 * channel : AP_CE_REGISTER_USE, CE1 uses register interface
824 * : AP_CE_ADM_USE, CE1 uses ADM interface
825 */
826uint8_t switch_ce_chn_cmd(enum ap_ce_channel_type channel)
827{
828 uint32_t svc_id;
829 uint32_t cmd_id;
830 void *cmd_buf;
831 size_t cmd_len;
832 size_t resp_len = 0;
833 uint8_t resp_buf;
834
835 struct {
836 uint32_t resource;
837 uint32_t chn_id;
838 }__PACKED switch_ce_chn_buf;
839
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700840 if (scm_arm_support)
841 {
842 dprintf(INFO, "%s:SCM call is not supported\n",__func__);
843 return 0;
844 }
845
Deepa Dinamani193874e2012-02-07 14:00:04 -0800846 switch_ce_chn_buf.resource = TZ_RESOURCE_CE_AP;
847 switch_ce_chn_buf.chn_id = channel;
848 cmd_buf = (void *)&switch_ce_chn_buf;
849 cmd_len = sizeof(switch_ce_chn_buf);
850
851 /*response */
852 resp_len = sizeof(resp_buf);
853
854 svc_id = SCM_SVC_CE_CHN_SWITCH_ID;
855 cmd_id = SCM_CE_CHN_SWITCH_ID;
856
857 scm_call(svc_id, cmd_id, cmd_buf, cmd_len, &resp_buf, resp_len);
858 return resp_buf;
859}
860
Channagoud Kadabi179df0b2013-12-12 14:53:31 -0800861int scm_halt_pmic_arbiter()
862{
863 int ret = 0;
864
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700865 if (scm_arm_support)
866 {
867 dprintf(INFO, "%s:SCM call is not supported\n",__func__);
868 return -1;
869 }
870
Channagoud Kadabi179df0b2013-12-12 14:53:31 -0800871 ret = scm_call_atomic(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER, 0);
872
873 return ret;
874}
Abhimanyu Kapurb3207fb2014-01-27 21:33:23 -0800875
/* Exception Level exec secure-os call
 * Jumps to the kernel via secure-os and does not return
 * on a successful jump. System parameters are set up &
 * passed on to secure-os and are utilized to boot the
 * kernel.
 *
 @ kernel_entry : kernel entry point passed in as link register.
 @ dtb_offset : dt blob address passed in as w0.
 @ svc_id : indicates direction of switch 32->64 or 64->32
 *
 * Assumes all sanity checks have been performed on arguments.
 */
888
889void scm_elexec_call(paddr_t kernel_entry, paddr_t dtb_offset)
890{
891 uint32_t svc_id = SCM_SVC_MILESTONE_32_64_ID;
892 uint32_t cmd_id = SCM_SVC_MILESTONE_CMD_ID;
893 void *cmd_buf;
894 size_t cmd_len;
895 static el1_system_param param;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700896 scmcall_arg scm_arg = {0};
Abhimanyu Kapurb3207fb2014-01-27 21:33:23 -0800897
898 param.el1_x0 = dtb_offset;
899 param.el1_elr = kernel_entry;
900
Abhimanyu Kapurb3207fb2014-01-27 21:33:23 -0800901 /* Response Buffer = Null as no response expected */
902 dprintf(INFO, "Jumping to kernel via monitor\n");
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700903
904 if (!scm_arm_support)
905 {
906 /* Command Buffer */
907 cmd_buf = (void *)&param;
908 cmd_len = sizeof(el1_system_param);
909
910 scm_call(svc_id, cmd_id, cmd_buf, cmd_len, NULL, 0);
911 }
912 else
913 {
914 scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_MILESTONE_32_64_ID, SCM_SVC_MILESTONE_CMD_ID);
915 scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READ);
Veera Sundaram Sankaran00181512014-12-09 11:23:39 -0800916 scm_arg.x2 = (uint32_t ) &param;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700917 scm_arg.x3 = sizeof(el1_system_param);
918
919 scm_call2(&scm_arg, NULL);
920 }
Abhimanyu Kapurb3207fb2014-01-27 21:33:23 -0800921
922 /* Assert if execution ever reaches here */
923 dprintf(CRITICAL, "Failed to jump to kernel\n");
924 ASSERT(0);
925}
Maria Yubeeeeaf2014-06-30 13:05:43 +0800926
927/* SCM Random Command */
928int scm_random(uint32_t * rbuf, uint32_t r_len)
929{
930 int ret;
931 struct tz_prng_data data;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700932 scmcall_arg scm_arg = {0};
Maria Yubeeeeaf2014-06-30 13:05:43 +0800933
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700934 if (!scm_arm_support)
935 {
936 data.out_buf = (uint8_t*) rbuf;
937 data.out_buf_size = r_len;
Maria Yubeeeeaf2014-06-30 13:05:43 +0800938
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700939 /*
940 * random buffer must be flushed/invalidated before and after TZ call.
941 */
942 arch_clean_invalidate_cache_range((addr_t) rbuf, r_len);
Maria Yubeeeeaf2014-06-30 13:05:43 +0800943
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700944 ret = scm_call(TZ_SVC_CRYPTO, PRNG_CMD_ID, &data, sizeof(data), NULL, 0);
Maria Yubeeeeaf2014-06-30 13:05:43 +0800945
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700946 /* Invalidate the updated random buffer */
947 arch_clean_invalidate_cache_range((addr_t) rbuf, r_len);
948 }
949 else
950 {
951 scm_arg.x0 = MAKE_SIP_SCM_CMD(TZ_SVC_CRYPTO, PRNG_CMD_ID);
952 scm_arg.x1 = MAKE_SCM_ARGS(0x2,SMC_PARAM_TYPE_BUFFER_READWRITE);
Veera Sundaram Sankaran00181512014-12-09 11:23:39 -0800953 scm_arg.x2 = (uint32_t) rbuf;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700954 scm_arg.x3 = r_len;
955
956 ret = scm_call2(&scm_arg, NULL);
957 if (!ret)
958 arch_clean_invalidate_cache_range((addr_t) rbuf, r_len);
959 else
960 dprintf(CRITICAL, "Secure canary SCM failed: %x\n", ret);
961 }
Maria Yubeeeeaf2014-06-30 13:05:43 +0800962
963 return ret;
964}
965
966void * get_canary()
967{
968 void * canary;
vijay kumar4f4405f2014-08-08 11:49:53 +0530969 if(scm_random((uint32_t *)&canary, sizeof(canary))) {
Maria Yubeeeeaf2014-06-30 13:05:43 +0800970 dprintf(CRITICAL,"scm_call for random failed !!!");
971 /*
972 * fall back to use lib rand API if scm call failed.
973 */
974 canary = (void *)rand();
975 }
976
977 return canary;
978}
Aparna Mallavarapu6875ade2014-06-16 22:15:28 +0530979
980int scm_xpu_err_fatal_init()
981{
982 uint32_t ret = 0;
983 uint32_t response = 0;
984 tz_xpu_prot_cmd cmd;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700985 scmcall_arg scm_arg = {0};
986 scmcall_ret scm_ret = {0};
Aparna Mallavarapu6875ade2014-06-16 22:15:28 +0530987
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700988 if (!scm_arm_support)
989 {
990 cmd.config = ERR_FATAL_ENABLE;
991 cmd.spare = 0;
Aparna Mallavarapu6875ade2014-06-16 22:15:28 +0530992
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700993 ret = scm_call(SVC_MEMORY_PROTECTION, XPU_ERR_FATAL, &cmd, sizeof(cmd), &response,
994 sizeof(response));
995 }
996 else
997 {
998 scm_arg.x0 = MAKE_SIP_SCM_CMD(SVC_MEMORY_PROTECTION, XPU_ERR_FATAL);
999 scm_arg.x1 = MAKE_SCM_ARGS(0x2);
1000 scm_arg.x2 = ERR_FATAL_ENABLE;
1001 scm_arg.x3 = 0x0;
1002 ret = scm_call2(&scm_arg, &scm_ret);
1003 response = scm_ret.x1;
1004 }
Aparna Mallavarapu6875ade2014-06-16 22:15:28 +05301005
1006 if (ret)
1007 dprintf(CRITICAL, "Failed to set XPU violations as fatal errors: %u\n", response);
1008 else
1009 dprintf(INFO, "Configured XPU violations to be fatal errors\n");
1010
1011 return ret;
1012}
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001013
/*
 * Issues an SMC (Secure Monitor Call) using the 32-bit register convention:
 * x0-x5 are placed in r0-r5, "smc #0" traps to the secure world, and the
 * status comes back in r0 with up to three result words in r1-r3.
 *
 * @x0..x5 : SMC arguments (x0 = function ID, x1 = argument descriptor,
 *           x2..x5 = payload, per the packing done in scm_call2).
 * @ret    : optional out-params; when non-NULL, receives r1-r3 after the call.
 *
 * Returns the secure world's status code from r0 (0 on success).
 */
static uint32_t scm_call_a32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, scmcall_ret *ret)
{
	/* Pin each argument to its ABI-mandated register before the smc. */
	register uint32_t r0 __asm__("r0") = x0;
	register uint32_t r1 __asm__("r1") = x1;
	register uint32_t r2 __asm__("r2") = x2;
	register uint32_t r3 __asm__("r3") = x3;
	register uint32_t r4 __asm__("r4") = x4;
	register uint32_t r5 __asm__("r5") = x5;

	/* __asmeq guards verify the compiler really assigned the named
	 * registers; volatile prevents the call from being reordered/elided. */
	__asm__ volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r1")
		__asmeq("%2", "r2")
		__asmeq("%3", "r3")
		__asmeq("%4", "r0")
		__asmeq("%5", "r1")
		__asmeq("%6", "r2")
		__asmeq("%7", "r3")
		__asmeq("%8", "r4")
		__asmeq("%9", "r5")
		"smc #0 @ switch to secure world\n"
		: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
		: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5));

	/* Hand back the secondary result registers when the caller wants them. */
	if (ret)
	{
		ret->x1 = r1;
		ret->x2 = r2;
		ret->x3 = r3;
	}

	return r0;
}
1047
1048uint32_t scm_call2(scmcall_arg *arg, scmcall_ret *ret)
1049{
1050 uint32_t *indir_arg = NULL;
1051 uint32_t x5;
1052 int i;
1053 uint32_t rc;
1054
1055 arg->x0 = arg->atomic ? (arg->x0 | SCM_ATOMIC_BIT) : arg->x0;
1056 x5 = arg->x5[0];
1057
Amit Blayfe23ee22015-01-09 19:09:51 +02001058 if ((arg->x1 & 0xF) > SCM_MAX_ARG_LEN - 1)
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001059 {
1060 indir_arg = memalign(CACHE_LINE, (SCM_INDIR_MAX_LEN * sizeof(uint32_t)));
1061 ASSERT(indir_arg);
1062
1063 for (i = 0 ; i < SCM_INDIR_MAX_LEN; i++)
1064 {
1065 indir_arg[i] = arg->x5[i];
1066 }
1067 arch_clean_invalidate_cache_range((addr_t) indir_arg, ROUNDUP((SCM_INDIR_MAX_LEN * sizeof(uint32_t)), CACHE_LINE));
1068 x5 = (addr_t) indir_arg;
1069 }
1070
1071 rc = scm_call_a32(arg->x0, arg->x1, arg->x2, arg->x3, arg->x4, x5, ret);
1072
1073 if (rc)
1074 {
1075 dprintf(CRITICAL, "SCM call: 0x%x failed with :%x\n", arg->x0, rc);
1076 return rc;
1077 }
1078
1079 if (indir_arg)
1080 free(indir_arg);
1081
1082 return 0;
1083}