/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *   * Neither the name of The Linux Foundation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <asm.h>
#include <bits.h>
#include <arch/ops.h>
#include <rand.h>
#include <image_verify.h>
#include <dload_util.h>
#include <platform/iomap.h>
#include <board.h>
#include "scm.h"

#pragma GCC optimize ("O0")

/* From Linux Kernel asm/system.h */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef offsetof
#  define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif

#define SCM_CLASS_REGISTER         (0x2 << 8)
#define SCM_MASK_IRQS              BIT(5)
#define SCM_ATOMIC(svc, cmd, n)    ((((((svc) & 0x3f) << 10)|((cmd) & 0x3ff)) << 12) | \
                                   SCM_CLASS_REGISTER | \
                                   SCM_MASK_IRQS | \
                                   ((n) & 0xf))
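
/*
 * Illustrative note (not from the original source): SCM_ATOMIC() packs the
 * 6-bit service ID and 10-bit command ID into the upper bits of the call
 * descriptor, ORs in the register-class and IRQ-mask flags, and keeps the
 * argument count in the low nibble, e.g.
 *
 *	register uint32_t r0 = SCM_ATOMIC(SCM_SVC_IO, SCM_IO_READ, 1);
 *
 * mirroring how scm_call_atomic() below builds r0 for callers such as
 * scm_io_read().
 */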

/* SCM interface as per ARM spec present? */
bool scm_arm_support;
static bool scm_initialized;

bool is_scm_armv8_support()
{
#if !NO_SCM_V8_SUPPORT
	if (!scm_initialized)
	{
		scm_init();
	}
#endif

	return scm_arm_support;
}

int is_scm_call_available(uint32_t svc_id, uint32_t cmd_id)
{
	int ret;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);
	scm_arg.x1 = MAKE_SCM_ARGS(0x1);
	scm_arg.x2 = MAKE_SIP_SCM_CMD(svc_id, cmd_id);

	ret = scm_call2(&scm_arg, &scm_ret);

	if (!ret)
		return scm_ret.x1;

	return ret;
}

static int scm_arm_support_available(uint32_t svc_id, uint32_t cmd_id)
{
	int ret;

	ret = is_scm_call_available(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);

	if (ret > 0)
		scm_arm_support = true;

	return ret;
}

void scm_init()
{
	int ret;

	if (scm_initialized)
		return;

	ret = scm_arm_support_available(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);

	if (ret < 0)
		dprintf(CRITICAL, "Failed to initialize SCM\n");

	scm_initialized = true;

#if DISABLE_DLOAD_MODE
	scm_disable_sdi();
#endif
}

/**
 * alloc_scm_command() - Allocate an SCM command
 * @cmd_size: size of the command buffer
 * @resp_size: size of the response buffer
 *
 * Allocate an SCM command, including enough room for the command
 * and response headers as well as the command and response buffers.
 *
 * Returns a valid &scm_command on success or %NULL if the allocation fails.
 */
static struct scm_command *alloc_scm_command(size_t cmd_size, size_t resp_size)
{
	struct scm_command *cmd;
	size_t len = sizeof(*cmd) + sizeof(struct scm_response) + cmd_size +
	    resp_size;

	cmd = memalign(CACHE_LINE, ROUNDUP(len, CACHE_LINE));
	if (cmd) {
		memset(cmd, 0, len);
		cmd->len = len;
		cmd->buf_offset = offsetof(struct scm_command, buf);
		cmd->resp_hdr_offset = cmd->buf_offset + cmd_size;
	}
	return cmd;
}

/**
 * free_scm_command() - Free an SCM command
 * @cmd: command to free
 *
 * Free an SCM command.
 */
static inline void free_scm_command(struct scm_command *cmd)
{
	free(cmd);
}

/**
 * scm_command_to_response() - Get a pointer to a scm_response
 * @cmd: command
 *
 * Returns a pointer to a response for a command.
 */
static inline struct scm_response *scm_command_to_response(const struct
							    scm_command *cmd)
{
	return (void *)cmd + cmd->resp_hdr_offset;
}

/**
 * scm_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * Returns a pointer to the command buffer of a command.
 */
static inline void *scm_get_command_buffer(const struct scm_command *cmd)
{
	return (void *)cmd->buf;
}

/**
 * scm_get_response_buffer() - Get a pointer to a response buffer
 * @rsp: response
 *
 * Returns a pointer to a response buffer of a response.
 */
static inline void *scm_get_response_buffer(const struct scm_response *rsp)
{
	return (void *)rsp + rsp->buf_offset;
}

static uint32_t smc(uint32_t cmd_addr)
{
	uint32_t context_id;
	register uint32_t r0 __asm__("r0") = 1;
	register uint32_t r1 __asm__("r1") = (uint32_t) & context_id;
	register uint32_t r2 __asm__("r2") = cmd_addr;
	__asm__("1:smc	#0	@ switch to secure world\n" "cmp	r0, #1\n" "beq	1b\n": "=r"(r0): "r"(r0), "r"(r1), "r"(r2):"r3", "cc");
	return r0;
}

/**
 * scm_call_atomic() - Make an SCM call with one or no argument
 * @svc: service id
 * @cmd: command id
 * @arg1: argument
 */

static int scm_call_atomic(uint32_t svc, uint32_t cmd, uint32_t arg1)
{
	uint32_t context_id;
	register uint32_t r0 __asm__("r0") = SCM_ATOMIC(svc, cmd, 1);
	register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
	register uint32_t r2 __asm__("r2") = arg1;

	__asm__ volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
		"smc	#0	@ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2)
		: "r3");
	return r0;
}

/**
 * scm_call_atomic2() - Send an atomic SCM command with two arguments
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @arg1: first argument
 * @arg2: second argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
 */
int scm_call_atomic2(uint32_t svc, uint32_t cmd, uint32_t arg1, uint32_t arg2)
{
	int context_id;
	register uint32_t r0 __asm__("r0") = SCM_ATOMIC(svc, cmd, 2);
	register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
	register uint32_t r2 __asm__("r2") = arg1;
	register uint32_t r3 __asm__("r3") = arg2;

	__asm__ volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
		__asmeq("%4", "r3")
		"smc	#0	@ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2), "r" (r3));
	return r0;
}
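
/*
 * Usage sketch (illustrative, not part of the original code): the atomic
 * wrappers are intended for short register-only commands, e.g. the secure
 * I/O write used later in this file:
 *
 *	int rc = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, value);
 *	if (rc)
 *		dprintf(CRITICAL, "atomic SCM I/O write failed: %d\n", rc);
 *
 * No buffers are shared with TZ here, so no cache maintenance is required.
 */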

/**
 * scm_call() - Send an SCM command
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 */
int
scm_call(uint32_t svc_id, uint32_t cmd_id, const void *cmd_buf,
	 size_t cmd_len, void *resp_buf, size_t resp_len)
{
	int ret;
	struct scm_command *cmd;
	struct scm_response *rsp;
	uint8_t *resp_ptr;

	cmd = alloc_scm_command(cmd_len, resp_len);
	if (!cmd)
		return ERR_NO_MEMORY;

	cmd->id = (svc_id << 10) | cmd_id;
	if (cmd_buf)
		memcpy(scm_get_command_buffer(cmd), cmd_buf, cmd_len);

	/* Flush command to main memory for TZ */
	arch_clean_invalidate_cache_range((addr_t) cmd, cmd->len);

	ret = smc((uint32_t) cmd);
	if (ret)
		goto out;

	if (resp_len) {
		rsp = scm_command_to_response(cmd);

		do
		{
			/* Need to invalidate before each check since TZ will update
			 * the response complete flag in main memory.
			 */
			arch_invalidate_cache_range((addr_t) rsp, sizeof(*rsp));
		} while (!rsp->is_complete);

		resp_ptr = scm_get_response_buffer(rsp);

		/* Invalidate any cached response data */
		arch_invalidate_cache_range((addr_t) resp_ptr, resp_len);

		if (resp_buf)
			memcpy(resp_buf, resp_ptr, resp_len);
	}
 out:
	free_scm_command(cmd);
	return ret;
}
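
/*
 * Usage sketch (illustrative, not part of the original code): a typical
 * legacy caller marshals a request structure and lets scm_call() handle the
 * command/response headers and their cache maintenance, as in the
 * restore_secure_cfg() path below:
 *
 *	tz_secure_cfg cfg = { .id = id, .spare = 0 };
 *	int rc = scm_call(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG,
 *			  &cfg, sizeof(cfg), NULL, 0);
 *
 * Callers that pass pointers to payload buffers inside the request (e.g.
 * encrypt_scm()) must still flush/invalidate those buffers themselves.
 */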

int restore_secure_cfg(uint32_t id)
{
	int ret = 0;
	tz_secure_cfg secure_cfg;

	secure_cfg.id    = id;
	secure_cfg.spare = 0;
	scmcall_arg scm_arg = {0};

	if(!is_scm_armv8_support())
	{
		ret = scm_call(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG, &secure_cfg, sizeof(secure_cfg),
				NULL, 0);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2);
		scm_arg.x2 = id;
		scm_arg.x3 = 0x0; /* Spare unused */

		ret = scm_call2(&scm_arg, NULL);
	}

	if (ret)
	{
		dprintf(CRITICAL, "Secure Config failed\n");
		ret = 1;
	}

	return ret;
}

/* SCM Encrypt Command */
int encrypt_scm(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
	int ret;
	img_req cmd;
	scmcall_arg scm_arg = {0};

	cmd.img_ptr     = (uint32*) img_ptr;
	cmd.img_len_ptr = img_len_ptr;

	/* Image data is operated upon by TZ, which accesses only the main memory.
	 * It must be flushed/invalidated before and after TZ call.
	 */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	if (!is_scm_armv8_support())
	{
		ret = scm_call(SCM_SVC_SSD, SSD_ENCRYPT_ID, &cmd, sizeof(cmd), NULL, 0);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_ENCRYPT_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_BUFFER_READWRITE);
		scm_arg.x2 = (uint32_t) cmd.img_ptr;
		scm_arg.x3 = (uint32_t) cmd.img_len_ptr;

		ret = scm_call2(&scm_arg, NULL);
	}

	/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
	 * before we use them.
	 */
	arch_clean_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
	arch_clean_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

	/* Invalidate the updated image data */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	return ret;
}

/* SCM Decrypt Command */
int decrypt_scm(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
	int ret;
	img_req cmd;

	if (is_scm_armv8_support())
	{
		dprintf(INFO, "%s:SCM call is not supported\n", __func__);
		return -1;
	}

	cmd.img_ptr     = (uint32*) img_ptr;
	cmd.img_len_ptr = img_len_ptr;

	/* Image data is operated upon by TZ, which accesses only the main memory.
	 * It must be flushed/invalidated before and after TZ call.
	 */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	ret = scm_call(SCM_SVC_SSD, SSD_DECRYPT_ID, &cmd, sizeof(cmd), NULL, 0);

	/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
	 * before we use them.
	 */
	arch_clean_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
	arch_clean_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

	/* Invalidate the updated image data */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	return ret;
}


static int ssd_image_is_encrypted(uint32_t ** img_ptr, uint32_t * img_len_ptr, uint32 * ctx_id)
{
	int              ret      = 0;
	ssd_parse_md_req parse_req;
	ssd_parse_md_rsp parse_rsp;
	int              prev_len = 0;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};
	/* Populate the meta-data pointer. Here md_len is the meta-data length.
	 * The code below follows a growing-length approach. First send
	 * min(img_len_ptr, SSD_HEADER_MIN_SIZE), say 128 bytes for example.
	 * If parse_rsp.status = PARSING_INCOMPLETE we send md_len = 256.
	 * If subsequent status = PARSING_INCOMPLETE we send md_len = 512,
	 * 1024 bytes and so on until we get a valid response (rsp.status) from TZ.
	 */

	parse_req.md     = (uint32*)*img_ptr;
	parse_req.md_len = ((*img_len_ptr) >= SSD_HEADER_MIN_SIZE) ? SSD_HEADER_MIN_SIZE : (*img_len_ptr);

	arch_clean_invalidate_cache_range((addr_t) *img_ptr, parse_req.md_len);

	do
	{
		if (!is_scm_armv8_support())
		{
			ret = scm_call(SCM_SVC_SSD,
				       SSD_PARSE_MD_ID,
				       &parse_req,
				       sizeof(parse_req),
				       &parse_rsp,
				       sizeof(parse_rsp));
		}
		else
		{
			scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_PARSE_MD_ID);
			scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_BUFFER_READWRITE);
			scm_arg.x2 = parse_req.md_len;
			scm_arg.x3 = (uint32_t) parse_req.md;
			scm_arg.atomic = true;

			ret = scm_call2(&scm_arg, &scm_ret);
			parse_rsp.status = scm_ret.x1;
		}
		if(!ret && (parse_rsp.status == SSD_PMD_PARSING_INCOMPLETE))
		{
			prev_len          = parse_req.md_len;

			parse_req.md_len *= MULTIPLICATION_FACTOR;

			arch_clean_invalidate_cache_range((addr_t) (*img_ptr + prev_len),
							  (parse_req.md_len - prev_len));

			continue;
		}
		else
			break;

	} while(true);

	if(!ret)
	{
		if(parse_rsp.status == SSD_PMD_ENCRYPTED)
		{
			*ctx_id      = parse_rsp.md_ctx_id;
			*img_len_ptr = *img_len_ptr - ((uint8_t*)parse_rsp.md_end_ptr - (uint8_t*)*img_ptr);
			*img_ptr     = (uint32_t*)parse_rsp.md_end_ptr;
		}

		ret = parse_rsp.status;
	}
	else
	{
		dprintf(CRITICAL, "ssd_image_is_encrypted call failed");

		ASSERT(ret == 0);
	}

	return ret;
}

int decrypt_scm_v2(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
	int ret       = 0;
	uint32 ctx_id = 0;
	ssd_decrypt_img_frag_req decrypt_req;
	ssd_decrypt_img_frag_rsp decrypt_rsp;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	ret = ssd_image_is_encrypted(img_ptr, img_len_ptr, &ctx_id);
	switch(ret)
	{
	case SSD_PMD_ENCRYPTED:
		/* Image data is operated upon by TZ, which accesses only the main memory.
		 * It must be flushed/invalidated before and after TZ call.
		 */

		arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

		/* Decrypt the image here */

		decrypt_req.md_ctx_id = ctx_id;
		decrypt_req.last_frag = 1;
		decrypt_req.frag_len  = *img_len_ptr;
		decrypt_req.frag      = *img_ptr;

		if (!is_scm_armv8_support())
		{
			ret = scm_call(SCM_SVC_SSD,
				       SSD_DECRYPT_IMG_FRAG_ID,
				       &decrypt_req,
				       sizeof(decrypt_req),
				       &decrypt_rsp,
				       sizeof(decrypt_rsp));
		}
		else
		{
			scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_DECRYPT_IMG_FRAG_ID);
			scm_arg.x1 = MAKE_SCM_ARGS(0x4, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_BUFFER_READWRITE);
			scm_arg.x2 = decrypt_req.md_ctx_id;
			scm_arg.x3 = decrypt_req.last_frag;
			scm_arg.x4 = decrypt_req.frag_len;
			scm_arg.x5[0] = (uint32_t) decrypt_req.frag;

			ret = scm_call2(&scm_arg, &scm_ret);
			decrypt_rsp.status = scm_ret.x1;
		}
		if(!ret){
			ret = decrypt_rsp.status;
		}

		/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
		 * before we use them.
		 */
		arch_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
		arch_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

		/* Invalidate the updated image data */
		arch_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

		break;

	case SSD_PMD_NOT_ENCRYPTED:
	case SSD_PMD_NO_MD_FOUND:
		ret = 0;
		break;

	case SSD_PMD_BUSY:
	case SSD_PMD_BAD_MD_PTR_OR_LEN:
	case SSD_PMD_PARSING_INCOMPLETE:
	case SSD_PMD_PARSING_FAILED:
	case SSD_PMD_SETUP_CIPHER_FAILED:
		dprintf(CRITICAL, "decrypt_scm_v2: failed status %d\n", ret);
		break;

	default:
		dprintf(CRITICAL, "decrypt_scm_v2: case default: failed status %d\n", ret);
		break;
	}
	return ret;
}

int scm_svc_version(uint32 * major, uint32 * minor)
{
	feature_version_req feature_req;
	feature_version_rsp feature_rsp;
	int ret = 0;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	feature_req.feature_id = TZBSP_FVER_SSD;

	if (!is_scm_armv8_support())
	{
		ret = scm_call(TZBSP_SVC_INFO,
			       TZ_INFO_GET_FEATURE_ID,
			       &feature_req,
			       sizeof(feature_req),
			       &feature_rsp,
			       sizeof(feature_rsp));
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, TZ_INFO_GET_FEATURE_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x1, SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = feature_req.feature_id;

		ret = scm_call2(&scm_arg, &scm_ret);
		feature_rsp.version = scm_ret.x1;
	}

	if(!ret)
		*major = TZBSP_GET_FEATURE_VERSION(feature_rsp.version);

	return ret;
}

int scm_svc_get_secure_state(uint32_t *state_low, uint32_t *state_high)
{
	get_secure_state_req req;
	get_secure_state_rsp rsp;

	int ret = 0;

	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support())
	{
		req.status_ptr = (uint32_t*)&rsp;
		req.status_len = sizeof(rsp);

		ret = scm_call(TZBSP_SVC_INFO,
			       TZ_INFO_GET_SECURE_STATE,
			       &req,
			       sizeof(req),
			       NULL,
			       0);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, TZ_INFO_GET_SECURE_STATE);
		scm_arg.x1 = MAKE_SCM_ARGS(0x0);

		ret = scm_call2(&scm_arg, &scm_ret);

		rsp.status_low = scm_ret.x1;
		rsp.status_high = scm_ret.x2;
	}

	if(!ret)
	{
		*state_low = rsp.status_low;
		*state_high = rsp.status_high;
	}

	return ret;
}

int scm_protect_keystore(uint32_t * img_ptr, uint32_t img_len)
{
	int ret = 0;
	ssd_protect_keystore_req protect_req;
	ssd_protect_keystore_rsp protect_rsp;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	protect_req.keystore_ptr = img_ptr;
	protect_req.keystore_len = img_len;

	arch_clean_invalidate_cache_range((addr_t) img_ptr, img_len);

	if (!is_scm_armv8_support())
	{
		ret = scm_call(SCM_SVC_SSD,
			       SSD_PROTECT_KEYSTORE_ID,
			       &protect_req,
			       sizeof(protect_req),
			       &protect_rsp,
			       sizeof(protect_rsp));
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_PROTECT_KEYSTORE_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = (uint32_t) protect_req.keystore_ptr;
		scm_arg.x3 = protect_req.keystore_len;

		ret = scm_call2(&scm_arg, &scm_ret);
		protect_rsp.status = scm_ret.x1;
	}
	if(!ret)
	{
		if(protect_rsp.status == TZBSP_SSD_PKS_SUCCESS)
			dprintf(INFO, "Successfully loaded the keystore ");
		else
		{
			dprintf(INFO, "Loading keystore failed status %d ", protect_rsp.status);
			ret = protect_rsp.status;
		}
	}
	else
		dprintf(INFO, "scm_call failed ");

	return ret;
}

void set_tamper_fuse_cmd()
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	void *resp_buf = NULL;
	size_t resp_len = 0;
	scmcall_arg scm_arg = {0};

	uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
	cmd_buf = (void *)&fuse_id;
	cmd_len = sizeof(fuse_id);

	if (!is_scm_armv8_support())
	{
		/* no response */
		resp_buf = NULL;
		resp_len = 0;

		svc_id = SCM_SVC_FUSE;
		cmd_id = SCM_BLOW_SW_FUSE_ID;

		scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_BLOW_SW_FUSE_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = (uint32_t) cmd_buf;
		scm_arg.x3 = cmd_len;

		scm_call2(&scm_arg, NULL);
	}
}

uint8_t get_tamper_fuse_cmd()
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	size_t resp_len = 0;
	uint8_t resp_buf;

	uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	cmd_buf = (void *)&fuse_id;
	cmd_len = sizeof(fuse_id);

	if (!is_scm_armv8_support())
	{
		/* response */
		resp_len = sizeof(resp_buf);

		svc_id = SCM_SVC_FUSE;
		cmd_id = SCM_IS_SW_FUSE_BLOWN_ID;

		scm_call(svc_id, cmd_id, cmd_buf, cmd_len, &resp_buf, resp_len);
		return resp_buf;
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_IS_SW_FUSE_BLOWN_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = (uint32_t) cmd_buf;
		scm_arg.x3 = cmd_len;

		scm_call2(&scm_arg, &scm_ret);
		return (uint8_t)scm_ret.x1;
	}
}

/*
 * struct qseecom_save_partition_hash_req
 * @partition_id - partition id.
 * @digest[SHA256_DIGEST_LENGTH] - sha256 digest.
 */
struct qseecom_save_partition_hash_req {
	uint32_t partition_id; /* in */
	uint8_t digest[SHA256_DIGEST_LENGTH]; /* in */
};


void save_kernel_hash_cmd(void *digest)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	void *resp_buf = NULL;
	size_t resp_len = 0;
	struct qseecom_save_partition_hash_req req;
	scmcall_arg scm_arg = {0};

	/* no response */
	resp_buf = NULL;
	resp_len = 0;

	req.partition_id = 0; /* kernel */
	memcpy(req.digest, digest, sizeof(req.digest));

	if (!is_scm_armv8_support())
	{
		svc_id = SCM_SVC_ES;
		cmd_id = SCM_SAVE_PARTITION_HASH_ID;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(req);

		scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x3, 0, SMC_PARAM_TYPE_BUFFER_READWRITE);
		scm_arg.x2 = req.partition_id;
		scm_arg.x3 = (uint32_t) &req.digest;
		scm_arg.x4 = sizeof(req.digest);

		if (scm_call2(&scm_arg, NULL))
			dprintf(CRITICAL, "Failed to Save kernel hash\n");
	}
}

int mdtp_cipher_dip_cmd(uint8_t *in_buf, uint32_t in_buf_size, uint8_t *out_buf,
						uint32_t out_buf_size, uint32_t direction)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	void *rsp_buf;
	size_t cmd_len;
	size_t rsp_len;
	mdtp_cipher_dip_req req;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	ASSERT(in_buf != NULL);
	ASSERT(out_buf != NULL);

	req.in_buf = in_buf;
	req.in_buf_size = in_buf_size;
	req.out_buf = out_buf;
	req.out_buf_size = out_buf_size;
	req.direction = direction;

	if (!is_scm_armv8_support())
	{
		svc_id = SCM_SVC_MDTP;
		cmd_id = SCM_MDTP_CIPHER_DIP;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(req);
		rsp_buf = NULL;
		rsp_len = 0;

		if (scm_call(svc_id, cmd_id, cmd_buf, cmd_len, rsp_buf, rsp_len))
		{
			dprintf(CRITICAL, "Failed to call Cipher DIP SCM\n");
			return -1;
		}
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_MDTP, SCM_MDTP_CIPHER_DIP);
		scm_arg.x1 = MAKE_SCM_ARGS(0x5, SMC_PARAM_TYPE_BUFFER_READ, SMC_PARAM_TYPE_VALUE,
						SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = (uint32_t)req.in_buf;
		scm_arg.x3 = req.in_buf_size;
		scm_arg.x4 = (uint32_t)req.out_buf;
		scm_arg.x5[0] = req.out_buf_size;
		scm_arg.x5[1] = req.direction;

		if (scm_call2(&scm_arg, &scm_ret))
		{
			dprintf(CRITICAL, "Failed in Cipher DIP SCM call\n");
			return -1;
		}
	}

	return 0;
}

int qfprom_read_row_cmd(uint32_t row_address,
						uint32_t addr_type,
						uint32_t *row_data,
						uint32_t *qfprom_api_status)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	void *rsp_buf;
	size_t cmd_len;
	size_t rsp_len;
	qfprom_read_row_req req;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	req.row_address = row_address;
	req.addr_type = addr_type;
	req.row_data = row_data;
	req.qfprom_api_status = qfprom_api_status;

	if (!is_scm_armv8_support())
	{
		svc_id = SCM_SVC_FUSE;
		cmd_id = SCM_QFPROM_READ_ROW_ID;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(req);
		rsp_buf = NULL;
		rsp_len = 0;

		if (scm_call(svc_id, cmd_id, cmd_buf, cmd_len, rsp_buf, rsp_len))
		{
			dprintf(CRITICAL, "Failed to call SCM_SVC_FUSE.SCM_QFPROM_READ_ROW_ID SCM\n");
			return -1;
		}
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_QFPROM_READ_ROW_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x4, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE,
						SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_BUFFER_READWRITE);
		scm_arg.x2 = req.row_address;
		scm_arg.x3 = req.addr_type;
		scm_arg.x4 = (uint32_t)req.row_data;
		scm_arg.x5[0] = (uint32_t)req.qfprom_api_status;

		if (scm_call2(&scm_arg, &scm_ret))
		{
			dprintf(CRITICAL, "Failed to call SCM_SVC_FUSE.SCM_QFPROM_READ_ROW_ID SCM\n");
			return -1;
		}
	}

	return 0;
}

/*
 * Switches the CE1 channel between ADM and register usage.
 * channel : AP_CE_REGISTER_USE, CE1 uses register interface
 *         : AP_CE_ADM_USE, CE1 uses ADM interface
 */
uint8_t switch_ce_chn_cmd(enum ap_ce_channel_type channel)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	size_t resp_len = 0;
	uint8_t resp_buf;

	struct {
		uint32_t resource;
		uint32_t chn_id;
	}__PACKED switch_ce_chn_buf;

	if (is_scm_armv8_support())
	{
		dprintf(INFO, "%s:SCM call is not supported\n", __func__);
		return 0;
	}

	switch_ce_chn_buf.resource = TZ_RESOURCE_CE_AP;
	switch_ce_chn_buf.chn_id = channel;
	cmd_buf = (void *)&switch_ce_chn_buf;
	cmd_len = sizeof(switch_ce_chn_buf);

	/* response */
	resp_len = sizeof(resp_buf);

	svc_id = SCM_SVC_CE_CHN_SWITCH_ID;
	cmd_id = SCM_CE_CHN_SWITCH_ID;

	scm_call(svc_id, cmd_id, cmd_buf, cmd_len, &resp_buf, resp_len);
	return resp_buf;
}

int scm_halt_pmic_arbiter()
{
	int ret = 0;
	scmcall_arg scm_arg = {0};

	if (is_scm_armv8_support()) {
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER);
		scm_arg.x1 = MAKE_SCM_ARGS(0x1);
		scm_arg.x2 = 0;
		scm_arg.atomic = true;
		ret = scm_call2(&scm_arg, NULL);
	} else {
		ret = scm_call_atomic(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER, 0);
	}

	/* Retry with the SCM_IO_DISABLE_PMIC_ARBITER1 func ID if the above func ID fails */
	if(ret) {
		if (is_scm_armv8_support()) {
			scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER1);
			scm_arg.x1 = MAKE_SCM_ARGS(0x1);
			scm_arg.x2 = 0;
			scm_arg.atomic = true;
			ret = scm_call2(&scm_arg, NULL);
		} else
			ret = scm_call_atomic(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER1, 0);
	}

	return ret;
}

/* Exception Level exec secure-os call
 * Jumps to kernel via secure-os and does not return
 * on successful jump. System parameters are set up &
 * passed on to secure-os and are utilized to boot the
 * kernel.
 *
 * @ kernel_entry : kernel entry point passed in as link register.
 * @ dtb_offset : dt blob address passed in as w0.
 * @ svc_id : indicates direction of switch 32->64 or 64->32
 *
 * Assumes all sanity checks have been performed on arguments.
 */

void scm_elexec_call(paddr_t kernel_entry, paddr_t dtb_offset)
{
	uint32_t svc_id = SCM_SVC_MILESTONE_32_64_ID;
	uint32_t cmd_id = SCM_SVC_MILESTONE_CMD_ID;
	void *cmd_buf;
	size_t cmd_len;
	static el1_system_param param __attribute__((aligned(0x1000)));
	scmcall_arg scm_arg = {0};

	param.el1_x0 = dtb_offset;
	param.el1_elr = kernel_entry;

	/* Response buffer = NULL as no response expected */
	dprintf(INFO, "Jumping to kernel via monitor\n");

	if (!is_scm_armv8_support())
	{
		/* Command buffer */
		cmd_buf = (void *)&param;
		cmd_len = sizeof(el1_system_param);

		scm_call(svc_id, cmd_id, cmd_buf, cmd_len, NULL, 0);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_MILESTONE_32_64_ID, SCM_SVC_MILESTONE_CMD_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READ);
		scm_arg.x2 = (uint32_t) &param;
		scm_arg.x3 = sizeof(el1_system_param);

		scm_call2(&scm_arg, NULL);
	}

	/* Assert if execution ever reaches here */
	dprintf(CRITICAL, "Failed to jump to kernel\n");
	ASSERT(0);
}

/* SCM Random Command */
int scm_random(uintptr_t * rbuf, uint32_t r_len)
{
	int ret;
	struct tz_prng_data data;
	scmcall_arg scm_arg = {0};
	/* Memory passed to TZ should be aligned to a cache line */
	BUF_DMA_ALIGN(rand_buf, sizeof(uintptr_t));

	if (!is_scm_armv8_support())
	{
		data.out_buf = (uint8_t*) rand_buf;
		data.out_buf_size = r_len;

		/*
		 * The random buffer must be flushed/invalidated before and after the TZ call.
		 */
		arch_clean_invalidate_cache_range((addr_t) rand_buf, r_len);

		ret = scm_call(TZ_SVC_CRYPTO, PRNG_CMD_ID, &data, sizeof(data), NULL, 0);

		/* Invalidate the updated random buffer */
		arch_clean_invalidate_cache_range((addr_t) rand_buf, r_len);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(TZ_SVC_CRYPTO, PRNG_CMD_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE);
		scm_arg.x2 = (uint32_t) rand_buf;
		scm_arg.x3 = r_len;

		ret = scm_call2(&scm_arg, NULL);
		if (!ret)
			arch_clean_invalidate_cache_range((addr_t) rand_buf, r_len);
		else
			dprintf(CRITICAL, "Secure canary SCM failed: %x\n", ret);
	}

	/* Copy back into the return buffer */
	*rbuf = *rand_buf;
	return ret;
}

uintptr_t get_canary()
{
	uintptr_t canary;
	if(scm_random(&canary, sizeof(canary))) {
		dprintf(CRITICAL, "scm_call for random failed !!!");
		/*
		 * Fall back to the lib rand API if the scm call failed.
		 */
		canary = rand();
	}

	return canary;
}

int scm_xpu_err_fatal_init()
{
	uint32_t ret = 0;
	uint32_t response = 0;
	tz_xpu_prot_cmd cmd;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support())
	{
		cmd.config = ERR_FATAL_ENABLE;
		cmd.spare = 0;

		ret = scm_call(SVC_MEMORY_PROTECTION, XPU_ERR_FATAL, &cmd, sizeof(cmd), &response,
						sizeof(response));
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SVC_MEMORY_PROTECTION, XPU_ERR_FATAL);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2);
		scm_arg.x2 = ERR_FATAL_ENABLE;
		scm_arg.x3 = 0x0;
		ret = scm_call2(&scm_arg, &scm_ret);
		response = scm_ret.x1;
	}

	if (ret)
		dprintf(CRITICAL, "Failed to set XPU violations as fatal errors: %u\n", response);
	else
		dprintf(INFO, "Configured XPU violations to be fatal errors\n");

	return ret;
}

static uint32_t scm_call_a32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, scmcall_ret *ret)
{
	register uint32_t r0 __asm__("r0") = x0;
	register uint32_t r1 __asm__("r1") = x1;
	register uint32_t r2 __asm__("r2") = x2;
	register uint32_t r3 __asm__("r3") = x3;
	register uint32_t r4 __asm__("r4") = x4;
	register uint32_t r5 __asm__("r5") = x5;
	register uint32_t r6 __asm__("r6") = 0;

	do {
		__asm__ volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r1")
			__asmeq("%2", "r2")
			__asmeq("%3", "r3")
			__asmeq("%4", "r0")
			__asmeq("%5", "r1")
			__asmeq("%6", "r2")
			__asmeq("%7", "r3")
			__asmeq("%8", "r4")
			__asmeq("%9", "r5")
			__asmeq("%10", "r6")
			"smc	#0	@ switch to secure world\n"
			: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
			: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5), "r" (r6));
	} while(r0 == 1);

	if (ret)
	{
		ret->x1 = r1;
		ret->x2 = r2;
		ret->x3 = r3;
	}

	return r0;
}

uint32_t scm_call2(scmcall_arg *arg, scmcall_ret *ret)
{
	uint32_t *indir_arg = NULL;
	uint32_t x5;
	int i;
	uint32_t rc;

	arg->x0 = arg->atomic ? (arg->x0 | SCM_ATOMIC_BIT) : arg->x0;
	x5 = arg->x5[0];

	if ((arg->x1 & 0xF) > SCM_MAX_ARG_LEN - 1)
	{
		indir_arg = memalign(CACHE_LINE, ROUNDUP((SCM_INDIR_MAX_LEN * sizeof(uint32_t)), CACHE_LINE));
		ASSERT(indir_arg);

		for (i = 0; i < SCM_INDIR_MAX_LEN; i++)
		{
			indir_arg[i] = arg->x5[i];
		}
		arch_clean_invalidate_cache_range((addr_t) indir_arg, ROUNDUP((SCM_INDIR_MAX_LEN * sizeof(uint32_t)), CACHE_LINE));
		x5 = (addr_t) indir_arg;
	}

	rc = scm_call_a32(arg->x0, arg->x1, arg->x2, arg->x3, arg->x4, x5, ret);

	/* Release the indirect-argument buffer on both success and failure paths */
	if (indir_arg)
		free(indir_arg);

	if (rc)
	{
		dprintf(CRITICAL, "SCM call: 0x%x failed with :%x\n", arg->x0, rc);
		return rc;
	}

	return 0;
}
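
/*
 * Usage sketch (illustrative, not part of the original code): ARMv8 callers
 * build a scmcall_arg with MAKE_SIP_SCM_CMD()/MAKE_SCM_ARGS() and read any
 * result out of scmcall_ret, mirroring is_scm_call_available() above:
 *
 *	scmcall_arg arg = {0};
 *	scmcall_ret ret = {0};
 *
 *	arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);
 *	arg.x1 = MAKE_SCM_ARGS(0x1);
 *	arg.x2 = MAKE_SIP_SCM_CMD(svc_id, cmd_id);
 *
 *	if (!scm_call2(&arg, &ret))
 *		avail = ret.x1;
 *
 * Arguments beyond x4 are passed through x5[]; scm_call2() spills them to a
 * cache-aligned indirect buffer when more than SCM_MAX_ARG_LEN are used.
 */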

static bool secure_boot_enabled = true;
static bool wdog_debug_fuse_disabled = true;

void scm_check_boot_fuses()
{
	uint32_t ret = 0;
	uint32_t resp;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support()) {
		ret = scm_call(TZBSP_SVC_INFO, IS_SECURE_BOOT_ENABLED, NULL, 0, &resp, sizeof(resp));
	} else {
		scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, IS_SECURE_BOOT_ENABLED);
		ret = scm_call2(&scm_arg, &scm_ret);
		resp = scm_ret.x1;
	}

	/* Parse Bit 0 and Bit 2 of the response */
	if(!ret) {
		/* Bit 0 - SECBOOT_ENABLE_CHECK */
		if(resp & 0x1)
			secure_boot_enabled = false;
		/* Bit 2 - DEBUG_DISABLE_CHECK */
		if(resp & 0x4)
			wdog_debug_fuse_disabled = false;
	} else
		dprintf(CRITICAL, "scm call to check secure boot fuses failed\n");
}

bool is_secure_boot_enable()
{
	scm_check_boot_fuses();
	return secure_boot_enabled;
}

static uint32_t scm_io_read(addr_t address)
{
	uint32_t ret;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support()) {
		ret = scm_call_atomic(SCM_SVC_IO, SCM_IO_READ, address);
	} else {
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_IO, SCM_IO_READ);
		scm_arg.x1 = MAKE_SCM_ARGS(0x1);
		scm_arg.x2 = address;
		scm_arg.atomic = true;
		ret = scm_call2(&scm_arg, &scm_ret);
		/* Return the value read if the call is successful */
		if (!ret)
			ret = scm_ret.x1;
	}
	return ret;
}

uint32_t scm_io_write(uint32_t address, uint32_t val)
{
	uint32_t ret;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support()) {
		ret = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, val);
	} else {
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_IO, SCM_IO_WRITE);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2);
		scm_arg.x2 = address;
		scm_arg.x3 = val;
		scm_arg.atomic = true;
		ret = scm_call2(&scm_arg, &scm_ret);
	}
	return ret;
}

int scm_call2_atomic(uint32_t svc, uint32_t cmd, uint32_t arg1, uint32_t arg2)
{
	uint32_t ret = 0;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support())
	{
		ret = scm_call_atomic2(svc, cmd, arg1, arg2);
	} else {
		scm_arg.x0 = MAKE_SIP_SCM_CMD(svc, cmd);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2);
		scm_arg.x2 = arg1;
		scm_arg.x3 = arg2;
		ret = scm_call2(&scm_arg, &scm_ret);
	}
	return ret;
}

int scm_disable_sdi()
{
	int ret = 0;

	scm_check_boot_fuses();

	/* Make the WDOG_DEBUG DISABLE scm call only in non-secure boot */
	if(!(secure_boot_enabled || wdog_debug_fuse_disabled)) {
		ret = scm_call2_atomic(SCM_SVC_BOOT, WDOG_DEBUG_DISABLE, 1, 0);
		if(ret)
			dprintf(CRITICAL, "Failed to disable secure wdog debug: %d\n", ret);
	}
	return ret;
}

#if PLATFORM_USE_SCM_DLOAD
int scm_dload_mode(enum reboot_reason mode)
{
	int ret = 0;
	uint32_t dload_type;

	dprintf(SPEW, "DLOAD mode: %d\n", mode);
	if (mode == NORMAL_DLOAD) {
		dload_type = SCM_DLOAD_MODE;
#if DISABLE_DLOAD_MODE
		return 0;
#endif
	} else if(mode == EMERGENCY_DLOAD)
		dload_type = SCM_EDLOAD_MODE;
	else
		dload_type = 0;

	/* Write to the Boot MISC register */
	ret = is_scm_call_available(SCM_SVC_BOOT, SCM_DLOAD_CMD);

	if (ret > 0)
		ret = scm_call2_atomic(SCM_SVC_BOOT, SCM_DLOAD_CMD, dload_type, 0);
	else
		ret = scm_io_write(TCSR_BOOT_MISC_DETECT, dload_type);

	if(ret) {
		dprintf(CRITICAL, "Failed to write to boot misc: %d\n", ret);
		return ret;
	}

#if !DISABLE_DLOAD_MODE
	return scm_disable_sdi();
#else
	return ret;
#endif
}

bool scm_device_enter_dload()
{
	uint32_t ret = 0;
	uint32_t dload_mode = 0;

	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	scm_arg.x0 = MAKE_SIP_SCM_CMD(TZ_SVC_DLOAD_MODE, SCM_DLOAD_CMD);
	ret = scm_call2(&scm_arg, &scm_ret);
	if (ret)
		dprintf(CRITICAL, "SCM call to check dload mode failed: %x\n", ret);

	if (!ret)
	{
		dload_mode = scm_io_read(TCSR_BOOT_MISC_DETECT);
		if (board_soc_version() < 0x30000)
			dload_mode = (dload_mode >> 16) & 0xFFFF;
	}

	if (dload_mode == SCM_DLOAD_MODE)
		return true;

	return false;
}
#endif