/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *     * Neither the name of The Linux Foundation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <asm.h>
#include <bits.h>
#include <arch/ops.h>
#include <rand.h>
#include <image_verify.h>
#include <dload_util.h>
#include <platform/iomap.h>
#include <board.h>
#include "scm.h"

#pragma GCC optimize ("O0")

/* From Linux Kernel asm/system.h */
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef offsetof
# define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif

#define SCM_CLASS_REGISTER	(0x2 << 8)
#define SCM_MASK_IRQS		BIT(5)
#define SCM_ATOMIC(svc, cmd, n)	((((((svc) & 0x3f) << 10)|((cmd) & 0x3ff)) << 12) | \
				SCM_CLASS_REGISTER | \
				SCM_MASK_IRQS | \
				((n) & 0xf))

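/*
 * Illustrative sketch (not part of the original source): how SCM_ATOMIC()
 * packs a legacy "register call" command word.  The svc/cmd values below are
 * made up purely to show the bit layout.
 *
 *   uint32_t id = SCM_ATOMIC(0x10, 0x3, 1);
 *   //  (((0x10 & 0x3f) << 10) | (0x3 & 0x3ff)) << 12   service/command id
 *   //  | SCM_CLASS_REGISTER                            register-class call
 *   //  | SCM_MASK_IRQS                                 IRQs masked in TZ
 *   //  | (1 & 0xf)                                     argument count
 */
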
/* SCM interface as per ARM spec present? */
bool scm_arm_support;
static bool scm_initialized;

bool is_scm_armv8_support()
{
	if (!scm_initialized)
	{
		scm_init();
		scm_initialized = true;
	}

	return scm_arm_support;
}

int is_scm_call_available(uint32_t svc_id, uint32_t cmd_id)
{
	int ret;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);
	scm_arg.x1 = MAKE_SCM_ARGS(0x1);
	scm_arg.x2 = MAKE_SIP_SCM_CMD(svc_id, cmd_id);

	ret = scm_call2(&scm_arg, &scm_ret);

	if (!ret)
		return scm_ret.x1;

	return ret;
}

static int scm_arm_support_available(uint32_t svc_id, uint32_t cmd_id)
{
	int ret;

	ret = is_scm_call_available(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);

	if (ret > 0)
		scm_arm_support = true;

	return ret;
}

void scm_init()
{
	int ret;

	if (scm_initialized)
		return;

	ret = scm_arm_support_available(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);

	if (ret < 0)
		dprintf(CRITICAL, "Failed to initialize SCM\n");
}

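/*
 * Usage sketch (illustrative only): every wrapper below picks the calling
 * convention at runtime.  svc/cmd and the req/rsp/value names here are
 * placeholders, not an existing API.
 *
 *   if (!is_scm_armv8_support()) {
 *           ret = scm_call(svc, cmd, &req, sizeof(req), &rsp, sizeof(rsp));
 *   } else {
 *           scmcall_arg arg = {0};
 *           scmcall_ret out = {0};
 *
 *           arg.x0 = MAKE_SIP_SCM_CMD(svc, cmd);
 *           arg.x1 = MAKE_SCM_ARGS(0x1, SMC_PARAM_TYPE_VALUE);
 *           arg.x2 = value;
 *           ret = scm_call2(&arg, &out);
 *   }
 */
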
/**
 * alloc_scm_command() - Allocate an SCM command
 * @cmd_size: size of the command buffer
 * @resp_size: size of the response buffer
 *
 * Allocate an SCM command, including enough room for the command
 * and response headers as well as the command and response buffers.
 *
 * Returns a valid &scm_command on success or %NULL if the allocation fails.
 */
static struct scm_command *alloc_scm_command(size_t cmd_size, size_t resp_size)
{
	struct scm_command *cmd;
	size_t len = sizeof(*cmd) + sizeof(struct scm_response) + cmd_size +
	    resp_size;

	cmd = memalign(CACHE_LINE, ROUNDUP(len, CACHE_LINE));
	if (cmd) {
		memset(cmd, 0, len);
		cmd->len = len;
		cmd->buf_offset = offsetof(struct scm_command, buf);
		cmd->resp_hdr_offset = cmd->buf_offset + cmd_size;
	}
	return cmd;
}

/**
 * free_scm_command() - Free an SCM command
 * @cmd: command to free
 *
 * Free an SCM command.
 */
static inline void free_scm_command(struct scm_command *cmd)
{
	free(cmd);
}

/**
 * scm_command_to_response() - Get a pointer to a scm_response
 * @cmd: command
 *
 * Returns a pointer to a response for a command.
 */
static inline struct scm_response *scm_command_to_response(const struct
							   scm_command *cmd)
{
	return (void *)cmd + cmd->resp_hdr_offset;
}

/**
 * scm_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * Returns a pointer to the command buffer of a command.
 */
static inline void *scm_get_command_buffer(const struct scm_command *cmd)
{
	return (void *)cmd->buf;
}

/**
 * scm_get_response_buffer() - Get a pointer to a response buffer
 * @rsp: response
 *
 * Returns a pointer to a response buffer of a response.
 */
static inline void *scm_get_response_buffer(const struct scm_response *rsp)
{
	return (void *)rsp + rsp->buf_offset;
}

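/*
 * Sketch of the buffer layout assumed by the helpers above (derived from
 * alloc_scm_command() and the offset fields; sizes are whatever the caller
 * requested):
 *
 *   cmd ----------------------> +----------------------+  (CACHE_LINE aligned)
 *                               | struct scm_command   |
 *   cmd + buf_offset ---------> +----------------------+
 *                               | command buffer       |  cmd_size bytes
 *   cmd + resp_hdr_offset ----> +----------------------+
 *                               | struct scm_response  |
 *   rsp + rsp->buf_offset ----> +----------------------+
 *                               | response buffer      |  resp_size bytes
 *                               +----------------------+
 */
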
static uint32_t smc(uint32_t cmd_addr)
{
	uint32_t context_id;
	register uint32_t r0 __asm__("r0") = 1;
	register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
	register uint32_t r2 __asm__("r2") = cmd_addr;

	__asm__("1:smc #0 @ switch to secure world\n"
		"cmp r0, #1 \n"
		"beq 1b \n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2)
		: "r3", "cc");
	return r0;
}

/**
 * scm_call_atomic() - Make an SCM call with one or no argument
 * @svc: service id
 * @cmd: command id
 * @arg1: argument
 */
static int scm_call_atomic(uint32_t svc, uint32_t cmd, uint32_t arg1)
{
	uint32_t context_id;
	register uint32_t r0 __asm__("r0") = SCM_ATOMIC(svc, cmd, 1);
	register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
	register uint32_t r2 __asm__("r2") = arg1;

	__asm__ volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
		"smc #0 @ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2)
		: "r3");
	return r0;
}

/**
 * scm_call_atomic2() - Send an atomic SCM command with two arguments
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @arg1: first argument
 * @arg2: second argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
 */
int scm_call_atomic2(uint32_t svc, uint32_t cmd, uint32_t arg1, uint32_t arg2)
{
	int context_id;
	register uint32_t r0 __asm__("r0") = SCM_ATOMIC(svc, cmd, 2);
	register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
	register uint32_t r2 __asm__("r2") = arg1;
	register uint32_t r3 __asm__("r3") = arg2;

	__asm__ volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
		__asmeq("%4", "r3")
		"smc #0 @ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2), "r" (r3));
	return r0;
}

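/*
 * Usage sketch (mirrors scm_io_read()/scm_io_write()/scm_halt_pmic_arbiter()
 * further down in this file):
 *
 *   ret = scm_call_atomic(SCM_SVC_IO, SCM_IO_READ, address);
 *   ret = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, value);
 *
 * Both issue a single SMC with IRQs masked in the secure world (SCM_MASK_IRQS
 * is set by SCM_ATOMIC), so they are only suitable for short register-style
 * services.
 */
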
/**
 * scm_call() - Send an SCM command
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 */
int
scm_call(uint32_t svc_id, uint32_t cmd_id, const void *cmd_buf,
	 size_t cmd_len, void *resp_buf, size_t resp_len)
{
	int ret;
	struct scm_command *cmd;
	struct scm_response *rsp;
	uint8_t *resp_ptr;

	cmd = alloc_scm_command(cmd_len, resp_len);
	if (!cmd)
		return ERR_NO_MEMORY;

	cmd->id = (svc_id << 10) | cmd_id;
	if (cmd_buf)
		memcpy(scm_get_command_buffer(cmd), cmd_buf, cmd_len);

	/* Flush command to main memory for TZ */
	arch_clean_invalidate_cache_range((addr_t) cmd, cmd->len);

	ret = smc((uint32_t) cmd);
	if (ret)
		goto out;

	if (resp_len) {
		rsp = scm_command_to_response(cmd);

		do
		{
			/* Need to invalidate before each check since TZ will update
			 * the response complete flag in main memory.
			 */
			arch_invalidate_cache_range((addr_t) rsp, sizeof(*rsp));
		} while (!rsp->is_complete);

		resp_ptr = scm_get_response_buffer(rsp);

		/* Invalidate any cached response data */
		arch_invalidate_cache_range((addr_t) resp_ptr, resp_len);

		if (resp_buf)
			memcpy(resp_buf, resp_ptr, resp_len);
	}
out:
	free_scm_command(cmd);
	return ret;
}

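/*
 * Usage sketch (see restore_secure_cfg() below for a real caller):
 *
 *   tz_secure_cfg cfg;
 *
 *   cfg.id = id;
 *   cfg.spare = 0;
 *   ret = scm_call(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG,
 *                  &cfg, sizeof(cfg), NULL, 0);
 *
 * The request is copied into a cache-line aligned scm_command, flushed to
 * main memory for TZ, and when a response is expected rsp->is_complete is
 * polled with cache invalidation on every iteration.
 */
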
int restore_secure_cfg(uint32_t id)
{
	int ret = 0;
	tz_secure_cfg secure_cfg;

	secure_cfg.id = id;
	secure_cfg.spare = 0;
	scmcall_arg scm_arg = {0};

	if(!is_scm_armv8_support())
	{
		ret = scm_call(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG, &secure_cfg, sizeof(secure_cfg),
			       NULL, 0);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2);
		scm_arg.x2 = id;
		scm_arg.x3 = 0x0; /* Spare unused */

		ret = scm_call2(&scm_arg, NULL);
	}

	if (ret)
	{
		dprintf(CRITICAL, "Secure Config failed\n");
		ret = 1;
	}

	return ret;
}

/* SCM Encrypt Command */
int encrypt_scm(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
	int ret;
	img_req cmd;
	scmcall_arg scm_arg = {0};

	cmd.img_ptr = (uint32*) img_ptr;
	cmd.img_len_ptr = img_len_ptr;

	/* Image data is operated upon by TZ, which accesses only the main memory.
	 * It must be flushed/invalidated before and after TZ call.
	 */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	if (!is_scm_armv8_support())
	{
		ret = scm_call(SCM_SVC_SSD, SSD_ENCRYPT_ID, &cmd, sizeof(cmd), NULL, 0);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_ENCRYPT_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_BUFFER_READWRITE);
		scm_arg.x2 = (uint32_t) cmd.img_ptr;
		scm_arg.x3 = (uint32_t) cmd.img_len_ptr;

		ret = scm_call2(&scm_arg, NULL);
	}

	/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
	 * before we use them.
	 */
	arch_clean_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
	arch_clean_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

	/* Invalidate the updated image data */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	return ret;
}

/* SCM Decrypt Command */
int decrypt_scm(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
	int ret;
	img_req cmd;

	if (is_scm_armv8_support())
	{
		dprintf(INFO, "%s:SCM call is not supported\n", __func__);
		return -1;
	}

	cmd.img_ptr = (uint32*) img_ptr;
	cmd.img_len_ptr = img_len_ptr;

	/* Image data is operated upon by TZ, which accesses only the main memory.
	 * It must be flushed/invalidated before and after TZ call.
	 */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	ret = scm_call(SCM_SVC_SSD, SSD_DECRYPT_ID, &cmd, sizeof(cmd), NULL, 0);

	/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
	 * before we use them.
	 */
	arch_clean_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
	arch_clean_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

	/* Invalidate the updated image data */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	return ret;
}

static int ssd_image_is_encrypted(uint32_t ** img_ptr, uint32_t * img_len_ptr, uint32 * ctx_id)
{
	int ret = 0;
	ssd_parse_md_req parse_req;
	ssd_parse_md_rsp parse_rsp;
	int prev_len = 0;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};
	/* Populate the meta-data pointer. Here md_len is the meta-data length.
	 * The code below follows a growing-length approach: first send
	 * min(*img_len_ptr, SSD_HEADER_MIN_SIZE), say 128 bytes for example.
	 * If parse_rsp.status == PARSING_INCOMPLETE we send md_len = 256.
	 * If the subsequent status is PARSING_INCOMPLETE we send md_len = 512,
	 * 1024 bytes and so on, until we get a valid response (rsp.status) from TZ.
	 */

	parse_req.md = (uint32*)*img_ptr;
	parse_req.md_len = ((*img_len_ptr) >= SSD_HEADER_MIN_SIZE) ? SSD_HEADER_MIN_SIZE : (*img_len_ptr);

	arch_clean_invalidate_cache_range((addr_t) *img_ptr, parse_req.md_len);

	do
	{
		if (!is_scm_armv8_support())
		{
			ret = scm_call(SCM_SVC_SSD,
				       SSD_PARSE_MD_ID,
				       &parse_req,
				       sizeof(parse_req),
				       &parse_rsp,
				       sizeof(parse_rsp));
		}
		else
		{
			scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_PARSE_MD_ID);
			scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_BUFFER_READWRITE);
			scm_arg.x2 = parse_req.md_len;
			scm_arg.x3 = (uint32_t) parse_req.md;
			scm_arg.atomic = true;

			ret = scm_call2(&scm_arg, &scm_ret);
			parse_rsp.status = scm_ret.x1;
		}
		if(!ret && (parse_rsp.status == SSD_PMD_PARSING_INCOMPLETE))
		{
			prev_len = parse_req.md_len;

			parse_req.md_len *= MULTIPLICATION_FACTOR;

			arch_clean_invalidate_cache_range((addr_t) (*img_ptr + prev_len),
							  (parse_req.md_len - prev_len));

			continue;
		}
		else
			break;

	} while(true);

	if(!ret)
	{
		if(parse_rsp.status == SSD_PMD_ENCRYPTED)
		{
			*ctx_id = parse_rsp.md_ctx_id;
			*img_len_ptr = *img_len_ptr - ((uint8_t*)parse_rsp.md_end_ptr - (uint8_t*)*img_ptr);
			*img_ptr = (uint32_t*)parse_rsp.md_end_ptr;
		}

		ret = parse_rsp.status;
	}
	else
	{
		dprintf(CRITICAL, "ssd_image_is_encrypted call failed");

		ASSERT(ret == 0);
	}

	return ret;
}

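/*
 * Worked example of the growing-length parse above.  Assuming (not verified
 * here) SSD_HEADER_MIN_SIZE = 128 and MULTIPLICATION_FACTOR = 2, the metadata
 * window grows 128 -> 256 -> 512 -> 1024 bytes until TZ stops returning
 * SSD_PMD_PARSING_INCOMPLETE; on each retry only the newly exposed bytes
 * (md_len - prev_len) are flushed before the next call.
 */
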
int decrypt_scm_v2(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
	int ret = 0;
	uint32 ctx_id = 0;
	ssd_decrypt_img_frag_req decrypt_req;
	ssd_decrypt_img_frag_rsp decrypt_rsp;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	ret = ssd_image_is_encrypted(img_ptr, img_len_ptr, &ctx_id);
	switch(ret)
	{
	case SSD_PMD_ENCRYPTED:
		/* Image data is operated upon by TZ, which accesses only the main memory.
		 * It must be flushed/invalidated before and after TZ call.
		 */
		arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

		/* decrypt the image here */
		decrypt_req.md_ctx_id = ctx_id;
		decrypt_req.last_frag = 1;
		decrypt_req.frag_len = *img_len_ptr;
		decrypt_req.frag = *img_ptr;

		if (!is_scm_armv8_support())
		{
			ret = scm_call(SCM_SVC_SSD,
				       SSD_DECRYPT_IMG_FRAG_ID,
				       &decrypt_req,
				       sizeof(decrypt_req),
				       &decrypt_rsp,
				       sizeof(decrypt_rsp));
		}
		else
		{
			scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_DECRYPT_IMG_FRAG_ID);
			scm_arg.x1 = MAKE_SCM_ARGS(0x4, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE,
						   SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_BUFFER_READWRITE);
			scm_arg.x2 = decrypt_req.md_ctx_id;
			scm_arg.x3 = decrypt_req.last_frag;
			scm_arg.x4 = decrypt_req.frag_len;
			scm_arg.x5[0] = (uint32_t) decrypt_req.frag;

			ret = scm_call2(&scm_arg, &scm_ret);
			decrypt_rsp.status = scm_ret.x1;
		}
		if(!ret){
			ret = decrypt_rsp.status;
		}

		/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
		 * before we use them.
		 */
		arch_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
		arch_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

		/* Invalidate the updated image data */
		arch_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

		break;

	case SSD_PMD_NOT_ENCRYPTED:
	case SSD_PMD_NO_MD_FOUND:
		ret = 0;
		break;

	case SSD_PMD_BUSY:
	case SSD_PMD_BAD_MD_PTR_OR_LEN:
	case SSD_PMD_PARSING_INCOMPLETE:
	case SSD_PMD_PARSING_FAILED:
	case SSD_PMD_SETUP_CIPHER_FAILED:
		dprintf(CRITICAL, "decrypt_scm_v2: failed status %d\n", ret);
		break;

	default:
		dprintf(CRITICAL, "decrypt_scm_v2: case default: failed status %d\n", ret);
		break;
	}
	return ret;
}

int scm_svc_version(uint32 * major, uint32 * minor)
{
	feature_version_req feature_req;
	feature_version_rsp feature_rsp;
	int ret = 0;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	feature_req.feature_id = TZBSP_FVER_SSD;

	if (!is_scm_armv8_support())
	{
		ret = scm_call(TZBSP_SVC_INFO,
			       TZ_INFO_GET_FEATURE_ID,
			       &feature_req,
			       sizeof(feature_req),
			       &feature_rsp,
			       sizeof(feature_rsp));
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, TZ_INFO_GET_FEATURE_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x1, SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = feature_req.feature_id;

		ret = scm_call2(&scm_arg, &scm_ret);
		feature_rsp.version = scm_ret.x1;
	}

	if(!ret)
		*major = TZBSP_GET_FEATURE_VERSION(feature_rsp.version);

	return ret;
}

int scm_svc_get_secure_state(uint32_t *state_low, uint32_t *state_high)
{
	get_secure_state_req req;
	get_secure_state_rsp rsp;

	int ret = 0;

	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support())
	{
		req.status_ptr = (uint32_t*)&rsp;
		req.status_len = sizeof(rsp);

		ret = scm_call(TZBSP_SVC_INFO,
			       TZ_INFO_GET_SECURE_STATE,
			       &req,
			       sizeof(req),
			       NULL,
			       0);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, TZ_INFO_GET_SECURE_STATE);
		scm_arg.x1 = MAKE_SCM_ARGS(0x0);

		ret = scm_call2(&scm_arg, &scm_ret);

		rsp.status_low = scm_ret.x1;
		rsp.status_high = scm_ret.x2;
	}

	if(!ret)
	{
		*state_low = rsp.status_low;
		*state_high = rsp.status_high;
	}

	return ret;
}

int scm_protect_keystore(uint32_t * img_ptr, uint32_t img_len)
{
	int ret = 0;
	ssd_protect_keystore_req protect_req;
	ssd_protect_keystore_rsp protect_rsp;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	protect_req.keystore_ptr = img_ptr;
	protect_req.keystore_len = img_len;

	arch_clean_invalidate_cache_range((addr_t) img_ptr, img_len);

	if (!is_scm_armv8_support())
	{
		ret = scm_call(SCM_SVC_SSD,
			       SSD_PROTECT_KEYSTORE_ID,
			       &protect_req,
			       sizeof(protect_req),
			       &protect_rsp,
			       sizeof(protect_rsp));
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_PROTECT_KEYSTORE_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = (uint32_t) protect_req.keystore_ptr;
		scm_arg.x3 = protect_req.keystore_len;

		ret = scm_call2(&scm_arg, &scm_ret);
		protect_rsp.status = scm_ret.x1;
	}
	if(!ret)
	{
		if(protect_rsp.status == TZBSP_SSD_PKS_SUCCESS)
			dprintf(INFO, "Successfully loaded the keystore ");
		else
		{
			dprintf(INFO, "Loading keystore failed status %d ", protect_rsp.status);
			ret = protect_rsp.status;
		}
	}
	else
		dprintf(INFO, "scm_call failed ");

	return ret;
}

void set_tamper_fuse_cmd()
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	void *resp_buf = NULL;
	size_t resp_len = 0;
	scmcall_arg scm_arg = {0};

	uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
	cmd_buf = (void *)&fuse_id;
	cmd_len = sizeof(fuse_id);

	if (!is_scm_armv8_support())
	{
		/* no response */
		resp_buf = NULL;
		resp_len = 0;

		svc_id = SCM_SVC_FUSE;
		cmd_id = SCM_BLOW_SW_FUSE_ID;

		scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_BLOW_SW_FUSE_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = (uint32_t) cmd_buf;
		scm_arg.x3 = cmd_len;

		scm_call2(&scm_arg, NULL);
	}
}

uint8_t get_tamper_fuse_cmd()
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	size_t resp_len = 0;
	uint8_t resp_buf;

	uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	cmd_buf = (void *)&fuse_id;
	cmd_len = sizeof(fuse_id);

	if (!is_scm_armv8_support())
	{
		/* response */
		resp_len = sizeof(resp_buf);

		svc_id = SCM_SVC_FUSE;
		cmd_id = SCM_IS_SW_FUSE_BLOWN_ID;

		scm_call(svc_id, cmd_id, cmd_buf, cmd_len, &resp_buf, resp_len);
		return resp_buf;
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_IS_SW_FUSE_BLOWN_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = (uint32_t) cmd_buf;
		scm_arg.x3 = cmd_len;

		scm_call2(&scm_arg, &scm_ret);
		return (uint8_t)scm_ret.x1;
	}
}

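/*
 * Usage sketch (illustrative; the real callers live outside this file): blow
 * the HLOS image-tamper fuse and read it back.
 *
 *   set_tamper_fuse_cmd();
 *   if (get_tamper_fuse_cmd())
 *           dprintf(INFO, "HLOS image tamper fuse is blown\n");
 */
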
/*
 * struct qseecom_save_partition_hash_req
 * @partition_id - partition id.
 * @digest[SHA256_DIGEST_LENGTH] - sha256 digest.
 */
struct qseecom_save_partition_hash_req {
	uint32_t partition_id; /* in */
	uint8_t digest[SHA256_DIGEST_LENGTH]; /* in */
};

void save_kernel_hash_cmd(void *digest)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	void *resp_buf = NULL;
	size_t resp_len = 0;
	struct qseecom_save_partition_hash_req req;
	scmcall_arg scm_arg = {0};

	/* no response */
	resp_buf = NULL;
	resp_len = 0;

	req.partition_id = 0; /* kernel */
	memcpy(req.digest, digest, sizeof(req.digest));

	if (!is_scm_armv8_support())
	{
		svc_id = SCM_SVC_ES;
		cmd_id = SCM_SAVE_PARTITION_HASH_ID;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(req);

		scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x3, 0, SMC_PARAM_TYPE_BUFFER_READWRITE);
		scm_arg.x2 = req.partition_id;
		scm_arg.x3 = (uint32_t) &req.digest;
		scm_arg.x4 = sizeof(req.digest);

		if (scm_call2(&scm_arg, NULL))
			dprintf(CRITICAL, "Failed to Save kernel hash\n");
	}
}

int mdtp_cipher_dip_cmd(uint8_t *in_buf, uint32_t in_buf_size, uint8_t *out_buf,
			uint32_t out_buf_size, uint32_t direction)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	void *rsp_buf;
	size_t cmd_len;
	size_t rsp_len;
	mdtp_cipher_dip_req req;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	ASSERT(in_buf != NULL);
	ASSERT(out_buf != NULL);

	req.in_buf = in_buf;
	req.in_buf_size = in_buf_size;
	req.out_buf = out_buf;
	req.out_buf_size = out_buf_size;
	req.direction = direction;

	if (!is_scm_armv8_support())
	{
		svc_id = SCM_SVC_MDTP;
		cmd_id = SCM_MDTP_CIPHER_DIP;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(req);
		rsp_buf = NULL;
		rsp_len = 0;

		if (scm_call(svc_id, cmd_id, cmd_buf, cmd_len, rsp_buf, rsp_len))
		{
			dprintf(CRITICAL, "Failed to call Cipher DIP SCM\n");
			return -1;
		}
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_MDTP, SCM_MDTP_CIPHER_DIP);
		scm_arg.x1 = MAKE_SCM_ARGS(0x5, SMC_PARAM_TYPE_BUFFER_READ, SMC_PARAM_TYPE_VALUE,
					   SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = (uint32_t)req.in_buf;
		scm_arg.x3 = req.in_buf_size;
		scm_arg.x4 = (uint32_t)req.out_buf;
		scm_arg.x5[0] = req.out_buf_size;
		scm_arg.x5[1] = req.direction;

		if (scm_call2(&scm_arg, &scm_ret))
		{
			dprintf(CRITICAL, "Failed in Cipher DIP SCM call\n");
			return -1;
		}
	}

	return 0;
}

int qfprom_read_row_cmd(uint32_t row_address,
			uint32_t addr_type,
			uint32_t *row_data,
			uint32_t *qfprom_api_status)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	void *rsp_buf;
	size_t cmd_len;
	size_t rsp_len;
	qfprom_read_row_req req;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	req.row_address = row_address;
	req.addr_type = addr_type;
	req.row_data = row_data;
	req.qfprom_api_status = qfprom_api_status;

	if (!is_scm_armv8_support())
	{
		svc_id = SCM_SVC_FUSE;
		cmd_id = SCM_QFPROM_READ_ROW_ID;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(req);
		rsp_buf = NULL;
		rsp_len = 0;

		if (scm_call(svc_id, cmd_id, cmd_buf, cmd_len, rsp_buf, rsp_len))
		{
			dprintf(CRITICAL, "Failed to call SCM_SVC_FUSE.SCM_QFPROM_READ_ROW_ID SCM\n");
			return -1;
		}
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_QFPROM_READ_ROW_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x4, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE,
					   SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_BUFFER_READWRITE);
		scm_arg.x2 = req.row_address;
		scm_arg.x3 = req.addr_type;
		scm_arg.x4 = (uint32_t)req.row_data;
		scm_arg.x5[0] = (uint32_t)req.qfprom_api_status;

		if (scm_call2(&scm_arg, &scm_ret))
		{
			dprintf(CRITICAL, "Failed to call SCM_SVC_FUSE.SCM_QFPROM_READ_ROW_ID SCM\n");
			return -1;
		}
	}

	return 0;
}

/*
 * Switches the CE1 channel between ADM and register usage.
 * channel : AP_CE_REGISTER_USE, CE1 uses register interface
 *         : AP_CE_ADM_USE, CE1 uses ADM interface
 */
uint8_t switch_ce_chn_cmd(enum ap_ce_channel_type channel)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	size_t resp_len = 0;
	uint8_t resp_buf;

	struct {
		uint32_t resource;
		uint32_t chn_id;
	}__PACKED switch_ce_chn_buf;

	if (is_scm_armv8_support())
	{
		dprintf(INFO, "%s:SCM call is not supported\n", __func__);
		return 0;
	}

	switch_ce_chn_buf.resource = TZ_RESOURCE_CE_AP;
	switch_ce_chn_buf.chn_id = channel;
	cmd_buf = (void *)&switch_ce_chn_buf;
	cmd_len = sizeof(switch_ce_chn_buf);

	/* response */
	resp_len = sizeof(resp_buf);

	svc_id = SCM_SVC_CE_CHN_SWITCH_ID;
	cmd_id = SCM_CE_CHN_SWITCH_ID;

	scm_call(svc_id, cmd_id, cmd_buf, cmd_len, &resp_buf, resp_len);
	return resp_buf;
}

int scm_halt_pmic_arbiter()
{
	int ret = 0;
	scmcall_arg scm_arg = {0};

	if (is_scm_armv8_support()) {
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER);
		scm_arg.x1 = MAKE_SCM_ARGS(0x1);
		scm_arg.x2 = 0;
		scm_arg.atomic = true;
		ret = scm_call2(&scm_arg, NULL);
	} else {
		ret = scm_call_atomic(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER, 0);
	}

	/* Retry with the SCM_IO_DISABLE_PMIC_ARBITER1 func ID if the above func ID fails */
	if(ret) {
		if (is_scm_armv8_support()) {
			scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER1);
			scm_arg.x1 = MAKE_SCM_ARGS(0x1);
			scm_arg.x2 = 0;
			scm_arg.atomic = true;
			ret = scm_call2(&scm_arg, NULL);
		} else
			ret = scm_call_atomic(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER1, 0);
	}

	return ret;
}

/* Exception Level exec secure-os call
 * Jumps to kernel via secure-os and does not return
 * on successful jump. System parameters are set up and
 * passed on to secure-os and are utilized to boot the
 * kernel.
 *
 * @kernel_entry : kernel entry point passed in as link register.
 * @dtb_offset   : dt blob address passed in as w0.
 * @svc_id       : indicates direction of switch 32->64 or 64->32
 *
 * Assumes all sanity checks have been performed on arguments.
 */

void scm_elexec_call(paddr_t kernel_entry, paddr_t dtb_offset)
{
	uint32_t svc_id = SCM_SVC_MILESTONE_32_64_ID;
	uint32_t cmd_id = SCM_SVC_MILESTONE_CMD_ID;
	void *cmd_buf;
	size_t cmd_len;
	static el1_system_param param __attribute__((aligned(0x1000)));
	scmcall_arg scm_arg = {0};

	param.el1_x0 = dtb_offset;
	param.el1_elr = kernel_entry;

	/* Response Buffer = Null as no response expected */
	dprintf(INFO, "Jumping to kernel via monitor\n");

	if (!is_scm_armv8_support())
	{
		/* Command Buffer */
		cmd_buf = (void *)&param;
		cmd_len = sizeof(el1_system_param);

		scm_call(svc_id, cmd_id, cmd_buf, cmd_len, NULL, 0);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_MILESTONE_32_64_ID, SCM_SVC_MILESTONE_CMD_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READ);
		scm_arg.x2 = (uint32_t) &param;
		scm_arg.x3 = sizeof(el1_system_param);

		scm_call2(&scm_arg, NULL);
	}

	/* Assert if execution ever reaches here */
	dprintf(CRITICAL, "Failed to jump to kernel\n");
	ASSERT(0);
}

/* SCM Random Command */
int scm_random(uint32_t * rbuf, uint32_t r_len)
{
	int ret;
	struct tz_prng_data data;
	scmcall_arg scm_arg = {0};

	if (!is_scm_armv8_support())
	{
		data.out_buf = (uint8_t*) rbuf;
		data.out_buf_size = r_len;

		/*
		 * random buffer must be flushed/invalidated before and after TZ call.
		 */
		arch_clean_invalidate_cache_range((addr_t) rbuf, r_len);

		ret = scm_call(TZ_SVC_CRYPTO, PRNG_CMD_ID, &data, sizeof(data), NULL, 0);

		/* Invalidate the updated random buffer */
		arch_clean_invalidate_cache_range((addr_t) rbuf, r_len);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(TZ_SVC_CRYPTO, PRNG_CMD_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE);
		scm_arg.x2 = (uint32_t) rbuf;
		scm_arg.x3 = r_len;

		ret = scm_call2(&scm_arg, NULL);
		if (!ret)
			arch_clean_invalidate_cache_range((addr_t) rbuf, r_len);
		else
			dprintf(CRITICAL, "Secure canary SCM failed: %x\n", ret);
	}

	return ret;
}

void * get_canary()
{
	void * canary;
	if(scm_random((uint32_t *)&canary, sizeof(canary))) {
		dprintf(CRITICAL, "scm_call for random failed !!!");
		/*
		 * fall back to use lib rand API if scm call failed.
		 */
		canary = (void *)rand();
	}

	return canary;
}

int scm_xpu_err_fatal_init()
{
	uint32_t ret = 0;
	uint32_t response = 0;
	tz_xpu_prot_cmd cmd;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support())
	{
		cmd.config = ERR_FATAL_ENABLE;
		cmd.spare = 0;

		ret = scm_call(SVC_MEMORY_PROTECTION, XPU_ERR_FATAL, &cmd, sizeof(cmd), &response,
			       sizeof(response));
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SVC_MEMORY_PROTECTION, XPU_ERR_FATAL);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2);
		scm_arg.x2 = ERR_FATAL_ENABLE;
		scm_arg.x3 = 0x0;
		ret = scm_call2(&scm_arg, &scm_ret);
		response = scm_ret.x1;
	}

	if (ret)
		dprintf(CRITICAL, "Failed to set XPU violations as fatal errors: %u\n", response);
	else
		dprintf(INFO, "Configured XPU violations to be fatal errors\n");

	return ret;
}

static uint32_t scm_call_a32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, scmcall_ret *ret)
{
	register uint32_t r0 __asm__("r0") = x0;
	register uint32_t r1 __asm__("r1") = x1;
	register uint32_t r2 __asm__("r2") = x2;
	register uint32_t r3 __asm__("r3") = x3;
	register uint32_t r4 __asm__("r4") = x4;
	register uint32_t r5 __asm__("r5") = x5;
	register uint32_t r6 __asm__("r6") = 0;

	do {
		__asm__ volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r1")
			__asmeq("%2", "r2")
			__asmeq("%3", "r3")
			__asmeq("%4", "r0")
			__asmeq("%5", "r1")
			__asmeq("%6", "r2")
			__asmeq("%7", "r3")
			__asmeq("%8", "r4")
			__asmeq("%9", "r5")
			__asmeq("%10", "r6")
			"smc #0 @ switch to secure world\n"
			: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
			: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5), "r" (r6));
	} while(r0 == 1);

	if (ret)
	{
		ret->x1 = r1;
		ret->x2 = r2;
		ret->x3 = r3;
	}

	return r0;
}

uint32_t scm_call2(scmcall_arg *arg, scmcall_ret *ret)
{
	uint32_t *indir_arg = NULL;
	uint32_t x5;
	int i;
	uint32_t rc;

	arg->x0 = arg->atomic ? (arg->x0 | SCM_ATOMIC_BIT) : arg->x0;
	x5 = arg->x5[0];

	if ((arg->x1 & 0xF) > SCM_MAX_ARG_LEN - 1)
	{
		indir_arg = memalign(CACHE_LINE, (SCM_INDIR_MAX_LEN * sizeof(uint32_t)));
		ASSERT(indir_arg);

		for (i = 0; i < SCM_INDIR_MAX_LEN; i++)
		{
			indir_arg[i] = arg->x5[i];
		}
		arch_clean_invalidate_cache_range((addr_t) indir_arg, (SCM_INDIR_MAX_LEN * sizeof(uint32_t)));
		x5 = (addr_t) indir_arg;
	}

	rc = scm_call_a32(arg->x0, arg->x1, arg->x2, arg->x3, arg->x4, x5, ret);

	if (rc)
	{
		dprintf(CRITICAL, "SCM call: 0x%x failed with :%x\n", arg->x0, rc);
		/* Release the indirect-argument buffer on the error path as well */
		if (indir_arg)
			free(indir_arg);
		return rc;
	}

	if (indir_arg)
		free(indir_arg);

	return 0;
}

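/*
 * Sketch of the ARMv8 argument packing consumed by scm_call2() (see
 * mdtp_cipher_dip_cmd() above for a real five-argument caller).  The names
 * a0..a4 are placeholders:
 *
 *   scmcall_arg arg = {0};
 *
 *   arg.x0 = MAKE_SIP_SCM_CMD(svc, cmd);
 *   arg.x1 = MAKE_SCM_ARGS(0x5, ...);        // five arguments + their types
 *   arg.x2 = a0;
 *   arg.x3 = a1;
 *   arg.x4 = a2;
 *   arg.x5[0] = a3;                          // args beyond x4 spill into x5[]
 *   arg.x5[1] = a4;
 *   ret = scm_call2(&arg, &scm_ret);
 *
 * When more than SCM_MAX_ARG_LEN - 1 arguments are packed, scm_call2() copies
 * x5[] into a cache-line aligned indirect buffer and passes its address in r5.
 */
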
static bool secure_boot_enabled = true;
static bool wdog_debug_fuse_disabled = true;

void scm_check_boot_fuses()
{
	uint32_t ret = 0;
	uint32_t resp;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support()) {
		ret = scm_call(TZBSP_SVC_INFO, IS_SECURE_BOOT_ENABLED, NULL, 0, &resp, sizeof(resp));
	} else {
		scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, IS_SECURE_BOOT_ENABLED);
		ret = scm_call2(&scm_arg, &scm_ret);
		resp = scm_ret.x1;
	}

	/* Parse Bit 0 and Bit 2 of the response */
	if(!ret) {
		/* Bit 0 - SECBOOT_ENABLE_CHECK */
		if(resp & 0x1)
			secure_boot_enabled = false;
		/* Bit 2 - DEBUG_DISABLE_CHECK */
		if(resp & 0x4)
			wdog_debug_fuse_disabled = false;
	} else
		dprintf(CRITICAL, "scm call to check secure boot fuses failed\n");
}

bool is_secure_boot_enable()
{
	scm_check_boot_fuses();
	return secure_boot_enabled;
}

static uint32_t scm_io_read(addr_t address)
{
	uint32_t ret;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support()) {
		ret = scm_call_atomic(SCM_SVC_IO, SCM_IO_READ, address);
	} else {
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_IO, SCM_IO_READ);
		scm_arg.x1 = MAKE_SCM_ARGS(0x1);
		scm_arg.x2 = address;
		scm_arg.atomic = true;
		ret = scm_call2(&scm_arg, &scm_ret);
		/* Return the value read if the call is successful */
		if (!ret)
			ret = scm_ret.x1;
	}
	return ret;
}

uint32_t scm_io_write(uint32_t address, uint32_t val)
{
	uint32_t ret;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support()) {
		ret = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, val);
	} else {
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_IO, SCM_IO_WRITE);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2);
		scm_arg.x2 = address;
		scm_arg.x3 = val;
		scm_arg.atomic = true;
		ret = scm_call2(&scm_arg, &scm_ret);
	}
	return ret;
}

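/*
 * Usage sketch (mirrors scm_dload_mode()/scm_device_enter_dload() below):
 * secure IO read/write of the TCSR boot-misc cookie.
 *
 *   scm_io_write(TCSR_BOOT_MISC_DETECT, SCM_DLOAD_MODE);
 *   if (scm_io_read(TCSR_BOOT_MISC_DETECT) == SCM_DLOAD_MODE)
 *           dprintf(INFO, "download cookie set\n");
 */
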
int scm_call2_atomic(uint32_t svc, uint32_t cmd, uint32_t arg1, uint32_t arg2)
{
	uint32_t ret = 0;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support())
	{
		ret = scm_call_atomic2(svc, cmd, arg1, arg2);
	} else {
		scm_arg.x0 = MAKE_SIP_SCM_CMD(svc, cmd);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2);
		scm_arg.x2 = arg1;
		scm_arg.x3 = arg2;
		ret = scm_call2(&scm_arg, &scm_ret);
	}
	return ret;
}

#if PLATFORM_USE_SCM_DLOAD
int scm_dload_mode(int mode)
{
	int ret = 0;
	uint32_t dload_type;

	dprintf(SPEW, "DLOAD mode: %d\n", mode);
	if (mode == NORMAL_DLOAD)
		dload_type = SCM_DLOAD_MODE;
	else if(mode == EMERGENCY_DLOAD)
		dload_type = SCM_EDLOAD_MODE;
	else
		dload_type = 0;

	/* Write to the Boot MISC register */
	ret = is_scm_call_available(SCM_SVC_BOOT, SCM_DLOAD_CMD);

	if (ret > 0)
		ret = scm_call2_atomic(SCM_SVC_BOOT, SCM_DLOAD_CMD, dload_type, 0);
	else
		ret = scm_io_write(TCSR_BOOT_MISC_DETECT, dload_type);

	if(ret) {
		dprintf(CRITICAL, "Failed to write to boot misc: %d\n", ret);
		return ret;
	}

	scm_check_boot_fuses();

	/* Make WDOG_DEBUG DISABLE scm call only in non-secure boot */
	if(!(secure_boot_enabled || wdog_debug_fuse_disabled)) {
		ret = scm_call2_atomic(SCM_SVC_BOOT, WDOG_DEBUG_DISABLE, 1, 0);
		if(ret)
			dprintf(CRITICAL, "Failed to disable the wdog debug \n");
	}

	return ret;
}

bool scm_device_enter_dload()
{
	uint32_t ret = 0;
	uint32_t dload_mode = 0;

	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	scm_arg.x0 = MAKE_SIP_SCM_CMD(TZ_SVC_DLOAD_MODE, SCM_DLOAD_CMD);
	ret = scm_call2(&scm_arg, &scm_ret);
	if (ret)
		dprintf(CRITICAL, "SCM call to check dload mode failed: %x\n", ret);

	if (!ret)
	{
		dload_mode = scm_io_read(TCSR_BOOT_MISC_DETECT);
		if (board_soc_version() < 0x30000)
			dload_mode = (dload_mode >> 16) & 0xFFFF;
	}

	if (dload_mode == SCM_DLOAD_MODE)
		return true;

	return false;
}
#endif