Mayank Grover59f4a372017-01-27 18:02:51 +05301/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
Deepa Dinamani904f8f82012-12-05 16:35:01 -08002 *
Shashank Mittal162244e2011-08-08 19:01:25 -07003 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are
5 * met:
Deepa Dinamani904f8f82012-12-05 16:35:01 -08006 * * Redistributions of source code must retain the above copyright
7 * notice, this list of conditions and the following disclaimer.
8 * * Redistributions in binary form must reproduce the above
9 * copyright notice, this list of conditions and the following
10 * disclaimer in the documentation and/or other materials provided
11 * with the distribution.
12 * * Neither the name of The Linux Foundation nor the names of its
13 * contributors may be used to endorse or promote products derived
14 * from this software without specific prior written permission.
Shashank Mittal162244e2011-08-08 19:01:25 -070015 *
16 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
17 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
23 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
25 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
26 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <stdlib.h>
30#include <string.h>
31#include <err.h>
Channagoud Kadabi179df0b2013-12-12 14:53:31 -080032#include <asm.h>
33#include <bits.h>
Neeti Desai127b9e02012-03-20 16:11:23 -070034#include <arch/ops.h>
vijay kumar4f4405f2014-08-08 11:49:53 +053035#include <rand.h>
36#include <image_verify.h>
Aparna Mallavarapu664ea772015-02-24 18:44:33 +053037#include <dload_util.h>
38#include <platform/iomap.h>
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -070039#include <board.h>
Shashank Mittal162244e2011-08-08 19:01:25 -070040#include "scm.h"
41
42#pragma GCC optimize ("O0")
43
44/* From Linux Kernel asm/system.h */
45#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
46
47#ifndef offsetof
48# define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
49#endif
50
Channagoud Kadabi179df0b2013-12-12 14:53:31 -080051#define SCM_CLASS_REGISTER (0x2 << 8)
52#define SCM_MASK_IRQS BIT(5)
53#define SCM_ATOMIC(svc, cmd, n) ((((((svc) & 0x3f) << 10)|((cmd) & 0x3ff)) << 12) | \
54 SCM_CLASS_REGISTER | \
55 SCM_MASK_IRQS | \
56 ((n) & 0xf))
57
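/*
 * Illustrative note (not part of the original source): SCM_ATOMIC() packs the
 * service id, command id, register-class marker, IRQ-mask flag and argument
 * count into the single command word passed to the secure monitor in r0. For
 * example, the atomic PMIC-arbiter halt issued later in this file expands
 * roughly as:
 *
 *	uint32_t id = SCM_ATOMIC(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER, 1);
 *	// id == ((((SCM_SVC_PWR & 0x3f) << 10) |
 *	//         (SCM_IO_DISABLE_PMIC_ARBITER & 0x3ff)) << 12) |
 *	//        SCM_CLASS_REGISTER | SCM_MASK_IRQS | (1 & 0xf)
 */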
Mayank Grover8bcdd972016-12-02 14:58:07 +053058#define SECBOOT_FUSE_BIT 0
59#define SECBOOT_FUSE_SHK_BIT 1
60#define SECBOOT_FUSE_DEBUG_DISABLED_BIT 2
61#define SECBOOT_FUSE_ANTI_ROLLBACK_BIT 3
62#define SECBOOT_FUSE_FEC_ENABLED_BIT 4
63#define SECBOOT_FUSE_RPMB_ENABLED_BIT 5
64#define SECBOOT_FUSE_DEBUG_RE_ENABLED_BIT 6
65#define CHECK_BIT(var, pos) ((var) & (1 << (pos)))
66
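/*
 * Illustrative note (added commentary, based on scm_check_boot_fuses() further
 * down): a device is treated as "secure" only when bits 0, 1, 2 and 5 of the
 * returned fuse status word are clear and bit 6 is set; the anti-rollback and
 * FEC bits (3 and 4) are not consulted. For example:
 *
 *	uint32_t status = BIT(SECBOOT_FUSE_DEBUG_RE_ENABLED_BIT);	// 0x40
 *	// CHECK_BIT(status, SECBOOT_FUSE_BIT) == 0 and
 *	// CHECK_BIT(status, SECBOOT_FUSE_DEBUG_RE_ENABLED_BIT) != 0,
 *	// so scm_check_boot_fuses() would set secure_boot_enabled = true.
 */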
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -070067/* SCM interface as per ARM spec present? */
68bool scm_arm_support;
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -070069static bool scm_initialized;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -070070
Dinesh K Garg6bbbb702015-01-30 11:13:31 -080071bool is_scm_armv8_support()
72{
Channagoud Kadabi86e1e822015-11-02 11:32:34 -080073#if !NO_SCM_V8_SUPPORT
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -070074 if (!scm_initialized)
75 {
76 scm_init();
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -070077 }
Channagoud Kadabi86e1e822015-11-02 11:32:34 -080078#endif
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -070079
Dinesh K Garg6bbbb702015-01-30 11:13:31 -080080 return scm_arm_support;
81}
82
Channagoud Kadabi77f46a32015-08-05 16:13:13 -070083int is_scm_call_available(uint32_t svc_id, uint32_t cmd_id)
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -070084{
vijay kumar496a2ff2015-07-22 21:22:48 +053085 int ret;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -070086 scmcall_arg scm_arg = {0};
Veera Sundaram Sankaran00181512014-12-09 11:23:39 -080087 scmcall_ret scm_ret = {0};
Channagoud Kadabia2184b82015-07-07 10:09:32 -070088
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -070089 scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);
90 scm_arg.x1 = MAKE_SCM_ARGS(0x1);
91 scm_arg.x2 = MAKE_SIP_SCM_CMD(svc_id, cmd_id);
92
93 ret = scm_call2(&scm_arg, &scm_ret);
94
95 if (!ret)
Channagoud Kadabia2184b82015-07-07 10:09:32 -070096 return scm_ret.x1;
97
98 return ret;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -070099}
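/*
 * Note on is_scm_call_available() above (descriptive addition, not in the
 * original source): the return value is three-state -- negative when the
 * probing SCM call itself fails, zero when the queried service/command pair
 * is not exposed by the secure monitor, and positive when it is available.
 * Callers in this file therefore test for "> 0", mirroring scm_dload_mode()
 * below:
 *
 *	if (is_scm_call_available(SCM_SVC_BOOT, SCM_DLOAD_CMD) > 0)
 *		ret = scm_call2_atomic(SCM_SVC_BOOT, SCM_DLOAD_CMD, dload_type, 0);
 *	else
 *		ret = scm_io_write(TCSR_BOOT_MISC_DETECT, dload_type);
 */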
100
Channagoud Kadabia2184b82015-07-07 10:09:32 -0700101static int scm_arm_support_available(uint32_t svc_id, uint32_t cmd_id)
102{
vijay kumar496a2ff2015-07-22 21:22:48 +0530103 int ret;
Channagoud Kadabia2184b82015-07-07 10:09:32 -0700104
105 ret = is_scm_call_available(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);
106
107 if (ret > 0)
108 scm_arm_support = true;
109
110 return ret;
111}
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700112
113void scm_init()
114{
Channagoud Kadabia2184b82015-07-07 10:09:32 -0700115 int ret;
116
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -0700117 if (scm_initialized)
118 return;
119
Channagoud Kadabia2184b82015-07-07 10:09:32 -0700120 ret = scm_arm_support_available(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);
121
vijay kumar496a2ff2015-07-22 21:22:48 +0530122 if (ret < 0)
Channagoud Kadabia2184b82015-07-07 10:09:32 -0700123 dprintf(CRITICAL, "Failed to initialize SCM\n");
lijuang1cff8382016-01-11 17:56:54 +0800124
125 scm_initialized = true;
126
127#if DISABLE_DLOAD_MODE
128 scm_disable_sdi();
129#endif
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700130}
Channagoud Kadabi179df0b2013-12-12 14:53:31 -0800131
Shashank Mittal162244e2011-08-08 19:01:25 -0700132/**
133 * alloc_scm_command() - Allocate an SCM command
134 * @cmd_size: size of the command buffer
135 * @resp_size: size of the response buffer
136 *
137 * Allocate an SCM command, including enough room for the command
138 * and response headers as well as the command and response buffers.
139 *
140 * Returns a valid &scm_command on success or %NULL if the allocation fails.
141 */
142static struct scm_command *alloc_scm_command(size_t cmd_size, size_t resp_size)
143{
144 struct scm_command *cmd;
145 size_t len = sizeof(*cmd) + sizeof(struct scm_response) + cmd_size +
Ajay Dudanib01e5062011-12-03 23:23:42 -0800146 resp_size;
Shashank Mittal162244e2011-08-08 19:01:25 -0700147
Deepa Dinamani904f8f82012-12-05 16:35:01 -0800148 cmd = memalign(CACHE_LINE, ROUNDUP(len, CACHE_LINE));
Ajay Dudanib01e5062011-12-03 23:23:42 -0800149 if (cmd) {
Pavel Nedev80ce36f2014-01-06 14:26:17 +0200150 memset(cmd, 0, len);
Shashank Mittal162244e2011-08-08 19:01:25 -0700151 cmd->len = len;
152 cmd->buf_offset = offsetof(struct scm_command, buf);
153 cmd->resp_hdr_offset = cmd->buf_offset + cmd_size;
154 }
155 return cmd;
156}
157
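/*
 * Layout sketch (illustrative only, derived from alloc_scm_command() above):
 * a single cache-line aligned allocation carries both headers and payloads,
 * and the helpers below return pointers into it:
 *
 *	+---------------------+  <- cmd
 *	| struct scm_command  |     cmd->buf_offset = offsetof(scm_command, buf)
 *	+---------------------+  <- scm_get_command_buffer(cmd)
 *	| command payload     |     cmd_size bytes
 *	+---------------------+  <- (void *)cmd + cmd->resp_hdr_offset
 *	| struct scm_response |     completion flag/offsets updated by TZ
 *	+---------------------+  <- scm_get_response_buffer(rsp)
 *	| response payload    |     resp_size bytes
 *	+---------------------+
 */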
158/**
159 * free_scm_command() - Free an SCM command
160 * @cmd: command to free
161 *
162 * Free an SCM command.
163 */
164static inline void free_scm_command(struct scm_command *cmd)
165{
166 free(cmd);
167}
168
169/**
170 * scm_command_to_response() - Get a pointer to a scm_response
171 * @cmd: command
172 *
173 * Returns a pointer to a response for a command.
174 */
Ajay Dudanib01e5062011-12-03 23:23:42 -0800175static inline struct scm_response *scm_command_to_response(const struct
176 scm_command *cmd)
Shashank Mittal162244e2011-08-08 19:01:25 -0700177{
178 return (void *)cmd + cmd->resp_hdr_offset;
179}
180
181/**
182 * scm_get_command_buffer() - Get a pointer to a command buffer
183 * @cmd: command
184 *
185 * Returns a pointer to the command buffer of a command.
186 */
187static inline void *scm_get_command_buffer(const struct scm_command *cmd)
188{
189 return (void *)cmd->buf;
190}
191
192/**
193 * scm_get_response_buffer() - Get a pointer to a response buffer
194 * @rsp: response
195 *
196 * Returns a pointer to a response buffer of a response.
197 */
198static inline void *scm_get_response_buffer(const struct scm_response *rsp)
199{
200 return (void *)rsp + rsp->buf_offset;
201}
202
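/*
 * Descriptive note on smc() below (added commentary; treat the details as an
 * assumption based on the inline assembly and the matching legacy Linux SCM
 * driver): r0 carries the call type (1 for a command/register call), r1 holds
 * the address of a context-id word the monitor may use, and r2 holds the
 * address of the flushed scm_command buffer. The "cmp r0, #1; beq 1b"
 * sequence re-issues the SMC while the monitor reports that the call was
 * interrupted.
 */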
203static uint32_t smc(uint32_t cmd_addr)
204{
205 uint32_t context_id;
206 register uint32_t r0 __asm__("r0") = 1;
Ajay Dudanib01e5062011-12-03 23:23:42 -0800207 register uint32_t r1 __asm__("r1") = (uint32_t) & context_id;
Shashank Mittal162244e2011-08-08 19:01:25 -0700208 register uint32_t r2 __asm__("r2") = cmd_addr;
Ajay Dudanib01e5062011-12-03 23:23:42 -0800209 __asm__("1:smc #0 @ switch to secure world\n" "cmp r0, #1 \n" "beq 1b \n": "=r"(r0): "r"(r0), "r"(r1), "r"(r2):"r3", "cc");
Shashank Mittal162244e2011-08-08 19:01:25 -0700210 return r0;
211}
212
213/**
Channagoud Kadabi179df0b2013-12-12 14:53:31 -0800214* scm_call_atomic: Make an SCM call with one or no argument
215* @svc: service id
216* @cmd: command id
 217* @arg1: argument
218*/
219
220static int scm_call_atomic(uint32_t svc, uint32_t cmd, uint32_t arg1)
221{
222 uint32_t context_id;
223 register uint32_t r0 __asm__("r0") = SCM_ATOMIC(svc, cmd, 1);
vijay kumar4f4405f2014-08-08 11:49:53 +0530224 register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
Channagoud Kadabi179df0b2013-12-12 14:53:31 -0800225 register uint32_t r2 __asm__("r2") = arg1;
226
227 __asm__ volatile(
228 __asmeq("%0", "r0")
229 __asmeq("%1", "r0")
230 __asmeq("%2", "r1")
231 __asmeq("%3", "r2")
232 "smc #0 @ switch to secure world\n"
233 : "=r" (r0)
234 : "r" (r0), "r" (r1), "r" (r2)
235 : "r3");
236 return r0;
237}
238
239/**
Aparna Mallavarapu68e233f2014-03-21 19:18:34 +0530240 * scm_call_atomic2() - Send an atomic SCM command with two arguments
241 * @svc_id: service identifier
242 * @cmd_id: command identifier
243 * @arg1: first argument
244 * @arg2: second argument
245 *
246 * This shall only be used with commands that are guaranteed to be
 247 * uninterruptible, atomic and SMP safe.
248 */
249int scm_call_atomic2(uint32_t svc, uint32_t cmd, uint32_t arg1, uint32_t arg2)
250{
251 int context_id;
252 register uint32_t r0 __asm__("r0") = SCM_ATOMIC(svc, cmd, 2);
vijay kumar4f4405f2014-08-08 11:49:53 +0530253 register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
Aparna Mallavarapu68e233f2014-03-21 19:18:34 +0530254 register uint32_t r2 __asm__("r2") = arg1;
255 register uint32_t r3 __asm__("r3") = arg2;
256
257 __asm__ volatile(
258 __asmeq("%0", "r0")
259 __asmeq("%1", "r0")
260 __asmeq("%2", "r1")
261 __asmeq("%3", "r2")
262 __asmeq("%4", "r3")
263 "smc #0 @ switch to secure world\n"
264 : "=r" (r0)
265 : "r" (r0), "r" (r1), "r" (r2), "r" (r3));
266 return r0;
267}
268
269/**
Shashank Mittal162244e2011-08-08 19:01:25 -0700270 * scm_call() - Send an SCM command
271 * @svc_id: service identifier
272 * @cmd_id: command identifier
273 * @cmd_buf: command buffer
274 * @cmd_len: length of the command buffer
275 * @resp_buf: response buffer
276 * @resp_len: length of the response buffer
277 *
278 * Sends a command to the SCM and waits for the command to finish processing.
279 */
Ajay Dudanib01e5062011-12-03 23:23:42 -0800280int
281scm_call(uint32_t svc_id, uint32_t cmd_id, const void *cmd_buf,
282 size_t cmd_len, void *resp_buf, size_t resp_len)
Shashank Mittal162244e2011-08-08 19:01:25 -0700283{
284 int ret;
285 struct scm_command *cmd;
286 struct scm_response *rsp;
Neeti Desai127b9e02012-03-20 16:11:23 -0700287 uint8_t *resp_ptr;
Shashank Mittal162244e2011-08-08 19:01:25 -0700288
289 cmd = alloc_scm_command(cmd_len, resp_len);
290 if (!cmd)
291 return ERR_NO_MEMORY;
292
293 cmd->id = (svc_id << 10) | cmd_id;
294 if (cmd_buf)
295 memcpy(scm_get_command_buffer(cmd), cmd_buf, cmd_len);
296
Neeti Desai127b9e02012-03-20 16:11:23 -0700297 /* Flush command to main memory for TZ */
298 arch_clean_invalidate_cache_range((addr_t) cmd, cmd->len);
299
Ajay Dudanib01e5062011-12-03 23:23:42 -0800300 ret = smc((uint32_t) cmd);
Shashank Mittal162244e2011-08-08 19:01:25 -0700301 if (ret)
302 goto out;
303
Ajay Dudanib01e5062011-12-03 23:23:42 -0800304 if (resp_len) {
Shashank Mittal162244e2011-08-08 19:01:25 -0700305 rsp = scm_command_to_response(cmd);
306
Neeti Desai127b9e02012-03-20 16:11:23 -0700307 do
308 {
309 /* Need to invalidate before each check since TZ will update
310 * the response complete flag in main memory.
311 */
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800312 arch_invalidate_cache_range((addr_t) rsp, sizeof(*rsp));
Neeti Desai127b9e02012-03-20 16:11:23 -0700313 } while (!rsp->is_complete);
314
315
316 resp_ptr = scm_get_response_buffer(rsp);
317
318 /* Invalidate any cached response data */
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800319 arch_invalidate_cache_range((addr_t) resp_ptr, resp_len);
Shashank Mittal162244e2011-08-08 19:01:25 -0700320
321 if (resp_buf)
Neeti Desai127b9e02012-03-20 16:11:23 -0700322 memcpy(resp_buf, resp_ptr, resp_len);
Shashank Mittal162244e2011-08-08 19:01:25 -0700323 }
Ajay Dudanib01e5062011-12-03 23:23:42 -0800324 out:
Shashank Mittal162244e2011-08-08 19:01:25 -0700325 free_scm_command(cmd);
326 return ret;
327}
328
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800329int restore_secure_cfg(uint32_t id)
330{
Channagoud Kadabiacaa75e2014-06-09 16:29:29 -0700331 int ret = 0;
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800332 tz_secure_cfg secure_cfg;
333
Siddhartha Agrawald4648892013-02-17 18:16:18 -0800334 secure_cfg.id = id;
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800335 secure_cfg.spare = 0;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700336 scmcall_arg scm_arg = {0};
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800337
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -0700338 if(!is_scm_armv8_support())
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700339 {
340 ret = scm_call(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG, &secure_cfg, sizeof(secure_cfg),
341 NULL, 0);
342 }
343 else
344 {
345 scm_arg.x0 = MAKE_SIP_SCM_CMD(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG);
346 scm_arg.x1 = MAKE_SCM_ARGS(0x2);
347 scm_arg.x2 = id;
348 scm_arg.x3 = 0x0; /* Spare unused */
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800349
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700350 ret = scm_call2(&scm_arg, NULL);
351 }
352
353 if (ret)
354 {
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800355 dprintf(CRITICAL, "Secure Config failed\n");
356 ret = 1;
Channagoud Kadabiacaa75e2014-06-09 16:29:29 -0700357 }
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800358
359 return ret;
Siddhartha Agrawaleb094c52013-01-28 12:11:43 -0800360}
361
Neeti Desai127b9e02012-03-20 16:11:23 -0700362/* SCM Encrypt Command */
363int encrypt_scm(uint32_t ** img_ptr, uint32_t * img_len_ptr)
Shashank Mittal162244e2011-08-08 19:01:25 -0700364{
Neeti Desai127b9e02012-03-20 16:11:23 -0700365 int ret;
366 img_req cmd;
vijay kumare95092d2014-10-20 19:24:49 +0530367 scmcall_arg scm_arg = {0};
Shashank Mittal162244e2011-08-08 19:01:25 -0700368
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700369
Neeti Desai127b9e02012-03-20 16:11:23 -0700370 cmd.img_ptr = (uint32*) img_ptr;
371 cmd.img_len_ptr = img_len_ptr;
Shashank Mittal162244e2011-08-08 19:01:25 -0700372
Neeti Desai127b9e02012-03-20 16:11:23 -0700373 /* Image data is operated upon by TZ, which accesses only the main memory.
374 * It must be flushed/invalidated before and after TZ call.
375 */
376 arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);
Shashank Mittal162244e2011-08-08 19:01:25 -0700377
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -0700378 if (!is_scm_armv8_support())
vijay kumare95092d2014-10-20 19:24:49 +0530379 {
380 ret = scm_call(SCM_SVC_SSD, SSD_ENCRYPT_ID, &cmd, sizeof(cmd), NULL, 0);
381 }
382 else
383 {
384 scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD,SSD_ENCRYPT_ID);
vijay kumar83b50d62015-01-09 19:09:59 +0530385 scm_arg.x1 = MAKE_SCM_ARGS(0x2,SMC_PARAM_TYPE_BUFFER_READWRITE,SMC_PARAM_TYPE_BUFFER_READWRITE);
Veera Sundaram Sankaran00181512014-12-09 11:23:39 -0800386 scm_arg.x2 = (uint32_t) cmd.img_ptr;
387 scm_arg.x3 = (uint32_t) cmd.img_len_ptr;
vijay kumare95092d2014-10-20 19:24:49 +0530388
389 ret = scm_call2(&scm_arg, NULL);
390 }
Shashank Mittal162244e2011-08-08 19:01:25 -0700391
Neeti Desai127b9e02012-03-20 16:11:23 -0700392 /* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
393 * before we use them.
Amol Jadi55e58da2011-11-17 14:03:34 -0800394 */
395 arch_clean_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
Neeti Desai127b9e02012-03-20 16:11:23 -0700396 arch_clean_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));
Amol Jadi55e58da2011-11-17 14:03:34 -0800397
Neeti Desai127b9e02012-03-20 16:11:23 -0700398 /* Invalidate the updated image data */
399 arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);
Amol Jadi55e58da2011-11-17 14:03:34 -0800400
Shashank Mittal162244e2011-08-08 19:01:25 -0700401 return ret;
402}
403
Neeti Desai127b9e02012-03-20 16:11:23 -0700404/* SCM Decrypt Command */
405int decrypt_scm(uint32_t ** img_ptr, uint32_t * img_len_ptr)
406{
407 int ret;
408 img_req cmd;
409
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -0700410 if (is_scm_armv8_support())
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700411 {
412 dprintf(INFO, "%s:SCM call is not supported\n",__func__);
413 return -1;
414 }
415
Neeti Desai127b9e02012-03-20 16:11:23 -0700416 cmd.img_ptr = (uint32*) img_ptr;
417 cmd.img_len_ptr = img_len_ptr;
418
419 /* Image data is operated upon by TZ, which accesses only the main memory.
420 * It must be flushed/invalidated before and after TZ call.
421 */
422 arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);
423
424 ret = scm_call(SCM_SVC_SSD, SSD_DECRYPT_ID, &cmd, sizeof(cmd), NULL, 0);
425
426 /* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
427 * before we use them.
428 */
429 arch_clean_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
430 arch_clean_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));
431
432 /* Invalidate the updated image data */
433 arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);
434
435 return ret;
436}
437
438
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800439static int ssd_image_is_encrypted(uint32_t ** img_ptr, uint32_t * img_len_ptr, uint32 * ctx_id)
440{
sundarajan srinivasan6173b872013-03-13 17:36:48 -0700441 int ret = 0;
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800442 ssd_parse_md_req parse_req;
443 ssd_parse_md_rsp parse_rsp;
sundarajan srinivasan6173b872013-03-13 17:36:48 -0700444 int prev_len = 0;
vijay kumare95092d2014-10-20 19:24:49 +0530445 scmcall_arg scm_arg = {0};
446 scmcall_ret scm_ret = {0};
sundarajan srinivasan6173b872013-03-13 17:36:48 -0700447 /* Populate meta-data ptr. Here md_len is the meta-data length.
 448 * The code below follows a growing-length approach. First send
 449 * min(*img_len_ptr, SSD_HEADER_MIN_SIZE) bytes (128, for example).
 450 * If parse_rsp.status = PARSING_INCOMPLETE we send md_len = 256.
 451 * If the subsequent status is PARSING_INCOMPLETE we send md_len = 512,
 452 * 1024 bytes and so on until we get a valid response (rsp.status) from TZ. */
453
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800454 parse_req.md = (uint32*)*img_ptr;
sundarajan srinivasan6173b872013-03-13 17:36:48 -0700455 parse_req.md_len = ((*img_len_ptr) >= SSD_HEADER_MIN_SIZE) ? SSD_HEADER_MIN_SIZE : (*img_len_ptr);
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800456
sundarajan srinivasan6173b872013-03-13 17:36:48 -0700457 arch_clean_invalidate_cache_range((addr_t) *img_ptr, parse_req.md_len);
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800458
sundarajan srinivasan6173b872013-03-13 17:36:48 -0700459 do
460 {
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -0700461 if (!is_scm_armv8_support())
vijay kumare95092d2014-10-20 19:24:49 +0530462 {
463 ret = scm_call(SCM_SVC_SSD,
464 SSD_PARSE_MD_ID,
465 &parse_req,
466 sizeof(parse_req),
467 &parse_rsp,
468 sizeof(parse_rsp));
469 }
470 else
471 {
472 scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_PARSE_MD_ID);
473 scm_arg.x1 = MAKE_SCM_ARGS(0x2,SMC_PARAM_TYPE_VALUE,SMC_PARAM_TYPE_BUFFER_READWRITE);
474 scm_arg.x2 = parse_req.md_len;
Veera Sundaram Sankaran00181512014-12-09 11:23:39 -0800475 scm_arg.x3 = (uint32_t) parse_req.md;
Aparna Mallavarapud83990a2014-12-24 12:54:35 +0530476 scm_arg.atomic = true;
sundarajan srinivasan6173b872013-03-13 17:36:48 -0700477
vijay kumare95092d2014-10-20 19:24:49 +0530478 ret = scm_call2(&scm_arg, &scm_ret);
479 parse_rsp.status = scm_ret.x1;
480 }
sundarajan srinivasan6173b872013-03-13 17:36:48 -0700481 if(!ret && (parse_rsp.status == SSD_PMD_PARSING_INCOMPLETE))
482 {
483 prev_len = parse_req.md_len;
484
485 parse_req.md_len *= MULTIPLICATION_FACTOR;
486
Venkatesh Yadav Abbarapuaf7bfe02013-11-11 16:56:04 +0530487 arch_clean_invalidate_cache_range((addr_t) (*img_ptr + prev_len),
sundarajan srinivasan6173b872013-03-13 17:36:48 -0700488 (parse_req.md_len - prev_len) );
489
490 continue;
491 }
492 else
493 break;
494
495 } while(true);
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800496
497 if(!ret)
498 {
499 if(parse_rsp.status == SSD_PMD_ENCRYPTED)
500 {
501 *ctx_id = parse_rsp.md_ctx_id;
Sundarajan Srinivasaneb6d2202013-06-04 14:24:10 -0700502 *img_len_ptr = *img_len_ptr - ((uint8_t*)parse_rsp.md_end_ptr - (uint8_t*)*img_ptr);
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800503 *img_ptr = (uint32_t*)parse_rsp.md_end_ptr;
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800504 }
Sundarajan Srinivasaneb6d2202013-06-04 14:24:10 -0700505
506 ret = parse_rsp.status;
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800507 }
sundarajan srinivasan6173b872013-03-13 17:36:48 -0700508 else
509 {
510 dprintf(CRITICAL,"ssd_image_is_encrypted call failed");
511
512 ASSERT(ret == 0);
513 }
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800514
515 return ret;
516}
517
518int decrypt_scm_v2(uint32_t ** img_ptr, uint32_t * img_len_ptr)
519{
520 int ret = 0;
521 uint32 ctx_id = 0;
522 ssd_decrypt_img_frag_req decrypt_req;
523 ssd_decrypt_img_frag_rsp decrypt_rsp;
vijay kumare95092d2014-10-20 19:24:49 +0530524 scmcall_arg scm_arg = {0};
525 scmcall_ret scm_ret = {0};
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800526
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700527
Sundarajan Srinivasaneb6d2202013-06-04 14:24:10 -0700528 ret = ssd_image_is_encrypted(img_ptr,img_len_ptr,&ctx_id);
529 switch(ret)
sundarajan srinivasan6173b872013-03-13 17:36:48 -0700530 {
Sundarajan Srinivasaneb6d2202013-06-04 14:24:10 -0700531 case SSD_PMD_ENCRYPTED:
532 /* Image data is operated upon by TZ, which accesses only the main memory.
533 * It must be flushed/invalidated before and after TZ call.
534 */
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800535
Sundarajan Srinivasaneb6d2202013-06-04 14:24:10 -0700536 arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800537
Sundarajan Srinivasaneb6d2202013-06-04 14:24:10 -0700538 /*decrypt the image here*/
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800539
Sundarajan Srinivasaneb6d2202013-06-04 14:24:10 -0700540 decrypt_req.md_ctx_id = ctx_id;
541 decrypt_req.last_frag = 1;
542 decrypt_req.frag_len = *img_len_ptr;
543 decrypt_req.frag = *img_ptr;
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800544
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -0700545 if (!is_scm_armv8_support())
vijay kumare95092d2014-10-20 19:24:49 +0530546 {
547 ret = scm_call(SCM_SVC_SSD,
548 SSD_DECRYPT_IMG_FRAG_ID,
549 &decrypt_req,
550 sizeof(decrypt_req),
551 &decrypt_rsp,
552 sizeof(decrypt_rsp));
553 }
554 else
555 {
556 scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_DECRYPT_IMG_FRAG_ID);
557 scm_arg.x1 = MAKE_SCM_ARGS(0x4,SMC_PARAM_TYPE_VALUE,SMC_PARAM_TYPE_VALUE,SMC_PARAM_TYPE_VALUE,SMC_PARAM_TYPE_BUFFER_READWRITE);
558 scm_arg.x2 = decrypt_req.md_ctx_id;
559 scm_arg.x3 = decrypt_req.last_frag;
560 scm_arg.x4 = decrypt_req.frag_len;
Veera Sundaram Sankaran00181512014-12-09 11:23:39 -0800561 scm_arg.x5[0] = (uint32_t) decrypt_req.frag;
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800562
vijay kumare95092d2014-10-20 19:24:49 +0530563 ret = scm_call2(&scm_arg, &scm_ret);
564 decrypt_rsp.status = scm_ret.x1;
565 }
Sundarajan Srinivasaneb6d2202013-06-04 14:24:10 -0700566 if(!ret){
567 ret = decrypt_rsp.status;
568 }
sundarajan srinivasan6173b872013-03-13 17:36:48 -0700569
Sundarajan Srinivasaneb6d2202013-06-04 14:24:10 -0700570 /* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
571 * before we use them.
572 */
573 arch_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
574 arch_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800575
Sundarajan Srinivasaneb6d2202013-06-04 14:24:10 -0700576 /* Invalidate the updated image data */
577 arch_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);
sundarajan srinivasan6173b872013-03-13 17:36:48 -0700578
Sundarajan Srinivasaneb6d2202013-06-04 14:24:10 -0700579 break;
580
581 case SSD_PMD_NOT_ENCRYPTED:
582 case SSD_PMD_NO_MD_FOUND:
583 ret = 0;
584 break;
585
586 case SSD_PMD_BUSY:
587 case SSD_PMD_BAD_MD_PTR_OR_LEN:
588 case SSD_PMD_PARSING_INCOMPLETE:
589 case SSD_PMD_PARSING_FAILED:
590 case SSD_PMD_SETUP_CIPHER_FAILED:
591 dprintf(CRITICAL,"decrypt_scm_v2: failed status %d\n",ret);
592 break;
593
594 default:
595 dprintf(CRITICAL,"decrypt_scm_v2: case default: failed status %d\n",ret);
596 break;
sundarajan srinivasan6173b872013-03-13 17:36:48 -0700597 }
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800598 return ret;
599}
600
601int scm_svc_version(uint32 * major, uint32 * minor)
602{
603 feature_version_req feature_req;
604 feature_version_rsp feature_rsp;
605 int ret = 0;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700606 scmcall_arg scm_arg = {0};
607 scmcall_ret scm_ret = {0};
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800608
609 feature_req.feature_id = TZBSP_FVER_SSD;
610
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -0700611 if (!is_scm_armv8_support())
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700612 {
613 ret = scm_call(TZBSP_SVC_INFO,
614 TZ_INFO_GET_FEATURE_ID,
615 &feature_req,
616 sizeof(feature_req),
617 &feature_rsp,
618 sizeof(feature_rsp));
619 }
620 else
621 {
622 scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, TZ_INFO_GET_FEATURE_ID);
vijay kumare95092d2014-10-20 19:24:49 +0530623 scm_arg.x1 = MAKE_SCM_ARGS(0x1,SMC_PARAM_TYPE_VALUE);
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700624 scm_arg.x2 = feature_req.feature_id;
625
626 ret = scm_call2(&scm_arg, &scm_ret);
627 feature_rsp.version = scm_ret.x1;
628 }
629
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800630 if(!ret)
631 *major = TZBSP_GET_FEATURE_VERSION(feature_rsp.version);
632
633 return ret;
634}
635
Amit Blaybdfabc62015-01-29 22:04:13 +0200636int scm_svc_get_secure_state(uint32_t *state_low, uint32_t *state_high)
637{
638 get_secure_state_req req;
639 get_secure_state_rsp rsp;
640
641 int ret = 0;
642
643 scmcall_arg scm_arg = {0};
644 scmcall_ret scm_ret = {0};
645
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -0700646 if (!is_scm_armv8_support())
Amit Blaybdfabc62015-01-29 22:04:13 +0200647 {
648 req.status_ptr = (uint32_t*)&rsp;
649 req.status_len = sizeof(rsp);
650
651 ret = scm_call(TZBSP_SVC_INFO,
652 TZ_INFO_GET_SECURE_STATE,
653 &req,
654 sizeof(req),
655 NULL,
656 0);
657 }
658 else
659 {
660 scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, TZ_INFO_GET_SECURE_STATE);
661 scm_arg.x1 = MAKE_SCM_ARGS(0x0);
662
663 ret = scm_call2(&scm_arg, &scm_ret);
664
665 rsp.status_low = scm_ret.x1;
666 rsp.status_high = scm_ret.x2;
667 }
668
669 if(!ret)
670 {
671 *state_low = rsp.status_low;
672 *state_high = rsp.status_high;
673 }
674
675 return ret;
676}
677
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800678int scm_protect_keystore(uint32_t * img_ptr, uint32_t img_len)
679{
680 int ret=0;
681 ssd_protect_keystore_req protect_req;
682 ssd_protect_keystore_rsp protect_rsp;
vijay kumare95092d2014-10-20 19:24:49 +0530683 scmcall_arg scm_arg = {0};
684 scmcall_ret scm_ret = {0};
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700685
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800686 protect_req.keystore_ptr = img_ptr;
687 protect_req.keystore_len = img_len;
688
689 arch_clean_invalidate_cache_range((addr_t) img_ptr, img_len);
690
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -0700691 if (!is_scm_armv8_support())
vijay kumare95092d2014-10-20 19:24:49 +0530692 {
693 ret = scm_call(SCM_SVC_SSD,
694 SSD_PROTECT_KEYSTORE_ID,
695 &protect_req,
696 sizeof(protect_req),
697 &protect_rsp,
698 sizeof(protect_rsp));
699 }
700 else
701 {
702 scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_PROTECT_KEYSTORE_ID);
703 scm_arg.x1 = MAKE_SCM_ARGS(0x2,SMC_PARAM_TYPE_BUFFER_READWRITE,SMC_PARAM_TYPE_VALUE);
Veera Sundaram Sankaran00181512014-12-09 11:23:39 -0800704 scm_arg.x2 = (uint32_t) protect_req.keystore_ptr;
vijay kumare95092d2014-10-20 19:24:49 +0530705 scm_arg.x3 = protect_req.keystore_len;
706
707 ret = scm_call2(&scm_arg, &scm_ret);
708 protect_rsp.status = scm_ret.x1;
709 }
sundarajan srinivasan4dfd4f72013-02-27 14:13:09 -0800710 if(!ret)
711 {
712 if(protect_rsp.status == TZBSP_SSD_PKS_SUCCESS)
713 dprintf(INFO,"Successfully loaded the keystore ");
714 else
715 {
716 dprintf(INFO,"Loading keystore failed status %d ",protect_rsp.status);
717 ret = protect_rsp.status;
718 }
719 }
720 else
721 dprintf(INFO,"scm_call failed ");
722
723 return ret;
724}
725
Shashank Mittal162244e2011-08-08 19:01:25 -0700726void set_tamper_fuse_cmd()
727{
728 uint32_t svc_id;
729 uint32_t cmd_id;
730 void *cmd_buf;
731 size_t cmd_len;
732 void *resp_buf = NULL;
733 size_t resp_len = 0;
vijay kumare95092d2014-10-20 19:24:49 +0530734 scmcall_arg scm_arg = {0};
Shashank Mittal162244e2011-08-08 19:01:25 -0700735
736 uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
737 cmd_buf = (void *)&fuse_id;
738 cmd_len = sizeof(fuse_id);
739
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -0700740 if (!is_scm_armv8_support())
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700741 {
vijay kumare95092d2014-10-20 19:24:49 +0530742 /*no response */
743 resp_buf = NULL;
744 resp_len = 0;
745
746 svc_id = SCM_SVC_FUSE;
747 cmd_id = SCM_BLOW_SW_FUSE_ID;
748
749 scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
750 }
751 else
752 {
753 scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_BLOW_SW_FUSE_ID);
754 scm_arg.x1 = MAKE_SCM_ARGS(0x2,SMC_PARAM_TYPE_BUFFER_READWRITE,SMC_PARAM_TYPE_VALUE);
Veera Sundaram Sankaran00181512014-12-09 11:23:39 -0800755 scm_arg.x2 = (uint32_t) cmd_buf;
vijay kumare95092d2014-10-20 19:24:49 +0530756 scm_arg.x3 = cmd_len;
757
758 scm_call2(&scm_arg, NULL);
759
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700760 }
761
Shashank Mittal162244e2011-08-08 19:01:25 -0700762}
763
764uint8_t get_tamper_fuse_cmd()
765{
766 uint32_t svc_id;
767 uint32_t cmd_id;
768 void *cmd_buf;
769 size_t cmd_len;
770 size_t resp_len = 0;
771 uint8_t resp_buf;
772
773 uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
vijay kumare95092d2014-10-20 19:24:49 +0530774 scmcall_arg scm_arg = {0};
775 scmcall_ret scm_ret = {0};
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700776
Shashank Mittal162244e2011-08-08 19:01:25 -0700777 cmd_buf = (void *)&fuse_id;
778 cmd_len = sizeof(fuse_id);
779
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -0700780 if (!is_scm_armv8_support())
vijay kumare95092d2014-10-20 19:24:49 +0530781 {
782 /*response */
783 resp_len = sizeof(resp_buf);
Shashank Mittal162244e2011-08-08 19:01:25 -0700784
vijay kumare95092d2014-10-20 19:24:49 +0530785 svc_id = SCM_SVC_FUSE;
786 cmd_id = SCM_IS_SW_FUSE_BLOWN_ID;
Shashank Mittal162244e2011-08-08 19:01:25 -0700787
vijay kumare95092d2014-10-20 19:24:49 +0530788 scm_call(svc_id, cmd_id, cmd_buf, cmd_len, &resp_buf, resp_len);
789 return resp_buf;
790 }
791 else
792 {
793 scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_IS_SW_FUSE_BLOWN_ID);
794 scm_arg.x1 = MAKE_SCM_ARGS(0x2,SMC_PARAM_TYPE_BUFFER_READWRITE,SMC_PARAM_TYPE_VALUE);
Veera Sundaram Sankaran00181512014-12-09 11:23:39 -0800795 scm_arg.x2 = (uint32_t) cmd_buf;
vijay kumare95092d2014-10-20 19:24:49 +0530796 scm_arg.x3 = cmd_len;
797
798 scm_call2(&scm_arg, &scm_ret);
799 return (uint8_t)scm_ret.x1;
800 }
Shashank Mittal162244e2011-08-08 19:01:25 -0700801}
Deepa Dinamani193874e2012-02-07 14:00:04 -0800802
Amir Samuelov4620ad22013-03-13 11:30:05 +0200803/*
804 * struct qseecom_save_partition_hash_req
805 * @partition_id - partition id.
806 * @digest[SHA256_DIGEST_LENGTH] - sha256 digest.
807 */
808struct qseecom_save_partition_hash_req {
809 uint32_t partition_id; /* in */
810 uint8_t digest[SHA256_DIGEST_LENGTH]; /* in */
811};
812
813
814void save_kernel_hash_cmd(void *digest)
815{
816 uint32_t svc_id;
817 uint32_t cmd_id;
818 void *cmd_buf;
819 size_t cmd_len;
820 void *resp_buf = NULL;
821 size_t resp_len = 0;
822 struct qseecom_save_partition_hash_req req;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700823 scmcall_arg scm_arg = {0};
Amir Samuelov4620ad22013-03-13 11:30:05 +0200824
825 /*no response */
826 resp_buf = NULL;
827 resp_len = 0;
828
829 req.partition_id = 0; /* kernel */
830 memcpy(req.digest, digest, sizeof(req.digest));
831
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -0700832 if (!is_scm_armv8_support())
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700833 {
834 svc_id = SCM_SVC_ES;
835 cmd_id = SCM_SAVE_PARTITION_HASH_ID;
836 cmd_buf = (void *)&req;
837 cmd_len = sizeof(req);
Amir Samuelov4620ad22013-03-13 11:30:05 +0200838
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700839 scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
840 }
841 else
842 {
843 scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID);
844 scm_arg.x1 = MAKE_SCM_ARGS(0x3, 0, SMC_PARAM_TYPE_BUFFER_READWRITE);
845 scm_arg.x2 = req.partition_id;
Veera Sundaram Sankaran00181512014-12-09 11:23:39 -0800846 scm_arg.x3 = (uint32_t) &req.digest;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700847 scm_arg.x4 = sizeof(req.digest);
848
849 if (scm_call2(&scm_arg, NULL))
850 dprintf(CRITICAL, "Failed to Save kernel hash\n");
851 }
Amir Samuelov4620ad22013-03-13 11:30:05 +0200852}
853
Amit Blayfe23ee22015-01-09 19:09:51 +0200854int mdtp_cipher_dip_cmd(uint8_t *in_buf, uint32_t in_buf_size, uint8_t *out_buf,
855 uint32_t out_buf_size, uint32_t direction)
856{
857 uint32_t svc_id;
858 uint32_t cmd_id;
859 void *cmd_buf;
860 void *rsp_buf;
861 size_t cmd_len;
862 size_t rsp_len;
863 mdtp_cipher_dip_req req;
864 scmcall_arg scm_arg = {0};
865 scmcall_ret scm_ret = {0};
866
867 ASSERT(in_buf != NULL);
868 ASSERT(out_buf != NULL);
869
870 req.in_buf = in_buf;
871 req.in_buf_size = in_buf_size;
872 req.out_buf = out_buf;
873 req.out_buf_size = out_buf_size;
874 req.direction = direction;
875
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -0700876 if (!is_scm_armv8_support())
Amit Blayfe23ee22015-01-09 19:09:51 +0200877 {
878 svc_id = SCM_SVC_MDTP;
879 cmd_id = SCM_MDTP_CIPHER_DIP;
880 cmd_buf = (void *)&req;
881 cmd_len = sizeof(req);
882 rsp_buf = NULL;
883 rsp_len = 0;
884
885 if (scm_call(svc_id, cmd_id, cmd_buf, cmd_len, rsp_buf, rsp_len))
886 {
887 dprintf(CRITICAL, "Failed to call Cipher DIP SCM\n");
888 return -1;
889 }
890 }
891 else
892 {
893 scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_MDTP, SCM_MDTP_CIPHER_DIP);
894 scm_arg.x1 = MAKE_SCM_ARGS(0x5, SMC_PARAM_TYPE_BUFFER_READ, SMC_PARAM_TYPE_VALUE,
895 SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE);
896 scm_arg.x2 = (uint32_t)req.in_buf;
897 scm_arg.x3 = req.in_buf_size;
898 scm_arg.x4 = (uint32_t)req.out_buf;
899 scm_arg.x5[0] = req.out_buf_size;
900 scm_arg.x5[1] = req.direction;
901
902 if (scm_call2(&scm_arg, &scm_ret))
903 {
904 dprintf(CRITICAL, "Failed in Cipher DIP SCM call\n");
905 return -1;
906 }
907 }
908
909 return 0;
910}
911
Amit Blaybdfabc62015-01-29 22:04:13 +0200912int qfprom_read_row_cmd(uint32_t row_address,
913 uint32_t addr_type,
914 uint32_t *row_data,
915 uint32_t *qfprom_api_status)
916{
917 uint32_t svc_id;
918 uint32_t cmd_id;
919 void *cmd_buf;
920 void *rsp_buf;
921 size_t cmd_len;
922 size_t rsp_len;
923 qfprom_read_row_req req;
924 scmcall_arg scm_arg = {0};
925 scmcall_ret scm_ret = {0};
926
927 req.row_address = row_address;
928 req.addr_type = addr_type;
929 req.row_data = row_data;
930 req.qfprom_api_status = qfprom_api_status;
931
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -0700932 if (!is_scm_armv8_support())
Amit Blaybdfabc62015-01-29 22:04:13 +0200933 {
934 svc_id = SCM_SVC_FUSE;
935 cmd_id = SCM_QFPROM_READ_ROW_ID;
936 cmd_buf = (void *)&req;
937 cmd_len = sizeof(req);
938 rsp_buf = NULL;
939 rsp_len = 0;
940
941 if (scm_call(svc_id, cmd_id, cmd_buf, cmd_len, rsp_buf, rsp_len))
942 {
943 dprintf(CRITICAL, "Failed to call SCM_SVC_FUSE.SCM_QFPROM_READ_ROW_ID SCM\n");
944 return -1;
945 }
946 }
947 else
948 {
949 scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_QFPROM_READ_ROW_ID);
950 scm_arg.x1 = MAKE_SCM_ARGS(0x4, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE,
951 SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_BUFFER_READWRITE);
952 scm_arg.x2 = req.row_address;
953 scm_arg.x3 = req.addr_type;
954 scm_arg.x4 = (uint32_t)req.row_data;
955 scm_arg.x5[0] = (uint32_t)req.qfprom_api_status;
956
957 if (scm_call2(&scm_arg, &scm_ret))
958 {
959 dprintf(CRITICAL, "Failed to call SCM_SVC_FUSE.SCM_QFPROM_READ_ROW_ID SCM\n");
960 return -1;
961 }
962 }
963
964 return 0;
965}
966
Deepa Dinamani193874e2012-02-07 14:00:04 -0800967/*
968 * Switches the CE1 channel between ADM and register usage.
969 * channel : AP_CE_REGISTER_USE, CE1 uses register interface
970 * : AP_CE_ADM_USE, CE1 uses ADM interface
971 */
972uint8_t switch_ce_chn_cmd(enum ap_ce_channel_type channel)
973{
974 uint32_t svc_id;
975 uint32_t cmd_id;
976 void *cmd_buf;
977 size_t cmd_len;
978 size_t resp_len = 0;
979 uint8_t resp_buf;
980
981 struct {
982 uint32_t resource;
983 uint32_t chn_id;
984 }__PACKED switch_ce_chn_buf;
985
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -0700986 if (is_scm_armv8_support())
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -0700987 {
988 dprintf(INFO, "%s:SCM call is not supported\n",__func__);
989 return 0;
990 }
991
Deepa Dinamani193874e2012-02-07 14:00:04 -0800992 switch_ce_chn_buf.resource = TZ_RESOURCE_CE_AP;
993 switch_ce_chn_buf.chn_id = channel;
994 cmd_buf = (void *)&switch_ce_chn_buf;
995 cmd_len = sizeof(switch_ce_chn_buf);
996
997 /*response */
998 resp_len = sizeof(resp_buf);
999
1000 svc_id = SCM_SVC_CE_CHN_SWITCH_ID;
1001 cmd_id = SCM_CE_CHN_SWITCH_ID;
1002
1003 scm_call(svc_id, cmd_id, cmd_buf, cmd_len, &resp_buf, resp_len);
1004 return resp_buf;
1005}
1006
Channagoud Kadabi179df0b2013-12-12 14:53:31 -08001007int scm_halt_pmic_arbiter()
1008{
1009 int ret = 0;
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301010 scmcall_arg scm_arg = {0};
Channagoud Kadabi179df0b2013-12-12 14:53:31 -08001011
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -07001012 if (is_scm_armv8_support()) {
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301013 scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER);
1014 scm_arg.x1 = MAKE_SCM_ARGS(0x1);
1015 scm_arg.x2 = 0;
1016 scm_arg.atomic = true;
1017 ret = scm_call2(&scm_arg, NULL);
1018 } else {
1019 ret = scm_call_atomic(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER, 0);
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001020 }
1021
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301022 /* Retry with the SCM_IO_DISABLE_PMIC_ARBITER1 func ID if the above Func ID fails*/
1023 if(ret) {
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -07001024 if (is_scm_armv8_support()) {
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301025 scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER1);
1026 scm_arg.x1 = MAKE_SCM_ARGS(0x1);
1027 scm_arg.x2 = 0;
1028 scm_arg.atomic = true;
1029 ret = scm_call2(&scm_arg, NULL);
1030 } else
1031 ret = scm_call_atomic(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER1, 0);
1032 }
Channagoud Kadabi179df0b2013-12-12 14:53:31 -08001033
1034 return ret;
1035}
Abhimanyu Kapurb3207fb2014-01-27 21:33:23 -08001036
1037/* Execption Level exec secure-os call
1038 * Jumps to kernel via secure-os and does not return
1039 * on successful jump. System parameters are setup &
1040 * passed on to secure-os and are utilized to boot the
1041 * kernel.
1042 *
1043 @ kernel_entry : kernel entry point passed in as link register.
1044 @ dtb_offset : dt blob address passed in as w0.
1045 @ svc_id : indicates direction of switch 32->64 or 64->32
1046 *
1047 * Assumes all sanity checks have been performed on arguments.
1048 */
1049
1050void scm_elexec_call(paddr_t kernel_entry, paddr_t dtb_offset)
1051{
1052 uint32_t svc_id = SCM_SVC_MILESTONE_32_64_ID;
1053 uint32_t cmd_id = SCM_SVC_MILESTONE_CMD_ID;
1054 void *cmd_buf;
1055 size_t cmd_len;
Sridhar Parasuramfc6ea712015-06-30 11:22:49 -07001056 static el1_system_param param __attribute__((aligned(0x1000)));
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001057 scmcall_arg scm_arg = {0};
Abhimanyu Kapurb3207fb2014-01-27 21:33:23 -08001058
1059 param.el1_x0 = dtb_offset;
1060 param.el1_elr = kernel_entry;
1061
Abhimanyu Kapurb3207fb2014-01-27 21:33:23 -08001062 /* Response Buffer = Null as no response expected */
1063 dprintf(INFO, "Jumping to kernel via monitor\n");
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001064
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -07001065 if (!is_scm_armv8_support())
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001066 {
1067 /* Command Buffer */
1068 cmd_buf = (void *)&param;
1069 cmd_len = sizeof(el1_system_param);
1070
1071 scm_call(svc_id, cmd_id, cmd_buf, cmd_len, NULL, 0);
1072 }
1073 else
1074 {
1075 scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_MILESTONE_32_64_ID, SCM_SVC_MILESTONE_CMD_ID);
1076 scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READ);
Veera Sundaram Sankaran00181512014-12-09 11:23:39 -08001077 scm_arg.x2 = (uint32_t ) &param;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001078 scm_arg.x3 = sizeof(el1_system_param);
1079
1080 scm_call2(&scm_arg, NULL);
1081 }
Abhimanyu Kapurb3207fb2014-01-27 21:33:23 -08001082
1083 /* Assert if execution ever reaches here */
1084 dprintf(CRITICAL, "Failed to jump to kernel\n");
1085 ASSERT(0);
1086}
Maria Yubeeeeaf2014-06-30 13:05:43 +08001087
1088/* SCM Random Command */
Channagoud Kadabiefdeb8a2015-09-23 11:52:20 -07001089int scm_random(uintptr_t * rbuf, uint32_t r_len)
Maria Yubeeeeaf2014-06-30 13:05:43 +08001090{
1091 int ret;
1092 struct tz_prng_data data;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001093 scmcall_arg scm_arg = {0};
Channagoud Kadabiefdeb8a2015-09-23 11:52:20 -07001094	// Memory passed to TZ should be aligned to a cache line
1095 BUF_DMA_ALIGN(rand_buf, sizeof(uintptr_t));
Maria Yubeeeeaf2014-06-30 13:05:43 +08001096
Mayank Grover59f4a372017-01-27 18:02:51 +05301097 // r_len must be less than or equal to sizeof(rand_buf) to avoid memory corruption.
1098 if (r_len > sizeof(rand_buf))
1099 {
1100 dprintf(CRITICAL, "r_len is larger than sizeof(randbuf).");
1101 return -1;
1102 }
1103
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -07001104 if (!is_scm_armv8_support())
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001105 {
Channagoud Kadabiefdeb8a2015-09-23 11:52:20 -07001106 data.out_buf = (uint8_t*) rand_buf;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001107 data.out_buf_size = r_len;
Maria Yubeeeeaf2014-06-30 13:05:43 +08001108
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001109 /*
1110 * random buffer must be flushed/invalidated before and after TZ call.
1111 */
Channagoud Kadabiefdeb8a2015-09-23 11:52:20 -07001112 arch_clean_invalidate_cache_range((addr_t) rand_buf, r_len);
Maria Yubeeeeaf2014-06-30 13:05:43 +08001113
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001114 ret = scm_call(TZ_SVC_CRYPTO, PRNG_CMD_ID, &data, sizeof(data), NULL, 0);
Maria Yubeeeeaf2014-06-30 13:05:43 +08001115
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001116 /* Invalidate the updated random buffer */
Channagoud Kadabiefdeb8a2015-09-23 11:52:20 -07001117 arch_clean_invalidate_cache_range((addr_t) rand_buf, r_len);
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001118 }
1119 else
1120 {
1121 scm_arg.x0 = MAKE_SIP_SCM_CMD(TZ_SVC_CRYPTO, PRNG_CMD_ID);
1122 scm_arg.x1 = MAKE_SCM_ARGS(0x2,SMC_PARAM_TYPE_BUFFER_READWRITE);
Channagoud Kadabiefdeb8a2015-09-23 11:52:20 -07001123 scm_arg.x2 = (uint32_t) rand_buf;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001124 scm_arg.x3 = r_len;
1125
Gaurav Nebhwani98db6cc2016-05-04 12:15:04 +05301126 arch_clean_invalidate_cache_range((addr_t) rand_buf, r_len);
1127
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001128 ret = scm_call2(&scm_arg, NULL);
1129 if (!ret)
Channagoud Kadabiefdeb8a2015-09-23 11:52:20 -07001130 arch_clean_invalidate_cache_range((addr_t) rand_buf, r_len);
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001131 else
1132 dprintf(CRITICAL, "Secure canary SCM failed: %x\n", ret);
1133 }
Maria Yubeeeeaf2014-06-30 13:05:43 +08001134
Channagoud Kadabiefdeb8a2015-09-23 11:52:20 -07001135 //Copy back into the return buffer
Parth Dixit2c009282016-11-01 16:06:21 +05301136 memscpy(rbuf, r_len, rand_buf, sizeof(rand_buf));
Maria Yubeeeeaf2014-06-30 13:05:43 +08001137 return ret;
1138}
1139
Channagoud Kadabiefdeb8a2015-09-23 11:52:20 -07001140uintptr_t get_canary()
Maria Yubeeeeaf2014-06-30 13:05:43 +08001141{
Channagoud Kadabiefdeb8a2015-09-23 11:52:20 -07001142 uintptr_t canary;
1143 if(scm_random(&canary, sizeof(canary))) {
Maria Yubeeeeaf2014-06-30 13:05:43 +08001144		dprintf(CRITICAL,"scm_call for random failed!\n");
1145 /*
1146 * fall back to use lib rand API if scm call failed.
1147 */
Channagoud Kadabiefdeb8a2015-09-23 11:52:20 -07001148 canary = rand();
Maria Yubeeeeaf2014-06-30 13:05:43 +08001149 }
1150
1151 return canary;
1152}
Aparna Mallavarapu6875ade2014-06-16 22:15:28 +05301153
1154int scm_xpu_err_fatal_init()
1155{
1156 uint32_t ret = 0;
1157 uint32_t response = 0;
1158 tz_xpu_prot_cmd cmd;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001159 scmcall_arg scm_arg = {0};
1160 scmcall_ret scm_ret = {0};
Aparna Mallavarapu6875ade2014-06-16 22:15:28 +05301161
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -07001162 if (!is_scm_armv8_support())
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001163 {
1164 cmd.config = ERR_FATAL_ENABLE;
1165 cmd.spare = 0;
Aparna Mallavarapu6875ade2014-06-16 22:15:28 +05301166
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001167 ret = scm_call(SVC_MEMORY_PROTECTION, XPU_ERR_FATAL, &cmd, sizeof(cmd), &response,
1168 sizeof(response));
1169 }
1170 else
1171 {
1172 scm_arg.x0 = MAKE_SIP_SCM_CMD(SVC_MEMORY_PROTECTION, XPU_ERR_FATAL);
1173 scm_arg.x1 = MAKE_SCM_ARGS(0x2);
1174 scm_arg.x2 = ERR_FATAL_ENABLE;
1175 scm_arg.x3 = 0x0;
1176 ret = scm_call2(&scm_arg, &scm_ret);
1177 response = scm_ret.x1;
1178 }
Aparna Mallavarapu6875ade2014-06-16 22:15:28 +05301179
1180 if (ret)
1181 dprintf(CRITICAL, "Failed to set XPU violations as fatal errors: %u\n", response);
1182 else
1183 dprintf(INFO, "Configured XPU violations to be fatal errors\n");
1184
1185 return ret;
1186}
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001187
1188static uint32_t scm_call_a32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, scmcall_ret *ret)
1189{
1190 register uint32_t r0 __asm__("r0") = x0;
1191 register uint32_t r1 __asm__("r1") = x1;
1192 register uint32_t r2 __asm__("r2") = x2;
1193 register uint32_t r3 __asm__("r3") = x3;
1194 register uint32_t r4 __asm__("r4") = x4;
1195 register uint32_t r5 __asm__("r5") = x5;
Channagoud Kadabie75efb92015-05-19 14:20:05 -07001196 register uint32_t r6 __asm__("r6") = 0;
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001197
Dinesh K Garg6bbbb702015-01-30 11:13:31 -08001198 do {
1199 __asm__ volatile(
1200 __asmeq("%0", "r0")
1201 __asmeq("%1", "r1")
1202 __asmeq("%2", "r2")
1203 __asmeq("%3", "r3")
1204 __asmeq("%4", "r0")
1205 __asmeq("%5", "r1")
1206 __asmeq("%6", "r2")
1207 __asmeq("%7", "r3")
1208 __asmeq("%8", "r4")
1209 __asmeq("%9", "r5")
Channagoud Kadabie75efb92015-05-19 14:20:05 -07001210 __asmeq("%10", "r6")
Dinesh K Garg6bbbb702015-01-30 11:13:31 -08001211 "smc #0 @ switch to secure world\n"
1212 : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
Channagoud Kadabie75efb92015-05-19 14:20:05 -07001213 : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5), "r" (r6));
Dinesh K Garg6bbbb702015-01-30 11:13:31 -08001214 } while(r0 == 1);
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001215
1216 if (ret)
1217 {
1218 ret->x1 = r1;
1219 ret->x2 = r2;
1220 ret->x3 = r3;
1221 }
1222
1223 return r0;
1224}
1225
1226uint32_t scm_call2(scmcall_arg *arg, scmcall_ret *ret)
1227{
1228 uint32_t *indir_arg = NULL;
1229 uint32_t x5;
1230 int i;
1231 uint32_t rc;
1232
1233 arg->x0 = arg->atomic ? (arg->x0 | SCM_ATOMIC_BIT) : arg->x0;
1234 x5 = arg->x5[0];
1235
Amit Blayfe23ee22015-01-09 19:09:51 +02001236 if ((arg->x1 & 0xF) > SCM_MAX_ARG_LEN - 1)
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001237 {
Channagoud Kadabiefdeb8a2015-09-23 11:52:20 -07001238 indir_arg = memalign(CACHE_LINE, ROUNDUP((SCM_INDIR_MAX_LEN * sizeof(uint32_t)), CACHE_LINE));
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001239 ASSERT(indir_arg);
1240
1241 for (i = 0 ; i < SCM_INDIR_MAX_LEN; i++)
1242 {
1243 indir_arg[i] = arg->x5[i];
1244 }
Channagoud Kadabiefdeb8a2015-09-23 11:52:20 -07001245 arch_clean_invalidate_cache_range((addr_t) indir_arg, ROUNDUP((SCM_INDIR_MAX_LEN * sizeof(uint32_t)), CACHE_LINE));
Channagoud Kadabidd85e7f2014-08-05 19:58:37 -07001246 x5 = (addr_t) indir_arg;
1247 }
1248
1249 rc = scm_call_a32(arg->x0, arg->x1, arg->x2, arg->x3, arg->x4, x5, ret);
1250
 1251	/* Free the indirect-argument buffer on both the success and error paths */
 1252	if (indir_arg)
 1253		free(indir_arg);
 1254
 1255	if (rc)
 1256	{
 1257		dprintf(CRITICAL, "SCM call: 0x%x failed with: 0x%x\n", arg->x0, rc);
 1258		return rc;
 1259	}
 1260	return 0;
1261}
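/*
 * Note on scm_call2() above (illustrative addition): when the argument
 * descriptor in x1 encodes more than SCM_MAX_ARG_LEN - 1 parameters, the
 * values beyond x4 are copied into a cache-line aligned indirect buffer whose
 * address is what is actually passed in place of x5. A caller simply fills
 * x5[0], x5[1], ... as mdtp_cipher_dip_cmd() earlier in this file does:
 *
 *	scm_arg.x1    = MAKE_SCM_ARGS(0x5, SMC_PARAM_TYPE_BUFFER_READ, ...);
 *	scm_arg.x4    = (uint32_t)req.out_buf;
 *	scm_arg.x5[0] = req.out_buf_size;
 *	scm_arg.x5[1] = req.direction;
 */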
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301262
Mayank Grover8bcdd972016-12-02 14:58:07 +05301263static bool secure_boot_enabled = false;
Aparna Mallavarapu8adbee22015-03-10 19:58:06 +05301264static bool wdog_debug_fuse_disabled = true;
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301265
1266void scm_check_boot_fuses()
1267{
1268 uint32_t ret = 0;
Vijay Kumar Pendoti6e09e832016-08-31 18:24:50 +05301269 uint32_t *resp = NULL;
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301270 scmcall_arg scm_arg = {0};
1271 scmcall_ret scm_ret = {0};
1272
Vijay Kumar Pendoti6e09e832016-08-31 18:24:50 +05301273 resp = memalign(CACHE_LINE, (2 * sizeof(uint32_t)));
1274 ASSERT(resp);
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -07001275 if (!is_scm_armv8_support()) {
Vijay Kumar Pendoti6e09e832016-08-31 18:24:50 +05301276 ret = scm_call_atomic2(TZBSP_SVC_INFO, IS_SECURE_BOOT_ENABLED, (uint32_t)resp, 2 * sizeof(uint32_t));
1277 arch_clean_invalidate_cache_range((addr_t)resp, ROUNDUP((2*sizeof(uint32_t)), CACHE_LINE));
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301278 } else {
1279 scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, IS_SECURE_BOOT_ENABLED);
1280 ret = scm_call2(&scm_arg, &scm_ret);
Vijay Kumar Pendoti6e09e832016-08-31 18:24:50 +05301281 resp[0] = scm_ret.x1;
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301282 }
1283
Mayank Grover8bcdd972016-12-02 14:58:07 +05301284 if (!ret) {
 1285		/* Check for secure device: Bit#0 = 0, Bit#1 = 0, Bit#2 = 0, Bit#5 = 0, Bit#6 = 1 */
1286 if (!CHECK_BIT(resp[0], SECBOOT_FUSE_BIT) && !CHECK_BIT(resp[0], SECBOOT_FUSE_SHK_BIT) &&
1287 !CHECK_BIT(resp[0], SECBOOT_FUSE_DEBUG_DISABLED_BIT) &&
1288 !CHECK_BIT(resp[0], SECBOOT_FUSE_RPMB_ENABLED_BIT) &&
1289 CHECK_BIT(resp[0], SECBOOT_FUSE_DEBUG_RE_ENABLED_BIT)) {
1290 secure_boot_enabled = true;
1291 }
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301292 /* Bit 2 - DEBUG_DISABLE_CHECK */
Mayank Grover8bcdd972016-12-02 14:58:07 +05301293 if (CHECK_BIT(resp[0], SECBOOT_FUSE_DEBUG_DISABLED_BIT))
Aparna Mallavarapu8adbee22015-03-10 19:58:06 +05301294 wdog_debug_fuse_disabled = false;
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301295 } else
1296 dprintf(CRITICAL, "scm call to check secure boot fuses failed\n");
Vijay Kumar Pendoti6e09e832016-08-31 18:24:50 +05301297 free(resp);
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301298}
1299
1300bool is_secure_boot_enable()
1301{
1302 scm_check_boot_fuses();
Aparna Mallavarapu8adbee22015-03-10 19:58:06 +05301303 return secure_boot_enabled;
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301304}
1305
1306static uint32_t scm_io_read(addr_t address)
1307{
1308 uint32_t ret;
1309 scmcall_arg scm_arg = {0};
1310 scmcall_ret scm_ret = {0};
1311
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -07001312 if (!is_scm_armv8_support()) {
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301313 ret = scm_call_atomic(SCM_SVC_IO, SCM_IO_READ, address);
1314 } else {
1315 scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_IO, SCM_IO_READ);
1316 scm_arg.x1 = MAKE_SCM_ARGS(0x1);
1317 scm_arg.x2 = address;
1318 scm_arg.atomic = true;
1319 ret = scm_call2(&scm_arg, &scm_ret);
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -07001320 /* Return the value read if the call is successful */
1321 if (!ret)
1322 ret = scm_ret.x1;
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301323 }
1324 return ret;
1325}
1326
Aparna Mallavarapuda91ea92015-07-10 12:03:46 +05301327uint32_t scm_io_write(uint32_t address, uint32_t val)
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301328{
1329 uint32_t ret;
1330 scmcall_arg scm_arg = {0};
1331 scmcall_ret scm_ret = {0};
1332
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -07001333 if (!is_scm_armv8_support()) {
Aparna Mallavarapu246c30b2014-12-11 12:07:51 +05301334 ret = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, val);
1335 } else {
1336 scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_IO, SCM_IO_WRITE);
1337 scm_arg.x1 = MAKE_SCM_ARGS(0x2);
1338 scm_arg.x2 = address;
1339 scm_arg.x3 = val;
1340 scm_arg.atomic = true;
1341 ret = scm_call2(&scm_arg, &scm_ret);
1342 }
1343 return ret;
1344}
Aparna Mallavarapu664ea772015-02-24 18:44:33 +05301345
Aparna Mallavarapuda91ea92015-07-10 12:03:46 +05301346int scm_call2_atomic(uint32_t svc, uint32_t cmd, uint32_t arg1, uint32_t arg2)
Aparna Mallavarapu664ea772015-02-24 18:44:33 +05301347{
1348 uint32_t ret = 0;
1349 scmcall_arg scm_arg = {0};
1350 scmcall_ret scm_ret = {0};
1351
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -07001352 if (!is_scm_armv8_support())
Aparna Mallavarapu664ea772015-02-24 18:44:33 +05301353 {
1354 ret = scm_call_atomic2(svc, cmd, arg1, arg2);
1355 } else {
1356 scm_arg.x0 = MAKE_SIP_SCM_CMD(svc, cmd);
1357 scm_arg.x1 = MAKE_SCM_ARGS(0x2);
1358 scm_arg.x2 = arg1;
1359 scm_arg.x3 = arg2;
1360 ret = scm_call2(&scm_arg, &scm_ret);
1361 }
1362 return ret;
1363}
1364
lijuang1cff8382016-01-11 17:56:54 +08001365int scm_disable_sdi()
1366{
1367 int ret = 0;
1368
1369 scm_check_boot_fuses();
1370
1371 /* Make WDOG_DEBUG DISABLE scm call only in non-secure boot */
1372 if(!(secure_boot_enabled || wdog_debug_fuse_disabled)) {
1373 ret = scm_call2_atomic(SCM_SVC_BOOT, WDOG_DEBUG_DISABLE, 1, 0);
1374 if(ret)
1375 dprintf(CRITICAL, "Failed to disable secure wdog debug: %d\n", ret);
1376 }
1377 return ret;
1378}
1379
Aparna Mallavarapu664ea772015-02-24 18:44:33 +05301380#if PLATFORM_USE_SCM_DLOAD
lijuang395b5e62015-11-19 17:39:44 +08001381int scm_dload_mode(enum reboot_reason mode)
Aparna Mallavarapu664ea772015-02-24 18:44:33 +05301382{
1383 int ret = 0;
1384 uint32_t dload_type;
1385
1386 dprintf(SPEW, "DLOAD mode: %d\n", mode);
lijuang1cff8382016-01-11 17:56:54 +08001387 if (mode == NORMAL_DLOAD) {
Aparna Mallavarapu664ea772015-02-24 18:44:33 +05301388 dload_type = SCM_DLOAD_MODE;
lijuang1cff8382016-01-11 17:56:54 +08001389#if DISABLE_DLOAD_MODE
1390 return 0;
1391#endif
1392 } else if(mode == EMERGENCY_DLOAD)
Aparna Mallavarapu664ea772015-02-24 18:44:33 +05301393 dload_type = SCM_EDLOAD_MODE;
1394 else
1395 dload_type = 0;
1396
1397 /* Write to the Boot MISC register */
Channagoud Kadabia2184b82015-07-07 10:09:32 -07001398 ret = is_scm_call_available(SCM_SVC_BOOT, SCM_DLOAD_CMD);
Aparna Mallavarapu664ea772015-02-24 18:44:33 +05301399
Channagoud Kadabia2184b82015-07-07 10:09:32 -07001400 if (ret > 0)
1401 ret = scm_call2_atomic(SCM_SVC_BOOT, SCM_DLOAD_CMD, dload_type, 0);
1402 else
Aparna Mallavarapu664ea772015-02-24 18:44:33 +05301403 ret = scm_io_write(TCSR_BOOT_MISC_DETECT,dload_type);
Channagoud Kadabia2184b82015-07-07 10:09:32 -07001404
1405 if(ret) {
1406 dprintf(CRITICAL, "Failed to write to boot misc: %d\n", ret);
1407 return ret;
Aparna Mallavarapu664ea772015-02-24 18:44:33 +05301408 }
1409
lijuang1cff8382016-01-11 17:56:54 +08001410#if !DISABLE_DLOAD_MODE
1411 return scm_disable_sdi();
1412#else
Aparna Mallavarapu664ea772015-02-24 18:44:33 +05301413 return ret;
lijuang1cff8382016-01-11 17:56:54 +08001414#endif
Aparna Mallavarapu664ea772015-02-24 18:44:33 +05301415}
Channagoud Kadabi6479ce32015-06-17 17:30:40 -07001416
1417bool scm_device_enter_dload()
1418{
1419 uint32_t ret = 0;
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -07001420 uint32_t dload_mode = 0;
Channagoud Kadabi6479ce32015-06-17 17:30:40 -07001421
1422 scmcall_arg scm_arg = {0};
1423 scmcall_ret scm_ret = {0};
1424
1425 scm_arg.x0 = MAKE_SIP_SCM_CMD(TZ_SVC_DLOAD_MODE, SCM_DLOAD_CMD);
1426 ret = scm_call2(&scm_arg, &scm_ret);
1427 if (ret)
1428 dprintf(CRITICAL, "SCM call to check dload mode failed: %x\n", ret);
1429
Channagoud Kadabi36cdfb62015-10-23 16:53:37 -07001430 if (!ret)
1431 {
1432 dload_mode = scm_io_read(TCSR_BOOT_MISC_DETECT);
1433 if (board_soc_version() < 0x30000)
1434 dload_mode = (dload_mode >> 16) & 0xFFFF;
1435 }
1436
1437 if (dload_mode == SCM_DLOAD_MODE)
Channagoud Kadabi6479ce32015-06-17 17:30:40 -07001438 return true;
1439
1440 return false;
1441}
Aparna Mallavarapu664ea772015-02-24 18:44:33 +05301442#endif