/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *     * Neither the name of The Linux Foundation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <asm.h>
#include <bits.h>
#include <arch/ops.h>
#include <rand.h>
#include <image_verify.h>
#include <dload_util.h>
#include <platform/iomap.h>
#include <board.h>
#include <qseecomi_lk.h>
#include <qseecom_lk_api.h>
#include "scm.h"

#pragma GCC optimize ("O0")

/* From Linux Kernel asm/system.h */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef offsetof
# define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif

#define SCM_CLASS_REGISTER      (0x2 << 8)
#define SCM_MASK_IRQS           BIT(5)
#define SCM_ATOMIC(svc, cmd, n) ((((((svc) & 0x3f) << 10)|((cmd) & 0x3ff)) << 12) | \
				 SCM_CLASS_REGISTER | \
				 SCM_MASK_IRQS | \
				 ((n) & 0xf))
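
/*
 * Illustrative note (not part of the original driver): SCM_ATOMIC() packs the
 * service id, command id and argument count into a single register-based call
 * token. For hypothetical values svc = 0x4, cmd = 0x1, n = 1 the expansion is:
 *
 *   ((((0x4 & 0x3f) << 10) | (0x1 & 0x3ff)) << 12)  -> 0x01001000
 *   | SCM_CLASS_REGISTER (0x2 << 8)                 -> 0x00000200
 *   | SCM_MASK_IRQS (BIT(5))                        -> 0x00000020
 *   | (1 & 0xf)                                     -> 0x00000001
 *                                                    = 0x01001221
 *
 * scm_call_atomic()/scm_call_atomic2() below load this value into r0 before
 * issuing the smc instruction.
 */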

#define SECBOOT_FUSE_BIT                  0
#define SECBOOT_FUSE_SHK_BIT              1
#define SECBOOT_FUSE_DEBUG_DISABLED_BIT   2
#define SECBOOT_FUSE_ANTI_ROLLBACK_BIT    3
#define SECBOOT_FUSE_FEC_ENABLED_BIT      4
#define SECBOOT_FUSE_RPMB_ENABLED_BIT     5
#define SECBOOT_FUSE_DEBUG_RE_ENABLED_BIT 6
#define CHECK_BIT(var, pos) ((var) & (1 << (pos)))
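
/*
 * Illustrative note (not part of the original driver): the SECBOOT_FUSE_* values
 * are bit positions in the fuse status word returned by the IS_SECURE_BOOT_ENABLED
 * query used in scm_check_boot_fuses() below. For example, with a status word of
 * 0x04 only bit 2 is set:
 *
 *   CHECK_BIT(0x04, SECBOOT_FUSE_BIT)                -> 0        (bit 0 clear)
 *   CHECK_BIT(0x04, SECBOOT_FUSE_DEBUG_DISABLED_BIT) -> non-zero (bit 2 set)
 */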

/* SCM interface as per ARM spec present? */
bool scm_arm_support;
static bool scm_initialized;

bool is_scm_armv8_support()
{
	if (!scm_initialized)
	{
		scm_init();
	}

	return scm_arm_support;
}

int is_scm_call_available(uint32_t svc_id, uint32_t cmd_id)
{
	int ret;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);
	scm_arg.x1 = MAKE_SCM_ARGS(0x1);
	scm_arg.x2 = MAKE_SIP_SCM_CMD(svc_id, cmd_id);

	ret = scm_call2(&scm_arg, &scm_ret);

	if (!ret)
		return scm_ret.x1;

	return ret;
}
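
/*
 * Illustrative usage (not part of the original driver): callers treat a positive
 * return value as "the queried SCM command is implemented by TZ" and fall back to
 * another mechanism otherwise, e.g. the pattern used by scm_dload_mode() below:
 *
 *   if (is_scm_call_available(SCM_SVC_BOOT, SCM_DLOAD_CMD) > 0)
 *           ret = scm_call2_atomic(SCM_SVC_BOOT, SCM_DLOAD_CMD, dload_type, 0);
 *   else
 *           ret = scm_io_write(TCSR_BOOT_MISC_DETECT, dload_type);
 */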

static int scm_arm_support_available(uint32_t svc_id, uint32_t cmd_id)
{
	int ret;

	ret = is_scm_call_available(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);

	if (ret > 0)
		scm_arm_support = true;

	return ret;
}

void scm_init()
{
	int ret;

	if (scm_initialized)
		return;

	ret = scm_arm_support_available(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);

	if (ret < 0)
		dprintf(CRITICAL, "Failed to initialize SCM\n");

	scm_initialized = true;

#if DISABLE_DLOAD_MODE
	scm_disable_sdi();
#endif
}

/**
 * alloc_scm_command() - Allocate an SCM command
 * @cmd_size: size of the command buffer
 * @resp_size: size of the response buffer
 *
 * Allocate an SCM command, including enough room for the command
 * and response headers as well as the command and response buffers.
 *
 * Returns a valid &scm_command on success or %NULL if the allocation fails.
 */
static struct scm_command *alloc_scm_command(size_t cmd_size, size_t resp_size)
{
	struct scm_command *cmd;
	size_t len = sizeof(*cmd) + sizeof(struct scm_response) + cmd_size +
		     resp_size;

	cmd = memalign(CACHE_LINE, ROUNDUP(len, CACHE_LINE));
	if (cmd) {
		memset(cmd, 0, len);
		cmd->len = len;
		cmd->buf_offset = offsetof(struct scm_command, buf);
		cmd->resp_hdr_offset = cmd->buf_offset + cmd_size;
	}
	return cmd;
}
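
/*
 * Layout sketch (illustrative, not part of the original driver): alloc_scm_command()
 * returns one cache-line aligned allocation that the legacy scm_call() interface
 * hands to TZ by address:
 *
 *   +---------------------+ <- cmd
 *   | struct scm_command  |    len, buf_offset, resp_hdr_offset, id
 *   +---------------------+ <- cmd + cmd->buf_offset
 *   | command buffer      |    cmd_size bytes, filled by the caller
 *   +---------------------+ <- cmd + cmd->resp_hdr_offset
 *   | struct scm_response |    is_complete / buf_offset written back by TZ
 *   +---------------------+
 *   | response buffer     |    resp_size bytes, located via rsp->buf_offset
 *   +---------------------+
 */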

/**
 * free_scm_command() - Free an SCM command
 * @cmd: command to free
 *
 * Free an SCM command.
 */
static inline void free_scm_command(struct scm_command *cmd)
{
	free(cmd);
}

/**
 * scm_command_to_response() - Get a pointer to a scm_response
 * @cmd: command
 *
 * Returns a pointer to a response for a command.
 */
static inline struct scm_response *scm_command_to_response(const struct
							    scm_command *cmd)
{
	return (void *)cmd + cmd->resp_hdr_offset;
}

/**
 * scm_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * Returns a pointer to the command buffer of a command.
 */
static inline void *scm_get_command_buffer(const struct scm_command *cmd)
{
	return (void *)cmd->buf;
}

/**
 * scm_get_response_buffer() - Get a pointer to a response buffer
 * @rsp: response
 *
 * Returns a pointer to a response buffer of a response.
 */
static inline void *scm_get_response_buffer(const struct scm_response *rsp)
{
	return (void *)rsp + rsp->buf_offset;
}

static uint32_t smc(uint32_t cmd_addr)
{
	uint32_t context_id;
	register uint32_t r0 __asm__("r0") = 1;
	register uint32_t r1 __asm__("r1") = (uint32_t) &context_id;
	register uint32_t r2 __asm__("r2") = cmd_addr;

	__asm__("1:smc	#0	@ switch to secure world\n"
		"cmp	r0, #1\n"
		"beq	1b\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2)
		: "r3", "cc");
	return r0;
}

/**
 * scm_call_atomic: Make an SCM call with one or no argument
 * @svc: service id
 * @cmd: command id
 * @arg1: argument
 */

static int scm_call_atomic(uint32_t svc, uint32_t cmd, uint32_t arg1)
{
	uint32_t context_id;
	register uint32_t r0 __asm__("r0") = SCM_ATOMIC(svc, cmd, 1);
	register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
	register uint32_t r2 __asm__("r2") = arg1;

	__asm__ volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
		"smc	#0	@ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2)
		: "r3");
	return r0;
}

/**
 * scm_call_atomic2() - Send an atomic SCM command with two arguments
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @arg1: first argument
 * @arg2: second argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
 */
int scm_call_atomic2(uint32_t svc, uint32_t cmd, uint32_t arg1, uint32_t arg2)
{
	int context_id;
	register uint32_t r0 __asm__("r0") = SCM_ATOMIC(svc, cmd, 2);
	register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
	register uint32_t r2 __asm__("r2") = arg1;
	register uint32_t r3 __asm__("r3") = arg2;

	__asm__ volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
		__asmeq("%4", "r3")
		"smc	#0	@ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2), "r" (r3));
	return r0;
}

/**
 * scm_call() - Send an SCM command
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 */
int
scm_call(uint32_t svc_id, uint32_t cmd_id, const void *cmd_buf,
	 size_t cmd_len, void *resp_buf, size_t resp_len)
{
	int ret;
	struct scm_command *cmd;
	struct scm_response *rsp;
	uint8_t *resp_ptr;

	cmd = alloc_scm_command(cmd_len, resp_len);
	if (!cmd)
		return ERR_NO_MEMORY;

	cmd->id = (svc_id << 10) | cmd_id;
	if (cmd_buf)
		memcpy(scm_get_command_buffer(cmd), cmd_buf, cmd_len);

	/* Flush command to main memory for TZ */
	arch_clean_invalidate_cache_range((addr_t) cmd, cmd->len);

	ret = smc((uint32_t) cmd);
	if (ret)
		goto out;

	if (resp_len) {
		rsp = scm_command_to_response(cmd);

		do
		{
			/* Need to invalidate before each check since TZ will update
			 * the response complete flag in main memory.
			 */
			arch_invalidate_cache_range((addr_t) rsp, sizeof(*rsp));
		} while (!rsp->is_complete);

		resp_ptr = scm_get_response_buffer(rsp);

		/* Invalidate any cached response data */
		arch_invalidate_cache_range((addr_t) resp_ptr, resp_len);

		if (resp_buf)
			memcpy(resp_buf, resp_ptr, resp_len);
	}
out:
	free_scm_command(cmd);
	return ret;
}
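
/*
 * Illustrative usage (not part of the original driver): the legacy scm_call()
 * convention is a flat request struct flushed to main memory plus an optional
 * response buffer, as in get_tamper_fuse_cmd() further down:
 *
 *   uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
 *   uint8_t  blown;
 *   scm_call(SCM_SVC_FUSE, SCM_IS_SW_FUSE_BLOWN_ID,
 *            &fuse_id, sizeof(fuse_id), &blown, sizeof(blown));
 */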

int restore_secure_cfg(uint32_t id)
{
	int ret = 0;
	tz_secure_cfg secure_cfg;
	scmcall_arg scm_arg = {0};

	secure_cfg.id = id;
	secure_cfg.spare = 0;

	if(!is_scm_armv8_support())
	{
		ret = scm_call(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG, &secure_cfg, sizeof(secure_cfg),
			       NULL, 0);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2);
		scm_arg.x2 = id;
		scm_arg.x3 = 0x0; /* Spare unused */

		ret = scm_call2(&scm_arg, NULL);
	}

	if (ret)
	{
		dprintf(CRITICAL, "Secure Config failed\n");
		ret = 1;
	}

	return ret;
}

/* SCM Encrypt Command */
int encrypt_scm(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
	int ret;
	img_req cmd;
	scmcall_arg scm_arg = {0};

	cmd.img_ptr     = (uint32*) img_ptr;
	cmd.img_len_ptr = img_len_ptr;

	/* Image data is operated upon by TZ, which accesses only the main memory.
	 * It must be flushed/invalidated before and after TZ call.
	 */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	if (!is_scm_armv8_support())
	{
		ret = scm_call(SCM_SVC_SSD, SSD_ENCRYPT_ID, &cmd, sizeof(cmd), NULL, 0);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_ENCRYPT_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_BUFFER_READWRITE);
		scm_arg.x2 = (uint32_t) cmd.img_ptr;
		scm_arg.x3 = (uint32_t) cmd.img_len_ptr;

		ret = scm_call2(&scm_arg, NULL);
	}

	/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
	 * before we use them.
	 */
	arch_clean_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
	arch_clean_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

	/* Invalidate the updated image data */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	return ret;
}

/* SCM Decrypt Command */
int decrypt_scm(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
	int ret;
	img_req cmd;

	if (is_scm_armv8_support())
	{
		dprintf(INFO, "%s: SCM call is not supported\n", __func__);
		return -1;
	}

	cmd.img_ptr     = (uint32*) img_ptr;
	cmd.img_len_ptr = img_len_ptr;

	/* Image data is operated upon by TZ, which accesses only the main memory.
	 * It must be flushed/invalidated before and after TZ call.
	 */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	ret = scm_call(SCM_SVC_SSD, SSD_DECRYPT_ID, &cmd, sizeof(cmd), NULL, 0);

	/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
	 * before we use them.
	 */
	arch_clean_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
	arch_clean_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

	/* Invalidate the updated image data */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	return ret;
}

static int ssd_image_is_encrypted(uint32_t ** img_ptr, uint32_t * img_len_ptr, uint32 * ctx_id)
{
	int ret = 0;
	ssd_parse_md_req parse_req = {0};
	ssd_parse_md_rsp parse_rsp = {0};
	int prev_len = 0;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	/* Populate the meta-data pointer. Here md_len is the meta-data length.
	 * The code below follows a growing-length approach: first send
	 * min(img_len_ptr, SSD_HEADER_MIN_SIZE), say 128 bytes for example.
	 * If parse_rsp.status == PARSING_INCOMPLETE we send md_len = 256.
	 * If the subsequent status is PARSING_INCOMPLETE we send md_len = 512,
	 * 1024 bytes and so on until we get a valid response (rsp.status) from TZ.
	 */
	parse_req.md     = (uint32*)*img_ptr;
	parse_req.md_len = ((*img_len_ptr) >= SSD_HEADER_MIN_SIZE) ? SSD_HEADER_MIN_SIZE : (*img_len_ptr);

	arch_clean_invalidate_cache_range((addr_t) *img_ptr, parse_req.md_len);

	do
	{
		if (!is_scm_armv8_support())
		{
			ret = scm_call(SCM_SVC_SSD,
					SSD_PARSE_MD_ID,
					&parse_req,
					sizeof(parse_req),
					&parse_rsp,
					sizeof(parse_rsp));
		}
		else
		{
			scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_PARSE_MD_ID);
			scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_BUFFER_READWRITE);
			scm_arg.x2 = parse_req.md_len;
			scm_arg.x3 = (uint32_t) parse_req.md;
			scm_arg.atomic = true;

			ret = scm_call2(&scm_arg, &scm_ret);
			parse_rsp.status = scm_ret.x1;
		}
		if(!ret && (parse_rsp.status == SSD_PMD_PARSING_INCOMPLETE))
		{
			prev_len = parse_req.md_len;

			parse_req.md_len *= MULTIPLICATION_FACTOR;

			arch_clean_invalidate_cache_range((addr_t) (*img_ptr + prev_len),
							  (parse_req.md_len - prev_len));

			continue;
		}
		else
			break;

	} while(true);

	if(!ret)
	{
		if(parse_rsp.status == SSD_PMD_ENCRYPTED)
		{
			*ctx_id      = parse_rsp.md_ctx_id;
			*img_len_ptr = *img_len_ptr - ((uint8_t*)parse_rsp.md_end_ptr - (uint8_t*)*img_ptr);
			*img_ptr     = (uint32_t*)parse_rsp.md_end_ptr;
		}

		ret = parse_rsp.status;
	}
	else
	{
		dprintf(CRITICAL, "ssd_image_is_encrypted call failed");

		ASSERT(ret == 0);
	}

	return ret;
}
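
/*
 * Illustrative note (not part of the original driver): the growing-length loop
 * above retries the metadata parse with a larger md_len each time TZ reports
 * SSD_PMD_PARSING_INCOMPLETE. Assuming, purely for illustration, that
 * SSD_HEADER_MIN_SIZE is 128 and MULTIPLICATION_FACTOR is 2, the lengths handed
 * to SSD_PARSE_MD_ID would be 128, 256, 512, 1024, ... until TZ returns a
 * terminal status.
 */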

int decrypt_scm_v2(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
	int ret = 0;
	uint32 ctx_id = 0;
	ssd_decrypt_img_frag_req decrypt_req;
	ssd_decrypt_img_frag_rsp decrypt_rsp;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	ret = ssd_image_is_encrypted(img_ptr, img_len_ptr, &ctx_id);
	switch(ret)
	{
	case SSD_PMD_ENCRYPTED:
		/* Image data is operated upon by TZ, which accesses only the main memory.
		 * It must be flushed/invalidated before and after TZ call.
		 */
		arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

		/* decrypt the image here */
		decrypt_req.md_ctx_id = ctx_id;
		decrypt_req.last_frag = 1;
		decrypt_req.frag_len  = *img_len_ptr;
		decrypt_req.frag      = *img_ptr;

		if (!is_scm_armv8_support())
		{
			ret = scm_call(SCM_SVC_SSD,
					SSD_DECRYPT_IMG_FRAG_ID,
					&decrypt_req,
					sizeof(decrypt_req),
					&decrypt_rsp,
					sizeof(decrypt_rsp));
		}
		else
		{
			scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_DECRYPT_IMG_FRAG_ID);
			scm_arg.x1 = MAKE_SCM_ARGS(0x4, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_BUFFER_READWRITE);
			scm_arg.x2 = decrypt_req.md_ctx_id;
			scm_arg.x3 = decrypt_req.last_frag;
			scm_arg.x4 = decrypt_req.frag_len;
			scm_arg.x5[0] = (uint32_t) decrypt_req.frag;

			ret = scm_call2(&scm_arg, &scm_ret);
			decrypt_rsp.status = scm_ret.x1;
		}
		if(!ret){
			ret = decrypt_rsp.status;
		}

		/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
		 * before we use them.
		 */
		arch_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
		arch_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

		/* Invalidate the updated image data */
		arch_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

		break;

	case SSD_PMD_NOT_ENCRYPTED:
	case SSD_PMD_NO_MD_FOUND:
		ret = 0;
		break;

	case SSD_PMD_BUSY:
	case SSD_PMD_BAD_MD_PTR_OR_LEN:
	case SSD_PMD_PARSING_INCOMPLETE:
	case SSD_PMD_PARSING_FAILED:
	case SSD_PMD_SETUP_CIPHER_FAILED:
		dprintf(CRITICAL, "decrypt_scm_v2: failed status %d\n", ret);
		break;

	default:
		dprintf(CRITICAL, "decrypt_scm_v2: case default: failed status %d\n", ret);
		break;
	}
	return ret;
}

int scm_svc_version(uint32 * major, uint32 * minor)
{
	feature_version_req feature_req;
	feature_version_rsp feature_rsp;
	int ret = 0;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	feature_req.feature_id = TZBSP_FVER_SSD;

	if (!is_scm_armv8_support())
	{
		ret = scm_call(TZBSP_SVC_INFO,
				TZ_INFO_GET_FEATURE_ID,
				&feature_req,
				sizeof(feature_req),
				&feature_rsp,
				sizeof(feature_rsp));
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, TZ_INFO_GET_FEATURE_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x1, SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = feature_req.feature_id;

		ret = scm_call2(&scm_arg, &scm_ret);
		feature_rsp.version = scm_ret.x1;
	}

	if(!ret)
		*major = TZBSP_GET_FEATURE_VERSION(feature_rsp.version);

	return ret;
}

int scm_svc_get_secure_state(uint32_t *state_low, uint32_t *state_high)
{
	get_secure_state_req req;
	get_secure_state_rsp rsp;

	int ret = 0;

	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support())
	{
		req.status_ptr = (uint32_t*)&rsp;
		req.status_len = sizeof(rsp);

		ret = scm_call(TZBSP_SVC_INFO,
				TZ_INFO_GET_SECURE_STATE,
				&req,
				sizeof(req),
				NULL,
				0);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, TZ_INFO_GET_SECURE_STATE);
		scm_arg.x1 = MAKE_SCM_ARGS(0x0);

		ret = scm_call2(&scm_arg, &scm_ret);

		rsp.status_low = scm_ret.x1;
		rsp.status_high = scm_ret.x2;
	}

	if(!ret)
	{
		*state_low = rsp.status_low;
		*state_high = rsp.status_high;
	}

	return ret;
}

int scm_protect_keystore(uint32_t * img_ptr, uint32_t img_len)
{
	int ret = 0;
	ssd_protect_keystore_req protect_req;
	ssd_protect_keystore_rsp protect_rsp;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	protect_req.keystore_ptr = img_ptr;
	protect_req.keystore_len = img_len;

	arch_clean_invalidate_cache_range((addr_t) img_ptr, img_len);

	if (!is_scm_armv8_support())
	{
		ret = scm_call(SCM_SVC_SSD,
				SSD_PROTECT_KEYSTORE_ID,
				&protect_req,
				sizeof(protect_req),
				&protect_rsp,
				sizeof(protect_rsp));
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_PROTECT_KEYSTORE_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = (uint32_t) protect_req.keystore_ptr;
		scm_arg.x3 = protect_req.keystore_len;

		ret = scm_call2(&scm_arg, &scm_ret);
		protect_rsp.status = scm_ret.x1;
	}
	if(!ret)
	{
		if(protect_rsp.status == TZBSP_SSD_PKS_SUCCESS)
			dprintf(INFO, "Successfully loaded the keystore ");
		else
		{
			dprintf(INFO, "Loading keystore failed status %d ", protect_rsp.status);
			ret = protect_rsp.status;
		}
	}
	else
		dprintf(INFO, "scm_call failed ");

	return ret;
}

uint32_t set_tamper_fuse_cmd(uint32_t fuse_id)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	void *resp_buf = NULL;
	size_t resp_len = 0;
	scmcall_arg scm_arg = {0};
	int ret;

	cmd_buf = (void *)&fuse_id;
	cmd_len = sizeof(fuse_id);

	if (!is_scm_armv8_support())
	{
		/* no response */
		resp_buf = NULL;
		resp_len = 0;

		svc_id = SCM_SVC_FUSE;
		cmd_id = SCM_BLOW_SW_FUSE_ID;

		ret = scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_BLOW_SW_FUSE_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x1, SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = fuse_id;
		scm_arg.x3 = cmd_len;
		ret = scm_call2(&scm_arg, NULL);
	}

	return ret;
}

uint8_t get_tamper_fuse_cmd()
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	size_t resp_len = 0;
	uint8_t resp_buf;

	uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	cmd_buf = (void *)&fuse_id;
	cmd_len = sizeof(fuse_id);

	if (!is_scm_armv8_support())
	{
		/* response */
		resp_len = sizeof(resp_buf);

		svc_id = SCM_SVC_FUSE;
		cmd_id = SCM_IS_SW_FUSE_BLOWN_ID;

		scm_call(svc_id, cmd_id, cmd_buf, cmd_len, &resp_buf, resp_len);
		return resp_buf;
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_IS_SW_FUSE_BLOWN_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = (uint32_t) cmd_buf;
		scm_arg.x3 = cmd_len;

		scm_call2(&scm_arg, &scm_ret);
		return (uint8_t)scm_ret.x1;
	}
}
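
/*
 * Illustrative usage (not part of the original driver): the two helpers above
 * form a blow/check pair for the HLOS software fuses, e.g.:
 *
 *   if (set_tamper_fuse_cmd(HLOS_IMG_TAMPER_FUSE))
 *           dprintf(CRITICAL, "failed to blow tamper fuse\n");
 *   if (get_tamper_fuse_cmd())
 *           dprintf(INFO, "tamper fuse is blown\n");
 */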

/*
 * struct qseecom_save_partition_hash_req
 * @partition_id - partition id.
 * @digest[SHA256_DIGEST_LENGTH] - sha256 digest.
 */
struct qseecom_save_partition_hash_req {
	uint32_t partition_id; /* in */
	uint8_t digest[SHA256_DIGEST_LENGTH]; /* in */
};


void save_kernel_hash_cmd(void *digest)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	void *resp_buf = NULL;
	size_t resp_len = 0;
	struct qseecom_save_partition_hash_req req;
	scmcall_arg scm_arg = {0};

	/* no response */
	resp_buf = NULL;
	resp_len = 0;

	req.partition_id = 0; /* kernel */
	memcpy(req.digest, digest, sizeof(req.digest));

	if (!is_scm_armv8_support())
	{
		svc_id = SCM_SVC_ES;
		cmd_id = SCM_SAVE_PARTITION_HASH_ID;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(req);

		scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x3, 0, SMC_PARAM_TYPE_BUFFER_READWRITE);
		scm_arg.x2 = req.partition_id;
		scm_arg.x3 = (uint32_t) &req.digest;
		scm_arg.x4 = sizeof(req.digest);

		if (scm_call2(&scm_arg, NULL))
			dprintf(CRITICAL, "Failed to Save kernel hash\n");
	}
}

int mdtp_cipher_dip_cmd(uint8_t *in_buf, uint32_t in_buf_size, uint8_t *out_buf,
		uint32_t out_buf_size, uint32_t direction)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	void *rsp_buf;
	size_t cmd_len;
	size_t rsp_len;
	mdtp_cipher_dip_req req;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	ASSERT(in_buf != NULL);
	ASSERT(out_buf != NULL);

	req.in_buf = in_buf;
	req.in_buf_size = in_buf_size;
	req.out_buf = out_buf;
	req.out_buf_size = out_buf_size;
	req.direction = direction;

	if (!is_scm_armv8_support())
	{
		svc_id = SCM_SVC_MDTP;
		cmd_id = SCM_MDTP_CIPHER_DIP;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(req);
		rsp_buf = NULL;
		rsp_len = 0;

		if (scm_call(svc_id, cmd_id, cmd_buf, cmd_len, rsp_buf, rsp_len))
		{
			dprintf(CRITICAL, "Failed to call Cipher DIP SCM\n");
			return -1;
		}
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_MDTP, SCM_MDTP_CIPHER_DIP);
		scm_arg.x1 = MAKE_SCM_ARGS(0x5, SMC_PARAM_TYPE_BUFFER_READ, SMC_PARAM_TYPE_VALUE,
				SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE);
		scm_arg.x2 = (uint32_t)req.in_buf;
		scm_arg.x3 = req.in_buf_size;
		scm_arg.x4 = (uint32_t)req.out_buf;
		scm_arg.x5[0] = req.out_buf_size;
		scm_arg.x5[1] = req.direction;

		if (scm_call2(&scm_arg, &scm_ret))
		{
			dprintf(CRITICAL, "Failed in Cipher DIP SCM call\n");
			return -1;
		}
	}

	return 0;
}

int qfprom_read_row_cmd(uint32_t row_address,
			uint32_t addr_type,
			uint32_t *row_data,
			uint32_t *qfprom_api_status)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	void *rsp_buf;
	size_t cmd_len;
	size_t rsp_len;
	qfprom_read_row_req req;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	req.row_address = row_address;
	req.addr_type = addr_type;
	req.row_data = row_data;
	req.qfprom_api_status = qfprom_api_status;

	if (!is_scm_armv8_support())
	{
		svc_id = SCM_SVC_FUSE;
		cmd_id = SCM_QFPROM_READ_ROW_ID;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(req);
		rsp_buf = NULL;
		rsp_len = 0;

		if (scm_call(svc_id, cmd_id, cmd_buf, cmd_len, rsp_buf, rsp_len))
		{
			dprintf(CRITICAL, "Failed to call SCM_SVC_FUSE.SCM_QFPROM_READ_ROW_ID SCM\n");
			return -1;
		}
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_QFPROM_READ_ROW_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x4, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE,
				SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_BUFFER_READWRITE);
		scm_arg.x2 = req.row_address;
		scm_arg.x3 = req.addr_type;
		scm_arg.x4 = (uint32_t)req.row_data;
		scm_arg.x5[0] = (uint32_t)req.qfprom_api_status;

		if (scm_call2(&scm_arg, &scm_ret))
		{
			dprintf(CRITICAL, "Failed to call SCM_SVC_FUSE.SCM_QFPROM_READ_ROW_ID SCM\n");
			return -1;
		}
	}

	return 0;
}

/*
 * Switches the CE1 channel between ADM and register usage.
 * channel : AP_CE_REGISTER_USE, CE1 uses register interface
 *         : AP_CE_ADM_USE, CE1 uses ADM interface
 */
uint8_t switch_ce_chn_cmd(enum ap_ce_channel_type channel)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	size_t resp_len = 0;
	uint8_t resp_buf;

	struct {
		uint32_t resource;
		uint32_t chn_id;
	} __PACKED switch_ce_chn_buf;

	if (is_scm_armv8_support())
	{
		dprintf(INFO, "%s: SCM call is not supported\n", __func__);
		return 0;
	}

	switch_ce_chn_buf.resource = TZ_RESOURCE_CE_AP;
	switch_ce_chn_buf.chn_id = channel;
	cmd_buf = (void *)&switch_ce_chn_buf;
	cmd_len = sizeof(switch_ce_chn_buf);

	/* response */
	resp_len = sizeof(resp_buf);

	svc_id = SCM_SVC_CE_CHN_SWITCH_ID;
	cmd_id = SCM_CE_CHN_SWITCH_ID;

	scm_call(svc_id, cmd_id, cmd_buf, cmd_len, &resp_buf, resp_len);
	return resp_buf;
}

int scm_halt_pmic_arbiter()
{
	int ret = 0;
	scmcall_arg scm_arg = {0};

	if (is_scm_armv8_support()) {
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER);
		scm_arg.x1 = MAKE_SCM_ARGS(0x1);
		scm_arg.x2 = 0;
		scm_arg.atomic = true;
		ret = scm_call2(&scm_arg, NULL);
	} else {
		ret = scm_call_atomic(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER, 0);
	}

	/* Retry with the SCM_IO_DISABLE_PMIC_ARBITER1 func ID if the above func ID fails */
	if(ret) {
		if (is_scm_armv8_support()) {
			scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER1);
			scm_arg.x1 = MAKE_SCM_ARGS(0x1);
			scm_arg.x2 = 0;
			scm_arg.atomic = true;
			ret = scm_call2(&scm_arg, NULL);
		} else
			ret = scm_call_atomic(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER1, 0);
	}

	return ret;
}

/* Exception Level exec secure-os call
 * Jumps to kernel via secure-os and does not return
 * on successful jump. System parameters are set up &
 * passed on to secure-os and are utilized to boot the
 * kernel.
 *
 * @kernel_entry : kernel entry point passed in as link register.
 * @dtb_offset   : dt blob address passed in as w0.
 * @svc_id       : indicates direction of switch 32->64 or 64->32
 *
 * Assumes all sanity checks have been performed on arguments.
 */

void scm_elexec_call(paddr_t kernel_entry, paddr_t dtb_offset)
{
	uint32_t svc_id = SCM_SVC_MILESTONE_32_64_ID;
	uint32_t cmd_id = SCM_SVC_MILESTONE_CMD_ID;
	void *cmd_buf;
	size_t cmd_len;
	static el1_system_param param __attribute__((aligned(0x1000)));
	scmcall_arg scm_arg = {0};

	param.el1_x0 = dtb_offset;
	param.el1_elr = kernel_entry;

	/* Response Buffer = Null as no response expected */
	dprintf(INFO, "Jumping to kernel via monitor\n");

	if (!is_scm_armv8_support())
	{
		/* Command Buffer */
		cmd_buf = (void *)&param;
		cmd_len = sizeof(el1_system_param);

		scm_call(svc_id, cmd_id, cmd_buf, cmd_len, NULL, 0);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_MILESTONE_32_64_ID, SCM_SVC_MILESTONE_CMD_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READ);
		scm_arg.x2 = (uint32_t) &param;
		scm_arg.x3 = sizeof(el1_system_param);

		scm_call2(&scm_arg, NULL);
	}

	/* Assert if execution ever reaches here */
	dprintf(CRITICAL, "Failed to jump to kernel\n");
	ASSERT(0);
}

/* SCM Random Command */
int scm_random(uintptr_t * rbuf, uint32_t r_len)
{
	int ret;
	struct tz_prng_data data;
	scmcall_arg scm_arg = {0};
	/* Memory passed to TZ should be aligned to a cache line */
	BUF_DMA_ALIGN(rand_buf, sizeof(uintptr_t));

	/* r_len must be less than or equal to sizeof(rand_buf) to avoid memory corruption. */
	if (r_len > sizeof(rand_buf))
	{
		dprintf(CRITICAL, "r_len is larger than sizeof(rand_buf).");
		return -1;
	}

	if (!is_scm_armv8_support())
	{
		data.out_buf = (uint8_t*) rand_buf;
		data.out_buf_size = r_len;

		/*
		 * random buffer must be flushed/invalidated before and after TZ call.
		 */
		arch_clean_invalidate_cache_range((addr_t) rand_buf, r_len);

		ret = scm_call(TZ_SVC_CRYPTO, PRNG_CMD_ID, &data, sizeof(data), NULL, 0);

		/* Invalidate the updated random buffer */
		arch_clean_invalidate_cache_range((addr_t) rand_buf, r_len);
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(TZ_SVC_CRYPTO, PRNG_CMD_ID);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE);
		scm_arg.x2 = (uint32_t) rand_buf;
		scm_arg.x3 = r_len;

		arch_clean_invalidate_cache_range((addr_t) rand_buf, r_len);

		ret = scm_call2(&scm_arg, NULL);
		if (!ret)
			arch_clean_invalidate_cache_range((addr_t) rand_buf, r_len);
		else
			dprintf(CRITICAL, "Secure canary SCM failed: %x\n", ret);
	}

	/* Copy back into the return buffer */
	memscpy(rbuf, r_len, rand_buf, sizeof(rand_buf));
	return ret;
}

uintptr_t get_canary()
{
	uintptr_t canary;
	if(scm_random(&canary, sizeof(canary))) {
		dprintf(CRITICAL, "scm_call for random failed !!!");
		/*
		 * fall back to use lib rand API if scm call failed.
		 */
		canary = rand();
	}

	return canary;
}

int scm_xpu_err_fatal_init()
{
	uint32_t ret = 0;
	uint32_t response = 0;
	tz_xpu_prot_cmd cmd;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support())
	{
		cmd.config = ERR_FATAL_ENABLE;
		cmd.spare = 0;

		ret = scm_call(SVC_MEMORY_PROTECTION, XPU_ERR_FATAL, &cmd, sizeof(cmd), &response,
				sizeof(response));
	}
	else
	{
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SVC_MEMORY_PROTECTION, XPU_ERR_FATAL);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2);
		scm_arg.x2 = ERR_FATAL_ENABLE;
		scm_arg.x3 = 0x0;
		ret = scm_call2(&scm_arg, &scm_ret);
		response = scm_ret.x1;
	}

	if (ret)
		dprintf(CRITICAL, "Failed to set XPU violations as fatal errors: %u\n", response);
	else
		dprintf(INFO, "Configured XPU violations to be fatal errors\n");

	return ret;
}

static uint32_t scm_call_a32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, scmcall_ret *ret)
{
	register uint32_t r0 __asm__("r0") = x0;
	register uint32_t r1 __asm__("r1") = x1;
	register uint32_t r2 __asm__("r2") = x2;
	register uint32_t r3 __asm__("r3") = x3;
	register uint32_t r4 __asm__("r4") = x4;
	register uint32_t r5 __asm__("r5") = x5;
	register uint32_t r6 __asm__("r6") = 0;

	do {
		__asm__ volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r1")
			__asmeq("%2", "r2")
			__asmeq("%3", "r3")
			__asmeq("%4", "r0")
			__asmeq("%5", "r1")
			__asmeq("%6", "r2")
			__asmeq("%7", "r3")
			__asmeq("%8", "r4")
			__asmeq("%9", "r5")
			__asmeq("%10", "r6")
			"smc	#0	@ switch to secure world\n"
			: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
			: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5), "r" (r6));
	} while(r0 == 1);

	if (ret)
	{
		ret->x1 = r1;
		ret->x2 = r2;
		ret->x3 = r3;
	}

	return r0;
}

uint32_t scm_call2(scmcall_arg *arg, scmcall_ret *ret)
{
	uint32_t *indir_arg = NULL;
	uint32_t x5;
	int i;
	uint32_t rc;

	arg->x0 = arg->atomic ? (arg->x0 | SCM_ATOMIC_BIT) : arg->x0;
	x5 = arg->x5[0];

	if ((arg->x1 & 0xF) > SCM_MAX_ARG_LEN - 1)
	{
		indir_arg = memalign(CACHE_LINE, ROUNDUP((SCM_INDIR_MAX_LEN * sizeof(uint32_t)), CACHE_LINE));
		ASSERT(indir_arg);

		for (i = 0; i < SCM_INDIR_MAX_LEN; i++)
		{
			indir_arg[i] = arg->x5[i];
		}
		arch_clean_invalidate_cache_range((addr_t) indir_arg, ROUNDUP((SCM_INDIR_MAX_LEN * sizeof(uint32_t)), CACHE_LINE));
		x5 = (addr_t) indir_arg;
	}

	rc = scm_call_a32(arg->x0, arg->x1, arg->x2, arg->x3, arg->x4, x5, ret);

	/* Free the indirect argument buffer on both the success and failure paths */
	if (indir_arg)
		free(indir_arg);

	if (rc)
	{
		dprintf(CRITICAL, "SCM call: 0x%x failed with :%x\n", arg->x0, rc);
		return rc;
	}

	return 0;
}
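
/*
 * Illustrative note (not part of the original driver): scm_call2() places the
 * leading arguments directly in x2..x4 and x5[0]; when the argument count encoded
 * in the low nibble of x1 by MAKE_SCM_ARGS() exceeds SCM_MAX_ARG_LEN - 1, the
 * values in x5[] are copied to a cache-line aligned indirect buffer and its
 * address is passed in place of x5. A hypothetical call with six value arguments:
 *
 *   scmcall_arg arg = {0};
 *   scmcall_ret out = {0};
 *   arg.x0 = MAKE_SIP_SCM_CMD(svc_id, cmd_id);
 *   arg.x1 = MAKE_SCM_ARGS(0x6);                    // six arguments
 *   arg.x2 = a0; arg.x3 = a1; arg.x4 = a2;
 *   arg.x5[0] = a3; arg.x5[1] = a4; arg.x5[2] = a5;  // spilled indirectly
 *   rc = scm_call2(&arg, &out);
 */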

static bool secure_boot_enabled = false;
static bool wdog_debug_fuse_disabled = true;

void scm_check_boot_fuses()
{
	uint32_t ret = 0;
	uint32_t *resp = NULL;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	resp = memalign(CACHE_LINE, (2 * sizeof(uint32_t)));
	ASSERT(resp);
	if (!is_scm_armv8_support()) {
		ret = scm_call_atomic2(TZBSP_SVC_INFO, IS_SECURE_BOOT_ENABLED, (uint32_t)resp, 2 * sizeof(uint32_t));
		arch_clean_invalidate_cache_range((addr_t)resp, ROUNDUP((2 * sizeof(uint32_t)), CACHE_LINE));
	} else {
		scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, IS_SECURE_BOOT_ENABLED);
		ret = scm_call2(&scm_arg, &scm_ret);
		resp[0] = scm_ret.x1;
	}

	if (!ret) {
		/* Check for secure device: Bit#0 = 0, Bit#1 = 0, Bit#2 = 0, Bit#5 = 0 */
		/* Check Bit#6 = 1 only for TZ.BF.4.0 */
		if (!CHECK_BIT(resp[0], SECBOOT_FUSE_BIT) && !CHECK_BIT(resp[0], SECBOOT_FUSE_SHK_BIT) &&
			!CHECK_BIT(resp[0], SECBOOT_FUSE_DEBUG_DISABLED_BIT) &&
			!CHECK_BIT(resp[0], SECBOOT_FUSE_RPMB_ENABLED_BIT)) {
			if ((qseecom_get_version() < QSEE_VERSION_40))
				secure_boot_enabled = true;
			else if (CHECK_BIT(resp[0], SECBOOT_FUSE_DEBUG_RE_ENABLED_BIT))
				secure_boot_enabled = true;
		}
		/* Bit 2 - DEBUG_DISABLE_CHECK */
		if (CHECK_BIT(resp[0], SECBOOT_FUSE_DEBUG_DISABLED_BIT))
			wdog_debug_fuse_disabled = false;
	} else
		dprintf(CRITICAL, "scm call to check secure boot fuses failed\n");
	free(resp);
}

bool is_secure_boot_enable()
{
	scm_check_boot_fuses();
	return secure_boot_enabled;
}

static uint32_t scm_io_read(addr_t address)
{
	uint32_t ret;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support()) {
		ret = scm_call_atomic(SCM_SVC_IO, SCM_IO_READ, address);
	} else {
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_IO, SCM_IO_READ);
		scm_arg.x1 = MAKE_SCM_ARGS(0x1);
		scm_arg.x2 = address;
		scm_arg.atomic = true;
		ret = scm_call2(&scm_arg, &scm_ret);
		/* Return the value read if the call is successful */
		if (!ret)
			ret = scm_ret.x1;
	}
	return ret;
}

uint32_t scm_io_write(uint32_t address, uint32_t val)
{
	uint32_t ret;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support()) {
		ret = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, val);
	} else {
		scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_IO, SCM_IO_WRITE);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2);
		scm_arg.x2 = address;
		scm_arg.x3 = val;
		scm_arg.atomic = true;
		ret = scm_call2(&scm_arg, &scm_ret);
	}
	return ret;
}

int scm_call2_atomic(uint32_t svc, uint32_t cmd, uint32_t arg1, uint32_t arg2)
{
	uint32_t ret = 0;
	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	if (!is_scm_armv8_support())
	{
		ret = scm_call_atomic2(svc, cmd, arg1, arg2);
	} else {
		scm_arg.x0 = MAKE_SIP_SCM_CMD(svc, cmd);
		scm_arg.x1 = MAKE_SCM_ARGS(0x2);
		scm_arg.x2 = arg1;
		scm_arg.x3 = arg2;
		ret = scm_call2(&scm_arg, &scm_ret);
	}
	return ret;
}

int scm_disable_sdi()
{
	int ret = 0;

	scm_check_boot_fuses();

	/* Make the WDOG_DEBUG DISABLE scm call only in non-secure boot */
	if(!(secure_boot_enabled || wdog_debug_fuse_disabled)) {
		ret = scm_call2_atomic(SCM_SVC_BOOT, WDOG_DEBUG_DISABLE, 1, 0);
		if(ret)
			dprintf(CRITICAL, "Failed to disable secure wdog debug: %d\n", ret);
	}
	return ret;
}

#if PLATFORM_USE_SCM_DLOAD
int scm_dload_mode(enum reboot_reason mode)
{
	int ret = 0;
	uint32_t dload_type;

	dprintf(SPEW, "DLOAD mode: %d\n", mode);
	if (mode == NORMAL_DLOAD) {
		dload_type = SCM_DLOAD_MODE;
#if DISABLE_DLOAD_MODE
		return 0;
#endif
	} else if(mode == EMERGENCY_DLOAD)
		dload_type = SCM_EDLOAD_MODE;
	else
		dload_type = 0;

	/* Write to the Boot MISC register */
	ret = is_scm_call_available(SCM_SVC_BOOT, SCM_DLOAD_CMD);

	if (ret > 0)
		ret = scm_call2_atomic(SCM_SVC_BOOT, SCM_DLOAD_CMD, dload_type, 0);
	else
		ret = scm_io_write(TCSR_BOOT_MISC_DETECT, dload_type);

	if(ret) {
		dprintf(CRITICAL, "Failed to write to boot misc: %d\n", ret);
		return ret;
	}

#if !DISABLE_DLOAD_MODE
	return scm_disable_sdi();
#else
	return ret;
#endif
}

bool scm_device_enter_dload()
{
	uint32_t ret = 0;
	uint32_t dload_mode = 0;

	scmcall_arg scm_arg = {0};
	scmcall_ret scm_ret = {0};

	scm_arg.x0 = MAKE_SIP_SCM_CMD(TZ_SVC_DLOAD_MODE, SCM_DLOAD_CMD);
	ret = scm_call2(&scm_arg, &scm_ret);
	if (ret)
		dprintf(CRITICAL, "SCM call to check dload mode failed: %x\n", ret);

	if (!ret)
	{
		dload_mode = scm_io_read(TCSR_BOOT_MISC_DETECT);
		if (board_soc_version() < 0x30000)
			dload_mode = (dload_mode >> 16) & 0xFFFF;
	}

	if (dload_mode == SCM_DLOAD_MODE)
		return true;

	return false;
}
#endif

bool allow_set_fuse(uint32_t version)
{
	/* if((major > 4) || (major == 4 && minor > 0)) */
	if((((version >> 22) & 0x3FF) > 4)
		|| (((version >> 22) & 0x3FF) == 4 && ((version >> 12) & 0x3FF) > 0))
	{
		return TRUE;
	} else {
		return FALSE;
	}
}
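
/*
 * Illustrative note (not part of the original driver): the version word checked
 * above packs the TZ/QSEE major version in bits [31:22] and the minor version in
 * bits [21:12]. For a hypothetical version value of (4 << 22) | (1 << 12)
 * (major 4, minor 1) allow_set_fuse() returns TRUE; for (4 << 22) alone
 * (major 4, minor 0) it returns FALSE.
 */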