/* Copyright (c) 2011-2018,2019 The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *     * Neither the name of The Linux Foundation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <asm.h>
#include <bits.h>
#include <arch/ops.h>
#include <rand.h>
#include <image_verify.h>
#include <dload_util.h>
#include <platform/iomap.h>
#include <board.h>
#include <qseecomi_lk.h>
#include <qseecom_lk_api.h>
#include <boot_device.h>
#include "scm.h"

#pragma GCC optimize ("O0")

/* From Linux Kernel asm/system.h */
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef offsetof
# define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif

#define SCM_CLASS_REGISTER      (0x2 << 8)
#define SCM_MASK_IRQS           BIT(5)
#define SCM_ATOMIC(svc, cmd, n) ((((((svc) & 0x3f) << 10)|((cmd) & 0x3ff)) << 12) | \
                                SCM_CLASS_REGISTER | \
                                SCM_MASK_IRQS | \
                                ((n) & 0xf))
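/*
 * Illustration (comment only, not compiled): how SCM_ATOMIC() packs the legacy
 * atomic function id. Field positions follow the macro above; the numeric
 * service/command ids below are made up purely for the example.
 *
 *   SCM_ATOMIC(0x1, 0x4, 1)
 *     = ((((0x1 & 0x3f) << 10) | (0x4 & 0x3ff)) << 12)   ->  0x404000
 *       | SCM_CLASS_REGISTER (0x200)
 *       | SCM_MASK_IRQS      (0x020)
 *       | (1 & 0xf)          (0x001)
 *     = 0x404221
 */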

#define SECBOOT_FUSE_BIT                  0
#define SECBOOT_FUSE_SHK_BIT              1
#define SECBOOT_FUSE_DEBUG_DISABLED_BIT   2
#define SECBOOT_FUSE_ANTI_ROLLBACK_BIT    3
#define SECBOOT_FUSE_FEC_ENABLED_BIT      4
#define SECBOOT_FUSE_RPMB_ENABLED_BIT     5
#define SECBOOT_FUSE_DEBUG_RE_ENABLED_BIT 6
#define CHECK_BIT(var, pos) ((var) & (1 << (pos)))

/* SCM interface as per ARM spec present? */
bool scm_arm_support;
static bool scm_initialized;

bool is_scm_armv8_support()
{
    if (!scm_initialized)
    {
        scm_init();
    }

    return scm_arm_support;
}

int is_scm_call_available(uint32_t svc_id, uint32_t cmd_id)
{
    int ret;
    scmcall_arg scm_arg = {0};
    scmcall_ret scm_ret = {0};

    scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);
    scm_arg.x1 = MAKE_SCM_ARGS(0x1);
    scm_arg.x2 = MAKE_SIP_SCM_CMD(svc_id, cmd_id);

    ret = scm_call2(&scm_arg, &scm_ret);

    if (!ret)
        return scm_ret.x1;

    return ret;
}

static int scm_arm_support_available(uint32_t svc_id, uint32_t cmd_id)
{
    int ret;

    ret = is_scm_call_available(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);

    if (ret > 0)
        scm_arm_support = true;

    return ret;
}

void scm_init()
{
    int ret;

    if (scm_initialized)
        return;

    ret = scm_arm_support_available(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);

    if (ret < 0)
        dprintf(CRITICAL, "Failed to initialize SCM\n");

    scm_initialized = true;

#if DISABLE_DLOAD_MODE
    scm_disable_sdi();
#endif
}

/**
 * alloc_scm_command() - Allocate an SCM command
 * @cmd_size: size of the command buffer
 * @resp_size: size of the response buffer
 *
 * Allocate an SCM command, including enough room for the command
 * and response headers as well as the command and response buffers.
 *
 * Returns a valid &scm_command on success or %NULL if the allocation fails.
 */
static struct scm_command *alloc_scm_command(size_t cmd_size, size_t resp_size)
{
    struct scm_command *cmd;
    size_t len = sizeof(*cmd) + sizeof(struct scm_response) + cmd_size +
        resp_size;

    cmd = memalign(CACHE_LINE, ROUNDUP(len, CACHE_LINE));
    if (cmd) {
        memset(cmd, 0, len);
        cmd->len = len;
        cmd->buf_offset = offsetof(struct scm_command, buf);
        cmd->resp_hdr_offset = cmd->buf_offset + cmd_size;
    }
    return cmd;
}
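/*
 * Illustration (comment only): for alloc_scm_command(cmd_size, resp_size) the
 * cache-line aligned allocation is laid out roughly as
 *
 *   | scm_command hdr | cmd payload (cmd_size) | scm_response hdr | resp payload |
 *   ^cmd              ^cmd->buf_offset         ^cmd->resp_hdr_offset
 *
 * Exact header sizes come from struct scm_command / struct scm_response in scm.h;
 * the response payload offset within the response is filled in by TZ.
 */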

/**
 * free_scm_command() - Free an SCM command
 * @cmd: command to free
 *
 * Free an SCM command.
 */
static inline void free_scm_command(struct scm_command *cmd)
{
    free(cmd);
}

/**
 * scm_command_to_response() - Get a pointer to a scm_response
 * @cmd: command
 *
 * Returns a pointer to a response for a command.
 */
static inline struct scm_response *scm_command_to_response(const struct
                                                           scm_command *cmd)
{
    return (void *)cmd + cmd->resp_hdr_offset;
}

/**
 * scm_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * Returns a pointer to the command buffer of a command.
 */
static inline void *scm_get_command_buffer(const struct scm_command *cmd)
{
    return (void *)cmd->buf;
}

/**
 * scm_get_response_buffer() - Get a pointer to a response buffer
 * @rsp: response
 *
 * Returns a pointer to a response buffer of a response.
 */
static inline void *scm_get_response_buffer(const struct scm_response *rsp)
{
    return (void *)rsp + rsp->buf_offset;
}

static uint32_t smc(uint32_t cmd_addr)
{
    uint32_t context_id;
    register uint32_t r0 __asm__("r0") = 1;
    register uint32_t r1 __asm__("r1") = (uint32_t) &context_id;
    register uint32_t r2 __asm__("r2") = cmd_addr;
    __asm__("1:smc #0 @ switch to secure world\n" "cmp r0, #1 \n" "beq 1b \n": "=r"(r0): "r"(r0), "r"(r1), "r"(r2):"r3", "cc");
    return r0;
}

/**
 * scm_call_atomic() - Make an SCM call with one or no argument
 * @svc: service id
 * @cmd: command id
 * @arg1: argument
 */
static int scm_call_atomic(uint32_t svc, uint32_t cmd, uint32_t arg1)
{
    uint32_t context_id;
    register uint32_t r0 __asm__("r0") = SCM_ATOMIC(svc, cmd, 1);
    register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
    register uint32_t r2 __asm__("r2") = arg1;

    __asm__ volatile(
        __asmeq("%0", "r0")
        __asmeq("%1", "r0")
        __asmeq("%2", "r1")
        __asmeq("%3", "r2")
        "smc #0 @ switch to secure world\n"
        : "=r" (r0)
        : "r" (r0), "r" (r1), "r" (r2)
        : "r3");
    return r0;
}

/**
 * scm_call_atomic2() - Send an atomic SCM command with two arguments
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @arg1: first argument
 * @arg2: second argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
 */
int scm_call_atomic2(uint32_t svc, uint32_t cmd, uint32_t arg1, uint32_t arg2)
{
    int context_id;
    register uint32_t r0 __asm__("r0") = SCM_ATOMIC(svc, cmd, 2);
    register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
    register uint32_t r2 __asm__("r2") = arg1;
    register uint32_t r3 __asm__("r3") = arg2;

    __asm__ volatile(
        __asmeq("%0", "r0")
        __asmeq("%1", "r0")
        __asmeq("%2", "r1")
        __asmeq("%3", "r2")
        __asmeq("%4", "r3")
        "smc #0 @ switch to secure world\n"
        : "=r" (r0)
        : "r" (r0), "r" (r1), "r" (r2), "r" (r3));
    return r0;
}
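/*
 * Usage sketch (illustrative only): the legacy atomic helpers are invoked the
 * same way the callers later in this file do, e.g.
 *
 *   int rc = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, value);
 *   if (rc)
 *       dprintf(CRITICAL, "atomic SCM call failed: %d\n", rc);
 */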

/**
 * scm_call() - Send an SCM command
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 */
int
scm_call(uint32_t svc_id, uint32_t cmd_id, const void *cmd_buf,
         size_t cmd_len, void *resp_buf, size_t resp_len)
{
    int ret;
    struct scm_command *cmd;
    struct scm_response *rsp;
    uint8_t *resp_ptr;

    cmd = alloc_scm_command(cmd_len, resp_len);
    if (!cmd)
        return ERR_NO_MEMORY;

    cmd->id = (svc_id << 10) | cmd_id;
    if (cmd_buf)
        memcpy(scm_get_command_buffer(cmd), cmd_buf, cmd_len);

    /* Flush command to main memory for TZ */
    arch_clean_invalidate_cache_range((addr_t) cmd, cmd->len);

    ret = smc((uint32_t) cmd);
    if (ret)
        goto out;

    if (resp_len) {
        rsp = scm_command_to_response(cmd);

        do
        {
            /* Need to invalidate before each check since TZ will update
             * the response complete flag in main memory.
             */
            arch_invalidate_cache_range((addr_t) rsp, sizeof(*rsp));
        } while (!rsp->is_complete);

        resp_ptr = scm_get_response_buffer(rsp);

        /* Invalidate any cached response data */
        arch_invalidate_cache_range((addr_t) resp_ptr, resp_len);

        if (resp_buf)
            memcpy(resp_buf, resp_ptr, resp_len);
    }
 out:
    free_scm_command(cmd);
    return ret;
}
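/*
 * Usage sketch (illustrative only, mirroring the legacy callers below): the
 * caller fills a request structure and scm_call() handles allocation, cache
 * maintenance and the SMC itself, e.g.
 *
 *   img_req req;
 *   req.img_ptr = (uint32 *) img_ptr;
 *   req.img_len_ptr = img_len_ptr;
 *   ret = scm_call(SCM_SVC_SSD, SSD_DECRYPT_ID, &req, sizeof(req), NULL, 0);
 */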

int restore_secure_cfg(uint32_t id)
{
    int ret = 0;
    tz_secure_cfg secure_cfg;

    secure_cfg.id = id;
    secure_cfg.spare = 0;
    scmcall_arg scm_arg = {0};

    if(!is_scm_armv8_support())
    {
        ret = scm_call(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG, &secure_cfg, sizeof(secure_cfg),
                       NULL, 0);
    }
    else
    {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG);
        scm_arg.x1 = MAKE_SCM_ARGS(0x2);
        scm_arg.x2 = id;
        scm_arg.x3 = 0x0; /* Spare unused */

        ret = scm_call2(&scm_arg, NULL);
    }

    if (ret)
    {
        dprintf(CRITICAL, "Secure Config failed\n");
        ret = 1;
    }

    return ret;
}

/* SCM Encrypt Command */
int encrypt_scm(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
    int ret;
    img_req cmd;
    scmcall_arg scm_arg = {0};

    cmd.img_ptr = (uint32*) img_ptr;
    cmd.img_len_ptr = img_len_ptr;

    /* Image data is operated upon by TZ, which accesses only the main memory.
     * It must be flushed/invalidated before and after TZ call.
     */
    arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

    if (!is_scm_armv8_support())
    {
        ret = scm_call(SCM_SVC_SSD, SSD_ENCRYPT_ID, &cmd, sizeof(cmd), NULL, 0);
    }
    else
    {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_ENCRYPT_ID);
        scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_BUFFER_READWRITE);
        scm_arg.x2 = (uint32_t) cmd.img_ptr;
        scm_arg.x3 = (uint32_t) cmd.img_len_ptr;

        ret = scm_call2(&scm_arg, NULL);
    }

    /* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
     * before we use them.
     */
    arch_clean_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
    arch_clean_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

    /* Invalidate the updated image data */
    arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

    return ret;
}

/* SCM Decrypt Command */
int decrypt_scm(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
    int ret;
    img_req cmd;

    if (is_scm_armv8_support())
    {
        dprintf(INFO, "%s:SCM call is not supported\n", __func__);
        return -1;
    }

    cmd.img_ptr = (uint32*) img_ptr;
    cmd.img_len_ptr = img_len_ptr;

    /* Image data is operated upon by TZ, which accesses only the main memory.
     * It must be flushed/invalidated before and after TZ call.
     */
    arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

    ret = scm_call(SCM_SVC_SSD, SSD_DECRYPT_ID, &cmd, sizeof(cmd), NULL, 0);

    /* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
     * before we use them.
     */
    arch_clean_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
    arch_clean_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

    /* Invalidate the updated image data */
    arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

    return ret;
}

static int ssd_image_is_encrypted(uint32_t ** img_ptr, uint32_t * img_len_ptr, uint32 * ctx_id)
{
    int ret = 0;
    ssd_parse_md_req parse_req = {0};
    ssd_parse_md_rsp parse_rsp = {0};
    int prev_len = 0;
    scmcall_arg scm_arg = {0};
    scmcall_ret scm_ret = {0};
    /* Populate the meta-data pointer. Here md_len is the meta-data length.
     * The code below follows a growing-length approach: first send
     * min(*img_len_ptr, SSD_HEADER_MIN_SIZE), say 128 bytes for example.
     * If parse_rsp.status == PARSING_INCOMPLETE we send md_len = 256.
     * If the subsequent status is still PARSING_INCOMPLETE we send md_len = 512,
     * 1024 bytes and so on until we get a valid response (rsp.status) from TZ.
     */
    parse_req.md = (uint32*)*img_ptr;
    parse_req.md_len = ((*img_len_ptr) >= SSD_HEADER_MIN_SIZE) ? SSD_HEADER_MIN_SIZE : (*img_len_ptr);

    arch_clean_invalidate_cache_range((addr_t) *img_ptr, parse_req.md_len);

    do
    {
        if (!is_scm_armv8_support())
        {
            ret = scm_call(SCM_SVC_SSD,
                           SSD_PARSE_MD_ID,
                           &parse_req,
                           sizeof(parse_req),
                           &parse_rsp,
                           sizeof(parse_rsp));
        }
        else
        {
            scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_PARSE_MD_ID);
            scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_BUFFER_READWRITE);
            scm_arg.x2 = parse_req.md_len;
            scm_arg.x3 = (uint32_t) parse_req.md;
            scm_arg.atomic = true;

            ret = scm_call2(&scm_arg, &scm_ret);
            parse_rsp.status = scm_ret.x1;
        }
        if(!ret && (parse_rsp.status == SSD_PMD_PARSING_INCOMPLETE))
        {
            prev_len = parse_req.md_len;

            parse_req.md_len *= MULTIPLICATION_FACTOR;

            arch_clean_invalidate_cache_range((addr_t) (*img_ptr + prev_len),
                                              (parse_req.md_len - prev_len));

            continue;
        }
        else
            break;

    } while(true);

    if(!ret)
    {
        if(parse_rsp.status == SSD_PMD_ENCRYPTED)
        {
            *ctx_id = parse_rsp.md_ctx_id;
            *img_len_ptr = *img_len_ptr - ((uint8_t*)parse_rsp.md_end_ptr - (uint8_t*)*img_ptr);
            *img_ptr = (uint32_t*)parse_rsp.md_end_ptr;
        }

        ret = parse_rsp.status;
    }
    else
    {
        dprintf(CRITICAL, "ssd_image_is_encrypted call failed");

        ASSERT(ret == 0);
    }

    return ret;
}
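/*
 * Worked example of the growing-length parse above (illustrative numbers,
 * following the 128-byte starting size and doubling described in the comment):
 * the loop submits md_len = 128, then 256, 512, 1024, ... until TZ stops
 * returning SSD_PMD_PARSING_INCOMPLETE, and before each retry only the newly
 * grown tail of the buffer (md_len - prev_len bytes) is flushed to memory.
 */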

int decrypt_scm_v2(uint32_t ** img_ptr, uint32_t * img_len_ptr)
{
    int ret = 0;
    uint32 ctx_id = 0;
    ssd_decrypt_img_frag_req decrypt_req;
    ssd_decrypt_img_frag_rsp decrypt_rsp;
    scmcall_arg scm_arg = {0};
    scmcall_ret scm_ret = {0};

    ret = ssd_image_is_encrypted(img_ptr, img_len_ptr, &ctx_id);
    switch(ret)
    {
        case SSD_PMD_ENCRYPTED:
            /* Image data is operated upon by TZ, which accesses only the main memory.
             * It must be flushed/invalidated before and after TZ call.
             */
            arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

            /* decrypt the image here */

            decrypt_req.md_ctx_id = ctx_id;
            decrypt_req.last_frag = 1;
            decrypt_req.frag_len = *img_len_ptr;
            decrypt_req.frag = *img_ptr;

            if (!is_scm_armv8_support())
            {
                ret = scm_call(SCM_SVC_SSD,
                               SSD_DECRYPT_IMG_FRAG_ID,
                               &decrypt_req,
                               sizeof(decrypt_req),
                               &decrypt_rsp,
                               sizeof(decrypt_rsp));
            }
            else
            {
                scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_DECRYPT_IMG_FRAG_ID);
                scm_arg.x1 = MAKE_SCM_ARGS(0x4, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_BUFFER_READWRITE);
                scm_arg.x2 = decrypt_req.md_ctx_id;
                scm_arg.x3 = decrypt_req.last_frag;
                scm_arg.x4 = decrypt_req.frag_len;
                scm_arg.x5[0] = (uint32_t) decrypt_req.frag;

                ret = scm_call2(&scm_arg, &scm_ret);
                decrypt_rsp.status = scm_ret.x1;
            }
            if(!ret){
                ret = decrypt_rsp.status;
            }

            /* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
             * before we use them.
             */
            arch_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
            arch_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

            /* Invalidate the updated image data */
            arch_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

            break;

        case SSD_PMD_NOT_ENCRYPTED:
        case SSD_PMD_NO_MD_FOUND:
            ret = 0;
            break;

        case SSD_PMD_BUSY:
        case SSD_PMD_BAD_MD_PTR_OR_LEN:
        case SSD_PMD_PARSING_INCOMPLETE:
        case SSD_PMD_PARSING_FAILED:
        case SSD_PMD_SETUP_CIPHER_FAILED:
            dprintf(CRITICAL, "decrypt_scm_v2: failed status %d\n", ret);
            break;

        default:
            dprintf(CRITICAL, "decrypt_scm_v2: case default: failed status %d\n", ret);
            break;
    }
    return ret;
}

int scm_svc_version(uint32 * major, uint32 * minor)
{
    feature_version_req feature_req;
    feature_version_rsp feature_rsp;
    int ret = 0;
    scmcall_arg scm_arg = {0};
    scmcall_ret scm_ret = {0};

    feature_req.feature_id = TZBSP_FVER_SSD;

    if (!is_scm_armv8_support())
    {
        ret = scm_call(TZBSP_SVC_INFO,
                       TZ_INFO_GET_FEATURE_ID,
                       &feature_req,
                       sizeof(feature_req),
                       &feature_rsp,
                       sizeof(feature_rsp));
    }
    else
    {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, TZ_INFO_GET_FEATURE_ID);
        scm_arg.x1 = MAKE_SCM_ARGS(0x1, SMC_PARAM_TYPE_VALUE);
        scm_arg.x2 = feature_req.feature_id;

        ret = scm_call2(&scm_arg, &scm_ret);
        feature_rsp.version = scm_ret.x1;
    }

    if(!ret)
        *major = TZBSP_GET_FEATURE_VERSION(feature_rsp.version);

    return ret;
}

int scm_svc_get_secure_state(uint32_t *state_low, uint32_t *state_high)
{
    get_secure_state_req req;
    get_secure_state_rsp rsp;

    int ret = 0;

    scmcall_arg scm_arg = {0};
    scmcall_ret scm_ret = {0};

    if (!is_scm_armv8_support())
    {
        req.status_ptr = (uint32_t*)&rsp;
        req.status_len = sizeof(rsp);

        ret = scm_call(TZBSP_SVC_INFO,
                       TZ_INFO_GET_SECURE_STATE,
                       &req,
                       sizeof(req),
                       NULL,
                       0);
    }
    else
    {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, TZ_INFO_GET_SECURE_STATE);
        scm_arg.x1 = MAKE_SCM_ARGS(0x0);

        ret = scm_call2(&scm_arg, &scm_ret);

        rsp.status_low = scm_ret.x1;
        rsp.status_high = scm_ret.x2;
    }

    if(!ret)
    {
        *state_low = rsp.status_low;
        *state_high = rsp.status_high;
    }

    return ret;
}

int scm_protect_keystore(uint32_t * img_ptr, uint32_t img_len)
{
    int ret = 0;
    ssd_protect_keystore_req protect_req;
    ssd_protect_keystore_rsp protect_rsp;
    scmcall_arg scm_arg = {0};
    scmcall_ret scm_ret = {0};

    protect_req.keystore_ptr = img_ptr;
    protect_req.keystore_len = img_len;

    arch_clean_invalidate_cache_range((addr_t) img_ptr, img_len);

    if (!is_scm_armv8_support())
    {
        ret = scm_call(SCM_SVC_SSD,
                       SSD_PROTECT_KEYSTORE_ID,
                       &protect_req,
                       sizeof(protect_req),
                       &protect_rsp,
                       sizeof(protect_rsp));
    }
    else
    {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_SSD, SSD_PROTECT_KEYSTORE_ID);
        scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_VALUE);
        scm_arg.x2 = (uint32_t) protect_req.keystore_ptr;
        scm_arg.x3 = protect_req.keystore_len;

        ret = scm_call2(&scm_arg, &scm_ret);
        protect_rsp.status = scm_ret.x1;
    }
    if(!ret)
    {
        if(protect_rsp.status == TZBSP_SSD_PKS_SUCCESS)
            dprintf(INFO, "Successfully loaded the keystore ");
        else
        {
            dprintf(INFO, "Loading keystore failed status %d ", protect_rsp.status);
            ret = protect_rsp.status;
        }
    }
    else
        dprintf(INFO, "scm_call failed ");

    return ret;
}

uint32_t set_tamper_fuse_cmd(uint32_t fuse_id)
{
    uint32_t svc_id;
    uint32_t cmd_id;
    void *cmd_buf;
    size_t cmd_len;
    void *resp_buf = NULL;
    size_t resp_len = 0;
    scmcall_arg scm_arg = {0};
    int ret;

    cmd_buf = (void *)&fuse_id;
    cmd_len = sizeof(fuse_id);

    if (!is_scm_armv8_support())
    {
        /* no response */
        resp_buf = NULL;
        resp_len = 0;

        svc_id = SCM_SVC_FUSE;
        cmd_id = SCM_BLOW_SW_FUSE_ID;

        ret = scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
    }
    else
    {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_BLOW_SW_FUSE_ID);
        scm_arg.x1 = MAKE_SCM_ARGS(0x1, SMC_PARAM_TYPE_VALUE);
        scm_arg.x2 = fuse_id;
        scm_arg.x3 = cmd_len;
        ret = scm_call2(&scm_arg, NULL);
    }

    return ret;
}

uint8_t get_tamper_fuse_cmd()
{
    uint32_t svc_id;
    uint32_t cmd_id;
    void *cmd_buf;
    size_t cmd_len;
    size_t resp_len = 0;
    uint8_t resp_buf;

    uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
    scmcall_arg scm_arg = {0};
    scmcall_ret scm_ret = {0};

    cmd_buf = (void *)&fuse_id;
    cmd_len = sizeof(fuse_id);

    if (!is_scm_armv8_support())
    {
        /* response */
        resp_len = sizeof(resp_buf);

        svc_id = SCM_SVC_FUSE;
        cmd_id = SCM_IS_SW_FUSE_BLOWN_ID;

        scm_call(svc_id, cmd_id, cmd_buf, cmd_len, &resp_buf, resp_len);
        return resp_buf;
    }
    else
    {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_IS_SW_FUSE_BLOWN_ID);
        scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_VALUE);
        scm_arg.x2 = (uint32_t) cmd_buf;
        scm_arg.x3 = cmd_len;

        scm_call2(&scm_arg, &scm_ret);
        return (uint8_t)scm_ret.x1;
    }
}

/*
 * struct qseecom_save_partition_hash_req
 * @partition_id - partition id.
 * @digest[SHA256_DIGEST_LENGTH] - sha256 digest.
 */
struct qseecom_save_partition_hash_req {
    uint32_t partition_id; /* in */
    uint8_t digest[SHA256_DIGEST_LENGTH]; /* in */
};

void save_kernel_hash_cmd(void *digest)
{
    uint32_t svc_id;
    uint32_t cmd_id;
    void *cmd_buf;
    size_t cmd_len;
    void *resp_buf = NULL;
    size_t resp_len = 0;
    struct qseecom_save_partition_hash_req req;
    scmcall_arg scm_arg = {0};

    /* no response */
    resp_buf = NULL;
    resp_len = 0;

    req.partition_id = 0; /* kernel */
    memcpy(req.digest, digest, sizeof(req.digest));

    if (!is_scm_armv8_support())
    {
        svc_id = SCM_SVC_ES;
        cmd_id = SCM_SAVE_PARTITION_HASH_ID;
        cmd_buf = (void *)&req;
        cmd_len = sizeof(req);

        scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
    }
    else
    {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID);
        scm_arg.x1 = MAKE_SCM_ARGS(0x3, 0, SMC_PARAM_TYPE_BUFFER_READWRITE);
        scm_arg.x2 = req.partition_id;
        scm_arg.x3 = (uint32_t) &req.digest;
        scm_arg.x4 = sizeof(req.digest);

        if (scm_call2(&scm_arg, NULL))
            dprintf(CRITICAL, "Failed to Save kernel hash\n");
    }
}

int mdtp_cipher_dip_cmd(uint8_t *in_buf, uint32_t in_buf_size, uint8_t *out_buf,
                        uint32_t out_buf_size, uint32_t direction)
{
    uint32_t svc_id;
    uint32_t cmd_id;
    void *cmd_buf;
    void *rsp_buf;
    size_t cmd_len;
    size_t rsp_len;
    mdtp_cipher_dip_req req;
    scmcall_arg scm_arg = {0};
    scmcall_ret scm_ret = {0};

    ASSERT(in_buf != NULL);
    ASSERT(out_buf != NULL);

    req.in_buf = in_buf;
    req.in_buf_size = in_buf_size;
    req.out_buf = out_buf;
    req.out_buf_size = out_buf_size;
    req.direction = direction;

    if (!is_scm_armv8_support())
    {
        svc_id = SCM_SVC_MDTP;
        cmd_id = SCM_MDTP_CIPHER_DIP;
        cmd_buf = (void *)&req;
        cmd_len = sizeof(req);
        rsp_buf = NULL;
        rsp_len = 0;

        if (scm_call(svc_id, cmd_id, cmd_buf, cmd_len, rsp_buf, rsp_len))
        {
            dprintf(CRITICAL, "Failed to call Cipher DIP SCM\n");
            return -1;
        }
    }
    else
    {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_MDTP, SCM_MDTP_CIPHER_DIP);
        scm_arg.x1 = MAKE_SCM_ARGS(0x5, SMC_PARAM_TYPE_BUFFER_READ, SMC_PARAM_TYPE_VALUE,
                                   SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE);
        scm_arg.x2 = (uint32_t)req.in_buf;
        scm_arg.x3 = req.in_buf_size;
        scm_arg.x4 = (uint32_t)req.out_buf;
        scm_arg.x5[0] = req.out_buf_size;
        scm_arg.x5[1] = req.direction;

        if (scm_call2(&scm_arg, &scm_ret))
        {
            dprintf(CRITICAL, "Failed in Cipher DIP SCM call\n");
            return -1;
        }
    }

    return 0;
}

int qfprom_read_row_cmd(uint32_t row_address,
                        uint32_t addr_type,
                        uint32_t *row_data,
                        uint32_t *qfprom_api_status)
{
    uint32_t svc_id;
    uint32_t cmd_id;
    void *cmd_buf;
    void *rsp_buf;
    size_t cmd_len;
    size_t rsp_len;
    qfprom_read_row_req req;
    scmcall_arg scm_arg = {0};
    scmcall_ret scm_ret = {0};

    req.row_address = row_address;
    req.addr_type = addr_type;
    req.row_data = row_data;
    req.qfprom_api_status = qfprom_api_status;

    if (!is_scm_armv8_support())
    {
        svc_id = SCM_SVC_FUSE;
        cmd_id = SCM_QFPROM_READ_ROW_ID;
        cmd_buf = (void *)&req;
        cmd_len = sizeof(req);
        rsp_buf = NULL;
        rsp_len = 0;

        if (scm_call(svc_id, cmd_id, cmd_buf, cmd_len, rsp_buf, rsp_len))
        {
            dprintf(CRITICAL, "Failed to call SCM_SVC_FUSE.SCM_QFPROM_READ_ROW_ID SCM\n");
            return -1;
        }
    }
    else
    {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_FUSE, SCM_QFPROM_READ_ROW_ID);
        scm_arg.x1 = MAKE_SCM_ARGS(0x4, SMC_PARAM_TYPE_VALUE, SMC_PARAM_TYPE_VALUE,
                                   SMC_PARAM_TYPE_BUFFER_READWRITE, SMC_PARAM_TYPE_BUFFER_READWRITE);
        scm_arg.x2 = req.row_address;
        scm_arg.x3 = req.addr_type;
        scm_arg.x4 = (uint32_t)req.row_data;
        scm_arg.x5[0] = (uint32_t)req.qfprom_api_status;

        if (scm_call2(&scm_arg, &scm_ret))
        {
            dprintf(CRITICAL, "Failed to call SCM_SVC_FUSE.SCM_QFPROM_READ_ROW_ID SCM\n");
            return -1;
        }
    }

    return 0;
}

/*
 * Switches the CE1 channel between ADM and register usage.
 * channel : AP_CE_REGISTER_USE, CE1 uses register interface
 *         : AP_CE_ADM_USE, CE1 uses ADM interface
 */
uint8_t switch_ce_chn_cmd(enum ap_ce_channel_type channel)
{
    uint32_t svc_id;
    uint32_t cmd_id;
    void *cmd_buf;
    size_t cmd_len;
    size_t resp_len = 0;
    uint8_t resp_buf;

    struct {
        uint32_t resource;
        uint32_t chn_id;
    }__PACKED switch_ce_chn_buf;

    if (is_scm_armv8_support())
    {
        dprintf(INFO, "%s:SCM call is not supported\n", __func__);
        return 0;
    }

    switch_ce_chn_buf.resource = TZ_RESOURCE_CE_AP;
    switch_ce_chn_buf.chn_id = channel;
    cmd_buf = (void *)&switch_ce_chn_buf;
    cmd_len = sizeof(switch_ce_chn_buf);

    /* response */
    resp_len = sizeof(resp_buf);

    svc_id = SCM_SVC_CE_CHN_SWITCH_ID;
    cmd_id = SCM_CE_CHN_SWITCH_ID;

    scm_call(svc_id, cmd_id, cmd_buf, cmd_len, &resp_buf, resp_len);
    return resp_buf;
}

int scm_halt_pmic_arbiter()
{
    int ret = 0;
    scmcall_arg scm_arg = {0};

    if (is_scm_armv8_support()) {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER);
        scm_arg.x1 = MAKE_SCM_ARGS(0x1);
        scm_arg.x2 = 0;
        scm_arg.atomic = true;
        ret = scm_call2(&scm_arg, NULL);
    } else {
        ret = scm_call_atomic(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER, 0);
    }

    /* Retry with the SCM_IO_DISABLE_PMIC_ARBITER1 func ID if the above func ID fails */
    if(ret) {
        if (is_scm_armv8_support()) {
            scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER1);
            scm_arg.x1 = MAKE_SCM_ARGS(0x1);
            scm_arg.x2 = 0;
            scm_arg.atomic = true;
            ret = scm_call2(&scm_arg, NULL);
        } else
            ret = scm_call_atomic(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER1, 0);
    }

    return ret;
}

/* Exception Level exec secure-os call.
 * Jumps to the kernel via secure-os and does not return
 * on a successful jump. System parameters are set up and
 * passed on to secure-os, which uses them to boot the
 * kernel.
 *
 * @kernel_entry : kernel entry point passed in as link register.
 * @dtb_offset   : dt blob address passed in as w0.
 * @svc_id       : indicates direction of switch, 32->64 or 64->32.
 *
 * Assumes all sanity checks have been performed on arguments.
 */
void scm_elexec_call(paddr_t kernel_entry, paddr_t dtb_offset)
{
    uint32_t svc_id = SCM_SVC_MILESTONE_32_64_ID;
    uint32_t cmd_id = SCM_SVC_MILESTONE_CMD_ID;
    void *cmd_buf;
    size_t cmd_len;
    static el1_system_param param __attribute__((aligned(0x1000)));
    scmcall_arg scm_arg = {0};

    param.el1_x0 = dtb_offset;
    param.el1_elr = kernel_entry;

    /* Response Buffer = Null as no response expected */
    dprintf(INFO, "Jumping to kernel via monitor\n");

    if (!is_scm_armv8_support())
    {
        /* Command Buffer */
        cmd_buf = (void *)&param;
        cmd_len = sizeof(el1_system_param);

        scm_call(svc_id, cmd_id, cmd_buf, cmd_len, NULL, 0);
    }
    else
    {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_MILESTONE_32_64_ID, SCM_SVC_MILESTONE_CMD_ID);
        scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READ);
        scm_arg.x2 = (uint32_t) &param;
        scm_arg.x3 = sizeof(el1_system_param);

        scm_call2(&scm_arg, NULL);
    }

    /* Assert if execution ever reaches here */
    dprintf(CRITICAL, "Failed to jump to kernel\n");
    ASSERT(0);
}
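/*
 * Usage sketch (illustrative, not part of this file): the boot path is
 * expected to hand over physical addresses and never regain control, e.g.
 *
 *   scm_elexec_call((paddr_t)kernel_load_addr, (paddr_t)tags_addr);
 *   // not reached on success
 *
 * kernel_load_addr and tags_addr are placeholder names for the caller's values.
 */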

/* SCM Random Command */
int scm_random(uintptr_t * rbuf, uint32_t r_len)
{
    int ret;
    struct tz_prng_data data;
    scmcall_arg scm_arg = {0};
    // Memory passed to TZ should be aligned to a cache line
    BUF_DMA_ALIGN(rand_buf, sizeof(uintptr_t));

    // r_len must be less than or equal to sizeof(rand_buf) to avoid memory corruption.
    if (r_len > sizeof(rand_buf))
    {
        dprintf(CRITICAL, "r_len is larger than sizeof(rand_buf).");
        return -1;
    }

    if (!is_scm_armv8_support())
    {
        data.out_buf = (uint8_t*) rand_buf;
        data.out_buf_size = r_len;

        /*
         * random buffer must be flushed/invalidated before and after TZ call.
         */
        arch_clean_invalidate_cache_range((addr_t) rand_buf, r_len);

        ret = scm_call(TZ_SVC_CRYPTO, PRNG_CMD_ID, &data, sizeof(data), NULL, 0);

        /* Invalidate the updated random buffer */
        arch_clean_invalidate_cache_range((addr_t) rand_buf, r_len);
    }
    else
    {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(TZ_SVC_CRYPTO, PRNG_CMD_ID);
        scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READWRITE);
        scm_arg.x2 = (uint32_t) rand_buf;
        scm_arg.x3 = r_len;

        arch_clean_invalidate_cache_range((addr_t) rand_buf, r_len);

        ret = scm_call2(&scm_arg, NULL);
        if (!ret)
            arch_clean_invalidate_cache_range((addr_t) rand_buf, r_len);
        else
            dprintf(CRITICAL, "Secure canary SCM failed: %x\n", ret);
    }

    // Copy back into the return buffer
    memscpy(rbuf, r_len, rand_buf, sizeof(rand_buf));
    return ret;
}

uintptr_t get_canary()
{
    uintptr_t canary;
    if(scm_random(&canary, sizeof(canary))) {
        dprintf(CRITICAL, "scm_call for random failed !!!");
        /*
         * fall back to use lib rand API if scm call failed.
         */
        canary = rand();
    }

    return canary;
}
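/*
 * Usage sketch (illustrative, outside this file): callers typically use the
 * returned value as an unpredictable seed, for example a stack-protector
 * guard; the guard variable name below is an assumption, not defined here.
 *
 *   __stack_chk_guard = get_canary();
 */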

int scm_xpu_err_fatal_init()
{
    uint32_t ret = 0;
    uint32_t response = 0;
    tz_xpu_prot_cmd cmd;
    scmcall_arg scm_arg = {0};
    scmcall_ret scm_ret = {0};

    if (!is_scm_armv8_support())
    {
        cmd.config = ERR_FATAL_ENABLE;
        cmd.spare = 0;

        ret = scm_call(SVC_MEMORY_PROTECTION, XPU_ERR_FATAL, &cmd, sizeof(cmd), &response,
                       sizeof(response));
    }
    else
    {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(SVC_MEMORY_PROTECTION, XPU_ERR_FATAL);
        scm_arg.x1 = MAKE_SCM_ARGS(0x2);
        scm_arg.x2 = ERR_FATAL_ENABLE;
        scm_arg.x3 = 0x0;
        ret = scm_call2(&scm_arg, &scm_ret);
        response = scm_ret.x1;
    }

    if (ret)
        dprintf(CRITICAL, "Failed to set XPU violations as fatal errors: %u\n", response);
    else
        dprintf(INFO, "Configured XPU violations to be fatal errors\n");

    return ret;
}

static uint32_t scm_call_a32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, scmcall_ret *ret)
{
    register uint32_t r0 __asm__("r0") = x0;
    register uint32_t r1 __asm__("r1") = x1;
    register uint32_t r2 __asm__("r2") = x2;
    register uint32_t r3 __asm__("r3") = x3;
    register uint32_t r4 __asm__("r4") = x4;
    register uint32_t r5 __asm__("r5") = x5;
    register uint32_t r6 __asm__("r6") = 0;

    do {
        __asm__ volatile(
            __asmeq("%0", "r0")
            __asmeq("%1", "r1")
            __asmeq("%2", "r2")
            __asmeq("%3", "r3")
            __asmeq("%4", "r0")
            __asmeq("%5", "r1")
            __asmeq("%6", "r2")
            __asmeq("%7", "r3")
            __asmeq("%8", "r4")
            __asmeq("%9", "r5")
            __asmeq("%10", "r6")
            "smc #0 @ switch to secure world\n"
            : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
            : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5), "r" (r6));
    } while(r0 == 1);

    if (ret)
    {
        ret->x1 = r1;
        ret->x2 = r2;
        ret->x3 = r3;
    }

    return r0;
}

uint32_t scm_call2(scmcall_arg *arg, scmcall_ret *ret)
{
    uint32_t *indir_arg = NULL;
    uint32_t x5;
    int i;
    uint32_t rc;

    arg->x0 = arg->atomic ? (arg->x0 | SCM_ATOMIC_BIT) : arg->x0;
    x5 = arg->x5[0];

    if ((arg->x1 & 0xF) > SCM_MAX_ARG_LEN - 1)
    {
        indir_arg = memalign(CACHE_LINE, ROUNDUP((SCM_INDIR_MAX_LEN * sizeof(uint32_t)), CACHE_LINE));
        ASSERT(indir_arg);

        for (i = 0; i < SCM_INDIR_MAX_LEN; i++)
        {
            indir_arg[i] = arg->x5[i];
        }
        arch_clean_invalidate_cache_range((addr_t) indir_arg, ROUNDUP((SCM_INDIR_MAX_LEN * sizeof(uint32_t)), CACHE_LINE));
        x5 = (addr_t) indir_arg;
    }

    rc = scm_call_a32(arg->x0, arg->x1, arg->x2, arg->x3, arg->x4, x5, ret);

    if (rc)
    {
        dprintf(CRITICAL, "SCM call: 0x%x failed with :%x\n", arg->x0, rc);
        return rc;
    }

    if (indir_arg)
        free(indir_arg);

    return 0;
}
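/*
 * Usage sketch (illustrative only, mirroring is_scm_call_available() above):
 *
 *   scmcall_arg arg = {0};
 *   scmcall_ret res = {0};
 *   arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);
 *   arg.x1 = MAKE_SCM_ARGS(0x1);
 *   arg.x2 = MAKE_SIP_SCM_CMD(SCM_SVC_BOOT, SCM_DLOAD_CMD);
 *   rc = scm_call2(&arg, &res);
 *
 * A zero rc with a non-zero res.x1 indicates the queried call is exposed by TZ.
 */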

static bool secure_boot_enabled = false;
static bool wdog_debug_fuse_disabled = true;

void scm_check_boot_fuses()
{
    uint32_t ret = 0;
    uint32_t *resp = NULL;
    scmcall_arg scm_arg = {0};
    scmcall_ret scm_ret = {0};
    bool secure_value = false;

    resp = memalign(CACHE_LINE, (2 * sizeof(uint32_t)));
    ASSERT(resp);
    if (!is_scm_armv8_support()) {
        ret = scm_call_atomic2(TZBSP_SVC_INFO, IS_SECURE_BOOT_ENABLED, (uint32_t)resp, 2 * sizeof(uint32_t));
        arch_clean_invalidate_cache_range((addr_t)resp, ROUNDUP((2 * sizeof(uint32_t)), CACHE_LINE));
    } else {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, IS_SECURE_BOOT_ENABLED);
        ret = scm_call2(&scm_arg, &scm_ret);
        resp[0] = scm_ret.x1;
    }

    if (!ret) {
        /* Check for a secure device: Bit#0 = 0, Bit#1 = 0, Bit#2 = 0, Bit#5 = 0 */
        /* Check Bit#6 = 1 only for TZ.BF.4.0 */
        secure_value = !CHECK_BIT(resp[0], SECBOOT_FUSE_BIT) &&
            !CHECK_BIT(resp[0], SECBOOT_FUSE_SHK_BIT) &&
            !CHECK_BIT(resp[0], SECBOOT_FUSE_DEBUG_DISABLED_BIT);

        /* For nand based devices, skip the check of the rpmb enabled bit */
        if (!platform_boot_dev_is_nand())
            secure_value = secure_value && !CHECK_BIT(resp[0], SECBOOT_FUSE_RPMB_ENABLED_BIT);

        if (secure_value) {
            if ((qseecom_get_version() < QSEE_VERSION_40))
                secure_boot_enabled = true;
            else if (CHECK_BIT(resp[0], SECBOOT_FUSE_DEBUG_RE_ENABLED_BIT))
                secure_boot_enabled = true;
        }

        /* Bit 2 - DEBUG_DISABLE_CHECK */
        if (CHECK_BIT(resp[0], SECBOOT_FUSE_DEBUG_DISABLED_BIT))
            wdog_debug_fuse_disabled = false;
    } else
        dprintf(CRITICAL, "scm call to check secure boot fuses failed\n");
    free(resp);
}

bool is_secure_boot_enable()
{
    scm_check_boot_fuses();
    return secure_boot_enabled;
}

static uint32_t scm_io_read(addr_t address)
{
    uint32_t ret;
    scmcall_arg scm_arg = {0};
    scmcall_ret scm_ret = {0};

    if (!is_scm_armv8_support()) {
        ret = scm_call_atomic(SCM_SVC_IO, SCM_IO_READ, address);
    } else {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_IO, SCM_IO_READ);
        scm_arg.x1 = MAKE_SCM_ARGS(0x1);
        scm_arg.x2 = address;
        scm_arg.atomic = true;
        ret = scm_call2(&scm_arg, &scm_ret);
        /* Return the value read if the call is successful */
        if (!ret)
            ret = scm_ret.x1;
    }
    return ret;
}

uint32_t scm_io_write(uint32_t address, uint32_t val)
{
    uint32_t ret;
    scmcall_arg scm_arg = {0};
    scmcall_ret scm_ret = {0};

    if (!is_scm_armv8_support()) {
        ret = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, val);
    } else {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_IO, SCM_IO_WRITE);
        scm_arg.x1 = MAKE_SCM_ARGS(0x2);
        scm_arg.x2 = address;
        scm_arg.x3 = val;
        scm_arg.atomic = true;
        ret = scm_call2(&scm_arg, &scm_ret);
    }
    return ret;
}

int scm_call2_atomic(uint32_t svc, uint32_t cmd, uint32_t arg1, uint32_t arg2)
{
    uint32_t ret = 0;
    scmcall_arg scm_arg = {0};
    scmcall_ret scm_ret = {0};

    if (!is_scm_armv8_support())
    {
        ret = scm_call_atomic2(svc, cmd, arg1, arg2);
    } else {
        scm_arg.x0 = MAKE_SIP_SCM_CMD(svc, cmd);
        scm_arg.x1 = MAKE_SCM_ARGS(0x2);
        scm_arg.x2 = arg1;
        scm_arg.x3 = arg2;
        ret = scm_call2(&scm_arg, &scm_ret);
    }
    return ret;
}

int scm_disable_sdi()
{
    int ret = 0;

    scm_check_boot_fuses();

    /* Make the WDOG_DEBUG_DISABLE scm call only in non-secure boot */
    if(!(secure_boot_enabled || wdog_debug_fuse_disabled)) {
        ret = scm_call2_atomic(SCM_SVC_BOOT, WDOG_DEBUG_DISABLE, 1, 0);
        if(ret)
            dprintf(CRITICAL, "Failed to disable secure wdog debug: %d\n", ret);
    }
    return ret;
}

#if PLATFORM_USE_SCM_DLOAD
int scm_dload_mode(enum reboot_reason mode)
{
    int ret = 0;
    uint32_t dload_type;

    dprintf(SPEW, "DLOAD mode: %d\n", mode);
    if (mode == NORMAL_DLOAD) {
        dload_type = SCM_DLOAD_MODE;
#if DISABLE_DLOAD_MODE
        return 0;
#endif
    } else if(mode == EMERGENCY_DLOAD)
        dload_type = SCM_EDLOAD_MODE;
    else
        dload_type = 0;

    /* Write to the Boot MISC register */
    ret = is_scm_call_available(SCM_SVC_BOOT, SCM_DLOAD_CMD);

    if (ret > 0)
        ret = scm_call2_atomic(SCM_SVC_BOOT, SCM_DLOAD_CMD, dload_type, 0);
    else
        ret = scm_io_write(TCSR_BOOT_MISC_DETECT, dload_type);

    if(ret) {
        dprintf(CRITICAL, "Failed to write to boot misc: %d\n", ret);
        return ret;
    }

#if !DISABLE_DLOAD_MODE
    return scm_disable_sdi();
#else
    return ret;
#endif
}

bool scm_device_enter_dload()
{
    uint32_t ret = 0;
    uint32_t dload_mode = 0;

    scmcall_arg scm_arg = {0};
    scmcall_ret scm_ret = {0};

    scm_arg.x0 = MAKE_SIP_SCM_CMD(TZ_SVC_DLOAD_MODE, SCM_DLOAD_CMD);
    ret = scm_call2(&scm_arg, &scm_ret);
    if (ret)
        dprintf(CRITICAL, "SCM call to check dload mode failed: %x\n", ret);

    if (!ret)
    {
        dload_mode = scm_io_read(TCSR_BOOT_MISC_DETECT);
        if (board_soc_version() < 0x30000)
            dload_mode = (dload_mode >> 16) & 0xFFFF;
    }

    if (dload_mode == SCM_DLOAD_MODE)
        return true;

    return false;
}
#endif
bool allow_set_fuse(uint32_t version)
{
    /* if ((major > 4) || (major == 4 && minor > 0)) */
    if((((version >> 22) & 0x3FF) > 4)
        || (((version >> 22) & 0x3FF) == 4 && ((version >> 12) & 0x3FF) > 0))
    {
        return TRUE;
    } else {
        return FALSE;
    }
}