/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *     * Neither the name of The Linux Foundation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <asm.h>
#include <bits.h>
#include <arch/ops.h>
#include "scm.h"

#pragma GCC optimize ("O0")

/* From Linux Kernel asm/system.h */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef offsetof
#  define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif

#define SCM_CLASS_REGISTER      (0x2 << 8)
#define SCM_MASK_IRQS           BIT(5)
#define SCM_ATOMIC(svc, cmd, n) ((((((svc) & 0x3f) << 10)|((cmd) & 0x3ff)) << 12) | \
				SCM_CLASS_REGISTER | \
				SCM_MASK_IRQS | \
				((n) & 0xf))
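
/*
 * Illustrative layout of the packed atomic call ID produced by SCM_ATOMIC()
 * (a sketch derived from the macro above, not an official register map):
 *
 *   bits [27:22]  svc  - service ID (6 bits)
 *   bits [21:12]  cmd  - command ID (10 bits)
 *   bit  [9]      SCM_CLASS_REGISTER - register-based call
 *   bit  [5]      SCM_MASK_IRQS      - IRQs masked for the call
 *   bits [3:0]    n    - number of arguments
 *
 * For example, SCM_ATOMIC(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER, 1) packs
 * the power service ID, command ID and a one-argument count into a single
 * word, which is the ID scm_call_atomic() builds for scm_halt_pmic_arbiter()
 * below.
 */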

/**
 * alloc_scm_command() - Allocate an SCM command
 * @cmd_size: size of the command buffer
 * @resp_size: size of the response buffer
 *
 * Allocate an SCM command, including enough room for the command
 * and response headers as well as the command and response buffers.
 *
 * Returns a valid &scm_command on success or %NULL if the allocation fails.
 */
static struct scm_command *alloc_scm_command(size_t cmd_size, size_t resp_size)
{
	struct scm_command *cmd;
	size_t len = sizeof(*cmd) + sizeof(struct scm_response) + cmd_size +
		     resp_size;

	cmd = memalign(CACHE_LINE, ROUNDUP(len, CACHE_LINE));
	if (cmd) {
		memset(cmd, 0, len);
		cmd->len = len;
		cmd->buf_offset = offsetof(struct scm_command, buf);
		cmd->resp_hdr_offset = cmd->buf_offset + cmd_size;
	}
	return cmd;
}
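
/*
 * Resulting allocation layout (a sketch based on the offsets set above; the
 * response buffer offset is filled in by the secure world):
 *
 *   cmd                        -> struct scm_command header
 *   cmd + cmd->buf_offset      -> command buffer   (cmd_size bytes)
 *   cmd + cmd->resp_hdr_offset -> struct scm_response header
 *   rsp + rsp->buf_offset      -> response buffer  (resp_size bytes)
 */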

/**
 * free_scm_command() - Free an SCM command
 * @cmd: command to free
 *
 * Free an SCM command.
 */
static inline void free_scm_command(struct scm_command *cmd)
{
	free(cmd);
}

/**
 * scm_command_to_response() - Get a pointer to a scm_response
 * @cmd: command
 *
 * Returns a pointer to a response for a command.
 */
static inline struct scm_response *scm_command_to_response(const struct
							    scm_command *cmd)
{
	return (void *)cmd + cmd->resp_hdr_offset;
}

/**
 * scm_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * Returns a pointer to the command buffer of a command.
 */
static inline void *scm_get_command_buffer(const struct scm_command *cmd)
{
	return (void *)cmd->buf;
}

/**
 * scm_get_response_buffer() - Get a pointer to a response buffer
 * @rsp: response
 *
 * Returns a pointer to a response buffer of a response.
 */
static inline void *scm_get_response_buffer(const struct scm_response *rsp)
{
	return (void *)rsp + rsp->buf_offset;
}

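/**
 * smc() - Issue the SMC instruction for a command-based SCM call
 * @cmd_addr: address of the flushed scm_command buffer
 *
 * Register convention (a sketch, assumed to mirror the Linux MSM SCM driver
 * this code derives from): r0 = 1 selects the command-based call, r1 points
 * to a context-id word the secure world may use, and r2 carries the command
 * address. The call is retried while r0 comes back as 1, which indicates the
 * call was interrupted.
 */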
static uint32_t smc(uint32_t cmd_addr)
{
	uint32_t context_id;
	register uint32_t r0 __asm__("r0") = 1;
	register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
	register uint32_t r2 __asm__("r2") = cmd_addr;

	__asm__(
		"1:	smc	#0	@ switch to secure world\n"
		"	cmp	r0, #1\n"
		"	beq	1b\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2)
		: "r3", "cc");
	return r0;
}

/**
 * scm_call_atomic() - Make an SCM call with one or no arguments
 * @svc: service ID
 * @cmd: command ID
 * @arg1: argument
 */
static int scm_call_atomic(uint32_t svc, uint32_t cmd, uint32_t arg1)
{
	uint32_t context_id;
	register uint32_t r0 __asm__("r0") = SCM_ATOMIC(svc, cmd, 1);
	register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
	register uint32_t r2 __asm__("r2") = arg1;

	__asm__ volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
		"smc	#0	@ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2)
		: "r3");
	return r0;
}
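
/*
 * Example (mirrors scm_halt_pmic_arbiter() below): a single-argument atomic
 * call into the power service.
 *
 *   ret = scm_call_atomic(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER, 0);
 */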

/**
 * scm_call_atomic2() - Send an atomic SCM command with two arguments
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 * @arg2: second argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
 */
int scm_call_atomic2(uint32_t svc, uint32_t cmd, uint32_t arg1, uint32_t arg2)
{
	int context_id;
	register uint32_t r0 __asm__("r0") = SCM_ATOMIC(svc, cmd, 2);
	register uint32_t r1 __asm__("r1") = (uint32_t)&context_id;
	register uint32_t r2 __asm__("r2") = arg1;
	register uint32_t r3 __asm__("r3") = arg2;

	__asm__ volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
		__asmeq("%4", "r3")
		"smc	#0	@ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2), "r" (r3));
	return r0;
}

/**
 * scm_call() - Send an SCM command
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 */
int
scm_call(uint32_t svc_id, uint32_t cmd_id, const void *cmd_buf,
	 size_t cmd_len, void *resp_buf, size_t resp_len)
{
	int ret;
	struct scm_command *cmd;
	struct scm_response *rsp;
	uint8_t *resp_ptr;

	cmd = alloc_scm_command(cmd_len, resp_len);
	if (!cmd)
		return ERR_NO_MEMORY;

	cmd->id = (svc_id << 10) | cmd_id;
	if (cmd_buf)
		memcpy(scm_get_command_buffer(cmd), cmd_buf, cmd_len);

	/* Flush command to main memory for TZ */
	arch_clean_invalidate_cache_range((addr_t) cmd, cmd->len);

	ret = smc((uint32_t) cmd);
	if (ret)
		goto out;

	if (resp_len) {
		rsp = scm_command_to_response(cmd);

		do {
			/* Need to invalidate before each check since TZ will
			 * update the response complete flag in main memory.
			 */
			arch_invalidate_cache_range((addr_t) rsp, sizeof(*rsp));
		} while (!rsp->is_complete);

		resp_ptr = scm_get_response_buffer(rsp);

		/* Invalidate any cached response data */
		arch_invalidate_cache_range((addr_t) resp_ptr, resp_len);

		if (resp_buf)
			memcpy(resp_buf, resp_ptr, resp_len);
	}
out:
	free_scm_command(cmd);
	return ret;
}
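
/*
 * Typical usage sketch (mirrors restore_secure_cfg() below): the request
 * struct is passed as the command buffer, and the response buffer may be
 * NULL when no payload is expected back.
 *
 *   tz_secure_cfg cfg = { .id = id, .spare = 0 };
 *   ret = scm_call(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG,
 *                  &cfg, sizeof(cfg), NULL, 0);
 */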

int restore_secure_cfg(uint32_t id)
{
	int ret = 0;
	tz_secure_cfg secure_cfg;

	secure_cfg.id = id;
	secure_cfg.spare = 0;

	ret = scm_call(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG, &secure_cfg,
		       sizeof(secure_cfg), NULL, 0);

	if (ret) {
		dprintf(CRITICAL, "Secure Config failed\n");
		ret = 1;
	}

	return ret;
}

/* SCM Encrypt Command */
int encrypt_scm(uint32_t **img_ptr, uint32_t *img_len_ptr)
{
	int ret;
	img_req cmd;

	cmd.img_ptr = (uint32 *) img_ptr;
	cmd.img_len_ptr = img_len_ptr;

	/* Image data is operated upon by TZ, which accesses only the main memory.
	 * It must be flushed/invalidated before and after TZ call.
	 */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	ret = scm_call(SCM_SVC_SSD, SSD_ENCRYPT_ID, &cmd, sizeof(cmd), NULL, 0);

	/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
	 * before we use them.
	 */
	arch_clean_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
	arch_clean_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

	/* Invalidate the updated image data */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	return ret;
}

/* SCM Decrypt Command */
int decrypt_scm(uint32_t **img_ptr, uint32_t *img_len_ptr)
{
	int ret;
	img_req cmd;

	cmd.img_ptr = (uint32 *) img_ptr;
	cmd.img_len_ptr = img_len_ptr;

	/* Image data is operated upon by TZ, which accesses only the main memory.
	 * It must be flushed/invalidated before and after TZ call.
	 */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	ret = scm_call(SCM_SVC_SSD, SSD_DECRYPT_ID, &cmd, sizeof(cmd), NULL, 0);

	/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
	 * before we use them.
	 */
	arch_clean_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
	arch_clean_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

	/* Invalidate the updated image data */
	arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

	return ret;
}

static int ssd_image_is_encrypted(uint32_t **img_ptr, uint32_t *img_len_ptr, uint32 *ctx_id)
{
	int ret = 0;
	ssd_parse_md_req parse_req;
	ssd_parse_md_rsp parse_rsp;
	int prev_len = 0;

	/* Populate the meta-data pointer. Here md_len is the meta-data length.
	 * The code below follows a growing-length approach: first send
	 * min(*img_len_ptr, SSD_HEADER_MIN_SIZE), say 128 bytes for example.
	 * If parse_rsp.status == PARSING_INCOMPLETE we send md_len = 256.
	 * If the subsequent status is still PARSING_INCOMPLETE we send
	 * md_len = 512, 1024 bytes and so on until we get a valid response
	 * (rsp.status) from TZ.
	 */
	parse_req.md = (uint32 *)*img_ptr;
	parse_req.md_len = ((*img_len_ptr) >= SSD_HEADER_MIN_SIZE) ? SSD_HEADER_MIN_SIZE : (*img_len_ptr);

	arch_clean_invalidate_cache_range((addr_t) *img_ptr, parse_req.md_len);

	do {
		ret = scm_call(SCM_SVC_SSD,
			       SSD_PARSE_MD_ID,
			       &parse_req,
			       sizeof(parse_req),
			       &parse_rsp,
			       sizeof(parse_rsp));

		if (!ret && (parse_rsp.status == SSD_PMD_PARSING_INCOMPLETE)) {
			prev_len = parse_req.md_len;

			parse_req.md_len *= MULTIPLICATION_FACTOR;

			arch_clean_invalidate_cache_range((addr_t) (*img_ptr + prev_len),
							  (parse_req.md_len - prev_len));

			continue;
		} else {
			break;
		}
	} while (true);

	if (!ret) {
		if (parse_rsp.status == SSD_PMD_ENCRYPTED) {
			*ctx_id = parse_rsp.md_ctx_id;
			*img_len_ptr = *img_len_ptr - ((uint8_t *)parse_rsp.md_end_ptr - (uint8_t *)*img_ptr);
			*img_ptr = (uint32_t *)parse_rsp.md_end_ptr;
		}

		ret = parse_rsp.status;
	} else {
		dprintf(CRITICAL, "ssd_image_is_encrypted call failed\n");

		ASSERT(ret == 0);
	}

	return ret;
}
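
/*
 * Probe-size illustration (assuming SSD_HEADER_MIN_SIZE is 128 and
 * MULTIPLICATION_FACTOR is 2, per the comment above): metadata windows of
 * 128, 256, 512, ... bytes are offered to TZ until parse_rsp.status stops
 * reporting SSD_PMD_PARSING_INCOMPLETE.
 */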

int decrypt_scm_v2(uint32_t **img_ptr, uint32_t *img_len_ptr)
{
	int ret = 0;
	uint32 ctx_id = 0;
	ssd_decrypt_img_frag_req decrypt_req;
	ssd_decrypt_img_frag_rsp decrypt_rsp;

	ret = ssd_image_is_encrypted(img_ptr, img_len_ptr, &ctx_id);
	switch (ret) {
	case SSD_PMD_ENCRYPTED:
		/* Image data is operated upon by TZ, which accesses only the main memory.
		 * It must be flushed/invalidated before and after TZ call.
		 */
		arch_clean_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

		/* Decrypt the image here */
		decrypt_req.md_ctx_id = ctx_id;
		decrypt_req.last_frag = 1;
		decrypt_req.frag_len = *img_len_ptr;
		decrypt_req.frag = *img_ptr;

		ret = scm_call(SCM_SVC_SSD,
			       SSD_DECRYPT_IMG_FRAG_ID,
			       &decrypt_req,
			       sizeof(decrypt_req),
			       &decrypt_rsp,
			       sizeof(decrypt_rsp));

		if (!ret) {
			ret = decrypt_rsp.status;
		}

		/* Values at img_ptr and img_len_ptr are updated by TZ. Must be invalidated
		 * before we use them.
		 */
		arch_invalidate_cache_range((addr_t) img_ptr, sizeof(img_ptr));
		arch_invalidate_cache_range((addr_t) img_len_ptr, sizeof(img_len_ptr));

		/* Invalidate the updated image data */
		arch_invalidate_cache_range((addr_t) *img_ptr, *img_len_ptr);

		break;

	case SSD_PMD_NOT_ENCRYPTED:
	case SSD_PMD_NO_MD_FOUND:
		ret = 0;
		break;

	case SSD_PMD_BUSY:
	case SSD_PMD_BAD_MD_PTR_OR_LEN:
	case SSD_PMD_PARSING_INCOMPLETE:
	case SSD_PMD_PARSING_FAILED:
	case SSD_PMD_SETUP_CIPHER_FAILED:
		dprintf(CRITICAL, "decrypt_scm_v2: failed status %d\n", ret);
		break;

	default:
		dprintf(CRITICAL, "decrypt_scm_v2: case default: failed status %d\n", ret);
		break;
	}
	return ret;
}

int scm_svc_version(uint32 *major, uint32 *minor)
{
	feature_version_req feature_req;
	feature_version_rsp feature_rsp;
	int ret = 0;

	feature_req.feature_id = TZBSP_FVER_SSD;

	ret = scm_call(TZBSP_SVC_INFO,
		       TZ_INFO_GET_FEATURE_ID,
		       &feature_req,
		       sizeof(feature_req),
		       &feature_rsp,
		       sizeof(feature_rsp));
	if (!ret)
		*major = TZBSP_GET_FEATURE_VERSION(feature_rsp.version);

	return ret;
}

int scm_protect_keystore(uint32_t *img_ptr, uint32_t img_len)
{
	int ret = 0;
	ssd_protect_keystore_req protect_req;
	ssd_protect_keystore_rsp protect_rsp;

	protect_req.keystore_ptr = img_ptr;
	protect_req.keystore_len = img_len;

	arch_clean_invalidate_cache_range((addr_t) img_ptr, img_len);

	ret = scm_call(SCM_SVC_SSD,
		       SSD_PROTECT_KEYSTORE_ID,
		       &protect_req,
		       sizeof(protect_req),
		       &protect_rsp,
		       sizeof(protect_rsp));
	if (!ret) {
		if (protect_rsp.status == TZBSP_SSD_PKS_SUCCESS)
			dprintf(INFO, "Successfully loaded the keystore ");
		else {
			dprintf(INFO, "Loading keystore failed status %d ", protect_rsp.status);
			ret = protect_rsp.status;
		}
	} else {
		dprintf(INFO, "scm_call failed ");
	}

	return ret;
}

void set_tamper_fuse_cmd()
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	void *resp_buf = NULL;
	size_t resp_len = 0;

	uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
	cmd_buf = (void *)&fuse_id;
	cmd_len = sizeof(fuse_id);

	/* no response */
	resp_buf = NULL;
	resp_len = 0;

	svc_id = SCM_SVC_FUSE;
	cmd_id = SCM_BLOW_SW_FUSE_ID;

	scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
	return;
}

uint8_t get_tamper_fuse_cmd()
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	size_t resp_len = 0;
	uint8_t resp_buf;

	uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
	cmd_buf = (void *)&fuse_id;
	cmd_len = sizeof(fuse_id);

	/* response */
	resp_len = sizeof(resp_buf);

	svc_id = SCM_SVC_FUSE;
	cmd_id = SCM_IS_SW_FUSE_BLOWN_ID;

	scm_call(svc_id, cmd_id, cmd_buf, cmd_len, &resp_buf, resp_len);
	return resp_buf;
}

#define SHA256_DIGEST_LENGTH	(256 / 8)
/*
 * struct qseecom_save_partition_hash_req
 * @partition_id - partition id.
 * @digest[SHA256_DIGEST_LENGTH] - sha256 digest.
 */
struct qseecom_save_partition_hash_req {
	uint32_t partition_id;			/* in */
	uint8_t digest[SHA256_DIGEST_LENGTH];	/* in */
};

void save_kernel_hash_cmd(void *digest)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	void *resp_buf = NULL;
	size_t resp_len = 0;
	struct qseecom_save_partition_hash_req req;

	/* no response */
	resp_buf = NULL;
	resp_len = 0;

	req.partition_id = 0;	/* kernel */
	memcpy(req.digest, digest, sizeof(req.digest));

	svc_id = SCM_SVC_ES;
	cmd_id = SCM_SAVE_PARTITION_HASH_ID;
	cmd_buf = (void *)&req;
	cmd_len = sizeof(req);

	scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
}

/*
 * Switches the CE1 channel between ADM and register usage.
 * channel : AP_CE_REGISTER_USE, CE1 uses register interface
 *         : AP_CE_ADM_USE, CE1 uses ADM interface
 */
uint8_t switch_ce_chn_cmd(enum ap_ce_channel_type channel)
{
	uint32_t svc_id;
	uint32_t cmd_id;
	void *cmd_buf;
	size_t cmd_len;
	size_t resp_len = 0;
	uint8_t resp_buf;

	struct {
		uint32_t resource;
		uint32_t chn_id;
	} __PACKED switch_ce_chn_buf;

	switch_ce_chn_buf.resource = TZ_RESOURCE_CE_AP;
	switch_ce_chn_buf.chn_id = channel;
	cmd_buf = (void *)&switch_ce_chn_buf;
	cmd_len = sizeof(switch_ce_chn_buf);

	/* response */
	resp_len = sizeof(resp_buf);

	svc_id = SCM_SVC_CE_CHN_SWITCH_ID;
	cmd_id = SCM_CE_CHN_SWITCH_ID;

	scm_call(svc_id, cmd_id, cmd_buf, cmd_len, &resp_buf, resp_len);
	return resp_buf;
}

int scm_halt_pmic_arbiter()
{
	int ret = 0;

	ret = scm_call_atomic(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER, 0);

	return ret;
}

/* Exception Level exec secure-os call.
 * Jumps to the kernel via secure-os and does not return
 * on a successful jump. System parameters are set up and
 * passed on to secure-os, which uses them to boot the
 * kernel.
 *
 * @ kernel_entry : kernel entry point, passed in as the link register.
 * @ dtb_offset   : dt blob address, passed in as w0.
 *
 * Assumes all sanity checks have been performed on the arguments.
 */
void scm_elexec_call(paddr_t kernel_entry, paddr_t dtb_offset)
{
	uint32_t svc_id = SCM_SVC_MILESTONE_32_64_ID;
	uint32_t cmd_id = SCM_SVC_MILESTONE_CMD_ID;
	void *cmd_buf;
	size_t cmd_len;
	static el1_system_param param;

	param.el1_x0 = dtb_offset;
	param.el1_elr = kernel_entry;

	/* Command Buffer */
	cmd_buf = (void *)&param;
	cmd_len = sizeof(el1_system_param);

	/* Response Buffer = NULL as no response is expected */
	dprintf(INFO, "Jumping to kernel via monitor\n");
	scm_call(svc_id, cmd_id, cmd_buf, cmd_len, NULL, 0);

	/* Assert if execution ever reaches here */
	dprintf(CRITICAL, "Failed to jump to kernel\n");
	ASSERT(0);
}

/* SCM Random Command */
int scm_random(uint32_t *rbuf, uint32_t r_len)
{
	int ret;
	struct tz_prng_data data;

	data.out_buf = (uint8_t *) rbuf;
	data.out_buf_size = r_len;

	/*
	 * The random buffer must be flushed/invalidated before and after the TZ call.
	 */
	arch_clean_invalidate_cache_range((addr_t) rbuf, r_len);

	ret = scm_call(TZ_SVC_CRYPTO, PRNG_CMD_ID, &data, sizeof(data), NULL, 0);

	/* Invalidate the updated random buffer */
	arch_clean_invalidate_cache_range((addr_t) rbuf, r_len);

	return ret;
}

void *get_canary()
{
	void *canary;

	if (scm_random((uint32_t *)&canary, sizeof(canary))) {
		dprintf(CRITICAL, "scm_call for random failed !!!\n");
		/*
		 * Fall back to the lib rand API if the scm call failed.
		 */
		canary = (void *)rand();
	}

	return canary;
}

int scm_xpu_err_fatal_init()
{
	uint32_t ret = 0;
	uint32_t response = 0;
	tz_xpu_prot_cmd cmd;

	cmd.config = ERR_FATAL_ENABLE;
	cmd.spare = 0;

	ret = scm_call(SVC_MEMORY_PROTECTION, XPU_ERR_FATAL, &cmd, sizeof(cmd),
		       &response, sizeof(response));

	if (ret)
		dprintf(CRITICAL, "Failed to set XPU violations as fatal errors: %u\n", response);
	else
		dprintf(INFO, "Configured XPU violations to be fatal errors\n");

	return ret;
}