Merge "platform: msm_shared: Add new scm interface"
diff --git a/platform/msm_shared/include/scm.h b/platform/msm_shared/include/scm.h
index 1268db9..de046a4 100644
--- a/platform/msm_shared/include/scm.h
+++ b/platform/msm_shared/include/scm.h
@@ -29,6 +29,34 @@
#ifndef __SCM_H__
#define __SCM_H__
+/* ARM SCM format support related flags */
+#define SIP_SVC_CALLS 0x02000000
+/* Build x0 command word: bits 0:7 = cmd, 8:15 = svc, plus the SIP marker.
+ * Arguments are fully parenthesized to avoid operator-precedence surprises.
+ */
+#define MAKE_SIP_SCM_CMD(svc_id, cmd_id) (((((svc_id) << 8) | (cmd_id)) & 0xFFFF) | SIP_SVC_CALLS)
+/* Build x1: bits 0-3 hold the argument count (mask 0xf), and each
+ * argument type is a 2-bit field (mask 0x3) starting at bit 4
+ * (arg1 in bits 4-5, arg2 in 6-7, ..., arg10 in 22-23).
+ */
+#define MAKE_SCM_VAR_ARGS(num_args, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, ...) (\
+			   (((t0) & 0x3) << 4) | \
+			   (((t1) & 0x3) << 6) | \
+			   (((t2) & 0x3) << 8) | \
+			   (((t3) & 0x3) << 10) | \
+			   (((t4) & 0x3) << 12) | \
+			   (((t5) & 0x3) << 14) | \
+			   (((t6) & 0x3) << 16) | \
+			   (((t7) & 0x3) << 18) | \
+			   (((t8) & 0x3) << 20) | \
+			   (((t9) & 0x3) << 22) | \
+			   ((num_args) & 0xf))
+/* Pads unspecified type slots with 0 (SMC_PARAM_TYPE_VALUE) */
+#define MAKE_SCM_ARGS(...) MAKE_SCM_VAR_ARGS(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+#define SCM_ATOMIC_BIT BIT(31)
+#define SCM_MAX_ARG_LEN 5
+#define SCM_INDIR_MAX_LEN 10
+
+/* typedef (not a header-scope variable): the original form defined a
+ * global object named scm_arg_type in every translation unit including
+ * this header.
+ */
+typedef enum
+{
+	SMC_PARAM_TYPE_VALUE = 0,
+	SMC_PARAM_TYPE_BUFFER_READ,
+	SMC_PARAM_TYPE_BUFFER_READWRITE,
+	SMC_PARAM_TYPE_BUFFER_VALIDATION,
+} scm_arg_type;
+
+
/* 8 Byte SSD magic number (LE) */
#define DECRYPT_MAGIC_0 0x73737A74
#define DECRYPT_MAGIC_1 0x676D6964
@@ -126,12 +154,62 @@
uint32_t out_buf_size;
}__packed;
+/* SCM support as per the ARM SMC calling convention */
+/*
+ * Argument structure for an ARM-convention SCM call.
+ * x0: command ID (see MAKE_SIP_SCM_CMD)
+ * x1: number of arguments and the type of each argument.
+ *   : Each type is one of
+ *   : SMC_PARAM_TYPE_VALUE             0
+ *   : SMC_PARAM_TYPE_BUFFER_READ       1
+ *   : SMC_PARAM_TYPE_BUFFER_READWRITE  2
+ *   : SMC_PARAM_TYPE_BUFFER_VALIDATION 3
+ * @Note: The argument count refers to x2 onwards.
+ * x2-x4: direct arguments (buffer-typed arguments carry the
+ *        buffer address — 32-bit on this target)
+ * x5[10]: when more arguments are needed than fit in x2-x4,
+ *       : the extra ones are passed here as an indirect list.
+ */
+typedef struct {
+	uint32_t x0;/* command ID details as per the ARMv8 spec:
+		       bits 0:7 command, 8:15 service id,
+		       0x02000000 marks SIP calls,
+		       bit 30 selects SMC32 vs SMC64,
+		       bit 31 selects standard vs fast call */
+	uint32_t x1; /* # of args and attributes for buffers
+		      * 0-3: arg count
+		      * 4-5: type of arg1
+		      * 6-7: type of arg2
+		      * :
+		      * :
+		      * 20-21: type of arg9
+		      * 22-23: type of arg10
+		      */
+	uint32_t x2; /* Param1 */
+	uint32_t x3; /* Param2 */
+	uint32_t x4; /* Param3 */
+	uint32_t x5[10]; /* Indirect parameter list */
+	uint32_t atomic; /* non-zero ORs SCM_ATOMIC_BIT into x0 (fast call) */
+} scmcall_arg;
+
+/* Return values of an SCM call:
+ * results up to 12 bytes come back in registers (x1-x3 below);
+ * anything larger must instead be returned through a buffer
+ * argument (address + length) passed into the call.
+ */
+typedef struct
+{
+	uint32_t x1;
+	uint32_t x2;
+	uint32_t x3;
+} scmcall_ret;
+
/* Service IDs */
#define SCM_SVC_BOOT 0x01
#define TZBSP_SVC_INFO 0x06
#define SCM_SVC_SSD 0x07
#define SVC_MEMORY_PROTECTION 0x0C
#define TZ_SVC_CRYPTO 0x0A
+#define SCM_SVC_INFO 0x06
/*Service specific command IDs */
#define ERR_FATAL_ENABLE 0x0
@@ -151,6 +229,7 @@
#define TZ_INFO_GET_FEATURE_ID 0x03
#define PRNG_CMD_ID 0x01
+#define IS_CALL_AVAIL_CMD 0x01
/* Download Mode specific arguments to be passed to TZ */
#define SCM_EDLOAD_MODE 0x02
@@ -235,6 +314,11 @@
/* API to configure XPU violations as fatal */
int scm_xpu_err_fatal_init();
+/* APIs to support ARM scm standard
+ * Takes arguments : x0-x5 and returns result
+ * in x0-x3*/
+uint32_t scm_call2(scmcall_arg *arg, scmcall_ret *ret);
+
/**
* struct scm_command - one SCM command buffer
* @len: total available memory for command and response
@@ -278,4 +362,8 @@
uint32_t buf_offset;
uint32_t is_complete;
};
+/* Perform any scm init needed before making scm calls
+ * Used for checking if armv8 SCM support present
+ */
+void scm_init();
#endif
diff --git a/platform/msm_shared/scm.c b/platform/msm_shared/scm.c
index b2ebfa3..48f62be 100644
--- a/platform/msm_shared/scm.c
+++ b/platform/msm_shared/scm.c
@@ -50,6 +50,33 @@
SCM_MASK_IRQS | \
((n) & 0xf))
+/* SCM interface as per ARM spec present? */
+bool scm_arm_support;
+
+/* Probe (via the new interface itself) whether (svc_id, cmd_id) is
+ * reachable through the ARM SMC calling convention; on success, mark
+ * the convention as supported for all later SCM calls.
+ */
+static void scm_arm_support_available(uint32_t svc_id, uint32_t cmd_id)
+{
+	uint32_t ret;
+	scmcall_arg scm_arg = {0};
+	/* Bug fix: the result argument must be scmcall_ret (scm_call2's
+	 * out-parameter type), not a second scmcall_arg.
+	 */
+	scmcall_ret scm_ret = {0};
+	/* Make a call to check if SCM call available using new interface,
+	 * if this returns 0 then scm implementation as per arm spec
+	 * otherwise use the old interface for scm calls
+	 */
+	scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);
+	scm_arg.x1 = MAKE_SCM_ARGS(0x1);
+	scm_arg.x2 = MAKE_SIP_SCM_CMD(svc_id, cmd_id);
+
+	ret = scm_call2(&scm_arg, &scm_ret);
+
+	if (!ret)
+		scm_arm_support = true;
+}
+
+
+/* Perform one-time SCM setup: detect whether the ARMv8 SMC calling
+ * convention is supported before any other scm call is made.
+ * (void) prototype: empty parens declare an unspecified-argument
+ * function in C.
+ */
+void scm_init(void)
+{
+	scm_arm_support_available(SCM_SVC_INFO, IS_CALL_AVAIL_CMD);
+}
/**
* alloc_scm_command() - Allocate an SCM command
@@ -255,17 +282,30 @@
secure_cfg.id = id;
secure_cfg.spare = 0;
+ scmcall_arg scm_arg = {0};
- ret = scm_call(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG, &secure_cfg, sizeof(secure_cfg),
- NULL, 0);
+ if(!scm_arm_support)
+ {
+ ret = scm_call(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG, &secure_cfg, sizeof(secure_cfg),
+ NULL, 0);
+ }
+ else
+ {
+ scm_arg.x0 = MAKE_SIP_SCM_CMD(SVC_MEMORY_PROTECTION, IOMMU_SECURE_CFG);
+ scm_arg.x1 = MAKE_SCM_ARGS(0x2);
+ scm_arg.x2 = id;
+ scm_arg.x3 = 0x0; /* Spare unused */
- if (ret) {
+ ret = scm_call2(&scm_arg, NULL);
+ }
+
+ if (ret)
+ {
dprintf(CRITICAL, "Secure Config failed\n");
ret = 1;
}
return ret;
-
}
/* SCM Encrypt Command */
@@ -274,6 +314,12 @@
int ret;
img_req cmd;
+ if (scm_arm_support)
+ {
+ dprintf(INFO, "%s:SCM call is not supported\n",__func__);
+ return -1;
+ }
+
cmd.img_ptr = (uint32*) img_ptr;
cmd.img_len_ptr = img_len_ptr;
@@ -302,6 +348,12 @@
int ret;
img_req cmd;
+ if (scm_arm_support)
+ {
+ dprintf(INFO, "%s:SCM call is not supported\n",__func__);
+ return -1;
+ }
+
cmd.img_ptr = (uint32*) img_ptr;
cmd.img_len_ptr = img_len_ptr;
@@ -332,6 +384,12 @@
ssd_parse_md_rsp parse_rsp;
int prev_len = 0;
+ if (scm_arm_support)
+ {
+ dprintf(INFO, "%s:SCM call is not supported\n",__func__);
+ return -1;
+ }
+
/* Populate meta-data ptr. Here md_len is the meta-data length.
* The Code below follows a growing length approach. First send
* min(img_len_ptr,SSD_HEADER_MIN_SIZE) say 128 bytes for example.
@@ -397,6 +455,12 @@
ssd_decrypt_img_frag_req decrypt_req;
ssd_decrypt_img_frag_rsp decrypt_rsp;
+ if (scm_arm_support)
+ {
+ dprintf(INFO, "%s:SCM call is not supported\n",__func__);
+ return -1;
+ }
+
ret = ssd_image_is_encrypted(img_ptr,img_len_ptr,&ctx_id);
switch(ret)
{
@@ -461,15 +525,30 @@
feature_version_req feature_req;
feature_version_rsp feature_rsp;
int ret = 0;
+ scmcall_arg scm_arg = {0};
+ scmcall_ret scm_ret = {0};
feature_req.feature_id = TZBSP_FVER_SSD;
- ret = scm_call(TZBSP_SVC_INFO,
- TZ_INFO_GET_FEATURE_ID,
- &feature_req,
- sizeof(feature_req),
- &feature_rsp,
- sizeof(feature_rsp));
+ if (!scm_arm_support)
+ {
+ ret = scm_call(TZBSP_SVC_INFO,
+ TZ_INFO_GET_FEATURE_ID,
+ &feature_req,
+ sizeof(feature_req),
+ &feature_rsp,
+ sizeof(feature_rsp));
+ }
+ else
+ {
+ scm_arg.x0 = MAKE_SIP_SCM_CMD(TZBSP_SVC_INFO, TZ_INFO_GET_FEATURE_ID);
+ scm_arg.x1 = MAKE_SCM_ARGS(0x1);
+ scm_arg.x2 = feature_req.feature_id;
+
+ ret = scm_call2(&scm_arg, &scm_ret);
+ feature_rsp.version = scm_ret.x1;
+ }
+
if(!ret)
*major = TZBSP_GET_FEATURE_VERSION(feature_rsp.version);
@@ -482,6 +561,12 @@
ssd_protect_keystore_req protect_req;
ssd_protect_keystore_rsp protect_rsp;
+ if (scm_arm_support)
+ {
+ dprintf(INFO, "%s:SCM call is not supported\n",__func__);
+ return -1;
+ }
+
protect_req.keystore_ptr = img_ptr;
protect_req.keystore_len = img_len;
@@ -522,6 +607,12 @@
cmd_buf = (void *)&fuse_id;
cmd_len = sizeof(fuse_id);
+ if (scm_arm_support)
+ {
+ dprintf(INFO, "%s:SCM call is not supported\n",__func__);
+ return;
+ }
+
/*no response */
resp_buf = NULL;
resp_len = 0;
@@ -543,6 +634,13 @@
uint8_t resp_buf;
uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
+
+ if (scm_arm_support)
+ {
+ dprintf(INFO, "%s:SCM call is not supported\n",__func__);
+ return;
+ }
+
cmd_buf = (void *)&fuse_id;
cmd_len = sizeof(fuse_id);
@@ -577,6 +675,7 @@
void *resp_buf = NULL;
size_t resp_len = 0;
struct qseecom_save_partition_hash_req req;
+ scmcall_arg scm_arg = {0};
/*no response */
resp_buf = NULL;
@@ -585,12 +684,26 @@
req.partition_id = 0; /* kernel */
memcpy(req.digest, digest, sizeof(req.digest));
- svc_id = SCM_SVC_ES;
- cmd_id = SCM_SAVE_PARTITION_HASH_ID;
- cmd_buf = (void *)&req;
- cmd_len = sizeof(req);
+ if (!scm_arm_support)
+ {
+ svc_id = SCM_SVC_ES;
+ cmd_id = SCM_SAVE_PARTITION_HASH_ID;
+ cmd_buf = (void *)&req;
+ cmd_len = sizeof(req);
- scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
+ scm_call(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, resp_len);
+ }
+ else
+ {
+ scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID);
+ scm_arg.x1 = MAKE_SCM_ARGS(0x3, 0, SMC_PARAM_TYPE_BUFFER_READWRITE);
+ scm_arg.x2 = req.partition_id;
+ scm_arg.x3 = (uint8_t *)&req.digest;
+ scm_arg.x4 = sizeof(req.digest);
+
+ if (scm_call2(&scm_arg, NULL))
+ dprintf(CRITICAL, "Failed to Save kernel hash\n");
+ }
}
/*
@@ -612,6 +725,12 @@
uint32_t chn_id;
}__PACKED switch_ce_chn_buf;
+ if (scm_arm_support)
+ {
+ dprintf(INFO, "%s:SCM call is not supported\n",__func__);
+ return 0;
+ }
+
switch_ce_chn_buf.resource = TZ_RESOURCE_CE_AP;
switch_ce_chn_buf.chn_id = channel;
cmd_buf = (void *)&switch_ce_chn_buf;
@@ -631,6 +750,12 @@
{
int ret = 0;
+ if (scm_arm_support)
+ {
+ dprintf(INFO, "%s:SCM call is not supported\n",__func__);
+ return -1;
+ }
+
ret = scm_call_atomic(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER, 0);
return ret;
@@ -656,17 +781,31 @@
void *cmd_buf;
size_t cmd_len;
static el1_system_param param;
+ scmcall_arg scm_arg = {0};
param.el1_x0 = dtb_offset;
param.el1_elr = kernel_entry;
- /* Command Buffer */
- cmd_buf = (void *)¶m;
- cmd_len = sizeof(el1_system_param);
-
/* Response Buffer = Null as no response expected */
dprintf(INFO, "Jumping to kernel via monitor\n");
- scm_call(svc_id, cmd_id, cmd_buf, cmd_len, NULL, 0);
+
+ if (!scm_arm_support)
+ {
+ /* Command Buffer */
+ cmd_buf = (void *)¶m;
+ cmd_len = sizeof(el1_system_param);
+
+ scm_call(svc_id, cmd_id, cmd_buf, cmd_len, NULL, 0);
+ }
+ else
+ {
+ scm_arg.x0 = MAKE_SIP_SCM_CMD(SCM_SVC_MILESTONE_32_64_ID, SCM_SVC_MILESTONE_CMD_ID);
+ scm_arg.x1 = MAKE_SCM_ARGS(0x2, SMC_PARAM_TYPE_BUFFER_READ);
+ scm_arg.x2 = (void *)¶m;
+ scm_arg.x3 = sizeof(el1_system_param);
+
+ scm_call2(&scm_arg, NULL);
+ }
/* Assert if execution ever reaches here */
dprintf(CRITICAL, "Failed to jump to kernel\n");
@@ -678,19 +817,36 @@
{
int ret;
struct tz_prng_data data;
+ scmcall_arg scm_arg = {0};
- data.out_buf = (uint8_t*) rbuf;
- data.out_buf_size = r_len;
+ if (!scm_arm_support)
+ {
+ data.out_buf = (uint8_t*) rbuf;
+ data.out_buf_size = r_len;
- /*
- * random buffer must be flushed/invalidated before and after TZ call.
- */
- arch_clean_invalidate_cache_range((addr_t) rbuf, r_len);
+ /*
+ * random buffer must be flushed/invalidated before and after TZ call.
+ */
+ arch_clean_invalidate_cache_range((addr_t) rbuf, r_len);
- ret = scm_call(TZ_SVC_CRYPTO, PRNG_CMD_ID, &data, sizeof(data), NULL, 0);
+ ret = scm_call(TZ_SVC_CRYPTO, PRNG_CMD_ID, &data, sizeof(data), NULL, 0);
- /* Invalidate the updated random buffer */
- arch_clean_invalidate_cache_range((addr_t) rbuf, r_len);
+ /* Invalidate the updated random buffer */
+ arch_clean_invalidate_cache_range((addr_t) rbuf, r_len);
+ }
+ else
+ {
+ scm_arg.x0 = MAKE_SIP_SCM_CMD(TZ_SVC_CRYPTO, PRNG_CMD_ID);
+ scm_arg.x1 = MAKE_SCM_ARGS(0x2,SMC_PARAM_TYPE_BUFFER_READWRITE);
+ scm_arg.x2 = (uint8_t *) rbuf;
+ scm_arg.x3 = r_len;
+
+ ret = scm_call2(&scm_arg, NULL);
+ if (!ret)
+ arch_clean_invalidate_cache_range((addr_t) rbuf, r_len);
+ else
+ dprintf(CRITICAL, "Secure canary SCM failed: %x\n", ret);
+ }
return ret;
}
@@ -714,12 +870,26 @@
uint32_t ret = 0;
uint32_t response = 0;
tz_xpu_prot_cmd cmd;
+ scmcall_arg scm_arg = {0};
+ scmcall_ret scm_ret = {0};
- cmd.config = ERR_FATAL_ENABLE;
- cmd.spare = 0;
+ if (!scm_arm_support)
+ {
+ cmd.config = ERR_FATAL_ENABLE;
+ cmd.spare = 0;
- ret = scm_call(SVC_MEMORY_PROTECTION, XPU_ERR_FATAL, &cmd, sizeof(cmd), &response,
- sizeof(response));
+ ret = scm_call(SVC_MEMORY_PROTECTION, XPU_ERR_FATAL, &cmd, sizeof(cmd), &response,
+ sizeof(response));
+ }
+ else
+ {
+ scm_arg.x0 = MAKE_SIP_SCM_CMD(SVC_MEMORY_PROTECTION, XPU_ERR_FATAL);
+ scm_arg.x1 = MAKE_SCM_ARGS(0x2);
+ scm_arg.x2 = ERR_FATAL_ENABLE;
+ scm_arg.x3 = 0x0;
+ ret = scm_call2(&scm_arg, &scm_ret);
+ response = scm_ret.x1;
+ }
if (ret)
dprintf(CRITICAL, "Failed to set XPU violations as fatal errors: %u\n", response);
@@ -728,3 +898,74 @@
return ret;
}
+
+/* Issue an SMC with x0-x5 in r0-r5 per the ARM SMC calling convention.
+ * r0-r3 come back as results; r1-r3 are copied into *ret when given.
+ * Returns the value left in r0 by the secure world.
+ */
+static uint32_t scm_call_a32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, scmcall_ret *ret)
+{
+	register uint32_t r0 __asm__("r0") = x0;
+	register uint32_t r1 __asm__("r1") = x1;
+	register uint32_t r2 __asm__("r2") = x2;
+	register uint32_t r3 __asm__("r3") = x3;
+	register uint32_t r4 __asm__("r4") = x4;
+	register uint32_t r5 __asm__("r5") = x5;
+
+	/* "memory" clobber: the secure world reads/writes the buffers
+	 * whose addresses are passed in these registers, so the compiler
+	 * must not cache or reorder memory accesses across the SMC.
+	 */
+	__asm__ volatile(
+		__asmeq("%0", "r0")
+		__asmeq("%1", "r1")
+		__asmeq("%2", "r2")
+		__asmeq("%3", "r3")
+		__asmeq("%4", "r0")
+		__asmeq("%5", "r1")
+		__asmeq("%6", "r2")
+		__asmeq("%7", "r3")
+		__asmeq("%8", "r4")
+		__asmeq("%9", "r5")
+		"smc    #0  @ switch to secure world\n"
+		: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+		: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5)
+		: "memory");
+
+	if (ret)
+	{
+		ret->x1 = r1;
+		ret->x2 = r2;
+		ret->x3 = r3;
+	}
+
+	return r0;
+}
+
+/* Make an SCM call using the ARM SMC calling convention.
+ * Takes x0-x5 (plus the atomic flag) from *arg; register results
+ * land in *ret when non-NULL. When more than SCM_MAX_ARG_LEN
+ * arguments are used, x5[] is copied into a cache-aligned indirect
+ * list and its address is passed in r5.
+ * Returns 0 on success, otherwise the secure world's error code.
+ */
+uint32_t scm_call2(scmcall_arg *arg, scmcall_ret *ret)
+{
+	uint32_t *indir_arg = NULL;
+	uint32_t x5;
+	int i;
+	uint32_t rc;
+
+	arg->x0 = arg->atomic ? (arg->x0 | SCM_ATOMIC_BIT) : arg->x0;
+	x5 = arg->x5[0];
+
+	if ((arg->x1 & 0xF) > SCM_MAX_ARG_LEN)
+	{
+		indir_arg = memalign(CACHE_LINE, (SCM_INDIR_MAX_LEN * sizeof(uint32_t)));
+		ASSERT(indir_arg);
+
+		for (i = 0 ; i < SCM_INDIR_MAX_LEN; i++)
+		{
+			indir_arg[i] = arg->x5[i];
+		}
+		/* Flush so the secure world sees the list through memory */
+		arch_clean_invalidate_cache_range((addr_t) indir_arg, ROUNDUP((SCM_INDIR_MAX_LEN * sizeof(uint32_t)), CACHE_LINE));
+		x5 = (addr_t) indir_arg;
+	}
+
+	rc = scm_call_a32(arg->x0, arg->x1, arg->x2, arg->x3, arg->x4, x5, ret);
+
+	/* Free on every path: the original leaked indir_arg when rc != 0 */
+	if (indir_arg)
+		free(indir_arg);
+
+	if (rc)
+		dprintf(CRITICAL, "SCM call: 0x%x failed with :%x\n", arg->x0, rc);
+
+	return rc;
+}