blob: 8e5987cfa94439d465257f32fe7fdbdc254ec10f [file] [log] [blame]
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
Zhen Kong3d1d92f2018-02-02 17:21:04 -08004 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
54#define QSEECOM_DEV "qseecom"
55#define QSEOS_VERSION_14 0x14
56#define QSEEE_VERSION_00 0x400000
57#define QSEE_VERSION_01 0x401000
58#define QSEE_VERSION_02 0x402000
59#define QSEE_VERSION_03 0x403000
60#define QSEE_VERSION_04 0x404000
61#define QSEE_VERSION_05 0x405000
62#define QSEE_VERSION_20 0x800000
63#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
64
65#define QSEE_CE_CLK_100MHZ 100000000
66#define CE_CLK_DIV 1000000
67
Mohamed Sunfeer105a07b2018-08-29 13:52:40 +053068#define QSEECOM_MAX_SG_ENTRY 4096
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070069#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
70 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
71
72#define QSEECOM_INVALID_KEY_ID 0xff
73
74/* Save partition image hash for authentication check */
75#define SCM_SAVE_PARTITION_HASH_ID 0x01
76
77/* Check if enterprise security is activate */
78#define SCM_IS_ACTIVATED_ID 0x02
79
80/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
81#define SCM_MDTP_CIPHER_DIP 0x01
82
83/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
84#define MAX_DIP 0x20000
85
86#define RPMB_SERVICE 0x2000
87#define SSD_SERVICE 0x3000
88
89#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
90#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
91#define TWO 2
92#define QSEECOM_UFS_ICE_CE_NUM 10
93#define QSEECOM_SDCC_ICE_CE_NUM 20
94#define QSEECOM_ICE_FDE_KEY_INDEX 0
95
96#define PHY_ADDR_4G (1ULL<<32)
97
98#define QSEECOM_STATE_NOT_READY 0
99#define QSEECOM_STATE_SUSPEND 1
100#define QSEECOM_STATE_READY 2
101#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
102
103/*
104 * default ce info unit to 0 for
105 * services which
106 * support only single instance.
107 * Most of services are in this category.
108 */
109#define DEFAULT_CE_INFO_UNIT 0
110#define DEFAULT_NUM_CE_INFO_UNIT 1
111
/* Bus/fabric clock identifiers used by the bandwidth-vote paths. */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,	/* data fabric clock */
	CLK_SFPB,	/* system fabric peripheral bus clock */
};
116
/*
 * ICE full-disk-encryption key-size flags, encoded in the bits selected by
 * QSEECOM_ICE_FDE_KEY_SIZE_MASK (a shift amount, value 2).
 */
enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};
125
/* Role of a /dev/qseecom file handle (stored in qseecom_dev_handle.type). */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,		/* handle bound to a loaded TZ app */
	QSEECOM_LISTENER_SERVICE,	/* handle registered as a listener */
	QSEECOM_SECURE_SERVICE,		/* handle used for secure services */
	QSEECOM_GENERIC,		/* freshly opened, role not yet set */
	QSEECOM_UNAVAILABLE_CLIENT_APP,	/* app lookup/load failed */
};
133
/* Which crypto-engine clock bank an operation targets. */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,	/* CE used by QSEE itself */
	CLK_CE_DRV,	/* CE used by the driver */
	CLK_INVALID,
};
139
/* Character-device bookkeeping for /dev/qseecom. */
static struct class *driver_class;
static dev_t qseecom_device_no;

/* qsee_bw_mutex: serializes bandwidth vote/unvote and the scale-down timer. */
static DEFINE_MUTEX(qsee_bw_mutex);
/* app_access_lock: serializes all app load/unload/send-cmd entry points. */
static DEFINE_MUTEX(app_access_lock);
/* clk_access_lock: protects clock enable/disable reference counting. */
static DEFINE_MUTEX(clk_access_lock);
146
/*
 * One scatter/gather descriptor passed to TZ; see the encoding notes below.
 * Field names are part of the TZ ABI layout - do not reorder.
 */
struct sglist_info {
	uint32_t indexAndFlags;	/* flags in bits 31:30, buffer offset in 29:0 */
	uint32_t sizeOrCount;	/* byte size (single addr) or entry count (list) */
};
151
/*
 * The 31st bit indicates whether one or multiple physical addresses are
 * inside the request buffer. If it is set, the index locates a single
 * physical addr inside the request buffer, and `sizeOrCount` is the size of
 * the memory being shared at that physical address.
 * Otherwise, the index locates an array of {start, len} pairs (a
 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
 * that array.
 *
 * The 30th bit indicates a 64- or 32-bit address; when it is set, physical
 * addr and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit
 * values.
 *
 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
 */
166#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
167 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
168
169#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
170
171#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
172
173#define MAKE_WHITELIST_VERSION(major, minor, patch) \
174 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
175
/*
 * Per-listener state, kept on qseecom.registered_listener_list_head.
 * A listener is a userspace service that TZ apps can call back into via a
 * shared buffer.
 */
struct qseecom_registered_listener_list {
	struct list_head list;
	struct qseecom_register_listener_req svc;	/* id + user sb info */
	void *user_virt_sb_base;	/* shared buffer base in user VA */
	u8 *sb_virt;			/* shared buffer kernel mapping */
	phys_addr_t sb_phys;		/* shared buffer physical address */
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	wait_queue_head_t rcv_req_wq;	/* listener blocks here for requests */
	int rcv_req_flag;		/* wakes rcv_req_wq when set */
	int send_resp_flag;
	bool listener_in_use;		/* busy servicing a TZ request */
	/* wq for thread blocked on this listener*/
	wait_queue_head_t listener_block_app_wq;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	int abort;			/* nonzero: unblock and bail out */
};
194
/*
 * One loaded TZ application, kept on qseecom.registered_app_list_head and
 * reference-counted across clients sharing the same app.
 */
struct qseecom_registered_app_list {
	struct list_head list;
	u32 app_id;			/* id assigned by QSEE */
	u32 ref_cnt;			/* number of clients using this app */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;			/* ELF class of the app image */
	bool app_blocked;		/* blocked on a busy listener */
	u32 check_block;		/* pending block checks counter */
	u32 blocked_on_listener_id;	/* listener this app waits for */
};
205
/* In-kernel (qseecom_kernel.h API) client, tracked for cleanup. */
struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};
210
/* Usage record for one crypto-engine "unit" (FDE or PFE service instance). */
struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];	/* opaque client handle */
	unsigned int unit_num;			/* ce info unit number */
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;				/* true when handed to a client */
	uint32_t type;				/* CE_HW_USAGE type of this unit */
};
219
/* Crypto-engine topology parsed from DT: FDE and PFE unit tables. */
struct ce_hw_usage_info {
	uint32_t qsee_ce_hw_instance;
	uint32_t num_fde;			/* entries in fde[] */
	struct qseecom_ce_info_use *fde;	/* full-disk-encryption units */
	uint32_t num_pfe;			/* entries in pfe[] */
	struct qseecom_ce_info_use *pfe;	/* per-file-encryption units */
};
227
/* Clock bundle for one CE instance, with an enable reference count. */
struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;	/* protected by clk_access_lock */
};
236
/*
 * Driver-wide singleton state (instantiated once as `qseecom` below).
 * Groups the client/listener/app registries, TZ version info, clock and
 * bus-scaling bookkeeping, and feature flags discovered at probe time.
 */
struct qseecom_control {
	struct ion_client *ion_clnt; /* Ion client */
	struct list_head registered_listener_list_head;
	spinlock_t registered_listener_list_lock;

	struct list_head registered_app_list_head;
	spinlock_t registered_app_list_lock;

	struct list_head registered_kclient_list_head;
	spinlock_t registered_kclient_list_lock;

	wait_queue_head_t send_resp_wq;	/* listener response rendezvous */
	int send_resp_flag;

	uint32_t qseos_version;		/* QSEOS interface version */
	uint32_t qsee_version;		/* selects 32/64-bit ireq layouts */
	struct device *pdev;
	bool whitelist_support;		/* TZ supports *_WHITELIST commands */
	bool commonlib_loaded;		/* 32-bit cmnlib image loaded */
	bool commonlib64_loaded;	/* 64-bit cmnlib image loaded */
	struct ce_hw_usage_info ce_info;

	int qsee_bw_count;		/* protected by qsee_bw_mutex */
	int qsee_sfpb_bw_count;		/* protected by qsee_bw_mutex */

	uint32_t qsee_perf_client;	/* msm-bus client id */
	struct qseecom_clk qsee;
	struct qseecom_clk ce_drv;

	bool support_bus_scaling;
	bool support_fde;		/* full-disk encryption supported */
	bool support_pfe;		/* per-file encryption supported */
	bool fde_key_size;		/* true: 32-byte FDE key, else 16 */
	uint32_t cumulative_mode;
	enum qseecom_bandwidth_request_mode current_mode;
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;	/* CE operating frequency from DT */
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;

	uint32_t app_block_ref_cnt;	/* apps currently blocked in TZ */
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;		/* QSEECOM_STATE_* */
	int is_apps_region_protected;	/* from androidboot.keymaster= */
	bool smcinvoke_support;		/* TZ accepted smcinvoke listener id */
};
287
/* DMA allocation backing a "secure buffer fd" passed via modfd requests. */
struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;	/* slot in use */
	size_t size;
	void *vbase;		/* kernel virtual address of DMA buffer */
	dma_addr_t pbase;	/* bus/physical address of DMA buffer */
};
294
/* 32-bit {buffer, size} memory reference as embedded in TEE parameters. */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};
299
/* Per-client state for a handle bound to a TZ app (QSEECOM_CLIENT_APP). */
struct qseecom_client_handle {
	u32 app_id;			/* id of the bound TZ app */
	u8 *sb_virt;			/* shared buffer kernel mapping */
	phys_addr_t sb_phys;		/* shared buffer physical address */
	unsigned long user_virt_sb_base;	/* shared buffer user VA base */
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
};
311
/* Per-client state for a handle registered as a listener service. */
struct qseecom_listener_handle {
	u32 id;		/* listener service id */
};

/* The driver-wide singleton; see struct qseecom_control. */
static struct qseecom_control qseecom;
317
/*
 * Per-open-file driver context; `type` selects which union member is live.
 */
struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;	/* QSEECOM_CLIENT_APP */
		struct qseecom_listener_handle listener; /* QSEECOM_LISTENER_SERVICE */
	};
	bool released;
	int abort;			/* nonzero: abort pending operations */
	wait_queue_head_t abort_wq;	/* waits for ioctl_count to drain */
	atomic_t ioctl_count;		/* in-flight ioctls on this handle */
	bool perf_enabled;
	bool fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	bool use_legacy_cmd;		/* force non-whitelist send-cmd path */
};
335
/* Human-readable description of one key-id usage slot. */
struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};
339
/* CE unit / pipe-pair selection for a crypto operation. */
struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};
345
/*
 * Descriptions indexed by key usage id; index 0 is the invalid/undefined
 * usage, followed by FDE, PFE, and the UFS/SDCC ICE FDE usages.
 */
static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},

	{
		.desc = "Full Disk Encryption",
	},

	{
		.desc = "Per File Encryption",
	},

	{
		.desc = "UFS ICE Full Disk Encryption",
	},

	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};
367
/* Forward declarations for helpers defined later in this file. */
static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name);
static int qseecom_enable_ice_setup(int usage);
static int qseecom_disable_ice_setup(int usage);
static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
				void __user *argp);
static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
				void __user *argp);
static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
				void __user *argp);
385
/*
 * Boot-parameter hook for "androidboot.keymaster=<n>": records whether the
 * apps region is protected (read later when deciding region notification).
 * Returns 1 to tell the kernel the option was consumed.
 */
static int get_qseecom_keymaster_status(char *str)
{
	get_option(&str, &qseecom.is_apps_region_protected);
	return 1;
}
__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
392
/*
 * qseecom_scm_call2() - translate a legacy QSEECOM request into an ARMv8
 * SMC call and issue it.
 * @svc_id:	legacy SCM service id (6, SCM_SVC_ES, or SCM_SVC_TZSCHEDULER)
 * @tz_cmd_id:	legacy TZ command id (meaningful for svc 6 and SCM_SVC_ES)
 * @req_buf:	request structure; for SCM_SVC_TZSCHEDULER its first u32 is
 *		the QSEOS command id that selects the marshalling below
 * @resp_buf:	cast to struct qseecom_command_scm_resp and filled from
 *		desc.ret[0..2] on return
 *
 * For each supported command this maps the request structure onto the SMC
 * id and scm_desc argument layout expected by TZ. Requests whose payload TZ
 * reads by physical address are copied into a freshly allocated, cache-flushed
 * bounce buffer first. Commands that TZ may block on (app start/lookup,
 * listener ops, key ops, ...) first call
 * __qseecom_reentrancy_check_if_no_app_blocked().
 *
 * Returns 0 on success, -EINVAL for unsupported ids or NULL buffers,
 * -ENOMEM on bounce-buffer allocation failure, or the scm_call2() result.
 */
static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
			const void *req_buf, void *resp_buf)
{
	int ret = 0;
	uint32_t smc_id = 0;
	uint32_t qseos_cmd_id = 0;
	struct scm_desc desc = {0};
	struct qseecom_command_scm_resp *scm_resp = NULL;

	if (!req_buf || !resp_buf) {
		pr_err("Invalid buffer pointer\n");
		return -EINVAL;
	}
	/* Every QSEOS request starts with its command id as the first u32. */
	qseos_cmd_id = *(uint32_t *)req_buf;
	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;

	switch (svc_id) {
	case 6: {
		/* Legacy feature-version query (svc 6 / cmd 3) only. */
		if (tz_cmd_id == 3) {
			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
			desc.args[0] = *(uint32_t *)req_buf;
		} else {
			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
				svc_id, tz_cmd_id);
			return -EINVAL;
		}
		ret = scm_call2(smc_id, &desc);
		break;
	}
	case SCM_SVC_ES: {
		switch (tz_cmd_id) {
		case SCM_SAVE_PARTITION_HASH_ID: {
			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
			struct qseecom_save_partition_hash_req *p_hash_req =
				(struct qseecom_save_partition_hash_req *)
				req_buf;
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			/* Bounce the digest; TZ reads it by physical addr. */
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, p_hash_req->digest,
				SHA256_DIGEST_LENGTH);
			/* Make the buffer visible to TZ (bypasses cache). */
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
			desc.args[0] = p_hash_req->partition_id;
			desc.args[1] = virt_to_phys(tzbuf);
			desc.args[2] = SHA256_DIGEST_LENGTH;
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		default: {
			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
				tz_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (tz_cmd_id) */
		break;
	} /* end of case SCM_SVC_ES */
	case SCM_SVC_TZSCHEDULER: {
		/*
		 * QSEOS commands. Most have two marshalling variants: the
		 * 32-bit ireq for pre-QSEE 4.0, and the 64-bit ireq otherwise.
		 */
		switch (qseos_cmd_id) {
		case QSEOS_APP_START_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_START_ID;
			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_app_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_SHUTDOWN_COMMAND: {
			struct qseecom_unload_app_ireq *req;

			req = (struct qseecom_unload_app_ireq *)req_buf;
			smc_id = TZ_OS_APP_SHUTDOWN_ID;
			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_LOOKUP_COMMAND: {
			struct qseecom_check_app_ireq *req;
			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			req = (struct qseecom_check_app_ireq *)req_buf;
			pr_debug("Lookup app_name = %s\n", req->app_name);
			/* TZ reads the name via physical address. */
			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_APP_LOOKUP_ID;
			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = strlen(req->app_name);
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_APP_REGION_NOTIFICATION: {
			struct qsee_apps_region_info_ireq *req;
			struct qsee_apps_region_info_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
			desc.arginfo =
				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qsee_apps_region_info_ireq *)
					req_buf;
				desc.args[0] = req->addr;
				desc.args[1] = req->size;
			} else {
				req_64bit =
				(struct qsee_apps_region_info_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->addr;
				desc.args[1] = req_64bit->size;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
			struct qseecom_load_lib_image_ireq *req;
			struct qseecom_load_lib_image_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_lib_image_ireq *)
					req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_lib_image_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_REGISTER_LISTENER: {
			struct qseecom_register_listener_ireq *req;
			struct qseecom_register_listener_64bit_ireq *req_64bit;

			desc.arginfo =
				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_register_listener_ireq *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->sb_ptr;
				desc.args[2] = req->sb_len;
			} else {
				req_64bit =
				(struct qseecom_register_listener_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->listener_id;
				desc.args[1] = req_64bit->sb_ptr;
				desc.args[2] = req_64bit->sb_len;
			}
			/*
			 * Probe for smcinvoke support: try the smcinvoke
			 * listener SMC first and fall back to the legacy id
			 * (clearing the flag) if TZ rejects it.
			 */
			qseecom.smcinvoke_support = true;
			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			if (ret) {
				qseecom.smcinvoke_support = false;
				smc_id = TZ_OS_REGISTER_LISTENER_ID;
				__qseecom_reentrancy_check_if_no_app_blocked(
					smc_id);
				ret = scm_call2(smc_id, &desc);
			}
			break;
		}
		case QSEOS_DEREGISTER_LISTENER: {
			struct qseecom_unregister_listener_ireq *req;

			req = (struct qseecom_unregister_listener_ireq *)
				req_buf;
			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
			struct qseecom_client_listener_data_irsp *req;

			req = (struct qseecom_client_listener_data_irsp *)
				req_buf;
			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
			desc.arginfo =
				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			desc.args[1] = req->status;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
			/* As above, but also passes the sglist whitelist. */
			struct qseecom_client_listener_data_irsp *req;
			struct qseecom_client_listener_data_64bit_irsp *req_64;

			smc_id =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req =
				(struct qseecom_client_listener_data_irsp *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->status;
				desc.args[2] = req->sglistinfo_ptr;
				desc.args[3] = req->sglistinfo_len;
			} else {
				req_64 =
			(struct qseecom_client_listener_data_64bit_irsp *)
					req_buf;
				desc.args[0] = req_64->listener_id;
				desc.args[1] = req_64->status;
				desc.args[2] = req_64->sglistinfo_ptr;
				desc.args[3] = req_64->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
			/* NOTE: reuses the services-image param id. */
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_app_64bit_ireq *)req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}

		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
			/* Send-data variant carrying the sglist whitelist. */
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
			struct qseecom_client_send_service_ireq *req;

			req = (struct qseecom_client_send_service_ireq *)
				req_buf;
			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
			desc.args[0] = req->key_type;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_ERASE_COMMAND: {
			smc_id = TZ_OS_RPMB_ERASE_ID;
			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_GENERATE_KEY: {
			/*
			 * Key-service commands (generate/delete/set/update)
			 * all bounce the payload (request minus the leading
			 * command id) into a flushed buffer TZ reads by
			 * physical address.
			 */
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_GEN_KEY_ID;
			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_DELETE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_DEL_KEY_ID;
			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_SET_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_select_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_select_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_UPDATE_KEY_USERINFO: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION: {
			/* GP TEE session/command ops share one marshalling
			 * shape: app id + req/resp buffer pointers+lengths.
			 */
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_CLOSE_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_REQUEST_CANCELLATION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
			desc.arginfo =
				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
			struct qseecom_continue_blocked_request_ireq *req =
				(struct qseecom_continue_blocked_request_ireq *)
				req_buf;
			/* smc id depends on whether the listener was
			 * registered via the smcinvoke path (see
			 * QSEOS_REGISTER_LISTENER above).
			 */
			if (qseecom.smcinvoke_support)
				smc_id =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
			else
				smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
			desc.arginfo =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
			desc.args[0] = req->app_or_session_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		default: {
			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
						qseos_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /*end of switch (qsee_cmd_id)  */
		break;
	} /*end of case SCM_SVC_TZSCHEDULER*/
	default: {
		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
					svc_id);
		ret = -EINVAL;
		break;
	}
	} /*end of switch svc_id */
	/* TZ returns result/resp_type/data in the first three return regs. */
	scm_resp->result = desc.ret[0];
	scm_resp->resp_type = desc.ret[1];
	scm_resp->data = desc.ret[2];
	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
		scm_resp->result, scm_resp->resp_type, scm_resp->data);
	return ret;
}
1055
1056
1057static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1058 size_t cmd_len, void *resp_buf, size_t resp_len)
1059{
1060 if (!is_scm_armv8())
1061 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1062 resp_buf, resp_len);
1063 else
1064 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1065}
1066
1067static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1068 struct qseecom_register_listener_req *svc)
1069{
1070 struct qseecom_registered_listener_list *ptr;
1071 int unique = 1;
1072 unsigned long flags;
1073
1074 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1075 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1076 if (ptr->svc.listener_id == svc->listener_id) {
1077 pr_err("Service id: %u is already registered\n",
1078 ptr->svc.listener_id);
1079 unique = 0;
1080 break;
1081 }
1082 }
1083 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1084 return unique;
1085}
1086
1087static struct qseecom_registered_listener_list *__qseecom_find_svc(
1088 int32_t listener_id)
1089{
1090 struct qseecom_registered_listener_list *entry = NULL;
1091 unsigned long flags;
1092
1093 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1094 list_for_each_entry(entry,
1095 &qseecom.registered_listener_list_head, list) {
1096 if (entry->svc.listener_id == listener_id)
1097 break;
1098 }
1099 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1100
1101 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1102 pr_err("Service id: %u is not found\n", listener_id);
1103 return NULL;
1104 }
1105
1106 return entry;
1107}
1108
1109static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1110 struct qseecom_dev_handle *handle,
1111 struct qseecom_register_listener_req *listener)
1112{
1113 int ret = 0;
1114 struct qseecom_register_listener_ireq req;
1115 struct qseecom_register_listener_64bit_ireq req_64bit;
1116 struct qseecom_command_scm_resp resp;
1117 ion_phys_addr_t pa;
1118 void *cmd_buf = NULL;
1119 size_t cmd_len;
1120
1121 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001122 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001123 listener->ifd_data_fd);
1124 if (IS_ERR_OR_NULL(svc->ihandle)) {
1125 pr_err("Ion client could not retrieve the handle\n");
1126 return -ENOMEM;
1127 }
1128
1129 /* Get the physical address of the ION BUF */
1130 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1131 if (ret) {
1132 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1133 ret);
1134 return ret;
1135 }
1136 /* Populate the structure for sending scm call to load image */
1137 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1138 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1139 pr_err("ION memory mapping for listener shared buffer failed\n");
1140 return -ENOMEM;
1141 }
1142 svc->sb_phys = (phys_addr_t)pa;
1143
1144 if (qseecom.qsee_version < QSEE_VERSION_40) {
1145 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1146 req.listener_id = svc->svc.listener_id;
1147 req.sb_len = svc->sb_length;
1148 req.sb_ptr = (uint32_t)svc->sb_phys;
1149 cmd_buf = (void *)&req;
1150 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1151 } else {
1152 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1153 req_64bit.listener_id = svc->svc.listener_id;
1154 req_64bit.sb_len = svc->sb_length;
1155 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1156 cmd_buf = (void *)&req_64bit;
1157 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1158 }
1159
1160 resp.result = QSEOS_RESULT_INCOMPLETE;
1161
1162 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1163 &resp, sizeof(resp));
1164 if (ret) {
1165 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1166 return -EINVAL;
1167 }
1168
1169 if (resp.result != QSEOS_RESULT_SUCCESS) {
1170 pr_err("Error SB registration req: resp.result = %d\n",
1171 resp.result);
1172 return -EPERM;
1173 }
1174 return 0;
1175}
1176
1177static int qseecom_register_listener(struct qseecom_dev_handle *data,
1178 void __user *argp)
1179{
1180 int ret = 0;
1181 unsigned long flags;
1182 struct qseecom_register_listener_req rcvd_lstnr;
1183 struct qseecom_registered_listener_list *new_entry;
1184
1185 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1186 if (ret) {
1187 pr_err("copy_from_user failed\n");
1188 return ret;
1189 }
1190 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1191 rcvd_lstnr.sb_size))
1192 return -EFAULT;
1193
1194 data->listener.id = 0;
1195 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
1196 pr_err("Service is not unique and is already registered\n");
1197 data->released = true;
1198 return -EBUSY;
1199 }
1200
1201 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1202 if (!new_entry)
1203 return -ENOMEM;
1204 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1205 new_entry->rcv_req_flag = 0;
1206
1207 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1208 new_entry->sb_length = rcvd_lstnr.sb_size;
1209 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1210 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
1211 pr_err("qseecom_set_sb_memoryfailed\n");
1212 kzfree(new_entry);
1213 return -ENOMEM;
1214 }
1215
1216 data->listener.id = rcvd_lstnr.listener_id;
1217 init_waitqueue_head(&new_entry->rcv_req_wq);
1218 init_waitqueue_head(&new_entry->listener_block_app_wq);
1219 new_entry->send_resp_flag = 0;
1220 new_entry->listener_in_use = false;
1221 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1222 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1223 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1224
1225 return ret;
1226}
1227
Zhen Kong26e62742018-05-04 17:19:06 -07001228static void __qseecom_listener_abort_all(int abort)
1229{
1230 struct qseecom_registered_listener_list *entry = NULL;
1231 unsigned long flags;
1232
1233 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1234 list_for_each_entry(entry,
1235 &qseecom.registered_listener_list_head, list) {
1236 pr_debug("set abort %d for listener %d\n",
1237 abort, entry->svc.listener_id);
1238 entry->abort = abort;
1239 }
1240 if (abort)
1241 wake_up_interruptible_all(&qseecom.send_resp_wq);
1242 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1243}
1244
/*
 * ioctl/release path: deregister this handle's listener from QSEE and tear
 * down its local state.
 *
 * Sequence: (1) tell TZ to deregister the listener id; (2) mark the handle
 * and the matching list entry aborted and wake any reader blocked in
 * rcv_req_wq; (3) wait until every other in-flight ioctl on this handle has
 * drained (ioctl_count drops to <= 1); (4) unlink and free the list entry,
 * then release the ION mapping that backed its shared buffer.
 *
 * Returns 0 on success, the SCM error, -EPERM when TZ rejects the
 * deregistration, or -ERESTARTSYS if the drain wait was interrupted.
 */
static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	int ret = 0;
	unsigned long flags;
	uint32_t unmap_mem = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL;		/* Retrieve phy addr */

	/* Ask TZ to drop the listener registration first. */
	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
					sizeof(req), &resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		return ret;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		return -EPERM;
	}

	/* Abort the listener so a blocked reader wakes up and bails out. */
	data->abort = 1;
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
			list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			ptr_svc->abort = 1;
			wake_up_all(&ptr_svc->rcv_req_wq);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/*
	 * Wait for concurrent ioctls on this handle to drain; the current
	 * ioctl itself accounts for one reference, hence "> 1".
	 */
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			return ret;
		}
	}

	/* Unlink and free the entry; remember the ION handle to release. */
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc,
			&qseecom.registered_listener_list_head, list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			if (ptr_svc->sb_virt) {
				unmap_mem = 1;
				ihandle = ptr_svc->ihandle;
			}
			list_del(&ptr_svc->list);
			kzfree(ptr_svc);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Unmap the memory */
	if (unmap_mem) {
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	data->released = true;
	return ret;
}
1319
/*
 * Vote the QSEE msm-bus client into bandwidth mode @mode.
 *
 * When a dedicated CE core source clock exists, its vote is toggled first:
 * disabled for INACTIVE, enabled otherwise.  The bus request itself is only
 * issued when the clock step succeeded and the mode actually changes; if
 * the bus request fails, the clock vote is rolled back to its prior state.
 *
 * NOTE(review): qseecom.current_mode is updated to @mode even when
 * msm_bus_scale_client_update_request() fails (the assignment is outside
 * the inner error branch) — confirm this is intentional.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* Undo the clock transition done above. */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		qseecom.current_mode = mode;
	}
	return ret;
}
1356
/*
 * Deferred work scheduled by the bandwidth scale-down timer: if the timer
 * is still marked running (i.e. no one cancelled the scale-down in the
 * meantime), drop the bus vote to INACTIVE, then clear timer_running.
 * app_access_lock is taken before qsee_bw_mutex.
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
1369
/*
 * bw_scale_down_timer expiry callback.  Runs in timer (atomic) context, so
 * defer the actual scale-down to the workqueue — the work handler takes
 * mutexes, which may sleep.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1374
1375static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1376{
1377 struct qseecom_clk *qclk;
1378 int ret = 0;
1379
1380 mutex_lock(&clk_access_lock);
1381 if (ce == CLK_QSEE)
1382 qclk = &qseecom.qsee;
1383 else
1384 qclk = &qseecom.ce_drv;
1385
1386 if (qclk->clk_access_cnt > 2) {
1387 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1388 ret = -EINVAL;
1389 goto err_dec_ref_cnt;
1390 }
1391 if (qclk->clk_access_cnt == 2)
1392 qclk->clk_access_cnt--;
1393
1394err_dec_ref_cnt:
1395 mutex_unlock(&clk_access_lock);
1396 return ret;
1397}
1398
1399
1400static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1401{
1402 int32_t ret = 0;
1403 int32_t request_mode = INACTIVE;
1404
1405 mutex_lock(&qsee_bw_mutex);
1406 if (mode == 0) {
1407 if (qseecom.cumulative_mode > MEDIUM)
1408 request_mode = HIGH;
1409 else
1410 request_mode = qseecom.cumulative_mode;
1411 } else {
1412 request_mode = mode;
1413 }
1414
1415 ret = __qseecom_set_msm_bus_request(request_mode);
1416 if (ret) {
1417 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1418 ret, request_mode);
1419 goto err_scale_timer;
1420 }
1421
1422 if (qseecom.timer_running) {
1423 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1424 if (ret) {
1425 pr_err("Failed to decrease clk ref count.\n");
1426 goto err_scale_timer;
1427 }
1428 del_timer_sync(&(qseecom.bw_scale_down_timer));
1429 qseecom.timer_running = false;
1430 }
1431err_scale_timer:
1432 mutex_unlock(&qsee_bw_mutex);
1433 return ret;
1434}
1435
1436
1437static int qseecom_unregister_bus_bandwidth_needs(
1438 struct qseecom_dev_handle *data)
1439{
1440 int32_t ret = 0;
1441
1442 qseecom.cumulative_mode -= data->mode;
1443 data->mode = INACTIVE;
1444
1445 return ret;
1446}
1447
1448static int __qseecom_register_bus_bandwidth_needs(
1449 struct qseecom_dev_handle *data, uint32_t request_mode)
1450{
1451 int32_t ret = 0;
1452
1453 if (data->mode == INACTIVE) {
1454 qseecom.cumulative_mode += request_mode;
1455 data->mode = request_mode;
1456 } else {
1457 if (data->mode != request_mode) {
1458 qseecom.cumulative_mode -= data->mode;
1459 qseecom.cumulative_mode += request_mode;
1460 data->mode = request_mode;
1461 }
1462 }
1463 return ret;
1464}
1465
1466static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1467{
1468 int ret = 0;
1469
1470 ret = qsee_vote_for_clock(data, CLK_DFAB);
1471 if (ret) {
1472 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1473 goto perf_enable_exit;
1474 }
1475 ret = qsee_vote_for_clock(data, CLK_SFPB);
1476 if (ret) {
1477 qsee_disable_clock_vote(data, CLK_DFAB);
1478 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1479 goto perf_enable_exit;
1480 }
1481
1482perf_enable_exit:
1483 return ret;
1484}
1485
1486static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1487 void __user *argp)
1488{
1489 int32_t ret = 0;
1490 int32_t req_mode;
1491
1492 if (qseecom.no_clock_support)
1493 return 0;
1494
1495 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1496 if (ret) {
1497 pr_err("copy_from_user failed\n");
1498 return ret;
1499 }
1500 if (req_mode > HIGH) {
1501 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1502 return -EINVAL;
1503 }
1504
1505 /*
1506 * Register bus bandwidth needs if bus scaling feature is enabled;
1507 * otherwise, qseecom enable/disable clocks for the client directly.
1508 */
1509 if (qseecom.support_bus_scaling) {
1510 mutex_lock(&qsee_bw_mutex);
1511 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1512 mutex_unlock(&qsee_bw_mutex);
1513 } else {
1514 pr_debug("Bus scaling feature is NOT enabled\n");
1515 pr_debug("request bandwidth mode %d for the client\n",
1516 req_mode);
1517 if (req_mode != INACTIVE) {
1518 ret = qseecom_perf_enable(data);
1519 if (ret)
1520 pr_err("Failed to vote for clock with err %d\n",
1521 ret);
1522 } else {
1523 qsee_disable_clock_vote(data, CLK_DFAB);
1524 qsee_disable_clock_vote(data, CLK_SFPB);
1525 }
1526 }
1527 return ret;
1528}
1529
1530static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1531{
1532 if (qseecom.no_clock_support)
1533 return;
1534
1535 mutex_lock(&qsee_bw_mutex);
1536 qseecom.bw_scale_down_timer.expires = jiffies +
1537 msecs_to_jiffies(duration);
1538 mod_timer(&(qseecom.bw_scale_down_timer),
1539 qseecom.bw_scale_down_timer.expires);
1540 qseecom.timer_running = true;
1541 mutex_unlock(&qsee_bw_mutex);
1542}
1543
1544static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1545{
1546 if (!qseecom.support_bus_scaling)
1547 qsee_disable_clock_vote(data, CLK_SFPB);
1548 else
1549 __qseecom_add_bw_scale_down_timer(
1550 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1551}
1552
1553static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1554{
1555 int ret = 0;
1556
1557 if (qseecom.support_bus_scaling) {
1558 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1559 if (ret)
1560 pr_err("Failed to set bw MEDIUM.\n");
1561 } else {
1562 ret = qsee_vote_for_clock(data, CLK_SFPB);
1563 if (ret)
1564 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1565 }
1566 return ret;
1567}
1568
1569static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1570 void __user *argp)
1571{
1572 ion_phys_addr_t pa;
1573 int32_t ret;
1574 struct qseecom_set_sb_mem_param_req req;
1575 size_t len;
1576
1577 /* Copy the relevant information needed for loading the image */
1578 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1579 return -EFAULT;
1580
1581 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1582 (req.sb_len == 0)) {
1583 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1584 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1585 return -EFAULT;
1586 }
1587 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1588 req.sb_len))
1589 return -EFAULT;
1590
1591 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001592 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001593 req.ifd_data_fd);
1594 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1595 pr_err("Ion client could not retrieve the handle\n");
1596 return -ENOMEM;
1597 }
1598 /* Get the physical address of the ION BUF */
1599 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1600 if (ret) {
1601
1602 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1603 ret);
1604 return ret;
1605 }
1606
1607 if (len < req.sb_len) {
1608 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1609 req.sb_len, len);
1610 return -EINVAL;
1611 }
1612 /* Populate the structure for sending scm call to load image */
1613 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1614 data->client.ihandle);
1615 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1616 pr_err("ION memory mapping for client shared buf failed\n");
1617 return -ENOMEM;
1618 }
1619 data->client.sb_phys = (phys_addr_t)pa;
1620 data->client.sb_length = req.sb_len;
1621 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1622 return 0;
1623}
1624
Zhen Kong26e62742018-05-04 17:19:06 -07001625static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1626 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001627{
1628 int ret;
1629
1630 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001631 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001632}
1633
1634static int __qseecom_reentrancy_listener_has_sent_rsp(
1635 struct qseecom_dev_handle *data,
1636 struct qseecom_registered_listener_list *ptr_svc)
1637{
1638 int ret;
1639
1640 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001641 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001642}
1643
1644static void __qseecom_clean_listener_sglistinfo(
1645 struct qseecom_registered_listener_list *ptr_svc)
1646{
1647 if (ptr_svc->sglist_cnt) {
1648 memset(ptr_svc->sglistinfo_ptr, 0,
1649 SGLISTINFO_TABLE_SIZE);
1650 ptr_svc->sglist_cnt = 0;
1651 }
1652}
1653
1654static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1655 struct qseecom_command_scm_resp *resp)
1656{
1657 int ret = 0;
1658 int rc = 0;
1659 uint32_t lstnr;
1660 unsigned long flags;
Zhen Kong7d500032018-08-06 16:58:31 -07001661 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
1662 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
1663 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001664 struct qseecom_registered_listener_list *ptr_svc = NULL;
1665 sigset_t new_sigset;
1666 sigset_t old_sigset;
1667 uint32_t status;
1668 void *cmd_buf = NULL;
1669 size_t cmd_len;
1670 struct sglist_info *table = NULL;
1671
1672 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1673 lstnr = resp->data;
1674 /*
1675 * Wake up blocking lsitener service with the lstnr id
1676 */
1677 spin_lock_irqsave(&qseecom.registered_listener_list_lock,
1678 flags);
1679 list_for_each_entry(ptr_svc,
1680 &qseecom.registered_listener_list_head, list) {
1681 if (ptr_svc->svc.listener_id == lstnr) {
1682 ptr_svc->listener_in_use = true;
1683 ptr_svc->rcv_req_flag = 1;
1684 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1685 break;
1686 }
1687 }
1688 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
1689 flags);
1690
1691 if (ptr_svc == NULL) {
1692 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07001693 rc = -EINVAL;
1694 status = QSEOS_RESULT_FAILURE;
1695 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001696 }
1697
1698 if (!ptr_svc->ihandle) {
1699 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07001700 rc = -EINVAL;
1701 status = QSEOS_RESULT_FAILURE;
1702 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001703 }
1704
1705 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07001706 pr_err("Service %d does not exist\n",
1707 lstnr);
1708 rc = -ERESTARTSYS;
1709 ptr_svc = NULL;
1710 status = QSEOS_RESULT_FAILURE;
1711 goto err_resp;
1712 }
1713
1714 if (ptr_svc->abort == 1) {
1715 pr_err("Service %d abort %d\n",
1716 lstnr, ptr_svc->abort);
1717 rc = -ENODEV;
1718 status = QSEOS_RESULT_FAILURE;
1719 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001720 }
1721 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1722
1723 /* initialize the new signal mask with all signals*/
1724 sigfillset(&new_sigset);
1725 /* block all signals */
1726 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1727
1728 do {
1729 /*
1730 * When reentrancy is not supported, check global
1731 * send_resp_flag; otherwise, check this listener's
1732 * send_resp_flag.
1733 */
1734 if (!qseecom.qsee_reentrancy_support &&
1735 !wait_event_freezable(qseecom.send_resp_wq,
Zhen Kong26e62742018-05-04 17:19:06 -07001736 __qseecom_listener_has_sent_rsp(
1737 data, ptr_svc))) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001738 break;
1739 }
1740
1741 if (qseecom.qsee_reentrancy_support &&
1742 !wait_event_freezable(qseecom.send_resp_wq,
1743 __qseecom_reentrancy_listener_has_sent_rsp(
1744 data, ptr_svc))) {
1745 break;
1746 }
1747 } while (1);
1748
1749 /* restore signal mask */
1750 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07001751 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001752 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
1753 data->client.app_id, lstnr, ret);
1754 rc = -ENODEV;
1755 status = QSEOS_RESULT_FAILURE;
1756 } else {
1757 status = QSEOS_RESULT_SUCCESS;
1758 }
Zhen Kong26e62742018-05-04 17:19:06 -07001759err_resp:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001760 qseecom.send_resp_flag = 0;
Zhen Kong7d500032018-08-06 16:58:31 -07001761 if (ptr_svc) {
1762 ptr_svc->send_resp_flag = 0;
1763 table = ptr_svc->sglistinfo_ptr;
1764 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001765 if (qseecom.qsee_version < QSEE_VERSION_40) {
1766 send_data_rsp.listener_id = lstnr;
1767 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001768 if (table) {
1769 send_data_rsp.sglistinfo_ptr =
1770 (uint32_t)virt_to_phys(table);
1771 send_data_rsp.sglistinfo_len =
1772 SGLISTINFO_TABLE_SIZE;
1773 dmac_flush_range((void *)table,
1774 (void *)table + SGLISTINFO_TABLE_SIZE);
1775 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001776 cmd_buf = (void *)&send_data_rsp;
1777 cmd_len = sizeof(send_data_rsp);
1778 } else {
1779 send_data_rsp_64bit.listener_id = lstnr;
1780 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001781 if (table) {
1782 send_data_rsp_64bit.sglistinfo_ptr =
1783 virt_to_phys(table);
1784 send_data_rsp_64bit.sglistinfo_len =
1785 SGLISTINFO_TABLE_SIZE;
1786 dmac_flush_range((void *)table,
1787 (void *)table + SGLISTINFO_TABLE_SIZE);
1788 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001789 cmd_buf = (void *)&send_data_rsp_64bit;
1790 cmd_len = sizeof(send_data_rsp_64bit);
1791 }
Zhen Kong7d500032018-08-06 16:58:31 -07001792 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001793 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1794 else
1795 *(uint32_t *)cmd_buf =
1796 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
1797 if (ptr_svc) {
1798 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1799 ptr_svc->ihandle,
1800 ptr_svc->sb_virt, ptr_svc->sb_length,
1801 ION_IOC_CLEAN_INV_CACHES);
1802 if (ret) {
1803 pr_err("cache operation failed %d\n", ret);
1804 return ret;
1805 }
1806 }
1807
1808 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1809 ret = __qseecom_enable_clk(CLK_QSEE);
1810 if (ret)
1811 return ret;
1812 }
1813
1814 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1815 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07001816 if (ptr_svc) {
1817 ptr_svc->listener_in_use = false;
1818 __qseecom_clean_listener_sglistinfo(ptr_svc);
1819 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001820 if (ret) {
1821 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1822 ret, data->client.app_id);
1823 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1824 __qseecom_disable_clk(CLK_QSEE);
1825 return ret;
1826 }
Zhen Kong26e62742018-05-04 17:19:06 -07001827 pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
1828 status, resp->result, data->client.app_id, lstnr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001829 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1830 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1831 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1832 resp->result, data->client.app_id, lstnr);
1833 ret = -EINVAL;
1834 }
1835 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1836 __qseecom_disable_clk(CLK_QSEE);
1837
1838 }
1839 if (rc)
1840 return rc;
1841
1842 return ret;
1843}
1844
/*
 * Handle a QSEOS_RESULT_BLOCKED_ON_LISTENER response in reentrancy mode:
 * the TA session identified by resp->resp_type is blocked because the
 * listener identified by resp->data is busy.  Sleep (signals blocked,
 * app_access_lock dropped across the wait so other callers can make
 * progress) until the listener is free, then issue a
 * CONTINUE_BLOCKED_REQ SCM call to let TZ resume the session; repeat
 * while TZ keeps reporting BLOCKED_ON_LISTENER.
 *
 * On success, *resp holds the final continuation result, which is
 * expected to be QSEOS_RESULT_INCOMPLETE; anything else yields -EINVAL.
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_process_reentrancy_blocked_on_listener(
				struct qseecom_command_scm_resp *resp,
				struct qseecom_registered_app_list *ptr_app,
				struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	unsigned int session_id;
	sigset_t new_sigset;
	sigset_t old_sigset;
	unsigned long flags;
	bool found_app = false;

	if (!resp || !data) {
		pr_err("invalid resp or data pointer\n");
		ret = -EINVAL;
		goto exit;
	}

	/* find app_id & img_name from list */
	if (!ptr_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
			if ((ptr_app->app_id == data->client.app_id) &&
				(!strcmp(ptr_app->app_name,
						data->client.app_name))) {
				found_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
					flags);
		if (!found_app) {
			pr_err("app_id %d (%s) is not found\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -ENOENT;
			goto exit;
		}
	}

	do {
		/* resp_type carries the blocked session id from TZ */
		session_id = resp->resp_type;
		list_ptr = __qseecom_find_svc(resp->data);
		if (!list_ptr) {
			pr_err("Invalid listener ID %d\n", resp->data);
			ret = -ENODATA;
			goto exit;
		}
		ptr_app->blocked_on_listener_id = resp->data;

		pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
			resp->data, list_ptr->listener_in_use,
			session_id, data->client.app_id);

		/* sleep until listener is available */
		sigfillset(&new_sigset);
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		/*
		 * app_access_lock is dropped around the wait; the block
		 * ref count tracks how many apps are parked here.
		 */
		do {
			qseecom.app_block_ref_cnt++;
			ptr_app->app_blocked = true;
			mutex_unlock(&app_access_lock);
			wait_event_freezable(
				list_ptr->listener_block_app_wq,
				!list_ptr->listener_in_use);
			mutex_lock(&app_access_lock);
			ptr_app->app_blocked = false;
			qseecom.app_block_ref_cnt--;
		} while (list_ptr->listener_in_use);

		sigprocmask(SIG_SETMASK, &old_sigset, NULL);

		ptr_app->blocked_on_listener_id = 0;
		pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
			resp->data, session_id, data->client.app_id);

		/* notify TZ that listener is available */
		ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;

		/* smcinvoke firmware unblocks by session, legacy by app */
		if (qseecom.smcinvoke_support)
			ireq.app_or_session_id = session_id;
		else
			ireq.app_or_session_id = data->client.app_id;

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
		if (ret && qseecom.smcinvoke_support) {
			/* retry with legacy cmd */
			qseecom.smcinvoke_support = false;
			ireq.app_or_session_id = data->client.app_id;
			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				&ireq, sizeof(ireq),
				&continue_resp, sizeof(continue_resp));
			qseecom.smcinvoke_support = true;
			if (ret) {
				pr_err("unblock app %d or session %d fail\n",
					data->client.app_id, session_id);
				goto exit;
			}
		}
		resp->result = continue_resp.result;
		resp->resp_type = continue_resp.resp_type;
		resp->data = continue_resp.data;
		pr_debug("unblock resp = %d\n", resp->result);
	} while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);

	if (resp->result != QSEOS_RESULT_INCOMPLETE) {
		pr_err("Unexpected unblock resp %d\n", resp->result);
		ret = -EINVAL;
	}
exit:
	return ret;
}
1963
/*
 * Reentrancy-aware variant of the incomplete-command loop.
 *
 * While TZ keeps answering QSEOS_RESULT_INCOMPLETE, wake the listener
 * service identified by resp->data, wait for its response (with all
 * signals blocked and app_access_lock dropped), then forward that
 * response back to TZ via an scm call and re-examine the new result.
 *
 * @data: client session issuing the original command.
 * @resp: in/out scm response; updated by each listener-response scm call.
 *
 * Returns 0 on success; a negative errno if the listener lookup failed
 * (rc, reported after the loop) or if an scm call / cache op failed (ret).
 */
static int __qseecom_reentrancy_process_incomplete_cmd(
		struct qseecom_dev_handle *data,
		struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	/* rc latches listener-lookup errors; a FAILURE status is still
	 * reported to TZ via err_resp before rc is returned.
	 */
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
									= {0};
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up the blocking listener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
				flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		/*
		 * list_for_each_entry leaves ptr_svc pointing at the list
		 * head container when no entry matched; this id re-check
		 * catches that case.
		 */
		if (ptr_svc->svc.listener_id != lstnr) {
			pr_err("Service %d does not exist\n",
				lstnr);
			rc = -ERESTARTSYS;
			ptr_svc = NULL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->abort == 1) {
			pr_err("Service %d abort %d\n",
				lstnr, ptr_svc->abort);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);

		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		/* unlock mutex btw waking listener and sleep-wait */
		mutex_unlock(&app_access_lock);
		do {
			if (!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);
		/* lock mutex again after resp sent */
		mutex_lock(&app_access_lock);
		ptr_svc->send_resp_flag = 0;
		qseecom.send_resp_flag = 0;

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort || ptr_svc->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
err_resp:
		/* Even on lookup failure, report the status back to TZ so
		 * the blocked command can be terminated there.
		 */
		if (ptr_svc)
			table = ptr_svc->sglistinfo_ptr;
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			if (table) {
				send_data_rsp.sglistinfo_ptr =
					(uint32_t)virt_to_phys(table);
				send_data_rsp.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			if (table) {
				send_data_rsp_64bit.sglistinfo_ptr =
					virt_to_phys(table);
				send_data_rsp_64bit.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* First u32 of either response struct is the command id. */
		if (qseecom.whitelist_support == false || table == NULL)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}
		if (lstnr == RPMB_SERVICE) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		if (ptr_svc) {
			ptr_svc->listener_in_use = false;
			__qseecom_clean_listener_sglistinfo(ptr_svc);
			wake_up_interruptible(&ptr_svc->listener_block_app_wq);
		}

		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			goto exit;
		}

		switch (resp->result) {
		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
				lstnr, data->client.app_id, resp->data);
			if (lstnr == resp->data) {
				pr_err("lstnr %d should not be blocked!\n",
					lstnr);
				ret = -EINVAL;
				goto exit;
			}
			ret = __qseecom_process_reentrancy_blocked_on_listener(
				resp, NULL, data);
			if (ret) {
				pr_err("failed to process App(%d) %s blocked on listener %d\n",
					data->client.app_id,
					data->client.app_name, resp->data);
				goto exit;
			}
			/* fall through: re-evaluate the unblocked result */
		case QSEOS_RESULT_SUCCESS:
		case QSEOS_RESULT_INCOMPLETE:
			break;
		default:
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
			goto exit;
		}
exit:
		if (lstnr == RPMB_SERVICE)
			__qseecom_disable_clk(CLK_QSEE);

	}
	if (rc)
		return rc;

	return ret;
}
2166
/*
 * QSEE does not support OS-level command reentrancy until RE phase-3,
 * and QSEE OS-level scm_call commands will fail if any TZ app is blocked.
 * Therefore, before sending an OS-level scm call, first check whether any
 * app is blocked, and if so wait until all apps are unblocked.
 */
2173static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2174{
2175 sigset_t new_sigset, old_sigset;
2176
2177 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2178 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2179 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2180 /* thread sleep until this app unblocked */
2181 while (qseecom.app_block_ref_cnt > 0) {
2182 sigfillset(&new_sigset);
2183 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2184 mutex_unlock(&app_access_lock);
2185 do {
2186 if (!wait_event_freezable(qseecom.app_block_wq,
2187 (qseecom.app_block_ref_cnt == 0)))
2188 break;
2189 } while (1);
2190 mutex_lock(&app_access_lock);
2191 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2192 }
2193 }
2194}
2195
/*
 * An scm_call to send data will fail if this TA is blocked or if more than
 * one TA is requesting listener services; so first check whether we need
 * to wait.
 */
2201static void __qseecom_reentrancy_check_if_this_app_blocked(
2202 struct qseecom_registered_app_list *ptr_app)
2203{
2204 sigset_t new_sigset, old_sigset;
2205
2206 if (qseecom.qsee_reentrancy_support) {
Zhen Kongdea10592018-07-30 17:50:10 -07002207 ptr_app->check_block++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002208 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2209 /* thread sleep until this app unblocked */
2210 sigfillset(&new_sigset);
2211 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2212 mutex_unlock(&app_access_lock);
2213 do {
2214 if (!wait_event_freezable(qseecom.app_block_wq,
2215 (!ptr_app->app_blocked &&
2216 qseecom.app_block_ref_cnt <= 1)))
2217 break;
2218 } while (1);
2219 mutex_lock(&app_access_lock);
2220 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2221 }
Zhen Kongdea10592018-07-30 17:50:10 -07002222 ptr_app->check_block--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002223 }
2224}
2225
2226static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2227 uint32_t *app_id)
2228{
2229 int32_t ret;
2230 struct qseecom_command_scm_resp resp;
2231 bool found_app = false;
2232 struct qseecom_registered_app_list *entry = NULL;
2233 unsigned long flags = 0;
2234
2235 if (!app_id) {
2236 pr_err("Null pointer to app_id\n");
2237 return -EINVAL;
2238 }
2239 *app_id = 0;
2240
2241 /* check if app exists and has been registered locally */
2242 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2243 list_for_each_entry(entry,
2244 &qseecom.registered_app_list_head, list) {
2245 if (!strcmp(entry->app_name, req.app_name)) {
2246 found_app = true;
2247 break;
2248 }
2249 }
2250 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2251 if (found_app) {
2252 pr_debug("Found app with id %d\n", entry->app_id);
2253 *app_id = entry->app_id;
2254 return 0;
2255 }
2256
2257 memset((void *)&resp, 0, sizeof(resp));
2258
2259 /* SCM_CALL to check if app_id for the mentioned app exists */
2260 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2261 sizeof(struct qseecom_check_app_ireq),
2262 &resp, sizeof(resp));
2263 if (ret) {
2264 pr_err("scm_call to check if app is already loaded failed\n");
2265 return -EINVAL;
2266 }
2267
2268 if (resp.result == QSEOS_RESULT_FAILURE)
2269 return 0;
2270
2271 switch (resp.resp_type) {
2272 /*qsee returned listener type response */
2273 case QSEOS_LISTENER_ID:
2274 pr_err("resp type is of listener type instead of app");
2275 return -EINVAL;
2276 case QSEOS_APP_ID:
2277 *app_id = resp.data;
2278 return 0;
2279 default:
2280 pr_err("invalid resp type (%d) from qsee",
2281 resp.resp_type);
2282 return -ENODEV;
2283 }
2284}
2285
2286static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2287{
2288 struct qseecom_registered_app_list *entry = NULL;
2289 unsigned long flags = 0;
2290 u32 app_id = 0;
2291 struct ion_handle *ihandle; /* Ion handle */
2292 struct qseecom_load_img_req load_img_req;
2293 int32_t ret = 0;
2294 ion_phys_addr_t pa = 0;
2295 size_t len;
2296 struct qseecom_command_scm_resp resp;
2297 struct qseecom_check_app_ireq req;
2298 struct qseecom_load_app_ireq load_req;
2299 struct qseecom_load_app_64bit_ireq load_req_64bit;
2300 void *cmd_buf = NULL;
2301 size_t cmd_len;
2302 bool first_time = false;
2303
2304 /* Copy the relevant information needed for loading the image */
2305 if (copy_from_user(&load_img_req,
2306 (void __user *)argp,
2307 sizeof(struct qseecom_load_img_req))) {
2308 pr_err("copy_from_user failed\n");
2309 return -EFAULT;
2310 }
2311
2312 /* Check and load cmnlib */
2313 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2314 if (!qseecom.commonlib_loaded &&
2315 load_img_req.app_arch == ELFCLASS32) {
2316 ret = qseecom_load_commonlib_image(data, "cmnlib");
2317 if (ret) {
2318 pr_err("failed to load cmnlib\n");
2319 return -EIO;
2320 }
2321 qseecom.commonlib_loaded = true;
2322 pr_debug("cmnlib is loaded\n");
2323 }
2324
2325 if (!qseecom.commonlib64_loaded &&
2326 load_img_req.app_arch == ELFCLASS64) {
2327 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2328 if (ret) {
2329 pr_err("failed to load cmnlib64\n");
2330 return -EIO;
2331 }
2332 qseecom.commonlib64_loaded = true;
2333 pr_debug("cmnlib64 is loaded\n");
2334 }
2335 }
2336
2337 if (qseecom.support_bus_scaling) {
2338 mutex_lock(&qsee_bw_mutex);
2339 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2340 mutex_unlock(&qsee_bw_mutex);
2341 if (ret)
2342 return ret;
2343 }
2344
2345 /* Vote for the SFPB clock */
2346 ret = __qseecom_enable_clk_scale_up(data);
2347 if (ret)
2348 goto enable_clk_err;
2349
2350 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2351 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2352 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2353
2354 ret = __qseecom_check_app_exists(req, &app_id);
2355 if (ret < 0)
2356 goto loadapp_err;
2357
2358 if (app_id) {
2359 pr_debug("App id %d (%s) already exists\n", app_id,
2360 (char *)(req.app_name));
2361 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2362 list_for_each_entry(entry,
2363 &qseecom.registered_app_list_head, list){
2364 if (entry->app_id == app_id) {
2365 entry->ref_cnt++;
2366 break;
2367 }
2368 }
2369 spin_unlock_irqrestore(
2370 &qseecom.registered_app_list_lock, flags);
2371 ret = 0;
2372 } else {
2373 first_time = true;
2374 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2375 (char *)(load_img_req.img_name));
2376 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002377 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002378 load_img_req.ifd_data_fd);
2379 if (IS_ERR_OR_NULL(ihandle)) {
2380 pr_err("Ion client could not retrieve the handle\n");
2381 ret = -ENOMEM;
2382 goto loadapp_err;
2383 }
2384
2385 /* Get the physical address of the ION BUF */
2386 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2387 if (ret) {
2388 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2389 ret);
2390 goto loadapp_err;
2391 }
2392 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2393 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2394 len, load_img_req.mdt_len,
2395 load_img_req.img_len);
2396 ret = -EINVAL;
2397 goto loadapp_err;
2398 }
2399 /* Populate the structure for sending scm call to load image */
2400 if (qseecom.qsee_version < QSEE_VERSION_40) {
2401 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2402 load_req.mdt_len = load_img_req.mdt_len;
2403 load_req.img_len = load_img_req.img_len;
2404 strlcpy(load_req.app_name, load_img_req.img_name,
2405 MAX_APP_NAME_SIZE);
2406 load_req.phy_addr = (uint32_t)pa;
2407 cmd_buf = (void *)&load_req;
2408 cmd_len = sizeof(struct qseecom_load_app_ireq);
2409 } else {
2410 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2411 load_req_64bit.mdt_len = load_img_req.mdt_len;
2412 load_req_64bit.img_len = load_img_req.img_len;
2413 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2414 MAX_APP_NAME_SIZE);
2415 load_req_64bit.phy_addr = (uint64_t)pa;
2416 cmd_buf = (void *)&load_req_64bit;
2417 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2418 }
2419
2420 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2421 ION_IOC_CLEAN_INV_CACHES);
2422 if (ret) {
2423 pr_err("cache operation failed %d\n", ret);
2424 goto loadapp_err;
2425 }
2426
2427 /* SCM_CALL to load the app and get the app_id back */
2428 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2429 cmd_len, &resp, sizeof(resp));
2430 if (ret) {
2431 pr_err("scm_call to load app failed\n");
2432 if (!IS_ERR_OR_NULL(ihandle))
2433 ion_free(qseecom.ion_clnt, ihandle);
2434 ret = -EINVAL;
2435 goto loadapp_err;
2436 }
2437
2438 if (resp.result == QSEOS_RESULT_FAILURE) {
2439 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2440 if (!IS_ERR_OR_NULL(ihandle))
2441 ion_free(qseecom.ion_clnt, ihandle);
2442 ret = -EFAULT;
2443 goto loadapp_err;
2444 }
2445
2446 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2447 ret = __qseecom_process_incomplete_cmd(data, &resp);
2448 if (ret) {
2449 pr_err("process_incomplete_cmd failed err: %d\n",
2450 ret);
2451 if (!IS_ERR_OR_NULL(ihandle))
2452 ion_free(qseecom.ion_clnt, ihandle);
2453 ret = -EFAULT;
2454 goto loadapp_err;
2455 }
2456 }
2457
2458 if (resp.result != QSEOS_RESULT_SUCCESS) {
2459 pr_err("scm_call failed resp.result unknown, %d\n",
2460 resp.result);
2461 if (!IS_ERR_OR_NULL(ihandle))
2462 ion_free(qseecom.ion_clnt, ihandle);
2463 ret = -EFAULT;
2464 goto loadapp_err;
2465 }
2466
2467 app_id = resp.data;
2468
2469 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2470 if (!entry) {
2471 ret = -ENOMEM;
2472 goto loadapp_err;
2473 }
2474 entry->app_id = app_id;
2475 entry->ref_cnt = 1;
2476 entry->app_arch = load_img_req.app_arch;
2477 /*
2478 * keymaster app may be first loaded as "keymaste" by qseecomd,
2479 * and then used as "keymaster" on some targets. To avoid app
2480 * name checking error, register "keymaster" into app_list and
2481 * thread private data.
2482 */
2483 if (!strcmp(load_img_req.img_name, "keymaste"))
2484 strlcpy(entry->app_name, "keymaster",
2485 MAX_APP_NAME_SIZE);
2486 else
2487 strlcpy(entry->app_name, load_img_req.img_name,
2488 MAX_APP_NAME_SIZE);
2489 entry->app_blocked = false;
2490 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002491 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002492
2493 /* Deallocate the handle */
2494 if (!IS_ERR_OR_NULL(ihandle))
2495 ion_free(qseecom.ion_clnt, ihandle);
2496
2497 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2498 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2499 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2500 flags);
2501
2502 pr_warn("App with id %u (%s) now loaded\n", app_id,
2503 (char *)(load_img_req.img_name));
2504 }
2505 data->client.app_id = app_id;
2506 data->client.app_arch = load_img_req.app_arch;
2507 if (!strcmp(load_img_req.img_name, "keymaste"))
2508 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2509 else
2510 strlcpy(data->client.app_name, load_img_req.img_name,
2511 MAX_APP_NAME_SIZE);
2512 load_img_req.app_id = app_id;
2513 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2514 pr_err("copy_to_user failed\n");
2515 ret = -EFAULT;
2516 if (first_time == true) {
2517 spin_lock_irqsave(
2518 &qseecom.registered_app_list_lock, flags);
2519 list_del(&entry->list);
2520 spin_unlock_irqrestore(
2521 &qseecom.registered_app_list_lock, flags);
2522 kzfree(entry);
2523 }
2524 }
2525
2526loadapp_err:
2527 __qseecom_disable_clk_scale_down(data);
2528enable_clk_err:
2529 if (qseecom.support_bus_scaling) {
2530 mutex_lock(&qsee_bw_mutex);
2531 qseecom_unregister_bus_bandwidth_needs(data);
2532 mutex_unlock(&qsee_bw_mutex);
2533 }
2534 return ret;
2535}
2536
2537static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2538{
2539 int ret = 1; /* Set unload app */
2540
2541 wake_up_all(&qseecom.send_resp_wq);
2542 if (qseecom.qsee_reentrancy_support)
2543 mutex_unlock(&app_access_lock);
2544 while (atomic_read(&data->ioctl_count) > 1) {
2545 if (wait_event_freezable(data->abort_wq,
2546 atomic_read(&data->ioctl_count) <= 1)) {
2547 pr_err("Interrupted from abort\n");
2548 ret = -ERESTARTSYS;
2549 break;
2550 }
2551 }
2552 if (qseecom.qsee_reentrancy_support)
2553 mutex_lock(&app_access_lock);
2554 return ret;
2555}
2556
2557static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2558{
2559 int ret = 0;
2560
2561 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2562 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2563 ion_free(qseecom.ion_clnt, data->client.ihandle);
2564 data->client.ihandle = NULL;
2565 }
2566 return ret;
2567}
2568
/*
 * Release this session's reference on its trusted app and, when this is
 * the last reference (or the app crashed), shut the app down in TZ and
 * remove it from the registered app list.
 *
 * @data:      client session being closed.
 * @app_crash: true when the caller detected an app/client crash; forces
 *             unload unless the app is currently blocked on a listener.
 *
 * Always unmaps this session's ion memory and marks it released.
 * Returns 0 on success or a negative errno.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;
	bool found_app = false;
	bool found_dead_app = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* keymaster stays resident; skip straight to local cleanup */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/*
					 * An app blocked on a listener must
					 * not be force-unloaded even on crash.
					 */
					if (ptr_app->app_blocked ||
						ptr_app->check_block)
						app_crash = false;
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				/* same id but different name: stale entry */
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for the scm call to unload the app */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
								req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
					data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
					data->client.app_id);
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
									ret);
				goto unload_exit;
			}
		}
	}

unload_exit:
	/* Drop the local reference; delete the entry if we unloaded it. */
	if (found_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2692
2693static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2694 unsigned long virt)
2695{
2696 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2697}
2698
2699static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2700 unsigned long virt)
2701{
2702 return (uintptr_t)data->client.sb_virt +
2703 (virt - data->client.user_virt_sb_base);
2704}
2705
2706int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2707 struct qseecom_send_svc_cmd_req *req_ptr,
2708 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2709{
2710 int ret = 0;
2711 void *req_buf = NULL;
2712
2713 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2714 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2715 req_ptr, send_svc_ireq_ptr);
2716 return -EINVAL;
2717 }
2718
2719 /* Clients need to ensure req_buf is at base offset of shared buffer */
2720 if ((uintptr_t)req_ptr->cmd_req_buf !=
2721 data_ptr->client.user_virt_sb_base) {
2722 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2723 return -EINVAL;
2724 }
2725
2726 if (data_ptr->client.sb_length <
2727 sizeof(struct qseecom_rpmb_provision_key)) {
2728 pr_err("shared buffer is too small to hold key type\n");
2729 return -EINVAL;
2730 }
2731 req_buf = data_ptr->client.sb_virt;
2732
2733 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2734 send_svc_ireq_ptr->key_type =
2735 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2736 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2737 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2738 data_ptr, (uintptr_t)req_ptr->resp_buf));
2739 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2740
2741 return ret;
2742}
2743
2744int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2745 struct qseecom_send_svc_cmd_req *req_ptr,
2746 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2747{
2748 int ret = 0;
2749 uint32_t reqd_len_sb_in = 0;
2750
2751 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2752 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2753 req_ptr, send_svc_ireq_ptr);
2754 return -EINVAL;
2755 }
2756
2757 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2758 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2759 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2760 pr_err("Required: %u, Available: %zu\n",
2761 reqd_len_sb_in, data_ptr->client.sb_length);
2762 return -ENOMEM;
2763 }
2764
2765 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2766 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2767 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2768 data_ptr, (uintptr_t)req_ptr->resp_buf));
2769 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2770
2771 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2772 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2773
2774
2775 return ret;
2776}
2777
/*
 * Validate a user-supplied send_svc_cmd request against the client's
 * registered shared buffer: both cmd_req_buf and resp_buf must lie
 * entirely within [user_virt_sb_base, user_virt_sb_base + sb_length),
 * lengths must be non-zero and fit the buffer, and none of the
 * pointer/length sums may overflow.
 *
 * Returns 0 when the request is valid, -EINVAL for any malformed
 * field, -ENOMEM when cmd + resp do not fit in the shared buffer.
 */
static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_svc_cmd_req *req)
{
	if (!req || !req->resp_buf || !req->cmd_req_buf) {
		pr_err("req or cmd buffer or response buffer is null\n");
		return -EINVAL;
	}

	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->client.sb_virt == NULL) {
		pr_err("sb_virt null\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base == 0) {
		pr_err("user_virt_sb_base is null\n");
		return -EINVAL;
	}

	if (data->client.sb_length == 0) {
		pr_err("sb_length is 0\n");
		return -EINVAL;
	}

	/* Both user buffers must start inside the shared buffer. */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* Overflow checks must precede the arithmetic they protect. */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
				data->client.sb_length);
		return -ENOMEM;
	}
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
				(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Both buffers must also end inside the shared buffer. */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
2862
/*
 * QSEECOM_IOCTL_SEND_SVC_CMD_REQ handler: send a secure service command
 * (RPMB provisioning/erase/status or FSM key commands) to TZ using the
 * client's registered shared buffer.
 *
 * Votes for bandwidth/clocks around the scm call, cleans the shared
 * buffer cache before the call and invalidates it afterwards.
 * Returns 0 on success or a negative errno.
 */
static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	int ret = 0;
	struct qseecom_client_send_service_ireq send_svc_ireq;
	struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
	struct qseecom_command_scm_resp resp;
	struct qseecom_send_svc_cmd_req req;
	void *send_req_ptr;
	size_t req_buf_size;

	if (copy_from_user(&req,
				(void __user *)argp,
				sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}

	if (__validate_send_service_cmd_inputs(data, &req))
		return -EINVAL;

	data->type = QSEECOM_SECURE_SERVICE;

	/* Select and populate the TZ request struct for this command id. */
	switch (req.cmd_id) {
	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
	case QSEOS_RPMB_ERASE_COMMAND:
	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
		send_req_ptr = &send_svc_ireq;
		req_buf_size = sizeof(send_svc_ireq);
		if (__qseecom_process_rpmb_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	case QSEOS_FSM_LTEOTA_REQ_CMD:
	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
	case QSEOS_FSM_IKE_REQ_CMD:
	case QSEOS_FSM_IKE_REQ_RSP_CMD:
	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
	case QSEOS_FSM_OEM_FUSE_READ_ROW:
	case QSEOS_FSM_ENCFS_REQ_CMD:
	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
		send_req_ptr = &send_fsm_key_svc_ireq;
		req_buf_size = sizeof(send_fsm_key_svc_ireq);
		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	default:
		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
		return -EINVAL;
	}

	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
		if (ret) {
			pr_err("Fail to set bw HIGH\n");
			return ret;
		}
	} else {
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clocks with err %d\n", ret);
			goto exit;
		}
	}

	/* Flush the shared buffer so TZ sees the request contents. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				(const void *)send_req_ptr,
				req_buf_size, &resp, sizeof(resp));
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		}
		goto exit;
	}
	/* Invalidate so the CPU sees TZ's writes to the shared buffer. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		pr_debug("qseos_result_incomplete\n");
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd fail with result: %d\n",
				resp.result);
		}
		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
			/* Status query: the "result" is the key status. */
			pr_warn("RPMB key status is 0x%x\n", resp.result);
			if (put_user(resp.result,
				(uint32_t __user *)req.resp_buf)) {
				ret = -EINVAL;
				goto exit;
			}
			ret = 0;
		}
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with resp.result: %d\n", resp.result);
		ret = -EINVAL;
		break;
	default:
		pr_err("Response result %d not supported\n",
				resp.result);
		ret = -EINVAL;
		break;
	}
	if (!qseecom.support_bus_scaling) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	} else {
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
	}

exit:
	return ret;
}
3000
/*
 * Validate a user-supplied send_cmd request against the client's shared
 * ion buffer: both the command and response buffers must lie entirely
 * inside [user_virt_sb_base, user_virt_sb_base + sb_length).  Every sum
 * is guarded by an explicit overflow check *before* it is formed, so the
 * ordering of the checks below is significant — do not reorder them.
 *
 * Returns 0 when the request is acceptable, -EINVAL for any bad pointer,
 * bad length or arithmetic overflow, and -ENOMEM when cmd + resp do not
 * fit in the shared buffer together.
 */
static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)

{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}
	/* A NULL resp_buf is tolerated only together with resp_len == 0. */
	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
						(req->cmd_req_buf == NULL)) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* Start addresses must fall inside the shared buffer window. */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* Guard the cmd_req_len + resp_len sum used two checks below. */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* Guard the pointer + length sums used in the final range check. */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* End addresses (exclusive) must also stay inside the window. */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
3070
/*
 * Handle the TZ response to a command when QSEE reentrancy support is
 * enabled.
 *
 * BLOCKED_ON_LISTENER: wait for the listener to unblock the app, then
 * continue as an ordinary INCOMPLETE response (deliberate fallthrough).
 * INCOMPLETE: mark the app blocked while the incomplete command is
 * re-driven, then wake any waiters on app_block_wq.
 * SUCCESS: nothing further to do.
 *
 * Returns 0 on success, a negative errno from the sub-handlers, or
 * -EINVAL for an unrecognized result code.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
			resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/*
		 * fall through: once the listener is unblocked, the command
		 * is completed exactly like a plain INCOMPLETE response.
		 */

	case QSEOS_RESULT_INCOMPLETE:
		/*
		 * Flag the app (and bump the global block count) so other
		 * senders wait until the incomplete command finishes.
		 */
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
				ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
						resp->result);
		return -EINVAL;
	}
}
3109
/*
 * Issue a QSEOS_CLIENT_SEND_DATA command to the trusted app bound to
 * @data.  The caller must already have validated @req with
 * __validate_send_cmd_inputs(); cmd/resp buffers live inside the
 * client's shared ion buffer.
 *
 * Builds a 32-bit or 64-bit ireq depending on the QSEE version, flushes
 * the shared buffer and the sglist info table, makes the SCM call, and
 * then drives INCOMPLETE / reentrancy responses to completion.  The
 * shared buffer is cache-invalidated on *every* exit path after the SCM
 * call so the client observes TZ's writes.
 *
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	int ret2 = 0;	/* result of the final cache invalidate */
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy interface: physical addresses truncated to 32 bits. */
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* TZ reads the table by physical address: flush it out. */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* Both ireq layouts begin with the command-id word. */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* Push the client's request bytes out to memory for TZ to read. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
			ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/*
	 * Invalidate the shared buffer on success and failure alike so the
	 * client reads TZ's response rather than stale cache lines; a
	 * failure here overrides any earlier success in @ret.
	 */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret2);
		return ret2;
	}
	return ret;
}
3247
3248static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3249{
3250 int ret = 0;
3251 struct qseecom_send_cmd_req req;
3252
3253 ret = copy_from_user(&req, argp, sizeof(req));
3254 if (ret) {
3255 pr_err("copy_from_user failed\n");
3256 return ret;
3257 }
3258
3259 if (__validate_send_cmd_inputs(data, &req))
3260 return -EINVAL;
3261
3262 ret = __qseecom_send_cmd(data, &req);
3263
3264 if (ret)
3265 return ret;
3266
3267 return ret;
3268}
3269
3270int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3271 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3272 struct qseecom_dev_handle *data, int i) {
3273
3274 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3275 (req->ifd_data[i].fd > 0)) {
3276 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3277 (req->ifd_data[i].cmd_buf_offset >
3278 req->cmd_req_len - sizeof(uint32_t))) {
3279 pr_err("Invalid offset (req len) 0x%x\n",
3280 req->ifd_data[i].cmd_buf_offset);
3281 return -EINVAL;
3282 }
3283 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3284 (lstnr_resp->ifd_data[i].fd > 0)) {
3285 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3286 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3287 lstnr_resp->resp_len - sizeof(uint32_t))) {
3288 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3289 lstnr_resp->ifd_data[i].cmd_buf_offset);
3290 return -EINVAL;
3291 }
3292 }
3293 return 0;
3294}
3295
/*
 * Walk the MAX_ION_FD ifd_data slots of a modfd command (client app) or
 * listener response and, for each active fd, patch the referenced offset
 * in the message buffer with the ion buffer's physical address: a single
 * 32-bit address when the buffer has one sg entry, or an array of
 * qseecom_sg_entry records otherwise.
 *
 * When @cleanup is false the buffers are cache-cleaned for TZ and the
 * sglist info table (per-app or per-listener) is populated; when true
 * the patched fields are zeroed out and the caches invalidated so the
 * caller sees TZ's writes.
 *
 * Returns 0 on success; every failure path reports -ENOMEM (including
 * offset-validation errors — callers only test for non-zero).
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Resolve the fd and the in-message patch location (field). */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* One entry: patch a single 32-bit address in place. */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: write an sg-entry array at field,
			 * after verifying the array fits inside the buffer. */
			struct qseecom_sg_entry *update;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
						(req->cmd_req_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			/* TZ is done: pull its writes into the CPU caches. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Flush so TZ sees our writes, then record the buffer
			 * in the sglist info table for the whitelist path. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3512
/*
 * When an ion buffer carries more scatter-gather entries than the inline
 * message field can hold (> QSEECOM_MAX_SG_ENTRY), allocate a contiguous
 * DMA-coherent side buffer, copy all 64-bit sg entries into it, and
 * write a qseecom_sg_list_buf_hdr_64bit at @field that points TZ at the
 * side buffer.  The allocation is recorded in
 * data->client.sec_buf_fd[fd_idx] so the cleanup pass can free it.
 *
 * Returns 0 on success, -ENOMEM on a bad index or allocation failure.
 */
static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
			char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
{
	struct scatterlist *sg = sg_ptr->sgl;
	struct qseecom_sg_entry_64bit *sg_entry;
	struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
	void *buf;
	uint i;
	size_t size;
	dma_addr_t coh_pmem;

	if (fd_idx >= MAX_ION_FD) {
		pr_err("fd_idx [%d] is invalid\n", fd_idx);
		return -ENOMEM;
	}
	buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
	memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
	/* Allocate a contiguous kernel buffer */
	size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
	/*
	 * NOTE(review): this rounding allocates one full extra page when
	 * size is already page-aligned (PAGE_ALIGN() would not).  Also,
	 * this path is reached only when nents > QSEECOM_MAX_SG_ENTRY, so
	 * nents has no upper bound here — confirm the multiplication above
	 * cannot overflow size_t for an untrusted fd's sg table.
	 */
	size = (size + PAGE_SIZE) & PAGE_MASK;
	buf = dma_alloc_coherent(qseecom.pdev,
			size, &coh_pmem, GFP_KERNEL);
	if (buf == NULL) {
		pr_err("failed to alloc memory for sg buf\n");
		return -ENOMEM;
	}
	/* update qseecom_sg_list_buf_hdr_64bit */
	buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
	buf_hdr->new_buf_phys_addr = coh_pmem;
	buf_hdr->nents_total = sg_ptr->nents;
	/* save the left sg entries into new allocated buf */
	sg_entry = (struct qseecom_sg_entry_64bit *)buf;
	for (i = 0; i < sg_ptr->nents; i++) {
		sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
		sg_entry->len = sg->length;
		sg_entry++;
		sg = sg_next(sg);
	}

	/* Record the allocation so the cleanup pass can dma_free_coherent(). */
	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
	data->client.sec_buf_fd[fd_idx].vbase = buf;
	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
	data->client.sec_buf_fd[fd_idx].size = size;

	return 0;
}
3559
/*
 * 64-bit counterpart of __qseecom_update_cmd_buf(): patch each active
 * ifd_data slot of a modfd command (client app) or listener response
 * with the ion buffer's physical address, using 64-bit addresses and
 * qseecom_sg_entry_64bit records.
 *
 * Buffers exceeding QSEECOM_MAX_SG_ENTRY entries are diverted to a
 * DMA-coherent side buffer via __qseecom_allocate_sg_list_buffer(); the
 * cleanup pass frees those side buffers.  When @cleanup is false the
 * buffers are cache-cleaned and the sglist info table populated; when
 * true the patched fields are zeroed and caches invalidated.
 *
 * Returns 0 on success; all failure paths report -ENOMEM and free any
 * side buffers allocated so far.
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Resolve the fd and the in-message patch location (field). */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			/* Too many entries for the inline field: use (or free)
			 * the DMA-coherent side buffer instead. */
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			/* Verify the sg-entry array fits inside the buffer. */
			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
					(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			/* TZ is done: pull its writes into the CPU caches. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Flush so TZ sees our writes, then record the buffer
			 * in the sglist info table for the whitelist path. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* Free every side buffer allocated across earlier iterations. */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3758
3759static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3760 void __user *argp,
3761 bool is_64bit_addr)
3762{
3763 int ret = 0;
3764 int i;
3765 struct qseecom_send_modfd_cmd_req req;
3766 struct qseecom_send_cmd_req send_cmd_req;
3767
3768 ret = copy_from_user(&req, argp, sizeof(req));
3769 if (ret) {
3770 pr_err("copy_from_user failed\n");
3771 return ret;
3772 }
3773
3774 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3775 send_cmd_req.cmd_req_len = req.cmd_req_len;
3776 send_cmd_req.resp_buf = req.resp_buf;
3777 send_cmd_req.resp_len = req.resp_len;
3778
3779 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3780 return -EINVAL;
3781
3782 /* validate offsets */
3783 for (i = 0; i < MAX_ION_FD; i++) {
3784 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3785 pr_err("Invalid offset %d = 0x%x\n",
3786 i, req.ifd_data[i].cmd_buf_offset);
3787 return -EINVAL;
3788 }
3789 }
3790 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3791 (uintptr_t)req.cmd_req_buf);
3792 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3793 (uintptr_t)req.resp_buf);
3794
3795 if (!is_64bit_addr) {
3796 ret = __qseecom_update_cmd_buf(&req, false, data);
3797 if (ret)
3798 return ret;
3799 ret = __qseecom_send_cmd(data, &send_cmd_req);
3800 if (ret)
3801 return ret;
3802 ret = __qseecom_update_cmd_buf(&req, true, data);
3803 if (ret)
3804 return ret;
3805 } else {
3806 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3807 if (ret)
3808 return ret;
3809 ret = __qseecom_send_cmd(data, &send_cmd_req);
3810 if (ret)
3811 return ret;
3812 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3813 if (ret)
3814 return ret;
3815 }
3816
3817 return ret;
3818}
3819
3820static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3821 void __user *argp)
3822{
3823 return __qseecom_send_modfd_cmd(data, argp, false);
3824}
3825
3826static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
3827 void __user *argp)
3828{
3829 return __qseecom_send_modfd_cmd(data, argp, true);
3830}
3831
3832
3833
3834static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3835 struct qseecom_registered_listener_list *svc)
3836{
3837 int ret;
3838
3839 ret = (svc->rcv_req_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07003840 return ret || data->abort || svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003841}
3842
3843static int qseecom_receive_req(struct qseecom_dev_handle *data)
3844{
3845 int ret = 0;
3846 struct qseecom_registered_listener_list *this_lstnr;
3847
3848 this_lstnr = __qseecom_find_svc(data->listener.id);
3849 if (!this_lstnr) {
3850 pr_err("Invalid listener ID\n");
3851 return -ENODATA;
3852 }
3853
3854 while (1) {
3855 if (wait_event_freezable(this_lstnr->rcv_req_wq,
3856 __qseecom_listener_has_rcvd_req(data,
3857 this_lstnr))) {
3858 pr_debug("Interrupted: exiting Listener Service = %d\n",
3859 (uint32_t)data->listener.id);
3860 /* woken up for different reason */
3861 return -ERESTARTSYS;
3862 }
3863
Zhen Kong26e62742018-05-04 17:19:06 -07003864 if (data->abort || this_lstnr->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003865 pr_err("Aborting Listener Service = %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07003866 (uint32_t)data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003867 return -ENODEV;
3868 }
3869 this_lstnr->rcv_req_flag = 0;
3870 break;
3871 }
3872 return ret;
3873}
3874
3875static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3876{
3877 unsigned char app_arch = 0;
3878 struct elf32_hdr *ehdr;
3879 struct elf64_hdr *ehdr64;
3880
3881 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3882
3883 switch (app_arch) {
3884 case ELFCLASS32: {
3885 ehdr = (struct elf32_hdr *)fw_entry->data;
3886 if (fw_entry->size < sizeof(*ehdr)) {
3887 pr_err("%s: Not big enough to be an elf32 header\n",
3888 qseecom.pdev->init_name);
3889 return false;
3890 }
3891 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3892 pr_err("%s: Not an elf32 header\n",
3893 qseecom.pdev->init_name);
3894 return false;
3895 }
3896 if (ehdr->e_phnum == 0) {
3897 pr_err("%s: No loadable segments\n",
3898 qseecom.pdev->init_name);
3899 return false;
3900 }
3901 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3902 sizeof(struct elf32_hdr) > fw_entry->size) {
3903 pr_err("%s: Program headers not within mdt\n",
3904 qseecom.pdev->init_name);
3905 return false;
3906 }
3907 break;
3908 }
3909 case ELFCLASS64: {
3910 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3911 if (fw_entry->size < sizeof(*ehdr64)) {
3912 pr_err("%s: Not big enough to be an elf64 header\n",
3913 qseecom.pdev->init_name);
3914 return false;
3915 }
3916 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3917 pr_err("%s: Not an elf64 header\n",
3918 qseecom.pdev->init_name);
3919 return false;
3920 }
3921 if (ehdr64->e_phnum == 0) {
3922 pr_err("%s: No loadable segments\n",
3923 qseecom.pdev->init_name);
3924 return false;
3925 }
3926 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3927 sizeof(struct elf64_hdr) > fw_entry->size) {
3928 pr_err("%s: Program headers not within mdt\n",
3929 qseecom.pdev->init_name);
3930 return false;
3931 }
3932 break;
3933 }
3934 default: {
3935 pr_err("QSEE app arch %u is not supported\n", app_arch);
3936 return false;
3937 }
3938 }
3939 return true;
3940}
3941
3942static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
3943 uint32_t *app_arch)
3944{
3945 int ret = -1;
3946 int i = 0, rc = 0;
3947 const struct firmware *fw_entry = NULL;
3948 char fw_name[MAX_APP_NAME_SIZE];
3949 struct elf32_hdr *ehdr;
3950 struct elf64_hdr *ehdr64;
3951 int num_images = 0;
3952
3953 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
3954 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
3955 if (rc) {
3956 pr_err("error with request_firmware\n");
3957 ret = -EIO;
3958 goto err;
3959 }
3960 if (!__qseecom_is_fw_image_valid(fw_entry)) {
3961 ret = -EIO;
3962 goto err;
3963 }
3964 *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3965 *fw_size = fw_entry->size;
3966 if (*app_arch == ELFCLASS32) {
3967 ehdr = (struct elf32_hdr *)fw_entry->data;
3968 num_images = ehdr->e_phnum;
3969 } else if (*app_arch == ELFCLASS64) {
3970 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3971 num_images = ehdr64->e_phnum;
3972 } else {
3973 pr_err("QSEE %s app, arch %u is not supported\n",
3974 appname, *app_arch);
3975 ret = -EIO;
3976 goto err;
3977 }
3978 pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
3979 release_firmware(fw_entry);
3980 fw_entry = NULL;
3981 for (i = 0; i < num_images; i++) {
3982 memset(fw_name, 0, sizeof(fw_name));
3983 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
3984 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
3985 if (ret)
3986 goto err;
3987 if (*fw_size > U32_MAX - fw_entry->size) {
3988 pr_err("QSEE %s app file size overflow\n", appname);
3989 ret = -EINVAL;
3990 goto err;
3991 }
3992 *fw_size += fw_entry->size;
3993 release_firmware(fw_entry);
3994 fw_entry = NULL;
3995 }
3996
3997 return ret;
3998err:
3999 if (fw_entry)
4000 release_firmware(fw_entry);
4001 *fw_size = 0;
4002 return ret;
4003}
4004
/*
 * __qseecom_get_fw_data() - assemble a split TA image into one buffer.
 * @appname:  base name; copies "<appname>.mdt" then each "<appname>.bNN".
 * @img_data: destination buffer with capacity @fw_size bytes.
 * @fw_size:  capacity of @img_data, as computed by __qseecom_get_fw_size().
 * @load_req: out: mdt_len and img_len are filled in for the SCM load request.
 *
 * Return: 0 on success; -EIO on a firmware request failure,
 * -EINVAL if any piece would overflow @img_data or the u32 running length.
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
		uint32_t fw_size,
		struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;	/* write cursor into img_data */
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	/* The .mdt (ELF header + program headers + metadata) goes first. */
	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}

	load_req->img_len = fw_entry->size;
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/

	/* e_ident[EI_CLASS] selects the 32- vs 64-bit ELF header layout. */
	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
			appname, app_arch);
		ret = -EIO;
		goto err;
	}
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* Append each segment blob directly after the mdt. */
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		/* guard both u32 overflow and the caller's buffer bound */
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	/* release_firmware(NULL) is a no-op, so this is safe on all paths */
	release_firmware(fw_entry);
	return ret;
}
4076
/*
 * __qseecom_allocate_img_data() - allocate and map an ION buffer for a TA image.
 * @pihandle: out: ION handle of the allocated buffer.
 * @data:     out: kernel virtual mapping of the buffer.
 * @fw_size:  requested buffer size in bytes (4K-aligned allocation).
 * @paddr:    out: physical address of the buffer.
 *
 * NOTE(review): the retry loop drops and re-takes app_access_lock around
 * the sleep, so the caller is assumed to hold that mutex — confirm against
 * all call sites before changing lock usage here.
 *
 * Return: 0 on success, -ENOMEM on alloc/map failure, -EIO if ion_phys fails.
 */
static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
{
	size_t len = 0;
	int ret = 0;
	ion_phys_addr_t pa;
	struct ion_handle *ihandle = NULL;
	u8 *img_data = NULL;
	int retry = 0;

	/*
	 * Retry the allocation up to QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP
	 * times; release app_access_lock while sleeping between attempts
	 * so other qseecom work can make progress.
	 */
	do {
		if (retry++) {
			mutex_unlock(&app_access_lock);
			msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
			mutex_lock(&app_access_lock);
		}
		ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
			SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), 0);
	} while (IS_ERR_OR_NULL(ihandle) &&
			(retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));

	if (IS_ERR_OR_NULL(ihandle)) {
		pr_err("ION alloc failed\n");
		return -ENOMEM;
	}
	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
					ihandle);

	if (IS_ERR_OR_NULL(img_data)) {
		pr_err("ION memory mapping for image loading failed\n");
		ret = -ENOMEM;
		goto exit_ion_free;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
	if (ret) {
		pr_err("physical memory retrieval failure\n");
		ret = -EIO;
		goto exit_ion_unmap_kernel;
	}

	/* success: hand ownership of handle/mapping to the caller */
	*pihandle = ihandle;
	*data = img_data;
	*paddr = pa;
	return ret;

exit_ion_unmap_kernel:
	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
exit_ion_free:
	ion_free(qseecom.ion_clnt, ihandle);
	ihandle = NULL;
	return ret;
}
4130
/*
 * __qseecom_free_img_data() - unmap and free a TA image ION buffer.
 * @ihandle: handle from __qseecom_allocate_img_data(); must be valid
 *           (no NULL check here). Cleared to NULL on return to prevent reuse.
 */
static void __qseecom_free_img_data(struct ion_handle **ihandle)
{
	ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
	ion_free(qseecom.ion_clnt, *ihandle);
	*ihandle = NULL;
}
4137
4138static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4139 uint32_t *app_id)
4140{
4141 int ret = -1;
4142 uint32_t fw_size = 0;
4143 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4144 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4145 struct qseecom_command_scm_resp resp;
4146 u8 *img_data = NULL;
4147 ion_phys_addr_t pa = 0;
4148 struct ion_handle *ihandle = NULL;
4149 void *cmd_buf = NULL;
4150 size_t cmd_len;
4151 uint32_t app_arch = 0;
4152
4153 if (!data || !appname || !app_id) {
4154 pr_err("Null pointer to data or appname or appid\n");
4155 return -EINVAL;
4156 }
4157 *app_id = 0;
4158 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4159 return -EIO;
4160 data->client.app_arch = app_arch;
4161
4162 /* Check and load cmnlib */
4163 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4164 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4165 ret = qseecom_load_commonlib_image(data, "cmnlib");
4166 if (ret) {
4167 pr_err("failed to load cmnlib\n");
4168 return -EIO;
4169 }
4170 qseecom.commonlib_loaded = true;
4171 pr_debug("cmnlib is loaded\n");
4172 }
4173
4174 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4175 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4176 if (ret) {
4177 pr_err("failed to load cmnlib64\n");
4178 return -EIO;
4179 }
4180 qseecom.commonlib64_loaded = true;
4181 pr_debug("cmnlib64 is loaded\n");
4182 }
4183 }
4184
4185 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4186 if (ret)
4187 return ret;
4188
4189 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4190 if (ret) {
4191 ret = -EIO;
4192 goto exit_free_img_data;
4193 }
4194
4195 /* Populate the load_req parameters */
4196 if (qseecom.qsee_version < QSEE_VERSION_40) {
4197 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4198 load_req.mdt_len = load_req.mdt_len;
4199 load_req.img_len = load_req.img_len;
4200 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4201 load_req.phy_addr = (uint32_t)pa;
4202 cmd_buf = (void *)&load_req;
4203 cmd_len = sizeof(struct qseecom_load_app_ireq);
4204 } else {
4205 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4206 load_req_64bit.mdt_len = load_req.mdt_len;
4207 load_req_64bit.img_len = load_req.img_len;
4208 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4209 load_req_64bit.phy_addr = (uint64_t)pa;
4210 cmd_buf = (void *)&load_req_64bit;
4211 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4212 }
4213
4214 if (qseecom.support_bus_scaling) {
4215 mutex_lock(&qsee_bw_mutex);
4216 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4217 mutex_unlock(&qsee_bw_mutex);
4218 if (ret) {
4219 ret = -EIO;
4220 goto exit_free_img_data;
4221 }
4222 }
4223
4224 ret = __qseecom_enable_clk_scale_up(data);
4225 if (ret) {
4226 ret = -EIO;
4227 goto exit_unregister_bus_bw_need;
4228 }
4229
4230 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4231 img_data, fw_size,
4232 ION_IOC_CLEAN_INV_CACHES);
4233 if (ret) {
4234 pr_err("cache operation failed %d\n", ret);
4235 goto exit_disable_clk_vote;
4236 }
4237
4238 /* SCM_CALL to load the image */
4239 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4240 &resp, sizeof(resp));
4241 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004242 pr_err("scm_call to load failed : ret %d, result %x\n",
4243 ret, resp.result);
4244 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4245 ret = -EEXIST;
4246 else
4247 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004248 goto exit_disable_clk_vote;
4249 }
4250
4251 switch (resp.result) {
4252 case QSEOS_RESULT_SUCCESS:
4253 *app_id = resp.data;
4254 break;
4255 case QSEOS_RESULT_INCOMPLETE:
4256 ret = __qseecom_process_incomplete_cmd(data, &resp);
4257 if (ret)
4258 pr_err("process_incomplete_cmd FAILED\n");
4259 else
4260 *app_id = resp.data;
4261 break;
4262 case QSEOS_RESULT_FAILURE:
4263 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4264 break;
4265 default:
4266 pr_err("scm call return unknown response %d\n", resp.result);
4267 ret = -EINVAL;
4268 break;
4269 }
4270
4271exit_disable_clk_vote:
4272 __qseecom_disable_clk_scale_down(data);
4273
4274exit_unregister_bus_bw_need:
4275 if (qseecom.support_bus_scaling) {
4276 mutex_lock(&qsee_bw_mutex);
4277 qseecom_unregister_bus_bandwidth_needs(data);
4278 mutex_unlock(&qsee_bw_mutex);
4279 }
4280
4281exit_free_img_data:
4282 __qseecom_free_img_data(&ihandle);
4283 return ret;
4284}
4285
/*
 * qseecom_load_commonlib_image() - load a QSEE common library (cmnlib/cmnlib64).
 * @data:        client device handle used for bus/clock voting and
 *               incomplete-command processing.
 * @cmnlib_name: library base name on the firmware filesystem.
 *
 * Mirrors the TA load path: assemble the split image into an ION buffer,
 * vote bandwidth/clocks, flush caches, then issue
 * QSEOS_LOAD_SERV_IMAGE_COMMAND via SCM.
 *
 * Return: 0 on success, -EINVAL on bad name or TZ failure, -EIO otherwise.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;
	struct ion_handle *cmnlib_ion_handle = NULL;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
					&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	/* img_len/mdt_len of load_req are filled in here */
	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	/* choose the 32- or 64-bit request layout based on QSEE version */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* TZ reads the buffer by physical address: flush CPU caches first */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* a listener request interleaved; service it to completion */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&cmnlib_ion_handle);
	return ret;
}
4406
4407static int qseecom_unload_commonlib_image(void)
4408{
4409 int ret = -EINVAL;
4410 struct qseecom_unload_lib_image_ireq unload_req = {0};
4411 struct qseecom_command_scm_resp resp;
4412
4413 /* Populate the remaining parameters */
4414 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4415
4416 /* SCM_CALL to load the image */
4417 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4418 sizeof(struct qseecom_unload_lib_image_ireq),
4419 &resp, sizeof(resp));
4420 if (ret) {
4421 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4422 ret = -EIO;
4423 } else {
4424 switch (resp.result) {
4425 case QSEOS_RESULT_SUCCESS:
4426 break;
4427 case QSEOS_RESULT_FAILURE:
4428 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4429 break;
4430 default:
4431 pr_err("scm call return unknown response %d\n",
4432 resp.result);
4433 ret = -EINVAL;
4434 break;
4435 }
4436 }
4437
4438 return ret;
4439}
4440
4441int qseecom_start_app(struct qseecom_handle **handle,
4442 char *app_name, uint32_t size)
4443{
4444 int32_t ret = 0;
4445 unsigned long flags = 0;
4446 struct qseecom_dev_handle *data = NULL;
4447 struct qseecom_check_app_ireq app_ireq;
4448 struct qseecom_registered_app_list *entry = NULL;
4449 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4450 bool found_app = false;
4451 size_t len;
4452 ion_phys_addr_t pa;
4453 uint32_t fw_size, app_arch;
4454 uint32_t app_id = 0;
4455
4456 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4457 pr_err("Not allowed to be called in %d state\n",
4458 atomic_read(&qseecom.qseecom_state));
4459 return -EPERM;
4460 }
4461 if (!app_name) {
4462 pr_err("failed to get the app name\n");
4463 return -EINVAL;
4464 }
4465
Zhen Kong64a6d7282017-06-16 11:55:07 -07004466 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004467 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004468 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004469 return -EINVAL;
4470 }
4471
4472 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4473 if (!(*handle))
4474 return -ENOMEM;
4475
4476 data = kzalloc(sizeof(*data), GFP_KERNEL);
4477 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304478 ret = -ENOMEM;
4479 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004480 }
4481 data->abort = 0;
4482 data->type = QSEECOM_CLIENT_APP;
4483 data->released = false;
4484 data->client.sb_length = size;
4485 data->client.user_virt_sb_base = 0;
4486 data->client.ihandle = NULL;
4487
4488 init_waitqueue_head(&data->abort_wq);
4489
4490 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4491 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4492 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4493 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304494 ret = -ENOMEM;
4495 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004496 }
4497 mutex_lock(&app_access_lock);
4498
Zhen Kong5d02be92018-05-29 16:17:29 -07004499recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004500 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4501 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4502 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4503 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304504 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004505
4506 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4507 if (app_id) {
4508 pr_warn("App id %d for [%s] app exists\n", app_id,
4509 (char *)app_ireq.app_name);
4510 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4511 list_for_each_entry(entry,
4512 &qseecom.registered_app_list_head, list){
4513 if (entry->app_id == app_id) {
4514 entry->ref_cnt++;
4515 found_app = true;
4516 break;
4517 }
4518 }
4519 spin_unlock_irqrestore(
4520 &qseecom.registered_app_list_lock, flags);
4521 if (!found_app)
4522 pr_warn("App_id %d [%s] was loaded but not registered\n",
4523 ret, (char *)app_ireq.app_name);
4524 } else {
4525 /* load the app and get the app_id */
4526 pr_debug("%s: Loading app for the first time'\n",
4527 qseecom.pdev->init_name);
4528 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004529 if (ret == -EEXIST) {
4530 pr_err("recheck if TA %s is loaded\n", app_name);
4531 goto recheck;
4532 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304533 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004534 }
4535 data->client.app_id = app_id;
4536 if (!found_app) {
4537 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4538 if (!entry) {
4539 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304540 ret = -ENOMEM;
4541 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004542 }
4543 entry->app_id = app_id;
4544 entry->ref_cnt = 1;
4545 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4546 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4547 ret = -EIO;
Zhen Konga6e3f512017-01-20 12:22:23 -08004548 kfree(entry);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304549 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004550 }
4551 entry->app_arch = app_arch;
4552 entry->app_blocked = false;
4553 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004554 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004555 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4556 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4557 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4558 flags);
4559 }
4560
4561 /* Get the physical address of the ION BUF */
4562 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4563 if (ret) {
4564 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4565 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304566 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004567 }
4568
4569 /* Populate the structure for sending scm call to load image */
4570 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4571 data->client.ihandle);
4572 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4573 pr_err("ION memory mapping for client shared buf failed\n");
4574 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304575 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004576 }
4577 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4578 data->client.sb_phys = (phys_addr_t)pa;
4579 (*handle)->dev = (void *)data;
4580 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4581 (*handle)->sbuf_len = data->client.sb_length;
4582
4583 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4584 if (!kclient_entry) {
4585 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304586 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004587 }
4588 kclient_entry->handle = *handle;
4589
4590 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4591 list_add_tail(&kclient_entry->list,
4592 &qseecom.registered_kclient_list_head);
4593 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4594
4595 mutex_unlock(&app_access_lock);
4596 return 0;
4597
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304598exit_ion_unmap_kernel:
4599 if (!IS_ERR_OR_NULL(data->client.ihandle))
4600 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4601exit_entry_free:
4602 kfree(entry);
4603exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004604 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304605 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4606 ion_free(qseecom.ion_clnt, data->client.ihandle);
4607 data->client.ihandle = NULL;
4608 }
4609exit_data_free:
4610 kfree(data);
4611exit_handle_free:
4612 if (*handle) {
4613 kfree(*handle);
4614 *handle = NULL;
4615 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004616 return ret;
4617}
4618EXPORT_SYMBOL(qseecom_start_app);
4619
/*
 * qseecom_shutdown_app() - kernel-client API: unload a TA and free its handle.
 * @handle: handle previously returned by qseecom_start_app(); set to NULL
 *          on successful shutdown.
 *
 * Removes the handle from the registered kernel-client list, then unloads
 * the app. The handle, device data, and list node are freed only when the
 * unload succeeds (ret == 0); on unload failure the (already delisted)
 * kclient node and handle are intentionally left allocated so the caller
 * may retry — NOTE(review): confirm callers handle the retry case.
 *
 * Return: 0 on success, -EPERM/-EINVAL on bad state or handle, or the
 * qseecom_unload_app() error.
 */
int qseecom_shutdown_app(struct qseecom_handle **handle)
{
	int ret = -EINVAL;
	struct qseecom_dev_handle *data;

	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	bool found_handle = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
			atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if ((handle == NULL) || (*handle == NULL)) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = (struct qseecom_dev_handle *) ((*handle)->dev);
	mutex_lock(&app_access_lock);

	/* find and delist the kclient entry for this handle */
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
				list) {
		if (kclient->handle == (*handle)) {
			list_del(&kclient->list);
			found_handle = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
	if (!found_handle)
		pr_err("Unable to find the handle, exiting\n");
	else
		ret = qseecom_unload_app(data, false);

	mutex_unlock(&app_access_lock);
	if (ret == 0) {
		/* kzfree zeroizes before freeing (handles may hold secrets) */
		kzfree(data);
		kzfree(*handle);
		kzfree(kclient);
		*handle = NULL;
	}

	return ret;
}
4667EXPORT_SYMBOL(qseecom_shutdown_app);
4668
/*
 * qseecom_send_command() - kernel-client API: send a command to a loaded TA.
 * @handle:   client handle from qseecom_start_app().
 * @send_buf: request buffer (within the client's shared buffer).
 * @sbuf_len: request length.
 * @resp_buf: response buffer (within the client's shared buffer).
 * @rbuf_len: response length.
 *
 * Votes bus bandwidth and crypto clocks as needed around the underlying
 * __qseecom_send_cmd(), all under app_access_lock.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
			atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	/* bounds/alignment checks against the client's shared buffer */
	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
				ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		perf_enabled = true;
	}
	/* "securemm" requires the legacy command format */
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req);
	data->use_legacy_cmd = false;
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	/* drop the clock votes taken above for this one command */
	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
4747EXPORT_SYMBOL(qseecom_send_command);
4748
4749int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4750{
4751 int ret = 0;
4752
4753 if ((handle == NULL) || (handle->dev == NULL)) {
4754 pr_err("No valid kernel client\n");
4755 return -EINVAL;
4756 }
4757 if (high) {
4758 if (qseecom.support_bus_scaling) {
4759 mutex_lock(&qsee_bw_mutex);
4760 __qseecom_register_bus_bandwidth_needs(handle->dev,
4761 HIGH);
4762 mutex_unlock(&qsee_bw_mutex);
4763 } else {
4764 ret = qseecom_perf_enable(handle->dev);
4765 if (ret)
4766 pr_err("Failed to vote for clock with err %d\n",
4767 ret);
4768 }
4769 } else {
4770 if (!qseecom.support_bus_scaling) {
4771 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4772 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4773 } else {
4774 mutex_lock(&qsee_bw_mutex);
4775 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4776 mutex_unlock(&qsee_bw_mutex);
4777 }
4778 }
4779 return ret;
4780}
4781EXPORT_SYMBOL(qseecom_set_bandwidth);
4782
/*
 * qseecom_process_listener_from_smcinvoke() - service a listener request
 * raised through the smcinvoke path.
 * @desc: SCM descriptor; on entry ret[0]=req_cmd/result, ret[1]=session id
 *        (blocked case; unused for incomplete), ret[2]=listener id.
 *        On return ret[0..2] carry the processed result/resp_type/data
 *        back to the smcinvoke caller.
 *
 * Builds a synthetic response plus dummy app/client records and runs the
 * normal reentrancy or incomplete-command processing under app_access_lock.
 *
 * Return: 0 on success, negative errno from the listener processing.
 */
int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
{
	/* dummy records: only app_id is meaningful to the processing paths */
	struct qseecom_registered_app_list dummy_app_entry = { {0} };
	struct qseecom_dev_handle dummy_private_data = {0};
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	if (!desc) {
		pr_err("desc is NULL\n");
		return -EINVAL;
	}

	resp.result = desc->ret[0];	/*req_cmd*/
	resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
	resp.data = desc->ret[2];	/*listener_id*/

	dummy_private_data.client.app_id = desc->ret[1];
	dummy_app_entry.app_id = desc->ret[1];

	mutex_lock(&app_access_lock);
	if (qseecom.qsee_reentrancy_support)
		ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
					&dummy_private_data);
	else
		ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
					&resp);
	mutex_unlock(&app_access_lock);
	if (ret)
		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
			(int)desc->ret[0], (int)desc->ret[2],
			(int)desc->ret[1], ret);
	/* propagate the processed response back through the descriptor */
	desc->ret[0] = resp.result;
	desc->ret[1] = resp.resp_type;
	desc->ret[2] = resp.data;
	return ret;
}
4819EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4820
/*
 * qseecom_send_resp() - signal that a listener response has been posted.
 *
 * Sets the global send_resp_flag and wakes any waiter on send_resp_wq.
 * Always returns 0.
 */
static int qseecom_send_resp(void)
{
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4827
4828static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4829{
4830 struct qseecom_registered_listener_list *this_lstnr = NULL;
4831
4832 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4833 this_lstnr = __qseecom_find_svc(data->listener.id);
4834 if (this_lstnr == NULL)
4835 return -EINVAL;
4836 qseecom.send_resp_flag = 1;
4837 this_lstnr->send_resp_flag = 1;
4838 wake_up_interruptible(&qseecom.send_resp_wq);
4839 return 0;
4840}
4841
4842static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
4843 struct qseecom_send_modfd_listener_resp *resp,
4844 struct qseecom_registered_listener_list *this_lstnr)
4845{
4846 int i;
4847
4848 if (!data || !resp || !this_lstnr) {
4849 pr_err("listener handle or resp msg is null\n");
4850 return -EINVAL;
4851 }
4852
4853 if (resp->resp_buf_ptr == NULL) {
4854 pr_err("resp buffer is null\n");
4855 return -EINVAL;
4856 }
4857 /* validate resp buf length */
4858 if ((resp->resp_len == 0) ||
4859 (resp->resp_len > this_lstnr->sb_length)) {
4860 pr_err("resp buf length %d not valid\n", resp->resp_len);
4861 return -EINVAL;
4862 }
4863
4864 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
4865 pr_err("Integer overflow in resp_len & resp_buf\n");
4866 return -EINVAL;
4867 }
4868 if ((uintptr_t)this_lstnr->user_virt_sb_base >
4869 (ULONG_MAX - this_lstnr->sb_length)) {
4870 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
4871 return -EINVAL;
4872 }
4873 /* validate resp buf */
4874 if (((uintptr_t)resp->resp_buf_ptr <
4875 (uintptr_t)this_lstnr->user_virt_sb_base) ||
4876 ((uintptr_t)resp->resp_buf_ptr >=
4877 ((uintptr_t)this_lstnr->user_virt_sb_base +
4878 this_lstnr->sb_length)) ||
4879 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
4880 ((uintptr_t)this_lstnr->user_virt_sb_base +
4881 this_lstnr->sb_length))) {
4882 pr_err("resp buf is out of shared buffer region\n");
4883 return -EINVAL;
4884 }
4885
4886 /* validate offsets */
4887 for (i = 0; i < MAX_ION_FD; i++) {
4888 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
4889 pr_err("Invalid offset %d = 0x%x\n",
4890 i, resp->ifd_data[i].cmd_buf_offset);
4891 return -EINVAL;
4892 }
4893 }
4894
4895 return 0;
4896}
4897
/*
 * Common worker for the 32/64-bit "send modified-fd listener response"
 * ioctls: copy the response descriptor from user space, validate it
 * against the listener's registered shared buffer, translate the
 * user-space buffer pointer into the kernel mapping, patch in the ion
 * fd buffers, and wake the thread waiting for this listener's response.
 *
 * Returns 0 on success, -EINVAL on copy/lookup/validation failure.
 */
static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp, bool is_64bit_addr)
{
	struct qseecom_send_modfd_listener_resp resp;
	struct qseecom_registered_listener_list *this_lstnr = NULL;

	if (copy_from_user(&resp, argp, sizeof(resp))) {
		pr_err("copy_from_user failed");
		return -EINVAL;
	}

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (this_lstnr == NULL)
		return -EINVAL;

	if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
		return -EINVAL;

	/*
	 * Rebase the user-space response pointer onto the kernel virtual
	 * mapping of the same shared buffer (validated to be in range
	 * above).
	 */
	resp.resp_buf_ptr = this_lstnr->sb_virt +
		(uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);

	if (!is_64bit_addr)
		__qseecom_update_cmd_buf(&resp, false, data);
	else
		__qseecom_update_cmd_buf_64(&resp, false, data);
	qseecom.send_resp_flag = 1;
	this_lstnr->send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4928
/* 32-bit ioctl entry point: listener response with 32-bit addresses. */
static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, false);
}
4934
/* 64-bit ioctl entry point: listener response with 64-bit addresses. */
static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, true);
}
4940
4941static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
4942 void __user *argp)
4943{
4944 struct qseecom_qseos_version_req req;
4945
4946 if (copy_from_user(&req, argp, sizeof(req))) {
4947 pr_err("copy_from_user failed");
4948 return -EINVAL;
4949 }
4950 req.qseos_version = qseecom.qseos_version;
4951 if (copy_to_user(argp, &req, sizeof(req))) {
4952 pr_err("copy_to_user failed");
4953 return -EINVAL;
4954 }
4955 return 0;
4956}
4957
/*
 * Enable the crypto-engine clock bundle (core, interface, bus) for the
 * given CE instance, with reference counting under clk_access_lock.
 *
 * Only the first caller actually enables the hardware clocks; later
 * callers just bump qclk->clk_access_cnt.  Clocks that were never
 * provisioned (NULL handles) are skipped.  On a partial enable failure
 * the already-enabled clocks are rolled back in reverse order.
 *
 * Returns 0 on success (or immediately when clock support is absent),
 * -EINVAL for an unknown CE instance, and -EIO on any enable failure
 * or when the reference counter is saturated at ULONG_MAX.
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	/* already enabled by an earlier caller: just take a reference */
	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

/* unwind partial enables in reverse order; all failures report -EIO */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
5025
5026static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5027{
5028 struct qseecom_clk *qclk;
5029
5030 if (qseecom.no_clock_support)
5031 return;
5032
5033 if (ce == CLK_QSEE)
5034 qclk = &qseecom.qsee;
5035 else
5036 qclk = &qseecom.ce_drv;
5037
5038 mutex_lock(&clk_access_lock);
5039
5040 if (qclk->clk_access_cnt == 0) {
5041 mutex_unlock(&clk_access_lock);
5042 return;
5043 }
5044
5045 if (qclk->clk_access_cnt == 1) {
5046 if (qclk->ce_clk != NULL)
5047 clk_disable_unprepare(qclk->ce_clk);
5048 if (qclk->ce_core_clk != NULL)
5049 clk_disable_unprepare(qclk->ce_core_clk);
5050 if (qclk->ce_bus_clk != NULL)
5051 clk_disable_unprepare(qclk->ce_bus_clk);
5052 }
5053 qclk->clk_access_cnt--;
5054 mutex_unlock(&clk_access_lock);
5055}
5056
/*
 * Take one bandwidth vote on behalf of @data.
 *
 * CLK_DFAB votes the DFAB path: the first vote scales the bus to
 * vector 1 (or 3 when SFPB votes are already outstanding) and sets
 * data->perf_enabled.  CLK_SFPB is symmetric with vector 2 and
 * data->fast_load_enabled.  The first vote of either kind also enables
 * the QSEE clocks when a core source clock exists, and rolls that back
 * if the bus request fails.  Both counters are protected by
 * qsee_bw_mutex.
 *
 * Returns 0 on success (or when clock support / the perf client is
 * absent); otherwise the msm_bus/clk error code.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			/* first DFAB vote: pick combined vector 3 if SFPB
			 * is already voted, else enable clocks + vector 1
			 */
			if (qseecom.qsee_sfpb_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			/* first SFPB vote: combined vector 3 if DFAB is
			 * already voted, else enable clocks + vector 2
			 */
			if (qseecom.qsee_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}

			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
5140
5141static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5142 int32_t clk_type)
5143{
5144 int32_t ret = 0;
5145 struct qseecom_clk *qclk;
5146
5147 qclk = &qseecom.qsee;
5148
5149 if (qseecom.no_clock_support)
5150 return;
5151 if (!qseecom.qsee_perf_client)
5152 return;
5153
5154 switch (clk_type) {
5155 case CLK_DFAB:
5156 mutex_lock(&qsee_bw_mutex);
5157 if (qseecom.qsee_bw_count == 0) {
5158 pr_err("Client error.Extra call to disable DFAB clk\n");
5159 mutex_unlock(&qsee_bw_mutex);
5160 return;
5161 }
5162
5163 if (qseecom.qsee_bw_count == 1) {
5164 if (qseecom.qsee_sfpb_bw_count > 0)
5165 ret = msm_bus_scale_client_update_request(
5166 qseecom.qsee_perf_client, 2);
5167 else {
5168 ret = msm_bus_scale_client_update_request(
5169 qseecom.qsee_perf_client, 0);
5170 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5171 __qseecom_disable_clk(CLK_QSEE);
5172 }
5173 if (ret)
5174 pr_err("SFPB Bandwidth req fail (%d)\n",
5175 ret);
5176 else {
5177 qseecom.qsee_bw_count--;
5178 data->perf_enabled = false;
5179 }
5180 } else {
5181 qseecom.qsee_bw_count--;
5182 data->perf_enabled = false;
5183 }
5184 mutex_unlock(&qsee_bw_mutex);
5185 break;
5186 case CLK_SFPB:
5187 mutex_lock(&qsee_bw_mutex);
5188 if (qseecom.qsee_sfpb_bw_count == 0) {
5189 pr_err("Client error.Extra call to disable SFPB clk\n");
5190 mutex_unlock(&qsee_bw_mutex);
5191 return;
5192 }
5193 if (qseecom.qsee_sfpb_bw_count == 1) {
5194 if (qseecom.qsee_bw_count > 0)
5195 ret = msm_bus_scale_client_update_request(
5196 qseecom.qsee_perf_client, 1);
5197 else {
5198 ret = msm_bus_scale_client_update_request(
5199 qseecom.qsee_perf_client, 0);
5200 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5201 __qseecom_disable_clk(CLK_QSEE);
5202 }
5203 if (ret)
5204 pr_err("SFPB Bandwidth req fail (%d)\n",
5205 ret);
5206 else {
5207 qseecom.qsee_sfpb_bw_count--;
5208 data->fast_load_enabled = false;
5209 }
5210 } else {
5211 qseecom.qsee_sfpb_bw_count--;
5212 data->fast_load_enabled = false;
5213 }
5214 mutex_unlock(&qsee_bw_mutex);
5215 break;
5216 default:
5217 pr_err("Clock type not defined\n");
5218 break;
5219 }
5220
5221}
5222
5223static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5224 void __user *argp)
5225{
5226 struct ion_handle *ihandle; /* Ion handle */
5227 struct qseecom_load_img_req load_img_req;
5228 int uret = 0;
5229 int ret;
5230 ion_phys_addr_t pa = 0;
5231 size_t len;
5232 struct qseecom_load_app_ireq load_req;
5233 struct qseecom_load_app_64bit_ireq load_req_64bit;
5234 struct qseecom_command_scm_resp resp;
5235 void *cmd_buf = NULL;
5236 size_t cmd_len;
5237 /* Copy the relevant information needed for loading the image */
5238 if (copy_from_user(&load_img_req,
5239 (void __user *)argp,
5240 sizeof(struct qseecom_load_img_req))) {
5241 pr_err("copy_from_user failed\n");
5242 return -EFAULT;
5243 }
5244
5245 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005246 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005247 load_img_req.ifd_data_fd);
5248 if (IS_ERR_OR_NULL(ihandle)) {
5249 pr_err("Ion client could not retrieve the handle\n");
5250 return -ENOMEM;
5251 }
5252
5253 /* Get the physical address of the ION BUF */
5254 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5255 if (ret) {
5256 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5257 ret);
5258 return ret;
5259 }
5260 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5261 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5262 len, load_img_req.mdt_len,
5263 load_img_req.img_len);
5264 return ret;
5265 }
5266 /* Populate the structure for sending scm call to load image */
5267 if (qseecom.qsee_version < QSEE_VERSION_40) {
5268 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5269 load_req.mdt_len = load_img_req.mdt_len;
5270 load_req.img_len = load_img_req.img_len;
5271 load_req.phy_addr = (uint32_t)pa;
5272 cmd_buf = (void *)&load_req;
5273 cmd_len = sizeof(struct qseecom_load_app_ireq);
5274 } else {
5275 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5276 load_req_64bit.mdt_len = load_img_req.mdt_len;
5277 load_req_64bit.img_len = load_img_req.img_len;
5278 load_req_64bit.phy_addr = (uint64_t)pa;
5279 cmd_buf = (void *)&load_req_64bit;
5280 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5281 }
5282
5283 if (qseecom.support_bus_scaling) {
5284 mutex_lock(&qsee_bw_mutex);
5285 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5286 mutex_unlock(&qsee_bw_mutex);
5287 if (ret) {
5288 ret = -EIO;
5289 goto exit_cpu_restore;
5290 }
5291 }
5292
5293 /* Vote for the SFPB clock */
5294 ret = __qseecom_enable_clk_scale_up(data);
5295 if (ret) {
5296 ret = -EIO;
5297 goto exit_register_bus_bandwidth_needs;
5298 }
5299 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5300 ION_IOC_CLEAN_INV_CACHES);
5301 if (ret) {
5302 pr_err("cache operation failed %d\n", ret);
5303 goto exit_disable_clock;
5304 }
5305 /* SCM_CALL to load the external elf */
5306 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5307 &resp, sizeof(resp));
5308 if (ret) {
5309 pr_err("scm_call to load failed : ret %d\n",
5310 ret);
5311 ret = -EFAULT;
5312 goto exit_disable_clock;
5313 }
5314
5315 switch (resp.result) {
5316 case QSEOS_RESULT_SUCCESS:
5317 break;
5318 case QSEOS_RESULT_INCOMPLETE:
5319 pr_err("%s: qseos result incomplete\n", __func__);
5320 ret = __qseecom_process_incomplete_cmd(data, &resp);
5321 if (ret)
5322 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5323 break;
5324 case QSEOS_RESULT_FAILURE:
5325 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5326 ret = -EFAULT;
5327 break;
5328 default:
5329 pr_err("scm_call response result %d not supported\n",
5330 resp.result);
5331 ret = -EFAULT;
5332 break;
5333 }
5334
5335exit_disable_clock:
5336 __qseecom_disable_clk_scale_down(data);
5337
5338exit_register_bus_bandwidth_needs:
5339 if (qseecom.support_bus_scaling) {
5340 mutex_lock(&qsee_bw_mutex);
5341 uret = qseecom_unregister_bus_bandwidth_needs(data);
5342 mutex_unlock(&qsee_bw_mutex);
5343 if (uret)
5344 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5345 uret, ret);
5346 }
5347
5348exit_cpu_restore:
5349 /* Deallocate the handle */
5350 if (!IS_ERR_OR_NULL(ihandle))
5351 ion_free(qseecom.ion_clnt, ihandle);
5352 return ret;
5353}
5354
5355static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5356{
5357 int ret = 0;
5358 struct qseecom_command_scm_resp resp;
5359 struct qseecom_unload_app_ireq req;
5360
5361 /* unavailable client app */
5362 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5363
5364 /* Populate the structure for sending scm call to unload image */
5365 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5366
5367 /* SCM_CALL to unload the external elf */
5368 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5369 sizeof(struct qseecom_unload_app_ireq),
5370 &resp, sizeof(resp));
5371 if (ret) {
5372 pr_err("scm_call to unload failed : ret %d\n",
5373 ret);
5374 ret = -EFAULT;
5375 goto qseecom_unload_external_elf_scm_err;
5376 }
5377 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5378 ret = __qseecom_process_incomplete_cmd(data, &resp);
5379 if (ret)
5380 pr_err("process_incomplete_cmd fail err: %d\n",
5381 ret);
5382 } else {
5383 if (resp.result != QSEOS_RESULT_SUCCESS) {
5384 pr_err("scm_call to unload image failed resp.result =%d\n",
5385 resp.result);
5386 ret = -EFAULT;
5387 }
5388 }
5389
5390qseecom_unload_external_elf_scm_err:
5391
5392 return ret;
5393}
5394
5395static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5396 void __user *argp)
5397{
5398
5399 int32_t ret;
5400 struct qseecom_qseos_app_load_query query_req;
5401 struct qseecom_check_app_ireq req;
5402 struct qseecom_registered_app_list *entry = NULL;
5403 unsigned long flags = 0;
5404 uint32_t app_arch = 0, app_id = 0;
5405 bool found_app = false;
5406
5407 /* Copy the relevant information needed for loading the image */
5408 if (copy_from_user(&query_req,
5409 (void __user *)argp,
5410 sizeof(struct qseecom_qseos_app_load_query))) {
5411 pr_err("copy_from_user failed\n");
5412 return -EFAULT;
5413 }
5414
5415 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5416 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5417 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5418
5419 ret = __qseecom_check_app_exists(req, &app_id);
5420 if (ret) {
5421 pr_err(" scm call to check if app is loaded failed");
5422 return ret; /* scm call failed */
5423 }
5424 if (app_id) {
5425 pr_debug("App id %d (%s) already exists\n", app_id,
5426 (char *)(req.app_name));
5427 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5428 list_for_each_entry(entry,
5429 &qseecom.registered_app_list_head, list){
5430 if (entry->app_id == app_id) {
5431 app_arch = entry->app_arch;
5432 entry->ref_cnt++;
5433 found_app = true;
5434 break;
5435 }
5436 }
5437 spin_unlock_irqrestore(
5438 &qseecom.registered_app_list_lock, flags);
5439 data->client.app_id = app_id;
5440 query_req.app_id = app_id;
5441 if (app_arch) {
5442 data->client.app_arch = app_arch;
5443 query_req.app_arch = app_arch;
5444 } else {
5445 data->client.app_arch = 0;
5446 query_req.app_arch = 0;
5447 }
5448 strlcpy(data->client.app_name, query_req.app_name,
5449 MAX_APP_NAME_SIZE);
5450 /*
5451 * If app was loaded by appsbl before and was not registered,
5452 * regiser this app now.
5453 */
5454 if (!found_app) {
5455 pr_debug("Register app %d [%s] which was loaded before\n",
5456 ret, (char *)query_req.app_name);
5457 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5458 if (!entry) {
5459 pr_err("kmalloc for app entry failed\n");
5460 return -ENOMEM;
5461 }
5462 entry->app_id = app_id;
5463 entry->ref_cnt = 1;
5464 entry->app_arch = data->client.app_arch;
5465 strlcpy(entry->app_name, data->client.app_name,
5466 MAX_APP_NAME_SIZE);
5467 entry->app_blocked = false;
5468 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005469 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005470 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5471 flags);
5472 list_add_tail(&entry->list,
5473 &qseecom.registered_app_list_head);
5474 spin_unlock_irqrestore(
5475 &qseecom.registered_app_list_lock, flags);
5476 }
5477 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5478 pr_err("copy_to_user failed\n");
5479 return -EFAULT;
5480 }
5481 return -EEXIST; /* app already loaded */
5482 } else {
5483 return 0; /* app not loaded */
5484 }
5485}
5486
5487static int __qseecom_get_ce_pipe_info(
5488 enum qseecom_key_management_usage_type usage,
5489 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5490{
5491 int ret = -EINVAL;
5492 int i, j;
5493 struct qseecom_ce_info_use *p = NULL;
5494 int total = 0;
5495 struct qseecom_ce_pipe_entry *pcepipe;
5496
5497 switch (usage) {
5498 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5499 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5500 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5501 if (qseecom.support_fde) {
5502 p = qseecom.ce_info.fde;
5503 total = qseecom.ce_info.num_fde;
5504 } else {
5505 pr_err("system does not support fde\n");
5506 return -EINVAL;
5507 }
5508 break;
5509 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5510 if (qseecom.support_pfe) {
5511 p = qseecom.ce_info.pfe;
5512 total = qseecom.ce_info.num_pfe;
5513 } else {
5514 pr_err("system does not support pfe\n");
5515 return -EINVAL;
5516 }
5517 break;
5518 default:
5519 pr_err("unsupported usage %d\n", usage);
5520 return -EINVAL;
5521 }
5522
5523 for (j = 0; j < total; j++) {
5524 if (p->unit_num == unit) {
5525 pcepipe = p->ce_pipe_entry;
5526 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5527 (*ce_hw)[i] = pcepipe->ce_num;
5528 *pipe = pcepipe->ce_pipe_pair;
5529 pcepipe++;
5530 }
5531 ret = 0;
5532 break;
5533 }
5534 p++;
5535 }
5536 return ret;
5537}
5538
/*
 * Issue the QSEOS_GENERATE_KEY SCM call described by @ireq, holding
 * the QSEE clocks for its duration.
 *
 * "Key ID already exists" responses are treated as success, whether
 * reported directly (QSEOS_RESULT_FAIL_KEY_ID_EXISTS), surfaced as
 * -EINVAL by the SCM layer with that result code, or discovered after
 * driving an INCOMPLETE response through the listener path.
 *
 * Returns 0 on success, -EFAULT for out-of-range usage or SCM call
 * failure, -EINVAL when the firmware reports failure.
 */
static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_generate_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_generate_ireq),
		&resp, sizeof(resp));
	if (ret) {
		/* the key already existing is not an error */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
			pr_debug("Key ID exists.\n");
			ret = 0;
		} else {
			pr_err("scm call to generate key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto generate_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
		pr_debug("Key ID exists.\n");
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
				pr_debug("Key ID exists.\n");
				ret = 0;
			} else {
				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			}
		}
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("gen key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
generate_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5598
/*
 * Issue the QSEOS_DELETE_KEY SCM call described by @ireq, holding the
 * QSEE clocks for its duration.
 *
 * A "max password attempts reached" response is mapped to -ERANGE,
 * whether reported directly, surfaced as -EINVAL by the SCM layer with
 * that result code, or discovered after driving an INCOMPLETE response
 * through the listener path.
 *
 * Returns 0 on success, -ERANGE as above, -EFAULT for out-of-range
 * usage or SCM call failure, -EINVAL when the firmware reports failure.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5659
5660static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5661 enum qseecom_key_management_usage_type usage,
5662 struct qseecom_key_select_ireq *ireq)
5663{
5664 struct qseecom_command_scm_resp resp;
5665 int ret;
5666
5667 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5668 usage >= QSEOS_KM_USAGE_MAX) {
5669 pr_err("Error:: unsupported usage %d\n", usage);
5670 return -EFAULT;
5671 }
5672 ret = __qseecom_enable_clk(CLK_QSEE);
5673 if (ret)
5674 return ret;
5675
5676 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5677 ret = __qseecom_enable_clk(CLK_CE_DRV);
5678 if (ret)
5679 return ret;
5680 }
5681
5682 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5683 ireq, sizeof(struct qseecom_key_select_ireq),
5684 &resp, sizeof(struct qseecom_command_scm_resp));
5685 if (ret) {
5686 if (ret == -EINVAL &&
5687 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5688 pr_debug("Max attempts to input password reached.\n");
5689 ret = -ERANGE;
5690 } else if (ret == -EINVAL &&
5691 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5692 pr_debug("Set Key operation under processing...\n");
5693 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5694 } else {
5695 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5696 ret);
5697 ret = -EFAULT;
5698 }
5699 goto set_key_exit;
5700 }
5701
5702 switch (resp.result) {
5703 case QSEOS_RESULT_SUCCESS:
5704 break;
5705 case QSEOS_RESULT_INCOMPLETE:
5706 ret = __qseecom_process_incomplete_cmd(data, &resp);
5707 if (ret) {
5708 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5709 resp.result);
5710 if (resp.result ==
5711 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5712 pr_debug("Set Key operation under processing...\n");
5713 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5714 }
5715 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5716 pr_debug("Max attempts to input password reached.\n");
5717 ret = -ERANGE;
5718 }
5719 }
5720 break;
5721 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5722 pr_debug("Max attempts to input password reached.\n");
5723 ret = -ERANGE;
5724 break;
5725 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5726 pr_debug("Set Key operation under processing...\n");
5727 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5728 break;
5729 case QSEOS_RESULT_FAILURE:
5730 default:
5731 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5732 ret = -EINVAL;
5733 break;
5734 }
5735set_key_exit:
5736 __qseecom_disable_clk(CLK_QSEE);
5737 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5738 __qseecom_disable_clk(CLK_CE_DRV);
5739 return ret;
5740}
5741
/*
 * Issue the key user-info update SCM call described by @ireq, holding
 * the QSEE clocks for its duration.
 *
 * Pending-operation responses are propagated as the positive value
 * QSEOS_RESULT_FAIL_PENDING_OPERATION so callers can retry.  Returns
 * 0 on success, -EFAULT for out-of-range usage or SCM call failure,
 * -EINVAL when the firmware reports failure.
 *
 * NOTE(review): in the INCOMPLETE branch the pending check runs even
 * when __qseecom_process_incomplete_cmd() succeeded (ret == 0), unlike
 * __qseecom_set_clear_ce_key() where it is guarded by `if (ret)` —
 * confirm this asymmetry is intended.
 */
static int __qseecom_update_current_key_user_info(
		struct qseecom_dev_handle *data,
		enum qseecom_key_management_usage_type usage,
		struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
									ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5803
5804
5805static int qseecom_enable_ice_setup(int usage)
5806{
5807 int ret = 0;
5808
5809 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5810 ret = qcom_ice_setup_ice_hw("ufs", true);
5811 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5812 ret = qcom_ice_setup_ice_hw("sdcc", true);
5813
5814 return ret;
5815}
5816
5817static int qseecom_disable_ice_setup(int usage)
5818{
5819 int ret = 0;
5820
5821 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5822 ret = qcom_ice_setup_ice_hw("ufs", false);
5823 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5824 ret = qcom_ice_setup_ice_hw("sdcc", false);
5825
5826 return ret;
5827}
5828
5829static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5830{
5831 struct qseecom_ce_info_use *pce_info_use, *p;
5832 int total = 0;
5833 int i;
5834
5835 switch (usage) {
5836 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5837 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5838 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5839 p = qseecom.ce_info.fde;
5840 total = qseecom.ce_info.num_fde;
5841 break;
5842 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5843 p = qseecom.ce_info.pfe;
5844 total = qseecom.ce_info.num_pfe;
5845 break;
5846 default:
5847 pr_err("unsupported usage %d\n", usage);
5848 return -EINVAL;
5849 }
5850
5851 pce_info_use = NULL;
5852
5853 for (i = 0; i < total; i++) {
5854 if (p->unit_num == unit) {
5855 pce_info_use = p;
5856 break;
5857 }
5858 p++;
5859 }
5860 if (!pce_info_use) {
5861 pr_err("can not find %d\n", unit);
5862 return -EINVAL;
5863 }
5864 return pce_info_use->num_ce_pipe_entries;
5865}
5866
5867static int qseecom_create_key(struct qseecom_dev_handle *data,
5868 void __user *argp)
5869{
5870 int i;
5871 uint32_t *ce_hw = NULL;
5872 uint32_t pipe = 0;
5873 int ret = 0;
5874 uint32_t flags = 0;
5875 struct qseecom_create_key_req create_key_req;
5876 struct qseecom_key_generate_ireq generate_key_ireq;
5877 struct qseecom_key_select_ireq set_key_ireq;
5878 uint32_t entries = 0;
5879
5880 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5881 if (ret) {
5882 pr_err("copy_from_user failed\n");
5883 return ret;
5884 }
5885
5886 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5887 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5888 pr_err("unsupported usage %d\n", create_key_req.usage);
5889 ret = -EFAULT;
5890 return ret;
5891 }
5892 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5893 create_key_req.usage);
5894 if (entries <= 0) {
5895 pr_err("no ce instance for usage %d instance %d\n",
5896 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5897 ret = -EINVAL;
5898 return ret;
5899 }
5900
5901 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5902 if (!ce_hw) {
5903 ret = -ENOMEM;
5904 return ret;
5905 }
5906 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5907 DEFAULT_CE_INFO_UNIT);
5908 if (ret) {
5909 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5910 ret = -EINVAL;
5911 goto free_buf;
5912 }
5913
5914 if (qseecom.fde_key_size)
5915 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5916 else
5917 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5918
5919 generate_key_ireq.flags = flags;
5920 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5921 memset((void *)generate_key_ireq.key_id,
5922 0, QSEECOM_KEY_ID_SIZE);
5923 memset((void *)generate_key_ireq.hash32,
5924 0, QSEECOM_HASH_SIZE);
5925 memcpy((void *)generate_key_ireq.key_id,
5926 (void *)key_id_array[create_key_req.usage].desc,
5927 QSEECOM_KEY_ID_SIZE);
5928 memcpy((void *)generate_key_ireq.hash32,
5929 (void *)create_key_req.hash32,
5930 QSEECOM_HASH_SIZE);
5931
5932 ret = __qseecom_generate_and_save_key(data,
5933 create_key_req.usage, &generate_key_ireq);
5934 if (ret) {
5935 pr_err("Failed to generate key on storage: %d\n", ret);
5936 goto free_buf;
5937 }
5938
5939 for (i = 0; i < entries; i++) {
5940 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5941 if (create_key_req.usage ==
5942 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5943 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5944 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5945
5946 } else if (create_key_req.usage ==
5947 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5948 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5949 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5950
5951 } else {
5952 set_key_ireq.ce = ce_hw[i];
5953 set_key_ireq.pipe = pipe;
5954 }
5955 set_key_ireq.flags = flags;
5956
5957 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5958 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5959 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5960 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5961 memcpy((void *)set_key_ireq.key_id,
5962 (void *)key_id_array[create_key_req.usage].desc,
5963 QSEECOM_KEY_ID_SIZE);
5964 memcpy((void *)set_key_ireq.hash32,
5965 (void *)create_key_req.hash32,
5966 QSEECOM_HASH_SIZE);
5967 /*
5968 * It will return false if it is GPCE based crypto instance or
5969 * ICE is setup properly
5970 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07005971 ret = qseecom_enable_ice_setup(create_key_req.usage);
5972 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005973 goto free_buf;
5974
5975 do {
5976 ret = __qseecom_set_clear_ce_key(data,
5977 create_key_req.usage,
5978 &set_key_ireq);
5979 /*
5980 * wait a little before calling scm again to let other
5981 * processes run
5982 */
5983 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
5984 msleep(50);
5985
5986 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
5987
5988 qseecom_disable_ice_setup(create_key_req.usage);
5989
5990 if (ret) {
5991 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
5992 pipe, ce_hw[i], ret);
5993 goto free_buf;
5994 } else {
5995 pr_err("Set the key successfully\n");
5996 if ((create_key_req.usage ==
5997 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
5998 (create_key_req.usage ==
5999 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6000 goto free_buf;
6001 }
6002 }
6003
6004free_buf:
6005 kzfree(ce_hw);
6006 return ret;
6007}
6008
/*
 * qseecom_wipe_key() - wipe a disk-encryption key from storage and/or the
 * crypto-engine (CE) hardware key slots.
 * @data: per-client device handle issuing the request.
 * @argp: userspace pointer to a struct qseecom_wipe_key_req.
 *
 * If wipe_key_req.wipe_key_flag is set, the saved key is first deleted from
 * secure storage via QSEOS_DELETE_KEY.  Then, for every CE instance mapped to
 * the requested usage, the active key slot is overwritten by issuing
 * QSEOS_SET_KEY with an all-QSEECOM_INVALID_KEY_ID key id.
 *
 * Returns 0 on success; a negative errno (or a positive copy_from_user
 * residue count) on failure.
 */
static int qseecom_wipe_key(struct qseecom_dev_handle *data,
				void __user *argp)
{
	uint32_t *ce_hw = NULL;
	uint32_t pipe = 0;
	int ret = 0;
	uint32_t flags = 0;
	int i, j;
	struct qseecom_wipe_key_req wipe_key_req;
	struct qseecom_key_delete_ireq delete_key_ireq;
	struct qseecom_key_select_ireq clear_key_ireq;
	uint32_t entries = 0;

	ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}

	/* Only key-manager usages in the supported range may be wiped. */
	if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("unsupported usage %d\n", wipe_key_req.usage);
		ret = -EFAULT;
		return ret;
	}

	/*
	 * NOTE(review): entries is uint32_t, so "entries <= 0" only catches 0;
	 * a negative return from qseecom_get_ce_hw_instance() would wrap to a
	 * huge positive value and slip through — confirm the helper cannot
	 * return a negative error code.
	 */
	entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
					wipe_key_req.usage);
	if (entries <= 0) {
		pr_err("no ce instance for usage %d instance %d\n",
			DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
		ret = -EINVAL;
		return ret;
	}

	ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
	if (!ce_hw) {
		ret = -ENOMEM;
		return ret;
	}

	/* Resolve which CE instances/pipe serve this usage. */
	ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
				DEFAULT_CE_INFO_UNIT);
	if (ret) {
		pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
		ret = -EINVAL;
		goto free_buf;
	}

	/* Optionally delete the key material from secure storage first. */
	if (wipe_key_req.wipe_key_flag) {
		delete_key_ireq.flags = flags;
		delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
		memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
		memcpy((void *)delete_key_ireq.key_id,
			(void *)key_id_array[wipe_key_req.usage].desc,
			QSEECOM_KEY_ID_SIZE);
		memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);

		ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
					&delete_key_ireq);
		if (ret) {
			pr_err("Failed to delete key from ssd storage: %d\n",
				ret);
			ret = -EFAULT;
			goto free_buf;
		}
	}

	/* Clear the hardware key slot on every CE instance for this usage. */
	for (j = 0; j < entries; j++) {
		clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
		/* ICE (inline crypto engine) usages have fixed CE/pipe. */
		if (wipe_key_req.usage ==
				QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
			clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
			clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
		} else if (wipe_key_req.usage ==
			QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
			clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
			clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
		} else {
			clear_key_ireq.ce = ce_hw[j];
			clear_key_ireq.pipe = pipe;
		}
		clear_key_ireq.flags = flags;
		clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
		/* Invalid key id == wipe: slot is overwritten, not selected. */
		for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
			clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
		memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);

		/*
		 * It will return false if it is GPCE based crypto instance or
		 * ICE is setup properly
		 */
		ret = qseecom_enable_ice_setup(wipe_key_req.usage);
		if (ret)
			goto free_buf;

		ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
					&clear_key_ireq);

		qseecom_disable_ice_setup(wipe_key_req.usage);

		if (ret) {
			pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
				pipe, ce_hw[j], ret);
			ret = -EFAULT;
			goto free_buf;
		}
	}

free_buf:
	kzfree(ce_hw);
	return ret;
}
6122
/*
 * qseecom_update_key_user_info() - change the user-info (password) hash bound
 * to a stored disk-encryption key.
 * @data: per-client device handle issuing the request.
 * @argp: userspace pointer to a struct qseecom_update_key_userinfo_req
 *        carrying the current and the new 32-byte hash.
 *
 * Builds a QSEOS_UPDATE_KEY_USERINFO request and retries it while the secure
 * side reports QSEOS_RESULT_FAIL_PENDING_OPERATION, sleeping 50 ms between
 * attempts so other processes can make progress.
 *
 * Returns 0 on success; a negative errno (or a positive copy_from_user
 * residue count) on failure.
 */
static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
			void __user *argp)
{
	int ret = 0;
	uint32_t flags = 0;
	struct qseecom_update_key_userinfo_req update_key_req;
	struct qseecom_key_userinfo_update_ireq ireq;

	ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}

	if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
		return -EFAULT;
	}

	ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;

	/* Tell TZ which FDE key size this platform was provisioned with. */
	if (qseecom.fde_key_size)
		flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
	else
		flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;

	/* Populate key id and both hashes for the secure-world request. */
	ireq.flags = flags;
	memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
	memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
	memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
	memcpy((void *)ireq.key_id,
		(void *)key_id_array[update_key_req.usage].desc,
		QSEECOM_KEY_ID_SIZE);
	memcpy((void *)ireq.current_hash32,
		(void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
	memcpy((void *)ireq.new_hash32,
		(void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);

	do {
		ret = __qseecom_update_current_key_user_info(data,
						update_key_req.usage,
						&ireq);
		/*
		 * wait a little before calling scm again to let other
		 * processes run
		 */
		if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
			msleep(50);

	} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
	if (ret) {
		pr_err("Failed to update key info: %d\n", ret);
		return ret;
	}
	return ret;

}
6181static int qseecom_is_es_activated(void __user *argp)
6182{
Zhen Kong26e62742018-05-04 17:19:06 -07006183 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006184 struct qseecom_command_scm_resp resp;
6185 int ret;
6186
6187 if (qseecom.qsee_version < QSEE_VERSION_04) {
6188 pr_err("invalid qsee version\n");
6189 return -ENODEV;
6190 }
6191
6192 if (argp == NULL) {
6193 pr_err("arg is null\n");
6194 return -EINVAL;
6195 }
6196
6197 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6198 &req, sizeof(req), &resp, sizeof(resp));
6199 if (ret) {
6200 pr_err("scm_call failed\n");
6201 return ret;
6202 }
6203
6204 req.is_activated = resp.result;
6205 ret = copy_to_user(argp, &req, sizeof(req));
6206 if (ret) {
6207 pr_err("copy_to_user failed\n");
6208 return ret;
6209 }
6210
6211 return 0;
6212}
6213
6214static int qseecom_save_partition_hash(void __user *argp)
6215{
6216 struct qseecom_save_partition_hash_req req;
6217 struct qseecom_command_scm_resp resp;
6218 int ret;
6219
6220 memset(&resp, 0x00, sizeof(resp));
6221
6222 if (qseecom.qsee_version < QSEE_VERSION_04) {
6223 pr_err("invalid qsee version\n");
6224 return -ENODEV;
6225 }
6226
6227 if (argp == NULL) {
6228 pr_err("arg is null\n");
6229 return -EINVAL;
6230 }
6231
6232 ret = copy_from_user(&req, argp, sizeof(req));
6233 if (ret) {
6234 pr_err("copy_from_user failed\n");
6235 return ret;
6236 }
6237
6238 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6239 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6240 if (ret) {
6241 pr_err("qseecom_scm_call failed\n");
6242 return ret;
6243 }
6244
6245 return 0;
6246}
6247
/*
 * qseecom_mdtp_cipher_dip() - encrypt or decrypt the MDTP (mobile device
 * theft protection) DIP blob via TZ.
 * @argp: userspace pointer to a struct qseecom_mdtp_cipher_dip_req with the
 *        input/output user buffers, their sizes (<= MAX_DIP) and a
 *        direction flag (0/1).
 *
 * Copies the input into a page-aligned kernel bounce buffer, flushes caches,
 * invokes TZ_MDTP_CIPHER_DIP_ID with physical addresses, then copies the
 * result back to userspace.  The do { } while (0) wrapper gives a single
 * fall-through point so the bounce buffers are always kzfree()d.
 *
 * Returns 0 on success, a negative errno on failure.
 * NOTE(review): on copy_from_user/copy_to_user failure the positive residue
 * count is returned as-is — confirm callers treat any non-zero as failure.
 */
static int qseecom_mdtp_cipher_dip(void __user *argp)
{
	struct qseecom_mdtp_cipher_dip_req req;
	u32 tzbuflenin, tzbuflenout;
	char *tzbufin = NULL, *tzbufout = NULL;
	struct scm_desc desc = {0};
	int ret;

	do {
		/* Copy the parameters from userspace */
		if (argp == NULL) {
			pr_err("arg is null\n");
			ret = -EINVAL;
			break;
		}

		ret = copy_from_user(&req, argp, sizeof(req));
		if (ret) {
			pr_err("copy_from_user failed, ret= %d\n", ret);
			break;
		}

		/* Validate sizes and direction before touching the buffers. */
		if (req.in_buf == NULL || req.out_buf == NULL ||
			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
				req.direction > 1) {
			pr_err("invalid parameters\n");
			ret = -EINVAL;
			break;
		}

		/* Copy the input buffer from userspace to kernel space */
		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
		if (!tzbufin) {
			pr_err("error allocating in buffer\n");
			ret = -ENOMEM;
			break;
		}

		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
		if (ret) {
			pr_err("copy_from_user failed, ret=%d\n", ret);
			break;
		}

		/* Make the input visible to TZ (non-coherent access). */
		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);

		/* Prepare the output buffer in kernel space */
		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
		if (!tzbufout) {
			pr_err("error allocating out buffer\n");
			ret = -ENOMEM;
			break;
		}

		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);

		/* Send the command to TZ */
		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
		desc.args[0] = virt_to_phys(tzbufin);
		desc.args[1] = req.in_buf_size;
		desc.args[2] = virt_to_phys(tzbufout);
		desc.args[3] = req.out_buf_size;
		desc.args[4] = req.direction;

		/* TZ needs the QSEE clocks running for this call. */
		ret = __qseecom_enable_clk(CLK_QSEE);
		if (ret)
			break;

		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);

		__qseecom_disable_clk(CLK_QSEE);

		if (ret) {
			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
				ret);
			break;
		}

		/* Copy the output buffer from kernel space to userspace */
		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
		if (ret) {
			pr_err("copy_to_user failed, ret=%d\n", ret);
			break;
		}
	} while (0);

	/* kzfree(NULL) is a no-op, so both paths are safe here. */
	kzfree(tzbufin);
	kzfree(tzbufout);

	return ret;
}
6343
/*
 * __qseecom_qteec_validate_msg() - sanity-check a QTEEC request against the
 * client's shared buffer before it is handed to the secure world.
 * @data: per-client device handle (must be a QSEECOM_CLIENT_APP with an
 *        initialized ion handle).
 * @req:  request whose req_ptr/req_len and resp_ptr/resp_len are validated.
 *
 * Rejects NULL/zero-length buffers, any pointer outside
 * [user_virt_sb_base, user_virt_sb_base + sb_length), and every integer
 * overflow that could defeat those range checks.  The overflow checks are
 * deliberately performed before the additions they protect — do not reorder.
 *
 * Returns 0 if the request is acceptable, -EINVAL/-EFAULT/-ENOMEM otherwise.
 */
static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req)
{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->type != QSEECOM_CLIENT_APP)
		return -EFAULT;

	/* Guard the req_len + resp_len addition below. */
	if (req->req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if (req->req_len + req->resp_len > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
		(req->req_len + req->resp_len), data->client.sb_length);
		return -ENOMEM;
	}

	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* Both buffers must start inside the client's shared buffer. */
	if (((uintptr_t)req->req_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->req_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if (((uintptr_t)req->resp_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if ((req->req_len == 0) || (req->resp_len == 0)) {
		pr_err("cmd buf lengtgh/response buf length not valid\n");
		return -EINVAL;
	}

	/* Guard the ptr + len additions in the final range check. */
	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
		pr_err("Integer overflow in req_len & req_ptr\n");
		return -EINVAL;
	}

	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_ptr\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Both buffers must also END inside the shared buffer. */
	if ((((uintptr_t)req->req_ptr + req->req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_ptr + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
6418
6419static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6420 uint32_t fd_idx, struct sg_table *sg_ptr)
6421{
6422 struct scatterlist *sg = sg_ptr->sgl;
6423 struct qseecom_sg_entry *sg_entry;
6424 void *buf;
6425 uint i;
6426 size_t size;
6427 dma_addr_t coh_pmem;
6428
6429 if (fd_idx >= MAX_ION_FD) {
6430 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6431 return -ENOMEM;
6432 }
6433 /*
6434 * Allocate a buffer, populate it with number of entry plus
6435 * each sg entry's phy addr and length; then return the
6436 * phy_addr of the buffer.
6437 */
6438 size = sizeof(uint32_t) +
6439 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6440 size = (size + PAGE_SIZE) & PAGE_MASK;
6441 buf = dma_alloc_coherent(qseecom.pdev,
6442 size, &coh_pmem, GFP_KERNEL);
6443 if (buf == NULL) {
6444 pr_err("failed to alloc memory for sg buf\n");
6445 return -ENOMEM;
6446 }
6447 *(uint32_t *)buf = sg_ptr->nents;
6448 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6449 for (i = 0; i < sg_ptr->nents; i++) {
6450 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6451 sg_entry->len = sg->length;
6452 sg_entry++;
6453 sg = sg_next(sg);
6454 }
6455 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6456 data->client.sec_buf_fd[fd_idx].vbase = buf;
6457 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6458 data->client.sec_buf_fd[fd_idx].size = size;
6459 return 0;
6460}
6461
6462static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6463 struct qseecom_dev_handle *data, bool cleanup)
6464{
6465 struct ion_handle *ihandle;
6466 int ret = 0;
6467 int i = 0;
6468 uint32_t *update;
6469 struct sg_table *sg_ptr = NULL;
6470 struct scatterlist *sg;
6471 struct qseecom_param_memref *memref;
6472
6473 if (req == NULL) {
6474 pr_err("Invalid address\n");
6475 return -EINVAL;
6476 }
6477 for (i = 0; i < MAX_ION_FD; i++) {
6478 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006479 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006480 req->ifd_data[i].fd);
6481 if (IS_ERR_OR_NULL(ihandle)) {
6482 pr_err("Ion client can't retrieve the handle\n");
6483 return -ENOMEM;
6484 }
6485 if ((req->req_len < sizeof(uint32_t)) ||
6486 (req->ifd_data[i].cmd_buf_offset >
6487 req->req_len - sizeof(uint32_t))) {
6488 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6489 req->req_len,
6490 req->ifd_data[i].cmd_buf_offset);
6491 return -EINVAL;
6492 }
6493 update = (uint32_t *)((char *) req->req_ptr +
6494 req->ifd_data[i].cmd_buf_offset);
6495 if (!update) {
6496 pr_err("update pointer is NULL\n");
6497 return -EINVAL;
6498 }
6499 } else {
6500 continue;
6501 }
6502 /* Populate the cmd data structure with the phys_addr */
6503 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6504 if (IS_ERR_OR_NULL(sg_ptr)) {
6505 pr_err("IOn client could not retrieve sg table\n");
6506 goto err;
6507 }
6508 sg = sg_ptr->sgl;
6509 if (sg == NULL) {
6510 pr_err("sg is NULL\n");
6511 goto err;
6512 }
6513 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6514 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6515 sg_ptr->nents, sg->length);
6516 goto err;
6517 }
6518 /* clean up buf for pre-allocated fd */
6519 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6520 (*update)) {
6521 if (data->client.sec_buf_fd[i].vbase)
6522 dma_free_coherent(qseecom.pdev,
6523 data->client.sec_buf_fd[i].size,
6524 data->client.sec_buf_fd[i].vbase,
6525 data->client.sec_buf_fd[i].pbase);
6526 memset((void *)update, 0,
6527 sizeof(struct qseecom_param_memref));
6528 memset(&(data->client.sec_buf_fd[i]), 0,
6529 sizeof(struct qseecom_sec_buf_fd_info));
6530 goto clean;
6531 }
6532
6533 if (*update == 0) {
6534 /* update buf for pre-allocated fd from secure heap*/
6535 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6536 sg_ptr);
6537 if (ret) {
6538 pr_err("Failed to handle buf for fd[%d]\n", i);
6539 goto err;
6540 }
6541 memref = (struct qseecom_param_memref *)update;
6542 memref->buffer =
6543 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6544 memref->size =
6545 (uint32_t)(data->client.sec_buf_fd[i].size);
6546 } else {
6547 /* update buf for fd from non-secure qseecom heap */
6548 if (sg_ptr->nents != 1) {
6549 pr_err("Num of scat entr (%d) invalid\n",
6550 sg_ptr->nents);
6551 goto err;
6552 }
6553 if (cleanup)
6554 *update = 0;
6555 else
6556 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6557 }
6558clean:
6559 if (cleanup) {
6560 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6561 ihandle, NULL, sg->length,
6562 ION_IOC_INV_CACHES);
6563 if (ret) {
6564 pr_err("cache operation failed %d\n", ret);
6565 goto err;
6566 }
6567 } else {
6568 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6569 ihandle, NULL, sg->length,
6570 ION_IOC_CLEAN_INV_CACHES);
6571 if (ret) {
6572 pr_err("cache operation failed %d\n", ret);
6573 goto err;
6574 }
6575 data->sglistinfo_ptr[i].indexAndFlags =
6576 SGLISTINFO_SET_INDEX_FLAG(
6577 (sg_ptr->nents == 1), 0,
6578 req->ifd_data[i].cmd_buf_offset);
6579 data->sglistinfo_ptr[i].sizeOrCount =
6580 (sg_ptr->nents == 1) ?
6581 sg->length : sg_ptr->nents;
6582 data->sglist_cnt = i + 1;
6583 }
6584 /* Deallocate the handle */
6585 if (!IS_ERR_OR_NULL(ihandle))
6586 ion_free(qseecom.ion_clnt, ihandle);
6587 }
6588 return ret;
6589err:
6590 if (!IS_ERR_OR_NULL(ihandle))
6591 ion_free(qseecom.ion_clnt, ihandle);
6592 return -ENOMEM;
6593}
6594
6595static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
6596 struct qseecom_qteec_req *req, uint32_t cmd_id)
6597{
6598 struct qseecom_command_scm_resp resp;
6599 struct qseecom_qteec_ireq ireq;
6600 struct qseecom_qteec_64bit_ireq ireq_64bit;
6601 struct qseecom_registered_app_list *ptr_app;
6602 bool found_app = false;
6603 unsigned long flags;
6604 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07006605 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006606 uint32_t reqd_len_sb_in = 0;
6607 void *cmd_buf = NULL;
6608 size_t cmd_len;
6609 struct sglist_info *table = data->sglistinfo_ptr;
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306610 void *req_ptr = NULL;
6611 void *resp_ptr = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006612
6613 ret = __qseecom_qteec_validate_msg(data, req);
6614 if (ret)
6615 return ret;
6616
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306617 req_ptr = req->req_ptr;
6618 resp_ptr = req->resp_ptr;
6619
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006620 /* find app_id & img_name from list */
6621 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6622 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6623 list) {
6624 if ((ptr_app->app_id == data->client.app_id) &&
6625 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6626 found_app = true;
6627 break;
6628 }
6629 }
6630 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6631 if (!found_app) {
6632 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6633 (char *)data->client.app_name);
6634 return -ENOENT;
6635 }
6636
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306637 req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6638 (uintptr_t)req->req_ptr);
6639 req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6640 (uintptr_t)req->resp_ptr);
6641
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006642 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6643 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
6644 ret = __qseecom_update_qteec_req_buf(
6645 (struct qseecom_qteec_modfd_req *)req, data, false);
6646 if (ret)
6647 return ret;
6648 }
6649
6650 if (qseecom.qsee_version < QSEE_VERSION_40) {
6651 ireq.app_id = data->client.app_id;
6652 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306653 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006654 ireq.req_len = req->req_len;
6655 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306656 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006657 ireq.resp_len = req->resp_len;
6658 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6659 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6660 dmac_flush_range((void *)table,
6661 (void *)table + SGLISTINFO_TABLE_SIZE);
6662 cmd_buf = (void *)&ireq;
6663 cmd_len = sizeof(struct qseecom_qteec_ireq);
6664 } else {
6665 ireq_64bit.app_id = data->client.app_id;
6666 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306667 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006668 ireq_64bit.req_len = req->req_len;
6669 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306670 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006671 ireq_64bit.resp_len = req->resp_len;
6672 if ((data->client.app_arch == ELFCLASS32) &&
6673 ((ireq_64bit.req_ptr >=
6674 PHY_ADDR_4G - ireq_64bit.req_len) ||
6675 (ireq_64bit.resp_ptr >=
6676 PHY_ADDR_4G - ireq_64bit.resp_len))){
6677 pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
6678 data->client.app_name, data->client.app_id);
6679 pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
6680 ireq_64bit.req_ptr, ireq_64bit.req_len,
6681 ireq_64bit.resp_ptr, ireq_64bit.resp_len);
6682 return -EFAULT;
6683 }
6684 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6685 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6686 dmac_flush_range((void *)table,
6687 (void *)table + SGLISTINFO_TABLE_SIZE);
6688 cmd_buf = (void *)&ireq_64bit;
6689 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6690 }
6691 if (qseecom.whitelist_support == true
6692 && cmd_id == QSEOS_TEE_OPEN_SESSION)
6693 *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
6694 else
6695 *(uint32_t *)cmd_buf = cmd_id;
6696
6697 reqd_len_sb_in = req->req_len + req->resp_len;
6698 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6699 data->client.sb_virt,
6700 reqd_len_sb_in,
6701 ION_IOC_CLEAN_INV_CACHES);
6702 if (ret) {
6703 pr_err("cache operation failed %d\n", ret);
6704 return ret;
6705 }
6706
6707 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6708
6709 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6710 cmd_buf, cmd_len,
6711 &resp, sizeof(resp));
6712 if (ret) {
6713 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6714 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07006715 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006716 }
6717
6718 if (qseecom.qsee_reentrancy_support) {
6719 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07006720 if (ret)
6721 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006722 } else {
6723 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6724 ret = __qseecom_process_incomplete_cmd(data, &resp);
6725 if (ret) {
6726 pr_err("process_incomplete_cmd failed err: %d\n",
6727 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07006728 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006729 }
6730 } else {
6731 if (resp.result != QSEOS_RESULT_SUCCESS) {
6732 pr_err("Response result %d not supported\n",
6733 resp.result);
6734 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07006735 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006736 }
6737 }
6738 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006739exit:
6740 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006741 data->client.sb_virt, data->client.sb_length,
6742 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07006743 if (ret2) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006744 pr_err("cache operation failed %d\n", ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07006745 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006746 }
6747
6748 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6749 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
Zhen Kong4af480e2017-09-19 14:34:16 -07006750 ret2 = __qseecom_update_qteec_req_buf(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006751 (struct qseecom_qteec_modfd_req *)req, data, true);
Zhen Kong4af480e2017-09-19 14:34:16 -07006752 if (ret2)
6753 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006754 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006755 return ret;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006756}
6757
6758static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6759 void __user *argp)
6760{
6761 struct qseecom_qteec_modfd_req req;
6762 int ret = 0;
6763
6764 ret = copy_from_user(&req, argp,
6765 sizeof(struct qseecom_qteec_modfd_req));
6766 if (ret) {
6767 pr_err("copy_from_user failed\n");
6768 return ret;
6769 }
6770 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6771 QSEOS_TEE_OPEN_SESSION);
6772
6773 return ret;
6774}
6775
6776static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6777 void __user *argp)
6778{
6779 struct qseecom_qteec_req req;
6780 int ret = 0;
6781
6782 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6783 if (ret) {
6784 pr_err("copy_from_user failed\n");
6785 return ret;
6786 }
6787 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6788 return ret;
6789}
6790
/*
 * qseecom_qteec_invoke_modfd_cmd() - ioctl handler for a QTEEC "invoke
 * command" that may carry ion fd memory references.
 * @data: per-client device handle of a loaded QSEECOM client app.
 * @argp: userspace pointer to a struct qseecom_qteec_modfd_req.
 *
 * Mirrors __qseecom_qteec_issue_cmd() but handles QSEOS_TEE_INVOKE_COMMAND:
 * validates the request, looks up the registered app, validates fd offsets,
 * patches ion-backed references, builds the version-appropriate ireq, does
 * the cache maintenance around the SCM call, then restores the references
 * and invalidates the shared buffer.
 *
 * Returns 0 on success, a negative errno on failure (or the positive
 * copy_from_user residue count if the initial copy fails).
 */
static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	struct qseecom_qteec_modfd_req req;
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int i = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = copy_from_user(&req, argp,
			sizeof(struct qseecom_qteec_modfd_req));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}
	ret = __qseecom_qteec_validate_msg(data,
					(struct qseecom_qteec_req *)(&req));
	if (ret)
		return ret;
	/* Keep the original user virtual addresses for kphys translation. */
	req_ptr = req.req_ptr;
	resp_ptr = req.resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (req.ifd_data[i].fd) {
			if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
				return -EINVAL;
		}
	}
	/* Rewrite the request pointers to kernel virtual addresses. */
	req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.req_ptr);
	req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.resp_ptr);
	ret = __qseecom_update_qteec_req_buf(&req, data, false);
	if (ret)
		return ret;

	/* Build the 32-bit or 64-bit ireq depending on QSEE version. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req.req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req.req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
	}
	reqd_len_sb_in = req.req_len + req.resp_len;
	if (qseecom.whitelist_support == true)
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
	else
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;

	/* Clean the shared buffer so TZ sees the CPU's writes. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* Restore/zero the patched ion references after the TZ call. */
	ret = __qseecom_update_qteec_req_buf(&req, data, true);
	if (ret)
		return ret;

	/* Invalidate so the CPU sees what TZ wrote back. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}
	return 0;
}
6941
6942static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6943 void __user *argp)
6944{
6945 struct qseecom_qteec_modfd_req req;
6946 int ret = 0;
6947
6948 ret = copy_from_user(&req, argp,
6949 sizeof(struct qseecom_qteec_modfd_req));
6950 if (ret) {
6951 pr_err("copy_from_user failed\n");
6952 return ret;
6953 }
6954 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6955 QSEOS_TEE_REQUEST_CANCELLATION);
6956
6957 return ret;
6958}
6959
6960static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6961{
6962 if (data->sglist_cnt) {
6963 memset(data->sglistinfo_ptr, 0,
6964 SGLISTINFO_TABLE_SIZE);
6965 data->sglist_cnt = 0;
6966 }
6967}
6968
/*
 * qseecom_ioctl() - top-level ioctl dispatcher for /dev/qseecom.
 * @file: open file; ->private_data is the per-fd qseecom_dev_handle.
 * @cmd:  QSEECOM_IOCTL_* / QSEECOM_QTEEC_IOCTL_* command code.
 * @arg:  userspace pointer to the command-specific request struct.
 *
 * Each case validates that the fd is in the right role (data->type) and,
 * where a TZ call is made, serializes on app_access_lock and brackets the
 * work with data->ioctl_count so qseecom_unregister_listener()/abort can
 * wait for in-flight ioctls via abort_wq.
 *
 * Return: 0 or a negative errno.
 */
static inline long qseecom_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	struct qseecom_dev_handle *data = file->private_data;
	void __user *argp = (void __user *) arg;
	bool perf_enabled = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* Reject new work once the driver is tearing this handle down. */
	if (data->abort) {
		pr_err("Aborting qseecom driver\n");
		return -ENODEV;
	}

	switch (cmd) {
	case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
		/* Only a fresh (GENERIC) fd may become a listener. */
		if (data->type != QSEECOM_GENERIC) {
			pr_err("reg lstnr req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		pr_debug("ioctl register_listener_req()\n");
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		data->type = QSEECOM_LISTENER_SERVICE;
		ret = qseecom_register_listener(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed qseecom_register_listener: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SET_ICE_INFO: {
		struct qseecom_ice_data_t ice_data;

		ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
		if (ret) {
			pr_err("copy_from_user failed\n");
			return -EFAULT;
		}
		/* Pass the full-disk-encryption flag to the ICE driver. */
		qcom_ice_set_fde_flag(ice_data.flag);
		break;
	}

	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
						data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		pr_debug("ioctl unregister_listener_req()\n");
		/*
		 * Flag all listeners as aborting so blocked receivers wake
		 * up and release app_access_lock; cleared again below.
		 */
		__qseecom_listener_abort_all(1);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unregister_listener(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		__qseecom_listener_abort_all(0);
		if (ret)
			pr_err("failed qseecom_unregister_listener: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_CMD_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		if (qseecom.support_bus_scaling) {
			/* register bus bw in case the client doesn't do it */
			if (!data->mode) {
				mutex_lock(&qsee_bw_mutex);
				__qseecom_register_bus_bandwidth_needs(
								data, HIGH);
				mutex_unlock(&qsee_bw_mutex);
			}
			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
			if (ret) {
				pr_err("Failed to set bw.\n");
				ret = -EINVAL;
				mutex_unlock(&app_access_lock);
				break;
			}
		}
		/*
		 * On targets where crypto clock is handled by HLOS,
		 * if clk_access_cnt is zero and perf_enabled is false,
		 * then the crypto clock was not enabled before sending cmd to
		 * tz, qseecom will enable the clock to avoid service failure.
		 */
		if (!qseecom.no_clock_support &&
			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
			pr_debug("ce clock is not enabled!\n");
			ret = qseecom_perf_enable(data);
			if (ret) {
				pr_err("Failed to vote for clock with err %d\n",
						ret);
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
			perf_enabled = true;
		}
		atomic_inc(&data->ioctl_count);
		ret = qseecom_send_cmd(data, argp);
		if (qseecom.support_bus_scaling)
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		if (perf_enabled) {
			/* Drop the clock votes taken above just for this cmd. */
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		}
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed qseecom_send_cmd: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
	case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
		/* Same flow as SEND_CMD_REQ, but with ion fd translation. */
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		if (qseecom.support_bus_scaling) {
			if (!data->mode) {
				mutex_lock(&qsee_bw_mutex);
				__qseecom_register_bus_bandwidth_needs(
								data, HIGH);
				mutex_unlock(&qsee_bw_mutex);
			}
			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
			if (ret) {
				pr_err("Failed to set bw.\n");
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
		}
		/*
		 * On targets where crypto clock is handled by HLOS,
		 * if clk_access_cnt is zero and perf_enabled is false,
		 * then the crypto clock was not enabled before sending cmd to
		 * tz, qseecom will enable the clock to avoid service failure.
		 */
		if (!qseecom.no_clock_support &&
			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
			pr_debug("ce clock is not enabled!\n");
			ret = qseecom_perf_enable(data);
			if (ret) {
				pr_err("Failed to vote for clock with err %d\n",
						ret);
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
			perf_enabled = true;
		}
		atomic_inc(&data->ioctl_count);
		if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
			ret = qseecom_send_modfd_cmd(data, argp);
		else
			ret = qseecom_send_modfd_cmd_64(data, argp);
		if (qseecom.support_bus_scaling)
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		if (perf_enabled) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		}
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed qseecom_send_cmd: %d\n", ret);
		/* modfd commands record sglist state; wipe it when done. */
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_IOCTL_RECEIVE_REQ: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("receive req: invalid handle (%d), lid(%d)\n",
					data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		/*
		 * Blocks waiting for a request from TZ; deliberately does
		 * NOT take app_access_lock, or it would deadlock senders.
		 */
		atomic_inc(&data->ioctl_count);
		ret = qseecom_receive_req(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		/* -ERESTARTSYS just means a signal interrupted the wait. */
		if (ret && (ret != -ERESTARTSYS))
			pr_err("failed qseecom_receive_req: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_RESP_REQ: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("send resp req: invalid handle (%d), lid(%d)\n",
					data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		if (!qseecom.qsee_reentrancy_support)
			ret = qseecom_send_resp();
		else
			ret = qseecom_reentrancy_send_resp(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		if (ret)
			pr_err("failed qseecom_send_resp: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
		if ((data->type != QSEECOM_CLIENT_APP) &&
			(data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_SECURE_SERVICE)) {
			pr_err("set mem param req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_set_client_mem_param(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed Qqseecom_set_mem_param request: %d\n",
								ret);
		break;
	}
	case QSEECOM_IOCTL_LOAD_APP_REQ: {
		if ((data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("load app req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		/* This fd now represents a client app. */
		data->type = QSEECOM_CLIENT_APP;
		pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_load_app(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed load_app request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unload_app(data, false);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed unload_app request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_get_qseos_version(data, argp);
		if (ret)
			pr_err("qseecom_get_qseos_version: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
		if ((data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("perf enable req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if ((data->type == QSEECOM_CLIENT_APP) &&
			(data->client.app_id == 0)) {
			pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		/* Either register a bus-bw need or vote clocks directly. */
		if (qseecom.support_bus_scaling) {
			mutex_lock(&qsee_bw_mutex);
			__qseecom_register_bus_bandwidth_needs(data, HIGH);
			mutex_unlock(&qsee_bw_mutex);
		} else {
			ret = qseecom_perf_enable(data);
			if (ret)
				pr_err("Fail to vote for clocks %d\n", ret);
		}
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
		if ((data->type != QSEECOM_SECURE_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("perf disable req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if ((data->type == QSEECOM_CLIENT_APP) &&
			(data->client.app_id == 0)) {
			pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			mutex_lock(&qsee_bw_mutex);
			qseecom_unregister_bus_bandwidth_needs(data);
			mutex_unlock(&qsee_bw_mutex);
		}
		atomic_dec(&data->ioctl_count);
		break;
	}

	case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
		/* If crypto clock is not handled by HLOS, return directly. */
		if (qseecom.no_clock_support) {
			pr_debug("crypto clock is not handled by HLOS\n");
			break;
		}
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		ret = qseecom_scale_bus_bandwidth(data, argp);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("load ext elf req: invalid client handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		/* External-elf fds are released via their own unload path. */
		data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_load_external_elf(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed load_external_elf request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
		if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
			pr_err("unload ext elf req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unload_external_elf(data);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed unload_app request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
		data->type = QSEECOM_CLIENT_APP;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
		ret = qseecom_query_app_loaded(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("send cmd svc req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->type = QSEECOM_SECURE_SERVICE;
		if (qseecom.qsee_version < QSEE_VERSION_03) {
			pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_send_service_cmd(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_CREATE_KEY_REQ: {
		/* Warn-only: key ops still attempted without PFE/FDE support. */
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("create key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Create Key feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_create_key(data, argp);
		if (ret)
			pr_err("failed to create encryption key: %d\n", ret);

		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_WIPE_KEY_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("wipe key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Wipe Key feature unsupported in qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_wipe_key(data, argp);
		if (ret)
			pr_err("failed to wipe encryption key: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("update key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Update Key feature unsupported in qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_update_key_user_info(data, argp);
		if (ret)
			pr_err("failed to update key user info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("save part hash req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_save_partition_hash(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("ES activated req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_is_es_activated(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_mdtp_cipher_dip(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SEND_MODFD_RESP:
	case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("receive req: invalid handle (%d), lid(%d)\n",
					data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
			ret = qseecom_send_modfd_resp(data, argp);
		else
			ret = qseecom_send_modfd_resp_64(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		if (ret)
			pr_err("failed qseecom_send_mod_resp: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
		/* GP (GlobalPlatform TEE) entry points need QSEE >= 4.0. */
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Open session: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_open_session(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed open_session_cmd: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Close session: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_close_session(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed close_session_cmd: %d\n", ret);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed Invoke cmd: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_request_cancellation(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed request_cancellation: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_get_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_free_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_query_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	default:
		pr_err("Invalid IOCTL: 0x%x\n", cmd);
		return -EINVAL;
	}
	return ret;
}
7672
7673static int qseecom_open(struct inode *inode, struct file *file)
7674{
7675 int ret = 0;
7676 struct qseecom_dev_handle *data;
7677
7678 data = kzalloc(sizeof(*data), GFP_KERNEL);
7679 if (!data)
7680 return -ENOMEM;
7681 file->private_data = data;
7682 data->abort = 0;
7683 data->type = QSEECOM_GENERIC;
7684 data->released = false;
7685 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7686 data->mode = INACTIVE;
7687 init_waitqueue_head(&data->abort_wq);
7688 atomic_set(&data->ioctl_count, 0);
7689 return ret;
7690}
7691
/*
 * qseecom_release() - file_operations .release handler for /dev/qseecom.
 *
 * Tears down whatever role this fd took on during its lifetime: listener
 * fds are unregistered (with the listener-abort flag raised around the
 * unregister so blocked receivers drain), client-app fds unload their
 * trusted app, and generic/secure-service fds unmap their ion memory.
 * Afterwards any outstanding bus-bandwidth registration or clock votes
 * are dropped, and the handle is freed.
 *
 * Return: 0, or the error from the role-specific cleanup step.
 */
static int qseecom_release(struct inode *inode, struct file *file)
{
	struct qseecom_dev_handle *data = file->private_data;
	int ret = 0;

	/* ->released is set by ioctls that already handed off cleanup. */
	if (data->released == false) {
		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
			data->type, data->mode, data);
		switch (data->type) {
		case QSEECOM_LISTENER_SERVICE:
			/* Wake blocked listeners before taking the lock. */
			__qseecom_listener_abort_all(1);
			mutex_lock(&app_access_lock);
			ret = qseecom_unregister_listener(data);
			mutex_unlock(&app_access_lock);
			__qseecom_listener_abort_all(0);
			break;
		case QSEECOM_CLIENT_APP:
			mutex_lock(&app_access_lock);
			ret = qseecom_unload_app(data, true);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_SECURE_SERVICE:
		case QSEECOM_GENERIC:
			ret = qseecom_unmap_ion_allocated_memory(data);
			if (ret)
				pr_err("Ion Unmap failed\n");
			break;
		case QSEECOM_UNAVAILABLE_CLIENT_APP:
			break;
		default:
			pr_err("Unsupported clnt_handle_type %d",
				data->type);
			break;
		}
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		if (data->mode != INACTIVE) {
			qseecom_unregister_bus_bandwidth_needs(data);
			/* Last registered client gone: scale the bus down. */
			if (qseecom.cumulative_mode == INACTIVE) {
				ret = __qseecom_set_msm_bus_request(INACTIVE);
				if (ret)
					pr_err("Fail to scale down bus\n");
			}
		}
		mutex_unlock(&qsee_bw_mutex);
	} else {
		/* Drop any clock votes this fd still holds. */
		if (data->fast_load_enabled == true)
			qsee_disable_clock_vote(data, CLK_SFPB);
		if (data->perf_enabled == true)
			qsee_disable_clock_vote(data, CLK_DFAB);
	}
	kfree(data);

	return ret;
}
7749
7750#ifdef CONFIG_COMPAT
7751#include "compat_qseecom.c"
7752#else
7753#define compat_qseecom_ioctl NULL
7754#endif
7755
/* Character-device entry points for /dev/qseecom. */
static const struct file_operations qseecom_fops = {
		.owner = THIS_MODULE,
		.unlocked_ioctl = qseecom_ioctl,
		.compat_ioctl = compat_qseecom_ioctl,
		.open = qseecom_open,
		.release = qseecom_release
};
7763
7764static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7765{
7766 int rc = 0;
7767 struct device *pdev;
7768 struct qseecom_clk *qclk;
7769 char *core_clk_src = NULL;
7770 char *core_clk = NULL;
7771 char *iface_clk = NULL;
7772 char *bus_clk = NULL;
7773
7774 switch (ce) {
7775 case CLK_QSEE: {
7776 core_clk_src = "core_clk_src";
7777 core_clk = "core_clk";
7778 iface_clk = "iface_clk";
7779 bus_clk = "bus_clk";
7780 qclk = &qseecom.qsee;
7781 qclk->instance = CLK_QSEE;
7782 break;
7783 };
7784 case CLK_CE_DRV: {
7785 core_clk_src = "ce_drv_core_clk_src";
7786 core_clk = "ce_drv_core_clk";
7787 iface_clk = "ce_drv_iface_clk";
7788 bus_clk = "ce_drv_bus_clk";
7789 qclk = &qseecom.ce_drv;
7790 qclk->instance = CLK_CE_DRV;
7791 break;
7792 };
7793 default:
7794 pr_err("Invalid ce hw instance: %d!\n", ce);
7795 return -EIO;
7796 }
7797
7798 if (qseecom.no_clock_support) {
7799 qclk->ce_core_clk = NULL;
7800 qclk->ce_clk = NULL;
7801 qclk->ce_bus_clk = NULL;
7802 qclk->ce_core_src_clk = NULL;
7803 return 0;
7804 }
7805
7806 pdev = qseecom.pdev;
7807
7808 /* Get CE3 src core clk. */
7809 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7810 if (!IS_ERR(qclk->ce_core_src_clk)) {
7811 rc = clk_set_rate(qclk->ce_core_src_clk,
7812 qseecom.ce_opp_freq_hz);
7813 if (rc) {
7814 clk_put(qclk->ce_core_src_clk);
7815 qclk->ce_core_src_clk = NULL;
7816 pr_err("Unable to set the core src clk @%uMhz.\n",
7817 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7818 return -EIO;
7819 }
7820 } else {
7821 pr_warn("Unable to get CE core src clk, set to NULL\n");
7822 qclk->ce_core_src_clk = NULL;
7823 }
7824
7825 /* Get CE core clk */
7826 qclk->ce_core_clk = clk_get(pdev, core_clk);
7827 if (IS_ERR(qclk->ce_core_clk)) {
7828 rc = PTR_ERR(qclk->ce_core_clk);
7829 pr_err("Unable to get CE core clk\n");
7830 if (qclk->ce_core_src_clk != NULL)
7831 clk_put(qclk->ce_core_src_clk);
7832 return -EIO;
7833 }
7834
7835 /* Get CE Interface clk */
7836 qclk->ce_clk = clk_get(pdev, iface_clk);
7837 if (IS_ERR(qclk->ce_clk)) {
7838 rc = PTR_ERR(qclk->ce_clk);
7839 pr_err("Unable to get CE interface clk\n");
7840 if (qclk->ce_core_src_clk != NULL)
7841 clk_put(qclk->ce_core_src_clk);
7842 clk_put(qclk->ce_core_clk);
7843 return -EIO;
7844 }
7845
7846 /* Get CE AXI clk */
7847 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7848 if (IS_ERR(qclk->ce_bus_clk)) {
7849 rc = PTR_ERR(qclk->ce_bus_clk);
7850 pr_err("Unable to get CE BUS interface clk\n");
7851 if (qclk->ce_core_src_clk != NULL)
7852 clk_put(qclk->ce_core_src_clk);
7853 clk_put(qclk->ce_core_clk);
7854 clk_put(qclk->ce_clk);
7855 return -EIO;
7856 }
7857
7858 return rc;
7859}
7860
7861static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7862{
7863 struct qseecom_clk *qclk;
7864
7865 if (ce == CLK_QSEE)
7866 qclk = &qseecom.qsee;
7867 else
7868 qclk = &qseecom.ce_drv;
7869
7870 if (qclk->ce_clk != NULL) {
7871 clk_put(qclk->ce_clk);
7872 qclk->ce_clk = NULL;
7873 }
7874 if (qclk->ce_core_clk != NULL) {
7875 clk_put(qclk->ce_core_clk);
7876 qclk->ce_core_clk = NULL;
7877 }
7878 if (qclk->ce_bus_clk != NULL) {
7879 clk_put(qclk->ce_bus_clk);
7880 qclk->ce_bus_clk = NULL;
7881 }
7882 if (qclk->ce_core_src_clk != NULL) {
7883 clk_put(qclk->ce_core_src_clk);
7884 qclk->ce_core_src_clk = NULL;
7885 }
7886 qclk->instance = CLK_INVALID;
7887}
7888
7889static int qseecom_retrieve_ce_data(struct platform_device *pdev)
7890{
7891 int rc = 0;
7892 uint32_t hlos_num_ce_hw_instances;
7893 uint32_t disk_encrypt_pipe;
7894 uint32_t file_encrypt_pipe;
Zhen Kongffec45c2017-10-18 14:05:53 -07007895 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007896 int i;
7897 const int *tbl;
7898 int size;
7899 int entry;
7900 struct qseecom_crypto_info *pfde_tbl = NULL;
7901 struct qseecom_crypto_info *p;
7902 int tbl_size;
7903 int j;
7904 bool old_db = true;
7905 struct qseecom_ce_info_use *pce_info_use;
7906 uint32_t *unit_tbl = NULL;
7907 int total_units = 0;
7908 struct qseecom_ce_pipe_entry *pce_entry;
7909
7910 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
7911 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
7912
7913 if (of_property_read_u32((&pdev->dev)->of_node,
7914 "qcom,qsee-ce-hw-instance",
7915 &qseecom.ce_info.qsee_ce_hw_instance)) {
7916 pr_err("Fail to get qsee ce hw instance information.\n");
7917 rc = -EINVAL;
7918 goto out;
7919 } else {
7920 pr_debug("qsee-ce-hw-instance=0x%x\n",
7921 qseecom.ce_info.qsee_ce_hw_instance);
7922 }
7923
7924 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
7925 "qcom,support-fde");
7926 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
7927 "qcom,support-pfe");
7928
7929 if (!qseecom.support_pfe && !qseecom.support_fde) {
7930 pr_warn("Device does not support PFE/FDE");
7931 goto out;
7932 }
7933
7934 if (qseecom.support_fde)
7935 tbl = of_get_property((&pdev->dev)->of_node,
7936 "qcom,full-disk-encrypt-info", &size);
7937 else
7938 tbl = NULL;
7939 if (tbl) {
7940 old_db = false;
7941 if (size % sizeof(struct qseecom_crypto_info)) {
7942 pr_err("full-disk-encrypt-info tbl size(%d)\n",
7943 size);
7944 rc = -EINVAL;
7945 goto out;
7946 }
7947 tbl_size = size / sizeof
7948 (struct qseecom_crypto_info);
7949
7950 pfde_tbl = kzalloc(size, GFP_KERNEL);
7951 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
7952 total_units = 0;
7953
7954 if (!pfde_tbl || !unit_tbl) {
7955 pr_err("failed to alloc memory\n");
7956 rc = -ENOMEM;
7957 goto out;
7958 }
7959 if (of_property_read_u32_array((&pdev->dev)->of_node,
7960 "qcom,full-disk-encrypt-info",
7961 (u32 *)pfde_tbl, size/sizeof(u32))) {
7962 pr_err("failed to read full-disk-encrypt-info tbl\n");
7963 rc = -EINVAL;
7964 goto out;
7965 }
7966
7967 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7968 for (j = 0; j < total_units; j++) {
7969 if (p->unit_num == *(unit_tbl + j))
7970 break;
7971 }
7972 if (j == total_units) {
7973 *(unit_tbl + total_units) = p->unit_num;
7974 total_units++;
7975 }
7976 }
7977
7978 qseecom.ce_info.num_fde = total_units;
7979 pce_info_use = qseecom.ce_info.fde = kcalloc(
7980 total_units, sizeof(struct qseecom_ce_info_use),
7981 GFP_KERNEL);
7982 if (!pce_info_use) {
7983 pr_err("failed to alloc memory\n");
7984 rc = -ENOMEM;
7985 goto out;
7986 }
7987
7988 for (j = 0; j < total_units; j++, pce_info_use++) {
7989 pce_info_use->unit_num = *(unit_tbl + j);
7990 pce_info_use->alloc = false;
7991 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
7992 pce_info_use->num_ce_pipe_entries = 0;
7993 pce_info_use->ce_pipe_entry = NULL;
7994 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7995 if (p->unit_num == pce_info_use->unit_num)
7996 pce_info_use->num_ce_pipe_entries++;
7997 }
7998
7999 entry = pce_info_use->num_ce_pipe_entries;
8000 pce_entry = pce_info_use->ce_pipe_entry =
8001 kcalloc(entry,
8002 sizeof(struct qseecom_ce_pipe_entry),
8003 GFP_KERNEL);
8004 if (pce_entry == NULL) {
8005 pr_err("failed to alloc memory\n");
8006 rc = -ENOMEM;
8007 goto out;
8008 }
8009
8010 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8011 if (p->unit_num == pce_info_use->unit_num) {
8012 pce_entry->ce_num = p->ce;
8013 pce_entry->ce_pipe_pair =
8014 p->pipe_pair;
8015 pce_entry->valid = true;
8016 pce_entry++;
8017 }
8018 }
8019 }
8020 kfree(unit_tbl);
8021 unit_tbl = NULL;
8022 kfree(pfde_tbl);
8023 pfde_tbl = NULL;
8024 }
8025
8026 if (qseecom.support_pfe)
8027 tbl = of_get_property((&pdev->dev)->of_node,
8028 "qcom,per-file-encrypt-info", &size);
8029 else
8030 tbl = NULL;
8031 if (tbl) {
8032 old_db = false;
8033 if (size % sizeof(struct qseecom_crypto_info)) {
8034 pr_err("per-file-encrypt-info tbl size(%d)\n",
8035 size);
8036 rc = -EINVAL;
8037 goto out;
8038 }
8039 tbl_size = size / sizeof
8040 (struct qseecom_crypto_info);
8041
8042 pfde_tbl = kzalloc(size, GFP_KERNEL);
8043 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8044 total_units = 0;
8045 if (!pfde_tbl || !unit_tbl) {
8046 pr_err("failed to alloc memory\n");
8047 rc = -ENOMEM;
8048 goto out;
8049 }
8050 if (of_property_read_u32_array((&pdev->dev)->of_node,
8051 "qcom,per-file-encrypt-info",
8052 (u32 *)pfde_tbl, size/sizeof(u32))) {
8053 pr_err("failed to read per-file-encrypt-info tbl\n");
8054 rc = -EINVAL;
8055 goto out;
8056 }
8057
8058 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8059 for (j = 0; j < total_units; j++) {
8060 if (p->unit_num == *(unit_tbl + j))
8061 break;
8062 }
8063 if (j == total_units) {
8064 *(unit_tbl + total_units) = p->unit_num;
8065 total_units++;
8066 }
8067 }
8068
8069 qseecom.ce_info.num_pfe = total_units;
8070 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8071 total_units, sizeof(struct qseecom_ce_info_use),
8072 GFP_KERNEL);
8073 if (!pce_info_use) {
8074 pr_err("failed to alloc memory\n");
8075 rc = -ENOMEM;
8076 goto out;
8077 }
8078
8079 for (j = 0; j < total_units; j++, pce_info_use++) {
8080 pce_info_use->unit_num = *(unit_tbl + j);
8081 pce_info_use->alloc = false;
8082 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8083 pce_info_use->num_ce_pipe_entries = 0;
8084 pce_info_use->ce_pipe_entry = NULL;
8085 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8086 if (p->unit_num == pce_info_use->unit_num)
8087 pce_info_use->num_ce_pipe_entries++;
8088 }
8089
8090 entry = pce_info_use->num_ce_pipe_entries;
8091 pce_entry = pce_info_use->ce_pipe_entry =
8092 kcalloc(entry,
8093 sizeof(struct qseecom_ce_pipe_entry),
8094 GFP_KERNEL);
8095 if (pce_entry == NULL) {
8096 pr_err("failed to alloc memory\n");
8097 rc = -ENOMEM;
8098 goto out;
8099 }
8100
8101 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8102 if (p->unit_num == pce_info_use->unit_num) {
8103 pce_entry->ce_num = p->ce;
8104 pce_entry->ce_pipe_pair =
8105 p->pipe_pair;
8106 pce_entry->valid = true;
8107 pce_entry++;
8108 }
8109 }
8110 }
8111 kfree(unit_tbl);
8112 unit_tbl = NULL;
8113 kfree(pfde_tbl);
8114 pfde_tbl = NULL;
8115 }
8116
8117 if (!old_db)
8118 goto out1;
8119
8120 if (of_property_read_bool((&pdev->dev)->of_node,
8121 "qcom,support-multiple-ce-hw-instance")) {
8122 if (of_property_read_u32((&pdev->dev)->of_node,
8123 "qcom,hlos-num-ce-hw-instances",
8124 &hlos_num_ce_hw_instances)) {
8125 pr_err("Fail: get hlos number of ce hw instance\n");
8126 rc = -EINVAL;
8127 goto out;
8128 }
8129 } else {
8130 hlos_num_ce_hw_instances = 1;
8131 }
8132
8133 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8134 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8135 MAX_CE_PIPE_PAIR_PER_UNIT);
8136 rc = -EINVAL;
8137 goto out;
8138 }
8139
8140 if (of_property_read_u32_array((&pdev->dev)->of_node,
8141 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8142 hlos_num_ce_hw_instances)) {
8143 pr_err("Fail: get hlos ce hw instance info\n");
8144 rc = -EINVAL;
8145 goto out;
8146 }
8147
8148 if (qseecom.support_fde) {
8149 pce_info_use = qseecom.ce_info.fde =
8150 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8151 if (!pce_info_use) {
8152 pr_err("failed to alloc memory\n");
8153 rc = -ENOMEM;
8154 goto out;
8155 }
8156 /* by default for old db */
8157 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8158 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8159 pce_info_use->alloc = false;
8160 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8161 pce_info_use->ce_pipe_entry = NULL;
8162 if (of_property_read_u32((&pdev->dev)->of_node,
8163 "qcom,disk-encrypt-pipe-pair",
8164 &disk_encrypt_pipe)) {
8165 pr_err("Fail to get FDE pipe information.\n");
8166 rc = -EINVAL;
8167 goto out;
8168 } else {
8169 pr_debug("disk-encrypt-pipe-pair=0x%x",
8170 disk_encrypt_pipe);
8171 }
8172 entry = pce_info_use->num_ce_pipe_entries =
8173 hlos_num_ce_hw_instances;
8174 pce_entry = pce_info_use->ce_pipe_entry =
8175 kcalloc(entry,
8176 sizeof(struct qseecom_ce_pipe_entry),
8177 GFP_KERNEL);
8178 if (pce_entry == NULL) {
8179 pr_err("failed to alloc memory\n");
8180 rc = -ENOMEM;
8181 goto out;
8182 }
8183 for (i = 0; i < entry; i++) {
8184 pce_entry->ce_num = hlos_ce_hw_instance[i];
8185 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8186 pce_entry->valid = 1;
8187 pce_entry++;
8188 }
8189 } else {
8190 pr_warn("Device does not support FDE");
8191 disk_encrypt_pipe = 0xff;
8192 }
8193 if (qseecom.support_pfe) {
8194 pce_info_use = qseecom.ce_info.pfe =
8195 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8196 if (!pce_info_use) {
8197 pr_err("failed to alloc memory\n");
8198 rc = -ENOMEM;
8199 goto out;
8200 }
8201 /* by default for old db */
8202 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8203 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8204 pce_info_use->alloc = false;
8205 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8206 pce_info_use->ce_pipe_entry = NULL;
8207
8208 if (of_property_read_u32((&pdev->dev)->of_node,
8209 "qcom,file-encrypt-pipe-pair",
8210 &file_encrypt_pipe)) {
8211 pr_err("Fail to get PFE pipe information.\n");
8212 rc = -EINVAL;
8213 goto out;
8214 } else {
8215 pr_debug("file-encrypt-pipe-pair=0x%x",
8216 file_encrypt_pipe);
8217 }
8218 entry = pce_info_use->num_ce_pipe_entries =
8219 hlos_num_ce_hw_instances;
8220 pce_entry = pce_info_use->ce_pipe_entry =
8221 kcalloc(entry,
8222 sizeof(struct qseecom_ce_pipe_entry),
8223 GFP_KERNEL);
8224 if (pce_entry == NULL) {
8225 pr_err("failed to alloc memory\n");
8226 rc = -ENOMEM;
8227 goto out;
8228 }
8229 for (i = 0; i < entry; i++) {
8230 pce_entry->ce_num = hlos_ce_hw_instance[i];
8231 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8232 pce_entry->valid = 1;
8233 pce_entry++;
8234 }
8235 } else {
8236 pr_warn("Device does not support PFE");
8237 file_encrypt_pipe = 0xff;
8238 }
8239
8240out1:
8241 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8242 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8243out:
8244 if (rc) {
8245 if (qseecom.ce_info.fde) {
8246 pce_info_use = qseecom.ce_info.fde;
8247 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8248 pce_entry = pce_info_use->ce_pipe_entry;
8249 kfree(pce_entry);
8250 pce_info_use++;
8251 }
8252 }
8253 kfree(qseecom.ce_info.fde);
8254 qseecom.ce_info.fde = NULL;
8255 if (qseecom.ce_info.pfe) {
8256 pce_info_use = qseecom.ce_info.pfe;
8257 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8258 pce_entry = pce_info_use->ce_pipe_entry;
8259 kfree(pce_entry);
8260 pce_info_use++;
8261 }
8262 }
8263 kfree(qseecom.ce_info.pfe);
8264 qseecom.ce_info.pfe = NULL;
8265 }
8266 kfree(unit_tbl);
8267 kfree(pfde_tbl);
8268 return rc;
8269}
8270
8271static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8272 void __user *argp)
8273{
8274 struct qseecom_ce_info_req req;
8275 struct qseecom_ce_info_req *pinfo = &req;
8276 int ret = 0;
8277 int i;
8278 unsigned int entries;
8279 struct qseecom_ce_info_use *pce_info_use, *p;
8280 int total = 0;
8281 bool found = false;
8282 struct qseecom_ce_pipe_entry *pce_entry;
8283
8284 ret = copy_from_user(pinfo, argp,
8285 sizeof(struct qseecom_ce_info_req));
8286 if (ret) {
8287 pr_err("copy_from_user failed\n");
8288 return ret;
8289 }
8290
8291 switch (pinfo->usage) {
8292 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8293 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8294 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8295 if (qseecom.support_fde) {
8296 p = qseecom.ce_info.fde;
8297 total = qseecom.ce_info.num_fde;
8298 } else {
8299 pr_err("system does not support fde\n");
8300 return -EINVAL;
8301 }
8302 break;
8303 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8304 if (qseecom.support_pfe) {
8305 p = qseecom.ce_info.pfe;
8306 total = qseecom.ce_info.num_pfe;
8307 } else {
8308 pr_err("system does not support pfe\n");
8309 return -EINVAL;
8310 }
8311 break;
8312 default:
8313 pr_err("unsupported usage %d\n", pinfo->usage);
8314 return -EINVAL;
8315 }
8316
8317 pce_info_use = NULL;
8318 for (i = 0; i < total; i++) {
8319 if (!p->alloc)
8320 pce_info_use = p;
8321 else if (!memcmp(p->handle, pinfo->handle,
8322 MAX_CE_INFO_HANDLE_SIZE)) {
8323 pce_info_use = p;
8324 found = true;
8325 break;
8326 }
8327 p++;
8328 }
8329
8330 if (pce_info_use == NULL)
8331 return -EBUSY;
8332
8333 pinfo->unit_num = pce_info_use->unit_num;
8334 if (!pce_info_use->alloc) {
8335 pce_info_use->alloc = true;
8336 memcpy(pce_info_use->handle,
8337 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8338 }
8339 if (pce_info_use->num_ce_pipe_entries >
8340 MAX_CE_PIPE_PAIR_PER_UNIT)
8341 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8342 else
8343 entries = pce_info_use->num_ce_pipe_entries;
8344 pinfo->num_ce_pipe_entries = entries;
8345 pce_entry = pce_info_use->ce_pipe_entry;
8346 for (i = 0; i < entries; i++, pce_entry++)
8347 pinfo->ce_pipe_entry[i] = *pce_entry;
8348 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8349 pinfo->ce_pipe_entry[i].valid = 0;
8350
8351 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8352 pr_err("copy_to_user failed\n");
8353 ret = -EFAULT;
8354 }
8355 return ret;
8356}
8357
8358static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8359 void __user *argp)
8360{
8361 struct qseecom_ce_info_req req;
8362 struct qseecom_ce_info_req *pinfo = &req;
8363 int ret = 0;
8364 struct qseecom_ce_info_use *p;
8365 int total = 0;
8366 int i;
8367 bool found = false;
8368
8369 ret = copy_from_user(pinfo, argp,
8370 sizeof(struct qseecom_ce_info_req));
8371 if (ret)
8372 return ret;
8373
8374 switch (pinfo->usage) {
8375 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8376 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8377 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8378 if (qseecom.support_fde) {
8379 p = qseecom.ce_info.fde;
8380 total = qseecom.ce_info.num_fde;
8381 } else {
8382 pr_err("system does not support fde\n");
8383 return -EINVAL;
8384 }
8385 break;
8386 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8387 if (qseecom.support_pfe) {
8388 p = qseecom.ce_info.pfe;
8389 total = qseecom.ce_info.num_pfe;
8390 } else {
8391 pr_err("system does not support pfe\n");
8392 return -EINVAL;
8393 }
8394 break;
8395 default:
8396 pr_err("unsupported usage %d\n", pinfo->usage);
8397 return -EINVAL;
8398 }
8399
8400 for (i = 0; i < total; i++) {
8401 if (p->alloc &&
8402 !memcmp(p->handle, pinfo->handle,
8403 MAX_CE_INFO_HANDLE_SIZE)) {
8404 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8405 p->alloc = false;
8406 found = true;
8407 break;
8408 }
8409 p++;
8410 }
8411 return ret;
8412}
8413
8414static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8415 void __user *argp)
8416{
8417 struct qseecom_ce_info_req req;
8418 struct qseecom_ce_info_req *pinfo = &req;
8419 int ret = 0;
8420 int i;
8421 unsigned int entries;
8422 struct qseecom_ce_info_use *pce_info_use, *p;
8423 int total = 0;
8424 bool found = false;
8425 struct qseecom_ce_pipe_entry *pce_entry;
8426
8427 ret = copy_from_user(pinfo, argp,
8428 sizeof(struct qseecom_ce_info_req));
8429 if (ret)
8430 return ret;
8431
8432 switch (pinfo->usage) {
8433 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8434 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8435 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8436 if (qseecom.support_fde) {
8437 p = qseecom.ce_info.fde;
8438 total = qseecom.ce_info.num_fde;
8439 } else {
8440 pr_err("system does not support fde\n");
8441 return -EINVAL;
8442 }
8443 break;
8444 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8445 if (qseecom.support_pfe) {
8446 p = qseecom.ce_info.pfe;
8447 total = qseecom.ce_info.num_pfe;
8448 } else {
8449 pr_err("system does not support pfe\n");
8450 return -EINVAL;
8451 }
8452 break;
8453 default:
8454 pr_err("unsupported usage %d\n", pinfo->usage);
8455 return -EINVAL;
8456 }
8457
8458 pce_info_use = NULL;
8459 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8460 pinfo->num_ce_pipe_entries = 0;
8461 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8462 pinfo->ce_pipe_entry[i].valid = 0;
8463
8464 for (i = 0; i < total; i++) {
8465
8466 if (p->alloc && !memcmp(p->handle,
8467 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8468 pce_info_use = p;
8469 found = true;
8470 break;
8471 }
8472 p++;
8473 }
8474 if (!pce_info_use)
8475 goto out;
8476 pinfo->unit_num = pce_info_use->unit_num;
8477 if (pce_info_use->num_ce_pipe_entries >
8478 MAX_CE_PIPE_PAIR_PER_UNIT)
8479 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8480 else
8481 entries = pce_info_use->num_ce_pipe_entries;
8482 pinfo->num_ce_pipe_entries = entries;
8483 pce_entry = pce_info_use->ce_pipe_entry;
8484 for (i = 0; i < entries; i++, pce_entry++)
8485 pinfo->ce_pipe_entry[i] = *pce_entry;
8486 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8487 pinfo->ce_pipe_entry[i].valid = 0;
8488out:
8489 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8490 pr_err("copy_to_user failed\n");
8491 ret = -EFAULT;
8492 }
8493 return ret;
8494}
8495
8496/*
8497 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8498 * then whitelist feature is not supported.
8499 */
8500static int qseecom_check_whitelist_feature(void)
8501{
8502 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8503
8504 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8505}
8506
/*
 * qseecom_probe() - platform driver probe: set up the qseecom char device,
 * query the QSEE version, parse device-tree configuration (CE info, clocks,
 * bus scaling), notify TZ of the secure-app region, and mark the driver
 * READY.
 *
 * Resources are acquired in order and unwound via the goto chain at the
 * bottom on any failure.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qseecom_probe(struct platform_device *pdev)
{
	int rc;
	int i;
	uint32_t feature = 10;	/* feature id passed to the TZ version query */
	struct device *class_dev;
	struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
	struct qseecom_command_scm_resp resp;
	struct qseecom_ce_info_use *pce_info_use = NULL;

	/* Reset all global driver state before touching any hardware. */
	qseecom.qsee_bw_count = 0;
	qseecom.qsee_perf_client = 0;
	qseecom.qsee_sfpb_bw_count = 0;

	qseecom.qsee.ce_core_clk = NULL;
	qseecom.qsee.ce_clk = NULL;
	qseecom.qsee.ce_core_src_clk = NULL;
	qseecom.qsee.ce_bus_clk = NULL;

	qseecom.cumulative_mode = 0;
	qseecom.current_mode = INACTIVE;
	qseecom.support_bus_scaling = false;
	qseecom.support_fde = false;
	qseecom.support_pfe = false;

	qseecom.ce_drv.ce_core_clk = NULL;
	qseecom.ce_drv.ce_clk = NULL;
	qseecom.ce_drv.ce_core_src_clk = NULL;
	qseecom.ce_drv.ce_bus_clk = NULL;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);

	qseecom.app_block_ref_cnt = 0;
	init_waitqueue_head(&qseecom.app_block_wq);
	qseecom.whitelist_support = true;

	/* Character device registration: region -> class -> device -> cdev. */
	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
	if (IS_ERR(driver_class)) {
		/*
		 * NOTE(review): the PTR_ERR() value is discarded and -ENOMEM
		 * reported unconditionally — confirm this is intentional.
		 */
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
			QSEECOM_DEV);
	if (IS_ERR(class_dev)) {
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&qseecom.cdev, &qseecom_fops);
	qseecom.cdev.owner = THIS_MODULE;

	rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}

	/* Bookkeeping lists/locks for listeners, apps and kernel clients. */
	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
	spin_lock_init(&qseecom.registered_listener_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
	spin_lock_init(&qseecom.registered_app_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
	spin_lock_init(&qseecom.registered_kclient_list_lock);
	init_waitqueue_head(&qseecom.send_resp_wq);
	qseecom.send_resp_flag = 0;

	/* Ask TZ for its version; fall back default is QSEEE_VERSION_00. */
	qseecom.qsee_version = QSEEE_VERSION_00;
	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
		&resp, sizeof(resp));
	/*
	 * NOTE(review): resp.result is printed before rc is checked, so on
	 * SCM failure this may log an uninitialized value — confirm.
	 */
	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
	if (rc) {
		pr_err("Failed to get QSEE version info %d\n", rc);
		goto exit_del_cdev;
	}
	qseecom.qsee_version = resp.result;
	qseecom.qseos_version = QSEOS_VERSION_14;
	qseecom.commonlib_loaded = false;
	qseecom.commonlib64_loaded = false;
	qseecom.pdev = class_dev;
	/* Create ION msm client */
	qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
	if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
		pr_err("Ion client cannot be created\n");
		rc = -ENOMEM;
		goto exit_del_cdev;
	}

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		/* Device-tree path: parse all qcom,* configuration flags. */
		qseecom.pdev->of_node = pdev->dev.of_node;
		qseecom.support_bus_scaling =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,support-bus-scaling");
		rc = qseecom_retrieve_ce_data(pdev);
		if (rc)
			goto exit_destroy_ion_client;
		qseecom.appsbl_qseecom_support =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,appsbl-qseecom-support");
		pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
				qseecom.appsbl_qseecom_support);

		qseecom.commonlib64_loaded =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,commonlib64-loaded-by-uefi");
		pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
				qseecom.commonlib64_loaded);
		qseecom.fde_key_size =
			of_property_read_bool((&pdev->dev)->of_node,
						"qcom,fde-key-size");
		qseecom.no_clock_support =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,no-clock-support");
		/*
		 * NOTE(review): the log messages in this if/else look
		 * swapped relative to the flag's meaning — confirm against
		 * the DT binding before relying on these logs.
		 */
		if (!qseecom.no_clock_support) {
			pr_info("qseecom clocks handled by other subsystem\n");
		} else {
			pr_info("no-clock-support=0x%x",
			qseecom.no_clock_support);
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,qsee-reentrancy-support",
					&qseecom.qsee_reentrancy_support)) {
			pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
			qseecom.qsee_reentrancy_support = 0;
		} else {
			pr_warn("qseecom.qsee_reentrancy_support = %d\n",
				qseecom.qsee_reentrancy_support);
		}

		/*
		 * The qseecom bus scaling flag can not be enabled when
		 * crypto clock is not handled by HLOS.
		 */
		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
			pr_err("support_bus_scaling flag can not be enabled.\n");
			rc = -EINVAL;
			goto exit_destroy_ion_client;
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,ce-opp-freq",
					&qseecom.ce_opp_freq_hz)) {
			pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
			qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
		}
		rc = __qseecom_init_clk(CLK_QSEE);
		if (rc)
			goto exit_destroy_ion_client;

		/*
		 * A separate CE_DRV clock set is only needed when the HLOS
		 * crypto engine instance differs from QSEE's; otherwise the
		 * QSEE clock handles are shared.
		 */
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde)) {
			rc = __qseecom_init_clk(CLK_CE_DRV);
			if (rc) {
				__qseecom_deinit_clk(CLK_QSEE);
				goto exit_destroy_ion_client;
			}
		} else {
			struct qseecom_clk *qclk;

			qclk = &qseecom.qsee;
			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
			qseecom.ce_drv.ce_clk = qclk->ce_clk;
			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
		}

		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						msm_bus_cl_get_pdata(pdev);
		/*
		 * Tell TZ where the secure-app region lives, unless the
		 * bootloader (appsbl) already did so or the region is
		 * protected.
		 */
		if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
			(!qseecom.is_apps_region_protected &&
			!qseecom.appsbl_qseecom_support)) {
			struct resource *resource = NULL;
			struct qsee_apps_region_info_ireq req;
			struct qsee_apps_region_info_64bit_ireq req_64bit;
			struct qseecom_command_scm_resp resp;
			void *cmd_buf = NULL;
			size_t cmd_len;

			resource = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "secapp-region");
			if (resource) {
				/* 32-bit vs 64-bit request layout by TZ version. */
				if (qseecom.qsee_version < QSEE_VERSION_40) {
					req.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req.addr = (uint32_t)resource->start;
					req.size = resource_size(resource);
					cmd_buf = (void *)&req;
					cmd_len = sizeof(struct
						qsee_apps_region_info_ireq);
					pr_warn("secure app region addr=0x%x size=0x%x",
							req.addr, req.size);
				} else {
					req_64bit.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req_64bit.addr = resource->start;
					req_64bit.size = resource_size(
							resource);
					cmd_buf = (void *)&req_64bit;
					cmd_len = sizeof(struct
						qsee_apps_region_info_64bit_ireq);
					pr_warn("secure app region addr=0x%llx size=0x%x",
						req_64bit.addr, req_64bit.size);
				}
			} else {
				pr_err("Fail to get secure app region info\n");
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
			/* Clock must be up for the duration of the SCM call. */
			rc = __qseecom_enable_clk(CLK_QSEE);
			if (rc) {
				pr_err("CLK_QSEE enabling failed (%d)\n", rc);
				rc = -EIO;
				goto exit_deinit_clock;
			}
			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len,
					&resp, sizeof(resp));
			__qseecom_disable_clk(CLK_QSEE);
			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
				pr_err("send secapp reg fail %d resp.res %d\n",
					rc, resp.result);
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
		}
		/*
		 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
		 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
		 * Pls add "qseecom.commonlib64_loaded = true" here too.
		 */
		if (qseecom.is_apps_region_protected ||
					qseecom.appsbl_qseecom_support)
			qseecom.commonlib_loaded = true;
	} else {
		/* Legacy (non-DT) path: bus data comes from platform_data. */
		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						pdev->dev.platform_data;
	}
	if (qseecom.support_bus_scaling) {
		/* Timer/work pair that drops bus votes after inactivity. */
		init_timer(&(qseecom.bw_scale_down_timer));
		INIT_WORK(&qseecom.bw_inactive_req_ws,
				qseecom_bw_inactive_req_work);
		qseecom.bw_scale_down_timer.function =
				qseecom_scale_bus_bandwidth_timer_callback;
	}
	qseecom.timer_running = false;
	qseecom.qsee_perf_client = msm_bus_scale_register_client(
					qseecom_platform_support);

	qseecom.whitelist_support = qseecom_check_whitelist_feature();
	pr_warn("qseecom.whitelist_support = %d\n",
				qseecom.whitelist_support);

	/* A missing bus client is logged but not fatal. */
	if (!qseecom.qsee_perf_client)
		pr_err("Unable to register bus client\n");

	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return 0;

/* Error unwind: release resources in reverse order of acquisition. */
exit_deinit_clock:
	__qseecom_deinit_clk(CLK_QSEE);
	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
		(qseecom.support_pfe || qseecom.support_fde))
		__qseecom_deinit_clk(CLK_CE_DRV);
exit_destroy_ion_client:
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.fde);
	}
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.pfe);
	}
	ion_client_destroy(qseecom.ion_clnt);
exit_del_cdev:
	cdev_del(&qseecom.cdev);
exit_destroy_device:
	device_destroy(driver_class, qseecom_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qseecom_device_no, 1);
	return rc;
}
8807
/*
 * qseecom_remove() - platform driver remove: unload every registered kernel
 * client app, release CE info tables, clocks, bus client, ION client and
 * the character device.
 *
 * Return: the last qseecom_unload_app() status (0 when all unloads
 * succeeded or there were no clients).
 */
static int qseecom_remove(struct platform_device *pdev)
{
	struct qseecom_registered_kclient_list *kclient = NULL;
	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
	unsigned long flags = 0;
	int ret = 0;
	int i;
	struct qseecom_ce_pipe_entry *pce_entry;
	struct qseecom_ce_info_use *pce_info_use;

	/* Block new ioctls before tearing anything down. */
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);

	/*
	 * NOTE(review): qseecom_unload_app() is called with app_access_lock
	 * (a mutex) while registered_kclient_list_lock (a spinlock) is held
	 * with IRQs off — sleeping in atomic context; confirm against the
	 * locking rules for this driver.
	 */
	list_for_each_entry_safe(kclient, kclient_tmp,
		&qseecom.registered_kclient_list_head, list) {

		/* Break the loop if client handle is NULL */
		if (!kclient->handle) {
			list_del(&kclient->list);
			kzfree(kclient);
			break;
		}

		list_del(&kclient->list);
		mutex_lock(&app_access_lock);
		ret = qseecom_unload_app(kclient->handle->dev, false);
		mutex_unlock(&app_access_lock);
		if (!ret) {
			/* Only free on successful unload; entry is already
			 * unlinked either way. */
			kzfree(kclient->handle->dev);
			kzfree(kclient->handle);
			kzfree(kclient);
		}
	}

	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);

	if (qseecom.qseos_version > QSEEE_VERSION_00)
		qseecom_unload_commonlib_image();

	/* Drop any outstanding bus vote, then unregister the bus client. */
	if (qseecom.qsee_perf_client)
		msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
									0);
	if (pdev->dev.platform_data != NULL)
		msm_bus_scale_unregister_client(qseecom.qsee_perf_client);

	if (qseecom.support_bus_scaling) {
		cancel_work_sync(&qseecom.bw_inactive_req_ws);
		del_timer_sync(&qseecom.bw_scale_down_timer);
	}

	/* Free per-unit pipe-entry arrays, then the unit tables themselves. */
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.fde);
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.pfe);

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		__qseecom_deinit_clk(CLK_QSEE);
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde))
			__qseecom_deinit_clk(CLK_CE_DRV);
	}

	ion_client_destroy(qseecom.ion_clnt);

	cdev_del(&qseecom.cdev);

	device_destroy(driver_class, qseecom_device_no);

	class_destroy(driver_class);

	unregister_chrdev_region(qseecom_device_no, 1);

	return ret;
}
8897
/*
 * qseecom_suspend() - legacy PM suspend hook: vote the bus down, gate the
 * QSEE crypto clocks, and stop the bandwidth scale-down timer.
 *
 * Return: always 0 (bus-scaling failure is only logged).
 */
static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
	/* Nothing to gate when HLOS does not own the crypto clocks. */
	if (qseecom.no_clock_support)
		return 0;

	/* Lock order (matches resume): qsee_bw_mutex, then clk_access_lock. */
	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);

	if (qseecom.current_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, INACTIVE);
		if (ret)
			pr_err("Fail to scale down bus\n");
		else
			qseecom.current_mode = INACTIVE;
	}

	/* Gate only clocks that are currently held (clk_access_cnt > 0). */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
		if (qclk->ce_core_clk != NULL)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
	}

	/*
	 * NOTE(review): bw_scale_down_timer is only init_timer()'d in probe
	 * when support_bus_scaling is set, but this del_timer_sync() runs
	 * unconditionally — confirm this is safe when bus scaling is off.
	 */
	del_timer_sync(&(qseecom.bw_scale_down_timer));
	qseecom.timer_running = false;

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	cancel_work_sync(&qseecom.bw_inactive_req_ws);

	return 0;
}
8938
/*
 * qseecom_resume() - legacy PM resume hook: restore the pre-suspend bus
 * vote, re-enable any crypto clocks that were held at suspend, and re-arm
 * the bandwidth scale-down timer.
 *
 * On a clock re-enable failure the already-enabled clocks are unwound via
 * the err labels and -EIO is returned; the driver state is still set back
 * to READY.
 *
 * Return: 0 on success, -EIO if a clock could not be re-enabled.
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	/* Nothing to restore when HLOS does not own the crypto clocks. */
	if (qseecom.no_clock_support)
		goto exit;

	/* Lock order (matches suspend): qsee_bw_mutex, then clk_access_lock. */
	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	/* Cap the restored vote at HIGH. */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	if (qseecom.cumulative_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	/*
	 * Re-enable clocks that were held at suspend time, in core ->
	 * iface -> bus order; unwind in reverse on failure.
	 */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}

	/* Re-arm the inactivity timer if anything is active again. */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;

ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
9018
/* Device-tree match table: binds this driver to "qcom,qseecom" nodes. */
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}
};
9025
/* Platform driver descriptor wiring probe/remove and legacy PM callbacks. */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.owner = THIS_MODULE,
		.of_match_table = qseecom_match,
	},
};
9037
/* Module init: register the platform driver; probe runs on device match. */
static int qseecom_init(void)
{
	return platform_driver_register(&qseecom_plat_driver);
}
9042
/* Module exit: unregister the platform driver (invokes qseecom_remove). */
static void qseecom_exit(void)
{
	platform_driver_unregister(&qseecom_plat_driver);
}
9047
/* Module metadata and init/exit entry points. */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");

module_init(qseecom_init);
module_exit(qseecom_exit);