/*
 * QTI Secure Execution Environment Communicator (QSEECOM) driver
 *
 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
/* Device node name and QSEE/QSEOS interface version identifiers. */
#define QSEECOM_DEV			"qseecom"
#define QSEOS_VERSION_14		0x14
#define QSEEE_VERSION_00		0x400000
#define QSEE_VERSION_01			0x401000
#define QSEE_VERSION_02			0x402000
#define QSEE_VERSION_03			0x403000
#define QSEE_VERSION_04			0x404000
#define QSEE_VERSION_05			0x405000
#define QSEE_VERSION_20			0x800000
#define QSEE_VERSION_40			0x1000000  /* TZ.BF.4.0 */

/* Crypto engine clock rate and divisor used for CE frequency setup. */
#define QSEE_CE_CLK_100MHZ		100000000
#define CE_CLK_DIV			1000000

/* Max scatter/gather entries per message and resulting 64-bit buffer size. */
#define QSEECOM_MAX_SG_ENTRY			4096
#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT	\
		(QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)

#define QSEECOM_INVALID_KEY_ID  0xff

/* Save partition image hash for authentication check */
#define SCM_SAVE_PARTITION_HASH_ID	0x01

/* Check if enterprise security is activate */
#define SCM_IS_ACTIVATED_ID		0x02

/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
#define SCM_MDTP_CIPHER_DIP		0x01

/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
#define MAX_DIP			0x20000

/* Listener service identifier bases. */
#define RPMB_SERVICE			0x2000
#define SSD_SERVICE			0x3000

/* Crypto clock vote timeouts (ms) for send-cmd and app-load paths. */
#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT	2000
#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT	2000
#define TWO 2
/* Inline Crypto Engine (ICE) CE numbers for UFS and SDCC storage. */
#define QSEECOM_UFS_ICE_CE_NUM 10
#define QSEECOM_SDCC_ICE_CE_NUM 20
#define QSEECOM_ICE_FDE_KEY_INDEX 0

/* 4 GB boundary, used to classify physical addresses. */
#define PHY_ADDR_4G	(1ULL<<32)

/* Driver lifecycle states tracked in qseecom.qseecom_state. */
#define QSEECOM_STATE_NOT_READY         0
#define QSEECOM_STATE_SUSPEND           1
#define QSEECOM_STATE_READY             2
/* Bit shift applied to the ICE FDE key-size selector (see enum below). */
#define QSEECOM_ICE_FDE_KEY_SIZE_MASK   2

/*
 * default ce info unit to 0 for
 * services which
 * support only single instance.
 * Most of services are in this category.
 */
#define DEFAULT_CE_INFO_UNIT 0
#define DEFAULT_NUM_CE_INFO_UNIT 1
111
/* Clock identifiers used when voting bus bandwidth. */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};

/*
 * ICE full-disk-encryption key sizes; each value is a selector shifted
 * left by QSEECOM_ICE_FDE_KEY_SIZE_MASK (2).
 */
enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};

/* What kind of client a /dev/qseecom file handle represents. */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,
	QSEECOM_LISTENER_SERVICE,
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};

/* Which crypto-engine hardware instance a clock set belongs to. */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,
	CLK_CE_DRV,
	CLK_INVALID,
};
139
/* Character-device bookkeeping for the qseecom node. */
static struct class *driver_class;
static dev_t qseecom_device_no;

/* Serialize bandwidth voting, app access, and clock access respectively. */
static DEFINE_MUTEX(qsee_bw_mutex);
static DEFINE_MUTEX(app_access_lock);
static DEFINE_MUTEX(clk_access_lock);

/*
 * Descriptor for shared-buffer regions passed to TZ; layout of
 * indexAndFlags is documented below.
 */
struct sglist_info {
	uint32_t indexAndFlags;
	uintint32_t sizeOrCount;
};

/*
 * The 31th bit indicates only one or multiple physical address inside
 * the request buffer. If it is set, the index locates a single physical addr
 * inside the request buffer, and `sizeOrCount` is the size of the memory being
 * shared at that physical address.
 * Otherwise, the index locates an array of {start, len} pairs (a
 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
 * that array.
 *
 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
 *
 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
 */
#define SGLISTINFO_SET_INDEX_FLAG(c, s, i)	\
	((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))

#define SGLISTINFO_TABLE_SIZE	(sizeof(struct sglist_info) * MAX_ION_FD)

#define FEATURE_ID_WHITELIST	15	/*whitelist feature id*/

/* Pack a whitelist feature version as major.minor.patch bit fields. */
#define MAKE_WHITELIST_VERSION(major, minor, patch) \
	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
175
/*
 * One registered TZ listener service: its shared buffer (ion-backed),
 * wait queues for request/response handshaking, and per-request
 * scatter/gather bookkeeping.
 */
struct qseecom_registered_listener_list {
	struct list_head                 list;
	struct qseecom_register_listener_req svc;
	void  *user_virt_sb_base;	/* userspace view of shared buffer */
	u8 *sb_virt;			/* kernel virtual base */
	phys_addr_t sb_phys;		/* physical base */
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	wait_queue_head_t          rcv_req_wq;
	int                        rcv_req_flag;
	int                        send_resp_flag;
	bool                       listener_in_use;
	/* wq for thread blocked on this listener*/
	wait_queue_head_t          listener_block_app_wq;
	struct sglist_info         sglistinfo_ptr[MAX_ION_FD];
	uint32_t                   sglist_cnt;
	int                        abort;
};

/* A loaded TZ application, reference-counted across clients. */
struct qseecom_registered_app_list {
	struct list_head                 list;
	u32  app_id;
	u32  ref_cnt;
	char app_name[MAX_APP_NAME_SIZE];
	u32  app_arch;
	bool app_blocked;
	u32  check_block;
	u32  blocked_on_listener_id;
};

/* Kernel-space (in-kernel API) client handle tracking. */
struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};

/* Usage record for one crypto-engine info unit (FDE or PFE). */
struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
	unsigned int unit_num;
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;
	uint32_t type;
};

/* Crypto-engine hardware usage: full-disk and per-file encryption units. */
struct ce_hw_usage_info {
	uint32_t  qsee_ce_hw_instance;
	uint32_t  num_fde;
	struct qseecom_ce_info_use *fde;
	uint32_t  num_pfe;
	struct qseecom_ce_info_use *pfe;
};

/* Clock handles for one CE hardware instance, with an access refcount. */
struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;
};
236
/*
 * Global driver state: registered listener/app/kclient lists with their
 * locks, QSEE version info, clock/bandwidth voting state, and feature
 * capability flags discovered at probe time.
 */
struct qseecom_control {
	struct ion_client *ion_clnt;		/* Ion client */
	struct list_head  registered_listener_list_head;
	spinlock_t        registered_listener_list_lock;

	struct list_head  registered_app_list_head;
	spinlock_t        registered_app_list_lock;

	struct list_head   registered_kclient_list_head;
	spinlock_t        registered_kclient_list_lock;

	wait_queue_head_t send_resp_wq;
	int               send_resp_flag;

	uint32_t          qseos_version;
	uint32_t          qsee_version;
	struct device own*pdev;
	bool  whitelist_support;
	bool  commonlib_loaded;
	bool  commonlib64_loaded;
	struct ce_hw_usage_info ce_info;

	int qsee_bw_count;
	int qsee_sfpb_bw_count;

	uint32_t qsee_perf_client;
	struct qseecom_clk qsee;
	struct qseecom_clk ce_drv;

	bool support_bus_scaling;
	bool support_fde;
	bool support_pfe;
	bool fde_key_size;
	uint32_t  cumulative_mode;
	enum qseecom_bandwidth_request_mode  current_mode;
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;

	uint32_t app_block_ref_cnt;
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;
	int is_apps_region_protected;
	bool smcinvoke_support;
};
287
/* Secure buffer fd info: DMA-allocated backing for ION-fd based buffers. */
struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;
	size_t size;
	void *vbase;
	dma_addr_t pbase;
};

/* Memory reference parameter (offset + size) passed inside requests. */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};

/* Per-file-handle state for a client app: shared buffer and app identity. */
struct qseecom_client_handle {
	u32  app_id;
	u8 *sb_virt;
	phys_addr_t sb_phys;
	unsigned long user_virt_sb_base;
	size_t sb_length;
	struct ion_handle *ihandle;		/* Retrieve phy addr */
	char app_name[MAX_APP_NAME_SIZE];
	u32  app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
};

/* Per-file-handle state for a listener service (just its id). */
struct qseecom_listener_handle {
	u32               id;
};

/* Singleton driver state instance. */
static struct qseecom_control qseecom;

/*
 * One open /dev/qseecom descriptor: either a client app or a listener
 * (tagged union), plus abort/ioctl/perf tracking for that descriptor.
 */
struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;
		struct qseecom_listener_handle listener;
	};
	bool released;
	int               abort;
	wait_queue_head_t abort_wq;
	atomic_t          ioctl_count;
	bool  perf_enabled;
	bool  fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	bool use_legacy_cmd;
};

/* Human-readable description for a key-id usage slot. */
struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};

/* A crypto unit selection: info unit, CE instance, and pipe pair. */
struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};
345
/*
 * Descriptions indexed by key-id usage; the index order corresponds to
 * the key usage enumeration used elsewhere in the driver (undefined,
 * FDE, PFE, UFS-ICE FDE, SDCC-ICE FDE).
 */
static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},

	{
		.desc = "Full Disk Encryption",
	},

	{
		.desc = "Per File Encryption",
	},

	{
		.desc = "UFS ICE Full Disk Encryption",
	},

	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};
367
/* Function proto types */
static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name);
static int qseecom_enable_ice_setup(int usage);
static int qseecom_disable_ice_setup(int usage);
static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
				void __user *argp);
static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
				void __user *argp);
static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
				void __user *argp);

/*
 * Early-param parser for "androidboot.keymaster=": stores the parsed
 * value into qseecom.is_apps_region_protected.  Returns 1 to indicate
 * the option was consumed (standard __setup convention).
 */
static int get_qseecom_keymaster_status(char *str)
{
	get_option(&str, &qseecom.is_apps_region_protected);
	return 1;
}
__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
392
/*
 * Dispatch a QSEECOM request to TZ via the ARMv8 SMC interface (scm_call2).
 *
 * @svc_id:    SCM service id (6, SCM_SVC_ES, or SCM_SVC_TZSCHEDULER).
 * @tz_cmd_id: command id within the service (used for svc 6 and SCM_SVC_ES).
 * @req_buf:   request structure; for SCM_SVC_TZSCHEDULER its first u32 is
 *             the QSEOS command id that selects the marshalling below.
 * @resp_buf:  receives a struct qseecom_command_scm_resp built from the
 *             three SMC return words.
 *
 * Returns 0 on successful SMC invocation (TZ-level status is reported
 * separately through resp_buf), -EINVAL for unsupported ids or NULL
 * buffers, -ENOMEM on temporary-buffer allocation failure, or the error
 * from scm_call2().
 *
 * Buffers handed to TZ by physical address are flushed with
 * dmac_flush_range() first so TZ observes up-to-date memory.
 */
static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
			const void *req_buf, void *resp_buf)
{
	int      ret = 0;
	uint32_t smc_id = 0;
	uint32_t qseos_cmd_id = 0;
	struct scm_desc desc = {0};
	struct qseecom_command_scm_resp *scm_resp = NULL;

	if (!req_buf || !resp_buf) {
		pr_err("Invalid buffer pointer\n");
		return -EINVAL;
	}
	/* First word of every QSEOS request is the command id. */
	qseos_cmd_id = *(uint32_t *)req_buf;
	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;

	switch (svc_id) {
	case 6: {
		/* Only feature-version query (tz_cmd_id 3) is supported. */
		if (tz_cmd_id == 3) {
			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
			desc.args[0] = *(uint32_t *)req_buf;
		} else {
			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
				svc_id, tz_cmd_id);
			return -EINVAL;
		}
		ret = scm_call2(smc_id, &desc);
		break;
	}
	case SCM_SVC_ES: {
		switch (tz_cmd_id) {
		case SCM_SAVE_PARTITION_HASH_ID: {
			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
			struct qseecom_save_partition_hash_req *p_hash_req =
				(struct qseecom_save_partition_hash_req *)
				req_buf;
			/* Bounce buffer so TZ reads a flushed, page-aligned copy. */
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			/* NOTE(review): memset is redundant after kzalloc. */
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, p_hash_req->digest,
				SHA256_DIGEST_LENGTH);
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
			desc.args[0] = p_hash_req->partition_id;
			desc.args[1] = virt_to_phys(tzbuf);
			desc.args[2] = SHA256_DIGEST_LENGTH;
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		default: {
			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
				tz_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (tz_cmd_id) */
		break;
	} /* end of case SCM_SVC_ES */
	case SCM_SVC_TZSCHEDULER: {
		switch (qseos_cmd_id) {
		case QSEOS_APP_START_COMMAND: {
			/* Load (start) a TZ app from a pre-loaded image. */
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_START_ID;
			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
			/* Pre-4.0 QSEE takes the 32-bit request layout. */
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_app_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_SHUTDOWN_COMMAND: {
			struct qseecom_unload_app_ireq *req;

			req = (struct qseecom_unload_app_ireq *)req_buf;
			smc_id = TZ_OS_APP_SHUTDOWN_ID;
			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_LOOKUP_COMMAND: {
			/* Query whether an app (by name) is already loaded. */
			struct qseecom_check_app_ireq *req;
			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			req = (struct qseecom_check_app_ireq *)req_buf;
			pr_debug("Lookup app_name = %s\n", req->app_name);
			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_APP_LOOKUP_ID;
			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = strlen(req->app_name);
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_APP_REGION_NOTIFICATION: {
			/* Tell TZ where the apps memory region lives. */
			struct qsee_apps_region_info_ireq *req;
			struct qsee_apps_region_info_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
			desc.arginfo =
				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qsee_apps_region_info_ireq *)
					req_buf;
				desc.args[0] = req->addr;
				desc.args[1] = req->size;
			} else {
				req_64bit =
				(struct qsee_apps_region_info_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->addr;
				desc.args[1] = req_64bit->size;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
			/* Load the common services (cmnlib) image. */
			struct qseecom_load_lib_image_ireq *req;
			struct qseecom_load_lib_image_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_lib_image_ireq *)
					req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_lib_image_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_REGISTER_LISTENER: {
			struct qseecom_register_listener_ireq *req;
			struct qseecom_register_listener_64bit_ireq *req_64bit;

			desc.arginfo =
				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_register_listener_ireq *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->sb_ptr;
				desc.args[2] = req->sb_len;
			} else {
				req_64bit =
				(struct qseecom_register_listener_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->listener_id;
				desc.args[1] = req_64bit->sb_ptr;
				desc.args[2] = req_64bit->sb_len;
			}
			/*
			 * Probe for smcinvoke support: try the smcinvoke
			 * variant first; on failure fall back to the legacy
			 * id and clear the capability flag.
			 */
			qseecom.smcinvoke_support = true;
			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			if (ret) {
				qseecom.smcinvoke_support = false;
				smc_id = TZ_OS_REGISTER_LISTENER_ID;
				__qseecom_reentrancy_check_if_no_app_blocked(
					smc_id);
				ret = scm_call2(smc_id, &desc);
			}
			break;
		}
		case QSEOS_DEREGISTER_LISTENER: {
			struct qseecom_unregister_listener_ireq *req;

			req = (struct qseecom_unregister_listener_ireq *)
				req_buf;
			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
			/* Return a listener's response status to TZ. */
			struct qseecom_client_listener_data_irsp *req;

			req = (struct qseecom_client_listener_data_irsp *)
				req_buf;
			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
			desc.arginfo =
				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			desc.args[1] = req->status;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
			/* Listener response plus sglist whitelist info. */
			struct qseecom_client_listener_data_irsp *req;
			struct qseecom_client_listener_data_64bit_irsp *req_64;

			smc_id =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req =
				(struct qseecom_client_listener_data_irsp *)
				req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->status;
				desc.args[2] = req->sglistinfo_ptr;
				desc.args[3] = req->sglistinfo_len;
			} else {
				req_64 =
			(struct qseecom_client_listener_data_64bit_irsp *)
				req_buf;
				desc.args[0] = req_64->listener_id;
				desc.args[1] = req_64->status;
				desc.args[2] = req_64->sglistinfo_ptr;
				desc.args[3] = req_64->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_app_64bit_ireq *)req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}

		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
			/* Send a request/response buffer pair to a TZ app. */
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
			/* As above, with two extra sglist whitelist args. */
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
			struct qseecom_client_send_service_ireq *req;

			req = (struct qseecom_client_send_service_ireq *)
				req_buf;
			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
			desc.args[0] = req->key_type;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_ERASE_COMMAND: {
			smc_id = TZ_OS_RPMB_ERASE_ID;
			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_GENERATE_KEY: {
			/*
			 * Key-service commands below all copy the request
			 * (minus the leading command-id word) into a flushed
			 * bounce buffer passed to TZ by physical address.
			 */
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			/* NOTE(review): memset is redundant after kzalloc. */
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_GEN_KEY_ID;
			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_DELETE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_DEL_KEY_ID;
			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_SET_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_select_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_select_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_UPDATE_KEY_USERINFO: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION: {
			/*
			 * GP TEE (qteec) commands: open/invoke/close/cancel
			 * all share the same 5-arg (or 7-arg whitelist)
			 * marshalling of app id + req/resp buffers.
			 */
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_CLOSE_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_REQUEST_CANCELLATION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
			desc.arginfo =
				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
			struct qseecom_continue_blocked_request_ireq *req =
				(struct qseecom_continue_blocked_request_ireq *)
				req_buf;
			/* Pick the smc id matching the registration variant. */
			if (qseecom.smcinvoke_support)
				smc_id =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
			else
				smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
			desc.arginfo =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
			desc.args[0] = req->app_or_session_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		default: {
			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
						qseos_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /*end of switch (qsee_cmd_id)  */
		break;
	} /*end of case SCM_SVC_TZSCHEDULER*/
	default: {
		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
					svc_id);
		ret = -EINVAL;
		break;
	}
	} /*end of switch svc_id */
	/* Propagate the three SMC return words back to the caller. */
	scm_resp->result = desc.ret[0];
	scm_resp->resp_type = desc.ret[1];
	scm_resp->data = desc.ret[2];
	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
		scm_resp->result, scm_resp->resp_type, scm_resp->data);
	return ret;
}
1055
1056
1057static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1058 size_t cmd_len, void *resp_buf, size_t resp_len)
1059{
1060 if (!is_scm_armv8())
1061 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1062 resp_buf, resp_len);
1063 else
1064 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1065}
1066
1067static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1068 struct qseecom_register_listener_req *svc)
1069{
1070 struct qseecom_registered_listener_list *ptr;
1071 int unique = 1;
1072 unsigned long flags;
1073
1074 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1075 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1076 if (ptr->svc.listener_id == svc->listener_id) {
1077 pr_err("Service id: %u is already registered\n",
1078 ptr->svc.listener_id);
1079 unique = 0;
1080 break;
1081 }
1082 }
1083 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1084 return unique;
1085}
1086
1087static struct qseecom_registered_listener_list *__qseecom_find_svc(
1088 int32_t listener_id)
1089{
1090 struct qseecom_registered_listener_list *entry = NULL;
1091 unsigned long flags;
1092
1093 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1094 list_for_each_entry(entry,
1095 &qseecom.registered_listener_list_head, list) {
1096 if (entry->svc.listener_id == listener_id)
1097 break;
1098 }
1099 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1100
1101 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1102 pr_err("Service id: %u is not found\n", listener_id);
1103 return NULL;
1104 }
1105
1106 return entry;
1107}
1108
1109static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1110 struct qseecom_dev_handle *handle,
1111 struct qseecom_register_listener_req *listener)
1112{
1113 int ret = 0;
1114 struct qseecom_register_listener_ireq req;
1115 struct qseecom_register_listener_64bit_ireq req_64bit;
1116 struct qseecom_command_scm_resp resp;
1117 ion_phys_addr_t pa;
1118 void *cmd_buf = NULL;
1119 size_t cmd_len;
1120
1121 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001122 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001123 listener->ifd_data_fd);
1124 if (IS_ERR_OR_NULL(svc->ihandle)) {
1125 pr_err("Ion client could not retrieve the handle\n");
1126 return -ENOMEM;
1127 }
1128
1129 /* Get the physical address of the ION BUF */
1130 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1131 if (ret) {
1132 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1133 ret);
1134 return ret;
1135 }
1136 /* Populate the structure for sending scm call to load image */
1137 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1138 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1139 pr_err("ION memory mapping for listener shared buffer failed\n");
1140 return -ENOMEM;
1141 }
1142 svc->sb_phys = (phys_addr_t)pa;
1143
1144 if (qseecom.qsee_version < QSEE_VERSION_40) {
1145 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1146 req.listener_id = svc->svc.listener_id;
1147 req.sb_len = svc->sb_length;
1148 req.sb_ptr = (uint32_t)svc->sb_phys;
1149 cmd_buf = (void *)&req;
1150 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1151 } else {
1152 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1153 req_64bit.listener_id = svc->svc.listener_id;
1154 req_64bit.sb_len = svc->sb_length;
1155 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1156 cmd_buf = (void *)&req_64bit;
1157 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1158 }
1159
1160 resp.result = QSEOS_RESULT_INCOMPLETE;
1161
1162 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1163 &resp, sizeof(resp));
1164 if (ret) {
1165 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1166 return -EINVAL;
1167 }
1168
1169 if (resp.result != QSEOS_RESULT_SUCCESS) {
1170 pr_err("Error SB registration req: resp.result = %d\n",
1171 resp.result);
1172 return -EPERM;
1173 }
1174 return 0;
1175}
1176
1177static int qseecom_register_listener(struct qseecom_dev_handle *data,
1178 void __user *argp)
1179{
1180 int ret = 0;
1181 unsigned long flags;
1182 struct qseecom_register_listener_req rcvd_lstnr;
1183 struct qseecom_registered_listener_list *new_entry;
1184
1185 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1186 if (ret) {
1187 pr_err("copy_from_user failed\n");
1188 return ret;
1189 }
1190 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1191 rcvd_lstnr.sb_size))
1192 return -EFAULT;
1193
Zhen Kong3c674612018-09-06 22:51:27 -07001194 data->listener.id = rcvd_lstnr.listener_id;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001195 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001196 pr_err("Service %d is not unique and failed to register\n",
1197 rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001198 data->released = true;
1199 return -EBUSY;
1200 }
1201
1202 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1203 if (!new_entry)
1204 return -ENOMEM;
1205 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1206 new_entry->rcv_req_flag = 0;
1207
1208 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1209 new_entry->sb_length = rcvd_lstnr.sb_size;
1210 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1211 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001212 pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
1213 rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001214 kzfree(new_entry);
1215 return -ENOMEM;
1216 }
1217
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001218 init_waitqueue_head(&new_entry->rcv_req_wq);
1219 init_waitqueue_head(&new_entry->listener_block_app_wq);
1220 new_entry->send_resp_flag = 0;
1221 new_entry->listener_in_use = false;
1222 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1223 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1224 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1225
Zhen Kong3c674612018-09-06 22:51:27 -07001226 pr_warn("Service %d is registered\n", rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001227 return ret;
1228}
1229
Zhen Kong26e62742018-05-04 17:19:06 -07001230static void __qseecom_listener_abort_all(int abort)
1231{
1232 struct qseecom_registered_listener_list *entry = NULL;
1233 unsigned long flags;
1234
1235 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1236 list_for_each_entry(entry,
1237 &qseecom.registered_listener_list_head, list) {
1238 pr_debug("set abort %d for listener %d\n",
1239 abort, entry->svc.listener_id);
1240 entry->abort = abort;
1241 }
1242 if (abort)
1243 wake_up_interruptible_all(&qseecom.send_resp_wq);
1244 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1245}
1246
/*
 * Deregister the listener bound to @data with TZ and tear down its
 * local state: abort any in-flight waiters, unmap and free the shared
 * ION buffer, and remove/free the list entry.
 *
 * Note the 'exit' label frees the entry even when the TZ call failed,
 * so the listener is gone locally regardless of the return value.
 */
static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	int ret = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL; /* Retrieve phy addr */

	ptr_svc = __qseecom_find_svc(data->listener.id);
	if (!ptr_svc) {
		pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
		return -ENODATA;
	}

	/* Ask TZ to drop the listener registration. */
	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
					sizeof(req), &resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		goto exit;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		ret = -EPERM;
		goto exit;
	}

	/* Abort this handle and the listener, then wake its request waiters. */
	data->abort = 1;
	ptr_svc->abort = 1;
	wake_up_all(&ptr_svc->rcv_req_wq);

	/*
	 * Wait until only this ioctl holds a reference.  If the freezable
	 * wait is interrupted, -ERESTARTSYS is recorded but the loop keeps
	 * re-checking until the count drops.
	 */
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
		}
	}

exit:
	/* Release the kernel mapping and the ION buffer, if any. */
	if (ptr_svc->sb_virt) {
		ihandle = ptr_svc->ihandle;
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	list_del(&ptr_svc->list);
	kzfree(ptr_svc);

	data->released = true;
	pr_warn("Service %d is unregistered\n", data->listener.id);
	return ret;
}
1307
/*
 * Apply bus-bandwidth mode @mode through the msm_bus scaling client,
 * keeping the QSEE core clock vote in sync: the clock is dropped when
 * going INACTIVE and (re)enabled for any active mode.  On a failed
 * bandwidth request the clock vote is rolled back to its prior state.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* Undo the clock vote taken/dropped above. */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		/*
		 * NOTE(review): current_mode is recorded even when the
		 * bandwidth request above failed - confirm this is the
		 * intended bookkeeping.
		 */
		qseecom.current_mode = mode;
	}
	return ret;
}
1344
/*
 * Deferred work scheduled by the bw scale-down timer callback: drop the
 * bus/clock request to INACTIVE if the timer is still marked running.
 * Takes app_access_lock before qsee_bw_mutex to serialize against
 * in-flight commands.
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	/* Timer has fired; mark it idle regardless of the request outcome. */
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
1357
/*
 * Timer callback for bw_scale_down_timer: defers the actual scale-down
 * (which takes mutexes and so cannot run in timer context) to the
 * bw_inactive_req_ws workqueue item.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1362
1363static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1364{
1365 struct qseecom_clk *qclk;
1366 int ret = 0;
1367
1368 mutex_lock(&clk_access_lock);
1369 if (ce == CLK_QSEE)
1370 qclk = &qseecom.qsee;
1371 else
1372 qclk = &qseecom.ce_drv;
1373
1374 if (qclk->clk_access_cnt > 2) {
1375 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1376 ret = -EINVAL;
1377 goto err_dec_ref_cnt;
1378 }
1379 if (qclk->clk_access_cnt == 2)
1380 qclk->clk_access_cnt--;
1381
1382err_dec_ref_cnt:
1383 mutex_unlock(&clk_access_lock);
1384 return ret;
1385}
1386
1387
1388static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1389{
1390 int32_t ret = 0;
1391 int32_t request_mode = INACTIVE;
1392
1393 mutex_lock(&qsee_bw_mutex);
1394 if (mode == 0) {
1395 if (qseecom.cumulative_mode > MEDIUM)
1396 request_mode = HIGH;
1397 else
1398 request_mode = qseecom.cumulative_mode;
1399 } else {
1400 request_mode = mode;
1401 }
1402
1403 ret = __qseecom_set_msm_bus_request(request_mode);
1404 if (ret) {
1405 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1406 ret, request_mode);
1407 goto err_scale_timer;
1408 }
1409
1410 if (qseecom.timer_running) {
1411 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1412 if (ret) {
1413 pr_err("Failed to decrease clk ref count.\n");
1414 goto err_scale_timer;
1415 }
1416 del_timer_sync(&(qseecom.bw_scale_down_timer));
1417 qseecom.timer_running = false;
1418 }
1419err_scale_timer:
1420 mutex_unlock(&qsee_bw_mutex);
1421 return ret;
1422}
1423
1424
1425static int qseecom_unregister_bus_bandwidth_needs(
1426 struct qseecom_dev_handle *data)
1427{
1428 int32_t ret = 0;
1429
1430 qseecom.cumulative_mode -= data->mode;
1431 data->mode = INACTIVE;
1432
1433 return ret;
1434}
1435
1436static int __qseecom_register_bus_bandwidth_needs(
1437 struct qseecom_dev_handle *data, uint32_t request_mode)
1438{
1439 int32_t ret = 0;
1440
1441 if (data->mode == INACTIVE) {
1442 qseecom.cumulative_mode += request_mode;
1443 data->mode = request_mode;
1444 } else {
1445 if (data->mode != request_mode) {
1446 qseecom.cumulative_mode -= data->mode;
1447 qseecom.cumulative_mode += request_mode;
1448 data->mode = request_mode;
1449 }
1450 }
1451 return ret;
1452}
1453
1454static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1455{
1456 int ret = 0;
1457
1458 ret = qsee_vote_for_clock(data, CLK_DFAB);
1459 if (ret) {
1460 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1461 goto perf_enable_exit;
1462 }
1463 ret = qsee_vote_for_clock(data, CLK_SFPB);
1464 if (ret) {
1465 qsee_disable_clock_vote(data, CLK_DFAB);
1466 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1467 goto perf_enable_exit;
1468 }
1469
1470perf_enable_exit:
1471 return ret;
1472}
1473
1474static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1475 void __user *argp)
1476{
1477 int32_t ret = 0;
1478 int32_t req_mode;
1479
1480 if (qseecom.no_clock_support)
1481 return 0;
1482
1483 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1484 if (ret) {
1485 pr_err("copy_from_user failed\n");
1486 return ret;
1487 }
1488 if (req_mode > HIGH) {
1489 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1490 return -EINVAL;
1491 }
1492
1493 /*
1494 * Register bus bandwidth needs if bus scaling feature is enabled;
1495 * otherwise, qseecom enable/disable clocks for the client directly.
1496 */
1497 if (qseecom.support_bus_scaling) {
1498 mutex_lock(&qsee_bw_mutex);
1499 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1500 mutex_unlock(&qsee_bw_mutex);
1501 } else {
1502 pr_debug("Bus scaling feature is NOT enabled\n");
1503 pr_debug("request bandwidth mode %d for the client\n",
1504 req_mode);
1505 if (req_mode != INACTIVE) {
1506 ret = qseecom_perf_enable(data);
1507 if (ret)
1508 pr_err("Failed to vote for clock with err %d\n",
1509 ret);
1510 } else {
1511 qsee_disable_clock_vote(data, CLK_DFAB);
1512 qsee_disable_clock_vote(data, CLK_SFPB);
1513 }
1514 }
1515 return ret;
1516}
1517
1518static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1519{
1520 if (qseecom.no_clock_support)
1521 return;
1522
1523 mutex_lock(&qsee_bw_mutex);
1524 qseecom.bw_scale_down_timer.expires = jiffies +
1525 msecs_to_jiffies(duration);
1526 mod_timer(&(qseecom.bw_scale_down_timer),
1527 qseecom.bw_scale_down_timer.expires);
1528 qseecom.timer_running = true;
1529 mutex_unlock(&qsee_bw_mutex);
1530}
1531
1532static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1533{
1534 if (!qseecom.support_bus_scaling)
1535 qsee_disable_clock_vote(data, CLK_SFPB);
1536 else
1537 __qseecom_add_bw_scale_down_timer(
1538 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1539}
1540
1541static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1542{
1543 int ret = 0;
1544
1545 if (qseecom.support_bus_scaling) {
1546 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1547 if (ret)
1548 pr_err("Failed to set bw MEDIUM.\n");
1549 } else {
1550 ret = qsee_vote_for_clock(data, CLK_SFPB);
1551 if (ret)
1552 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1553 }
1554 return ret;
1555}
1556
1557static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1558 void __user *argp)
1559{
1560 ion_phys_addr_t pa;
1561 int32_t ret;
1562 struct qseecom_set_sb_mem_param_req req;
1563 size_t len;
1564
1565 /* Copy the relevant information needed for loading the image */
1566 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1567 return -EFAULT;
1568
1569 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1570 (req.sb_len == 0)) {
1571 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1572 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1573 return -EFAULT;
1574 }
1575 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1576 req.sb_len))
1577 return -EFAULT;
1578
1579 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001580 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001581 req.ifd_data_fd);
1582 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1583 pr_err("Ion client could not retrieve the handle\n");
1584 return -ENOMEM;
1585 }
1586 /* Get the physical address of the ION BUF */
1587 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1588 if (ret) {
1589
1590 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1591 ret);
1592 return ret;
1593 }
1594
1595 if (len < req.sb_len) {
1596 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1597 req.sb_len, len);
1598 return -EINVAL;
1599 }
1600 /* Populate the structure for sending scm call to load image */
1601 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1602 data->client.ihandle);
1603 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1604 pr_err("ION memory mapping for client shared buf failed\n");
1605 return -ENOMEM;
1606 }
1607 data->client.sb_phys = (phys_addr_t)pa;
1608 data->client.sb_length = req.sb_len;
1609 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1610 return 0;
1611}
1612
Zhen Kong26e62742018-05-04 17:19:06 -07001613static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1614 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001615{
1616 int ret;
1617
1618 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001619 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001620}
1621
1622static int __qseecom_reentrancy_listener_has_sent_rsp(
1623 struct qseecom_dev_handle *data,
1624 struct qseecom_registered_listener_list *ptr_svc)
1625{
1626 int ret;
1627
1628 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001629 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001630}
1631
1632static void __qseecom_clean_listener_sglistinfo(
1633 struct qseecom_registered_listener_list *ptr_svc)
1634{
1635 if (ptr_svc->sglist_cnt) {
1636 memset(ptr_svc->sglistinfo_ptr, 0,
1637 SGLISTINFO_TABLE_SIZE);
1638 ptr_svc->sglist_cnt = 0;
1639 }
1640}
1641
/*
 * Service TZ's QSEOS_RESULT_INCOMPLETE loop: while TZ keeps reporting
 * INCOMPLETE, wake the listener named in resp->data, block (with all
 * signals masked) until that listener posts its response or an abort
 * hits, then send a LISTENER_DATA_RSP command (32- or 64-bit layout by
 * QSEE version, whitelist variant when an sglist table is attached)
 * back to TZ and re-read the result.
 *
 * Returns 0 on clean completion, a negative errno on listener lookup
 * failure, abort, or SCM failure.
 */
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
									= {0};
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/*
		 * NOTE(review): after list_for_each_entry() runs to
		 * completion the cursor is never NULL (it points at the
		 * list head's container); the id-mismatch check below is
		 * what actually catches "not found" - confirm.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_err("Service %d does not exist\n",
				lstnr);
			rc = -ERESTARTSYS;
			ptr_svc = NULL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->abort == 1) {
			pr_err("Service %d abort %d\n",
						lstnr, ptr_svc->abort);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);
		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		do {
			/*
			 * When reentrancy is not supported, check global
			 * send_resp_flag; otherwise, check this listener's
			 * send_resp_flag.
			 */
			if (!qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}

			if (qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort || ptr_svc->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
err_resp:
		/* Even on failure, report status back to TZ below. */
		qseecom.send_resp_flag = 0;
		if (ptr_svc) {
			ptr_svc->send_resp_flag = 0;
			table = ptr_svc->sglistinfo_ptr;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			if (table) {
				send_data_rsp.sglistinfo_ptr =
					(uint32_t)virt_to_phys(table);
				send_data_rsp.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				/* Make the table visible to TZ (DMA). */
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			if (table) {
				send_data_rsp_64bit.sglistinfo_ptr =
					virt_to_phys(table);
				send_data_rsp_64bit.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* First u32 of either layout is the command id. */
		if (qseecom.whitelist_support == false || table == NULL)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}

		/* RPMB/SSD listeners need the QSEE clock held across the call. */
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		if (ptr_svc) {
			ptr_svc->listener_in_use = false;
			__qseecom_clean_listener_sglistinfo(ptr_svc);
		}
		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
				__qseecom_disable_clk(CLK_QSEE);
			return ret;
		}
		pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
			status, resp->result, data->client.app_id, lstnr);
		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
		}
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
			__qseecom_disable_clk(CLK_QSEE);

	}
	/* A recorded listener-path failure takes precedence over ret. */
	if (rc)
		return rc;

	return ret;
}
1832
/*
 * Handle QSEOS_RESULT_BLOCKED_ON_LISTENER for a reentrant TZ app: sleep
 * (signals masked, app_access_lock dropped) until the blocking listener
 * is free, then send QSEOS_CONTINUE_BLOCKED_REQ_COMMAND to TZ - keyed
 * by session id when smcinvoke is supported, else by app id, with a
 * one-shot legacy retry - and loop while TZ reports blocked again.
 * On exit *resp holds TZ's final response; anything other than
 * QSEOS_RESULT_INCOMPLETE is treated as an error.
 */
static int __qseecom_process_reentrancy_blocked_on_listener(
				struct qseecom_command_scm_resp *resp,
				struct qseecom_registered_app_list *ptr_app,
				struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	unsigned int session_id;
	sigset_t new_sigset;
	sigset_t old_sigset;
	unsigned long flags;
	bool found_app = false;

	if (!resp || !data) {
		pr_err("invalid resp or data pointer\n");
		ret = -EINVAL;
		goto exit;
	}

	/* find app_id & img_name from list */
	if (!ptr_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
					list) {
			if ((ptr_app->app_id == data->client.app_id) &&
				(!strcmp(ptr_app->app_name,
					data->client.app_name))) {
				found_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
					flags);
		if (!found_app) {
			pr_err("app_id %d (%s) is not found\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -ENOENT;
			goto exit;
		}
	}

	do {
		/* resp_type carries the blocked session id from TZ. */
		session_id = resp->resp_type;
		list_ptr = __qseecom_find_svc(resp->data);
		if (!list_ptr) {
			pr_err("Invalid listener ID %d\n", resp->data);
			ret = -ENODATA;
			goto exit;
		}
		ptr_app->blocked_on_listener_id = resp->data;

		pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
			resp->data, list_ptr->listener_in_use,
			session_id, data->client.app_id);

		/* sleep until listener is available */
		sigfillset(&new_sigset);
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		do {
			qseecom.app_block_ref_cnt++;
			ptr_app->app_blocked = true;
			/* Drop the global lock while blocked so other
			 * apps/listeners can make progress.
			 */
			mutex_unlock(&app_access_lock);
			wait_event_freezable(
				list_ptr->listener_block_app_wq,
				!list_ptr->listener_in_use);
			mutex_lock(&app_access_lock);
			ptr_app->app_blocked = false;
			qseecom.app_block_ref_cnt--;
		} while (list_ptr->listener_in_use);

		sigprocmask(SIG_SETMASK, &old_sigset, NULL);

		ptr_app->blocked_on_listener_id = 0;
		pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
			resp->data, session_id, data->client.app_id);

		/* notify TZ that listener is available */
		ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;

		if (qseecom.smcinvoke_support)
			ireq.app_or_session_id = session_id;
		else
			ireq.app_or_session_id = data->client.app_id;

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					&ireq, sizeof(ireq),
					&continue_resp, sizeof(continue_resp));
		/*
		 * NOTE(review): a failure here without smcinvoke support
		 * is not handled - continue_resp is consumed below
		 * regardless.  Confirm that is intentional.
		 */
		if (ret && qseecom.smcinvoke_support) {
			/* retry with legacy cmd */
			qseecom.smcinvoke_support = false;
			ireq.app_or_session_id = data->client.app_id;
			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				&ireq, sizeof(ireq),
				&continue_resp, sizeof(continue_resp));
			qseecom.smcinvoke_support = true;
			if (ret) {
				pr_err("unblock app %d or session %d fail\n",
					data->client.app_id, session_id);
				goto exit;
			}
		}
		resp->result = continue_resp.result;
		resp->resp_type = continue_resp.resp_type;
		resp->data = continue_resp.data;
		pr_debug("unblock resp = %d\n", resp->result);
	} while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);

	if (resp->result != QSEOS_RESULT_INCOMPLETE) {
		pr_err("Unexpected unblock resp %d\n", resp->result);
		ret = -EINVAL;
	}
exit:
	return ret;
}
1951
/*
 * Reentrancy-aware handler for QSEOS_RESULT_INCOMPLETE responses.
 *
 * Loops while TZ keeps reporting INCOMPLETE: wakes the listener service
 * identified by resp->data, sleeps (with all signals blocked and
 * app_access_lock dropped) until that listener posts its response, then
 * sends the listener's response back to TZ via another SCM call.  The new
 * TZ response is written into *resp, so the loop continues until TZ stops
 * asking for listener services.
 *
 * @data: client/session handle issuing the original command.
 * @resp: in/out TZ response; overwritten by each follow-up SCM call.
 *
 * Returns 0 on success; otherwise a negative errno.  A listener-lookup
 * failure is latched in @rc, but a failure response is still sent to TZ
 * (the err_resp path) so TZ is not left waiting.
 *
 * Locking: caller holds app_access_lock; it is released only around the
 * freezable wait for the listener response and re-acquired before any
 * shared state is touched again.
 */
static int __qseecom_reentrancy_process_incomplete_cmd(
					struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;		/* latched listener-lookup error, returned last */
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
									= {0};
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;	/* listener status reported back to TZ */
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/*
		 * NOTE(review): after list_for_each_entry() without a break,
		 * ptr_svc is not NULL (it points at the list head container);
		 * the not-registered case is actually caught by the
		 * listener_id mismatch check below.  This NULL check only
		 * guards the pre-loop initialization — confirm intent.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		/* loop ended without match: requested listener not registered */
		if (ptr_svc->svc.listener_id != lstnr) {
			pr_err("Service %d does not exist\n",
				lstnr);
			rc = -ERESTARTSYS;
			ptr_svc = NULL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->abort == 1) {
			pr_err("Service %d abort %d\n",
				lstnr, ptr_svc->abort);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);

		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		/* unlock mutex btw waking listener and sleep-wait */
		mutex_unlock(&app_access_lock);
		do {
			if (!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);
		/* lock mutex again after resp sent */
		mutex_lock(&app_access_lock);
		ptr_svc->send_resp_flag = 0;
		qseecom.send_resp_flag = 0;

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort || ptr_svc->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
err_resp:
		/* even on lookup failure, tell TZ so it can unblock the TA */
		if (ptr_svc)
			table = ptr_svc->sglistinfo_ptr;
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			if (table) {
				send_data_rsp.sglistinfo_ptr =
					(uint32_t)virt_to_phys(table);
				send_data_rsp.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				/* TZ reads the table via its phys addr */
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			if (table) {
				send_data_rsp_64bit.sglistinfo_ptr =
					virt_to_phys(table);
				send_data_rsp_64bit.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* first field of both rsp structs is the command id */
		if (qseecom.whitelist_support == false || table == NULL)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}
		/* RPMB listener traffic needs the QSEE clock enabled */
		if (lstnr == RPMB_SERVICE) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		if (ptr_svc) {
			ptr_svc->listener_in_use = false;
			__qseecom_clean_listener_sglistinfo(ptr_svc);
			/* let any TA blocked on this listener retry */
			wake_up_interruptible(&ptr_svc->listener_block_app_wq);
		}

		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			goto exit;
		}

		switch (resp->result) {
		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
				lstnr, data->client.app_id, resp->data);
			if (lstnr == resp->data) {
				pr_err("lstnr %d should not be blocked!\n",
					lstnr);
				ret = -EINVAL;
				goto exit;
			}
			ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, NULL, data);
			if (ret) {
				pr_err("failed to process App(%d) %s blocked on listener %d\n",
					data->client.app_id,
					data->client.app_name, resp->data);
				goto exit;
			}
			/* fall through: unblock resp may be SUCCESS/INCOMPLETE */
		case QSEOS_RESULT_SUCCESS:
		case QSEOS_RESULT_INCOMPLETE:
			break;
		default:
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
			goto exit;
		}
exit:
		if (lstnr == RPMB_SERVICE)
			__qseecom_disable_clk(CLK_QSEE);

	}
	/* a latched listener-lookup error wins over the last SCM result */
	if (rc)
		return rc;

	return ret;
}
2154
2155/*
2156 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2157 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2158 * So, needs to first check if no app blocked before sending OS level scm call,
2159 * then wait until all apps are unblocked.
2160 */
2161static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2162{
2163 sigset_t new_sigset, old_sigset;
2164
2165 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2166 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2167 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2168 /* thread sleep until this app unblocked */
2169 while (qseecom.app_block_ref_cnt > 0) {
2170 sigfillset(&new_sigset);
2171 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2172 mutex_unlock(&app_access_lock);
2173 do {
2174 if (!wait_event_freezable(qseecom.app_block_wq,
2175 (qseecom.app_block_ref_cnt == 0)))
2176 break;
2177 } while (1);
2178 mutex_lock(&app_access_lock);
2179 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2180 }
2181 }
2182}
2183
2184/*
2185 * scm_call of send data will fail if this TA is blocked or there are more
2186 * than one TA requesting listener services; So, first check to see if need
2187 * to wait.
2188 */
2189static void __qseecom_reentrancy_check_if_this_app_blocked(
2190 struct qseecom_registered_app_list *ptr_app)
2191{
2192 sigset_t new_sigset, old_sigset;
2193
2194 if (qseecom.qsee_reentrancy_support) {
Zhen Kongdea10592018-07-30 17:50:10 -07002195 ptr_app->check_block++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002196 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2197 /* thread sleep until this app unblocked */
2198 sigfillset(&new_sigset);
2199 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2200 mutex_unlock(&app_access_lock);
2201 do {
2202 if (!wait_event_freezable(qseecom.app_block_wq,
2203 (!ptr_app->app_blocked &&
2204 qseecom.app_block_ref_cnt <= 1)))
2205 break;
2206 } while (1);
2207 mutex_lock(&app_access_lock);
2208 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2209 }
Zhen Kongdea10592018-07-30 17:50:10 -07002210 ptr_app->check_block--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002211 }
2212}
2213
2214static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2215 uint32_t *app_id)
2216{
2217 int32_t ret;
2218 struct qseecom_command_scm_resp resp;
2219 bool found_app = false;
2220 struct qseecom_registered_app_list *entry = NULL;
2221 unsigned long flags = 0;
2222
2223 if (!app_id) {
2224 pr_err("Null pointer to app_id\n");
2225 return -EINVAL;
2226 }
2227 *app_id = 0;
2228
2229 /* check if app exists and has been registered locally */
2230 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2231 list_for_each_entry(entry,
2232 &qseecom.registered_app_list_head, list) {
2233 if (!strcmp(entry->app_name, req.app_name)) {
2234 found_app = true;
2235 break;
2236 }
2237 }
2238 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2239 if (found_app) {
2240 pr_debug("Found app with id %d\n", entry->app_id);
2241 *app_id = entry->app_id;
2242 return 0;
2243 }
2244
2245 memset((void *)&resp, 0, sizeof(resp));
2246
2247 /* SCM_CALL to check if app_id for the mentioned app exists */
2248 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2249 sizeof(struct qseecom_check_app_ireq),
2250 &resp, sizeof(resp));
2251 if (ret) {
2252 pr_err("scm_call to check if app is already loaded failed\n");
2253 return -EINVAL;
2254 }
2255
2256 if (resp.result == QSEOS_RESULT_FAILURE)
2257 return 0;
2258
2259 switch (resp.resp_type) {
2260 /*qsee returned listener type response */
2261 case QSEOS_LISTENER_ID:
2262 pr_err("resp type is of listener type instead of app");
2263 return -EINVAL;
2264 case QSEOS_APP_ID:
2265 *app_id = resp.data;
2266 return 0;
2267 default:
2268 pr_err("invalid resp type (%d) from qsee",
2269 resp.resp_type);
2270 return -ENODEV;
2271 }
2272}
2273
2274static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2275{
2276 struct qseecom_registered_app_list *entry = NULL;
2277 unsigned long flags = 0;
2278 u32 app_id = 0;
2279 struct ion_handle *ihandle; /* Ion handle */
2280 struct qseecom_load_img_req load_img_req;
2281 int32_t ret = 0;
2282 ion_phys_addr_t pa = 0;
2283 size_t len;
2284 struct qseecom_command_scm_resp resp;
2285 struct qseecom_check_app_ireq req;
2286 struct qseecom_load_app_ireq load_req;
2287 struct qseecom_load_app_64bit_ireq load_req_64bit;
2288 void *cmd_buf = NULL;
2289 size_t cmd_len;
2290 bool first_time = false;
2291
2292 /* Copy the relevant information needed for loading the image */
2293 if (copy_from_user(&load_img_req,
2294 (void __user *)argp,
2295 sizeof(struct qseecom_load_img_req))) {
2296 pr_err("copy_from_user failed\n");
2297 return -EFAULT;
2298 }
2299
2300 /* Check and load cmnlib */
2301 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2302 if (!qseecom.commonlib_loaded &&
2303 load_img_req.app_arch == ELFCLASS32) {
2304 ret = qseecom_load_commonlib_image(data, "cmnlib");
2305 if (ret) {
2306 pr_err("failed to load cmnlib\n");
2307 return -EIO;
2308 }
2309 qseecom.commonlib_loaded = true;
2310 pr_debug("cmnlib is loaded\n");
2311 }
2312
2313 if (!qseecom.commonlib64_loaded &&
2314 load_img_req.app_arch == ELFCLASS64) {
2315 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2316 if (ret) {
2317 pr_err("failed to load cmnlib64\n");
2318 return -EIO;
2319 }
2320 qseecom.commonlib64_loaded = true;
2321 pr_debug("cmnlib64 is loaded\n");
2322 }
2323 }
2324
2325 if (qseecom.support_bus_scaling) {
2326 mutex_lock(&qsee_bw_mutex);
2327 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2328 mutex_unlock(&qsee_bw_mutex);
2329 if (ret)
2330 return ret;
2331 }
2332
2333 /* Vote for the SFPB clock */
2334 ret = __qseecom_enable_clk_scale_up(data);
2335 if (ret)
2336 goto enable_clk_err;
2337
2338 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2339 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2340 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2341
2342 ret = __qseecom_check_app_exists(req, &app_id);
2343 if (ret < 0)
2344 goto loadapp_err;
2345
2346 if (app_id) {
2347 pr_debug("App id %d (%s) already exists\n", app_id,
2348 (char *)(req.app_name));
2349 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2350 list_for_each_entry(entry,
2351 &qseecom.registered_app_list_head, list){
2352 if (entry->app_id == app_id) {
2353 entry->ref_cnt++;
2354 break;
2355 }
2356 }
2357 spin_unlock_irqrestore(
2358 &qseecom.registered_app_list_lock, flags);
2359 ret = 0;
2360 } else {
2361 first_time = true;
2362 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2363 (char *)(load_img_req.img_name));
2364 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002365 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002366 load_img_req.ifd_data_fd);
2367 if (IS_ERR_OR_NULL(ihandle)) {
2368 pr_err("Ion client could not retrieve the handle\n");
2369 ret = -ENOMEM;
2370 goto loadapp_err;
2371 }
2372
2373 /* Get the physical address of the ION BUF */
2374 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2375 if (ret) {
2376 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2377 ret);
2378 goto loadapp_err;
2379 }
2380 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2381 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2382 len, load_img_req.mdt_len,
2383 load_img_req.img_len);
2384 ret = -EINVAL;
2385 goto loadapp_err;
2386 }
2387 /* Populate the structure for sending scm call to load image */
2388 if (qseecom.qsee_version < QSEE_VERSION_40) {
2389 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2390 load_req.mdt_len = load_img_req.mdt_len;
2391 load_req.img_len = load_img_req.img_len;
2392 strlcpy(load_req.app_name, load_img_req.img_name,
2393 MAX_APP_NAME_SIZE);
2394 load_req.phy_addr = (uint32_t)pa;
2395 cmd_buf = (void *)&load_req;
2396 cmd_len = sizeof(struct qseecom_load_app_ireq);
2397 } else {
2398 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2399 load_req_64bit.mdt_len = load_img_req.mdt_len;
2400 load_req_64bit.img_len = load_img_req.img_len;
2401 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2402 MAX_APP_NAME_SIZE);
2403 load_req_64bit.phy_addr = (uint64_t)pa;
2404 cmd_buf = (void *)&load_req_64bit;
2405 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2406 }
2407
2408 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2409 ION_IOC_CLEAN_INV_CACHES);
2410 if (ret) {
2411 pr_err("cache operation failed %d\n", ret);
2412 goto loadapp_err;
2413 }
2414
2415 /* SCM_CALL to load the app and get the app_id back */
2416 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2417 cmd_len, &resp, sizeof(resp));
2418 if (ret) {
2419 pr_err("scm_call to load app failed\n");
2420 if (!IS_ERR_OR_NULL(ihandle))
2421 ion_free(qseecom.ion_clnt, ihandle);
2422 ret = -EINVAL;
2423 goto loadapp_err;
2424 }
2425
2426 if (resp.result == QSEOS_RESULT_FAILURE) {
2427 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2428 if (!IS_ERR_OR_NULL(ihandle))
2429 ion_free(qseecom.ion_clnt, ihandle);
2430 ret = -EFAULT;
2431 goto loadapp_err;
2432 }
2433
2434 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2435 ret = __qseecom_process_incomplete_cmd(data, &resp);
2436 if (ret) {
2437 pr_err("process_incomplete_cmd failed err: %d\n",
2438 ret);
2439 if (!IS_ERR_OR_NULL(ihandle))
2440 ion_free(qseecom.ion_clnt, ihandle);
2441 ret = -EFAULT;
2442 goto loadapp_err;
2443 }
2444 }
2445
2446 if (resp.result != QSEOS_RESULT_SUCCESS) {
2447 pr_err("scm_call failed resp.result unknown, %d\n",
2448 resp.result);
2449 if (!IS_ERR_OR_NULL(ihandle))
2450 ion_free(qseecom.ion_clnt, ihandle);
2451 ret = -EFAULT;
2452 goto loadapp_err;
2453 }
2454
2455 app_id = resp.data;
2456
2457 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2458 if (!entry) {
2459 ret = -ENOMEM;
2460 goto loadapp_err;
2461 }
2462 entry->app_id = app_id;
2463 entry->ref_cnt = 1;
2464 entry->app_arch = load_img_req.app_arch;
2465 /*
2466 * keymaster app may be first loaded as "keymaste" by qseecomd,
2467 * and then used as "keymaster" on some targets. To avoid app
2468 * name checking error, register "keymaster" into app_list and
2469 * thread private data.
2470 */
2471 if (!strcmp(load_img_req.img_name, "keymaste"))
2472 strlcpy(entry->app_name, "keymaster",
2473 MAX_APP_NAME_SIZE);
2474 else
2475 strlcpy(entry->app_name, load_img_req.img_name,
2476 MAX_APP_NAME_SIZE);
2477 entry->app_blocked = false;
2478 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002479 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002480
2481 /* Deallocate the handle */
2482 if (!IS_ERR_OR_NULL(ihandle))
2483 ion_free(qseecom.ion_clnt, ihandle);
2484
2485 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2486 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2487 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2488 flags);
2489
2490 pr_warn("App with id %u (%s) now loaded\n", app_id,
2491 (char *)(load_img_req.img_name));
2492 }
2493 data->client.app_id = app_id;
2494 data->client.app_arch = load_img_req.app_arch;
2495 if (!strcmp(load_img_req.img_name, "keymaste"))
2496 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2497 else
2498 strlcpy(data->client.app_name, load_img_req.img_name,
2499 MAX_APP_NAME_SIZE);
2500 load_img_req.app_id = app_id;
2501 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2502 pr_err("copy_to_user failed\n");
2503 ret = -EFAULT;
2504 if (first_time == true) {
2505 spin_lock_irqsave(
2506 &qseecom.registered_app_list_lock, flags);
2507 list_del(&entry->list);
2508 spin_unlock_irqrestore(
2509 &qseecom.registered_app_list_lock, flags);
2510 kzfree(entry);
2511 }
2512 }
2513
2514loadapp_err:
2515 __qseecom_disable_clk_scale_down(data);
2516enable_clk_err:
2517 if (qseecom.support_bus_scaling) {
2518 mutex_lock(&qsee_bw_mutex);
2519 qseecom_unregister_bus_bandwidth_needs(data);
2520 mutex_unlock(&qsee_bw_mutex);
2521 }
2522 return ret;
2523}
2524
2525static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2526{
2527 int ret = 1; /* Set unload app */
2528
2529 wake_up_all(&qseecom.send_resp_wq);
2530 if (qseecom.qsee_reentrancy_support)
2531 mutex_unlock(&app_access_lock);
2532 while (atomic_read(&data->ioctl_count) > 1) {
2533 if (wait_event_freezable(data->abort_wq,
2534 atomic_read(&data->ioctl_count) <= 1)) {
2535 pr_err("Interrupted from abort\n");
2536 ret = -ERESTARTSYS;
2537 break;
2538 }
2539 }
2540 if (qseecom.qsee_reentrancy_support)
2541 mutex_lock(&app_access_lock);
2542 return ret;
2543}
2544
2545static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2546{
2547 int ret = 0;
2548
2549 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2550 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2551 ion_free(qseecom.ion_clnt, data->client.ihandle);
2552 data->client.ihandle = NULL;
2553 }
2554 return ret;
2555}
2556
/*
 * Unload (or just unreference) the trusted app bound to @data.
 *
 * @data:      client handle; identifies the app by app_id + app_name.
 * @app_crash: true when the client died; forces a TZ unload unless the app
 *             is currently blocked on / checking a listener.
 *
 * Returns 0 on success or a negative errno.  Regardless of outcome, the
 * client's ION shared buffer is unmapped and data->released is set.
 *
 * The ref-count bookkeeping deliberately lives under the unload_exit label
 * so it runs on both success and error paths once found_app is true.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;
	bool found_app = false;
	bool found_dead_app = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* matches both "keymaste" and "keymaster" (prefix compare) */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	/* drain other ioctls, then wait until no TZ app is blocked */
	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/*
					 * don't force-unload an app that is
					 * mid-listener-interaction
					 */
					if (ptr_app->app_blocked ||
							ptr_app->check_block)
						app_crash = false;
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				/* same id, different name: stale entry */
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for sending scm call to load image */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
								req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
					data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
					data->client.app_id);
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
									ret);
				goto unload_exit;
			}
		}
	}

unload_exit:
	/* ref-count bookkeeping runs on success and error paths alike */
	if (found_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2680
2681static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2682 unsigned long virt)
2683{
2684 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2685}
2686
2687static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2688 unsigned long virt)
2689{
2690 return (uintptr_t)data->client.sb_virt +
2691 (virt - data->client.user_virt_sb_base);
2692}
2693
2694int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2695 struct qseecom_send_svc_cmd_req *req_ptr,
2696 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2697{
2698 int ret = 0;
2699 void *req_buf = NULL;
2700
2701 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2702 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2703 req_ptr, send_svc_ireq_ptr);
2704 return -EINVAL;
2705 }
2706
2707 /* Clients need to ensure req_buf is at base offset of shared buffer */
2708 if ((uintptr_t)req_ptr->cmd_req_buf !=
2709 data_ptr->client.user_virt_sb_base) {
2710 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2711 return -EINVAL;
2712 }
2713
2714 if (data_ptr->client.sb_length <
2715 sizeof(struct qseecom_rpmb_provision_key)) {
2716 pr_err("shared buffer is too small to hold key type\n");
2717 return -EINVAL;
2718 }
2719 req_buf = data_ptr->client.sb_virt;
2720
2721 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2722 send_svc_ireq_ptr->key_type =
2723 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2724 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2725 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2726 data_ptr, (uintptr_t)req_ptr->resp_buf));
2727 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2728
2729 return ret;
2730}
2731
2732int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2733 struct qseecom_send_svc_cmd_req *req_ptr,
2734 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2735{
2736 int ret = 0;
2737 uint32_t reqd_len_sb_in = 0;
2738
2739 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2740 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2741 req_ptr, send_svc_ireq_ptr);
2742 return -EINVAL;
2743 }
2744
2745 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2746 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2747 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2748 pr_err("Required: %u, Available: %zu\n",
2749 reqd_len_sb_in, data_ptr->client.sb_length);
2750 return -ENOMEM;
2751 }
2752
2753 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2754 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2755 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2756 data_ptr, (uintptr_t)req_ptr->resp_buf));
2757 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2758
2759 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2760 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2761
2762
2763 return ret;
2764}
2765
/*
 * Validate a client's send-service-cmd request against its shared buffer:
 * non-NULL pointers, initialized client state, both buffers fully inside
 * [user_virt_sb_base, user_virt_sb_base + sb_length), and no integer
 * overflow in any pointer+length arithmetic.
 *
 * Returns 0 if valid, -EINVAL on any bad/overflowing input, -ENOMEM when
 * the combined lengths exceed the shared buffer.  Check order matters only
 * for which diagnostic is printed.
 */
static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_svc_cmd_req *req)
{
	/* basic pointer sanity */
	if (!req || !req->resp_buf || !req->cmd_req_buf) {
		pr_err("req or cmd buffer or response buffer is null\n");
		return -EINVAL;
	}

	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	/* shared buffer must be set up before sending service cmds */
	if (data->client.sb_virt == NULL) {
		pr_err("sb_virt null\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base == 0) {
		pr_err("user_virt_sb_base is null\n");
		return -EINVAL;
	}

	if (data->client.sb_length == 0) {
		pr_err("sb_length is 0\n");
		return -EINVAL;
	}

	/* both buffers must start inside the shared buffer window */
	if (((uintptr_t)req->cmd_req_buf <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* guard the sum below against u32 wraparound */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
				data->client.sb_length);
		return -ENOMEM;
	}
	/* guard the pointer+length sums below against wraparound */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* both buffers must also END inside the shared buffer window */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
	    (((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
2850
/*
 * qseecom_send_service_cmd() - handle a secure-service command ioctl
 * (RPMB provisioning/erase/status and FSM key commands).
 * @data: per-fd driver handle; its shared buffer carries cmd/resp payloads.
 * @argp: userspace pointer to a struct qseecom_send_svc_cmd_req.
 *
 * Copies and validates the request, builds the TZ-specific ireq for the
 * command family, votes for bus/clock bandwidth, flushes the shared buffer,
 * issues the SCM call, then invalidates caches and decodes resp.result.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	int ret = 0;
	struct qseecom_client_send_service_ireq send_svc_ireq;
	struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
	struct qseecom_command_scm_resp resp;
	struct qseecom_send_svc_cmd_req req;
	void *send_req_ptr;
	size_t req_buf_size;

	if (copy_from_user(&req,
				(void __user *)argp,
				sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}

	if (__validate_send_service_cmd_inputs(data, &req))
		return -EINVAL;

	data->type = QSEECOM_SECURE_SERVICE;

	/* Pick the ireq layout that matches the requested service family. */
	switch (req.cmd_id) {
	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
	case QSEOS_RPMB_ERASE_COMMAND:
	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
		send_req_ptr = &send_svc_ireq;
		req_buf_size = sizeof(send_svc_ireq);
		if (__qseecom_process_rpmb_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	case QSEOS_FSM_LTEOTA_REQ_CMD:
	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
	case QSEOS_FSM_IKE_REQ_CMD:
	case QSEOS_FSM_IKE_REQ_RSP_CMD:
	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
	case QSEOS_FSM_OEM_FUSE_READ_ROW:
	case QSEOS_FSM_ENCFS_REQ_CMD:
	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
		send_req_ptr = &send_fsm_key_svc_ireq;
		req_buf_size = sizeof(send_fsm_key_svc_ireq);
		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	default:
		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
		return -EINVAL;
	}

	/* Vote for performance before entering TZ. */
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
		if (ret) {
			pr_err("Fail to set bw HIGH\n");
			return ret;
		}
	} else {
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clocks with err %d\n", ret);
			goto exit;
		}
	}

	/* Flush the shared buffer so TZ sees the request payload. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				(const void *)send_req_ptr,
				req_buf_size, &resp, sizeof(resp));
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		/* Drop the perf vote taken above before bailing out. */
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		}
		goto exit;
	}
	/* Invalidate so the CPU sees TZ's writes to the response area. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		pr_debug("qseos_result_incomplete\n");
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd fail with result: %d\n",
				resp.result);
		}
		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
			/*
			 * For the status query the "error" result IS the
			 * answer: hand the raw result code back to userspace.
			 */
			pr_warn("RPMB key status is 0x%x\n", resp.result);
			if (put_user(resp.result,
				(uint32_t __user *)req.resp_buf)) {
				ret = -EINVAL;
				goto exit;
			}
			ret = 0;
		}
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with resp.result: %d\n", resp.result);
		ret = -EINVAL;
		break;
	default:
		pr_err("Response result %d not supported\n",
				resp.result);
		ret = -EINVAL;
		break;
	}
	/* Release the perf vote on the normal path as well. */
	if (!qseecom.support_bus_scaling) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	} else {
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
	}

exit:
	return ret;
}
2988
/*
 * __validate_send_cmd_inputs() - sanity-check a client send-cmd request.
 * @data: per-fd handle; supplies the shared-buffer base/length the
 *        user-supplied cmd/resp pointers must fall inside.
 * @req:  request already copied from userspace.
 *
 * Every check guards a later unchecked pointer/length computation in
 * __qseecom_send_cmd(), so the order matters: NULL checks first, then
 * range checks, then integer-overflow checks, then end-of-buffer checks.
 *
 * Return: 0 if valid, -EINVAL on any bad input, -ENOMEM if the combined
 * request+response does not fit in the shared buffer.
 */
static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)

{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}
	/*
	 * resp_buf may only be NULL when resp_len is 0; cmd_req_buf is
	 * always required.  (NOTE(review): a NULL resp_buf still fails the
	 * range check below — confirm zero-length responses are unused.)
	 */
	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
			(req->cmd_req_buf == NULL)) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* Both user pointers must start inside the mapped shared buffer. */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* Overflow guards: make the additions below well-defined. */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Finally: both regions must END inside the shared buffer too. */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
3058
/*
 * __qseecom_process_reentrancy() - decode an SCM response when QSEE
 * reentrancy is supported.
 * @resp:    response from the SCM call; resp->data carries the listener
 *           id when the app is blocked.
 * @ptr_app: registered-app entry for the caller; its app_blocked flag is
 *           toggled around incomplete-command processing.
 * @data:    per-fd handle of the calling client.
 *
 * Return: 0 on success, negative errno on failure.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
			resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/*
		 * fall through: once the listener unblocks, the command is
		 * still incomplete and must be resumed below.
		 */
	case QSEOS_RESULT_INCOMPLETE:
		/* Mark the app blocked while the incomplete cmd is resumed. */
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		/* Wake anyone waiting for this app to unblock. */
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
				ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
				resp->result);
		return -EINVAL;
	}
}
3097
/*
 * __qseecom_send_cmd() - send a (pre-validated) client command to its
 * loaded trusted app via an SCM call.
 * @data: per-fd handle; identifies the app and owns the shared buffer.
 * @req:  request whose buffers/lengths passed __validate_send_cmd_inputs().
 *
 * Builds the 32-bit or 64-bit ireq depending on the QSEE version, flushes
 * the shared buffer and the sglist table, performs the SCM call, then
 * processes the result (with or without reentrancy support) and finally
 * invalidates the shared buffer so the caller sees the response.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	int ret2 = 0;
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* QSEE < 4.0 uses the 32-bit ireq; newer TZ takes 64-bit addresses. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* TZ reads the sglist table from memory: flush it out. */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* cmd id is the first field of both ireq layouts. */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	/* May sleep until the app is unblocked by its listener. */
	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
			ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
						resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/* Invalidate even on failure so stale lines never mask TZ writes. */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret2);
		return ret2;
	}
	return ret;
}
3235
3236static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3237{
3238 int ret = 0;
3239 struct qseecom_send_cmd_req req;
3240
3241 ret = copy_from_user(&req, argp, sizeof(req));
3242 if (ret) {
3243 pr_err("copy_from_user failed\n");
3244 return ret;
3245 }
3246
3247 if (__validate_send_cmd_inputs(data, &req))
3248 return -EINVAL;
3249
3250 ret = __qseecom_send_cmd(data, &req);
3251
3252 if (ret)
3253 return ret;
3254
3255 return ret;
3256}
3257
3258int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3259 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3260 struct qseecom_dev_handle *data, int i) {
3261
3262 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3263 (req->ifd_data[i].fd > 0)) {
3264 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3265 (req->ifd_data[i].cmd_buf_offset >
3266 req->cmd_req_len - sizeof(uint32_t))) {
3267 pr_err("Invalid offset (req len) 0x%x\n",
3268 req->ifd_data[i].cmd_buf_offset);
3269 return -EINVAL;
3270 }
3271 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3272 (lstnr_resp->ifd_data[i].fd > 0)) {
3273 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3274 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3275 lstnr_resp->resp_len - sizeof(uint32_t))) {
3276 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3277 lstnr_resp->ifd_data[i].cmd_buf_offset);
3278 return -EINVAL;
3279 }
3280 }
3281 return 0;
3282}
3283
/*
 * __qseecom_update_cmd_buf() - patch ion-fd buffer physical addresses into
 * a modfd command (client app) or response (listener) buffer, using the
 * 32-bit scatter-gather entry format.
 * @msg:     a qseecom_send_modfd_cmd_req (client) or
 *           qseecom_send_modfd_listener_resp (listener), selected by
 *           data->type.
 * @cleanup: false before the SCM call (write phys addrs, clean caches,
 *           record sglist info); true afterwards (zero the patched fields,
 *           invalidate caches).
 * @data:    per-fd handle.
 *
 * Return: 0 on success; -EINVAL/-EFAULT on bad input; -ENOMEM on any
 * per-fd failure (NOTE(review): the err path folds all failures into
 * -ENOMEM — confirm callers only test for non-zero).
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* Interpret msg according to the caller's role. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Resolve this fd slot to an ion handle and its patch site. */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch one 32-bit phys address. */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: write an sg-entry array. */
			struct qseecom_sg_entry *update;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
						(req->cmd_req_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			/* Post-call: invalidate so CPU sees TZ's writes. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Pre-call: clean so TZ sees CPU's writes. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record the patch site in the sglist-info table. */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3500
/*
 * __qseecom_allocate_sg_list_buffer() - when an ion buffer has more SG
 * entries than fit in the command buffer, spill them into a freshly
 * allocated DMA-coherent buffer and leave a version-2 list header at
 * @field pointing to it.
 * @data:   per-fd handle; the allocation is recorded in
 *          data->client.sec_buf_fd[fd_idx] so cleanup can free it later.
 * @field:  patch site inside the cmd/resp buffer; receives the header.
 * @fd_idx: index of the ion fd slot being processed.
 * @sg_ptr: scatter-gather table whose entries are copied out.
 *
 * Return: 0 on success, -ENOMEM on bad index or allocation failure.
 */
static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
		char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
{
	struct scatterlist *sg = sg_ptr->sgl;
	struct qseecom_sg_entry_64bit *sg_entry;
	struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
	void *buf;
	uint i;
	size_t size;
	dma_addr_t coh_pmem;

	if (fd_idx >= MAX_ION_FD) {
		pr_err("fd_idx [%d] is invalid\n", fd_idx);
		return -ENOMEM;
	}
	buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
	memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
	/* Allocate a contiguous kernel buffer */
	size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
	/*
	 * NOTE(review): this rounds up to a page but always adds one extra
	 * page when size is already page-aligned; PAGE_ALIGN(size) would be
	 * tighter — confirm nothing relies on the extra headroom.
	 */
	size = (size + PAGE_SIZE) & PAGE_MASK;
	buf = dma_alloc_coherent(qseecom.pdev,
			size, &coh_pmem, GFP_KERNEL);
	if (buf == NULL) {
		pr_err("failed to alloc memory for sg buf\n");
		return -ENOMEM;
	}
	/* update qseecom_sg_list_buf_hdr_64bit */
	buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
	buf_hdr->new_buf_phys_addr = coh_pmem;
	buf_hdr->nents_total = sg_ptr->nents;
	/* save the left sg entries into new allocated buf */
	sg_entry = (struct qseecom_sg_entry_64bit *)buf;
	for (i = 0; i < sg_ptr->nents; i++) {
		sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
		sg_entry->len = sg->length;
		sg_entry++;
		sg = sg_next(sg);
	}

	/* Track ownership so the cleanup pass can dma_free_coherent(). */
	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
	data->client.sec_buf_fd[fd_idx].vbase = buf;
	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
	data->client.sec_buf_fd[fd_idx].size = size;

	return 0;
}
3547
/*
 * __qseecom_update_cmd_buf_64() - 64-bit variant of
 * __qseecom_update_cmd_buf(): patch ion-fd buffer physical addresses into
 * a modfd command/response buffer using 64-bit SG entries.  Unlike the
 * 32-bit variant, an sg table larger than QSEECOM_MAX_SG_ENTRY is spilled
 * into a side buffer via __qseecom_allocate_sg_list_buffer() rather than
 * rejected.
 * @msg:     qseecom_send_modfd_cmd_req (client) or
 *           qseecom_send_modfd_listener_resp (listener), per data->type.
 * @cleanup: false before the SCM call (patch + clean caches + free any
 *           prior spill buffers); true afterwards (zero + invalidate).
 * @data:    per-fd handle.
 *
 * Return: 0 on success, negative errno on failure (err path frees all
 * spill buffers and returns -ENOMEM).
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* Interpret msg according to the caller's role. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Resolve this fd slot to an ion handle and its patch site. */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			/* Too many entries: spill to a side buffer instead. */
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch one 64-bit phys address. */
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: write a 64-bit sg-entry array. */
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
						(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			/* Post-call: invalidate so CPU sees TZ's writes. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Pre-call: clean so TZ sees CPU's writes. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record the patch site in the sglist-info table. */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* Undo any spill-buffer allocations made in earlier iterations. */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3746
3747static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3748 void __user *argp,
3749 bool is_64bit_addr)
3750{
3751 int ret = 0;
3752 int i;
3753 struct qseecom_send_modfd_cmd_req req;
3754 struct qseecom_send_cmd_req send_cmd_req;
3755
3756 ret = copy_from_user(&req, argp, sizeof(req));
3757 if (ret) {
3758 pr_err("copy_from_user failed\n");
3759 return ret;
3760 }
3761
3762 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3763 send_cmd_req.cmd_req_len = req.cmd_req_len;
3764 send_cmd_req.resp_buf = req.resp_buf;
3765 send_cmd_req.resp_len = req.resp_len;
3766
3767 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3768 return -EINVAL;
3769
3770 /* validate offsets */
3771 for (i = 0; i < MAX_ION_FD; i++) {
3772 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3773 pr_err("Invalid offset %d = 0x%x\n",
3774 i, req.ifd_data[i].cmd_buf_offset);
3775 return -EINVAL;
3776 }
3777 }
3778 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3779 (uintptr_t)req.cmd_req_buf);
3780 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3781 (uintptr_t)req.resp_buf);
3782
3783 if (!is_64bit_addr) {
3784 ret = __qseecom_update_cmd_buf(&req, false, data);
3785 if (ret)
3786 return ret;
3787 ret = __qseecom_send_cmd(data, &send_cmd_req);
3788 if (ret)
3789 return ret;
3790 ret = __qseecom_update_cmd_buf(&req, true, data);
3791 if (ret)
3792 return ret;
3793 } else {
3794 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3795 if (ret)
3796 return ret;
3797 ret = __qseecom_send_cmd(data, &send_cmd_req);
3798 if (ret)
3799 return ret;
3800 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3801 if (ret)
3802 return ret;
3803 }
3804
3805 return ret;
3806}
3807
/* Ioctl handler: modfd send-cmd using 32-bit SG-entry patching. */
static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, false);
}
3813
/* Ioctl handler: modfd send-cmd using 64-bit SG-entry patching. */
static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, true);
}
3819
3820
3821
3822static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3823 struct qseecom_registered_listener_list *svc)
3824{
3825 int ret;
3826
3827 ret = (svc->rcv_req_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07003828 return ret || data->abort || svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003829}
3830
/*
 * qseecom_receive_req() - block a listener fd until a request from TZ
 * arrives (or the listener/fd is aborted).
 * @data: per-fd handle of the registered listener.
 *
 * Sleeps freezably on the listener's wait queue; on wakeup, consumes the
 * pending-request flag and returns.
 *
 * Return: 0 when a request is ready, -ENODATA for an unknown listener id,
 * -ERESTARTSYS if interrupted by a signal, -ENODEV on abort.
 */
static int qseecom_receive_req(struct qseecom_dev_handle *data)
{
	int ret = 0;
	struct qseecom_registered_listener_list *this_lstnr;

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (!this_lstnr) {
		pr_err("Invalid listener ID\n");
		return -ENODATA;
	}

	while (1) {
		/* Freezable so suspend doesn't see this task as busy. */
		if (wait_event_freezable(this_lstnr->rcv_req_wq,
				__qseecom_listener_has_rcvd_req(data,
				this_lstnr))) {
			pr_debug("Interrupted: exiting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			/* woken up for different reason */
			return -ERESTARTSYS;
		}

		if (data->abort || this_lstnr->abort) {
			pr_err("Aborting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			return -ENODEV;
		}
		/* Consume the pending request and hand it to userspace. */
		this_lstnr->rcv_req_flag = 0;
		break;
	}
	return ret;
}
3862
3863static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3864{
3865 unsigned char app_arch = 0;
3866 struct elf32_hdr *ehdr;
3867 struct elf64_hdr *ehdr64;
3868
3869 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3870
3871 switch (app_arch) {
3872 case ELFCLASS32: {
3873 ehdr = (struct elf32_hdr *)fw_entry->data;
3874 if (fw_entry->size < sizeof(*ehdr)) {
3875 pr_err("%s: Not big enough to be an elf32 header\n",
3876 qseecom.pdev->init_name);
3877 return false;
3878 }
3879 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3880 pr_err("%s: Not an elf32 header\n",
3881 qseecom.pdev->init_name);
3882 return false;
3883 }
3884 if (ehdr->e_phnum == 0) {
3885 pr_err("%s: No loadable segments\n",
3886 qseecom.pdev->init_name);
3887 return false;
3888 }
3889 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3890 sizeof(struct elf32_hdr) > fw_entry->size) {
3891 pr_err("%s: Program headers not within mdt\n",
3892 qseecom.pdev->init_name);
3893 return false;
3894 }
3895 break;
3896 }
3897 case ELFCLASS64: {
3898 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3899 if (fw_entry->size < sizeof(*ehdr64)) {
3900 pr_err("%s: Not big enough to be an elf64 header\n",
3901 qseecom.pdev->init_name);
3902 return false;
3903 }
3904 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3905 pr_err("%s: Not an elf64 header\n",
3906 qseecom.pdev->init_name);
3907 return false;
3908 }
3909 if (ehdr64->e_phnum == 0) {
3910 pr_err("%s: No loadable segments\n",
3911 qseecom.pdev->init_name);
3912 return false;
3913 }
3914 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3915 sizeof(struct elf64_hdr) > fw_entry->size) {
3916 pr_err("%s: Program headers not within mdt\n",
3917 qseecom.pdev->init_name);
3918 return false;
3919 }
3920 break;
3921 }
3922 default: {
3923 pr_err("QSEE app arch %u is not supported\n", app_arch);
3924 return false;
3925 }
3926 }
3927 return true;
3928}
3929
/*
 * Compute the total size of TA @appname's firmware image set and report
 * its ELF class.
 *
 * Loads "<appname>.mdt" to validate the header and read the program
 * header count, then sums the mdt size plus every "<appname>.bNN"
 * split-image blob into *fw_size.  *app_arch is set to the ELF class
 * byte (ELFCLASS32/ELFCLASS64).
 *
 * Returns 0 on success; on failure returns -EIO/-EINVAL (or the
 * request_firmware() error) and resets *fw_size to 0.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with request_firmware\n");
		ret = -EIO;
		goto err;
	}
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
			appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;
	/*
	 * Sum the split-image blobs.  The image-valid check above rejects
	 * e_phnum == 0, so the loop runs at least once and sets ret to the
	 * last request_firmware() result (0 on the success path).
	 */
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		/* guard against uint32_t wrap of the running total */
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}

	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
3992
/*
 * Concatenate TA @appname's firmware images into the caller's buffer.
 *
 * Copies "<appname>.mdt" followed by each "<appname>.bNN" blob into
 * @img_data (capacity @fw_size, as computed by __qseecom_get_fw_size()),
 * and fills in load_req->mdt_len and load_req->img_len with the mdt and
 * cumulative image sizes.
 *
 * Returns 0 on success, -EIO on firmware-load errors, -EINVAL if the
 * images no longer fit the advertised @fw_size.
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
		uint32_t fw_size,
		struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}

	load_req->img_len = fw_entry->size;
	/* mdt must fit within the buffer sized earlier */
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/

	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
			appname, app_arch);
		ret = -EIO;
		goto err;
	}
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* append each split-image blob, checking overflow and capacity */
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	release_firmware(fw_entry);
	return ret;
}
4064
/*
 * Allocate a 4K-aligned ION buffer of @fw_size bytes from the QSECOM TA
 * heap, map it into the kernel, and return the handle, kernel virtual
 * address and physical address via the out parameters.
 *
 * If allocation fails it is retried up to
 * QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP times, sleeping
 * QSEECOM_TA_ION_ALLOCATE_DELAY ms between attempts.  The sleep is done
 * with app_access_lock dropped, so the caller must hold app_access_lock
 * when calling this function.
 *
 * Returns 0 on success, -ENOMEM/-EIO on failure (nothing to clean up).
 */
static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
{
	size_t len = 0;
	int ret = 0;
	ion_phys_addr_t pa;
	struct ion_handle *ihandle = NULL;
	u8 *img_data = NULL;
	int retry = 0;

	do {
		if (retry++) {
			/* drop the lock while sleeping between attempts */
			mutex_unlock(&app_access_lock);
			msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
			mutex_lock(&app_access_lock);
		}
		ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
				SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), 0);
	} while (IS_ERR_OR_NULL(ihandle) &&
			(retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));

	if (IS_ERR_OR_NULL(ihandle)) {
		pr_err("ION alloc failed\n");
		return -ENOMEM;
	}
	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
					ihandle);

	if (IS_ERR_OR_NULL(img_data)) {
		pr_err("ION memory mapping for image loading failed\n");
		ret = -ENOMEM;
		goto exit_ion_free;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
	if (ret) {
		pr_err("physical memory retrieval failure\n");
		ret = -EIO;
		goto exit_ion_unmap_kernel;
	}

	*pihandle = ihandle;
	*data = img_data;
	*paddr = pa;
	return ret;

exit_ion_unmap_kernel:
	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
exit_ion_free:
	ion_free(qseecom.ion_clnt, ihandle);
	ihandle = NULL;
	return ret;
}
4118
4119static void __qseecom_free_img_data(struct ion_handle **ihandle)
4120{
4121 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4122 ion_free(qseecom.ion_clnt, *ihandle);
4123 *ihandle = NULL;
4124}
4125
4126static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4127 uint32_t *app_id)
4128{
4129 int ret = -1;
4130 uint32_t fw_size = 0;
4131 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4132 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4133 struct qseecom_command_scm_resp resp;
4134 u8 *img_data = NULL;
4135 ion_phys_addr_t pa = 0;
4136 struct ion_handle *ihandle = NULL;
4137 void *cmd_buf = NULL;
4138 size_t cmd_len;
4139 uint32_t app_arch = 0;
4140
4141 if (!data || !appname || !app_id) {
4142 pr_err("Null pointer to data or appname or appid\n");
4143 return -EINVAL;
4144 }
4145 *app_id = 0;
4146 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4147 return -EIO;
4148 data->client.app_arch = app_arch;
4149
4150 /* Check and load cmnlib */
4151 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4152 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4153 ret = qseecom_load_commonlib_image(data, "cmnlib");
4154 if (ret) {
4155 pr_err("failed to load cmnlib\n");
4156 return -EIO;
4157 }
4158 qseecom.commonlib_loaded = true;
4159 pr_debug("cmnlib is loaded\n");
4160 }
4161
4162 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4163 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4164 if (ret) {
4165 pr_err("failed to load cmnlib64\n");
4166 return -EIO;
4167 }
4168 qseecom.commonlib64_loaded = true;
4169 pr_debug("cmnlib64 is loaded\n");
4170 }
4171 }
4172
4173 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4174 if (ret)
4175 return ret;
4176
4177 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4178 if (ret) {
4179 ret = -EIO;
4180 goto exit_free_img_data;
4181 }
4182
4183 /* Populate the load_req parameters */
4184 if (qseecom.qsee_version < QSEE_VERSION_40) {
4185 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4186 load_req.mdt_len = load_req.mdt_len;
4187 load_req.img_len = load_req.img_len;
4188 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4189 load_req.phy_addr = (uint32_t)pa;
4190 cmd_buf = (void *)&load_req;
4191 cmd_len = sizeof(struct qseecom_load_app_ireq);
4192 } else {
4193 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4194 load_req_64bit.mdt_len = load_req.mdt_len;
4195 load_req_64bit.img_len = load_req.img_len;
4196 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4197 load_req_64bit.phy_addr = (uint64_t)pa;
4198 cmd_buf = (void *)&load_req_64bit;
4199 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4200 }
4201
4202 if (qseecom.support_bus_scaling) {
4203 mutex_lock(&qsee_bw_mutex);
4204 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4205 mutex_unlock(&qsee_bw_mutex);
4206 if (ret) {
4207 ret = -EIO;
4208 goto exit_free_img_data;
4209 }
4210 }
4211
4212 ret = __qseecom_enable_clk_scale_up(data);
4213 if (ret) {
4214 ret = -EIO;
4215 goto exit_unregister_bus_bw_need;
4216 }
4217
4218 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4219 img_data, fw_size,
4220 ION_IOC_CLEAN_INV_CACHES);
4221 if (ret) {
4222 pr_err("cache operation failed %d\n", ret);
4223 goto exit_disable_clk_vote;
4224 }
4225
4226 /* SCM_CALL to load the image */
4227 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4228 &resp, sizeof(resp));
4229 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004230 pr_err("scm_call to load failed : ret %d, result %x\n",
4231 ret, resp.result);
4232 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4233 ret = -EEXIST;
4234 else
4235 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004236 goto exit_disable_clk_vote;
4237 }
4238
4239 switch (resp.result) {
4240 case QSEOS_RESULT_SUCCESS:
4241 *app_id = resp.data;
4242 break;
4243 case QSEOS_RESULT_INCOMPLETE:
4244 ret = __qseecom_process_incomplete_cmd(data, &resp);
4245 if (ret)
4246 pr_err("process_incomplete_cmd FAILED\n");
4247 else
4248 *app_id = resp.data;
4249 break;
4250 case QSEOS_RESULT_FAILURE:
4251 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4252 break;
4253 default:
4254 pr_err("scm call return unknown response %d\n", resp.result);
4255 ret = -EINVAL;
4256 break;
4257 }
4258
4259exit_disable_clk_vote:
4260 __qseecom_disable_clk_scale_down(data);
4261
4262exit_unregister_bus_bw_need:
4263 if (qseecom.support_bus_scaling) {
4264 mutex_lock(&qsee_bw_mutex);
4265 qseecom_unregister_bus_bandwidth_needs(data);
4266 mutex_unlock(&qsee_bw_mutex);
4267 }
4268
4269exit_free_img_data:
4270 __qseecom_free_img_data(&ihandle);
4271 return ret;
4272}
4273
/*
 * Load a QSEE common library image (@cmnlib_name, e.g. "cmnlib" or
 * "cmnlib64") into the secure side via QSEOS_LOAD_SERV_IMAGE_COMMAND.
 *
 * Sizes the library's split firmware, copies it into a freshly
 * allocated ION buffer, flushes the cache, and votes for bus bandwidth
 * and crypto clocks around the SCM call.  The ION buffer is always
 * released before returning.
 *
 * Returns 0 on success or a negative errno.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;
	struct ion_handle *cmnlib_ion_handle = NULL;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	/* choose the 32- or 64-bit request layout per QSEE version */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* TZ reads the buffer by physical address: flush it first */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener services before it can finish */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&cmnlib_ion_handle);
	return ret;
}
4394
4395static int qseecom_unload_commonlib_image(void)
4396{
4397 int ret = -EINVAL;
4398 struct qseecom_unload_lib_image_ireq unload_req = {0};
4399 struct qseecom_command_scm_resp resp;
4400
4401 /* Populate the remaining parameters */
4402 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4403
4404 /* SCM_CALL to load the image */
4405 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4406 sizeof(struct qseecom_unload_lib_image_ireq),
4407 &resp, sizeof(resp));
4408 if (ret) {
4409 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4410 ret = -EIO;
4411 } else {
4412 switch (resp.result) {
4413 case QSEOS_RESULT_SUCCESS:
4414 break;
4415 case QSEOS_RESULT_FAILURE:
4416 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4417 break;
4418 default:
4419 pr_err("scm call return unknown response %d\n",
4420 resp.result);
4421 ret = -EINVAL;
4422 break;
4423 }
4424 }
4425
4426 return ret;
4427}
4428
4429int qseecom_start_app(struct qseecom_handle **handle,
4430 char *app_name, uint32_t size)
4431{
4432 int32_t ret = 0;
4433 unsigned long flags = 0;
4434 struct qseecom_dev_handle *data = NULL;
4435 struct qseecom_check_app_ireq app_ireq;
4436 struct qseecom_registered_app_list *entry = NULL;
4437 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4438 bool found_app = false;
4439 size_t len;
4440 ion_phys_addr_t pa;
4441 uint32_t fw_size, app_arch;
4442 uint32_t app_id = 0;
4443
4444 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4445 pr_err("Not allowed to be called in %d state\n",
4446 atomic_read(&qseecom.qseecom_state));
4447 return -EPERM;
4448 }
4449 if (!app_name) {
4450 pr_err("failed to get the app name\n");
4451 return -EINVAL;
4452 }
4453
Zhen Kong64a6d7282017-06-16 11:55:07 -07004454 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004455 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004456 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004457 return -EINVAL;
4458 }
4459
4460 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4461 if (!(*handle))
4462 return -ENOMEM;
4463
4464 data = kzalloc(sizeof(*data), GFP_KERNEL);
4465 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304466 ret = -ENOMEM;
4467 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004468 }
4469 data->abort = 0;
4470 data->type = QSEECOM_CLIENT_APP;
4471 data->released = false;
4472 data->client.sb_length = size;
4473 data->client.user_virt_sb_base = 0;
4474 data->client.ihandle = NULL;
4475
4476 init_waitqueue_head(&data->abort_wq);
4477
4478 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4479 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4480 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4481 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304482 ret = -ENOMEM;
4483 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004484 }
4485 mutex_lock(&app_access_lock);
4486
Zhen Kong5d02be92018-05-29 16:17:29 -07004487recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004488 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4489 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4490 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4491 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304492 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004493
4494 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4495 if (app_id) {
4496 pr_warn("App id %d for [%s] app exists\n", app_id,
4497 (char *)app_ireq.app_name);
4498 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4499 list_for_each_entry(entry,
4500 &qseecom.registered_app_list_head, list){
4501 if (entry->app_id == app_id) {
4502 entry->ref_cnt++;
4503 found_app = true;
4504 break;
4505 }
4506 }
4507 spin_unlock_irqrestore(
4508 &qseecom.registered_app_list_lock, flags);
4509 if (!found_app)
4510 pr_warn("App_id %d [%s] was loaded but not registered\n",
4511 ret, (char *)app_ireq.app_name);
4512 } else {
4513 /* load the app and get the app_id */
4514 pr_debug("%s: Loading app for the first time'\n",
4515 qseecom.pdev->init_name);
4516 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004517 if (ret == -EEXIST) {
4518 pr_err("recheck if TA %s is loaded\n", app_name);
4519 goto recheck;
4520 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304521 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004522 }
4523 data->client.app_id = app_id;
4524 if (!found_app) {
4525 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4526 if (!entry) {
4527 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304528 ret = -ENOMEM;
4529 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004530 }
4531 entry->app_id = app_id;
4532 entry->ref_cnt = 1;
4533 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4534 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4535 ret = -EIO;
Zhen Konga6e3f512017-01-20 12:22:23 -08004536 kfree(entry);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304537 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004538 }
4539 entry->app_arch = app_arch;
4540 entry->app_blocked = false;
4541 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004542 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004543 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4544 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4545 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4546 flags);
4547 }
4548
4549 /* Get the physical address of the ION BUF */
4550 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4551 if (ret) {
4552 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4553 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304554 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004555 }
4556
4557 /* Populate the structure for sending scm call to load image */
4558 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4559 data->client.ihandle);
4560 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4561 pr_err("ION memory mapping for client shared buf failed\n");
4562 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304563 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004564 }
4565 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4566 data->client.sb_phys = (phys_addr_t)pa;
4567 (*handle)->dev = (void *)data;
4568 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4569 (*handle)->sbuf_len = data->client.sb_length;
4570
4571 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4572 if (!kclient_entry) {
4573 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304574 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004575 }
4576 kclient_entry->handle = *handle;
4577
4578 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4579 list_add_tail(&kclient_entry->list,
4580 &qseecom.registered_kclient_list_head);
4581 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4582
4583 mutex_unlock(&app_access_lock);
4584 return 0;
4585
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304586exit_ion_unmap_kernel:
4587 if (!IS_ERR_OR_NULL(data->client.ihandle))
4588 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4589exit_entry_free:
4590 kfree(entry);
4591exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004592 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304593 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4594 ion_free(qseecom.ion_clnt, data->client.ihandle);
4595 data->client.ihandle = NULL;
4596 }
4597exit_data_free:
4598 kfree(data);
4599exit_handle_free:
4600 if (*handle) {
4601 kfree(*handle);
4602 *handle = NULL;
4603 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004604 return ret;
4605}
4606EXPORT_SYMBOL(qseecom_start_app);
4607
/*
 * Kernel-client API: counterpart of qseecom_start_app().
 *
 * Removes the kernel-client entry matching *handle from the registered
 * kclient list, unloads the TA via qseecom_unload_app(), and on success
 * frees the handle, its device data and the list entry, then NULLs out
 * *handle.  On failure the allocations are intentionally left intact.
 *
 * Returns 0 on success or a negative errno.
 */
int qseecom_shutdown_app(struct qseecom_handle **handle)
{
	int ret = -EINVAL;
	struct qseecom_dev_handle *data;

	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	bool found_handle = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
				atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if ((handle == NULL)  || (*handle == NULL)) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data =	(struct qseecom_dev_handle *) ((*handle)->dev);
	mutex_lock(&app_access_lock);

	/* unlink the matching kclient entry while holding the list lock */
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
				list) {
		if (kclient->handle == (*handle)) {
			list_del(&kclient->list);
			found_handle = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
	if (!found_handle)
		pr_err("Unable to find the handle, exiting\n");
	else
		ret = qseecom_unload_app(data, false);

	mutex_unlock(&app_access_lock);
	/* free only on success; kclient is the unlinked entry here */
	if (ret == 0) {
		kzfree(data);
		kzfree(*handle);
		kzfree(kclient);
		*handle = NULL;
	}

	return ret;
}
EXPORT_SYMBOL(qseecom_shutdown_app);
4656
/*
 * Kernel-client API: send a command to the TA behind @handle.
 *
 * @send_buf/@sbuf_len and @resp_buf/@rbuf_len must lie inside the
 * handle's shared buffer (enforced by __validate_send_cmd_inputs()).
 * Bus bandwidth and (when HLOS owns the crypto clock) clock votes are
 * taken around the underlying __qseecom_send_cmd() call, all under
 * app_access_lock.
 *
 * Returns 0 on success or a negative errno.
 */
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
				atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
				ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		perf_enabled = true;
	}
	/* "securemm" TA requires the legacy command format */
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req);
	data->use_legacy_cmd = false;
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	/* drop the temporary clock votes taken above */
	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
EXPORT_SYMBOL(qseecom_send_command);
4736
4737int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4738{
4739 int ret = 0;
4740
4741 if ((handle == NULL) || (handle->dev == NULL)) {
4742 pr_err("No valid kernel client\n");
4743 return -EINVAL;
4744 }
4745 if (high) {
4746 if (qseecom.support_bus_scaling) {
4747 mutex_lock(&qsee_bw_mutex);
4748 __qseecom_register_bus_bandwidth_needs(handle->dev,
4749 HIGH);
4750 mutex_unlock(&qsee_bw_mutex);
4751 } else {
4752 ret = qseecom_perf_enable(handle->dev);
4753 if (ret)
4754 pr_err("Failed to vote for clock with err %d\n",
4755 ret);
4756 }
4757 } else {
4758 if (!qseecom.support_bus_scaling) {
4759 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4760 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4761 } else {
4762 mutex_lock(&qsee_bw_mutex);
4763 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4764 mutex_unlock(&qsee_bw_mutex);
4765 }
4766 }
4767 return ret;
4768}
4769EXPORT_SYMBOL(qseecom_set_bandwidth);
4770
/*
 * Entry point for the smcinvoke driver to route a listener request it
 * received from TZ through qseecom's listener machinery.
 *
 * The scm_desc return slots are interpreted as:
 *   desc->ret[0] = result (request cmd)
 *   desc->ret[1] = resp_type (incomplete: unused; blocked: session id)
 *   desc->ret[2] = listener id
 * On return the same slots carry the updated result/resp_type/data so
 * smcinvoke can resume its SCM conversation.
 *
 * Uses throw-away app-entry/private-data stubs keyed by the session id,
 * and dispatches to the reentrancy or legacy incomplete-cmd path based
 * on qseecom.qsee_reentrancy_support.  Returns 0 on success or a
 * negative errno.
 */
int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
{
	struct qseecom_registered_app_list dummy_app_entry = { {0} };
	struct qseecom_dev_handle dummy_private_data = {0};
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	if (!desc) {
		pr_err("desc is NULL\n");
		return -EINVAL;
	}

	resp.result = desc->ret[0];	/*req_cmd*/
	resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
	resp.data = desc->ret[2];	/*listener_id*/

	dummy_private_data.client.app_id = desc->ret[1];
	dummy_app_entry.app_id = desc->ret[1];

	mutex_lock(&app_access_lock);
	if (qseecom.qsee_reentrancy_support)
		ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
					&dummy_private_data);
	else
		ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
					&resp);
	mutex_unlock(&app_access_lock);
	if (ret)
		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
			(int)desc->ret[0], (int)desc->ret[2],
			(int)desc->ret[1], ret);
	/* hand the updated response back to the smcinvoke caller */
	desc->ret[0] = resp.result;
	desc->ret[1] = resp.resp_type;
	desc->ret[2] = resp.data;
	return ret;
}
EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4808
/*
 * Userspace listener posted a response: set the global response flag
 * (before the wakeup, so waiters observe it) and wake anyone waiting on
 * send_resp_wq.  Always returns 0.
 */
static int qseecom_send_resp(void)
{
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4815
/*
 * Reentrancy-aware variant of qseecom_send_resp(): in addition to the
 * global flag, set the per-listener send_resp_flag for
 * data->listener.id so the waiter for this specific listener is
 * released.  Returns 0 on success, -EINVAL if the listener id is
 * unknown.
 */
static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *this_lstnr = NULL;

	pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (this_lstnr == NULL)
		return -EINVAL;
	/* set both flags before waking so waiters see a consistent state */
	qseecom.send_resp_flag = 1;
	this_lstnr->send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4829
4830static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
4831 struct qseecom_send_modfd_listener_resp *resp,
4832 struct qseecom_registered_listener_list *this_lstnr)
4833{
4834 int i;
4835
4836 if (!data || !resp || !this_lstnr) {
4837 pr_err("listener handle or resp msg is null\n");
4838 return -EINVAL;
4839 }
4840
4841 if (resp->resp_buf_ptr == NULL) {
4842 pr_err("resp buffer is null\n");
4843 return -EINVAL;
4844 }
4845 /* validate resp buf length */
4846 if ((resp->resp_len == 0) ||
4847 (resp->resp_len > this_lstnr->sb_length)) {
4848 pr_err("resp buf length %d not valid\n", resp->resp_len);
4849 return -EINVAL;
4850 }
4851
4852 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
4853 pr_err("Integer overflow in resp_len & resp_buf\n");
4854 return -EINVAL;
4855 }
4856 if ((uintptr_t)this_lstnr->user_virt_sb_base >
4857 (ULONG_MAX - this_lstnr->sb_length)) {
4858 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
4859 return -EINVAL;
4860 }
4861 /* validate resp buf */
4862 if (((uintptr_t)resp->resp_buf_ptr <
4863 (uintptr_t)this_lstnr->user_virt_sb_base) ||
4864 ((uintptr_t)resp->resp_buf_ptr >=
4865 ((uintptr_t)this_lstnr->user_virt_sb_base +
4866 this_lstnr->sb_length)) ||
4867 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
4868 ((uintptr_t)this_lstnr->user_virt_sb_base +
4869 this_lstnr->sb_length))) {
4870 pr_err("resp buf is out of shared buffer region\n");
4871 return -EINVAL;
4872 }
4873
4874 /* validate offsets */
4875 for (i = 0; i < MAX_ION_FD; i++) {
4876 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
4877 pr_err("Invalid offset %d = 0x%x\n",
4878 i, resp->ifd_data[i].cmd_buf_offset);
4879 return -EINVAL;
4880 }
4881 }
4882
4883 return 0;
4884}
4885
/*
 * Common handler for the send-modfd-listener-response ioctls.
 * Copies the response descriptor from userspace, validates it against the
 * listener's registered shared buffer, rewrites the user-space response
 * pointer into the kernel mapping of that buffer, patches ion fd addresses
 * into the command buffer (32- or 64-bit layout per @is_64bit_addr), and
 * finally sets the response flags and wakes the listener wait queue.
 *
 * Returns 0 on success, -EINVAL on copy failure, unknown listener, or
 * invalid response layout.
 */
static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp, bool is_64bit_addr)
{
	struct qseecom_send_modfd_listener_resp resp;
	struct qseecom_registered_listener_list *this_lstnr = NULL;

	if (copy_from_user(&resp, argp, sizeof(resp))) {
		pr_err("copy_from_user failed");
		return -EINVAL;
	}

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (this_lstnr == NULL)
		return -EINVAL;

	if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
		return -EINVAL;

	/* Translate the user virtual resp pointer to its kernel mapping. */
	resp.resp_buf_ptr = this_lstnr->sb_virt +
		(uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);

	if (!is_64bit_addr)
		__qseecom_update_cmd_buf(&resp, false, data);
	else
		__qseecom_update_cmd_buf_64(&resp, false, data);
	/* Flags must be set before the wakeup so waiters observe them. */
	qseecom.send_resp_flag = 1;
	this_lstnr->send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4916
4917static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4918 void __user *argp)
4919{
4920 return __qseecom_send_modfd_resp(data, argp, false);
4921}
4922
4923static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
4924 void __user *argp)
4925{
4926 return __qseecom_send_modfd_resp(data, argp, true);
4927}
4928
4929static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
4930 void __user *argp)
4931{
4932 struct qseecom_qseos_version_req req;
4933
4934 if (copy_from_user(&req, argp, sizeof(req))) {
4935 pr_err("copy_from_user failed");
4936 return -EINVAL;
4937 }
4938 req.qseos_version = qseecom.qseos_version;
4939 if (copy_to_user(argp, &req, sizeof(req))) {
4940 pr_err("copy_to_user failed");
4941 return -EINVAL;
4942 }
4943 return 0;
4944}
4945
/*
 * Enable (and reference-count) the crypto-engine clocks for the given CE
 * instance (CLK_QSEE or CLK_CE_DRV).  On the first reference the core,
 * interface and bus clocks are prepared/enabled in that order; later calls
 * only bump the access count.  Serialized by clk_access_lock.
 *
 * Returns 0 on success, -EINVAL for an unknown CE instance, -EIO if any
 * clock fails to enable (already-enabled clocks are rolled back in
 * reverse order via the goto ladder).
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	/* Clocks already on: just take another reference. */
	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
5013
/*
 * Drop one reference on the CE clocks for the given instance; the clocks
 * are actually unprepared only when the last reference is released.
 * Serialized by clk_access_lock.  An unbalanced call (count already 0)
 * is silently ignored.
 */
static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
{
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	else
		qclk = &qseecom.ce_drv;

	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == 0) {
		mutex_unlock(&clk_access_lock);
		return;
	}

	/* Last reference: turn everything off. */
	if (qclk->clk_access_cnt == 1) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
		if (qclk->ce_core_clk != NULL)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
	}
	qclk->clk_access_cnt--;
	mutex_unlock(&clk_access_lock);
}
5044
/*
 * Take a per-client bandwidth/clock vote.
 * CLK_DFAB votes bump qsee_bw_count and set data->perf_enabled;
 * CLK_SFPB votes bump qsee_sfpb_bw_count and set data->fast_load_enabled.
 *
 * Only the first vote of a kind issues a bus-scale request: level 3 when
 * the other kind is already active (combined), level 1 for DFAB alone,
 * level 2 for SFPB alone.  When no vote was active, the QSEE core clock
 * is enabled first and rolled back if the bus request fails.
 * All counter updates are protected by qsee_bw_mutex.
 *
 * Returns 0 on success (or when bus scaling is absent), otherwise the
 * bus/clock error code.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			if (qseecom.qsee_sfpb_bw_count > 0)
				/* SFPB already active: combined level. */
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					/* Roll back the clk on bus failure. */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			if (qseecom.qsee_bw_count > 0)
				/* DFAB already active: combined level. */
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					/* Roll back the clk on bus failure. */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}

			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
5128
5129static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5130 int32_t clk_type)
5131{
5132 int32_t ret = 0;
5133 struct qseecom_clk *qclk;
5134
5135 qclk = &qseecom.qsee;
5136
5137 if (qseecom.no_clock_support)
5138 return;
5139 if (!qseecom.qsee_perf_client)
5140 return;
5141
5142 switch (clk_type) {
5143 case CLK_DFAB:
5144 mutex_lock(&qsee_bw_mutex);
5145 if (qseecom.qsee_bw_count == 0) {
5146 pr_err("Client error.Extra call to disable DFAB clk\n");
5147 mutex_unlock(&qsee_bw_mutex);
5148 return;
5149 }
5150
5151 if (qseecom.qsee_bw_count == 1) {
5152 if (qseecom.qsee_sfpb_bw_count > 0)
5153 ret = msm_bus_scale_client_update_request(
5154 qseecom.qsee_perf_client, 2);
5155 else {
5156 ret = msm_bus_scale_client_update_request(
5157 qseecom.qsee_perf_client, 0);
5158 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5159 __qseecom_disable_clk(CLK_QSEE);
5160 }
5161 if (ret)
5162 pr_err("SFPB Bandwidth req fail (%d)\n",
5163 ret);
5164 else {
5165 qseecom.qsee_bw_count--;
5166 data->perf_enabled = false;
5167 }
5168 } else {
5169 qseecom.qsee_bw_count--;
5170 data->perf_enabled = false;
5171 }
5172 mutex_unlock(&qsee_bw_mutex);
5173 break;
5174 case CLK_SFPB:
5175 mutex_lock(&qsee_bw_mutex);
5176 if (qseecom.qsee_sfpb_bw_count == 0) {
5177 pr_err("Client error.Extra call to disable SFPB clk\n");
5178 mutex_unlock(&qsee_bw_mutex);
5179 return;
5180 }
5181 if (qseecom.qsee_sfpb_bw_count == 1) {
5182 if (qseecom.qsee_bw_count > 0)
5183 ret = msm_bus_scale_client_update_request(
5184 qseecom.qsee_perf_client, 1);
5185 else {
5186 ret = msm_bus_scale_client_update_request(
5187 qseecom.qsee_perf_client, 0);
5188 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5189 __qseecom_disable_clk(CLK_QSEE);
5190 }
5191 if (ret)
5192 pr_err("SFPB Bandwidth req fail (%d)\n",
5193 ret);
5194 else {
5195 qseecom.qsee_sfpb_bw_count--;
5196 data->fast_load_enabled = false;
5197 }
5198 } else {
5199 qseecom.qsee_sfpb_bw_count--;
5200 data->fast_load_enabled = false;
5201 }
5202 mutex_unlock(&qsee_bw_mutex);
5203 break;
5204 default:
5205 pr_err("Clock type not defined\n");
5206 break;
5207 }
5208
5209}
5210
5211static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5212 void __user *argp)
5213{
5214 struct ion_handle *ihandle; /* Ion handle */
5215 struct qseecom_load_img_req load_img_req;
5216 int uret = 0;
5217 int ret;
5218 ion_phys_addr_t pa = 0;
5219 size_t len;
5220 struct qseecom_load_app_ireq load_req;
5221 struct qseecom_load_app_64bit_ireq load_req_64bit;
5222 struct qseecom_command_scm_resp resp;
5223 void *cmd_buf = NULL;
5224 size_t cmd_len;
5225 /* Copy the relevant information needed for loading the image */
5226 if (copy_from_user(&load_img_req,
5227 (void __user *)argp,
5228 sizeof(struct qseecom_load_img_req))) {
5229 pr_err("copy_from_user failed\n");
5230 return -EFAULT;
5231 }
5232
5233 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005234 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005235 load_img_req.ifd_data_fd);
5236 if (IS_ERR_OR_NULL(ihandle)) {
5237 pr_err("Ion client could not retrieve the handle\n");
5238 return -ENOMEM;
5239 }
5240
5241 /* Get the physical address of the ION BUF */
5242 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5243 if (ret) {
5244 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5245 ret);
5246 return ret;
5247 }
5248 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5249 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5250 len, load_img_req.mdt_len,
5251 load_img_req.img_len);
5252 return ret;
5253 }
5254 /* Populate the structure for sending scm call to load image */
5255 if (qseecom.qsee_version < QSEE_VERSION_40) {
5256 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5257 load_req.mdt_len = load_img_req.mdt_len;
5258 load_req.img_len = load_img_req.img_len;
5259 load_req.phy_addr = (uint32_t)pa;
5260 cmd_buf = (void *)&load_req;
5261 cmd_len = sizeof(struct qseecom_load_app_ireq);
5262 } else {
5263 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5264 load_req_64bit.mdt_len = load_img_req.mdt_len;
5265 load_req_64bit.img_len = load_img_req.img_len;
5266 load_req_64bit.phy_addr = (uint64_t)pa;
5267 cmd_buf = (void *)&load_req_64bit;
5268 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5269 }
5270
5271 if (qseecom.support_bus_scaling) {
5272 mutex_lock(&qsee_bw_mutex);
5273 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5274 mutex_unlock(&qsee_bw_mutex);
5275 if (ret) {
5276 ret = -EIO;
5277 goto exit_cpu_restore;
5278 }
5279 }
5280
5281 /* Vote for the SFPB clock */
5282 ret = __qseecom_enable_clk_scale_up(data);
5283 if (ret) {
5284 ret = -EIO;
5285 goto exit_register_bus_bandwidth_needs;
5286 }
5287 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5288 ION_IOC_CLEAN_INV_CACHES);
5289 if (ret) {
5290 pr_err("cache operation failed %d\n", ret);
5291 goto exit_disable_clock;
5292 }
5293 /* SCM_CALL to load the external elf */
5294 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5295 &resp, sizeof(resp));
5296 if (ret) {
5297 pr_err("scm_call to load failed : ret %d\n",
5298 ret);
5299 ret = -EFAULT;
5300 goto exit_disable_clock;
5301 }
5302
5303 switch (resp.result) {
5304 case QSEOS_RESULT_SUCCESS:
5305 break;
5306 case QSEOS_RESULT_INCOMPLETE:
5307 pr_err("%s: qseos result incomplete\n", __func__);
5308 ret = __qseecom_process_incomplete_cmd(data, &resp);
5309 if (ret)
5310 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5311 break;
5312 case QSEOS_RESULT_FAILURE:
5313 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5314 ret = -EFAULT;
5315 break;
5316 default:
5317 pr_err("scm_call response result %d not supported\n",
5318 resp.result);
5319 ret = -EFAULT;
5320 break;
5321 }
5322
5323exit_disable_clock:
5324 __qseecom_disable_clk_scale_down(data);
5325
5326exit_register_bus_bandwidth_needs:
5327 if (qseecom.support_bus_scaling) {
5328 mutex_lock(&qsee_bw_mutex);
5329 uret = qseecom_unregister_bus_bandwidth_needs(data);
5330 mutex_unlock(&qsee_bw_mutex);
5331 if (uret)
5332 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5333 uret, ret);
5334 }
5335
5336exit_cpu_restore:
5337 /* Deallocate the handle */
5338 if (!IS_ERR_OR_NULL(ihandle))
5339 ion_free(qseecom.ion_clnt, ihandle);
5340 return ret;
5341}
5342
5343static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5344{
5345 int ret = 0;
5346 struct qseecom_command_scm_resp resp;
5347 struct qseecom_unload_app_ireq req;
5348
5349 /* unavailable client app */
5350 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5351
5352 /* Populate the structure for sending scm call to unload image */
5353 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5354
5355 /* SCM_CALL to unload the external elf */
5356 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5357 sizeof(struct qseecom_unload_app_ireq),
5358 &resp, sizeof(resp));
5359 if (ret) {
5360 pr_err("scm_call to unload failed : ret %d\n",
5361 ret);
5362 ret = -EFAULT;
5363 goto qseecom_unload_external_elf_scm_err;
5364 }
5365 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5366 ret = __qseecom_process_incomplete_cmd(data, &resp);
5367 if (ret)
5368 pr_err("process_incomplete_cmd fail err: %d\n",
5369 ret);
5370 } else {
5371 if (resp.result != QSEOS_RESULT_SUCCESS) {
5372 pr_err("scm_call to unload image failed resp.result =%d\n",
5373 resp.result);
5374 ret = -EFAULT;
5375 }
5376 }
5377
5378qseecom_unload_external_elf_scm_err:
5379
5380 return ret;
5381}
5382
5383static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5384 void __user *argp)
5385{
5386
5387 int32_t ret;
5388 struct qseecom_qseos_app_load_query query_req;
5389 struct qseecom_check_app_ireq req;
5390 struct qseecom_registered_app_list *entry = NULL;
5391 unsigned long flags = 0;
5392 uint32_t app_arch = 0, app_id = 0;
5393 bool found_app = false;
5394
5395 /* Copy the relevant information needed for loading the image */
5396 if (copy_from_user(&query_req,
5397 (void __user *)argp,
5398 sizeof(struct qseecom_qseos_app_load_query))) {
5399 pr_err("copy_from_user failed\n");
5400 return -EFAULT;
5401 }
5402
5403 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5404 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5405 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5406
5407 ret = __qseecom_check_app_exists(req, &app_id);
5408 if (ret) {
5409 pr_err(" scm call to check if app is loaded failed");
5410 return ret; /* scm call failed */
5411 }
5412 if (app_id) {
5413 pr_debug("App id %d (%s) already exists\n", app_id,
5414 (char *)(req.app_name));
5415 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5416 list_for_each_entry(entry,
5417 &qseecom.registered_app_list_head, list){
5418 if (entry->app_id == app_id) {
5419 app_arch = entry->app_arch;
5420 entry->ref_cnt++;
5421 found_app = true;
5422 break;
5423 }
5424 }
5425 spin_unlock_irqrestore(
5426 &qseecom.registered_app_list_lock, flags);
5427 data->client.app_id = app_id;
5428 query_req.app_id = app_id;
5429 if (app_arch) {
5430 data->client.app_arch = app_arch;
5431 query_req.app_arch = app_arch;
5432 } else {
5433 data->client.app_arch = 0;
5434 query_req.app_arch = 0;
5435 }
5436 strlcpy(data->client.app_name, query_req.app_name,
5437 MAX_APP_NAME_SIZE);
5438 /*
5439 * If app was loaded by appsbl before and was not registered,
5440 * regiser this app now.
5441 */
5442 if (!found_app) {
5443 pr_debug("Register app %d [%s] which was loaded before\n",
5444 ret, (char *)query_req.app_name);
5445 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5446 if (!entry) {
5447 pr_err("kmalloc for app entry failed\n");
5448 return -ENOMEM;
5449 }
5450 entry->app_id = app_id;
5451 entry->ref_cnt = 1;
5452 entry->app_arch = data->client.app_arch;
5453 strlcpy(entry->app_name, data->client.app_name,
5454 MAX_APP_NAME_SIZE);
5455 entry->app_blocked = false;
5456 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005457 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005458 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5459 flags);
5460 list_add_tail(&entry->list,
5461 &qseecom.registered_app_list_head);
5462 spin_unlock_irqrestore(
5463 &qseecom.registered_app_list_lock, flags);
5464 }
5465 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5466 pr_err("copy_to_user failed\n");
5467 return -EFAULT;
5468 }
5469 return -EEXIST; /* app already loaded */
5470 } else {
5471 return 0; /* app not loaded */
5472 }
5473}
5474
5475static int __qseecom_get_ce_pipe_info(
5476 enum qseecom_key_management_usage_type usage,
5477 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5478{
5479 int ret = -EINVAL;
5480 int i, j;
5481 struct qseecom_ce_info_use *p = NULL;
5482 int total = 0;
5483 struct qseecom_ce_pipe_entry *pcepipe;
5484
5485 switch (usage) {
5486 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5487 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5488 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5489 if (qseecom.support_fde) {
5490 p = qseecom.ce_info.fde;
5491 total = qseecom.ce_info.num_fde;
5492 } else {
5493 pr_err("system does not support fde\n");
5494 return -EINVAL;
5495 }
5496 break;
5497 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5498 if (qseecom.support_pfe) {
5499 p = qseecom.ce_info.pfe;
5500 total = qseecom.ce_info.num_pfe;
5501 } else {
5502 pr_err("system does not support pfe\n");
5503 return -EINVAL;
5504 }
5505 break;
5506 default:
5507 pr_err("unsupported usage %d\n", usage);
5508 return -EINVAL;
5509 }
5510
5511 for (j = 0; j < total; j++) {
5512 if (p->unit_num == unit) {
5513 pcepipe = p->ce_pipe_entry;
5514 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5515 (*ce_hw)[i] = pcepipe->ce_num;
5516 *pipe = pcepipe->ce_pipe_pair;
5517 pcepipe++;
5518 }
5519 ret = 0;
5520 break;
5521 }
5522 p++;
5523 }
5524 return ret;
5525}
5526
5527static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
5528 enum qseecom_key_management_usage_type usage,
5529 struct qseecom_key_generate_ireq *ireq)
5530{
5531 struct qseecom_command_scm_resp resp;
5532 int ret;
5533
5534 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5535 usage >= QSEOS_KM_USAGE_MAX) {
5536 pr_err("Error:: unsupported usage %d\n", usage);
5537 return -EFAULT;
5538 }
5539 ret = __qseecom_enable_clk(CLK_QSEE);
5540 if (ret)
5541 return ret;
5542
5543 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5544 ireq, sizeof(struct qseecom_key_generate_ireq),
5545 &resp, sizeof(resp));
5546 if (ret) {
5547 if (ret == -EINVAL &&
5548 resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5549 pr_debug("Key ID exists.\n");
5550 ret = 0;
5551 } else {
5552 pr_err("scm call to generate key failed : %d\n", ret);
5553 ret = -EFAULT;
5554 }
5555 goto generate_key_exit;
5556 }
5557
5558 switch (resp.result) {
5559 case QSEOS_RESULT_SUCCESS:
5560 break;
5561 case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
5562 pr_debug("Key ID exists.\n");
5563 break;
5564 case QSEOS_RESULT_INCOMPLETE:
5565 ret = __qseecom_process_incomplete_cmd(data, &resp);
5566 if (ret) {
5567 if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5568 pr_debug("Key ID exists.\n");
5569 ret = 0;
5570 } else {
5571 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5572 resp.result);
5573 }
5574 }
5575 break;
5576 case QSEOS_RESULT_FAILURE:
5577 default:
5578 pr_err("gen key scm call failed resp.result %d\n", resp.result);
5579 ret = -EINVAL;
5580 break;
5581 }
5582generate_key_exit:
5583 __qseecom_disable_clk(CLK_QSEE);
5584 return ret;
5585}
5586
/*
 * Issue the key-delete scm call for the given usage; the QSEE clock is
 * held across the call.  A MAX_ATTEMPT response (too many wrong-password
 * attempts) maps to -ERANGE so callers can distinguish it.
 *
 * NOTE(review): when qseecom_scm_call() itself fails, resp.result may not
 * have been written by the secure world before it is compared below —
 * same pattern as the other key helpers; confirm scm call semantics.
 *
 * Returns 0 on success, -ERANGE on max attempts, -EFAULT on usage/scm
 * errors, -EINVAL on a secure-world failure result.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5647
5648static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5649 enum qseecom_key_management_usage_type usage,
5650 struct qseecom_key_select_ireq *ireq)
5651{
5652 struct qseecom_command_scm_resp resp;
5653 int ret;
5654
5655 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5656 usage >= QSEOS_KM_USAGE_MAX) {
5657 pr_err("Error:: unsupported usage %d\n", usage);
5658 return -EFAULT;
5659 }
5660 ret = __qseecom_enable_clk(CLK_QSEE);
5661 if (ret)
5662 return ret;
5663
5664 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5665 ret = __qseecom_enable_clk(CLK_CE_DRV);
5666 if (ret)
5667 return ret;
5668 }
5669
5670 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5671 ireq, sizeof(struct qseecom_key_select_ireq),
5672 &resp, sizeof(struct qseecom_command_scm_resp));
5673 if (ret) {
5674 if (ret == -EINVAL &&
5675 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5676 pr_debug("Max attempts to input password reached.\n");
5677 ret = -ERANGE;
5678 } else if (ret == -EINVAL &&
5679 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5680 pr_debug("Set Key operation under processing...\n");
5681 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5682 } else {
5683 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5684 ret);
5685 ret = -EFAULT;
5686 }
5687 goto set_key_exit;
5688 }
5689
5690 switch (resp.result) {
5691 case QSEOS_RESULT_SUCCESS:
5692 break;
5693 case QSEOS_RESULT_INCOMPLETE:
5694 ret = __qseecom_process_incomplete_cmd(data, &resp);
5695 if (ret) {
5696 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5697 resp.result);
5698 if (resp.result ==
5699 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5700 pr_debug("Set Key operation under processing...\n");
5701 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5702 }
5703 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5704 pr_debug("Max attempts to input password reached.\n");
5705 ret = -ERANGE;
5706 }
5707 }
5708 break;
5709 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5710 pr_debug("Max attempts to input password reached.\n");
5711 ret = -ERANGE;
5712 break;
5713 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5714 pr_debug("Set Key operation under processing...\n");
5715 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5716 break;
5717 case QSEOS_RESULT_FAILURE:
5718 default:
5719 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5720 ret = -EINVAL;
5721 break;
5722 }
5723set_key_exit:
5724 __qseecom_disable_clk(CLK_QSEE);
5725 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5726 __qseecom_disable_clk(CLK_CE_DRV);
5727 return ret;
5728}
5729
/*
 * Update the user info (password binding) of the current key via the
 * key-userinfo-update scm call; the QSEE clock is held across the call.
 * PENDING_OPERATION responses are propagated so the caller can retry.
 *
 * NOTE(review): when qseecom_scm_call() fails, resp.result is compared
 * below although the secure world may not have written it — same pattern
 * as the other key helpers; confirm scm call semantics.
 *
 * Returns 0 on success, QSEOS_RESULT_FAIL_PENDING_OPERATION when the
 * operation is still in flight, -EFAULT/-EINVAL on errors.
 */
static int __qseecom_update_current_key_user_info(
			struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
					ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		/* Pending-operation takes precedence over the error return. */
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5791
5792
5793static int qseecom_enable_ice_setup(int usage)
5794{
5795 int ret = 0;
5796
5797 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5798 ret = qcom_ice_setup_ice_hw("ufs", true);
5799 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5800 ret = qcom_ice_setup_ice_hw("sdcc", true);
5801
5802 return ret;
5803}
5804
5805static int qseecom_disable_ice_setup(int usage)
5806{
5807 int ret = 0;
5808
5809 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5810 ret = qcom_ice_setup_ice_hw("ufs", false);
5811 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5812 ret = qcom_ice_setup_ice_hw("sdcc", false);
5813
5814 return ret;
5815}
5816
5817static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5818{
5819 struct qseecom_ce_info_use *pce_info_use, *p;
5820 int total = 0;
5821 int i;
5822
5823 switch (usage) {
5824 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5825 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5826 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5827 p = qseecom.ce_info.fde;
5828 total = qseecom.ce_info.num_fde;
5829 break;
5830 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5831 p = qseecom.ce_info.pfe;
5832 total = qseecom.ce_info.num_pfe;
5833 break;
5834 default:
5835 pr_err("unsupported usage %d\n", usage);
5836 return -EINVAL;
5837 }
5838
5839 pce_info_use = NULL;
5840
5841 for (i = 0; i < total; i++) {
5842 if (p->unit_num == unit) {
5843 pce_info_use = p;
5844 break;
5845 }
5846 p++;
5847 }
5848 if (!pce_info_use) {
5849 pr_err("can not find %d\n", unit);
5850 return -EINVAL;
5851 }
5852 return pce_info_use->num_ce_pipe_entries;
5853}
5854
5855static int qseecom_create_key(struct qseecom_dev_handle *data,
5856 void __user *argp)
5857{
5858 int i;
5859 uint32_t *ce_hw = NULL;
5860 uint32_t pipe = 0;
5861 int ret = 0;
5862 uint32_t flags = 0;
5863 struct qseecom_create_key_req create_key_req;
5864 struct qseecom_key_generate_ireq generate_key_ireq;
5865 struct qseecom_key_select_ireq set_key_ireq;
5866 uint32_t entries = 0;
5867
5868 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5869 if (ret) {
5870 pr_err("copy_from_user failed\n");
5871 return ret;
5872 }
5873
5874 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5875 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5876 pr_err("unsupported usage %d\n", create_key_req.usage);
5877 ret = -EFAULT;
5878 return ret;
5879 }
5880 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5881 create_key_req.usage);
5882 if (entries <= 0) {
5883 pr_err("no ce instance for usage %d instance %d\n",
5884 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5885 ret = -EINVAL;
5886 return ret;
5887 }
5888
5889 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5890 if (!ce_hw) {
5891 ret = -ENOMEM;
5892 return ret;
5893 }
5894 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5895 DEFAULT_CE_INFO_UNIT);
5896 if (ret) {
5897 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5898 ret = -EINVAL;
5899 goto free_buf;
5900 }
5901
5902 if (qseecom.fde_key_size)
5903 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5904 else
5905 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5906
5907 generate_key_ireq.flags = flags;
5908 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5909 memset((void *)generate_key_ireq.key_id,
5910 0, QSEECOM_KEY_ID_SIZE);
5911 memset((void *)generate_key_ireq.hash32,
5912 0, QSEECOM_HASH_SIZE);
5913 memcpy((void *)generate_key_ireq.key_id,
5914 (void *)key_id_array[create_key_req.usage].desc,
5915 QSEECOM_KEY_ID_SIZE);
5916 memcpy((void *)generate_key_ireq.hash32,
5917 (void *)create_key_req.hash32,
5918 QSEECOM_HASH_SIZE);
5919
5920 ret = __qseecom_generate_and_save_key(data,
5921 create_key_req.usage, &generate_key_ireq);
5922 if (ret) {
5923 pr_err("Failed to generate key on storage: %d\n", ret);
5924 goto free_buf;
5925 }
5926
5927 for (i = 0; i < entries; i++) {
5928 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5929 if (create_key_req.usage ==
5930 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5931 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5932 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5933
5934 } else if (create_key_req.usage ==
5935 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5936 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5937 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5938
5939 } else {
5940 set_key_ireq.ce = ce_hw[i];
5941 set_key_ireq.pipe = pipe;
5942 }
5943 set_key_ireq.flags = flags;
5944
5945 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5946 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5947 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5948 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5949 memcpy((void *)set_key_ireq.key_id,
5950 (void *)key_id_array[create_key_req.usage].desc,
5951 QSEECOM_KEY_ID_SIZE);
5952 memcpy((void *)set_key_ireq.hash32,
5953 (void *)create_key_req.hash32,
5954 QSEECOM_HASH_SIZE);
5955 /*
5956 * It will return false if it is GPCE based crypto instance or
5957 * ICE is setup properly
5958 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07005959 ret = qseecom_enable_ice_setup(create_key_req.usage);
5960 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005961 goto free_buf;
5962
5963 do {
5964 ret = __qseecom_set_clear_ce_key(data,
5965 create_key_req.usage,
5966 &set_key_ireq);
5967 /*
5968 * wait a little before calling scm again to let other
5969 * processes run
5970 */
5971 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
5972 msleep(50);
5973
5974 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
5975
5976 qseecom_disable_ice_setup(create_key_req.usage);
5977
5978 if (ret) {
5979 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
5980 pipe, ce_hw[i], ret);
5981 goto free_buf;
5982 } else {
5983 pr_err("Set the key successfully\n");
5984 if ((create_key_req.usage ==
5985 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
5986 (create_key_req.usage ==
5987 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
5988 goto free_buf;
5989 }
5990 }
5991
5992free_buf:
5993 kzfree(ce_hw);
5994 return ret;
5995}
5996
5997static int qseecom_wipe_key(struct qseecom_dev_handle *data,
5998 void __user *argp)
5999{
6000 uint32_t *ce_hw = NULL;
6001 uint32_t pipe = 0;
6002 int ret = 0;
6003 uint32_t flags = 0;
6004 int i, j;
6005 struct qseecom_wipe_key_req wipe_key_req;
6006 struct qseecom_key_delete_ireq delete_key_ireq;
6007 struct qseecom_key_select_ireq clear_key_ireq;
6008 uint32_t entries = 0;
6009
6010 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6011 if (ret) {
6012 pr_err("copy_from_user failed\n");
6013 return ret;
6014 }
6015
6016 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6017 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6018 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6019 ret = -EFAULT;
6020 return ret;
6021 }
6022
6023 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6024 wipe_key_req.usage);
6025 if (entries <= 0) {
6026 pr_err("no ce instance for usage %d instance %d\n",
6027 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6028 ret = -EINVAL;
6029 return ret;
6030 }
6031
6032 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6033 if (!ce_hw) {
6034 ret = -ENOMEM;
6035 return ret;
6036 }
6037
6038 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6039 DEFAULT_CE_INFO_UNIT);
6040 if (ret) {
6041 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6042 ret = -EINVAL;
6043 goto free_buf;
6044 }
6045
6046 if (wipe_key_req.wipe_key_flag) {
6047 delete_key_ireq.flags = flags;
6048 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6049 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6050 memcpy((void *)delete_key_ireq.key_id,
6051 (void *)key_id_array[wipe_key_req.usage].desc,
6052 QSEECOM_KEY_ID_SIZE);
6053 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6054
6055 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6056 &delete_key_ireq);
6057 if (ret) {
6058 pr_err("Failed to delete key from ssd storage: %d\n",
6059 ret);
6060 ret = -EFAULT;
6061 goto free_buf;
6062 }
6063 }
6064
6065 for (j = 0; j < entries; j++) {
6066 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6067 if (wipe_key_req.usage ==
6068 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6069 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6070 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6071 } else if (wipe_key_req.usage ==
6072 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6073 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6074 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6075 } else {
6076 clear_key_ireq.ce = ce_hw[j];
6077 clear_key_ireq.pipe = pipe;
6078 }
6079 clear_key_ireq.flags = flags;
6080 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6081 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6082 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6083 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6084
6085 /*
6086 * It will return false if it is GPCE based crypto instance or
6087 * ICE is setup properly
6088 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006089 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6090 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006091 goto free_buf;
6092
6093 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6094 &clear_key_ireq);
6095
6096 qseecom_disable_ice_setup(wipe_key_req.usage);
6097
6098 if (ret) {
6099 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6100 pipe, ce_hw[j], ret);
6101 ret = -EFAULT;
6102 goto free_buf;
6103 }
6104 }
6105
6106free_buf:
6107 kzfree(ce_hw);
6108 return ret;
6109}
6110
6111static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6112 void __user *argp)
6113{
6114 int ret = 0;
6115 uint32_t flags = 0;
6116 struct qseecom_update_key_userinfo_req update_key_req;
6117 struct qseecom_key_userinfo_update_ireq ireq;
6118
6119 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6120 if (ret) {
6121 pr_err("copy_from_user failed\n");
6122 return ret;
6123 }
6124
6125 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6126 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6127 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6128 return -EFAULT;
6129 }
6130
6131 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6132
6133 if (qseecom.fde_key_size)
6134 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6135 else
6136 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6137
6138 ireq.flags = flags;
6139 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6140 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6141 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6142 memcpy((void *)ireq.key_id,
6143 (void *)key_id_array[update_key_req.usage].desc,
6144 QSEECOM_KEY_ID_SIZE);
6145 memcpy((void *)ireq.current_hash32,
6146 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6147 memcpy((void *)ireq.new_hash32,
6148 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6149
6150 do {
6151 ret = __qseecom_update_current_key_user_info(data,
6152 update_key_req.usage,
6153 &ireq);
6154 /*
6155 * wait a little before calling scm again to let other
6156 * processes run
6157 */
6158 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6159 msleep(50);
6160
6161 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6162 if (ret) {
6163 pr_err("Failed to update key info: %d\n", ret);
6164 return ret;
6165 }
6166 return ret;
6167
6168}
6169static int qseecom_is_es_activated(void __user *argp)
6170{
Zhen Kong26e62742018-05-04 17:19:06 -07006171 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006172 struct qseecom_command_scm_resp resp;
6173 int ret;
6174
6175 if (qseecom.qsee_version < QSEE_VERSION_04) {
6176 pr_err("invalid qsee version\n");
6177 return -ENODEV;
6178 }
6179
6180 if (argp == NULL) {
6181 pr_err("arg is null\n");
6182 return -EINVAL;
6183 }
6184
6185 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6186 &req, sizeof(req), &resp, sizeof(resp));
6187 if (ret) {
6188 pr_err("scm_call failed\n");
6189 return ret;
6190 }
6191
6192 req.is_activated = resp.result;
6193 ret = copy_to_user(argp, &req, sizeof(req));
6194 if (ret) {
6195 pr_err("copy_to_user failed\n");
6196 return ret;
6197 }
6198
6199 return 0;
6200}
6201
6202static int qseecom_save_partition_hash(void __user *argp)
6203{
6204 struct qseecom_save_partition_hash_req req;
6205 struct qseecom_command_scm_resp resp;
6206 int ret;
6207
6208 memset(&resp, 0x00, sizeof(resp));
6209
6210 if (qseecom.qsee_version < QSEE_VERSION_04) {
6211 pr_err("invalid qsee version\n");
6212 return -ENODEV;
6213 }
6214
6215 if (argp == NULL) {
6216 pr_err("arg is null\n");
6217 return -EINVAL;
6218 }
6219
6220 ret = copy_from_user(&req, argp, sizeof(req));
6221 if (ret) {
6222 pr_err("copy_from_user failed\n");
6223 return ret;
6224 }
6225
6226 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6227 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6228 if (ret) {
6229 pr_err("qseecom_scm_call failed\n");
6230 return ret;
6231 }
6232
6233 return 0;
6234}
6235
6236static int qseecom_mdtp_cipher_dip(void __user *argp)
6237{
6238 struct qseecom_mdtp_cipher_dip_req req;
6239 u32 tzbuflenin, tzbuflenout;
6240 char *tzbufin = NULL, *tzbufout = NULL;
6241 struct scm_desc desc = {0};
6242 int ret;
6243
6244 do {
6245 /* Copy the parameters from userspace */
6246 if (argp == NULL) {
6247 pr_err("arg is null\n");
6248 ret = -EINVAL;
6249 break;
6250 }
6251
6252 ret = copy_from_user(&req, argp, sizeof(req));
6253 if (ret) {
6254 pr_err("copy_from_user failed, ret= %d\n", ret);
6255 break;
6256 }
6257
6258 if (req.in_buf == NULL || req.out_buf == NULL ||
6259 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6260 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6261 req.direction > 1) {
6262 pr_err("invalid parameters\n");
6263 ret = -EINVAL;
6264 break;
6265 }
6266
6267 /* Copy the input buffer from userspace to kernel space */
6268 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6269 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6270 if (!tzbufin) {
6271 pr_err("error allocating in buffer\n");
6272 ret = -ENOMEM;
6273 break;
6274 }
6275
6276 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6277 if (ret) {
6278 pr_err("copy_from_user failed, ret=%d\n", ret);
6279 break;
6280 }
6281
6282 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6283
6284 /* Prepare the output buffer in kernel space */
6285 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6286 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6287 if (!tzbufout) {
6288 pr_err("error allocating out buffer\n");
6289 ret = -ENOMEM;
6290 break;
6291 }
6292
6293 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6294
6295 /* Send the command to TZ */
6296 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6297 desc.args[0] = virt_to_phys(tzbufin);
6298 desc.args[1] = req.in_buf_size;
6299 desc.args[2] = virt_to_phys(tzbufout);
6300 desc.args[3] = req.out_buf_size;
6301 desc.args[4] = req.direction;
6302
6303 ret = __qseecom_enable_clk(CLK_QSEE);
6304 if (ret)
6305 break;
6306
6307 ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);
6308
6309 __qseecom_disable_clk(CLK_QSEE);
6310
6311 if (ret) {
6312 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6313 ret);
6314 break;
6315 }
6316
6317 /* Copy the output buffer from kernel space to userspace */
6318 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6319 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6320 if (ret) {
6321 pr_err("copy_to_user failed, ret=%d\n", ret);
6322 break;
6323 }
6324 } while (0);
6325
6326 kzfree(tzbufin);
6327 kzfree(tzbufout);
6328
6329 return ret;
6330}
6331
/*
 * Validate a QTEEC request before it is handed to TZ: the client must
 * be an app client with a shared buffer, both cmd and response buffers
 * must lie wholly inside that shared buffer, and none of the
 * pointer/length arithmetic may overflow.
 *
 * Returns 0 when the request is acceptable, otherwise a negative errno.
 * The order of the checks is significant: each later check relies on
 * the overflow guards before it, and the first failing check decides
 * which diagnostic is logged.
 */
static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
			struct qseecom_qteec_req *req)
{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	/* QTEEC commands are only valid for app clients */
	if (data->type != QSEECOM_CLIENT_APP)
		return -EFAULT;

	/* Guard the req_len + resp_len sum used below */
	if (req->req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if (req->req_len + req->resp_len > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
		(req->req_len + req->resp_len), data->client.sb_length);
		return -ENOMEM;
	}

	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* Both buffers must start inside the client's shared buffer */
	if (((uintptr_t)req->req_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->req_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if (((uintptr_t)req->resp_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if ((req->req_len == 0) || (req->resp_len == 0)) {
		pr_err("cmd buf lengtgh/response buf length not valid\n");
		return -EINVAL;
	}

	/* Guard the ptr + len sums used in the final range check */
	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
		pr_err("Integer overflow in req_len & req_ptr\n");
		return -EINVAL;
	}

	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_ptr\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Finally: both buffers must also END inside the shared buffer */
	if ((((uintptr_t)req->req_ptr + req->req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_ptr + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
6406
/*
 * Build a physically contiguous descriptor buffer for a pre-allocated
 * (secure heap) fd: a uint32_t entry count followed by one
 * qseecom_sg_entry {phys addr, len} per scatterlist element.  The
 * buffer's kernel address, DMA address, and size are recorded in
 * data->client.sec_buf_fd[fd_idx] so the caller can patch them into
 * the command buffer and free the allocation on cleanup.
 *
 * Returns 0 on success, -ENOMEM on a bad index or allocation failure.
 */
static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
				uint32_t fd_idx, struct sg_table *sg_ptr)
{
	struct scatterlist *sg = sg_ptr->sgl;
	struct qseecom_sg_entry *sg_entry;
	void *buf;
	uint i;
	size_t size;
	dma_addr_t coh_pmem;

	if (fd_idx >= MAX_ION_FD) {
		pr_err("fd_idx [%d] is invalid\n", fd_idx);
		return -ENOMEM;
	}
	/*
	 * Allocate a buffer, populate it with number of entry plus
	 * each sg entry's phy addr and length; then return the
	 * phy_addr of the buffer.
	 */
	size = sizeof(uint32_t) +
		sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
	/*
	 * NOTE(review): this rounds up to a page boundary but always adds
	 * a full extra page when 'size' is already page-aligned;
	 * PAGE_ALIGN(size) may have been intended -- confirm before
	 * changing the allocation size.
	 */
	size = (size + PAGE_SIZE) & PAGE_MASK;
	buf = dma_alloc_coherent(qseecom.pdev,
			size, &coh_pmem, GFP_KERNEL);
	if (buf == NULL) {
		pr_err("failed to alloc memory for sg buf\n");
		return -ENOMEM;
	}
	/* First word: entry count; then one packed entry per sg element */
	*(uint32_t *)buf = sg_ptr->nents;
	sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
	for (i = 0; i < sg_ptr->nents; i++) {
		/* assumes DMA addresses fit in 32 bits -- TODO confirm */
		sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
		sg_entry->len = sg->length;
		sg_entry++;
		sg = sg_next(sg);
	}
	/* Record the allocation so cleanup can dma_free_coherent() it */
	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
	data->client.sec_buf_fd[fd_idx].vbase = buf;
	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
	data->client.sec_buf_fd[fd_idx].size = size;
	return 0;
}
6449
/*
 * Walk the ion fds attached to a QTEEC modfd request and patch the
 * command buffer in place: the uint32_t at each fd's cmd_buf_offset is
 * overwritten with the physical address of the corresponding ion
 * buffer (setup), or cleared again (cleanup).  A pre-existing value of
 * 0 at the offset selects the pre-allocated secure-heap path handled
 * by __qseecom_qteec_handle_pre_alc_fd().
 *
 * @req:     modfd request whose req_ptr has already been remapped to a
 *           kernel virtual address by the caller
 * @data:    client handle owning the shared buffer and sec_buf_fd table
 * @cleanup: false to populate the buffer before the SCM call, true to
 *           undo the patching (and free secure buffers) afterwards
 *
 * Returns 0 on success, -EINVAL/-ENOMEM on failure.
 */
static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
			struct qseecom_dev_handle *data, bool cleanup)
{
	struct ion_handle *ihandle;
	int ret = 0;
	int i = 0;
	uint32_t *update;
	struct sg_table *sg_ptr = NULL;
	struct scatterlist *sg;
	struct qseecom_param_memref *memref;

	if (req == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	for (i = 0; i < MAX_ION_FD; i++) {
		if (req->ifd_data[i].fd > 0) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			/* Offset must leave room for the uint32_t patch */
			if ((req->req_len < sizeof(uint32_t)) ||
				(req->ifd_data[i].cmd_buf_offset >
				req->req_len - sizeof(uint32_t))) {
				pr_err("Invalid offset/req len 0x%x/0x%x\n",
					req->req_len,
					req->ifd_data[i].cmd_buf_offset);
				return -EINVAL;
			}
			update = (uint32_t *)((char *) req->req_ptr +
				req->ifd_data[i].cmd_buf_offset);
			if (!update) {
				pr_err("update pointer is NULL\n");
				return -EINVAL;
			}
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg == NULL) {
			pr_err("sg is NULL\n");
			goto err;
		}
		if ((sg_ptr->nents == 0) || (sg->length == 0)) {
			pr_err("Num of scat entr (%d)or length(%d) invalid\n",
					sg_ptr->nents, sg->length);
			goto err;
		}
		/* clean up buf for pre-allocated fd */
		if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
			(*update)) {
			if (data->client.sec_buf_fd[i].vbase)
				dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			memset((void *)update, 0,
				sizeof(struct qseecom_param_memref));
			memset(&(data->client.sec_buf_fd[i]), 0,
				sizeof(struct qseecom_sec_buf_fd_info));
			goto clean;
		}

		if (*update == 0) {
			/* update buf for pre-allocated fd from secure heap*/
			ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
				sg_ptr);
			if (ret) {
				pr_err("Failed to handle buf for fd[%d]\n", i);
				goto err;
			}
			/* Patch in a {buffer, size} memref descriptor */
			memref = (struct qseecom_param_memref *)update;
			memref->buffer =
				(uint32_t)(data->client.sec_buf_fd[i].pbase);
			memref->size =
				(uint32_t)(data->client.sec_buf_fd[i].size);
		} else {
			/* update buf for fd from non-secure qseecom heap */
			if (sg_ptr->nents != 1) {
				pr_err("Num of scat entr (%d) invalid\n",
					sg_ptr->nents);
				goto err;
			}
			if (cleanup)
				*update = 0;
			else
				*update = (uint32_t)sg_dma_address(sg_ptr->sgl);
		}
clean:
		if (cleanup) {
			/* Pull TZ's writes back into the CPU's view */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
				ihandle, NULL, sg->length,
				ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Flush CPU writes out before TZ reads the buffer */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
				ihandle, NULL, sg->length,
				ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record sg info so TZ can whitelist this buffer */
			data->sglistinfo_ptr[i].indexAndFlags =
				SGLISTINFO_SET_INDEX_FLAG(
				(sg_ptr->nents == 1), 0,
				req->ifd_data[i].cmd_buf_offset);
			data->sglistinfo_ptr[i].sizeOrCount =
				(sg_ptr->nents == 1) ?
				sg->length : sg_ptr->nents;
			data->sglist_cnt = i + 1;
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
	/*
	 * NOTE(review): every failure after the ion import funnels here
	 * and is reported as -ENOMEM, even when the underlying error was
	 * a validation or cache-op failure -- confirm callers don't care.
	 */
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
6582
/*
 * Common worker for QTEEC open-session / close-session /
 * request-cancellation: validates the request, looks up the registered
 * app, remaps the shared-buffer pointers, patches in any ion fds,
 * issues the SCM call, and processes the TZ response.
 *
 * @data:   client handle (must be a QSEECOM_CLIENT_APP with its shared
 *          buffer mapped)
 * @req:    QTEEC request whose req/resp pointers lie inside the
 *          client's shared buffer (user-space addresses on entry)
 * @cmd_id: QSEOS_TEE_* command to send
 *
 * Returns 0 on success or a negative errno on failure.  All failures
 * after the fd-patching step go through the 'exit' label so the cache
 * invalidate and the cleanup patch pass still run; 'ret2' keeps the
 * cleanup status separate from the command status.
 */
static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req, uint32_t cmd_id)
{
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int ret2 = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = __qseecom_qteec_validate_msg(data, req);
	if (ret)
		return ret;

	/* Keep the original user pointers for the phys translation below */
	req_ptr = req->req_ptr;
	resp_ptr = req->resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* Rewrite req/resp to kernel VAs so fd patching can write them */
	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->req_ptr);
	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->resp_ptr);

	/* Only these two commands carry modfd payloads to patch */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, false);
		if (ret)
			return ret;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy 32-bit ireq layout for pre-4.0 QSEE */
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req->req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req->resp_len;
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req->req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req->resp_len;
		/* A 32-bit TA cannot address buffers at or above 4G */
		if ((data->client.app_arch == ELFCLASS32) &&
			((ireq_64bit.req_ptr >=
				PHY_ADDR_4G - ireq_64bit.req_len) ||
			(ireq_64bit.resp_ptr >=
				PHY_ADDR_4G - ireq_64bit.resp_len))){
			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
				data->client.app_name, data->client.app_id);
			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
				ireq_64bit.req_ptr, ireq_64bit.req_len,
				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
			return -EFAULT;
		}
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
	}
	/* First word of the ireq is the command id (whitelist variant
	 * when supported for open-session)
	 */
	if (qseecom.whitelist_support == true
		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
	else
		*(uint32_t *)cmd_buf = cmd_id;

	reqd_len_sb_in = req->req_len + req->resp_len;
	/* Flush CPU writes to the shared buffer before TZ reads it */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/* Invalidate so the CPU sees TZ's writes, even on failure */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret);
		return ret2;
	}

	/* Undo the fd patching done before the SCM call */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret2 = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, true);
		if (ret2)
			return ret2;
	}
	return ret;
}
6745
6746static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6747 void __user *argp)
6748{
6749 struct qseecom_qteec_modfd_req req;
6750 int ret = 0;
6751
6752 ret = copy_from_user(&req, argp,
6753 sizeof(struct qseecom_qteec_modfd_req));
6754 if (ret) {
6755 pr_err("copy_from_user failed\n");
6756 return ret;
6757 }
6758 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6759 QSEOS_TEE_OPEN_SESSION);
6760
6761 return ret;
6762}
6763
6764static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6765 void __user *argp)
6766{
6767 struct qseecom_qteec_req req;
6768 int ret = 0;
6769
6770 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6771 if (ret) {
6772 pr_err("copy_from_user failed\n");
6773 return ret;
6774 }
6775 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6776 return ret;
6777}
6778
/*
 * ioctl handler for QTEEC invoke-command with ion fds: validates the
 * request, patches fd physical addresses into the command buffer,
 * issues the SCM call, then unpatches and invalidates the shared
 * buffer.  Largely parallels __qseecom_qteec_issue_cmd() but always
 * runs the fd patch pass.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * NOTE(review): unlike __qseecom_qteec_issue_cmd(), the error returns
 * after the initial __qseecom_update_qteec_req_buf(..., false) call
 * (cache-op or scm_call failure) skip the cleanup patch pass -- confirm
 * whether that leak of patched state is intentional.
 */
static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	struct qseecom_qteec_modfd_req req;
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int i = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = copy_from_user(&req, argp,
			sizeof(struct qseecom_qteec_modfd_req));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}
	ret = __qseecom_qteec_validate_msg(data,
					(struct qseecom_qteec_req *)(&req));
	if (ret)
		return ret;
	/* Keep the original user pointers for the phys translation below */
	req_ptr = req.req_ptr;
	resp_ptr = req.resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (req.ifd_data[i].fd) {
			if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
				return -EINVAL;
		}
	}
	/* Rewrite req/resp to kernel VAs so fd patching can write them */
	req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.req_ptr);
	req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.resp_ptr);
	ret = __qseecom_update_qteec_req_buf(&req, data, false);
	if (ret)
		return ret;

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy 32-bit ireq layout for pre-4.0 QSEE */
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req.req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req.req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
	}
	reqd_len_sb_in = req.req_len + req.resp_len;
	/* First word of the ireq selects the (whitelist) command id */
	if (qseecom.whitelist_support == true)
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
	else
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;

	/* Flush CPU writes to the shared buffer before TZ reads it */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* Undo the fd patching and free any secure-heap descriptors */
	ret = __qseecom_update_qteec_req_buf(&req, data, true);
	if (ret)
		return ret;

	/* Invalidate so the CPU sees TZ's writes to the shared buffer */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}
	return 0;
}
6929
6930static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6931 void __user *argp)
6932{
6933 struct qseecom_qteec_modfd_req req;
6934 int ret = 0;
6935
6936 ret = copy_from_user(&req, argp,
6937 sizeof(struct qseecom_qteec_modfd_req));
6938 if (ret) {
6939 pr_err("copy_from_user failed\n");
6940 return ret;
6941 }
6942 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6943 QSEOS_TEE_REQUEST_CANCELLATION);
6944
6945 return ret;
6946}
6947
6948static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6949{
6950 if (data->sglist_cnt) {
6951 memset(data->sglistinfo_ptr, 0,
6952 SGLISTINFO_TABLE_SIZE);
6953 data->sglist_cnt = 0;
6954 }
6955}
6956
6957static inline long qseecom_ioctl(struct file *file,
6958 unsigned int cmd, unsigned long arg)
6959{
6960 int ret = 0;
6961 struct qseecom_dev_handle *data = file->private_data;
6962 void __user *argp = (void __user *) arg;
6963 bool perf_enabled = false;
6964
6965 if (!data) {
6966 pr_err("Invalid/uninitialized device handle\n");
6967 return -EINVAL;
6968 }
6969
6970 if (data->abort) {
6971 pr_err("Aborting qseecom driver\n");
6972 return -ENODEV;
6973 }
6974
6975 switch (cmd) {
6976 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
6977 if (data->type != QSEECOM_GENERIC) {
6978 pr_err("reg lstnr req: invalid handle (%d)\n",
6979 data->type);
6980 ret = -EINVAL;
6981 break;
6982 }
6983 pr_debug("ioctl register_listener_req()\n");
6984 mutex_lock(&app_access_lock);
6985 atomic_inc(&data->ioctl_count);
6986 data->type = QSEECOM_LISTENER_SERVICE;
6987 ret = qseecom_register_listener(data, argp);
6988 atomic_dec(&data->ioctl_count);
6989 wake_up_all(&data->abort_wq);
6990 mutex_unlock(&app_access_lock);
6991 if (ret)
6992 pr_err("failed qseecom_register_listener: %d\n", ret);
6993 break;
6994 }
Neeraj Sonib30ac1f2018-04-17 14:48:42 +05306995 case QSEECOM_IOCTL_SET_ICE_INFO: {
6996 struct qseecom_ice_data_t ice_data;
6997
6998 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
6999 if (ret) {
7000 pr_err("copy_from_user failed\n");
7001 return -EFAULT;
7002 }
7003 qcom_ice_set_fde_flag(ice_data.flag);
7004 break;
7005 }
7006
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007007 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7008 if ((data->listener.id == 0) ||
7009 (data->type != QSEECOM_LISTENER_SERVICE)) {
7010 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7011 data->type, data->listener.id);
7012 ret = -EINVAL;
7013 break;
7014 }
7015 pr_debug("ioctl unregister_listener_req()\n");
Zhen Kong26e62742018-05-04 17:19:06 -07007016 __qseecom_listener_abort_all(1);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007017 mutex_lock(&app_access_lock);
7018 atomic_inc(&data->ioctl_count);
7019 ret = qseecom_unregister_listener(data);
7020 atomic_dec(&data->ioctl_count);
7021 wake_up_all(&data->abort_wq);
7022 mutex_unlock(&app_access_lock);
Zhen Kong26e62742018-05-04 17:19:06 -07007023 __qseecom_listener_abort_all(0);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007024 if (ret)
7025 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7026 break;
7027 }
7028 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7029 if ((data->client.app_id == 0) ||
7030 (data->type != QSEECOM_CLIENT_APP)) {
7031 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7032 data->type, data->client.app_id);
7033 ret = -EINVAL;
7034 break;
7035 }
7036 /* Only one client allowed here at a time */
7037 mutex_lock(&app_access_lock);
7038 if (qseecom.support_bus_scaling) {
7039 /* register bus bw in case the client doesn't do it */
7040 if (!data->mode) {
7041 mutex_lock(&qsee_bw_mutex);
7042 __qseecom_register_bus_bandwidth_needs(
7043 data, HIGH);
7044 mutex_unlock(&qsee_bw_mutex);
7045 }
7046 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7047 if (ret) {
7048 pr_err("Failed to set bw.\n");
7049 ret = -EINVAL;
7050 mutex_unlock(&app_access_lock);
7051 break;
7052 }
7053 }
7054 /*
7055 * On targets where crypto clock is handled by HLOS,
7056 * if clk_access_cnt is zero and perf_enabled is false,
7057 * then the crypto clock was not enabled before sending cmd to
7058 * tz, qseecom will enable the clock to avoid service failure.
7059 */
7060 if (!qseecom.no_clock_support &&
7061 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7062 pr_debug("ce clock is not enabled!\n");
7063 ret = qseecom_perf_enable(data);
7064 if (ret) {
7065 pr_err("Failed to vote for clock with err %d\n",
7066 ret);
7067 mutex_unlock(&app_access_lock);
7068 ret = -EINVAL;
7069 break;
7070 }
7071 perf_enabled = true;
7072 }
7073 atomic_inc(&data->ioctl_count);
7074 ret = qseecom_send_cmd(data, argp);
7075 if (qseecom.support_bus_scaling)
7076 __qseecom_add_bw_scale_down_timer(
7077 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7078 if (perf_enabled) {
7079 qsee_disable_clock_vote(data, CLK_DFAB);
7080 qsee_disable_clock_vote(data, CLK_SFPB);
7081 }
7082 atomic_dec(&data->ioctl_count);
7083 wake_up_all(&data->abort_wq);
7084 mutex_unlock(&app_access_lock);
7085 if (ret)
7086 pr_err("failed qseecom_send_cmd: %d\n", ret);
7087 break;
7088 }
7089 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7090 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7091 if ((data->client.app_id == 0) ||
7092 (data->type != QSEECOM_CLIENT_APP)) {
7093 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7094 data->type, data->client.app_id);
7095 ret = -EINVAL;
7096 break;
7097 }
7098 /* Only one client allowed here at a time */
7099 mutex_lock(&app_access_lock);
7100 if (qseecom.support_bus_scaling) {
7101 if (!data->mode) {
7102 mutex_lock(&qsee_bw_mutex);
7103 __qseecom_register_bus_bandwidth_needs(
7104 data, HIGH);
7105 mutex_unlock(&qsee_bw_mutex);
7106 }
7107 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7108 if (ret) {
7109 pr_err("Failed to set bw.\n");
7110 mutex_unlock(&app_access_lock);
7111 ret = -EINVAL;
7112 break;
7113 }
7114 }
7115 /*
7116 * On targets where crypto clock is handled by HLOS,
7117 * if clk_access_cnt is zero and perf_enabled is false,
7118 * then the crypto clock was not enabled before sending cmd to
7119 * tz, qseecom will enable the clock to avoid service failure.
7120 */
7121 if (!qseecom.no_clock_support &&
7122 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7123 pr_debug("ce clock is not enabled!\n");
7124 ret = qseecom_perf_enable(data);
7125 if (ret) {
7126 pr_err("Failed to vote for clock with err %d\n",
7127 ret);
7128 mutex_unlock(&app_access_lock);
7129 ret = -EINVAL;
7130 break;
7131 }
7132 perf_enabled = true;
7133 }
7134 atomic_inc(&data->ioctl_count);
7135 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7136 ret = qseecom_send_modfd_cmd(data, argp);
7137 else
7138 ret = qseecom_send_modfd_cmd_64(data, argp);
7139 if (qseecom.support_bus_scaling)
7140 __qseecom_add_bw_scale_down_timer(
7141 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7142 if (perf_enabled) {
7143 qsee_disable_clock_vote(data, CLK_DFAB);
7144 qsee_disable_clock_vote(data, CLK_SFPB);
7145 }
7146 atomic_dec(&data->ioctl_count);
7147 wake_up_all(&data->abort_wq);
7148 mutex_unlock(&app_access_lock);
7149 if (ret)
7150 pr_err("failed qseecom_send_cmd: %d\n", ret);
7151 __qseecom_clean_data_sglistinfo(data);
7152 break;
7153 }
7154 case QSEECOM_IOCTL_RECEIVE_REQ: {
7155 if ((data->listener.id == 0) ||
7156 (data->type != QSEECOM_LISTENER_SERVICE)) {
7157 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7158 data->type, data->listener.id);
7159 ret = -EINVAL;
7160 break;
7161 }
7162 atomic_inc(&data->ioctl_count);
7163 ret = qseecom_receive_req(data);
7164 atomic_dec(&data->ioctl_count);
7165 wake_up_all(&data->abort_wq);
7166 if (ret && (ret != -ERESTARTSYS))
7167 pr_err("failed qseecom_receive_req: %d\n", ret);
7168 break;
7169 }
7170 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7171 if ((data->listener.id == 0) ||
7172 (data->type != QSEECOM_LISTENER_SERVICE)) {
7173 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7174 data->type, data->listener.id);
7175 ret = -EINVAL;
7176 break;
7177 }
7178 atomic_inc(&data->ioctl_count);
7179 if (!qseecom.qsee_reentrancy_support)
7180 ret = qseecom_send_resp();
7181 else
7182 ret = qseecom_reentrancy_send_resp(data);
7183 atomic_dec(&data->ioctl_count);
7184 wake_up_all(&data->abort_wq);
7185 if (ret)
7186 pr_err("failed qseecom_send_resp: %d\n", ret);
7187 break;
7188 }
7189 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7190 if ((data->type != QSEECOM_CLIENT_APP) &&
7191 (data->type != QSEECOM_GENERIC) &&
7192 (data->type != QSEECOM_SECURE_SERVICE)) {
7193 pr_err("set mem param req: invalid handle (%d)\n",
7194 data->type);
7195 ret = -EINVAL;
7196 break;
7197 }
7198 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7199 mutex_lock(&app_access_lock);
7200 atomic_inc(&data->ioctl_count);
7201 ret = qseecom_set_client_mem_param(data, argp);
7202 atomic_dec(&data->ioctl_count);
7203 mutex_unlock(&app_access_lock);
7204 if (ret)
7205 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7206 ret);
7207 break;
7208 }
7209 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7210 if ((data->type != QSEECOM_GENERIC) &&
7211 (data->type != QSEECOM_CLIENT_APP)) {
7212 pr_err("load app req: invalid handle (%d)\n",
7213 data->type);
7214 ret = -EINVAL;
7215 break;
7216 }
7217 data->type = QSEECOM_CLIENT_APP;
7218 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7219 mutex_lock(&app_access_lock);
7220 atomic_inc(&data->ioctl_count);
7221 ret = qseecom_load_app(data, argp);
7222 atomic_dec(&data->ioctl_count);
7223 mutex_unlock(&app_access_lock);
7224 if (ret)
7225 pr_err("failed load_app request: %d\n", ret);
7226 break;
7227 }
7228 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7229 if ((data->client.app_id == 0) ||
7230 (data->type != QSEECOM_CLIENT_APP)) {
7231 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7232 data->type, data->client.app_id);
7233 ret = -EINVAL;
7234 break;
7235 }
7236 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7237 mutex_lock(&app_access_lock);
7238 atomic_inc(&data->ioctl_count);
7239 ret = qseecom_unload_app(data, false);
7240 atomic_dec(&data->ioctl_count);
7241 mutex_unlock(&app_access_lock);
7242 if (ret)
7243 pr_err("failed unload_app request: %d\n", ret);
7244 break;
7245 }
7246 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7247 atomic_inc(&data->ioctl_count);
7248 ret = qseecom_get_qseos_version(data, argp);
7249 if (ret)
7250 pr_err("qseecom_get_qseos_version: %d\n", ret);
7251 atomic_dec(&data->ioctl_count);
7252 break;
7253 }
7254 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7255 if ((data->type != QSEECOM_GENERIC) &&
7256 (data->type != QSEECOM_CLIENT_APP)) {
7257 pr_err("perf enable req: invalid handle (%d)\n",
7258 data->type);
7259 ret = -EINVAL;
7260 break;
7261 }
7262 if ((data->type == QSEECOM_CLIENT_APP) &&
7263 (data->client.app_id == 0)) {
7264 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7265 data->type, data->client.app_id);
7266 ret = -EINVAL;
7267 break;
7268 }
7269 atomic_inc(&data->ioctl_count);
7270 if (qseecom.support_bus_scaling) {
7271 mutex_lock(&qsee_bw_mutex);
7272 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7273 mutex_unlock(&qsee_bw_mutex);
7274 } else {
7275 ret = qseecom_perf_enable(data);
7276 if (ret)
7277 pr_err("Fail to vote for clocks %d\n", ret);
7278 }
7279 atomic_dec(&data->ioctl_count);
7280 break;
7281 }
7282 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7283 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7284 (data->type != QSEECOM_CLIENT_APP)) {
7285 pr_err("perf disable req: invalid handle (%d)\n",
7286 data->type);
7287 ret = -EINVAL;
7288 break;
7289 }
7290 if ((data->type == QSEECOM_CLIENT_APP) &&
7291 (data->client.app_id == 0)) {
7292 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7293 data->type, data->client.app_id);
7294 ret = -EINVAL;
7295 break;
7296 }
7297 atomic_inc(&data->ioctl_count);
7298 if (!qseecom.support_bus_scaling) {
7299 qsee_disable_clock_vote(data, CLK_DFAB);
7300 qsee_disable_clock_vote(data, CLK_SFPB);
7301 } else {
7302 mutex_lock(&qsee_bw_mutex);
7303 qseecom_unregister_bus_bandwidth_needs(data);
7304 mutex_unlock(&qsee_bw_mutex);
7305 }
7306 atomic_dec(&data->ioctl_count);
7307 break;
7308 }
7309
7310 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7311 /* If crypto clock is not handled by HLOS, return directly. */
7312 if (qseecom.no_clock_support) {
7313 pr_debug("crypto clock is not handled by HLOS\n");
7314 break;
7315 }
7316 if ((data->client.app_id == 0) ||
7317 (data->type != QSEECOM_CLIENT_APP)) {
7318 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7319 data->type, data->client.app_id);
7320 ret = -EINVAL;
7321 break;
7322 }
7323 atomic_inc(&data->ioctl_count);
7324 ret = qseecom_scale_bus_bandwidth(data, argp);
7325 atomic_dec(&data->ioctl_count);
7326 break;
7327 }
7328 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7329 if (data->type != QSEECOM_GENERIC) {
7330 pr_err("load ext elf req: invalid client handle (%d)\n",
7331 data->type);
7332 ret = -EINVAL;
7333 break;
7334 }
7335 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7336 data->released = true;
7337 mutex_lock(&app_access_lock);
7338 atomic_inc(&data->ioctl_count);
7339 ret = qseecom_load_external_elf(data, argp);
7340 atomic_dec(&data->ioctl_count);
7341 mutex_unlock(&app_access_lock);
7342 if (ret)
7343 pr_err("failed load_external_elf request: %d\n", ret);
7344 break;
7345 }
7346 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7347 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7348 pr_err("unload ext elf req: invalid handle (%d)\n",
7349 data->type);
7350 ret = -EINVAL;
7351 break;
7352 }
7353 data->released = true;
7354 mutex_lock(&app_access_lock);
7355 atomic_inc(&data->ioctl_count);
7356 ret = qseecom_unload_external_elf(data);
7357 atomic_dec(&data->ioctl_count);
7358 mutex_unlock(&app_access_lock);
7359 if (ret)
7360 pr_err("failed unload_app request: %d\n", ret);
7361 break;
7362 }
7363 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7364 data->type = QSEECOM_CLIENT_APP;
7365 mutex_lock(&app_access_lock);
7366 atomic_inc(&data->ioctl_count);
7367 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7368 ret = qseecom_query_app_loaded(data, argp);
7369 atomic_dec(&data->ioctl_count);
7370 mutex_unlock(&app_access_lock);
7371 break;
7372 }
7373 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7374 if (data->type != QSEECOM_GENERIC) {
7375 pr_err("send cmd svc req: invalid handle (%d)\n",
7376 data->type);
7377 ret = -EINVAL;
7378 break;
7379 }
7380 data->type = QSEECOM_SECURE_SERVICE;
7381 if (qseecom.qsee_version < QSEE_VERSION_03) {
7382 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7383 qseecom.qsee_version);
7384 return -EINVAL;
7385 }
7386 mutex_lock(&app_access_lock);
7387 atomic_inc(&data->ioctl_count);
7388 ret = qseecom_send_service_cmd(data, argp);
7389 atomic_dec(&data->ioctl_count);
7390 mutex_unlock(&app_access_lock);
7391 break;
7392 }
7393 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7394 if (!(qseecom.support_pfe || qseecom.support_fde))
7395 pr_err("Features requiring key init not supported\n");
7396 if (data->type != QSEECOM_GENERIC) {
7397 pr_err("create key req: invalid handle (%d)\n",
7398 data->type);
7399 ret = -EINVAL;
7400 break;
7401 }
7402 if (qseecom.qsee_version < QSEE_VERSION_05) {
7403 pr_err("Create Key feature unsupported: qsee ver %u\n",
7404 qseecom.qsee_version);
7405 return -EINVAL;
7406 }
7407 data->released = true;
7408 mutex_lock(&app_access_lock);
7409 atomic_inc(&data->ioctl_count);
7410 ret = qseecom_create_key(data, argp);
7411 if (ret)
7412 pr_err("failed to create encryption key: %d\n", ret);
7413
7414 atomic_dec(&data->ioctl_count);
7415 mutex_unlock(&app_access_lock);
7416 break;
7417 }
7418 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7419 if (!(qseecom.support_pfe || qseecom.support_fde))
7420 pr_err("Features requiring key init not supported\n");
7421 if (data->type != QSEECOM_GENERIC) {
7422 pr_err("wipe key req: invalid handle (%d)\n",
7423 data->type);
7424 ret = -EINVAL;
7425 break;
7426 }
7427 if (qseecom.qsee_version < QSEE_VERSION_05) {
7428 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7429 qseecom.qsee_version);
7430 return -EINVAL;
7431 }
7432 data->released = true;
7433 mutex_lock(&app_access_lock);
7434 atomic_inc(&data->ioctl_count);
7435 ret = qseecom_wipe_key(data, argp);
7436 if (ret)
7437 pr_err("failed to wipe encryption key: %d\n", ret);
7438 atomic_dec(&data->ioctl_count);
7439 mutex_unlock(&app_access_lock);
7440 break;
7441 }
7442 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7443 if (!(qseecom.support_pfe || qseecom.support_fde))
7444 pr_err("Features requiring key init not supported\n");
7445 if (data->type != QSEECOM_GENERIC) {
7446 pr_err("update key req: invalid handle (%d)\n",
7447 data->type);
7448 ret = -EINVAL;
7449 break;
7450 }
7451 if (qseecom.qsee_version < QSEE_VERSION_05) {
7452 pr_err("Update Key feature unsupported in qsee ver %u\n",
7453 qseecom.qsee_version);
7454 return -EINVAL;
7455 }
7456 data->released = true;
7457 mutex_lock(&app_access_lock);
7458 atomic_inc(&data->ioctl_count);
7459 ret = qseecom_update_key_user_info(data, argp);
7460 if (ret)
7461 pr_err("failed to update key user info: %d\n", ret);
7462 atomic_dec(&data->ioctl_count);
7463 mutex_unlock(&app_access_lock);
7464 break;
7465 }
7466 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7467 if (data->type != QSEECOM_GENERIC) {
7468 pr_err("save part hash req: invalid handle (%d)\n",
7469 data->type);
7470 ret = -EINVAL;
7471 break;
7472 }
7473 data->released = true;
7474 mutex_lock(&app_access_lock);
7475 atomic_inc(&data->ioctl_count);
7476 ret = qseecom_save_partition_hash(argp);
7477 atomic_dec(&data->ioctl_count);
7478 mutex_unlock(&app_access_lock);
7479 break;
7480 }
7481 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7482 if (data->type != QSEECOM_GENERIC) {
7483 pr_err("ES activated req: invalid handle (%d)\n",
7484 data->type);
7485 ret = -EINVAL;
7486 break;
7487 }
7488 data->released = true;
7489 mutex_lock(&app_access_lock);
7490 atomic_inc(&data->ioctl_count);
7491 ret = qseecom_is_es_activated(argp);
7492 atomic_dec(&data->ioctl_count);
7493 mutex_unlock(&app_access_lock);
7494 break;
7495 }
7496 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7497 if (data->type != QSEECOM_GENERIC) {
7498 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7499 data->type);
7500 ret = -EINVAL;
7501 break;
7502 }
7503 data->released = true;
7504 mutex_lock(&app_access_lock);
7505 atomic_inc(&data->ioctl_count);
7506 ret = qseecom_mdtp_cipher_dip(argp);
7507 atomic_dec(&data->ioctl_count);
7508 mutex_unlock(&app_access_lock);
7509 break;
7510 }
7511 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7512 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7513 if ((data->listener.id == 0) ||
7514 (data->type != QSEECOM_LISTENER_SERVICE)) {
7515 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7516 data->type, data->listener.id);
7517 ret = -EINVAL;
7518 break;
7519 }
7520 atomic_inc(&data->ioctl_count);
7521 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7522 ret = qseecom_send_modfd_resp(data, argp);
7523 else
7524 ret = qseecom_send_modfd_resp_64(data, argp);
7525 atomic_dec(&data->ioctl_count);
7526 wake_up_all(&data->abort_wq);
7527 if (ret)
7528 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7529 __qseecom_clean_data_sglistinfo(data);
7530 break;
7531 }
7532 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7533 if ((data->client.app_id == 0) ||
7534 (data->type != QSEECOM_CLIENT_APP)) {
7535 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7536 data->type, data->client.app_id);
7537 ret = -EINVAL;
7538 break;
7539 }
7540 if (qseecom.qsee_version < QSEE_VERSION_40) {
7541 pr_err("GP feature unsupported: qsee ver %u\n",
7542 qseecom.qsee_version);
7543 return -EINVAL;
7544 }
7545 /* Only one client allowed here at a time */
7546 mutex_lock(&app_access_lock);
7547 atomic_inc(&data->ioctl_count);
7548 ret = qseecom_qteec_open_session(data, argp);
7549 atomic_dec(&data->ioctl_count);
7550 wake_up_all(&data->abort_wq);
7551 mutex_unlock(&app_access_lock);
7552 if (ret)
7553 pr_err("failed open_session_cmd: %d\n", ret);
7554 __qseecom_clean_data_sglistinfo(data);
7555 break;
7556 }
7557 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7558 if ((data->client.app_id == 0) ||
7559 (data->type != QSEECOM_CLIENT_APP)) {
7560 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7561 data->type, data->client.app_id);
7562 ret = -EINVAL;
7563 break;
7564 }
7565 if (qseecom.qsee_version < QSEE_VERSION_40) {
7566 pr_err("GP feature unsupported: qsee ver %u\n",
7567 qseecom.qsee_version);
7568 return -EINVAL;
7569 }
7570 /* Only one client allowed here at a time */
7571 mutex_lock(&app_access_lock);
7572 atomic_inc(&data->ioctl_count);
7573 ret = qseecom_qteec_close_session(data, argp);
7574 atomic_dec(&data->ioctl_count);
7575 wake_up_all(&data->abort_wq);
7576 mutex_unlock(&app_access_lock);
7577 if (ret)
7578 pr_err("failed close_session_cmd: %d\n", ret);
7579 break;
7580 }
7581 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7582 if ((data->client.app_id == 0) ||
7583 (data->type != QSEECOM_CLIENT_APP)) {
7584 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7585 data->type, data->client.app_id);
7586 ret = -EINVAL;
7587 break;
7588 }
7589 if (qseecom.qsee_version < QSEE_VERSION_40) {
7590 pr_err("GP feature unsupported: qsee ver %u\n",
7591 qseecom.qsee_version);
7592 return -EINVAL;
7593 }
7594 /* Only one client allowed here at a time */
7595 mutex_lock(&app_access_lock);
7596 atomic_inc(&data->ioctl_count);
7597 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7598 atomic_dec(&data->ioctl_count);
7599 wake_up_all(&data->abort_wq);
7600 mutex_unlock(&app_access_lock);
7601 if (ret)
7602 pr_err("failed Invoke cmd: %d\n", ret);
7603 __qseecom_clean_data_sglistinfo(data);
7604 break;
7605 }
7606 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7607 if ((data->client.app_id == 0) ||
7608 (data->type != QSEECOM_CLIENT_APP)) {
7609 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7610 data->type, data->client.app_id);
7611 ret = -EINVAL;
7612 break;
7613 }
7614 if (qseecom.qsee_version < QSEE_VERSION_40) {
7615 pr_err("GP feature unsupported: qsee ver %u\n",
7616 qseecom.qsee_version);
7617 return -EINVAL;
7618 }
7619 /* Only one client allowed here at a time */
7620 mutex_lock(&app_access_lock);
7621 atomic_inc(&data->ioctl_count);
7622 ret = qseecom_qteec_request_cancellation(data, argp);
7623 atomic_dec(&data->ioctl_count);
7624 wake_up_all(&data->abort_wq);
7625 mutex_unlock(&app_access_lock);
7626 if (ret)
7627 pr_err("failed request_cancellation: %d\n", ret);
7628 break;
7629 }
7630 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7631 atomic_inc(&data->ioctl_count);
7632 ret = qseecom_get_ce_info(data, argp);
7633 if (ret)
7634 pr_err("failed get fde ce pipe info: %d\n", ret);
7635 atomic_dec(&data->ioctl_count);
7636 break;
7637 }
7638 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7639 atomic_inc(&data->ioctl_count);
7640 ret = qseecom_free_ce_info(data, argp);
7641 if (ret)
7642 pr_err("failed get fde ce pipe info: %d\n", ret);
7643 atomic_dec(&data->ioctl_count);
7644 break;
7645 }
7646 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7647 atomic_inc(&data->ioctl_count);
7648 ret = qseecom_query_ce_info(data, argp);
7649 if (ret)
7650 pr_err("failed get fde ce pipe info: %d\n", ret);
7651 atomic_dec(&data->ioctl_count);
7652 break;
7653 }
7654 default:
7655 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7656 return -EINVAL;
7657 }
7658 return ret;
7659}
7660
7661static int qseecom_open(struct inode *inode, struct file *file)
7662{
7663 int ret = 0;
7664 struct qseecom_dev_handle *data;
7665
7666 data = kzalloc(sizeof(*data), GFP_KERNEL);
7667 if (!data)
7668 return -ENOMEM;
7669 file->private_data = data;
7670 data->abort = 0;
7671 data->type = QSEECOM_GENERIC;
7672 data->released = false;
7673 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7674 data->mode = INACTIVE;
7675 init_waitqueue_head(&data->abort_wq);
7676 atomic_set(&data->ioctl_count, 0);
7677 return ret;
7678}
7679
/*
 * Tear down a per-client handle on the last close of the file.
 *
 * If the client never went through an explicit release path
 * (data->released == false), clean up according to the handle type:
 * unregister a listener, unload a client app, or unmap shared ion
 * memory. Then drop any outstanding bus-bandwidth registration or
 * clock votes, and free the handle.
 *
 * Returns the result of the type-specific cleanup (0 if none ran).
 */
static int qseecom_release(struct inode *inode, struct file *file)
{
	struct qseecom_dev_handle *data = file->private_data;
	int ret = 0;

	if (data->released == false) {
		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
			data->type, data->mode, data);
		switch (data->type) {
		case QSEECOM_LISTENER_SERVICE:
			/*
			 * Raise the listener-abort flag around the
			 * unregister so blocked listener waits are woken,
			 * then clear it once the listener is gone.
			 */
			__qseecom_listener_abort_all(1);
			mutex_lock(&app_access_lock);
			ret = qseecom_unregister_listener(data);
			mutex_unlock(&app_access_lock);
			__qseecom_listener_abort_all(0);
			break;
		case QSEECOM_CLIENT_APP:
			mutex_lock(&app_access_lock);
			/* true: this is the app-crashed/forced-unload path */
			ret = qseecom_unload_app(data, true);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_SECURE_SERVICE:
		case QSEECOM_GENERIC:
			ret = qseecom_unmap_ion_allocated_memory(data);
			if (ret)
				pr_err("Ion Unmap failed\n");
			break;
		case QSEECOM_UNAVAILABLE_CLIENT_APP:
			/* External ELF already unloaded; nothing to do. */
			break;
		default:
			pr_err("Unsupported clnt_handle_type %d",
				data->type);
			break;
		}
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		if (data->mode != INACTIVE) {
			qseecom_unregister_bus_bandwidth_needs(data);
			/* Last active client gone: drop the bus request. */
			if (qseecom.cumulative_mode == INACTIVE) {
				ret = __qseecom_set_msm_bus_request(INACTIVE);
				if (ret)
					pr_err("Fail to scale down bus\n");
			}
		}
		mutex_unlock(&qsee_bw_mutex);
	} else {
		/* Drop any clock votes this client still holds. */
		if (data->fast_load_enabled == true)
			qsee_disable_clock_vote(data, CLK_SFPB);
		if (data->perf_enabled == true)
			qsee_disable_clock_vote(data, CLK_DFAB);
	}
	kfree(data);

	return ret;
}
7737
7738#ifdef CONFIG_COMPAT
7739#include "compat_qseecom.c"
7740#else
7741#define compat_qseecom_ioctl NULL
7742#endif
7743
/* File operations for /dev/qseecom; compat_ioctl is NULL when
 * CONFIG_COMPAT is not set (see the #ifdef block above).
 */
static const struct file_operations qseecom_fops = {
		.owner = THIS_MODULE,
		.unlocked_ioctl = qseecom_ioctl,
		.compat_ioctl = compat_qseecom_ioctl,
		.open = qseecom_open,
		.release = qseecom_release
};
7751
7752static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7753{
7754 int rc = 0;
7755 struct device *pdev;
7756 struct qseecom_clk *qclk;
7757 char *core_clk_src = NULL;
7758 char *core_clk = NULL;
7759 char *iface_clk = NULL;
7760 char *bus_clk = NULL;
7761
7762 switch (ce) {
7763 case CLK_QSEE: {
7764 core_clk_src = "core_clk_src";
7765 core_clk = "core_clk";
7766 iface_clk = "iface_clk";
7767 bus_clk = "bus_clk";
7768 qclk = &qseecom.qsee;
7769 qclk->instance = CLK_QSEE;
7770 break;
7771 };
7772 case CLK_CE_DRV: {
7773 core_clk_src = "ce_drv_core_clk_src";
7774 core_clk = "ce_drv_core_clk";
7775 iface_clk = "ce_drv_iface_clk";
7776 bus_clk = "ce_drv_bus_clk";
7777 qclk = &qseecom.ce_drv;
7778 qclk->instance = CLK_CE_DRV;
7779 break;
7780 };
7781 default:
7782 pr_err("Invalid ce hw instance: %d!\n", ce);
7783 return -EIO;
7784 }
7785
7786 if (qseecom.no_clock_support) {
7787 qclk->ce_core_clk = NULL;
7788 qclk->ce_clk = NULL;
7789 qclk->ce_bus_clk = NULL;
7790 qclk->ce_core_src_clk = NULL;
7791 return 0;
7792 }
7793
7794 pdev = qseecom.pdev;
7795
7796 /* Get CE3 src core clk. */
7797 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7798 if (!IS_ERR(qclk->ce_core_src_clk)) {
7799 rc = clk_set_rate(qclk->ce_core_src_clk,
7800 qseecom.ce_opp_freq_hz);
7801 if (rc) {
7802 clk_put(qclk->ce_core_src_clk);
7803 qclk->ce_core_src_clk = NULL;
7804 pr_err("Unable to set the core src clk @%uMhz.\n",
7805 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7806 return -EIO;
7807 }
7808 } else {
7809 pr_warn("Unable to get CE core src clk, set to NULL\n");
7810 qclk->ce_core_src_clk = NULL;
7811 }
7812
7813 /* Get CE core clk */
7814 qclk->ce_core_clk = clk_get(pdev, core_clk);
7815 if (IS_ERR(qclk->ce_core_clk)) {
7816 rc = PTR_ERR(qclk->ce_core_clk);
7817 pr_err("Unable to get CE core clk\n");
7818 if (qclk->ce_core_src_clk != NULL)
7819 clk_put(qclk->ce_core_src_clk);
7820 return -EIO;
7821 }
7822
7823 /* Get CE Interface clk */
7824 qclk->ce_clk = clk_get(pdev, iface_clk);
7825 if (IS_ERR(qclk->ce_clk)) {
7826 rc = PTR_ERR(qclk->ce_clk);
7827 pr_err("Unable to get CE interface clk\n");
7828 if (qclk->ce_core_src_clk != NULL)
7829 clk_put(qclk->ce_core_src_clk);
7830 clk_put(qclk->ce_core_clk);
7831 return -EIO;
7832 }
7833
7834 /* Get CE AXI clk */
7835 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7836 if (IS_ERR(qclk->ce_bus_clk)) {
7837 rc = PTR_ERR(qclk->ce_bus_clk);
7838 pr_err("Unable to get CE BUS interface clk\n");
7839 if (qclk->ce_core_src_clk != NULL)
7840 clk_put(qclk->ce_core_src_clk);
7841 clk_put(qclk->ce_core_clk);
7842 clk_put(qclk->ce_clk);
7843 return -EIO;
7844 }
7845
7846 return rc;
7847}
7848
7849static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7850{
7851 struct qseecom_clk *qclk;
7852
7853 if (ce == CLK_QSEE)
7854 qclk = &qseecom.qsee;
7855 else
7856 qclk = &qseecom.ce_drv;
7857
7858 if (qclk->ce_clk != NULL) {
7859 clk_put(qclk->ce_clk);
7860 qclk->ce_clk = NULL;
7861 }
7862 if (qclk->ce_core_clk != NULL) {
7863 clk_put(qclk->ce_core_clk);
7864 qclk->ce_core_clk = NULL;
7865 }
7866 if (qclk->ce_bus_clk != NULL) {
7867 clk_put(qclk->ce_bus_clk);
7868 qclk->ce_bus_clk = NULL;
7869 }
7870 if (qclk->ce_core_src_clk != NULL) {
7871 clk_put(qclk->ce_core_src_clk);
7872 qclk->ce_core_src_clk = NULL;
7873 }
7874 qclk->instance = CLK_INVALID;
7875}
7876
7877static int qseecom_retrieve_ce_data(struct platform_device *pdev)
7878{
7879 int rc = 0;
7880 uint32_t hlos_num_ce_hw_instances;
7881 uint32_t disk_encrypt_pipe;
7882 uint32_t file_encrypt_pipe;
Zhen Kongffec45c2017-10-18 14:05:53 -07007883 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007884 int i;
7885 const int *tbl;
7886 int size;
7887 int entry;
7888 struct qseecom_crypto_info *pfde_tbl = NULL;
7889 struct qseecom_crypto_info *p;
7890 int tbl_size;
7891 int j;
7892 bool old_db = true;
7893 struct qseecom_ce_info_use *pce_info_use;
7894 uint32_t *unit_tbl = NULL;
7895 int total_units = 0;
7896 struct qseecom_ce_pipe_entry *pce_entry;
7897
7898 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
7899 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
7900
7901 if (of_property_read_u32((&pdev->dev)->of_node,
7902 "qcom,qsee-ce-hw-instance",
7903 &qseecom.ce_info.qsee_ce_hw_instance)) {
7904 pr_err("Fail to get qsee ce hw instance information.\n");
7905 rc = -EINVAL;
7906 goto out;
7907 } else {
7908 pr_debug("qsee-ce-hw-instance=0x%x\n",
7909 qseecom.ce_info.qsee_ce_hw_instance);
7910 }
7911
7912 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
7913 "qcom,support-fde");
7914 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
7915 "qcom,support-pfe");
7916
7917 if (!qseecom.support_pfe && !qseecom.support_fde) {
7918 pr_warn("Device does not support PFE/FDE");
7919 goto out;
7920 }
7921
7922 if (qseecom.support_fde)
7923 tbl = of_get_property((&pdev->dev)->of_node,
7924 "qcom,full-disk-encrypt-info", &size);
7925 else
7926 tbl = NULL;
7927 if (tbl) {
7928 old_db = false;
7929 if (size % sizeof(struct qseecom_crypto_info)) {
7930 pr_err("full-disk-encrypt-info tbl size(%d)\n",
7931 size);
7932 rc = -EINVAL;
7933 goto out;
7934 }
7935 tbl_size = size / sizeof
7936 (struct qseecom_crypto_info);
7937
7938 pfde_tbl = kzalloc(size, GFP_KERNEL);
7939 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
7940 total_units = 0;
7941
7942 if (!pfde_tbl || !unit_tbl) {
7943 pr_err("failed to alloc memory\n");
7944 rc = -ENOMEM;
7945 goto out;
7946 }
7947 if (of_property_read_u32_array((&pdev->dev)->of_node,
7948 "qcom,full-disk-encrypt-info",
7949 (u32 *)pfde_tbl, size/sizeof(u32))) {
7950 pr_err("failed to read full-disk-encrypt-info tbl\n");
7951 rc = -EINVAL;
7952 goto out;
7953 }
7954
7955 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7956 for (j = 0; j < total_units; j++) {
7957 if (p->unit_num == *(unit_tbl + j))
7958 break;
7959 }
7960 if (j == total_units) {
7961 *(unit_tbl + total_units) = p->unit_num;
7962 total_units++;
7963 }
7964 }
7965
7966 qseecom.ce_info.num_fde = total_units;
7967 pce_info_use = qseecom.ce_info.fde = kcalloc(
7968 total_units, sizeof(struct qseecom_ce_info_use),
7969 GFP_KERNEL);
7970 if (!pce_info_use) {
7971 pr_err("failed to alloc memory\n");
7972 rc = -ENOMEM;
7973 goto out;
7974 }
7975
7976 for (j = 0; j < total_units; j++, pce_info_use++) {
7977 pce_info_use->unit_num = *(unit_tbl + j);
7978 pce_info_use->alloc = false;
7979 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
7980 pce_info_use->num_ce_pipe_entries = 0;
7981 pce_info_use->ce_pipe_entry = NULL;
7982 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7983 if (p->unit_num == pce_info_use->unit_num)
7984 pce_info_use->num_ce_pipe_entries++;
7985 }
7986
7987 entry = pce_info_use->num_ce_pipe_entries;
7988 pce_entry = pce_info_use->ce_pipe_entry =
7989 kcalloc(entry,
7990 sizeof(struct qseecom_ce_pipe_entry),
7991 GFP_KERNEL);
7992 if (pce_entry == NULL) {
7993 pr_err("failed to alloc memory\n");
7994 rc = -ENOMEM;
7995 goto out;
7996 }
7997
7998 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7999 if (p->unit_num == pce_info_use->unit_num) {
8000 pce_entry->ce_num = p->ce;
8001 pce_entry->ce_pipe_pair =
8002 p->pipe_pair;
8003 pce_entry->valid = true;
8004 pce_entry++;
8005 }
8006 }
8007 }
8008 kfree(unit_tbl);
8009 unit_tbl = NULL;
8010 kfree(pfde_tbl);
8011 pfde_tbl = NULL;
8012 }
8013
8014 if (qseecom.support_pfe)
8015 tbl = of_get_property((&pdev->dev)->of_node,
8016 "qcom,per-file-encrypt-info", &size);
8017 else
8018 tbl = NULL;
8019 if (tbl) {
8020 old_db = false;
8021 if (size % sizeof(struct qseecom_crypto_info)) {
8022 pr_err("per-file-encrypt-info tbl size(%d)\n",
8023 size);
8024 rc = -EINVAL;
8025 goto out;
8026 }
8027 tbl_size = size / sizeof
8028 (struct qseecom_crypto_info);
8029
8030 pfde_tbl = kzalloc(size, GFP_KERNEL);
8031 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8032 total_units = 0;
8033 if (!pfde_tbl || !unit_tbl) {
8034 pr_err("failed to alloc memory\n");
8035 rc = -ENOMEM;
8036 goto out;
8037 }
8038 if (of_property_read_u32_array((&pdev->dev)->of_node,
8039 "qcom,per-file-encrypt-info",
8040 (u32 *)pfde_tbl, size/sizeof(u32))) {
8041 pr_err("failed to read per-file-encrypt-info tbl\n");
8042 rc = -EINVAL;
8043 goto out;
8044 }
8045
8046 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8047 for (j = 0; j < total_units; j++) {
8048 if (p->unit_num == *(unit_tbl + j))
8049 break;
8050 }
8051 if (j == total_units) {
8052 *(unit_tbl + total_units) = p->unit_num;
8053 total_units++;
8054 }
8055 }
8056
8057 qseecom.ce_info.num_pfe = total_units;
8058 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8059 total_units, sizeof(struct qseecom_ce_info_use),
8060 GFP_KERNEL);
8061 if (!pce_info_use) {
8062 pr_err("failed to alloc memory\n");
8063 rc = -ENOMEM;
8064 goto out;
8065 }
8066
8067 for (j = 0; j < total_units; j++, pce_info_use++) {
8068 pce_info_use->unit_num = *(unit_tbl + j);
8069 pce_info_use->alloc = false;
8070 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8071 pce_info_use->num_ce_pipe_entries = 0;
8072 pce_info_use->ce_pipe_entry = NULL;
8073 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8074 if (p->unit_num == pce_info_use->unit_num)
8075 pce_info_use->num_ce_pipe_entries++;
8076 }
8077
8078 entry = pce_info_use->num_ce_pipe_entries;
8079 pce_entry = pce_info_use->ce_pipe_entry =
8080 kcalloc(entry,
8081 sizeof(struct qseecom_ce_pipe_entry),
8082 GFP_KERNEL);
8083 if (pce_entry == NULL) {
8084 pr_err("failed to alloc memory\n");
8085 rc = -ENOMEM;
8086 goto out;
8087 }
8088
8089 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8090 if (p->unit_num == pce_info_use->unit_num) {
8091 pce_entry->ce_num = p->ce;
8092 pce_entry->ce_pipe_pair =
8093 p->pipe_pair;
8094 pce_entry->valid = true;
8095 pce_entry++;
8096 }
8097 }
8098 }
8099 kfree(unit_tbl);
8100 unit_tbl = NULL;
8101 kfree(pfde_tbl);
8102 pfde_tbl = NULL;
8103 }
8104
8105 if (!old_db)
8106 goto out1;
8107
8108 if (of_property_read_bool((&pdev->dev)->of_node,
8109 "qcom,support-multiple-ce-hw-instance")) {
8110 if (of_property_read_u32((&pdev->dev)->of_node,
8111 "qcom,hlos-num-ce-hw-instances",
8112 &hlos_num_ce_hw_instances)) {
8113 pr_err("Fail: get hlos number of ce hw instance\n");
8114 rc = -EINVAL;
8115 goto out;
8116 }
8117 } else {
8118 hlos_num_ce_hw_instances = 1;
8119 }
8120
8121 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8122 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8123 MAX_CE_PIPE_PAIR_PER_UNIT);
8124 rc = -EINVAL;
8125 goto out;
8126 }
8127
8128 if (of_property_read_u32_array((&pdev->dev)->of_node,
8129 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8130 hlos_num_ce_hw_instances)) {
8131 pr_err("Fail: get hlos ce hw instance info\n");
8132 rc = -EINVAL;
8133 goto out;
8134 }
8135
8136 if (qseecom.support_fde) {
8137 pce_info_use = qseecom.ce_info.fde =
8138 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8139 if (!pce_info_use) {
8140 pr_err("failed to alloc memory\n");
8141 rc = -ENOMEM;
8142 goto out;
8143 }
8144 /* by default for old db */
8145 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8146 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8147 pce_info_use->alloc = false;
8148 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8149 pce_info_use->ce_pipe_entry = NULL;
8150 if (of_property_read_u32((&pdev->dev)->of_node,
8151 "qcom,disk-encrypt-pipe-pair",
8152 &disk_encrypt_pipe)) {
8153 pr_err("Fail to get FDE pipe information.\n");
8154 rc = -EINVAL;
8155 goto out;
8156 } else {
8157 pr_debug("disk-encrypt-pipe-pair=0x%x",
8158 disk_encrypt_pipe);
8159 }
8160 entry = pce_info_use->num_ce_pipe_entries =
8161 hlos_num_ce_hw_instances;
8162 pce_entry = pce_info_use->ce_pipe_entry =
8163 kcalloc(entry,
8164 sizeof(struct qseecom_ce_pipe_entry),
8165 GFP_KERNEL);
8166 if (pce_entry == NULL) {
8167 pr_err("failed to alloc memory\n");
8168 rc = -ENOMEM;
8169 goto out;
8170 }
8171 for (i = 0; i < entry; i++) {
8172 pce_entry->ce_num = hlos_ce_hw_instance[i];
8173 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8174 pce_entry->valid = 1;
8175 pce_entry++;
8176 }
8177 } else {
8178 pr_warn("Device does not support FDE");
8179 disk_encrypt_pipe = 0xff;
8180 }
8181 if (qseecom.support_pfe) {
8182 pce_info_use = qseecom.ce_info.pfe =
8183 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8184 if (!pce_info_use) {
8185 pr_err("failed to alloc memory\n");
8186 rc = -ENOMEM;
8187 goto out;
8188 }
8189 /* by default for old db */
8190 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8191 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8192 pce_info_use->alloc = false;
8193 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8194 pce_info_use->ce_pipe_entry = NULL;
8195
8196 if (of_property_read_u32((&pdev->dev)->of_node,
8197 "qcom,file-encrypt-pipe-pair",
8198 &file_encrypt_pipe)) {
8199 pr_err("Fail to get PFE pipe information.\n");
8200 rc = -EINVAL;
8201 goto out;
8202 } else {
8203 pr_debug("file-encrypt-pipe-pair=0x%x",
8204 file_encrypt_pipe);
8205 }
8206 entry = pce_info_use->num_ce_pipe_entries =
8207 hlos_num_ce_hw_instances;
8208 pce_entry = pce_info_use->ce_pipe_entry =
8209 kcalloc(entry,
8210 sizeof(struct qseecom_ce_pipe_entry),
8211 GFP_KERNEL);
8212 if (pce_entry == NULL) {
8213 pr_err("failed to alloc memory\n");
8214 rc = -ENOMEM;
8215 goto out;
8216 }
8217 for (i = 0; i < entry; i++) {
8218 pce_entry->ce_num = hlos_ce_hw_instance[i];
8219 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8220 pce_entry->valid = 1;
8221 pce_entry++;
8222 }
8223 } else {
8224 pr_warn("Device does not support PFE");
8225 file_encrypt_pipe = 0xff;
8226 }
8227
8228out1:
8229 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8230 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8231out:
8232 if (rc) {
8233 if (qseecom.ce_info.fde) {
8234 pce_info_use = qseecom.ce_info.fde;
8235 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8236 pce_entry = pce_info_use->ce_pipe_entry;
8237 kfree(pce_entry);
8238 pce_info_use++;
8239 }
8240 }
8241 kfree(qseecom.ce_info.fde);
8242 qseecom.ce_info.fde = NULL;
8243 if (qseecom.ce_info.pfe) {
8244 pce_info_use = qseecom.ce_info.pfe;
8245 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8246 pce_entry = pce_info_use->ce_pipe_entry;
8247 kfree(pce_entry);
8248 pce_info_use++;
8249 }
8250 }
8251 kfree(qseecom.ce_info.pfe);
8252 qseecom.ce_info.pfe = NULL;
8253 }
8254 kfree(unit_tbl);
8255 kfree(pfde_tbl);
8256 return rc;
8257}
8258
8259static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8260 void __user *argp)
8261{
8262 struct qseecom_ce_info_req req;
8263 struct qseecom_ce_info_req *pinfo = &req;
8264 int ret = 0;
8265 int i;
8266 unsigned int entries;
8267 struct qseecom_ce_info_use *pce_info_use, *p;
8268 int total = 0;
8269 bool found = false;
8270 struct qseecom_ce_pipe_entry *pce_entry;
8271
8272 ret = copy_from_user(pinfo, argp,
8273 sizeof(struct qseecom_ce_info_req));
8274 if (ret) {
8275 pr_err("copy_from_user failed\n");
8276 return ret;
8277 }
8278
8279 switch (pinfo->usage) {
8280 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8281 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8282 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8283 if (qseecom.support_fde) {
8284 p = qseecom.ce_info.fde;
8285 total = qseecom.ce_info.num_fde;
8286 } else {
8287 pr_err("system does not support fde\n");
8288 return -EINVAL;
8289 }
8290 break;
8291 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8292 if (qseecom.support_pfe) {
8293 p = qseecom.ce_info.pfe;
8294 total = qseecom.ce_info.num_pfe;
8295 } else {
8296 pr_err("system does not support pfe\n");
8297 return -EINVAL;
8298 }
8299 break;
8300 default:
8301 pr_err("unsupported usage %d\n", pinfo->usage);
8302 return -EINVAL;
8303 }
8304
8305 pce_info_use = NULL;
8306 for (i = 0; i < total; i++) {
8307 if (!p->alloc)
8308 pce_info_use = p;
8309 else if (!memcmp(p->handle, pinfo->handle,
8310 MAX_CE_INFO_HANDLE_SIZE)) {
8311 pce_info_use = p;
8312 found = true;
8313 break;
8314 }
8315 p++;
8316 }
8317
8318 if (pce_info_use == NULL)
8319 return -EBUSY;
8320
8321 pinfo->unit_num = pce_info_use->unit_num;
8322 if (!pce_info_use->alloc) {
8323 pce_info_use->alloc = true;
8324 memcpy(pce_info_use->handle,
8325 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8326 }
8327 if (pce_info_use->num_ce_pipe_entries >
8328 MAX_CE_PIPE_PAIR_PER_UNIT)
8329 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8330 else
8331 entries = pce_info_use->num_ce_pipe_entries;
8332 pinfo->num_ce_pipe_entries = entries;
8333 pce_entry = pce_info_use->ce_pipe_entry;
8334 for (i = 0; i < entries; i++, pce_entry++)
8335 pinfo->ce_pipe_entry[i] = *pce_entry;
8336 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8337 pinfo->ce_pipe_entry[i].valid = 0;
8338
8339 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8340 pr_err("copy_to_user failed\n");
8341 ret = -EFAULT;
8342 }
8343 return ret;
8344}
8345
8346static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8347 void __user *argp)
8348{
8349 struct qseecom_ce_info_req req;
8350 struct qseecom_ce_info_req *pinfo = &req;
8351 int ret = 0;
8352 struct qseecom_ce_info_use *p;
8353 int total = 0;
8354 int i;
8355 bool found = false;
8356
8357 ret = copy_from_user(pinfo, argp,
8358 sizeof(struct qseecom_ce_info_req));
8359 if (ret)
8360 return ret;
8361
8362 switch (pinfo->usage) {
8363 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8364 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8365 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8366 if (qseecom.support_fde) {
8367 p = qseecom.ce_info.fde;
8368 total = qseecom.ce_info.num_fde;
8369 } else {
8370 pr_err("system does not support fde\n");
8371 return -EINVAL;
8372 }
8373 break;
8374 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8375 if (qseecom.support_pfe) {
8376 p = qseecom.ce_info.pfe;
8377 total = qseecom.ce_info.num_pfe;
8378 } else {
8379 pr_err("system does not support pfe\n");
8380 return -EINVAL;
8381 }
8382 break;
8383 default:
8384 pr_err("unsupported usage %d\n", pinfo->usage);
8385 return -EINVAL;
8386 }
8387
8388 for (i = 0; i < total; i++) {
8389 if (p->alloc &&
8390 !memcmp(p->handle, pinfo->handle,
8391 MAX_CE_INFO_HANDLE_SIZE)) {
8392 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8393 p->alloc = false;
8394 found = true;
8395 break;
8396 }
8397 p++;
8398 }
8399 return ret;
8400}
8401
8402static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8403 void __user *argp)
8404{
8405 struct qseecom_ce_info_req req;
8406 struct qseecom_ce_info_req *pinfo = &req;
8407 int ret = 0;
8408 int i;
8409 unsigned int entries;
8410 struct qseecom_ce_info_use *pce_info_use, *p;
8411 int total = 0;
8412 bool found = false;
8413 struct qseecom_ce_pipe_entry *pce_entry;
8414
8415 ret = copy_from_user(pinfo, argp,
8416 sizeof(struct qseecom_ce_info_req));
8417 if (ret)
8418 return ret;
8419
8420 switch (pinfo->usage) {
8421 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8422 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8423 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8424 if (qseecom.support_fde) {
8425 p = qseecom.ce_info.fde;
8426 total = qseecom.ce_info.num_fde;
8427 } else {
8428 pr_err("system does not support fde\n");
8429 return -EINVAL;
8430 }
8431 break;
8432 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8433 if (qseecom.support_pfe) {
8434 p = qseecom.ce_info.pfe;
8435 total = qseecom.ce_info.num_pfe;
8436 } else {
8437 pr_err("system does not support pfe\n");
8438 return -EINVAL;
8439 }
8440 break;
8441 default:
8442 pr_err("unsupported usage %d\n", pinfo->usage);
8443 return -EINVAL;
8444 }
8445
8446 pce_info_use = NULL;
8447 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8448 pinfo->num_ce_pipe_entries = 0;
8449 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8450 pinfo->ce_pipe_entry[i].valid = 0;
8451
8452 for (i = 0; i < total; i++) {
8453
8454 if (p->alloc && !memcmp(p->handle,
8455 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8456 pce_info_use = p;
8457 found = true;
8458 break;
8459 }
8460 p++;
8461 }
8462 if (!pce_info_use)
8463 goto out;
8464 pinfo->unit_num = pce_info_use->unit_num;
8465 if (pce_info_use->num_ce_pipe_entries >
8466 MAX_CE_PIPE_PAIR_PER_UNIT)
8467 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8468 else
8469 entries = pce_info_use->num_ce_pipe_entries;
8470 pinfo->num_ce_pipe_entries = entries;
8471 pce_entry = pce_info_use->ce_pipe_entry;
8472 for (i = 0; i < entries; i++, pce_entry++)
8473 pinfo->ce_pipe_entry[i] = *pce_entry;
8474 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8475 pinfo->ce_pipe_entry[i].valid = 0;
8476out:
8477 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8478 pr_err("copy_to_user failed\n");
8479 ret = -EFAULT;
8480 }
8481 return ret;
8482}
8483
8484/*
8485 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8486 * then whitelist feature is not supported.
8487 */
8488static int qseecom_check_whitelist_feature(void)
8489{
8490 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8491
8492 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8493}
8494
/*
 * Platform probe: set up the qseecom char device, query the QSEE version
 * from TZ, create the ION client, parse device-tree properties (bus
 * scaling, CE data, clock ownership), initialize clocks, notify TZ of
 * the secure-app memory region when required, and register the bus
 * scaling client.  Unwinds in reverse order via the exit_* labels on
 * failure.
 */
static int qseecom_probe(struct platform_device *pdev)
{
	int rc;
	int i;
	/* Request payload for the QSEE version SCM query below.
	 * NOTE(review): meaning of the value 10 (and of svc/cmd ids 6/3)
	 * is not visible here - confirm against the TZ interface spec. */
	uint32_t feature = 10;
	struct device *class_dev;
	struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
	struct qseecom_command_scm_resp resp;
	struct qseecom_ce_info_use *pce_info_use = NULL;

	/* Reset all driver-global state before any hardware is touched. */
	qseecom.qsee_bw_count = 0;
	qseecom.qsee_perf_client = 0;
	qseecom.qsee_sfpb_bw_count = 0;

	qseecom.qsee.ce_core_clk = NULL;
	qseecom.qsee.ce_clk = NULL;
	qseecom.qsee.ce_core_src_clk = NULL;
	qseecom.qsee.ce_bus_clk = NULL;

	qseecom.cumulative_mode = 0;
	qseecom.current_mode = INACTIVE;
	qseecom.support_bus_scaling = false;
	qseecom.support_fde = false;
	qseecom.support_pfe = false;

	qseecom.ce_drv.ce_core_clk = NULL;
	qseecom.ce_drv.ce_clk = NULL;
	qseecom.ce_drv.ce_core_src_clk = NULL;
	qseecom.ce_drv.ce_bus_clk = NULL;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);

	qseecom.app_block_ref_cnt = 0;
	init_waitqueue_head(&qseecom.app_block_wq);
	qseecom.whitelist_support = true;

	/* Character-device plumbing: region -> class -> device -> cdev. */
	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
			QSEECOM_DEV);
	if (IS_ERR(class_dev)) {
		/* NOTE(review): this logs the stale rc from cdev-region
		 * setup, not the device_create() failure - verify. */
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&qseecom.cdev, &qseecom_fops);
	qseecom.cdev.owner = THIS_MODULE;

	rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}

	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
	spin_lock_init(&qseecom.registered_listener_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
	spin_lock_init(&qseecom.registered_app_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
	spin_lock_init(&qseecom.registered_kclient_list_lock);
	init_waitqueue_head(&qseecom.send_resp_wq);
	qseecom.send_resp_flag = 0;

	/* Ask TZ which QSEE version is running. */
	qseecom.qsee_version = QSEEE_VERSION_00;
	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
		&resp, sizeof(resp));
	/* NOTE(review): resp.result is logged before rc is checked; it may
	 * be uninitialized if the SCM call failed - verify. */
	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
	if (rc) {
		pr_err("Failed to get QSEE version info %d\n", rc);
		goto exit_del_cdev;
	}
	qseecom.qsee_version = resp.result;
	qseecom.qseos_version = QSEOS_VERSION_14;
	qseecom.commonlib_loaded = false;
	qseecom.commonlib64_loaded = false;
	qseecom.pdev = class_dev;
	/* Create ION msm client */
	qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
	if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
		pr_err("Ion client cannot be created\n");
		rc = -ENOMEM;
		goto exit_del_cdev;
	}

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		/* Device-tree path: parse properties and own the clocks. */
		qseecom.pdev->of_node = pdev->dev.of_node;
		qseecom.support_bus_scaling =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,support-bus-scaling");
		rc = qseecom_retrieve_ce_data(pdev);
		if (rc)
			goto exit_destroy_ion_client;
		qseecom.appsbl_qseecom_support =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,appsbl-qseecom-support");
		pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
				qseecom.appsbl_qseecom_support);

		qseecom.commonlib64_loaded =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,commonlib64-loaded-by-uefi");
		pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
				qseecom.commonlib64_loaded);
		qseecom.fde_key_size =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,fde-key-size");
		qseecom.no_clock_support =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,no-clock-support");
		/* NOTE(review): the condition/messages here look inverted -
		 * "handled by other subsystem" is printed when
		 * no_clock_support is false - confirm intended wording. */
		if (!qseecom.no_clock_support) {
			pr_info("qseecom clocks handled by other subsystem\n");
		} else {
			pr_info("no-clock-support=0x%x",
			qseecom.no_clock_support);
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,qsee-reentrancy-support",
					&qseecom.qsee_reentrancy_support)) {
			pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
			qseecom.qsee_reentrancy_support = 0;
		} else {
			pr_warn("qseecom.qsee_reentrancy_support = %d\n",
				qseecom.qsee_reentrancy_support);
		}

		/*
		 * The qseecom bus scaling flag can not be enabled when
		 * crypto clock is not handled by HLOS.
		 */
		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
			pr_err("support_bus_scaling flag can not be enabled.\n");
			rc = -EINVAL;
			goto exit_destroy_ion_client;
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,ce-opp-freq",
					&qseecom.ce_opp_freq_hz)) {
			pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
			qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
		}
		rc = __qseecom_init_clk(CLK_QSEE);
		if (rc)
			goto exit_destroy_ion_client;

		/* Separate CE driver clocks only when the HLOS CE instance
		 * differs from QSEE's and FDE/PFE is in use; otherwise the
		 * ce_drv handles alias the QSEE clocks. */
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde)) {
			rc = __qseecom_init_clk(CLK_CE_DRV);
			if (rc) {
				__qseecom_deinit_clk(CLK_QSEE);
				goto exit_destroy_ion_client;
			}
		} else {
			struct qseecom_clk *qclk;

			qclk = &qseecom.qsee;
			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
			qseecom.ce_drv.ce_clk = qclk->ce_clk;
			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
		}

		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						msm_bus_cl_get_pdata(pdev);
		/* Notify TZ of the secure-app region unless the bootloader
		 * already protected/registered it. */
		if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
			(!qseecom.is_apps_region_protected &&
			!qseecom.appsbl_qseecom_support)) {
			struct resource *resource = NULL;
			struct qsee_apps_region_info_ireq req;
			struct qsee_apps_region_info_64bit_ireq req_64bit;
			struct qseecom_command_scm_resp resp;
			void *cmd_buf = NULL;
			size_t cmd_len;

			resource = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "secapp-region");
			if (resource) {
				/* Pick the 32- or 64-bit request layout
				 * based on the QSEE version. */
				if (qseecom.qsee_version < QSEE_VERSION_40) {
					req.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req.addr = (uint32_t)resource->start;
					req.size = resource_size(resource);
					cmd_buf = (void *)&req;
					cmd_len = sizeof(struct
						qsee_apps_region_info_ireq);
					pr_warn("secure app region addr=0x%x size=0x%x",
							req.addr, req.size);
				} else {
					req_64bit.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req_64bit.addr = resource->start;
					req_64bit.size = resource_size(
							resource);
					cmd_buf = (void *)&req_64bit;
					cmd_len = sizeof(struct
						qsee_apps_region_info_64bit_ireq);
					pr_warn("secure app region addr=0x%llx size=0x%x",
						req_64bit.addr, req_64bit.size);
				}
			} else {
				pr_err("Fail to get secure app region info\n");
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
			/* Clock must be running while issuing the SCM call. */
			rc = __qseecom_enable_clk(CLK_QSEE);
			if (rc) {
				pr_err("CLK_QSEE enabling failed (%d)\n", rc);
				rc = -EIO;
				goto exit_deinit_clock;
			}
			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len,
					&resp, sizeof(resp));
			__qseecom_disable_clk(CLK_QSEE);
			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
				pr_err("send secapp reg fail %d resp.res %d\n",
							rc, resp.result);
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
		}
		/*
		 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
		 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
		 * Pls add "qseecom.commonlib64_loaded = true" here too.
		 */
		if (qseecom.is_apps_region_protected ||
					qseecom.appsbl_qseecom_support)
			qseecom.commonlib_loaded = true;
	} else {
		/* Legacy (non-DT) path: bus data comes from platform_data. */
		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						pdev->dev.platform_data;
	}
	if (qseecom.support_bus_scaling) {
		init_timer(&(qseecom.bw_scale_down_timer));
		INIT_WORK(&qseecom.bw_inactive_req_ws,
				qseecom_bw_inactive_req_work);
		qseecom.bw_scale_down_timer.function =
				qseecom_scale_bus_bandwidth_timer_callback;
	}
	qseecom.timer_running = false;
	qseecom.qsee_perf_client = msm_bus_scale_register_client(
					qseecom_platform_support);

	qseecom.whitelist_support = qseecom_check_whitelist_feature();
	pr_warn("qseecom.whitelist_support = %d\n",
				qseecom.whitelist_support);

	if (!qseecom.qsee_perf_client)
		pr_err("Unable to register bus client\n");

	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return 0;

/* Error unwind: reverse order of the setup above. */
exit_deinit_clock:
	__qseecom_deinit_clk(CLK_QSEE);
	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
		(qseecom.support_pfe || qseecom.support_fde))
		__qseecom_deinit_clk(CLK_CE_DRV);
exit_destroy_ion_client:
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.fde);
	}
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.pfe);
	}
	ion_client_destroy(qseecom.ion_clnt);
exit_del_cdev:
	cdev_del(&qseecom.cdev);
exit_destroy_device:
	device_destroy(driver_class, qseecom_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qseecom_device_no, 1);
	return rc;
}
8795
/*
 * Platform remove: unload every registered kernel-client app, then tear
 * down bus scaling, CE info tables, clocks, the ION client and the char
 * device - the reverse of qseecom_probe().
 */
static int qseecom_remove(struct platform_device *pdev)
{
	struct qseecom_registered_kclient_list *kclient = NULL;
	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
	unsigned long flags = 0;
	int ret = 0;
	int i;
	struct qseecom_ce_pipe_entry *pce_entry;
	struct qseecom_ce_info_use *pce_info_use;

	/* Stop accepting new work before tearing anything down. */
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);

	list_for_each_entry_safe(kclient, kclient_tmp,
		&qseecom.registered_kclient_list_head, list) {

		/* Break the loop if client handle is NULL */
		if (!kclient->handle) {
			list_del(&kclient->list);
			kzfree(kclient);
			break;
		}

		list_del(&kclient->list);
		/*
		 * NOTE(review): mutex_lock() (and qseecom_unload_app(),
		 * which issues SCM calls) is invoked here while the kclient
		 * list spinlock is held with IRQs disabled - a sleeping call
		 * in atomic context. Confirm and restructure if needed.
		 */
		mutex_lock(&app_access_lock);
		ret = qseecom_unload_app(kclient->handle->dev, false);
		mutex_unlock(&app_access_lock);
		if (!ret) {
			kzfree(kclient->handle->dev);
			kzfree(kclient->handle);
			kzfree(kclient);
		}
	}

	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);

	if (qseecom.qseos_version > QSEEE_VERSION_00)
		qseecom_unload_commonlib_image();

	/* Drop the bus bandwidth vote before unregistering the client. */
	if (qseecom.qsee_perf_client)
		msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
									0);
	if (pdev->dev.platform_data != NULL)
		msm_bus_scale_unregister_client(qseecom.qsee_perf_client);

	if (qseecom.support_bus_scaling) {
		cancel_work_sync(&qseecom.bw_inactive_req_ws);
		del_timer_sync(&qseecom.bw_scale_down_timer);
	}

	/* Free the per-unit pipe-entry tables, then the unit arrays. */
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.fde);
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.pfe);

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		/* Clocks were only initialized on the DT path in probe. */
		__qseecom_deinit_clk(CLK_QSEE);
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde))
			__qseecom_deinit_clk(CLK_CE_DRV);
	}

	ion_client_destroy(qseecom.ion_clnt);

	cdev_del(&qseecom.cdev);

	device_destroy(driver_class, qseecom_device_no);

	class_destroy(driver_class);

	unregister_chrdev_region(qseecom_device_no, 1);

	return ret;
}
8885
/*
 * PM suspend hook: vote the crypto bus down to INACTIVE, gate the QSEE
 * clocks if any client still holds them prepared, and stop the
 * bandwidth scale-down timer and its deferred work.  Always returns 0.
 */
static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
	/* Nothing to gate when HLOS does not own the crypto clocks. */
	if (qseecom.no_clock_support)
		return 0;

	/* Lock order (bw then clk) must match the rest of the driver. */
	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);

	if (qseecom.current_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, INACTIVE);
		if (ret)
			pr_err("Fail to scale down bus\n");
		else
			qseecom.current_mode = INACTIVE;
	}

	/* clk_access_cnt != 0 means a client left the clocks prepared. */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
		if (qclk->ce_core_clk != NULL)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
	}

	del_timer_sync(&(qseecom.bw_scale_down_timer));
	qseecom.timer_running = false;

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	cancel_work_sync(&qseecom.bw_inactive_req_ws);

	return 0;
}
8926
/*
 * PM resume hook: restore the pre-suspend bus bandwidth vote, re-enable
 * any clocks that clients still hold prepared (core -> iface -> bus,
 * unwound in reverse on failure), and re-arm the bandwidth scale-down
 * timer.  Returns 0 on success or -EIO if a clock fails to re-enable;
 * the READY state is restored in either case.
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	/* Nothing to restore when HLOS does not own the crypto clocks. */
	if (qseecom.no_clock_support)
		goto exit;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	/* Cap the restored vote at HIGH. */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	if (qseecom.cumulative_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	/* Re-enable only what suspend gated: clients still hold a count. */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}

	/* Re-arm the scale-down timer if anything is active again. */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;

/* Unwind clock enables in reverse order of acquisition. */
ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
9006
/* Device-tree match table: binds this driver to "qcom,qseecom" nodes. */
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}
};
9013
/* Platform driver descriptor; uses the legacy suspend/resume callbacks. */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.owner = THIS_MODULE,
		.of_match_table = qseecom_match,
	},
};
9025
9026static int qseecom_init(void)
9027{
9028 return platform_driver_register(&qseecom_plat_driver);
9029}
9030
/* Module exit point: unregister the qseecom platform driver. */
static void qseecom_exit(void)
{
	platform_driver_unregister(&qseecom_plat_driver);
}
9035
/* Module metadata and init/exit registration. */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");

module_init(qseecom_init);
module_exit(qseecom_exit);