blob: 768d99efc723c451c9df7428d9e5da807801c2bb [file] [log] [blame]
/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
4 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
54#define QSEECOM_DEV "qseecom"
55#define QSEOS_VERSION_14 0x14
56#define QSEEE_VERSION_00 0x400000
57#define QSEE_VERSION_01 0x401000
58#define QSEE_VERSION_02 0x402000
59#define QSEE_VERSION_03 0x403000
60#define QSEE_VERSION_04 0x404000
61#define QSEE_VERSION_05 0x405000
62#define QSEE_VERSION_20 0x800000
63#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
64
65#define QSEE_CE_CLK_100MHZ 100000000
66#define CE_CLK_DIV 1000000
67
68#define QSEECOM_MAX_SG_ENTRY 512
69#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
70 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
71
72#define QSEECOM_INVALID_KEY_ID 0xff
73
74/* Save partition image hash for authentication check */
75#define SCM_SAVE_PARTITION_HASH_ID 0x01
76
/* Check if enterprise security is activated */
78#define SCM_IS_ACTIVATED_ID 0x02
79
80/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
81#define SCM_MDTP_CIPHER_DIP 0x01
82
83/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
84#define MAX_DIP 0x20000
85
86#define RPMB_SERVICE 0x2000
87#define SSD_SERVICE 0x3000
88
89#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
90#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
91#define TWO 2
92#define QSEECOM_UFS_ICE_CE_NUM 10
93#define QSEECOM_SDCC_ICE_CE_NUM 20
94#define QSEECOM_ICE_FDE_KEY_INDEX 0
95
96#define PHY_ADDR_4G (1ULL<<32)
97
98#define QSEECOM_STATE_NOT_READY 0
99#define QSEECOM_STATE_SUSPEND 1
100#define QSEECOM_STATE_READY 2
101#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
102
/*
 * Default CE info unit is 0 for services that support only a single
 * instance.  Most services fall into this category.
 */
109#define DEFAULT_CE_INFO_UNIT 0
110#define DEFAULT_NUM_CE_INFO_UNIT 1
111
/* Bandwidth/clock vote targets used by the qsee bus-scaling code. */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};
116
/*
 * ICE full-disk-encryption key-size flags, encoded as values shifted
 * left by QSEECOM_ICE_FDE_KEY_SIZE_MASK (bit position 2).
 */
enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};
125
/* Role assumed by a qseecom device handle (see qseecom_dev_handle.type). */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,
	QSEECOM_LISTENER_SERVICE,
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};
133
/* Crypto-engine clock instance selector (QSEE-owned vs. CE driver-owned). */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,
	CLK_CE_DRV,
	CLK_INVALID,
};
139
/* Character-device bookkeeping for /dev/qseecom. */
static struct class *driver_class;
static dev_t qseecom_device_no;

/* Locks guarding (per their users) bus-bandwidth votes, TZ app access,
 * and CE clock enable/disable ref-counting respectively.
 */
static DEFINE_MUTEX(qsee_bw_mutex);
static DEFINE_MUTEX(app_access_lock);
static DEFINE_MUTEX(clk_access_lock);
146
/*
 * One scatter/gather descriptor handed to TZ; the bit layout of
 * indexAndFlags is documented in the comment that follows this struct.
 */
struct sglist_info {
	uint32_t indexAndFlags;
	uint32_t sizeOrCount;
};
151
152/*
153 * The 31th bit indicates only one or multiple physical address inside
154 * the request buffer. If it is set, the index locates a single physical addr
155 * inside the request buffer, and `sizeOrCount` is the size of the memory being
156 * shared at that physical address.
157 * Otherwise, the index locates an array of {start, len} pairs (a
158 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
159 * that array.
160 *
161 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
162 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
163 *
164 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
165 */
166#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
167 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
168
169#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
170
171#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
172
173#define MAKE_WHITELIST_VERSION(major, minor, patch) \
174 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
175
/* Per-listener registration record, linked into
 * qseecom.registered_listener_list_head.
 */
struct qseecom_registered_listener_list {
	struct list_head list;
	struct qseecom_register_listener_req svc;	/* registration request (holds listener_id) */
	void *user_virt_sb_base;	/* userspace base of the shared buffer */
	u8 *sb_virt;			/* kernel VA of the shared buffer */
	phys_addr_t sb_phys;		/* physical address of the shared buffer */
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	wait_queue_head_t rcv_req_wq;	/* woken when a request arrives */
	int rcv_req_flag;
	int send_resp_flag;
	bool listener_in_use;
	/* wq for thread blocked on this listener*/
	wait_queue_head_t listener_block_app_wq;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
};
193
/* Loaded-TA record, linked into qseecom.registered_app_list_head and
 * reference-counted across clients of the same app.
 */
struct qseecom_registered_app_list {
	struct list_head list;
	u32 app_id;		/* id assigned by QSEE */
	u32 ref_cnt;		/* number of clients using this app */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;		/* ELF class of the loaded image */
	bool app_blocked;	/* app currently blocked on a listener */
	u32 blocked_on_listener_id;
};
203
/* In-kernel client record, linked into qseecom.registered_kclient_list_head. */
struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};
208
/* Usage record for one crypto-engine info unit (FDE or PFE). */
struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
	unsigned int unit_num;
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;		/* unit currently allocated to a client */
	uint32_t type;
};
217
/* Crypto-engine hardware inventory: FDE and PFE unit tables. */
struct ce_hw_usage_info {
	uint32_t qsee_ce_hw_instance;
	uint32_t num_fde;	/* entries in fde[] */
	struct qseecom_ce_info_use *fde;
	uint32_t num_pfe;	/* entries in pfe[] */
	struct qseecom_ce_info_use *pfe;
};
225
/* Clock bundle for one CE instance, with an enable ref-count. */
struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;	/* enable/disable reference count */
};
234
/* Global driver state; instantiated once as the static `qseecom`. */
struct qseecom_control {
	struct ion_client *ion_clnt; /* Ion client */
	struct list_head registered_listener_list_head;
	spinlock_t registered_listener_list_lock;

	struct list_head registered_app_list_head;
	spinlock_t registered_app_list_lock;

	struct list_head registered_kclient_list_head;
	spinlock_t registered_kclient_list_lock;

	/* Listener-response handshake. */
	wait_queue_head_t send_resp_wq;
	int send_resp_flag;

	/* Firmware version info queried from TZ at probe. */
	uint32_t qseos_version;
	uint32_t qsee_version;
	struct device *pdev;
	bool whitelist_support;
	bool commonlib_loaded;
	bool commonlib64_loaded;
	struct ion_handle *cmnlib_ion_handle;
	struct ce_hw_usage_info ce_info;

	/* Bandwidth-vote reference counts. */
	int qsee_bw_count;
	int qsee_sfpb_bw_count;

	uint32_t qsee_perf_client;
	struct qseecom_clk qsee;
	struct qseecom_clk ce_drv;

	/* Feature flags populated from device tree / platform data. */
	bool support_bus_scaling;
	bool support_fde;
	bool support_pfe;
	bool fde_key_size;
	uint32_t cumulative_mode;
	enum qseecom_bandwidth_request_mode current_mode;
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;

	/* Reentrancy: apps blocked on listeners. */
	uint32_t app_block_ref_cnt;
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;		/* QSEECOM_STATE_* */
	int is_apps_region_protected;	/* from androidboot.keymaster= */
};
285
/* Tracks a DMA buffer allocated for a secure-buffer ion fd. */
struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;
	size_t size;
	void *vbase;		/* kernel VA from dma allocation */
	dma_addr_t pbase;	/* bus address from dma allocation */
};
292
/* 32-bit {buffer, size} memory reference as passed to TZ. */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};
297
/* Per-client state for a handle bound to a trusted app. */
struct qseecom_client_handle {
	u32 app_id;
	u8 *sb_virt;			/* kernel VA of the shared buffer */
	phys_addr_t sb_phys;		/* physical address of the shared buffer */
	unsigned long user_virt_sb_base;	/* userspace base of the shared buffer */
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
};
309
/* Per-listener state for a handle registered as a listener service. */
struct qseecom_listener_handle {
	u32 id;
};
313
/* The single driver-wide state instance. */
static struct qseecom_control qseecom;
315
/* Per-open-file state; the union member in use is selected by `type`. */
struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;
		struct qseecom_listener_handle listener;
	};
	bool released;
	int abort;			/* set to abort in-flight ioctls */
	wait_queue_head_t abort_wq;
	atomic_t ioctl_count;		/* in-flight ioctls on this handle */
	bool perf_enabled;
	bool fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	bool use_legacy_cmd;
};
333
/* Human-readable description of one key-usage index. */
struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};
337
/* Selected crypto engine/pipe pair for a CE info unit. */
struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};
343
/*
 * Key-usage descriptions, indexed by usage id.
 * NOTE(review): array order appears to mirror the TZ key-usage enum
 * (undefined, FDE, PFE, UFS-ICE FDE, SDCC-ICE FDE) — confirm against
 * the qseecom key-usage definitions.
 */
static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},

	{
		.desc = "Full Disk Encryption",
	},

	{
		.desc = "Per File Encryption",
	},

	{
		.desc = "UFS ICE Full Disk Encryption",
	},

	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};
365
366/* Function proto types */
367static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
368static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
369static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
370static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
371static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
372static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
373 char *cmnlib_name);
374static int qseecom_enable_ice_setup(int usage);
375static int qseecom_disable_ice_setup(int usage);
376static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
377static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
378 void __user *argp);
379static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
380 void __user *argp);
381static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
382 void __user *argp);
383
/*
 * Early-param parser for "androidboot.keymaster=<n>": stores the parsed
 * integer in qseecom.is_apps_region_protected.  Always returns 1 to
 * mark the option as consumed.
 */
static int get_qseecom_keymaster_status(char *str)
{
	get_option(&str, &qseecom.is_apps_region_protected);
	return 1;
}
__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
390
/*
 * Translate a legacy qseecom request into an ARMv8 scm_call2() SMC.
 *
 * Maps (svc_id, tz_cmd_id) — and for SCM_SVC_TZSCHEDULER the QSEOS
 * command id read from the first uint32_t of @req_buf — onto the
 * corresponding TZ SMC id, copies the request fields into desc.args[],
 * issues the call, and copies desc.ret[0..2] into @resp_buf
 * (result / resp_type / data).
 *
 * For commands whose payload TZ reads by physical address (partition
 * hash, app lookup, key management), the payload is staged in a
 * kzalloc'd bounce buffer which is cache-flushed before the call and
 * freed with kzfree() after.
 *
 * Returns 0 or the scm_call2() error code; -EINVAL for unsupported
 * ids, -ENOMEM if a bounce buffer cannot be allocated.
 */
static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
			const void *req_buf, void *resp_buf)
{
	int ret = 0;
	uint32_t smc_id = 0;
	uint32_t qseos_cmd_id = 0;
	struct scm_desc desc = {0};
	struct qseecom_command_scm_resp *scm_resp = NULL;

	if (!req_buf || !resp_buf) {
		pr_err("Invalid buffer pointer\n");
		return -EINVAL;
	}
	qseos_cmd_id = *(uint32_t *)req_buf;
	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;

	switch (svc_id) {
	case 6: {
		/* Info service: feature-version query only. */
		if (tz_cmd_id == 3) {
			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
			desc.args[0] = *(uint32_t *)req_buf;
		} else {
			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
				svc_id, tz_cmd_id);
			return -EINVAL;
		}
		ret = scm_call2(smc_id, &desc);
		break;
	}
	case SCM_SVC_ES: {
		switch (tz_cmd_id) {
		case SCM_SAVE_PARTITION_HASH_ID: {
			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
			struct qseecom_save_partition_hash_req *p_hash_req =
				(struct qseecom_save_partition_hash_req *)
				req_buf;
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, p_hash_req->digest,
				SHA256_DIGEST_LENGTH);
			/* TZ reads the digest by physical address. */
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
			desc.args[0] = p_hash_req->partition_id;
			desc.args[1] = virt_to_phys(tzbuf);
			desc.args[2] = SHA256_DIGEST_LENGTH;
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		default: {
			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
				tz_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (tz_cmd_id) */
		break;
	} /* end of case SCM_SVC_ES */
	case SCM_SVC_TZSCHEDULER: {
		switch (qseos_cmd_id) {
		case QSEOS_APP_START_COMMAND: {
			/* 32-bit vs 64-bit request layout selected by
			 * QSEE version, as for all dual-layout commands
			 * below.
			 */
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_START_ID;
			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_app_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_SHUTDOWN_COMMAND: {
			struct qseecom_unload_app_ireq *req;

			req = (struct qseecom_unload_app_ireq *)req_buf;
			smc_id = TZ_OS_APP_SHUTDOWN_ID;
			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_LOOKUP_COMMAND: {
			struct qseecom_check_app_ireq *req;
			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			req = (struct qseecom_check_app_ireq *)req_buf;
			pr_debug("Lookup app_name = %s\n", req->app_name);
			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
			/* TZ reads the name by physical address. */
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_APP_LOOKUP_ID;
			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = strlen(req->app_name);
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_APP_REGION_NOTIFICATION: {
			struct qsee_apps_region_info_ireq *req;
			struct qsee_apps_region_info_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
			desc.arginfo =
				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qsee_apps_region_info_ireq *)
					req_buf;
				desc.args[0] = req->addr;
				desc.args[1] = req->size;
			} else {
				req_64bit =
				(struct qsee_apps_region_info_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->addr;
				desc.args[1] = req_64bit->size;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
			struct qseecom_load_lib_image_ireq *req;
			struct qseecom_load_lib_image_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_lib_image_ireq *)
					req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_lib_image_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_REGISTER_LISTENER: {
			struct qseecom_register_listener_ireq *req;
			struct qseecom_register_listener_64bit_ireq *req_64bit;

			desc.arginfo =
				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_register_listener_ireq *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->sb_ptr;
				desc.args[2] = req->sb_len;
			} else {
				req_64bit =
				(struct qseecom_register_listener_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->listener_id;
				desc.args[1] = req_64bit->sb_ptr;
				desc.args[2] = req_64bit->sb_len;
			}
			/* Try the smcinvoke variant first; fall back to the
			 * legacy listener-registration SMC on failure.
			 */
			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			if (ret) {
				smc_id = TZ_OS_REGISTER_LISTENER_ID;
				__qseecom_reentrancy_check_if_no_app_blocked(
					smc_id);
				ret = scm_call2(smc_id, &desc);
			}
			break;
		}
		case QSEOS_DEREGISTER_LISTENER: {
			struct qseecom_unregister_listener_ireq *req;

			req = (struct qseecom_unregister_listener_ireq *)
				req_buf;
			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
			struct qseecom_client_listener_data_irsp *req;

			req = (struct qseecom_client_listener_data_irsp *)
				req_buf;
			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
			desc.arginfo =
				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			desc.args[1] = req->status;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
			struct qseecom_client_listener_data_irsp *req;
			struct qseecom_client_listener_data_64bit_irsp *req_64;

			smc_id =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req =
				(struct qseecom_client_listener_data_irsp *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->status;
				desc.args[2] = req->sglistinfo_ptr;
				desc.args[3] = req->sglistinfo_len;
			} else {
				req_64 =
			(struct qseecom_client_listener_data_64bit_irsp *)
					req_buf;
				desc.args[0] = req_64->listener_id;
				desc.args[1] = req_64->status;
				desc.args[2] = req_64->sglistinfo_ptr;
				desc.args[3] = req_64->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
			/* NOTE(review): reuses the services-image param id;
			 * the argument layout is identical — confirm this is
			 * intentional.
			 */
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_app_64bit_ireq *)req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}

		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
			/* As above, plus the whitelist sglist descriptor. */
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
			struct qseecom_client_send_service_ireq *req;

			req = (struct qseecom_client_send_service_ireq *)
				req_buf;
			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
			desc.args[0] = req->key_type;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_ERASE_COMMAND: {
			smc_id = TZ_OS_RPMB_ERASE_ID;
			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_GENERATE_KEY: {
			/* Key-management commands: copy the payload (request
			 * minus the leading command-id word) into a flushed
			 * bounce buffer that TZ reads by physical address.
			 */
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_GEN_KEY_ID;
			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_DELETE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_DEL_KEY_ID;
			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_SET_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_select_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_select_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_UPDATE_KEY_USERINFO: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION: {
			/* GlobalPlatform TEE commands share the qteec
			 * request layout (app_id + req/resp buffers).
			 */
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_CLOSE_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_REQUEST_CANCELLATION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
			desc.arginfo =
				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
			struct qseecom_continue_blocked_request_ireq *req =
				(struct qseecom_continue_blocked_request_ireq *)
				req_buf;
			smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
			desc.arginfo =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		default: {
			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
						qseos_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /*end of switch (qsee_cmd_id)  */
	break;
	} /*end of case SCM_SVC_TZSCHEDULER*/
	default: {
		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
					svc_id);
		ret = -EINVAL;
		break;
	}
	} /*end of switch svc_id */
	/* Propagate TZ return words to the caller's response struct. */
	scm_resp->result = desc.ret[0];
	scm_resp->resp_type = desc.ret[1];
	scm_resp->data = desc.ret[2];
	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
		scm_resp->result, scm_resp->resp_type, scm_resp->data);
	return ret;
}
1047
1048
1049static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1050 size_t cmd_len, void *resp_buf, size_t resp_len)
1051{
1052 if (!is_scm_armv8())
1053 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1054 resp_buf, resp_len);
1055 else
1056 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1057}
1058
1059static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1060 struct qseecom_register_listener_req *svc)
1061{
1062 struct qseecom_registered_listener_list *ptr;
1063 int unique = 1;
1064 unsigned long flags;
1065
1066 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1067 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1068 if (ptr->svc.listener_id == svc->listener_id) {
1069 pr_err("Service id: %u is already registered\n",
1070 ptr->svc.listener_id);
1071 unique = 0;
1072 break;
1073 }
1074 }
1075 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1076 return unique;
1077}
1078
1079static struct qseecom_registered_listener_list *__qseecom_find_svc(
1080 int32_t listener_id)
1081{
1082 struct qseecom_registered_listener_list *entry = NULL;
1083 unsigned long flags;
1084
1085 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1086 list_for_each_entry(entry,
1087 &qseecom.registered_listener_list_head, list) {
1088 if (entry->svc.listener_id == listener_id)
1089 break;
1090 }
1091 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1092
1093 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1094 pr_err("Service id: %u is not found\n", listener_id);
1095 return NULL;
1096 }
1097
1098 return entry;
1099}
1100
1101static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1102 struct qseecom_dev_handle *handle,
1103 struct qseecom_register_listener_req *listener)
1104{
1105 int ret = 0;
1106 struct qseecom_register_listener_ireq req;
1107 struct qseecom_register_listener_64bit_ireq req_64bit;
1108 struct qseecom_command_scm_resp resp;
1109 ion_phys_addr_t pa;
1110 void *cmd_buf = NULL;
1111 size_t cmd_len;
1112
1113 /* Get the handle of the shared fd */
1114 svc->ihandle = ion_import_dma_buf(qseecom.ion_clnt,
1115 listener->ifd_data_fd);
1116 if (IS_ERR_OR_NULL(svc->ihandle)) {
1117 pr_err("Ion client could not retrieve the handle\n");
1118 return -ENOMEM;
1119 }
1120
1121 /* Get the physical address of the ION BUF */
1122 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1123 if (ret) {
1124 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1125 ret);
1126 return ret;
1127 }
1128 /* Populate the structure for sending scm call to load image */
1129 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1130 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1131 pr_err("ION memory mapping for listener shared buffer failed\n");
1132 return -ENOMEM;
1133 }
1134 svc->sb_phys = (phys_addr_t)pa;
1135
1136 if (qseecom.qsee_version < QSEE_VERSION_40) {
1137 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1138 req.listener_id = svc->svc.listener_id;
1139 req.sb_len = svc->sb_length;
1140 req.sb_ptr = (uint32_t)svc->sb_phys;
1141 cmd_buf = (void *)&req;
1142 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1143 } else {
1144 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1145 req_64bit.listener_id = svc->svc.listener_id;
1146 req_64bit.sb_len = svc->sb_length;
1147 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1148 cmd_buf = (void *)&req_64bit;
1149 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1150 }
1151
1152 resp.result = QSEOS_RESULT_INCOMPLETE;
1153
1154 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1155 &resp, sizeof(resp));
1156 if (ret) {
1157 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1158 return -EINVAL;
1159 }
1160
1161 if (resp.result != QSEOS_RESULT_SUCCESS) {
1162 pr_err("Error SB registration req: resp.result = %d\n",
1163 resp.result);
1164 return -EPERM;
1165 }
1166 return 0;
1167}
1168
1169static int qseecom_register_listener(struct qseecom_dev_handle *data,
1170 void __user *argp)
1171{
1172 int ret = 0;
1173 unsigned long flags;
1174 struct qseecom_register_listener_req rcvd_lstnr;
1175 struct qseecom_registered_listener_list *new_entry;
1176
1177 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1178 if (ret) {
1179 pr_err("copy_from_user failed\n");
1180 return ret;
1181 }
1182 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1183 rcvd_lstnr.sb_size))
1184 return -EFAULT;
1185
1186 data->listener.id = 0;
1187 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
1188 pr_err("Service is not unique and is already registered\n");
1189 data->released = true;
1190 return -EBUSY;
1191 }
1192
1193 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1194 if (!new_entry)
1195 return -ENOMEM;
1196 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1197 new_entry->rcv_req_flag = 0;
1198
1199 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1200 new_entry->sb_length = rcvd_lstnr.sb_size;
1201 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1202 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
1203 pr_err("qseecom_set_sb_memoryfailed\n");
1204 kzfree(new_entry);
1205 return -ENOMEM;
1206 }
1207
1208 data->listener.id = rcvd_lstnr.listener_id;
1209 init_waitqueue_head(&new_entry->rcv_req_wq);
1210 init_waitqueue_head(&new_entry->listener_block_app_wq);
1211 new_entry->send_resp_flag = 0;
1212 new_entry->listener_in_use = false;
1213 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1214 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1215 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1216
1217 return ret;
1218}
1219
/*
 * Tear down the listener bound to @data: deregister it with QSEE,
 * abort/wake any thread blocked on it, drain this handle's in-flight
 * ioctls, remove the entry from the global list, and release the ION
 * shared buffer.  Returns 0 on success or a negative errno.
 */
static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	int ret = 0;
	unsigned long flags;
	uint32_t unmap_mem = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL;		/* Retrieve phy addr */

	/* Tell QSEE first; bail out without touching local state on failure. */
	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
					sizeof(req), &resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		return ret;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		return -EPERM;
	}

	/* Flag the abort and wake any thread waiting for a listener request. */
	data->abort = 1;
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
			list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			wake_up_all(&ptr_svc->rcv_req_wq);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Wait until this handle's other in-flight ioctls have drained. */
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			break;
		}
	}

	/*
	 * Unlink and free the entry under the lock; remember the ION handle
	 * so the unmap/free can happen outside the spinlock.
	 */
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc,
			&qseecom.registered_listener_list_head, list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			if (ptr_svc->sb_virt) {
				unmap_mem = 1;
				ihandle = ptr_svc->ihandle;
			}
			list_del(&ptr_svc->list);
			kzfree(ptr_svc);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Unmap the memory */
	if (unmap_mem) {
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	data->released = true;
	return ret;
}
1293
/*
 * Vote the msm bus bandwidth to @mode and keep the QSEE core source
 * clock in step with it: clock off for INACTIVE, on otherwise.  If the
 * bus request fails, the clock change above is rolled back.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* Undo the clock transition performed above. */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		/*
		 * NOTE(review): current_mode is updated to @mode even when
		 * the bus request above failed — looks suspicious; confirm
		 * against the bus-scaling design before changing.
		 */
		qseecom.current_mode = mode;
	}
	return ret;
}
1330
1331static void qseecom_bw_inactive_req_work(struct work_struct *work)
1332{
1333 mutex_lock(&app_access_lock);
1334 mutex_lock(&qsee_bw_mutex);
1335 if (qseecom.timer_running)
1336 __qseecom_set_msm_bus_request(INACTIVE);
1337 pr_debug("current_mode = %d, cumulative_mode = %d\n",
1338 qseecom.current_mode, qseecom.cumulative_mode);
1339 qseecom.timer_running = false;
1340 mutex_unlock(&qsee_bw_mutex);
1341 mutex_unlock(&app_access_lock);
1342}
1343
/*
 * bw_scale_down_timer callback: defer the actual bandwidth drop to
 * process context via the bw_inactive_req work item (the work handler
 * takes mutexes, which cannot be done in timer context).
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1348
1349static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1350{
1351 struct qseecom_clk *qclk;
1352 int ret = 0;
1353
1354 mutex_lock(&clk_access_lock);
1355 if (ce == CLK_QSEE)
1356 qclk = &qseecom.qsee;
1357 else
1358 qclk = &qseecom.ce_drv;
1359
1360 if (qclk->clk_access_cnt > 2) {
1361 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1362 ret = -EINVAL;
1363 goto err_dec_ref_cnt;
1364 }
1365 if (qclk->clk_access_cnt == 2)
1366 qclk->clk_access_cnt--;
1367
1368err_dec_ref_cnt:
1369 mutex_unlock(&clk_access_lock);
1370 return ret;
1371}
1372
1373
1374static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1375{
1376 int32_t ret = 0;
1377 int32_t request_mode = INACTIVE;
1378
1379 mutex_lock(&qsee_bw_mutex);
1380 if (mode == 0) {
1381 if (qseecom.cumulative_mode > MEDIUM)
1382 request_mode = HIGH;
1383 else
1384 request_mode = qseecom.cumulative_mode;
1385 } else {
1386 request_mode = mode;
1387 }
1388
1389 ret = __qseecom_set_msm_bus_request(request_mode);
1390 if (ret) {
1391 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1392 ret, request_mode);
1393 goto err_scale_timer;
1394 }
1395
1396 if (qseecom.timer_running) {
1397 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1398 if (ret) {
1399 pr_err("Failed to decrease clk ref count.\n");
1400 goto err_scale_timer;
1401 }
1402 del_timer_sync(&(qseecom.bw_scale_down_timer));
1403 qseecom.timer_running = false;
1404 }
1405err_scale_timer:
1406 mutex_unlock(&qsee_bw_mutex);
1407 return ret;
1408}
1409
1410
1411static int qseecom_unregister_bus_bandwidth_needs(
1412 struct qseecom_dev_handle *data)
1413{
1414 int32_t ret = 0;
1415
1416 qseecom.cumulative_mode -= data->mode;
1417 data->mode = INACTIVE;
1418
1419 return ret;
1420}
1421
1422static int __qseecom_register_bus_bandwidth_needs(
1423 struct qseecom_dev_handle *data, uint32_t request_mode)
1424{
1425 int32_t ret = 0;
1426
1427 if (data->mode == INACTIVE) {
1428 qseecom.cumulative_mode += request_mode;
1429 data->mode = request_mode;
1430 } else {
1431 if (data->mode != request_mode) {
1432 qseecom.cumulative_mode -= data->mode;
1433 qseecom.cumulative_mode += request_mode;
1434 data->mode = request_mode;
1435 }
1436 }
1437 return ret;
1438}
1439
1440static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1441{
1442 int ret = 0;
1443
1444 ret = qsee_vote_for_clock(data, CLK_DFAB);
1445 if (ret) {
1446 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1447 goto perf_enable_exit;
1448 }
1449 ret = qsee_vote_for_clock(data, CLK_SFPB);
1450 if (ret) {
1451 qsee_disable_clock_vote(data, CLK_DFAB);
1452 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1453 goto perf_enable_exit;
1454 }
1455
1456perf_enable_exit:
1457 return ret;
1458}
1459
1460static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1461 void __user *argp)
1462{
1463 int32_t ret = 0;
1464 int32_t req_mode;
1465
1466 if (qseecom.no_clock_support)
1467 return 0;
1468
1469 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1470 if (ret) {
1471 pr_err("copy_from_user failed\n");
1472 return ret;
1473 }
1474 if (req_mode > HIGH) {
1475 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1476 return -EINVAL;
1477 }
1478
1479 /*
1480 * Register bus bandwidth needs if bus scaling feature is enabled;
1481 * otherwise, qseecom enable/disable clocks for the client directly.
1482 */
1483 if (qseecom.support_bus_scaling) {
1484 mutex_lock(&qsee_bw_mutex);
1485 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1486 mutex_unlock(&qsee_bw_mutex);
1487 } else {
1488 pr_debug("Bus scaling feature is NOT enabled\n");
1489 pr_debug("request bandwidth mode %d for the client\n",
1490 req_mode);
1491 if (req_mode != INACTIVE) {
1492 ret = qseecom_perf_enable(data);
1493 if (ret)
1494 pr_err("Failed to vote for clock with err %d\n",
1495 ret);
1496 } else {
1497 qsee_disable_clock_vote(data, CLK_DFAB);
1498 qsee_disable_clock_vote(data, CLK_SFPB);
1499 }
1500 }
1501 return ret;
1502}
1503
1504static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1505{
1506 if (qseecom.no_clock_support)
1507 return;
1508
1509 mutex_lock(&qsee_bw_mutex);
1510 qseecom.bw_scale_down_timer.expires = jiffies +
1511 msecs_to_jiffies(duration);
1512 mod_timer(&(qseecom.bw_scale_down_timer),
1513 qseecom.bw_scale_down_timer.expires);
1514 qseecom.timer_running = true;
1515 mutex_unlock(&qsee_bw_mutex);
1516}
1517
1518static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1519{
1520 if (!qseecom.support_bus_scaling)
1521 qsee_disable_clock_vote(data, CLK_SFPB);
1522 else
1523 __qseecom_add_bw_scale_down_timer(
1524 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1525}
1526
1527static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1528{
1529 int ret = 0;
1530
1531 if (qseecom.support_bus_scaling) {
1532 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1533 if (ret)
1534 pr_err("Failed to set bw MEDIUM.\n");
1535 } else {
1536 ret = qsee_vote_for_clock(data, CLK_SFPB);
1537 if (ret)
1538 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1539 }
1540 return ret;
1541}
1542
/*
 * QSEECOM_IOCTL_SET_MEM_PARAM_REQ handler: import the client's shared
 * buffer ION fd, resolve its physical address, map it into the kernel,
 * and record the buffer parameters on @data->client.
 *
 * NOTE(review): on the error paths after a successful import, the ION
 * handle remains set in data->client.ihandle — presumably released on
 * handle teardown; verify against the driver's release path.
 */
static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
						void __user *argp)
{
	ion_phys_addr_t pa;
	int32_t ret;
	struct qseecom_set_sb_mem_param_req req;
	size_t len;

	/* Copy the relevant information needed for loading the image */
	if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
		return -EFAULT;

	/* Reject a missing fd, NULL user base, or zero-length buffer. */
	if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
					(req.sb_len == 0)) {
		pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
			req.ifd_data_fd, req.sb_len, req.virt_sb_base);
		return -EFAULT;
	}
	if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
			req.sb_len))
		return -EFAULT;

	/* Get the handle of the shared fd */
	data->client.ihandle = ion_import_dma_buf(qseecom.ion_clnt,
						req.ifd_data_fd);
	if (IS_ERR_OR_NULL(data->client.ihandle)) {
		pr_err("Ion client could not retrieve the handle\n");
		return -ENOMEM;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
	if (ret) {

		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
			ret);
		return ret;
	}

	/* The ION allocation must cover the requested buffer length. */
	if (len < req.sb_len) {
		pr_err("Requested length (0x%x) is > allocated (%zu)\n",
			req.sb_len, len);
		return -EINVAL;
	}
	/* Populate the structure for sending scm call to load image */
	data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
							data->client.ihandle);
	if (IS_ERR_OR_NULL(data->client.sb_virt)) {
		pr_err("ION memory mapping for client shared buf failed\n");
		return -ENOMEM;
	}
	data->client.sb_phys = (phys_addr_t)pa;
	data->client.sb_length = req.sb_len;
	data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
	return 0;
}
1598
1599static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
1600{
1601 int ret;
1602
1603 ret = (qseecom.send_resp_flag != 0);
1604 return ret || data->abort;
1605}
1606
1607static int __qseecom_reentrancy_listener_has_sent_rsp(
1608 struct qseecom_dev_handle *data,
1609 struct qseecom_registered_listener_list *ptr_svc)
1610{
1611 int ret;
1612
1613 ret = (ptr_svc->send_resp_flag != 0);
1614 return ret || data->abort;
1615}
1616
1617static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
1618 struct qseecom_command_scm_resp *resp,
1619 struct qseecom_client_listener_data_irsp *send_data_rsp,
1620 struct qseecom_registered_listener_list *ptr_svc,
1621 uint32_t lstnr) {
1622 int ret = 0;
1623
1624 send_data_rsp->status = QSEOS_RESULT_FAILURE;
1625 qseecom.send_resp_flag = 0;
1626 send_data_rsp->qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
1627 send_data_rsp->listener_id = lstnr;
1628 if (ptr_svc)
1629 pr_warn("listener_id:%x, lstnr: %x\n",
1630 ptr_svc->svc.listener_id, lstnr);
1631 if (ptr_svc && ptr_svc->ihandle) {
1632 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
1633 ptr_svc->sb_virt, ptr_svc->sb_length,
1634 ION_IOC_CLEAN_INV_CACHES);
1635 if (ret) {
1636 pr_err("cache operation failed %d\n", ret);
1637 return ret;
1638 }
1639 }
1640
1641 if (lstnr == RPMB_SERVICE) {
1642 ret = __qseecom_enable_clk(CLK_QSEE);
1643 if (ret)
1644 return ret;
1645 }
1646 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, send_data_rsp,
1647 sizeof(send_data_rsp), resp, sizeof(*resp));
1648 if (ret) {
1649 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1650 ret, data->client.app_id);
1651 if (lstnr == RPMB_SERVICE)
1652 __qseecom_disable_clk(CLK_QSEE);
1653 return ret;
1654 }
1655 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1656 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1657 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1658 resp->result, data->client.app_id, lstnr);
1659 ret = -EINVAL;
1660 }
1661 if (lstnr == RPMB_SERVICE)
1662 __qseecom_disable_clk(CLK_QSEE);
1663 return ret;
1664}
1665
1666static void __qseecom_clean_listener_sglistinfo(
1667 struct qseecom_registered_listener_list *ptr_svc)
1668{
1669 if (ptr_svc->sglist_cnt) {
1670 memset(ptr_svc->sglistinfo_ptr, 0,
1671 SGLISTINFO_TABLE_SIZE);
1672 ptr_svc->sglist_cnt = 0;
1673 }
1674}
1675
/*
 * Drive the listener protocol while TZ keeps returning
 * QSEOS_RESULT_INCOMPLETE: each iteration wakes the listener named in
 * resp->data, waits (signals blocked) for its response, then delivers
 * that response back to TZ.  Loops until TZ reports a terminal result.
 * Returns 0, or a negative errno on abort/scm failure.
 */
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp;
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking listener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/*
		 * If the listener cannot service the request, report a
		 * failure response back to TZ so it is not left waiting.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_warn("Service requested does not exist\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -ERESTARTSYS;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);
		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		do {
			/*
			 * When reentrancy is not supported, check global
			 * send_resp_flag; otherwise, check this listener's
			 * send_resp_flag.
			 */
			if (!qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_listener_has_sent_rsp(data))) {
				break;
			}

			if (qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		/* An abort still sends a (failure) response to TZ below. */
		if (data->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}

		qseecom.send_resp_flag = 0;
		ptr_svc->send_resp_flag = 0;
		table = ptr_svc->sglistinfo_ptr;
		/* Build the 32- or 64-bit response depending on QSEE version. */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id  = lstnr;
			send_data_rsp.status = status;
			send_data_rsp.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
			send_data_rsp.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id  = lstnr;
			send_data_rsp_64bit.status = status;
			send_data_rsp_64bit.sglistinfo_ptr =
				virt_to_phys(table);
			send_data_rsp_64bit.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* cmd id is the first field of both response layouts. */
		if (qseecom.whitelist_support == false)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}

		/* These services need the QSEE clock held across the call. */
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		ptr_svc->listener_in_use = false;
		__qseecom_clean_listener_sglistinfo(ptr_svc);
		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
				__qseecom_disable_clk(CLK_QSEE);
			return ret;
		}
		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
		}
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
			__qseecom_disable_clk(CLK_QSEE);

	}
	if (rc)
		return rc;

	return ret;
}
1844
/*
 * Handle QSEOS_RESULT_BLOCKED_ON_LISTENER from a reentrancy-capable
 * QSEE: sleep (with app_access_lock dropped) until the listener named
 * in resp->data is free, then send QSEOS_CONTINUE_BLOCKED_REQ_COMMAND
 * so TZ resumes the blocked app, and mark resp INCOMPLETE so the caller
 * continues normal incomplete-command processing.  @ptr_app may be NULL,
 * in which case the app is looked up by data's app_id/app_name.
 */
int __qseecom_process_reentrancy_blocked_on_listener(
				struct qseecom_command_scm_resp *resp,
				struct qseecom_registered_app_list *ptr_app,
				struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	sigset_t new_sigset, old_sigset;
	unsigned long flags;
	bool found_app = false;

	if (!resp || !data) {
		pr_err("invalid resp or data pointer\n");
		ret = -EINVAL;
		goto exit;
	}

	/* find app_id & img_name from list */
	if (!ptr_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
			if ((ptr_app->app_id == data->client.app_id) &&
				(!strcmp(ptr_app->app_name,
						data->client.app_name))) {
				found_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
					flags);
		if (!found_app) {
			pr_err("app_id %d (%s) is not found\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -ENOENT;
			goto exit;
		}
	}

	list_ptr = __qseecom_find_svc(resp->data);
	if (!list_ptr) {
		pr_err("Invalid listener ID\n");
		ret = -ENODATA;
		goto exit;
	}
	pr_debug("lsntr %d in_use = %d\n",
			resp->data, list_ptr->listener_in_use);
	ptr_app->blocked_on_listener_id = resp->data;
	/* sleep until listener is available */
	do {
		/*
		 * Mark the app blocked and drop app_access_lock while
		 * sleeping, so other clients (including the listener's
		 * responder) can make progress; signals are blocked so the
		 * freezable wait is only woken by the listener becoming free.
		 */
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		sigfillset(&new_sigset);
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
		mutex_unlock(&app_access_lock);
		do {
			if (!wait_event_freezable(
				list_ptr->listener_block_app_wq,
				!list_ptr->listener_in_use)) {
				break;
			}
		} while (1);
		mutex_lock(&app_access_lock);
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
	} while (list_ptr->listener_in_use == true);
	ptr_app->blocked_on_listener_id = 0;
	/* notify the blocked app that listener is available */
	pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
		resp->data, data->client.app_id,
		data->client.app_name);
	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
	ireq.app_id = data->client.app_id;
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
	if (ret) {
		pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
			data->client.app_id,
			data->client.app_name, ret);
		goto exit;
	}
	/*
	 * After TZ app is unblocked, then continue to next case
	 * for incomplete request processing
	 */
	resp->result = QSEOS_RESULT_INCOMPLETE;
exit:
	return ret;
}
1939
/*
 * Reentrancy-aware variant of __qseecom_process_incomplete_cmd(): loops
 * while TZ returns QSEOS_RESULT_INCOMPLETE, waking the listener in
 * resp->data and waiting for its response with app_access_lock dropped
 * so other clients can run.  Also handles TZ reporting that the app is
 * blocked on another listener mid-loop.  Returns 0, or a negative errno
 * on abort/scm failure.
 */
static int __qseecom_reentrancy_process_incomplete_cmd(
					struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp;
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking listener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			return -EINVAL;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			return -EINVAL;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_warn("Service requested does not exist\n");
			return -ERESTARTSYS;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);

		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		/* unlock mutex btw waking listener and sleep-wait */
		mutex_unlock(&app_access_lock);
		do {
			if (!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);
		/* lock mutex again after resp sent */
		mutex_lock(&app_access_lock);
		ptr_svc->send_resp_flag = 0;
		qseecom.send_resp_flag = 0;

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		/* An abort still sends a (failure) response to TZ below. */
		if (data->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status  = QSEOS_RESULT_FAILURE;
		} else {
			status  = QSEOS_RESULT_SUCCESS;
		}
		table = ptr_svc->sglistinfo_ptr;
		/* Build the 32- or 64-bit response depending on QSEE version. */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id  = lstnr;
			send_data_rsp.status = status;
			send_data_rsp.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
			send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id  = lstnr;
			send_data_rsp_64bit.status = status;
			send_data_rsp_64bit.sglistinfo_ptr =
				virt_to_phys(table);
			send_data_rsp_64bit.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* cmd id is the first field of both response layouts. */
		if (qseecom.whitelist_support == false)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}
		if (lstnr == RPMB_SERVICE) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		ptr_svc->listener_in_use = false;
		__qseecom_clean_listener_sglistinfo(ptr_svc);
		/* Unblock any app sleeping on this listener becoming free. */
		wake_up_interruptible(&ptr_svc->listener_block_app_wq);

		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			goto exit;
		}

		switch (resp->result) {
		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
				lstnr, data->client.app_id, resp->data);
			if (lstnr == resp->data) {
				pr_err("lstnr %d should not be blocked!\n",
					lstnr);
				ret = -EINVAL;
				goto exit;
			}
			ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, NULL, data);
			if (ret) {
				pr_err("failed to process App(%d) %s blocked on listener %d\n",
					data->client.app_id,
					data->client.app_name, resp->data);
				goto exit;
			}
			/* fall through: resp is now INCOMPLETE on success */
		case QSEOS_RESULT_SUCCESS:
		case QSEOS_RESULT_INCOMPLETE:
			break;
		default:
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
			goto exit;
		}
exit:
		if (lstnr == RPMB_SERVICE)
			__qseecom_disable_clk(CLK_QSEE);

	}
	if (rc)
		return rc;

	return ret;
}
2116
2117/*
2118 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2119 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2120 * So, needs to first check if no app blocked before sending OS level scm call,
2121 * then wait until all apps are unblocked.
2122 */
2123static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2124{
2125 sigset_t new_sigset, old_sigset;
2126
2127 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2128 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2129 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2130 /* thread sleep until this app unblocked */
2131 while (qseecom.app_block_ref_cnt > 0) {
2132 sigfillset(&new_sigset);
2133 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2134 mutex_unlock(&app_access_lock);
2135 do {
2136 if (!wait_event_freezable(qseecom.app_block_wq,
2137 (qseecom.app_block_ref_cnt == 0)))
2138 break;
2139 } while (1);
2140 mutex_lock(&app_access_lock);
2141 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2142 }
2143 }
2144}
2145
2146/*
2147 * scm_call of send data will fail if this TA is blocked or there are more
2148 * than one TA requesting listener services; So, first check to see if need
2149 * to wait.
2150 */
2151static void __qseecom_reentrancy_check_if_this_app_blocked(
2152 struct qseecom_registered_app_list *ptr_app)
2153{
2154 sigset_t new_sigset, old_sigset;
2155
2156 if (qseecom.qsee_reentrancy_support) {
2157 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2158 /* thread sleep until this app unblocked */
2159 sigfillset(&new_sigset);
2160 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2161 mutex_unlock(&app_access_lock);
2162 do {
2163 if (!wait_event_freezable(qseecom.app_block_wq,
2164 (!ptr_app->app_blocked &&
2165 qseecom.app_block_ref_cnt <= 1)))
2166 break;
2167 } while (1);
2168 mutex_lock(&app_access_lock);
2169 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2170 }
2171 }
2172}
2173
2174static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2175 uint32_t *app_id)
2176{
2177 int32_t ret;
2178 struct qseecom_command_scm_resp resp;
2179 bool found_app = false;
2180 struct qseecom_registered_app_list *entry = NULL;
2181 unsigned long flags = 0;
2182
2183 if (!app_id) {
2184 pr_err("Null pointer to app_id\n");
2185 return -EINVAL;
2186 }
2187 *app_id = 0;
2188
2189 /* check if app exists and has been registered locally */
2190 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2191 list_for_each_entry(entry,
2192 &qseecom.registered_app_list_head, list) {
2193 if (!strcmp(entry->app_name, req.app_name)) {
2194 found_app = true;
2195 break;
2196 }
2197 }
2198 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2199 if (found_app) {
2200 pr_debug("Found app with id %d\n", entry->app_id);
2201 *app_id = entry->app_id;
2202 return 0;
2203 }
2204
2205 memset((void *)&resp, 0, sizeof(resp));
2206
2207 /* SCM_CALL to check if app_id for the mentioned app exists */
2208 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2209 sizeof(struct qseecom_check_app_ireq),
2210 &resp, sizeof(resp));
2211 if (ret) {
2212 pr_err("scm_call to check if app is already loaded failed\n");
2213 return -EINVAL;
2214 }
2215
2216 if (resp.result == QSEOS_RESULT_FAILURE)
2217 return 0;
2218
2219 switch (resp.resp_type) {
2220 /*qsee returned listener type response */
2221 case QSEOS_LISTENER_ID:
2222 pr_err("resp type is of listener type instead of app");
2223 return -EINVAL;
2224 case QSEOS_APP_ID:
2225 *app_id = resp.data;
2226 return 0;
2227 default:
2228 pr_err("invalid resp type (%d) from qsee",
2229 resp.resp_type);
2230 return -ENODEV;
2231 }
2232}
2233
2234static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2235{
2236 struct qseecom_registered_app_list *entry = NULL;
2237 unsigned long flags = 0;
2238 u32 app_id = 0;
2239 struct ion_handle *ihandle; /* Ion handle */
2240 struct qseecom_load_img_req load_img_req;
2241 int32_t ret = 0;
2242 ion_phys_addr_t pa = 0;
2243 size_t len;
2244 struct qseecom_command_scm_resp resp;
2245 struct qseecom_check_app_ireq req;
2246 struct qseecom_load_app_ireq load_req;
2247 struct qseecom_load_app_64bit_ireq load_req_64bit;
2248 void *cmd_buf = NULL;
2249 size_t cmd_len;
2250 bool first_time = false;
2251
2252 /* Copy the relevant information needed for loading the image */
2253 if (copy_from_user(&load_img_req,
2254 (void __user *)argp,
2255 sizeof(struct qseecom_load_img_req))) {
2256 pr_err("copy_from_user failed\n");
2257 return -EFAULT;
2258 }
2259
2260 /* Check and load cmnlib */
2261 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2262 if (!qseecom.commonlib_loaded &&
2263 load_img_req.app_arch == ELFCLASS32) {
2264 ret = qseecom_load_commonlib_image(data, "cmnlib");
2265 if (ret) {
2266 pr_err("failed to load cmnlib\n");
2267 return -EIO;
2268 }
2269 qseecom.commonlib_loaded = true;
2270 pr_debug("cmnlib is loaded\n");
2271 }
2272
2273 if (!qseecom.commonlib64_loaded &&
2274 load_img_req.app_arch == ELFCLASS64) {
2275 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2276 if (ret) {
2277 pr_err("failed to load cmnlib64\n");
2278 return -EIO;
2279 }
2280 qseecom.commonlib64_loaded = true;
2281 pr_debug("cmnlib64 is loaded\n");
2282 }
2283 }
2284
2285 if (qseecom.support_bus_scaling) {
2286 mutex_lock(&qsee_bw_mutex);
2287 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2288 mutex_unlock(&qsee_bw_mutex);
2289 if (ret)
2290 return ret;
2291 }
2292
2293 /* Vote for the SFPB clock */
2294 ret = __qseecom_enable_clk_scale_up(data);
2295 if (ret)
2296 goto enable_clk_err;
2297
2298 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2299 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2300 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2301
2302 ret = __qseecom_check_app_exists(req, &app_id);
2303 if (ret < 0)
2304 goto loadapp_err;
2305
2306 if (app_id) {
2307 pr_debug("App id %d (%s) already exists\n", app_id,
2308 (char *)(req.app_name));
2309 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2310 list_for_each_entry(entry,
2311 &qseecom.registered_app_list_head, list){
2312 if (entry->app_id == app_id) {
2313 entry->ref_cnt++;
2314 break;
2315 }
2316 }
2317 spin_unlock_irqrestore(
2318 &qseecom.registered_app_list_lock, flags);
2319 ret = 0;
2320 } else {
2321 first_time = true;
2322 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2323 (char *)(load_img_req.img_name));
2324 /* Get the handle of the shared fd */
2325 ihandle = ion_import_dma_buf(qseecom.ion_clnt,
2326 load_img_req.ifd_data_fd);
2327 if (IS_ERR_OR_NULL(ihandle)) {
2328 pr_err("Ion client could not retrieve the handle\n");
2329 ret = -ENOMEM;
2330 goto loadapp_err;
2331 }
2332
2333 /* Get the physical address of the ION BUF */
2334 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2335 if (ret) {
2336 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2337 ret);
2338 goto loadapp_err;
2339 }
2340 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2341 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2342 len, load_img_req.mdt_len,
2343 load_img_req.img_len);
2344 ret = -EINVAL;
2345 goto loadapp_err;
2346 }
2347 /* Populate the structure for sending scm call to load image */
2348 if (qseecom.qsee_version < QSEE_VERSION_40) {
2349 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2350 load_req.mdt_len = load_img_req.mdt_len;
2351 load_req.img_len = load_img_req.img_len;
2352 strlcpy(load_req.app_name, load_img_req.img_name,
2353 MAX_APP_NAME_SIZE);
2354 load_req.phy_addr = (uint32_t)pa;
2355 cmd_buf = (void *)&load_req;
2356 cmd_len = sizeof(struct qseecom_load_app_ireq);
2357 } else {
2358 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2359 load_req_64bit.mdt_len = load_img_req.mdt_len;
2360 load_req_64bit.img_len = load_img_req.img_len;
2361 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2362 MAX_APP_NAME_SIZE);
2363 load_req_64bit.phy_addr = (uint64_t)pa;
2364 cmd_buf = (void *)&load_req_64bit;
2365 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2366 }
2367
2368 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2369 ION_IOC_CLEAN_INV_CACHES);
2370 if (ret) {
2371 pr_err("cache operation failed %d\n", ret);
2372 goto loadapp_err;
2373 }
2374
2375 /* SCM_CALL to load the app and get the app_id back */
2376 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2377 cmd_len, &resp, sizeof(resp));
2378 if (ret) {
2379 pr_err("scm_call to load app failed\n");
2380 if (!IS_ERR_OR_NULL(ihandle))
2381 ion_free(qseecom.ion_clnt, ihandle);
2382 ret = -EINVAL;
2383 goto loadapp_err;
2384 }
2385
2386 if (resp.result == QSEOS_RESULT_FAILURE) {
2387 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2388 if (!IS_ERR_OR_NULL(ihandle))
2389 ion_free(qseecom.ion_clnt, ihandle);
2390 ret = -EFAULT;
2391 goto loadapp_err;
2392 }
2393
2394 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2395 ret = __qseecom_process_incomplete_cmd(data, &resp);
2396 if (ret) {
2397 pr_err("process_incomplete_cmd failed err: %d\n",
2398 ret);
2399 if (!IS_ERR_OR_NULL(ihandle))
2400 ion_free(qseecom.ion_clnt, ihandle);
2401 ret = -EFAULT;
2402 goto loadapp_err;
2403 }
2404 }
2405
2406 if (resp.result != QSEOS_RESULT_SUCCESS) {
2407 pr_err("scm_call failed resp.result unknown, %d\n",
2408 resp.result);
2409 if (!IS_ERR_OR_NULL(ihandle))
2410 ion_free(qseecom.ion_clnt, ihandle);
2411 ret = -EFAULT;
2412 goto loadapp_err;
2413 }
2414
2415 app_id = resp.data;
2416
2417 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2418 if (!entry) {
2419 ret = -ENOMEM;
2420 goto loadapp_err;
2421 }
2422 entry->app_id = app_id;
2423 entry->ref_cnt = 1;
2424 entry->app_arch = load_img_req.app_arch;
2425 /*
2426 * keymaster app may be first loaded as "keymaste" by qseecomd,
2427 * and then used as "keymaster" on some targets. To avoid app
2428 * name checking error, register "keymaster" into app_list and
2429 * thread private data.
2430 */
2431 if (!strcmp(load_img_req.img_name, "keymaste"))
2432 strlcpy(entry->app_name, "keymaster",
2433 MAX_APP_NAME_SIZE);
2434 else
2435 strlcpy(entry->app_name, load_img_req.img_name,
2436 MAX_APP_NAME_SIZE);
2437 entry->app_blocked = false;
2438 entry->blocked_on_listener_id = 0;
2439
2440 /* Deallocate the handle */
2441 if (!IS_ERR_OR_NULL(ihandle))
2442 ion_free(qseecom.ion_clnt, ihandle);
2443
2444 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2445 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2446 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2447 flags);
2448
2449 pr_warn("App with id %u (%s) now loaded\n", app_id,
2450 (char *)(load_img_req.img_name));
2451 }
2452 data->client.app_id = app_id;
2453 data->client.app_arch = load_img_req.app_arch;
2454 if (!strcmp(load_img_req.img_name, "keymaste"))
2455 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2456 else
2457 strlcpy(data->client.app_name, load_img_req.img_name,
2458 MAX_APP_NAME_SIZE);
2459 load_img_req.app_id = app_id;
2460 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2461 pr_err("copy_to_user failed\n");
2462 ret = -EFAULT;
2463 if (first_time == true) {
2464 spin_lock_irqsave(
2465 &qseecom.registered_app_list_lock, flags);
2466 list_del(&entry->list);
2467 spin_unlock_irqrestore(
2468 &qseecom.registered_app_list_lock, flags);
2469 kzfree(entry);
2470 }
2471 }
2472
2473loadapp_err:
2474 __qseecom_disable_clk_scale_down(data);
2475enable_clk_err:
2476 if (qseecom.support_bus_scaling) {
2477 mutex_lock(&qsee_bw_mutex);
2478 qseecom_unregister_bus_bandwidth_needs(data);
2479 mutex_unlock(&qsee_bw_mutex);
2480 }
2481 return ret;
2482}
2483
2484static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2485{
2486 int ret = 1; /* Set unload app */
2487
2488 wake_up_all(&qseecom.send_resp_wq);
2489 if (qseecom.qsee_reentrancy_support)
2490 mutex_unlock(&app_access_lock);
2491 while (atomic_read(&data->ioctl_count) > 1) {
2492 if (wait_event_freezable(data->abort_wq,
2493 atomic_read(&data->ioctl_count) <= 1)) {
2494 pr_err("Interrupted from abort\n");
2495 ret = -ERESTARTSYS;
2496 break;
2497 }
2498 }
2499 if (qseecom.qsee_reentrancy_support)
2500 mutex_lock(&app_access_lock);
2501 return ret;
2502}
2503
2504static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2505{
2506 int ret = 0;
2507
2508 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2509 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2510 ion_free(qseecom.ion_clnt, data->client.ihandle);
2511 data->client.ihandle = NULL;
2512 }
2513 return ret;
2514}
2515
/*
 * qseecom_unload_app() - unload a TA and/or drop a client reference to it.
 *
 * Looks the app up in the local registered-app list.  The TZ-side unload
 * SCM call is issued only when @app_crash is set or this is the last
 * reference.  The keymaster app is deliberately never unloaded from TZ.
 * In all cases the handle's ION shared buffer is unmapped and @data is
 * marked released.
 *
 * Returns 0 on success or a negative errno.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;		/* issue TZ unload call */
	bool found_app = false;		/* id + name matched in list */
	bool found_dead_app = false;	/* id matched but name did not */

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* Matches both "keymaste" and "keymaster" (prefix compare). */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	/* Drain other ioctls on this handle, then wait for unblocked apps. */
	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/* Unload only on crash or last ref. */
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				/* Same id, different name: stale entry. */
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for sending scm call to load image */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
								req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
					data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
					data->client.app_id);
		/* TZ may need listener round-trips to finish the unload. */
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
									ret);
				goto unload_exit;
			}
		}
	}

	/* Update (or remove) the local registration entry. */
	if (found_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}
unload_exit:
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2636
2637static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2638 unsigned long virt)
2639{
2640 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2641}
2642
2643static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2644 unsigned long virt)
2645{
2646 return (uintptr_t)data->client.sb_virt +
2647 (virt - data->client.user_virt_sb_base);
2648}
2649
2650int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2651 struct qseecom_send_svc_cmd_req *req_ptr,
2652 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2653{
2654 int ret = 0;
2655 void *req_buf = NULL;
2656
2657 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2658 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2659 req_ptr, send_svc_ireq_ptr);
2660 return -EINVAL;
2661 }
2662
2663 /* Clients need to ensure req_buf is at base offset of shared buffer */
2664 if ((uintptr_t)req_ptr->cmd_req_buf !=
2665 data_ptr->client.user_virt_sb_base) {
2666 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2667 return -EINVAL;
2668 }
2669
2670 if (data_ptr->client.sb_length <
2671 sizeof(struct qseecom_rpmb_provision_key)) {
2672 pr_err("shared buffer is too small to hold key type\n");
2673 return -EINVAL;
2674 }
2675 req_buf = data_ptr->client.sb_virt;
2676
2677 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2678 send_svc_ireq_ptr->key_type =
2679 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2680 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2681 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2682 data_ptr, (uintptr_t)req_ptr->resp_buf));
2683 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2684
2685 return ret;
2686}
2687
2688int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2689 struct qseecom_send_svc_cmd_req *req_ptr,
2690 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2691{
2692 int ret = 0;
2693 uint32_t reqd_len_sb_in = 0;
2694
2695 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2696 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2697 req_ptr, send_svc_ireq_ptr);
2698 return -EINVAL;
2699 }
2700
2701 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2702 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2703 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2704 pr_err("Required: %u, Available: %zu\n",
2705 reqd_len_sb_in, data_ptr->client.sb_length);
2706 return -ENOMEM;
2707 }
2708
2709 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2710 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2711 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2712 data_ptr, (uintptr_t)req_ptr->resp_buf));
2713 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2714
2715 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2716 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2717
2718
2719 return ret;
2720}
2721
2722static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
2723 struct qseecom_send_svc_cmd_req *req)
2724{
2725 if (!req || !req->resp_buf || !req->cmd_req_buf) {
2726 pr_err("req or cmd buffer or response buffer is null\n");
2727 return -EINVAL;
2728 }
2729
2730 if (!data || !data->client.ihandle) {
2731 pr_err("Client or client handle is not initialized\n");
2732 return -EINVAL;
2733 }
2734
2735 if (data->client.sb_virt == NULL) {
2736 pr_err("sb_virt null\n");
2737 return -EINVAL;
2738 }
2739
2740 if (data->client.user_virt_sb_base == 0) {
2741 pr_err("user_virt_sb_base is null\n");
2742 return -EINVAL;
2743 }
2744
2745 if (data->client.sb_length == 0) {
2746 pr_err("sb_length is 0\n");
2747 return -EINVAL;
2748 }
2749
2750 if (((uintptr_t)req->cmd_req_buf <
2751 data->client.user_virt_sb_base) ||
2752 ((uintptr_t)req->cmd_req_buf >=
2753 (data->client.user_virt_sb_base + data->client.sb_length))) {
2754 pr_err("cmd buffer address not within shared bufffer\n");
2755 return -EINVAL;
2756 }
2757 if (((uintptr_t)req->resp_buf <
2758 data->client.user_virt_sb_base) ||
2759 ((uintptr_t)req->resp_buf >=
2760 (data->client.user_virt_sb_base + data->client.sb_length))) {
2761 pr_err("response buffer address not within shared bufffer\n");
2762 return -EINVAL;
2763 }
2764 if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
2765 (req->cmd_req_len > data->client.sb_length) ||
2766 (req->resp_len > data->client.sb_length)) {
2767 pr_err("cmd buf length or response buf length not valid\n");
2768 return -EINVAL;
2769 }
2770 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
2771 pr_err("Integer overflow detected in req_len & rsp_len\n");
2772 return -EINVAL;
2773 }
2774
2775 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
2776 pr_debug("Not enough memory to fit cmd_buf.\n");
2777 pr_debug("resp_buf. Required: %u, Available: %zu\n",
2778 (req->cmd_req_len + req->resp_len),
2779 data->client.sb_length);
2780 return -ENOMEM;
2781 }
2782 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
2783 pr_err("Integer overflow in req_len & cmd_req_buf\n");
2784 return -EINVAL;
2785 }
2786 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
2787 pr_err("Integer overflow in resp_len & resp_buf\n");
2788 return -EINVAL;
2789 }
2790 if (data->client.user_virt_sb_base >
2791 (ULONG_MAX - data->client.sb_length)) {
2792 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
2793 return -EINVAL;
2794 }
2795 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
2796 ((uintptr_t)data->client.user_virt_sb_base +
2797 data->client.sb_length)) ||
2798 (((uintptr_t)req->resp_buf + req->resp_len) >
2799 ((uintptr_t)data->client.user_virt_sb_base +
2800 data->client.sb_length))) {
2801 pr_err("cmd buf or resp buf is out of shared buffer region\n");
2802 return -EINVAL;
2803 }
2804 return 0;
2805}
2806
2807static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
2808 void __user *argp)
2809{
2810 int ret = 0;
2811 struct qseecom_client_send_service_ireq send_svc_ireq;
2812 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
2813 struct qseecom_command_scm_resp resp;
2814 struct qseecom_send_svc_cmd_req req;
2815 void *send_req_ptr;
2816 size_t req_buf_size;
2817
2818 /*struct qseecom_command_scm_resp resp;*/
2819
2820 if (copy_from_user(&req,
2821 (void __user *)argp,
2822 sizeof(req))) {
2823 pr_err("copy_from_user failed\n");
2824 return -EFAULT;
2825 }
2826
2827 if (__validate_send_service_cmd_inputs(data, &req))
2828 return -EINVAL;
2829
2830 data->type = QSEECOM_SECURE_SERVICE;
2831
2832 switch (req.cmd_id) {
2833 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
2834 case QSEOS_RPMB_ERASE_COMMAND:
2835 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
2836 send_req_ptr = &send_svc_ireq;
2837 req_buf_size = sizeof(send_svc_ireq);
2838 if (__qseecom_process_rpmb_svc_cmd(data, &req,
2839 send_req_ptr))
2840 return -EINVAL;
2841 break;
2842 case QSEOS_FSM_LTEOTA_REQ_CMD:
2843 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
2844 case QSEOS_FSM_IKE_REQ_CMD:
2845 case QSEOS_FSM_IKE_REQ_RSP_CMD:
2846 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
2847 case QSEOS_FSM_OEM_FUSE_READ_ROW:
2848 case QSEOS_FSM_ENCFS_REQ_CMD:
2849 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
2850 send_req_ptr = &send_fsm_key_svc_ireq;
2851 req_buf_size = sizeof(send_fsm_key_svc_ireq);
2852 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
2853 send_req_ptr))
2854 return -EINVAL;
2855 break;
2856 default:
2857 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
2858 return -EINVAL;
2859 }
2860
2861 if (qseecom.support_bus_scaling) {
2862 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
2863 if (ret) {
2864 pr_err("Fail to set bw HIGH\n");
2865 return ret;
2866 }
2867 } else {
2868 ret = qseecom_perf_enable(data);
2869 if (ret) {
2870 pr_err("Failed to vote for clocks with err %d\n", ret);
2871 goto exit;
2872 }
2873 }
2874
2875 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
2876 data->client.sb_virt, data->client.sb_length,
2877 ION_IOC_CLEAN_INV_CACHES);
2878 if (ret) {
2879 pr_err("cache operation failed %d\n", ret);
2880 goto exit;
2881 }
2882 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2883 (const void *)send_req_ptr,
2884 req_buf_size, &resp, sizeof(resp));
2885 if (ret) {
2886 pr_err("qseecom_scm_call failed with err: %d\n", ret);
2887 if (!qseecom.support_bus_scaling) {
2888 qsee_disable_clock_vote(data, CLK_DFAB);
2889 qsee_disable_clock_vote(data, CLK_SFPB);
2890 } else {
2891 __qseecom_add_bw_scale_down_timer(
2892 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
2893 }
2894 goto exit;
2895 }
2896 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
2897 data->client.sb_virt, data->client.sb_length,
2898 ION_IOC_INV_CACHES);
2899 if (ret) {
2900 pr_err("cache operation failed %d\n", ret);
2901 goto exit;
2902 }
2903 switch (resp.result) {
2904 case QSEOS_RESULT_SUCCESS:
2905 break;
2906 case QSEOS_RESULT_INCOMPLETE:
2907 pr_debug("qseos_result_incomplete\n");
2908 ret = __qseecom_process_incomplete_cmd(data, &resp);
2909 if (ret) {
2910 pr_err("process_incomplete_cmd fail with result: %d\n",
2911 resp.result);
2912 }
2913 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
2914 pr_warn("RPMB key status is 0x%x\n", resp.result);
2915 *(uint32_t *)req.resp_buf = resp.result;
2916 ret = 0;
2917 }
2918 break;
2919 case QSEOS_RESULT_FAILURE:
2920 pr_err("scm call failed with resp.result: %d\n", resp.result);
2921 ret = -EINVAL;
2922 break;
2923 default:
2924 pr_err("Response result %d not supported\n",
2925 resp.result);
2926 ret = -EINVAL;
2927 break;
2928 }
2929 if (!qseecom.support_bus_scaling) {
2930 qsee_disable_clock_vote(data, CLK_DFAB);
2931 qsee_disable_clock_vote(data, CLK_SFPB);
2932 } else {
2933 __qseecom_add_bw_scale_down_timer(
2934 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
2935 }
2936
2937exit:
2938 return ret;
2939}
2940
2941static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
2942 struct qseecom_send_cmd_req *req)
2943
2944{
2945 if (!data || !data->client.ihandle) {
2946 pr_err("Client or client handle is not initialized\n");
2947 return -EINVAL;
2948 }
2949 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
2950 (req->cmd_req_buf == NULL)) {
2951 pr_err("cmd buffer or response buffer is null\n");
2952 return -EINVAL;
2953 }
2954 if (((uintptr_t)req->cmd_req_buf <
2955 data->client.user_virt_sb_base) ||
2956 ((uintptr_t)req->cmd_req_buf >=
2957 (data->client.user_virt_sb_base + data->client.sb_length))) {
2958 pr_err("cmd buffer address not within shared bufffer\n");
2959 return -EINVAL;
2960 }
2961 if (((uintptr_t)req->resp_buf <
2962 data->client.user_virt_sb_base) ||
2963 ((uintptr_t)req->resp_buf >=
2964 (data->client.user_virt_sb_base + data->client.sb_length))) {
2965 pr_err("response buffer address not within shared bufffer\n");
2966 return -EINVAL;
2967 }
2968 if ((req->cmd_req_len == 0) ||
2969 (req->cmd_req_len > data->client.sb_length) ||
2970 (req->resp_len > data->client.sb_length)) {
2971 pr_err("cmd buf length or response buf length not valid\n");
2972 return -EINVAL;
2973 }
2974 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
2975 pr_err("Integer overflow detected in req_len & rsp_len\n");
2976 return -EINVAL;
2977 }
2978
2979 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
2980 pr_debug("Not enough memory to fit cmd_buf.\n");
2981 pr_debug("resp_buf. Required: %u, Available: %zu\n",
2982 (req->cmd_req_len + req->resp_len),
2983 data->client.sb_length);
2984 return -ENOMEM;
2985 }
2986 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
2987 pr_err("Integer overflow in req_len & cmd_req_buf\n");
2988 return -EINVAL;
2989 }
2990 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
2991 pr_err("Integer overflow in resp_len & resp_buf\n");
2992 return -EINVAL;
2993 }
2994 if (data->client.user_virt_sb_base >
2995 (ULONG_MAX - data->client.sb_length)) {
2996 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
2997 return -EINVAL;
2998 }
2999 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3000 ((uintptr_t)data->client.user_virt_sb_base +
3001 data->client.sb_length)) ||
3002 (((uintptr_t)req->resp_buf + req->resp_len) >
3003 ((uintptr_t)data->client.user_virt_sb_base +
3004 data->client.sb_length))) {
3005 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3006 return -EINVAL;
3007 }
3008 return 0;
3009}
3010
/*
 * __qseecom_process_reentrancy() - handle a reentrant scm_call response.
 *
 * Dispatches on @resp->result: a blocked-on-listener response is first
 * resolved (waiting for the listener), then treated like INCOMPLETE;
 * INCOMPLETE responses are re-driven while this app is marked blocked.
 *
 * Returns 0 on success or a negative errno.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
			resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/* fall through: once unblocked, finish as INCOMPLETE */

	case QSEOS_RESULT_INCOMPLETE:
		/* Mark this app blocked while re-driving the command. */
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
						resp->result);
		return -EINVAL;
	}
}
3049
/*
 * Send a previously validated command request to the TZ app bound to
 * @data and process the secure-world response.
 *
 * Builds the 32-bit or 64-bit SCM "send data" request depending on the
 * QSEE version, flushes the sglistinfo table and the client shared
 * buffer, issues the SCM call, handles INCOMPLETE / reentrancy results,
 * and finally invalidates the shared buffer so the CPU sees the
 * response written by TZ.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy QSEE: request carries 32-bit physical addresses. */
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* Make the sglistinfo table visible to the secure world. */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* The command id is the first 32-bit field of both request layouts. */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* Clean the shared buffer so TZ reads the request contents. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
			ret, data->client.app_id);
		return ret;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
					resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* Invalidate so the CPU reads TZ's response, not stale cache lines. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret)
		pr_err("cache operation failed %d\n", ret);
	return ret;
}
3180
3181static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3182{
3183 int ret = 0;
3184 struct qseecom_send_cmd_req req;
3185
3186 ret = copy_from_user(&req, argp, sizeof(req));
3187 if (ret) {
3188 pr_err("copy_from_user failed\n");
3189 return ret;
3190 }
3191
3192 if (__validate_send_cmd_inputs(data, &req))
3193 return -EINVAL;
3194
3195 ret = __qseecom_send_cmd(data, &req);
3196
3197 if (ret)
3198 return ret;
3199
3200 return ret;
3201}
3202
3203int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3204 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3205 struct qseecom_dev_handle *data, int i) {
3206
3207 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3208 (req->ifd_data[i].fd > 0)) {
3209 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3210 (req->ifd_data[i].cmd_buf_offset >
3211 req->cmd_req_len - sizeof(uint32_t))) {
3212 pr_err("Invalid offset (req len) 0x%x\n",
3213 req->ifd_data[i].cmd_buf_offset);
3214 return -EINVAL;
3215 }
3216 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3217 (lstnr_resp->ifd_data[i].fd > 0)) {
3218 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3219 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3220 lstnr_resp->resp_len - sizeof(uint32_t))) {
3221 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3222 lstnr_resp->ifd_data[i].cmd_buf_offset);
3223 return -EINVAL;
3224 }
3225 }
3226 return 0;
3227}
3228
/*
 * Patch (or un-patch) ion-buffer physical addresses into a modfd
 * command/response buffer, using 32-bit scatter-gather entries.
 *
 * @msg:     struct qseecom_send_modfd_cmd_req (client app) or
 *           struct qseecom_send_modfd_listener_resp (listener),
 *           selected by data->type
 * @cleanup: false = write physical addresses at each ifd_data[i]
 *           cmd_buf_offset before the SCM call; true = zero them out
 *           again afterwards
 * @data:    per-client handle; sglistinfo bookkeeping is recorded here
 *           (or on the listener entry) on the non-cleanup pass
 *
 * For each valid fd: import the ion buffer, fetch its sg table, write
 * either a single 32-bit address (one segment) or an array of
 * qseecom_sg_entry records at the requested offset, perform the cache
 * maintenance, then release the ion handle.
 *
 * Return: 0 on success; -EFAULT/-EINVAL on bad handle type or msg;
 * -ENOMEM on any per-fd failure (see NOTE at err below).
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* Interpret @msg according to the caller's role. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Pick up the fd and target patch location for this entry. */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single segment: patch one 32-bit phys address. */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* Multiple segments: patch an sg-entry array. */
			struct qseecom_sg_entry *update;
			int j = 0;

			/* The offset must leave room for all nents entries. */
			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
						(req->cmd_req_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			/* Un-patch pass: just invalidate the ion buffer. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Patch pass: clean caches and record sglist info. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				/* Offset relative to the listener's shared buffer. */
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/*
	 * NOTE(review): all failures after import collapse to -ENOMEM,
	 * even cache-op or validation errors — matches original behavior.
	 */
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3445
3446static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3447 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3448{
3449 struct scatterlist *sg = sg_ptr->sgl;
3450 struct qseecom_sg_entry_64bit *sg_entry;
3451 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3452 void *buf;
3453 uint i;
3454 size_t size;
3455 dma_addr_t coh_pmem;
3456
3457 if (fd_idx >= MAX_ION_FD) {
3458 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3459 return -ENOMEM;
3460 }
3461 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3462 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3463 /* Allocate a contiguous kernel buffer */
3464 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3465 size = (size + PAGE_SIZE) & PAGE_MASK;
3466 buf = dma_alloc_coherent(qseecom.pdev,
3467 size, &coh_pmem, GFP_KERNEL);
3468 if (buf == NULL) {
3469 pr_err("failed to alloc memory for sg buf\n");
3470 return -ENOMEM;
3471 }
3472 /* update qseecom_sg_list_buf_hdr_64bit */
3473 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3474 buf_hdr->new_buf_phys_addr = coh_pmem;
3475 buf_hdr->nents_total = sg_ptr->nents;
3476 /* save the left sg entries into new allocated buf */
3477 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3478 for (i = 0; i < sg_ptr->nents; i++) {
3479 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3480 sg_entry->len = sg->length;
3481 sg_entry++;
3482 sg = sg_next(sg);
3483 }
3484
3485 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3486 data->client.sec_buf_fd[fd_idx].vbase = buf;
3487 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3488 data->client.sec_buf_fd[fd_idx].size = size;
3489
3490 return 0;
3491}
3492
/*
 * 64-bit counterpart of __qseecom_update_cmd_buf(): patch (or un-patch)
 * ion-buffer physical addresses into a modfd command/response buffer
 * using 64-bit scatter-gather entries.
 *
 * @msg:     struct qseecom_send_modfd_cmd_req (client app) or
 *           struct qseecom_send_modfd_listener_resp (listener),
 *           selected by data->type
 * @cleanup: false = write addresses before the SCM call; true = zero
 *           them (and free any side buffers) afterwards
 * @data:    per-client handle; sglistinfo bookkeeping is recorded here
 *           (or on the listener entry) on the non-cleanup pass
 *
 * Unlike the 32-bit variant, more than QSEECOM_MAX_SG_ENTRY segments
 * are supported by spilling the entry list into a DMA-coherent side
 * buffer via __qseecom_allocate_sg_list_buffer().
 *
 * Return: 0 on success; -EFAULT/-EINVAL on bad handle type or msg;
 * -ENOMEM on any per-fd failure (the err path also frees all side
 * buffers recorded in sec_buf_fd[]).
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* Interpret @msg according to the caller's role. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Pick up the fd and target patch location for this entry. */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			/*
			 * Too many segments for an inline list: spill the
			 * entries into a coherent side buffer (non-cleanup),
			 * or free that side buffer (cleanup).
			 */
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			/* Only the inline header needs cache maintenance. */
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single segment: patch one 64-bit phys address. */
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			/* Multiple segments: patch a 64-bit sg-entry array. */
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			/* The offset must leave room for all nents entries. */
			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
						(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			/* Un-patch pass: just invalidate the ion buffer. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Patch pass: clean caches and record sglist info. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				/* Offset relative to the listener's shared buffer. */
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* Free every recorded side buffer, then release the current handle.
	 * NOTE(review): all failures collapse to -ENOMEM, as in the
	 * 32-bit variant.
	 */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3691
3692static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3693 void __user *argp,
3694 bool is_64bit_addr)
3695{
3696 int ret = 0;
3697 int i;
3698 struct qseecom_send_modfd_cmd_req req;
3699 struct qseecom_send_cmd_req send_cmd_req;
3700
3701 ret = copy_from_user(&req, argp, sizeof(req));
3702 if (ret) {
3703 pr_err("copy_from_user failed\n");
3704 return ret;
3705 }
3706
3707 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3708 send_cmd_req.cmd_req_len = req.cmd_req_len;
3709 send_cmd_req.resp_buf = req.resp_buf;
3710 send_cmd_req.resp_len = req.resp_len;
3711
3712 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3713 return -EINVAL;
3714
3715 /* validate offsets */
3716 for (i = 0; i < MAX_ION_FD; i++) {
3717 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3718 pr_err("Invalid offset %d = 0x%x\n",
3719 i, req.ifd_data[i].cmd_buf_offset);
3720 return -EINVAL;
3721 }
3722 }
3723 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3724 (uintptr_t)req.cmd_req_buf);
3725 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3726 (uintptr_t)req.resp_buf);
3727
3728 if (!is_64bit_addr) {
3729 ret = __qseecom_update_cmd_buf(&req, false, data);
3730 if (ret)
3731 return ret;
3732 ret = __qseecom_send_cmd(data, &send_cmd_req);
3733 if (ret)
3734 return ret;
3735 ret = __qseecom_update_cmd_buf(&req, true, data);
3736 if (ret)
3737 return ret;
3738 } else {
3739 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3740 if (ret)
3741 return ret;
3742 ret = __qseecom_send_cmd(data, &send_cmd_req);
3743 if (ret)
3744 return ret;
3745 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3746 if (ret)
3747 return ret;
3748 }
3749
3750 return ret;
3751}
3752
/* ioctl entry: send a modified-fd command using 32-bit sg entries. */
static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, false);
}
3758
/* ioctl entry: send a modified-fd command using 64-bit sg entries. */
static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, true);
}
3764
3765
3766
3767static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3768 struct qseecom_registered_listener_list *svc)
3769{
3770 int ret;
3771
3772 ret = (svc->rcv_req_flag != 0);
3773 return ret || data->abort;
3774}
3775
3776static int qseecom_receive_req(struct qseecom_dev_handle *data)
3777{
3778 int ret = 0;
3779 struct qseecom_registered_listener_list *this_lstnr;
3780
3781 this_lstnr = __qseecom_find_svc(data->listener.id);
3782 if (!this_lstnr) {
3783 pr_err("Invalid listener ID\n");
3784 return -ENODATA;
3785 }
3786
3787 while (1) {
3788 if (wait_event_freezable(this_lstnr->rcv_req_wq,
3789 __qseecom_listener_has_rcvd_req(data,
3790 this_lstnr))) {
3791 pr_debug("Interrupted: exiting Listener Service = %d\n",
3792 (uint32_t)data->listener.id);
3793 /* woken up for different reason */
3794 return -ERESTARTSYS;
3795 }
3796
3797 if (data->abort) {
3798 pr_err("Aborting Listener Service = %d\n",
3799 (uint32_t)data->listener.id);
3800 return -ENODEV;
3801 }
3802 this_lstnr->rcv_req_flag = 0;
3803 break;
3804 }
3805 return ret;
3806}
3807
3808static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3809{
3810 unsigned char app_arch = 0;
3811 struct elf32_hdr *ehdr;
3812 struct elf64_hdr *ehdr64;
3813
3814 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3815
3816 switch (app_arch) {
3817 case ELFCLASS32: {
3818 ehdr = (struct elf32_hdr *)fw_entry->data;
3819 if (fw_entry->size < sizeof(*ehdr)) {
3820 pr_err("%s: Not big enough to be an elf32 header\n",
3821 qseecom.pdev->init_name);
3822 return false;
3823 }
3824 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3825 pr_err("%s: Not an elf32 header\n",
3826 qseecom.pdev->init_name);
3827 return false;
3828 }
3829 if (ehdr->e_phnum == 0) {
3830 pr_err("%s: No loadable segments\n",
3831 qseecom.pdev->init_name);
3832 return false;
3833 }
3834 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3835 sizeof(struct elf32_hdr) > fw_entry->size) {
3836 pr_err("%s: Program headers not within mdt\n",
3837 qseecom.pdev->init_name);
3838 return false;
3839 }
3840 break;
3841 }
3842 case ELFCLASS64: {
3843 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3844 if (fw_entry->size < sizeof(*ehdr64)) {
3845 pr_err("%s: Not big enough to be an elf64 header\n",
3846 qseecom.pdev->init_name);
3847 return false;
3848 }
3849 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3850 pr_err("%s: Not an elf64 header\n",
3851 qseecom.pdev->init_name);
3852 return false;
3853 }
3854 if (ehdr64->e_phnum == 0) {
3855 pr_err("%s: No loadable segments\n",
3856 qseecom.pdev->init_name);
3857 return false;
3858 }
3859 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3860 sizeof(struct elf64_hdr) > fw_entry->size) {
3861 pr_err("%s: Program headers not within mdt\n",
3862 qseecom.pdev->init_name);
3863 return false;
3864 }
3865 break;
3866 }
3867 default: {
3868 pr_err("QSEE app arch %u is not supported\n", app_arch);
3869 return false;
3870 }
3871 }
3872 return true;
3873}
3874
/*
 * Compute the total size of an app's firmware images — "<app>.mdt"
 * plus every "<app>.b%02d" blob (one per ELF program header) — and
 * report the ELF class found in the mdt.
 *
 * @appname:  base name of the firmware images
 * @fw_size:  out: sum of mdt + all blob sizes (set to 0 on failure)
 * @app_arch: out: e_ident[EI_CLASS] of the mdt (ELFCLASS32/64)
 *
 * Return: 0 on success, -EIO on firmware/validation failure, -EINVAL
 * on size overflow.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with request_firmware\n");
		ret = -EIO;
		goto err;
	}
	/* Validate header before trusting e_ident / e_phnum below. */
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* One ".bNN" blob per program header; sum them with overflow check. */
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}

	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
3937
/*
 * Load an app's firmware images into a caller-provided buffer.
 *
 * Copies "<app>.mdt" followed by each "<app>.b%02d" blob contiguously
 * into @img_data (sized @fw_size, as computed by
 * __qseecom_get_fw_size()), and fills @load_req->mdt_len / img_len.
 *
 * Return: 0 on success, -EIO on firmware failure, -EINVAL if the data
 * would overflow @fw_size.
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
				uint32_t fw_size,
				struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}

	load_req->img_len = fw_entry->size;
	/* The mdt alone must fit in the destination buffer. */
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/

	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, app_arch);
		ret = -EIO;
		goto err;
	}
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* Append each blob, guarding against u32 overflow and buf overrun. */
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	/* release_firmware() tolerates a NULL fw_entry. */
	release_firmware(fw_entry);
	return ret;
}
4009
/*
 * Allocate an ION buffer (QSECOM heap, 4K-aligned) for firmware image
 * loading, map it into the kernel, and look up its physical address.
 *
 * @pihandle: out: ION handle for later cache ops / freeing
 * @data:     out: kernel virtual address of the buffer
 * @fw_size:  requested buffer size in bytes
 * @paddr:    out: physical address of the buffer
 *
 * Return: 0 on success, -ENOMEM on alloc/map failure, -EIO if the
 * physical address cannot be retrieved.
 */
static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
{
	size_t len = 0;
	int ret = 0;
	ion_phys_addr_t pa;
	struct ion_handle *ihandle = NULL;
	u8 *img_data = NULL;

	ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
			SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);

	if (IS_ERR_OR_NULL(ihandle)) {
		pr_err("ION alloc failed\n");
		return -ENOMEM;
	}
	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
					ihandle);

	if (IS_ERR_OR_NULL(img_data)) {
		pr_err("ION memory mapping for image loading failed\n");
		ret = -ENOMEM;
		goto exit_ion_free;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
	if (ret) {
		pr_err("physical memory retrieval failure\n");
		ret = -EIO;
		goto exit_ion_unmap_kernel;
	}

	*pihandle = ihandle;
	*data = img_data;
	*paddr = pa;
	return ret;

exit_ion_unmap_kernel:
	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
exit_ion_free:
	ion_free(qseecom.ion_clnt, ihandle);
	ihandle = NULL;
	return ret;
}
4054
/*
 * Unmap and free an image buffer obtained from
 * __qseecom_allocate_img_data(); clears the caller's handle to prevent
 * reuse.
 */
static void __qseecom_free_img_data(struct ion_handle **ihandle)
{
	ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
	ion_free(qseecom.ion_clnt, *ihandle);
	*ihandle = NULL;
}
4061
4062static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4063 uint32_t *app_id)
4064{
4065 int ret = -1;
4066 uint32_t fw_size = 0;
4067 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4068 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4069 struct qseecom_command_scm_resp resp;
4070 u8 *img_data = NULL;
4071 ion_phys_addr_t pa = 0;
4072 struct ion_handle *ihandle = NULL;
4073 void *cmd_buf = NULL;
4074 size_t cmd_len;
4075 uint32_t app_arch = 0;
4076
4077 if (!data || !appname || !app_id) {
4078 pr_err("Null pointer to data or appname or appid\n");
4079 return -EINVAL;
4080 }
4081 *app_id = 0;
4082 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4083 return -EIO;
4084 data->client.app_arch = app_arch;
4085
4086 /* Check and load cmnlib */
4087 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4088 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4089 ret = qseecom_load_commonlib_image(data, "cmnlib");
4090 if (ret) {
4091 pr_err("failed to load cmnlib\n");
4092 return -EIO;
4093 }
4094 qseecom.commonlib_loaded = true;
4095 pr_debug("cmnlib is loaded\n");
4096 }
4097
4098 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4099 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4100 if (ret) {
4101 pr_err("failed to load cmnlib64\n");
4102 return -EIO;
4103 }
4104 qseecom.commonlib64_loaded = true;
4105 pr_debug("cmnlib64 is loaded\n");
4106 }
4107 }
4108
4109 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4110 if (ret)
4111 return ret;
4112
4113 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4114 if (ret) {
4115 ret = -EIO;
4116 goto exit_free_img_data;
4117 }
4118
4119 /* Populate the load_req parameters */
4120 if (qseecom.qsee_version < QSEE_VERSION_40) {
4121 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4122 load_req.mdt_len = load_req.mdt_len;
4123 load_req.img_len = load_req.img_len;
4124 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4125 load_req.phy_addr = (uint32_t)pa;
4126 cmd_buf = (void *)&load_req;
4127 cmd_len = sizeof(struct qseecom_load_app_ireq);
4128 } else {
4129 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4130 load_req_64bit.mdt_len = load_req.mdt_len;
4131 load_req_64bit.img_len = load_req.img_len;
4132 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4133 load_req_64bit.phy_addr = (uint64_t)pa;
4134 cmd_buf = (void *)&load_req_64bit;
4135 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4136 }
4137
4138 if (qseecom.support_bus_scaling) {
4139 mutex_lock(&qsee_bw_mutex);
4140 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4141 mutex_unlock(&qsee_bw_mutex);
4142 if (ret) {
4143 ret = -EIO;
4144 goto exit_free_img_data;
4145 }
4146 }
4147
4148 ret = __qseecom_enable_clk_scale_up(data);
4149 if (ret) {
4150 ret = -EIO;
4151 goto exit_unregister_bus_bw_need;
4152 }
4153
4154 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4155 img_data, fw_size,
4156 ION_IOC_CLEAN_INV_CACHES);
4157 if (ret) {
4158 pr_err("cache operation failed %d\n", ret);
4159 goto exit_disable_clk_vote;
4160 }
4161
4162 /* SCM_CALL to load the image */
4163 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4164 &resp, sizeof(resp));
4165 if (ret) {
4166 pr_err("scm_call to load failed : ret %d\n", ret);
4167 ret = -EIO;
4168 goto exit_disable_clk_vote;
4169 }
4170
4171 switch (resp.result) {
4172 case QSEOS_RESULT_SUCCESS:
4173 *app_id = resp.data;
4174 break;
4175 case QSEOS_RESULT_INCOMPLETE:
4176 ret = __qseecom_process_incomplete_cmd(data, &resp);
4177 if (ret)
4178 pr_err("process_incomplete_cmd FAILED\n");
4179 else
4180 *app_id = resp.data;
4181 break;
4182 case QSEOS_RESULT_FAILURE:
4183 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4184 break;
4185 default:
4186 pr_err("scm call return unknown response %d\n", resp.result);
4187 ret = -EINVAL;
4188 break;
4189 }
4190
4191exit_disable_clk_vote:
4192 __qseecom_disable_clk_scale_down(data);
4193
4194exit_unregister_bus_bw_need:
4195 if (qseecom.support_bus_scaling) {
4196 mutex_lock(&qsee_bw_mutex);
4197 qseecom_unregister_bus_bandwidth_needs(data);
4198 mutex_unlock(&qsee_bw_mutex);
4199 }
4200
4201exit_free_img_data:
4202 __qseecom_free_img_data(&ihandle);
4203 return ret;
4204}
4205
/*
 * qseecom_load_commonlib_image() - load a QSEE common library image
 * (e.g. "cmnlib" or "cmnlib64") into the secure environment.
 * @data:        client device handle used for bus/clock voting.
 * @cmnlib_name: library image name; must be shorter than MAX_APP_NAME_SIZE.
 *
 * Stages the image in the dedicated qseecom.cmnlib_ion_handle buffer,
 * votes for bandwidth/clocks, and issues the LOAD_SERV_IMAGE scm call.
 * The ion buffer is always freed before returning (success or failure).
 * Returns 0 on success or a negative errno.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_allocate_img_data(&qseecom.cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	/* Also fills in load_req.mdt_len and load_req.img_len. */
	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	/* Pick the 32-bit or 64-bit request layout based on QSEE version. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* Flush the staged image so TZ reads what was just written. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, qseecom.cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ requires listener servicing before the load completes. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&qseecom.cmnlib_ion_handle);
	return ret;
}
4325
4326static int qseecom_unload_commonlib_image(void)
4327{
4328 int ret = -EINVAL;
4329 struct qseecom_unload_lib_image_ireq unload_req = {0};
4330 struct qseecom_command_scm_resp resp;
4331
4332 /* Populate the remaining parameters */
4333 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4334
4335 /* SCM_CALL to load the image */
4336 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4337 sizeof(struct qseecom_unload_lib_image_ireq),
4338 &resp, sizeof(resp));
4339 if (ret) {
4340 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4341 ret = -EIO;
4342 } else {
4343 switch (resp.result) {
4344 case QSEOS_RESULT_SUCCESS:
4345 break;
4346 case QSEOS_RESULT_FAILURE:
4347 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4348 break;
4349 default:
4350 pr_err("scm call return unknown response %d\n",
4351 resp.result);
4352 ret = -EINVAL;
4353 break;
4354 }
4355 }
4356
4357 return ret;
4358}
4359
4360int qseecom_start_app(struct qseecom_handle **handle,
4361 char *app_name, uint32_t size)
4362{
4363 int32_t ret = 0;
4364 unsigned long flags = 0;
4365 struct qseecom_dev_handle *data = NULL;
4366 struct qseecom_check_app_ireq app_ireq;
4367 struct qseecom_registered_app_list *entry = NULL;
4368 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4369 bool found_app = false;
4370 size_t len;
4371 ion_phys_addr_t pa;
4372 uint32_t fw_size, app_arch;
4373 uint32_t app_id = 0;
4374
4375 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4376 pr_err("Not allowed to be called in %d state\n",
4377 atomic_read(&qseecom.qseecom_state));
4378 return -EPERM;
4379 }
4380 if (!app_name) {
4381 pr_err("failed to get the app name\n");
4382 return -EINVAL;
4383 }
4384
4385 if (strlen(app_name) >= MAX_APP_NAME_SIZE) {
4386 pr_err("The app_name (%s) with length %zu is not valid\n",
4387 app_name, strlen(app_name));
4388 return -EINVAL;
4389 }
4390
4391 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4392 if (!(*handle))
4393 return -ENOMEM;
4394
4395 data = kzalloc(sizeof(*data), GFP_KERNEL);
4396 if (!data) {
4397 if (ret == 0) {
4398 kfree(*handle);
4399 *handle = NULL;
4400 }
4401 return -ENOMEM;
4402 }
4403 data->abort = 0;
4404 data->type = QSEECOM_CLIENT_APP;
4405 data->released = false;
4406 data->client.sb_length = size;
4407 data->client.user_virt_sb_base = 0;
4408 data->client.ihandle = NULL;
4409
4410 init_waitqueue_head(&data->abort_wq);
4411
4412 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4413 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4414 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4415 pr_err("Ion client could not retrieve the handle\n");
4416 kfree(data);
4417 kfree(*handle);
4418 *handle = NULL;
4419 return -EINVAL;
4420 }
4421 mutex_lock(&app_access_lock);
4422
4423 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4424 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4425 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4426 if (ret)
4427 goto err;
4428
4429 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4430 if (app_id) {
4431 pr_warn("App id %d for [%s] app exists\n", app_id,
4432 (char *)app_ireq.app_name);
4433 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4434 list_for_each_entry(entry,
4435 &qseecom.registered_app_list_head, list){
4436 if (entry->app_id == app_id) {
4437 entry->ref_cnt++;
4438 found_app = true;
4439 break;
4440 }
4441 }
4442 spin_unlock_irqrestore(
4443 &qseecom.registered_app_list_lock, flags);
4444 if (!found_app)
4445 pr_warn("App_id %d [%s] was loaded but not registered\n",
4446 ret, (char *)app_ireq.app_name);
4447 } else {
4448 /* load the app and get the app_id */
4449 pr_debug("%s: Loading app for the first time'\n",
4450 qseecom.pdev->init_name);
4451 ret = __qseecom_load_fw(data, app_name, &app_id);
4452 if (ret < 0)
4453 goto err;
4454 }
4455 data->client.app_id = app_id;
4456 if (!found_app) {
4457 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4458 if (!entry) {
4459 pr_err("kmalloc for app entry failed\n");
4460 ret = -ENOMEM;
4461 goto err;
4462 }
4463 entry->app_id = app_id;
4464 entry->ref_cnt = 1;
4465 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4466 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4467 ret = -EIO;
4468 kfree(entry);
4469 goto err;
4470 }
4471 entry->app_arch = app_arch;
4472 entry->app_blocked = false;
4473 entry->blocked_on_listener_id = 0;
4474 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4475 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4476 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4477 flags);
4478 }
4479
4480 /* Get the physical address of the ION BUF */
4481 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4482 if (ret) {
4483 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4484 ret);
4485 goto err;
4486 }
4487
4488 /* Populate the structure for sending scm call to load image */
4489 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4490 data->client.ihandle);
4491 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4492 pr_err("ION memory mapping for client shared buf failed\n");
4493 ret = -ENOMEM;
4494 goto err;
4495 }
4496 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4497 data->client.sb_phys = (phys_addr_t)pa;
4498 (*handle)->dev = (void *)data;
4499 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4500 (*handle)->sbuf_len = data->client.sb_length;
4501
4502 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4503 if (!kclient_entry) {
4504 ret = -ENOMEM;
4505 goto err;
4506 }
4507 kclient_entry->handle = *handle;
4508
4509 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4510 list_add_tail(&kclient_entry->list,
4511 &qseecom.registered_kclient_list_head);
4512 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4513
4514 mutex_unlock(&app_access_lock);
4515 return 0;
4516
4517err:
4518 kfree(data);
4519 kfree(*handle);
4520 *handle = NULL;
4521 mutex_unlock(&app_access_lock);
4522 return ret;
4523}
4524EXPORT_SYMBOL(qseecom_start_app);
4525
/*
 * qseecom_shutdown_app() - kernel-client API to unload a started trusted
 * app and release its handle.
 * @handle: pointer to the handle returned by qseecom_start_app(); set to
 *          NULL on successful shutdown.
 *
 * Returns 0 on success, -EPERM if the driver is not in the READY state,
 * -EINVAL for a bad handle or one not found in the kclient list.
 */
int qseecom_shutdown_app(struct qseecom_handle **handle)
{
	int ret = -EINVAL;
	struct qseecom_dev_handle *data;

	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	bool found_handle = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
				atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if ((handle == NULL) || (*handle == NULL)) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = (struct qseecom_dev_handle *) ((*handle)->dev);
	mutex_lock(&app_access_lock);

	/* Detach this handle's entry from the kclient list under the lock. */
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
				list) {
		if (kclient->handle == (*handle)) {
			list_del(&kclient->list);
			found_handle = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
	if (!found_handle)
		pr_err("Unable to find the handle, exiting\n");
	else
		ret = qseecom_unload_app(data, false);

	mutex_unlock(&app_access_lock);
	/*
	 * Only free on success; kclient was removed from the list above, so
	 * it is safe to release here. kzfree zeroizes before freeing.
	 */
	if (ret == 0) {
		kzfree(data);
		kzfree(*handle);
		kzfree(kclient);
		*handle = NULL;
	}

	return ret;
}
4573EXPORT_SYMBOL(qseecom_shutdown_app);
4574
/*
 * qseecom_send_command() - kernel-client API to send a command to a
 * started trusted application.
 * @handle:   handle returned by qseecom_start_app().
 * @send_buf: request buffer (validated against the client's shared buffer
 *            by __validate_send_cmd_inputs()).
 * @sbuf_len: request length.
 * @resp_buf: response buffer.
 * @rbuf_len: response length.
 *
 * Returns 0 on success or a negative errno.
 */
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
				atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
						ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		perf_enabled = true;
	}
	/* Force the legacy command path for the "securemm" app. */
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req);
	data->use_legacy_cmd = false;
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	/* Undo the clock vote taken above just for this command. */
	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
4653EXPORT_SYMBOL(qseecom_send_command);
4654
4655int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4656{
4657 int ret = 0;
4658
4659 if ((handle == NULL) || (handle->dev == NULL)) {
4660 pr_err("No valid kernel client\n");
4661 return -EINVAL;
4662 }
4663 if (high) {
4664 if (qseecom.support_bus_scaling) {
4665 mutex_lock(&qsee_bw_mutex);
4666 __qseecom_register_bus_bandwidth_needs(handle->dev,
4667 HIGH);
4668 mutex_unlock(&qsee_bw_mutex);
4669 } else {
4670 ret = qseecom_perf_enable(handle->dev);
4671 if (ret)
4672 pr_err("Failed to vote for clock with err %d\n",
4673 ret);
4674 }
4675 } else {
4676 if (!qseecom.support_bus_scaling) {
4677 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4678 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4679 } else {
4680 mutex_lock(&qsee_bw_mutex);
4681 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4682 mutex_unlock(&qsee_bw_mutex);
4683 }
4684 }
4685 return ret;
4686}
4687EXPORT_SYMBOL(qseecom_set_bandwidth);
4688
4689int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
4690{
4691 struct qseecom_registered_app_list dummy_app_entry = { {0} };
4692 struct qseecom_dev_handle dummy_private_data = {0};
4693 struct qseecom_command_scm_resp resp;
4694 int ret = 0;
4695
4696 if (!desc) {
4697 pr_err("desc is NULL\n");
4698 return -EINVAL;
4699 }
4700
4701 resp.result = desc->ret[0]; /*req_cmd*/
4702 resp.resp_type = desc->ret[1]; /*app_id*/
4703 resp.data = desc->ret[2]; /*listener_id*/
4704
4705 dummy_private_data.client.app_id = desc->ret[1];
4706 dummy_app_entry.app_id = desc->ret[1];
4707
4708 mutex_lock(&app_access_lock);
4709 ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
4710 &dummy_private_data);
4711 mutex_unlock(&app_access_lock);
4712 if (ret)
4713 pr_err("Failed to req cmd %d lsnr %d on app %d, ret = %d\n",
4714 (int)desc->ret[0], (int)desc->ret[2],
4715 (int)desc->ret[1], ret);
4716 desc->ret[0] = resp.result;
4717 desc->ret[1] = resp.resp_type;
4718 desc->ret[2] = resp.data;
4719 return ret;
4720}
4721EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4722
4723static int qseecom_send_resp(void)
4724{
4725 qseecom.send_resp_flag = 1;
4726 wake_up_interruptible(&qseecom.send_resp_wq);
4727 return 0;
4728}
4729
4730static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4731{
4732 struct qseecom_registered_listener_list *this_lstnr = NULL;
4733
4734 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4735 this_lstnr = __qseecom_find_svc(data->listener.id);
4736 if (this_lstnr == NULL)
4737 return -EINVAL;
4738 qseecom.send_resp_flag = 1;
4739 this_lstnr->send_resp_flag = 1;
4740 wake_up_interruptible(&qseecom.send_resp_wq);
4741 return 0;
4742}
4743
/*
 * Validate a listener's modified-fd response before it is used: the
 * response must be non-empty, lie entirely within the listener's shared
 * buffer, and every ion-fd patch offset must fall inside the response.
 * The explicit overflow checks guard the pointer arithmetic below against
 * wraparound on attacker-controlled values.
 * Returns 0 if all checks pass, -EINVAL otherwise.
 */
static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
			struct qseecom_send_modfd_listener_resp *resp,
			struct qseecom_registered_listener_list *this_lstnr)
{
	int i;

	if (!data || !resp || !this_lstnr) {
		pr_err("listener handle or resp msg is null\n");
		return -EINVAL;
	}

	if (resp->resp_buf_ptr == NULL) {
		pr_err("resp buffer is null\n");
		return -EINVAL;
	}
	/* validate resp buf length */
	if ((resp->resp_len == 0) ||
			(resp->resp_len > this_lstnr->sb_length)) {
		pr_err("resp buf length %d not valid\n", resp->resp_len);
		return -EINVAL;
	}

	/* Reject ranges whose end-address computation would wrap around. */
	if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)this_lstnr->user_virt_sb_base >
					(ULONG_MAX - this_lstnr->sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* validate resp buf: [ptr, ptr + resp_len) must be inside sb */
	if (((uintptr_t)resp->resp_buf_ptr <
		(uintptr_t)this_lstnr->user_virt_sb_base) ||
		((uintptr_t)resp->resp_buf_ptr >=
		((uintptr_t)this_lstnr->user_virt_sb_base +
			this_lstnr->sb_length)) ||
		(((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
		((uintptr_t)this_lstnr->user_virt_sb_base +
			this_lstnr->sb_length))) {
		pr_err("resp buf is out of shared buffer region\n");
		return -EINVAL;
	}

	/* validate offsets: each fd patch point must land inside the resp */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
			pr_err("Invalid offset %d = 0x%x\n",
					i, resp->ifd_data[i].cmd_buf_offset);
			return -EINVAL;
		}
	}

	return 0;
}
4799
4800static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4801 void __user *argp, bool is_64bit_addr)
4802{
4803 struct qseecom_send_modfd_listener_resp resp;
4804 struct qseecom_registered_listener_list *this_lstnr = NULL;
4805
4806 if (copy_from_user(&resp, argp, sizeof(resp))) {
4807 pr_err("copy_from_user failed");
4808 return -EINVAL;
4809 }
4810
4811 this_lstnr = __qseecom_find_svc(data->listener.id);
4812 if (this_lstnr == NULL)
4813 return -EINVAL;
4814
4815 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
4816 return -EINVAL;
4817
4818 resp.resp_buf_ptr = this_lstnr->sb_virt +
4819 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
4820
4821 if (!is_64bit_addr)
4822 __qseecom_update_cmd_buf(&resp, false, data);
4823 else
4824 __qseecom_update_cmd_buf_64(&resp, false, data);
4825 qseecom.send_resp_flag = 1;
4826 this_lstnr->send_resp_flag = 1;
4827 wake_up_interruptible(&qseecom.send_resp_wq);
4828 return 0;
4829}
4830
4831static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4832 void __user *argp)
4833{
4834 return __qseecom_send_modfd_resp(data, argp, false);
4835}
4836
4837static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
4838 void __user *argp)
4839{
4840 return __qseecom_send_modfd_resp(data, argp, true);
4841}
4842
4843static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
4844 void __user *argp)
4845{
4846 struct qseecom_qseos_version_req req;
4847
4848 if (copy_from_user(&req, argp, sizeof(req))) {
4849 pr_err("copy_from_user failed");
4850 return -EINVAL;
4851 }
4852 req.qseos_version = qseecom.qseos_version;
4853 if (copy_to_user(argp, &req, sizeof(req))) {
4854 pr_err("copy_to_user failed");
4855 return -EINVAL;
4856 }
4857 return 0;
4858}
4859
/*
 * __qseecom_enable_clk() - take a reference on the crypto-engine clocks
 * for the given CE instance (CLK_QSEE or CLK_CE_DRV).
 *
 * The clocks are reference counted under clk_access_lock; the hardware
 * clocks are only prepared/enabled on the 0 -> 1 transition, in the order
 * core -> interface -> bus, and unwound in reverse order on failure.
 * Returns 0 on success, -EINVAL for an unknown CE, -EIO on clock failure
 * or refcount saturation.
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	/* Already enabled: just take another reference. */
	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

/* Error unwind: disable in reverse order of what succeeded above. */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
4927
4928static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
4929{
4930 struct qseecom_clk *qclk;
4931
4932 if (qseecom.no_clock_support)
4933 return;
4934
4935 if (ce == CLK_QSEE)
4936 qclk = &qseecom.qsee;
4937 else
4938 qclk = &qseecom.ce_drv;
4939
4940 mutex_lock(&clk_access_lock);
4941
4942 if (qclk->clk_access_cnt == 0) {
4943 mutex_unlock(&clk_access_lock);
4944 return;
4945 }
4946
4947 if (qclk->clk_access_cnt == 1) {
4948 if (qclk->ce_clk != NULL)
4949 clk_disable_unprepare(qclk->ce_clk);
4950 if (qclk->ce_core_clk != NULL)
4951 clk_disable_unprepare(qclk->ce_core_clk);
4952 if (qclk->ce_bus_clk != NULL)
4953 clk_disable_unprepare(qclk->ce_bus_clk);
4954 }
4955 qclk->clk_access_cnt--;
4956 mutex_unlock(&clk_access_lock);
4957}
4958
/*
 * qsee_vote_for_clock() - add one DFAB or SFPB bandwidth vote for this
 * client. Votes are counted globally (qsee_bw_count / qsee_sfpb_bw_count)
 * under qsee_bw_mutex; only the first vote of a kind changes the bus-scale
 * request (levels: 1 = DFAB only, 2 = SFPB only, 3 = both) and, when
 * needed, enables the CE source clock.
 * Returns 0 on success or the msm_bus/clk error code.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			/* First DFAB vote: move to level 3 if SFPB is
			 * already voted, otherwise level 1. */
			if (qseecom.qsee_sfpb_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					/* Roll back the clk vote on failure. */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			/* First SFPB vote: move to level 3 if DFAB is
			 * already voted, otherwise level 2. */
			if (qseecom.qsee_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					/* Roll back the clk vote on failure. */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}

			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
5042
5043static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5044 int32_t clk_type)
5045{
5046 int32_t ret = 0;
5047 struct qseecom_clk *qclk;
5048
5049 qclk = &qseecom.qsee;
5050
5051 if (qseecom.no_clock_support)
5052 return;
5053 if (!qseecom.qsee_perf_client)
5054 return;
5055
5056 switch (clk_type) {
5057 case CLK_DFAB:
5058 mutex_lock(&qsee_bw_mutex);
5059 if (qseecom.qsee_bw_count == 0) {
5060 pr_err("Client error.Extra call to disable DFAB clk\n");
5061 mutex_unlock(&qsee_bw_mutex);
5062 return;
5063 }
5064
5065 if (qseecom.qsee_bw_count == 1) {
5066 if (qseecom.qsee_sfpb_bw_count > 0)
5067 ret = msm_bus_scale_client_update_request(
5068 qseecom.qsee_perf_client, 2);
5069 else {
5070 ret = msm_bus_scale_client_update_request(
5071 qseecom.qsee_perf_client, 0);
5072 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5073 __qseecom_disable_clk(CLK_QSEE);
5074 }
5075 if (ret)
5076 pr_err("SFPB Bandwidth req fail (%d)\n",
5077 ret);
5078 else {
5079 qseecom.qsee_bw_count--;
5080 data->perf_enabled = false;
5081 }
5082 } else {
5083 qseecom.qsee_bw_count--;
5084 data->perf_enabled = false;
5085 }
5086 mutex_unlock(&qsee_bw_mutex);
5087 break;
5088 case CLK_SFPB:
5089 mutex_lock(&qsee_bw_mutex);
5090 if (qseecom.qsee_sfpb_bw_count == 0) {
5091 pr_err("Client error.Extra call to disable SFPB clk\n");
5092 mutex_unlock(&qsee_bw_mutex);
5093 return;
5094 }
5095 if (qseecom.qsee_sfpb_bw_count == 1) {
5096 if (qseecom.qsee_bw_count > 0)
5097 ret = msm_bus_scale_client_update_request(
5098 qseecom.qsee_perf_client, 1);
5099 else {
5100 ret = msm_bus_scale_client_update_request(
5101 qseecom.qsee_perf_client, 0);
5102 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5103 __qseecom_disable_clk(CLK_QSEE);
5104 }
5105 if (ret)
5106 pr_err("SFPB Bandwidth req fail (%d)\n",
5107 ret);
5108 else {
5109 qseecom.qsee_sfpb_bw_count--;
5110 data->fast_load_enabled = false;
5111 }
5112 } else {
5113 qseecom.qsee_sfpb_bw_count--;
5114 data->fast_load_enabled = false;
5115 }
5116 mutex_unlock(&qsee_bw_mutex);
5117 break;
5118 default:
5119 pr_err("Clock type not defined\n");
5120 break;
5121 }
5122
5123}
5124
5125static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5126 void __user *argp)
5127{
5128 struct ion_handle *ihandle; /* Ion handle */
5129 struct qseecom_load_img_req load_img_req;
5130 int uret = 0;
5131 int ret;
5132 ion_phys_addr_t pa = 0;
5133 size_t len;
5134 struct qseecom_load_app_ireq load_req;
5135 struct qseecom_load_app_64bit_ireq load_req_64bit;
5136 struct qseecom_command_scm_resp resp;
5137 void *cmd_buf = NULL;
5138 size_t cmd_len;
5139 /* Copy the relevant information needed for loading the image */
5140 if (copy_from_user(&load_img_req,
5141 (void __user *)argp,
5142 sizeof(struct qseecom_load_img_req))) {
5143 pr_err("copy_from_user failed\n");
5144 return -EFAULT;
5145 }
5146
5147 /* Get the handle of the shared fd */
5148 ihandle = ion_import_dma_buf(qseecom.ion_clnt,
5149 load_img_req.ifd_data_fd);
5150 if (IS_ERR_OR_NULL(ihandle)) {
5151 pr_err("Ion client could not retrieve the handle\n");
5152 return -ENOMEM;
5153 }
5154
5155 /* Get the physical address of the ION BUF */
5156 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5157 if (ret) {
5158 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5159 ret);
5160 return ret;
5161 }
5162 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5163 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5164 len, load_img_req.mdt_len,
5165 load_img_req.img_len);
5166 return ret;
5167 }
5168 /* Populate the structure for sending scm call to load image */
5169 if (qseecom.qsee_version < QSEE_VERSION_40) {
5170 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5171 load_req.mdt_len = load_img_req.mdt_len;
5172 load_req.img_len = load_img_req.img_len;
5173 load_req.phy_addr = (uint32_t)pa;
5174 cmd_buf = (void *)&load_req;
5175 cmd_len = sizeof(struct qseecom_load_app_ireq);
5176 } else {
5177 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5178 load_req_64bit.mdt_len = load_img_req.mdt_len;
5179 load_req_64bit.img_len = load_img_req.img_len;
5180 load_req_64bit.phy_addr = (uint64_t)pa;
5181 cmd_buf = (void *)&load_req_64bit;
5182 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5183 }
5184
5185 if (qseecom.support_bus_scaling) {
5186 mutex_lock(&qsee_bw_mutex);
5187 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5188 mutex_unlock(&qsee_bw_mutex);
5189 if (ret) {
5190 ret = -EIO;
5191 goto exit_cpu_restore;
5192 }
5193 }
5194
5195 /* Vote for the SFPB clock */
5196 ret = __qseecom_enable_clk_scale_up(data);
5197 if (ret) {
5198 ret = -EIO;
5199 goto exit_register_bus_bandwidth_needs;
5200 }
5201 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5202 ION_IOC_CLEAN_INV_CACHES);
5203 if (ret) {
5204 pr_err("cache operation failed %d\n", ret);
5205 goto exit_disable_clock;
5206 }
5207 /* SCM_CALL to load the external elf */
5208 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5209 &resp, sizeof(resp));
5210 if (ret) {
5211 pr_err("scm_call to load failed : ret %d\n",
5212 ret);
5213 ret = -EFAULT;
5214 goto exit_disable_clock;
5215 }
5216
5217 switch (resp.result) {
5218 case QSEOS_RESULT_SUCCESS:
5219 break;
5220 case QSEOS_RESULT_INCOMPLETE:
5221 pr_err("%s: qseos result incomplete\n", __func__);
5222 ret = __qseecom_process_incomplete_cmd(data, &resp);
5223 if (ret)
5224 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5225 break;
5226 case QSEOS_RESULT_FAILURE:
5227 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5228 ret = -EFAULT;
5229 break;
5230 default:
5231 pr_err("scm_call response result %d not supported\n",
5232 resp.result);
5233 ret = -EFAULT;
5234 break;
5235 }
5236
5237exit_disable_clock:
5238 __qseecom_disable_clk_scale_down(data);
5239
5240exit_register_bus_bandwidth_needs:
5241 if (qseecom.support_bus_scaling) {
5242 mutex_lock(&qsee_bw_mutex);
5243 uret = qseecom_unregister_bus_bandwidth_needs(data);
5244 mutex_unlock(&qsee_bw_mutex);
5245 if (uret)
5246 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5247 uret, ret);
5248 }
5249
5250exit_cpu_restore:
5251 /* Deallocate the handle */
5252 if (!IS_ERR_OR_NULL(ihandle))
5253 ion_free(qseecom.ion_clnt, ihandle);
5254 return ret;
5255}
5256
5257static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5258{
5259 int ret = 0;
5260 struct qseecom_command_scm_resp resp;
5261 struct qseecom_unload_app_ireq req;
5262
5263 /* unavailable client app */
5264 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5265
5266 /* Populate the structure for sending scm call to unload image */
5267 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5268
5269 /* SCM_CALL to unload the external elf */
5270 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5271 sizeof(struct qseecom_unload_app_ireq),
5272 &resp, sizeof(resp));
5273 if (ret) {
5274 pr_err("scm_call to unload failed : ret %d\n",
5275 ret);
5276 ret = -EFAULT;
5277 goto qseecom_unload_external_elf_scm_err;
5278 }
5279 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5280 ret = __qseecom_process_incomplete_cmd(data, &resp);
5281 if (ret)
5282 pr_err("process_incomplete_cmd fail err: %d\n",
5283 ret);
5284 } else {
5285 if (resp.result != QSEOS_RESULT_SUCCESS) {
5286 pr_err("scm_call to unload image failed resp.result =%d\n",
5287 resp.result);
5288 ret = -EFAULT;
5289 }
5290 }
5291
5292qseecom_unload_external_elf_scm_err:
5293
5294 return ret;
5295}
5296
5297static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5298 void __user *argp)
5299{
5300
5301 int32_t ret;
5302 struct qseecom_qseos_app_load_query query_req;
5303 struct qseecom_check_app_ireq req;
5304 struct qseecom_registered_app_list *entry = NULL;
5305 unsigned long flags = 0;
5306 uint32_t app_arch = 0, app_id = 0;
5307 bool found_app = false;
5308
5309 /* Copy the relevant information needed for loading the image */
5310 if (copy_from_user(&query_req,
5311 (void __user *)argp,
5312 sizeof(struct qseecom_qseos_app_load_query))) {
5313 pr_err("copy_from_user failed\n");
5314 return -EFAULT;
5315 }
5316
5317 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5318 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5319 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5320
5321 ret = __qseecom_check_app_exists(req, &app_id);
5322 if (ret) {
5323 pr_err(" scm call to check if app is loaded failed");
5324 return ret; /* scm call failed */
5325 }
5326 if (app_id) {
5327 pr_debug("App id %d (%s) already exists\n", app_id,
5328 (char *)(req.app_name));
5329 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5330 list_for_each_entry(entry,
5331 &qseecom.registered_app_list_head, list){
5332 if (entry->app_id == app_id) {
5333 app_arch = entry->app_arch;
5334 entry->ref_cnt++;
5335 found_app = true;
5336 break;
5337 }
5338 }
5339 spin_unlock_irqrestore(
5340 &qseecom.registered_app_list_lock, flags);
5341 data->client.app_id = app_id;
5342 query_req.app_id = app_id;
5343 if (app_arch) {
5344 data->client.app_arch = app_arch;
5345 query_req.app_arch = app_arch;
5346 } else {
5347 data->client.app_arch = 0;
5348 query_req.app_arch = 0;
5349 }
5350 strlcpy(data->client.app_name, query_req.app_name,
5351 MAX_APP_NAME_SIZE);
5352 /*
5353 * If app was loaded by appsbl before and was not registered,
5354 * regiser this app now.
5355 */
5356 if (!found_app) {
5357 pr_debug("Register app %d [%s] which was loaded before\n",
5358 ret, (char *)query_req.app_name);
5359 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5360 if (!entry) {
5361 pr_err("kmalloc for app entry failed\n");
5362 return -ENOMEM;
5363 }
5364 entry->app_id = app_id;
5365 entry->ref_cnt = 1;
5366 entry->app_arch = data->client.app_arch;
5367 strlcpy(entry->app_name, data->client.app_name,
5368 MAX_APP_NAME_SIZE);
5369 entry->app_blocked = false;
5370 entry->blocked_on_listener_id = 0;
5371 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5372 flags);
5373 list_add_tail(&entry->list,
5374 &qseecom.registered_app_list_head);
5375 spin_unlock_irqrestore(
5376 &qseecom.registered_app_list_lock, flags);
5377 }
5378 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5379 pr_err("copy_to_user failed\n");
5380 return -EFAULT;
5381 }
5382 return -EEXIST; /* app already loaded */
5383 } else {
5384 return 0; /* app not loaded */
5385 }
5386}
5387
5388static int __qseecom_get_ce_pipe_info(
5389 enum qseecom_key_management_usage_type usage,
5390 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5391{
5392 int ret = -EINVAL;
5393 int i, j;
5394 struct qseecom_ce_info_use *p = NULL;
5395 int total = 0;
5396 struct qseecom_ce_pipe_entry *pcepipe;
5397
5398 switch (usage) {
5399 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5400 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5401 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5402 if (qseecom.support_fde) {
5403 p = qseecom.ce_info.fde;
5404 total = qseecom.ce_info.num_fde;
5405 } else {
5406 pr_err("system does not support fde\n");
5407 return -EINVAL;
5408 }
5409 break;
5410 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5411 if (qseecom.support_pfe) {
5412 p = qseecom.ce_info.pfe;
5413 total = qseecom.ce_info.num_pfe;
5414 } else {
5415 pr_err("system does not support pfe\n");
5416 return -EINVAL;
5417 }
5418 break;
5419 default:
5420 pr_err("unsupported usage %d\n", usage);
5421 return -EINVAL;
5422 }
5423
5424 for (j = 0; j < total; j++) {
5425 if (p->unit_num == unit) {
5426 pcepipe = p->ce_pipe_entry;
5427 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5428 (*ce_hw)[i] = pcepipe->ce_num;
5429 *pipe = pcepipe->ce_pipe_pair;
5430 pcepipe++;
5431 }
5432 ret = 0;
5433 break;
5434 }
5435 p++;
5436 }
5437 return ret;
5438}
5439
/*
 * Ask TZ to generate a storage key for the given key-management usage and
 * save it.  @ireq must already be fully populated by the caller; this
 * helper only votes for the QSEE clock, issues the SCM call, and maps the
 * response.  Returns 0 on success (the "key ID already exists" case is
 * treated as success), negative errno otherwise.
 */
static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_generate_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	/* Reject usages outside the key-management range */
	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_generate_ireq),
		&resp, sizeof(resp));
	if (ret) {
		/*
		 * -EINVAL combined with FAIL_KEY_ID_EXISTS means the key was
		 * already generated earlier; not an error for this operation.
		 */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
			pr_debug("Key ID exists.\n");
			ret = 0;
		} else {
			pr_err("scm call to generate key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto generate_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
		pr_debug("Key ID exists.\n");
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener service(s); finish the exchange */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
				pr_debug("Key ID exists.\n");
				ret = 0;
			} else {
				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			}
		}
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("gen key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
generate_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5499
/*
 * Ask TZ to delete a previously saved storage key.
 *
 * Votes for the QSEE clock around the SCM call.  Returns 0 on success,
 * -ERANGE when TZ reports the maximum password attempts were reached,
 * and a negative errno on other failures.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	/* Reject usages outside the key-management range */
	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/*
		 * -EINVAL with FAIL_MAX_ATTEMPT means TZ rejected the call
		 * because the password retry limit was hit; map to -ERANGE.
		 */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener service(s); finish the exchange */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5560
5561static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5562 enum qseecom_key_management_usage_type usage,
5563 struct qseecom_key_select_ireq *ireq)
5564{
5565 struct qseecom_command_scm_resp resp;
5566 int ret;
5567
5568 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5569 usage >= QSEOS_KM_USAGE_MAX) {
5570 pr_err("Error:: unsupported usage %d\n", usage);
5571 return -EFAULT;
5572 }
5573 ret = __qseecom_enable_clk(CLK_QSEE);
5574 if (ret)
5575 return ret;
5576
5577 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5578 ret = __qseecom_enable_clk(CLK_CE_DRV);
5579 if (ret)
5580 return ret;
5581 }
5582
5583 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5584 ireq, sizeof(struct qseecom_key_select_ireq),
5585 &resp, sizeof(struct qseecom_command_scm_resp));
5586 if (ret) {
5587 if (ret == -EINVAL &&
5588 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5589 pr_debug("Max attempts to input password reached.\n");
5590 ret = -ERANGE;
5591 } else if (ret == -EINVAL &&
5592 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5593 pr_debug("Set Key operation under processing...\n");
5594 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5595 } else {
5596 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5597 ret);
5598 ret = -EFAULT;
5599 }
5600 goto set_key_exit;
5601 }
5602
5603 switch (resp.result) {
5604 case QSEOS_RESULT_SUCCESS:
5605 break;
5606 case QSEOS_RESULT_INCOMPLETE:
5607 ret = __qseecom_process_incomplete_cmd(data, &resp);
5608 if (ret) {
5609 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5610 resp.result);
5611 if (resp.result ==
5612 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5613 pr_debug("Set Key operation under processing...\n");
5614 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5615 }
5616 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5617 pr_debug("Max attempts to input password reached.\n");
5618 ret = -ERANGE;
5619 }
5620 }
5621 break;
5622 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5623 pr_debug("Max attempts to input password reached.\n");
5624 ret = -ERANGE;
5625 break;
5626 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5627 pr_debug("Set Key operation under processing...\n");
5628 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5629 break;
5630 case QSEOS_RESULT_FAILURE:
5631 default:
5632 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5633 ret = -EINVAL;
5634 break;
5635 }
5636set_key_exit:
5637 __qseecom_disable_clk(CLK_QSEE);
5638 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5639 __qseecom_disable_clk(CLK_CE_DRV);
5640 return ret;
5641}
5642
/*
 * Ask TZ to update the user info (password hashes) bound to the current
 * storage key.  Votes for the QSEE clock around the SCM call.  Returns 0
 * on success, the positive value QSEOS_RESULT_FAIL_PENDING_OPERATION when
 * TZ wants the caller to retry later (callers loop on this), or a
 * negative errno on other failures.
 */
static int __qseecom_update_current_key_user_info(
			struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	/* Reject usages outside the key-management range */
	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/*
		 * -EINVAL with FAIL_PENDING_OPERATION is TZ's "try again
		 * later": pass the positive pending code on to the caller
		 * and fall through to the result switch below.
		 */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
					ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		/*
		 * A pending result overrides the incomplete-cmd status so
		 * the caller keeps retrying instead of failing out.
		 */
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
				resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5704
5705
5706static int qseecom_enable_ice_setup(int usage)
5707{
5708 int ret = 0;
5709
5710 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5711 ret = qcom_ice_setup_ice_hw("ufs", true);
5712 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5713 ret = qcom_ice_setup_ice_hw("sdcc", true);
5714
5715 return ret;
5716}
5717
5718static int qseecom_disable_ice_setup(int usage)
5719{
5720 int ret = 0;
5721
5722 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5723 ret = qcom_ice_setup_ice_hw("ufs", false);
5724 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5725 ret = qcom_ice_setup_ice_hw("sdcc", false);
5726
5727 return ret;
5728}
5729
5730static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5731{
5732 struct qseecom_ce_info_use *pce_info_use, *p;
5733 int total = 0;
5734 int i;
5735
5736 switch (usage) {
5737 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5738 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5739 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5740 p = qseecom.ce_info.fde;
5741 total = qseecom.ce_info.num_fde;
5742 break;
5743 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5744 p = qseecom.ce_info.pfe;
5745 total = qseecom.ce_info.num_pfe;
5746 break;
5747 default:
5748 pr_err("unsupported usage %d\n", usage);
5749 return -EINVAL;
5750 }
5751
5752 pce_info_use = NULL;
5753
5754 for (i = 0; i < total; i++) {
5755 if (p->unit_num == unit) {
5756 pce_info_use = p;
5757 break;
5758 }
5759 p++;
5760 }
5761 if (!pce_info_use) {
5762 pr_err("can not find %d\n", unit);
5763 return -EINVAL;
5764 }
5765 return pce_info_use->num_ce_pipe_entries;
5766}
5767
5768static int qseecom_create_key(struct qseecom_dev_handle *data,
5769 void __user *argp)
5770{
5771 int i;
5772 uint32_t *ce_hw = NULL;
5773 uint32_t pipe = 0;
5774 int ret = 0;
5775 uint32_t flags = 0;
5776 struct qseecom_create_key_req create_key_req;
5777 struct qseecom_key_generate_ireq generate_key_ireq;
5778 struct qseecom_key_select_ireq set_key_ireq;
5779 uint32_t entries = 0;
5780
5781 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5782 if (ret) {
5783 pr_err("copy_from_user failed\n");
5784 return ret;
5785 }
5786
5787 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5788 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5789 pr_err("unsupported usage %d\n", create_key_req.usage);
5790 ret = -EFAULT;
5791 return ret;
5792 }
5793 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5794 create_key_req.usage);
5795 if (entries <= 0) {
5796 pr_err("no ce instance for usage %d instance %d\n",
5797 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5798 ret = -EINVAL;
5799 return ret;
5800 }
5801
5802 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5803 if (!ce_hw) {
5804 ret = -ENOMEM;
5805 return ret;
5806 }
5807 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5808 DEFAULT_CE_INFO_UNIT);
5809 if (ret) {
5810 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5811 ret = -EINVAL;
5812 goto free_buf;
5813 }
5814
5815 if (qseecom.fde_key_size)
5816 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5817 else
5818 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5819
5820 generate_key_ireq.flags = flags;
5821 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5822 memset((void *)generate_key_ireq.key_id,
5823 0, QSEECOM_KEY_ID_SIZE);
5824 memset((void *)generate_key_ireq.hash32,
5825 0, QSEECOM_HASH_SIZE);
5826 memcpy((void *)generate_key_ireq.key_id,
5827 (void *)key_id_array[create_key_req.usage].desc,
5828 QSEECOM_KEY_ID_SIZE);
5829 memcpy((void *)generate_key_ireq.hash32,
5830 (void *)create_key_req.hash32,
5831 QSEECOM_HASH_SIZE);
5832
5833 ret = __qseecom_generate_and_save_key(data,
5834 create_key_req.usage, &generate_key_ireq);
5835 if (ret) {
5836 pr_err("Failed to generate key on storage: %d\n", ret);
5837 goto free_buf;
5838 }
5839
5840 for (i = 0; i < entries; i++) {
5841 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5842 if (create_key_req.usage ==
5843 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5844 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5845 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5846
5847 } else if (create_key_req.usage ==
5848 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5849 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5850 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5851
5852 } else {
5853 set_key_ireq.ce = ce_hw[i];
5854 set_key_ireq.pipe = pipe;
5855 }
5856 set_key_ireq.flags = flags;
5857
5858 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5859 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5860 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5861 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5862 memcpy((void *)set_key_ireq.key_id,
5863 (void *)key_id_array[create_key_req.usage].desc,
5864 QSEECOM_KEY_ID_SIZE);
5865 memcpy((void *)set_key_ireq.hash32,
5866 (void *)create_key_req.hash32,
5867 QSEECOM_HASH_SIZE);
5868 /*
5869 * It will return false if it is GPCE based crypto instance or
5870 * ICE is setup properly
5871 */
5872 if (qseecom_enable_ice_setup(create_key_req.usage))
5873 goto free_buf;
5874
5875 do {
5876 ret = __qseecom_set_clear_ce_key(data,
5877 create_key_req.usage,
5878 &set_key_ireq);
5879 /*
5880 * wait a little before calling scm again to let other
5881 * processes run
5882 */
5883 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
5884 msleep(50);
5885
5886 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
5887
5888 qseecom_disable_ice_setup(create_key_req.usage);
5889
5890 if (ret) {
5891 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
5892 pipe, ce_hw[i], ret);
5893 goto free_buf;
5894 } else {
5895 pr_err("Set the key successfully\n");
5896 if ((create_key_req.usage ==
5897 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
5898 (create_key_req.usage ==
5899 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
5900 goto free_buf;
5901 }
5902 }
5903
5904free_buf:
5905 kzfree(ce_hw);
5906 return ret;
5907}
5908
5909static int qseecom_wipe_key(struct qseecom_dev_handle *data,
5910 void __user *argp)
5911{
5912 uint32_t *ce_hw = NULL;
5913 uint32_t pipe = 0;
5914 int ret = 0;
5915 uint32_t flags = 0;
5916 int i, j;
5917 struct qseecom_wipe_key_req wipe_key_req;
5918 struct qseecom_key_delete_ireq delete_key_ireq;
5919 struct qseecom_key_select_ireq clear_key_ireq;
5920 uint32_t entries = 0;
5921
5922 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
5923 if (ret) {
5924 pr_err("copy_from_user failed\n");
5925 return ret;
5926 }
5927
5928 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5929 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5930 pr_err("unsupported usage %d\n", wipe_key_req.usage);
5931 ret = -EFAULT;
5932 return ret;
5933 }
5934
5935 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5936 wipe_key_req.usage);
5937 if (entries <= 0) {
5938 pr_err("no ce instance for usage %d instance %d\n",
5939 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
5940 ret = -EINVAL;
5941 return ret;
5942 }
5943
5944 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5945 if (!ce_hw) {
5946 ret = -ENOMEM;
5947 return ret;
5948 }
5949
5950 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
5951 DEFAULT_CE_INFO_UNIT);
5952 if (ret) {
5953 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5954 ret = -EINVAL;
5955 goto free_buf;
5956 }
5957
5958 if (wipe_key_req.wipe_key_flag) {
5959 delete_key_ireq.flags = flags;
5960 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
5961 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5962 memcpy((void *)delete_key_ireq.key_id,
5963 (void *)key_id_array[wipe_key_req.usage].desc,
5964 QSEECOM_KEY_ID_SIZE);
5965 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5966
5967 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
5968 &delete_key_ireq);
5969 if (ret) {
5970 pr_err("Failed to delete key from ssd storage: %d\n",
5971 ret);
5972 ret = -EFAULT;
5973 goto free_buf;
5974 }
5975 }
5976
5977 for (j = 0; j < entries; j++) {
5978 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5979 if (wipe_key_req.usage ==
5980 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5981 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5982 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5983 } else if (wipe_key_req.usage ==
5984 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5985 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5986 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5987 } else {
5988 clear_key_ireq.ce = ce_hw[j];
5989 clear_key_ireq.pipe = pipe;
5990 }
5991 clear_key_ireq.flags = flags;
5992 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5993 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
5994 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
5995 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5996
5997 /*
5998 * It will return false if it is GPCE based crypto instance or
5999 * ICE is setup properly
6000 */
6001 if (qseecom_enable_ice_setup(wipe_key_req.usage))
6002 goto free_buf;
6003
6004 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6005 &clear_key_ireq);
6006
6007 qseecom_disable_ice_setup(wipe_key_req.usage);
6008
6009 if (ret) {
6010 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6011 pipe, ce_hw[j], ret);
6012 ret = -EFAULT;
6013 goto free_buf;
6014 }
6015 }
6016
6017free_buf:
6018 kzfree(ce_hw);
6019 return ret;
6020}
6021
6022static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6023 void __user *argp)
6024{
6025 int ret = 0;
6026 uint32_t flags = 0;
6027 struct qseecom_update_key_userinfo_req update_key_req;
6028 struct qseecom_key_userinfo_update_ireq ireq;
6029
6030 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6031 if (ret) {
6032 pr_err("copy_from_user failed\n");
6033 return ret;
6034 }
6035
6036 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6037 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6038 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6039 return -EFAULT;
6040 }
6041
6042 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6043
6044 if (qseecom.fde_key_size)
6045 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6046 else
6047 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6048
6049 ireq.flags = flags;
6050 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6051 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6052 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6053 memcpy((void *)ireq.key_id,
6054 (void *)key_id_array[update_key_req.usage].desc,
6055 QSEECOM_KEY_ID_SIZE);
6056 memcpy((void *)ireq.current_hash32,
6057 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6058 memcpy((void *)ireq.new_hash32,
6059 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6060
6061 do {
6062 ret = __qseecom_update_current_key_user_info(data,
6063 update_key_req.usage,
6064 &ireq);
6065 /*
6066 * wait a little before calling scm again to let other
6067 * processes run
6068 */
6069 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6070 msleep(50);
6071
6072 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6073 if (ret) {
6074 pr_err("Failed to update key info: %d\n", ret);
6075 return ret;
6076 }
6077 return ret;
6078
6079}
6080static int qseecom_is_es_activated(void __user *argp)
6081{
6082 struct qseecom_is_es_activated_req req;
6083 struct qseecom_command_scm_resp resp;
6084 int ret;
6085
6086 if (qseecom.qsee_version < QSEE_VERSION_04) {
6087 pr_err("invalid qsee version\n");
6088 return -ENODEV;
6089 }
6090
6091 if (argp == NULL) {
6092 pr_err("arg is null\n");
6093 return -EINVAL;
6094 }
6095
6096 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6097 &req, sizeof(req), &resp, sizeof(resp));
6098 if (ret) {
6099 pr_err("scm_call failed\n");
6100 return ret;
6101 }
6102
6103 req.is_activated = resp.result;
6104 ret = copy_to_user(argp, &req, sizeof(req));
6105 if (ret) {
6106 pr_err("copy_to_user failed\n");
6107 return ret;
6108 }
6109
6110 return 0;
6111}
6112
6113static int qseecom_save_partition_hash(void __user *argp)
6114{
6115 struct qseecom_save_partition_hash_req req;
6116 struct qseecom_command_scm_resp resp;
6117 int ret;
6118
6119 memset(&resp, 0x00, sizeof(resp));
6120
6121 if (qseecom.qsee_version < QSEE_VERSION_04) {
6122 pr_err("invalid qsee version\n");
6123 return -ENODEV;
6124 }
6125
6126 if (argp == NULL) {
6127 pr_err("arg is null\n");
6128 return -EINVAL;
6129 }
6130
6131 ret = copy_from_user(&req, argp, sizeof(req));
6132 if (ret) {
6133 pr_err("copy_from_user failed\n");
6134 return ret;
6135 }
6136
6137 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6138 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6139 if (ret) {
6140 pr_err("qseecom_scm_call failed\n");
6141 return ret;
6142 }
6143
6144 return 0;
6145}
6146
/*
 * Handle the MDTP cipher-DIP ioctl: have TZ encrypt or decrypt an MDTP
 * DIP (Device Integrity Partition) buffer.
 *
 * User data is bounced through page-aligned kernel buffers that are
 * handed to TZ by physical address.  The do { } while (0) block provides
 * a single exit path so both buffers are always freed (and zeroized via
 * kzfree) regardless of where an error occurs.
 */
static int qseecom_mdtp_cipher_dip(void __user *argp)
{
	struct qseecom_mdtp_cipher_dip_req req;
	u32 tzbuflenin, tzbuflenout;
	char *tzbufin = NULL, *tzbufout = NULL;
	struct scm_desc desc = {0};
	int ret;

	do {
		/* Copy the parameters from userspace */
		if (argp == NULL) {
			pr_err("arg is null\n");
			ret = -EINVAL;
			break;
		}

		ret = copy_from_user(&req, argp, sizeof(req));
		if (ret) {
			pr_err("copy_from_user failed, ret= %d\n", ret);
			break;
		}

		/* Bound-check both buffers before allocating or copying */
		if (req.in_buf == NULL || req.out_buf == NULL ||
			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
				req.direction > 1) {
			pr_err("invalid parameters\n");
			ret = -EINVAL;
			break;
		}

		/* Copy the input buffer from userspace to kernel space */
		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
		if (!tzbufin) {
			pr_err("error allocating in buffer\n");
			ret = -ENOMEM;
			break;
		}

		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
		if (ret) {
			pr_err("copy_from_user failed, ret=%d\n", ret);
			break;
		}

		/* Flush CPU caches so TZ sees the up-to-date input data */
		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);

		/* Prepare the output buffer in kernel space */
		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
		if (!tzbufout) {
			pr_err("error allocating out buffer\n");
			ret = -ENOMEM;
			break;
		}

		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);

		/* Send the command to TZ */
		/* kzalloc memory is lowmem, so virt_to_phys is valid here */
		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
		desc.args[0] = virt_to_phys(tzbufin);
		desc.args[1] = req.in_buf_size;
		desc.args[2] = virt_to_phys(tzbufout);
		desc.args[3] = req.out_buf_size;
		desc.args[4] = req.direction;

		/* TZ access needs the QSEE clock vote for the call duration */
		ret = __qseecom_enable_clk(CLK_QSEE);
		if (ret)
			break;

		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);

		__qseecom_disable_clk(CLK_QSEE);

		if (ret) {
			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
				ret);
			break;
		}

		/* Copy the output buffer from kernel space to userspace */
		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
		if (ret) {
			pr_err("copy_to_user failed, ret=%d\n", ret);
			break;
		}
	} while (0);

	/* kzfree accepts NULL and zeroizes contents before freeing */
	kzfree(tzbufin);
	kzfree(tzbufout);

	return ret;
}
6242
/*
 * Validate a QTEEC request's command/response buffer pointers and lengths
 * against the client's registered shared buffer.  Overflow guards run
 * before any arithmetic that could wrap.  Returns 0 when both buffers lie
 * entirely within the shared buffer, negative errno otherwise.
 */
static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req)
{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	/* QTEEC calls are only valid on client-app handles */
	if (data->type != QSEECOM_CLIENT_APP)
		return -EFAULT;

	/* Guard the req_len + resp_len sum below against wrap-around */
	if (req->req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if (req->req_len + req->resp_len > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
		(req->req_len + req->resp_len), data->client.sb_length);
		return -ENOMEM;
	}

	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* Both buffers must start inside the registered shared buffer */
	if (((uintptr_t)req->req_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->req_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if (((uintptr_t)req->resp_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if ((req->req_len == 0) || (req->resp_len == 0)) {
		pr_err("cmd buf lengtgh/response buf length not valid\n");
		return -EINVAL;
	}

	/* Guard the ptr + len end-address computations further below */
	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
		pr_err("Integer overflow in req_len & req_ptr\n");
		return -EINVAL;
	}

	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_ptr\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Both buffers must also END inside the shared buffer */
	if ((((uintptr_t)req->req_ptr + req->req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_ptr + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
6317
6318static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6319 uint32_t fd_idx, struct sg_table *sg_ptr)
6320{
6321 struct scatterlist *sg = sg_ptr->sgl;
6322 struct qseecom_sg_entry *sg_entry;
6323 void *buf;
6324 uint i;
6325 size_t size;
6326 dma_addr_t coh_pmem;
6327
6328 if (fd_idx >= MAX_ION_FD) {
6329 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6330 return -ENOMEM;
6331 }
6332 /*
6333 * Allocate a buffer, populate it with number of entry plus
6334 * each sg entry's phy addr and length; then return the
6335 * phy_addr of the buffer.
6336 */
6337 size = sizeof(uint32_t) +
6338 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6339 size = (size + PAGE_SIZE) & PAGE_MASK;
6340 buf = dma_alloc_coherent(qseecom.pdev,
6341 size, &coh_pmem, GFP_KERNEL);
6342 if (buf == NULL) {
6343 pr_err("failed to alloc memory for sg buf\n");
6344 return -ENOMEM;
6345 }
6346 *(uint32_t *)buf = sg_ptr->nents;
6347 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6348 for (i = 0; i < sg_ptr->nents; i++) {
6349 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6350 sg_entry->len = sg->length;
6351 sg_entry++;
6352 sg = sg_next(sg);
6353 }
6354 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6355 data->client.sec_buf_fd[fd_idx].vbase = buf;
6356 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6357 data->client.sec_buf_fd[fd_idx].size = size;
6358 return 0;
6359}
6360
6361static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6362 struct qseecom_dev_handle *data, bool cleanup)
6363{
6364 struct ion_handle *ihandle;
6365 int ret = 0;
6366 int i = 0;
6367 uint32_t *update;
6368 struct sg_table *sg_ptr = NULL;
6369 struct scatterlist *sg;
6370 struct qseecom_param_memref *memref;
6371
6372 if (req == NULL) {
6373 pr_err("Invalid address\n");
6374 return -EINVAL;
6375 }
6376 for (i = 0; i < MAX_ION_FD; i++) {
6377 if (req->ifd_data[i].fd > 0) {
6378 ihandle = ion_import_dma_buf(qseecom.ion_clnt,
6379 req->ifd_data[i].fd);
6380 if (IS_ERR_OR_NULL(ihandle)) {
6381 pr_err("Ion client can't retrieve the handle\n");
6382 return -ENOMEM;
6383 }
6384 if ((req->req_len < sizeof(uint32_t)) ||
6385 (req->ifd_data[i].cmd_buf_offset >
6386 req->req_len - sizeof(uint32_t))) {
6387 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6388 req->req_len,
6389 req->ifd_data[i].cmd_buf_offset);
6390 return -EINVAL;
6391 }
6392 update = (uint32_t *)((char *) req->req_ptr +
6393 req->ifd_data[i].cmd_buf_offset);
6394 if (!update) {
6395 pr_err("update pointer is NULL\n");
6396 return -EINVAL;
6397 }
6398 } else {
6399 continue;
6400 }
6401 /* Populate the cmd data structure with the phys_addr */
6402 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6403 if (IS_ERR_OR_NULL(sg_ptr)) {
6404 pr_err("IOn client could not retrieve sg table\n");
6405 goto err;
6406 }
6407 sg = sg_ptr->sgl;
6408 if (sg == NULL) {
6409 pr_err("sg is NULL\n");
6410 goto err;
6411 }
6412 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6413 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6414 sg_ptr->nents, sg->length);
6415 goto err;
6416 }
6417 /* clean up buf for pre-allocated fd */
6418 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6419 (*update)) {
6420 if (data->client.sec_buf_fd[i].vbase)
6421 dma_free_coherent(qseecom.pdev,
6422 data->client.sec_buf_fd[i].size,
6423 data->client.sec_buf_fd[i].vbase,
6424 data->client.sec_buf_fd[i].pbase);
6425 memset((void *)update, 0,
6426 sizeof(struct qseecom_param_memref));
6427 memset(&(data->client.sec_buf_fd[i]), 0,
6428 sizeof(struct qseecom_sec_buf_fd_info));
6429 goto clean;
6430 }
6431
6432 if (*update == 0) {
6433 /* update buf for pre-allocated fd from secure heap*/
6434 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6435 sg_ptr);
6436 if (ret) {
6437 pr_err("Failed to handle buf for fd[%d]\n", i);
6438 goto err;
6439 }
6440 memref = (struct qseecom_param_memref *)update;
6441 memref->buffer =
6442 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6443 memref->size =
6444 (uint32_t)(data->client.sec_buf_fd[i].size);
6445 } else {
6446 /* update buf for fd from non-secure qseecom heap */
6447 if (sg_ptr->nents != 1) {
6448 pr_err("Num of scat entr (%d) invalid\n",
6449 sg_ptr->nents);
6450 goto err;
6451 }
6452 if (cleanup)
6453 *update = 0;
6454 else
6455 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6456 }
6457clean:
6458 if (cleanup) {
6459 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6460 ihandle, NULL, sg->length,
6461 ION_IOC_INV_CACHES);
6462 if (ret) {
6463 pr_err("cache operation failed %d\n", ret);
6464 goto err;
6465 }
6466 } else {
6467 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6468 ihandle, NULL, sg->length,
6469 ION_IOC_CLEAN_INV_CACHES);
6470 if (ret) {
6471 pr_err("cache operation failed %d\n", ret);
6472 goto err;
6473 }
6474 data->sglistinfo_ptr[i].indexAndFlags =
6475 SGLISTINFO_SET_INDEX_FLAG(
6476 (sg_ptr->nents == 1), 0,
6477 req->ifd_data[i].cmd_buf_offset);
6478 data->sglistinfo_ptr[i].sizeOrCount =
6479 (sg_ptr->nents == 1) ?
6480 sg->length : sg_ptr->nents;
6481 data->sglist_cnt = i + 1;
6482 }
6483 /* Deallocate the handle */
6484 if (!IS_ERR_OR_NULL(ihandle))
6485 ion_free(qseecom.ion_clnt, ihandle);
6486 }
6487 return ret;
6488err:
6489 if (!IS_ERR_OR_NULL(ihandle))
6490 ion_free(qseecom.ion_clnt, ihandle);
6491 return -ENOMEM;
6492}
6493
/*
 * Common path for QTEEC open-session / close-session / request-cancellation:
 * validates the request against the client's shared buffer, looks up the
 * registered app, patches ion-fd buffers for the modfd commands, builds the
 * 32- or 64-bit SCM request, performs cache maintenance, issues the SCM call,
 * and processes the (possibly incomplete / reentrant) response.
 *
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req, uint32_t cmd_id)
{
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	/* Reject requests whose buffers fall outside the shared buffer. */
	ret = __qseecom_qteec_validate_msg(data, req);
	if (ret)
		return ret;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/*
	 * Only open-session and request-cancellation carry ion fds that
	 * need their physical addresses patched into the command buffer.
	 */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
		(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, false);
		if (ret)
			return ret;
	}

	/* Build the 32-bit or 64-bit SCM request depending on QSEE version. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req->req_ptr);
		ireq.req_len = req->req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req->resp_ptr);
		ireq.resp_len = req->resp_len;
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* Flush the sglist table so TZ reads current contents. */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req->req_ptr);
		ireq_64bit.req_len = req->req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req->resp_ptr);
		ireq_64bit.resp_len = req->resp_len;
		/* A 32-bit TA cannot address buffers at or above 4G. */
		if ((data->client.app_arch == ELFCLASS32) &&
			((ireq_64bit.req_ptr >=
				PHY_ADDR_4G - ireq_64bit.req_len) ||
			(ireq_64bit.resp_ptr >=
				PHY_ADDR_4G - ireq_64bit.resp_len))){
			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
				data->client.app_name, data->client.app_id);
			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
				ireq_64bit.req_ptr, ireq_64bit.req_len,
				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
			return -EFAULT;
		}
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
	}
	/* The first word of the request struct is the command id. */
	if (qseecom.whitelist_support == true
		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
	else
		*(uint32_t *)cmd_buf = cmd_id;

	/* Flush the used part of the shared buffer before handing it to TZ. */
	reqd_len_sb_in = req->req_len + req->resp_len;
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* Invalidate the shared buffer so the CPU sees TZ's response. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	/* Undo the fd patching done before the call (cleanup pass). */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
		(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, true);
		if (ret)
			return ret;
	}
	return 0;
}
6641
6642static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6643 void __user *argp)
6644{
6645 struct qseecom_qteec_modfd_req req;
6646 int ret = 0;
6647
6648 ret = copy_from_user(&req, argp,
6649 sizeof(struct qseecom_qteec_modfd_req));
6650 if (ret) {
6651 pr_err("copy_from_user failed\n");
6652 return ret;
6653 }
6654 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6655 QSEOS_TEE_OPEN_SESSION);
6656
6657 return ret;
6658}
6659
6660static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6661 void __user *argp)
6662{
6663 struct qseecom_qteec_req req;
6664 int ret = 0;
6665
6666 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6667 if (ret) {
6668 pr_err("copy_from_user failed\n");
6669 return ret;
6670 }
6671 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6672 return ret;
6673}
6674
6675static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6676 void __user *argp)
6677{
6678 struct qseecom_qteec_modfd_req req;
6679 struct qseecom_command_scm_resp resp;
6680 struct qseecom_qteec_ireq ireq;
6681 struct qseecom_qteec_64bit_ireq ireq_64bit;
6682 struct qseecom_registered_app_list *ptr_app;
6683 bool found_app = false;
6684 unsigned long flags;
6685 int ret = 0;
6686 int i = 0;
6687 uint32_t reqd_len_sb_in = 0;
6688 void *cmd_buf = NULL;
6689 size_t cmd_len;
6690 struct sglist_info *table = data->sglistinfo_ptr;
6691 void *req_ptr = NULL;
6692 void *resp_ptr = NULL;
6693
6694 ret = copy_from_user(&req, argp,
6695 sizeof(struct qseecom_qteec_modfd_req));
6696 if (ret) {
6697 pr_err("copy_from_user failed\n");
6698 return ret;
6699 }
6700 ret = __qseecom_qteec_validate_msg(data,
6701 (struct qseecom_qteec_req *)(&req));
6702 if (ret)
6703 return ret;
6704 req_ptr = req.req_ptr;
6705 resp_ptr = req.resp_ptr;
6706
6707 /* find app_id & img_name from list */
6708 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6709 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6710 list) {
6711 if ((ptr_app->app_id == data->client.app_id) &&
6712 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6713 found_app = true;
6714 break;
6715 }
6716 }
6717 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6718 if (!found_app) {
6719 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6720 (char *)data->client.app_name);
6721 return -ENOENT;
6722 }
6723
6724 /* validate offsets */
6725 for (i = 0; i < MAX_ION_FD; i++) {
6726 if (req.ifd_data[i].fd) {
6727 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
6728 return -EINVAL;
6729 }
6730 }
6731 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6732 (uintptr_t)req.req_ptr);
6733 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6734 (uintptr_t)req.resp_ptr);
6735 ret = __qseecom_update_qteec_req_buf(&req, data, false);
6736 if (ret)
6737 return ret;
6738
6739 if (qseecom.qsee_version < QSEE_VERSION_40) {
6740 ireq.app_id = data->client.app_id;
6741 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6742 (uintptr_t)req_ptr);
6743 ireq.req_len = req.req_len;
6744 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6745 (uintptr_t)resp_ptr);
6746 ireq.resp_len = req.resp_len;
6747 cmd_buf = (void *)&ireq;
6748 cmd_len = sizeof(struct qseecom_qteec_ireq);
6749 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6750 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6751 dmac_flush_range((void *)table,
6752 (void *)table + SGLISTINFO_TABLE_SIZE);
6753 } else {
6754 ireq_64bit.app_id = data->client.app_id;
6755 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6756 (uintptr_t)req_ptr);
6757 ireq_64bit.req_len = req.req_len;
6758 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6759 (uintptr_t)resp_ptr);
6760 ireq_64bit.resp_len = req.resp_len;
6761 cmd_buf = (void *)&ireq_64bit;
6762 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6763 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6764 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6765 dmac_flush_range((void *)table,
6766 (void *)table + SGLISTINFO_TABLE_SIZE);
6767 }
6768 reqd_len_sb_in = req.req_len + req.resp_len;
6769 if (qseecom.whitelist_support == true)
6770 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
6771 else
6772 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
6773
6774 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6775 data->client.sb_virt,
6776 reqd_len_sb_in,
6777 ION_IOC_CLEAN_INV_CACHES);
6778 if (ret) {
6779 pr_err("cache operation failed %d\n", ret);
6780 return ret;
6781 }
6782
6783 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6784
6785 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6786 cmd_buf, cmd_len,
6787 &resp, sizeof(resp));
6788 if (ret) {
6789 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6790 ret, data->client.app_id);
6791 return ret;
6792 }
6793
6794 if (qseecom.qsee_reentrancy_support) {
6795 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
6796 } else {
6797 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6798 ret = __qseecom_process_incomplete_cmd(data, &resp);
6799 if (ret) {
6800 pr_err("process_incomplete_cmd failed err: %d\n",
6801 ret);
6802 return ret;
6803 }
6804 } else {
6805 if (resp.result != QSEOS_RESULT_SUCCESS) {
6806 pr_err("Response result %d not supported\n",
6807 resp.result);
6808 ret = -EINVAL;
6809 }
6810 }
6811 }
6812 ret = __qseecom_update_qteec_req_buf(&req, data, true);
6813 if (ret)
6814 return ret;
6815
6816 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6817 data->client.sb_virt, data->client.sb_length,
6818 ION_IOC_INV_CACHES);
6819 if (ret) {
6820 pr_err("cache operation failed %d\n", ret);
6821 return ret;
6822 }
6823 return 0;
6824}
6825
6826static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6827 void __user *argp)
6828{
6829 struct qseecom_qteec_modfd_req req;
6830 int ret = 0;
6831
6832 ret = copy_from_user(&req, argp,
6833 sizeof(struct qseecom_qteec_modfd_req));
6834 if (ret) {
6835 pr_err("copy_from_user failed\n");
6836 return ret;
6837 }
6838 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6839 QSEOS_TEE_REQUEST_CANCELLATION);
6840
6841 return ret;
6842}
6843
6844static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6845{
6846 if (data->sglist_cnt) {
6847 memset(data->sglistinfo_ptr, 0,
6848 SGLISTINFO_TABLE_SIZE);
6849 data->sglist_cnt = 0;
6850 }
6851}
6852
6853static inline long qseecom_ioctl(struct file *file,
6854 unsigned int cmd, unsigned long arg)
6855{
6856 int ret = 0;
6857 struct qseecom_dev_handle *data = file->private_data;
6858 void __user *argp = (void __user *) arg;
6859 bool perf_enabled = false;
6860
6861 if (!data) {
6862 pr_err("Invalid/uninitialized device handle\n");
6863 return -EINVAL;
6864 }
6865
6866 if (data->abort) {
6867 pr_err("Aborting qseecom driver\n");
6868 return -ENODEV;
6869 }
6870
6871 switch (cmd) {
6872 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
6873 if (data->type != QSEECOM_GENERIC) {
6874 pr_err("reg lstnr req: invalid handle (%d)\n",
6875 data->type);
6876 ret = -EINVAL;
6877 break;
6878 }
6879 pr_debug("ioctl register_listener_req()\n");
6880 mutex_lock(&app_access_lock);
6881 atomic_inc(&data->ioctl_count);
6882 data->type = QSEECOM_LISTENER_SERVICE;
6883 ret = qseecom_register_listener(data, argp);
6884 atomic_dec(&data->ioctl_count);
6885 wake_up_all(&data->abort_wq);
6886 mutex_unlock(&app_access_lock);
6887 if (ret)
6888 pr_err("failed qseecom_register_listener: %d\n", ret);
6889 break;
6890 }
6891 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
6892 if ((data->listener.id == 0) ||
6893 (data->type != QSEECOM_LISTENER_SERVICE)) {
6894 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
6895 data->type, data->listener.id);
6896 ret = -EINVAL;
6897 break;
6898 }
6899 pr_debug("ioctl unregister_listener_req()\n");
6900 mutex_lock(&app_access_lock);
6901 atomic_inc(&data->ioctl_count);
6902 ret = qseecom_unregister_listener(data);
6903 atomic_dec(&data->ioctl_count);
6904 wake_up_all(&data->abort_wq);
6905 mutex_unlock(&app_access_lock);
6906 if (ret)
6907 pr_err("failed qseecom_unregister_listener: %d\n", ret);
6908 break;
6909 }
6910 case QSEECOM_IOCTL_SEND_CMD_REQ: {
6911 if ((data->client.app_id == 0) ||
6912 (data->type != QSEECOM_CLIENT_APP)) {
6913 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
6914 data->type, data->client.app_id);
6915 ret = -EINVAL;
6916 break;
6917 }
6918 /* Only one client allowed here at a time */
6919 mutex_lock(&app_access_lock);
6920 if (qseecom.support_bus_scaling) {
6921 /* register bus bw in case the client doesn't do it */
6922 if (!data->mode) {
6923 mutex_lock(&qsee_bw_mutex);
6924 __qseecom_register_bus_bandwidth_needs(
6925 data, HIGH);
6926 mutex_unlock(&qsee_bw_mutex);
6927 }
6928 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
6929 if (ret) {
6930 pr_err("Failed to set bw.\n");
6931 ret = -EINVAL;
6932 mutex_unlock(&app_access_lock);
6933 break;
6934 }
6935 }
6936 /*
6937 * On targets where crypto clock is handled by HLOS,
6938 * if clk_access_cnt is zero and perf_enabled is false,
6939 * then the crypto clock was not enabled before sending cmd to
6940 * tz, qseecom will enable the clock to avoid service failure.
6941 */
6942 if (!qseecom.no_clock_support &&
6943 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
6944 pr_debug("ce clock is not enabled!\n");
6945 ret = qseecom_perf_enable(data);
6946 if (ret) {
6947 pr_err("Failed to vote for clock with err %d\n",
6948 ret);
6949 mutex_unlock(&app_access_lock);
6950 ret = -EINVAL;
6951 break;
6952 }
6953 perf_enabled = true;
6954 }
6955 atomic_inc(&data->ioctl_count);
6956 ret = qseecom_send_cmd(data, argp);
6957 if (qseecom.support_bus_scaling)
6958 __qseecom_add_bw_scale_down_timer(
6959 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
6960 if (perf_enabled) {
6961 qsee_disable_clock_vote(data, CLK_DFAB);
6962 qsee_disable_clock_vote(data, CLK_SFPB);
6963 }
6964 atomic_dec(&data->ioctl_count);
6965 wake_up_all(&data->abort_wq);
6966 mutex_unlock(&app_access_lock);
6967 if (ret)
6968 pr_err("failed qseecom_send_cmd: %d\n", ret);
6969 break;
6970 }
6971 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
6972 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
6973 if ((data->client.app_id == 0) ||
6974 (data->type != QSEECOM_CLIENT_APP)) {
6975 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
6976 data->type, data->client.app_id);
6977 ret = -EINVAL;
6978 break;
6979 }
6980 /* Only one client allowed here at a time */
6981 mutex_lock(&app_access_lock);
6982 if (qseecom.support_bus_scaling) {
6983 if (!data->mode) {
6984 mutex_lock(&qsee_bw_mutex);
6985 __qseecom_register_bus_bandwidth_needs(
6986 data, HIGH);
6987 mutex_unlock(&qsee_bw_mutex);
6988 }
6989 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
6990 if (ret) {
6991 pr_err("Failed to set bw.\n");
6992 mutex_unlock(&app_access_lock);
6993 ret = -EINVAL;
6994 break;
6995 }
6996 }
6997 /*
6998 * On targets where crypto clock is handled by HLOS,
6999 * if clk_access_cnt is zero and perf_enabled is false,
7000 * then the crypto clock was not enabled before sending cmd to
7001 * tz, qseecom will enable the clock to avoid service failure.
7002 */
7003 if (!qseecom.no_clock_support &&
7004 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7005 pr_debug("ce clock is not enabled!\n");
7006 ret = qseecom_perf_enable(data);
7007 if (ret) {
7008 pr_err("Failed to vote for clock with err %d\n",
7009 ret);
7010 mutex_unlock(&app_access_lock);
7011 ret = -EINVAL;
7012 break;
7013 }
7014 perf_enabled = true;
7015 }
7016 atomic_inc(&data->ioctl_count);
7017 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7018 ret = qseecom_send_modfd_cmd(data, argp);
7019 else
7020 ret = qseecom_send_modfd_cmd_64(data, argp);
7021 if (qseecom.support_bus_scaling)
7022 __qseecom_add_bw_scale_down_timer(
7023 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7024 if (perf_enabled) {
7025 qsee_disable_clock_vote(data, CLK_DFAB);
7026 qsee_disable_clock_vote(data, CLK_SFPB);
7027 }
7028 atomic_dec(&data->ioctl_count);
7029 wake_up_all(&data->abort_wq);
7030 mutex_unlock(&app_access_lock);
7031 if (ret)
7032 pr_err("failed qseecom_send_cmd: %d\n", ret);
7033 __qseecom_clean_data_sglistinfo(data);
7034 break;
7035 }
7036 case QSEECOM_IOCTL_RECEIVE_REQ: {
7037 if ((data->listener.id == 0) ||
7038 (data->type != QSEECOM_LISTENER_SERVICE)) {
7039 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7040 data->type, data->listener.id);
7041 ret = -EINVAL;
7042 break;
7043 }
7044 atomic_inc(&data->ioctl_count);
7045 ret = qseecom_receive_req(data);
7046 atomic_dec(&data->ioctl_count);
7047 wake_up_all(&data->abort_wq);
7048 if (ret && (ret != -ERESTARTSYS))
7049 pr_err("failed qseecom_receive_req: %d\n", ret);
7050 break;
7051 }
7052 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7053 if ((data->listener.id == 0) ||
7054 (data->type != QSEECOM_LISTENER_SERVICE)) {
7055 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7056 data->type, data->listener.id);
7057 ret = -EINVAL;
7058 break;
7059 }
7060 atomic_inc(&data->ioctl_count);
7061 if (!qseecom.qsee_reentrancy_support)
7062 ret = qseecom_send_resp();
7063 else
7064 ret = qseecom_reentrancy_send_resp(data);
7065 atomic_dec(&data->ioctl_count);
7066 wake_up_all(&data->abort_wq);
7067 if (ret)
7068 pr_err("failed qseecom_send_resp: %d\n", ret);
7069 break;
7070 }
7071 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7072 if ((data->type != QSEECOM_CLIENT_APP) &&
7073 (data->type != QSEECOM_GENERIC) &&
7074 (data->type != QSEECOM_SECURE_SERVICE)) {
7075 pr_err("set mem param req: invalid handle (%d)\n",
7076 data->type);
7077 ret = -EINVAL;
7078 break;
7079 }
7080 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7081 mutex_lock(&app_access_lock);
7082 atomic_inc(&data->ioctl_count);
7083 ret = qseecom_set_client_mem_param(data, argp);
7084 atomic_dec(&data->ioctl_count);
7085 mutex_unlock(&app_access_lock);
7086 if (ret)
7087 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7088 ret);
7089 break;
7090 }
7091 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7092 if ((data->type != QSEECOM_GENERIC) &&
7093 (data->type != QSEECOM_CLIENT_APP)) {
7094 pr_err("load app req: invalid handle (%d)\n",
7095 data->type);
7096 ret = -EINVAL;
7097 break;
7098 }
7099 data->type = QSEECOM_CLIENT_APP;
7100 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7101 mutex_lock(&app_access_lock);
7102 atomic_inc(&data->ioctl_count);
7103 ret = qseecom_load_app(data, argp);
7104 atomic_dec(&data->ioctl_count);
7105 mutex_unlock(&app_access_lock);
7106 if (ret)
7107 pr_err("failed load_app request: %d\n", ret);
7108 break;
7109 }
7110 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7111 if ((data->client.app_id == 0) ||
7112 (data->type != QSEECOM_CLIENT_APP)) {
7113 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7114 data->type, data->client.app_id);
7115 ret = -EINVAL;
7116 break;
7117 }
7118 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7119 mutex_lock(&app_access_lock);
7120 atomic_inc(&data->ioctl_count);
7121 ret = qseecom_unload_app(data, false);
7122 atomic_dec(&data->ioctl_count);
7123 mutex_unlock(&app_access_lock);
7124 if (ret)
7125 pr_err("failed unload_app request: %d\n", ret);
7126 break;
7127 }
7128 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7129 atomic_inc(&data->ioctl_count);
7130 ret = qseecom_get_qseos_version(data, argp);
7131 if (ret)
7132 pr_err("qseecom_get_qseos_version: %d\n", ret);
7133 atomic_dec(&data->ioctl_count);
7134 break;
7135 }
7136 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7137 if ((data->type != QSEECOM_GENERIC) &&
7138 (data->type != QSEECOM_CLIENT_APP)) {
7139 pr_err("perf enable req: invalid handle (%d)\n",
7140 data->type);
7141 ret = -EINVAL;
7142 break;
7143 }
7144 if ((data->type == QSEECOM_CLIENT_APP) &&
7145 (data->client.app_id == 0)) {
7146 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7147 data->type, data->client.app_id);
7148 ret = -EINVAL;
7149 break;
7150 }
7151 atomic_inc(&data->ioctl_count);
7152 if (qseecom.support_bus_scaling) {
7153 mutex_lock(&qsee_bw_mutex);
7154 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7155 mutex_unlock(&qsee_bw_mutex);
7156 } else {
7157 ret = qseecom_perf_enable(data);
7158 if (ret)
7159 pr_err("Fail to vote for clocks %d\n", ret);
7160 }
7161 atomic_dec(&data->ioctl_count);
7162 break;
7163 }
7164 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7165 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7166 (data->type != QSEECOM_CLIENT_APP)) {
7167 pr_err("perf disable req: invalid handle (%d)\n",
7168 data->type);
7169 ret = -EINVAL;
7170 break;
7171 }
7172 if ((data->type == QSEECOM_CLIENT_APP) &&
7173 (data->client.app_id == 0)) {
7174 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7175 data->type, data->client.app_id);
7176 ret = -EINVAL;
7177 break;
7178 }
7179 atomic_inc(&data->ioctl_count);
7180 if (!qseecom.support_bus_scaling) {
7181 qsee_disable_clock_vote(data, CLK_DFAB);
7182 qsee_disable_clock_vote(data, CLK_SFPB);
7183 } else {
7184 mutex_lock(&qsee_bw_mutex);
7185 qseecom_unregister_bus_bandwidth_needs(data);
7186 mutex_unlock(&qsee_bw_mutex);
7187 }
7188 atomic_dec(&data->ioctl_count);
7189 break;
7190 }
7191
7192 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7193 /* If crypto clock is not handled by HLOS, return directly. */
7194 if (qseecom.no_clock_support) {
7195 pr_debug("crypto clock is not handled by HLOS\n");
7196 break;
7197 }
7198 if ((data->client.app_id == 0) ||
7199 (data->type != QSEECOM_CLIENT_APP)) {
7200 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7201 data->type, data->client.app_id);
7202 ret = -EINVAL;
7203 break;
7204 }
7205 atomic_inc(&data->ioctl_count);
7206 ret = qseecom_scale_bus_bandwidth(data, argp);
7207 atomic_dec(&data->ioctl_count);
7208 break;
7209 }
7210 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7211 if (data->type != QSEECOM_GENERIC) {
7212 pr_err("load ext elf req: invalid client handle (%d)\n",
7213 data->type);
7214 ret = -EINVAL;
7215 break;
7216 }
7217 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7218 data->released = true;
7219 mutex_lock(&app_access_lock);
7220 atomic_inc(&data->ioctl_count);
7221 ret = qseecom_load_external_elf(data, argp);
7222 atomic_dec(&data->ioctl_count);
7223 mutex_unlock(&app_access_lock);
7224 if (ret)
7225 pr_err("failed load_external_elf request: %d\n", ret);
7226 break;
7227 }
7228 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7229 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7230 pr_err("unload ext elf req: invalid handle (%d)\n",
7231 data->type);
7232 ret = -EINVAL;
7233 break;
7234 }
7235 data->released = true;
7236 mutex_lock(&app_access_lock);
7237 atomic_inc(&data->ioctl_count);
7238 ret = qseecom_unload_external_elf(data);
7239 atomic_dec(&data->ioctl_count);
7240 mutex_unlock(&app_access_lock);
7241 if (ret)
7242 pr_err("failed unload_app request: %d\n", ret);
7243 break;
7244 }
7245 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7246 data->type = QSEECOM_CLIENT_APP;
7247 mutex_lock(&app_access_lock);
7248 atomic_inc(&data->ioctl_count);
7249 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7250 ret = qseecom_query_app_loaded(data, argp);
7251 atomic_dec(&data->ioctl_count);
7252 mutex_unlock(&app_access_lock);
7253 break;
7254 }
7255 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7256 if (data->type != QSEECOM_GENERIC) {
7257 pr_err("send cmd svc req: invalid handle (%d)\n",
7258 data->type);
7259 ret = -EINVAL;
7260 break;
7261 }
7262 data->type = QSEECOM_SECURE_SERVICE;
7263 if (qseecom.qsee_version < QSEE_VERSION_03) {
7264 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7265 qseecom.qsee_version);
7266 return -EINVAL;
7267 }
7268 mutex_lock(&app_access_lock);
7269 atomic_inc(&data->ioctl_count);
7270 ret = qseecom_send_service_cmd(data, argp);
7271 atomic_dec(&data->ioctl_count);
7272 mutex_unlock(&app_access_lock);
7273 break;
7274 }
7275 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7276 if (!(qseecom.support_pfe || qseecom.support_fde))
7277 pr_err("Features requiring key init not supported\n");
7278 if (data->type != QSEECOM_GENERIC) {
7279 pr_err("create key req: invalid handle (%d)\n",
7280 data->type);
7281 ret = -EINVAL;
7282 break;
7283 }
7284 if (qseecom.qsee_version < QSEE_VERSION_05) {
7285 pr_err("Create Key feature unsupported: qsee ver %u\n",
7286 qseecom.qsee_version);
7287 return -EINVAL;
7288 }
7289 data->released = true;
7290 mutex_lock(&app_access_lock);
7291 atomic_inc(&data->ioctl_count);
7292 ret = qseecom_create_key(data, argp);
7293 if (ret)
7294 pr_err("failed to create encryption key: %d\n", ret);
7295
7296 atomic_dec(&data->ioctl_count);
7297 mutex_unlock(&app_access_lock);
7298 break;
7299 }
7300 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7301 if (!(qseecom.support_pfe || qseecom.support_fde))
7302 pr_err("Features requiring key init not supported\n");
7303 if (data->type != QSEECOM_GENERIC) {
7304 pr_err("wipe key req: invalid handle (%d)\n",
7305 data->type);
7306 ret = -EINVAL;
7307 break;
7308 }
7309 if (qseecom.qsee_version < QSEE_VERSION_05) {
7310 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7311 qseecom.qsee_version);
7312 return -EINVAL;
7313 }
7314 data->released = true;
7315 mutex_lock(&app_access_lock);
7316 atomic_inc(&data->ioctl_count);
7317 ret = qseecom_wipe_key(data, argp);
7318 if (ret)
7319 pr_err("failed to wipe encryption key: %d\n", ret);
7320 atomic_dec(&data->ioctl_count);
7321 mutex_unlock(&app_access_lock);
7322 break;
7323 }
7324 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7325 if (!(qseecom.support_pfe || qseecom.support_fde))
7326 pr_err("Features requiring key init not supported\n");
7327 if (data->type != QSEECOM_GENERIC) {
7328 pr_err("update key req: invalid handle (%d)\n",
7329 data->type);
7330 ret = -EINVAL;
7331 break;
7332 }
7333 if (qseecom.qsee_version < QSEE_VERSION_05) {
7334 pr_err("Update Key feature unsupported in qsee ver %u\n",
7335 qseecom.qsee_version);
7336 return -EINVAL;
7337 }
7338 data->released = true;
7339 mutex_lock(&app_access_lock);
7340 atomic_inc(&data->ioctl_count);
7341 ret = qseecom_update_key_user_info(data, argp);
7342 if (ret)
7343 pr_err("failed to update key user info: %d\n", ret);
7344 atomic_dec(&data->ioctl_count);
7345 mutex_unlock(&app_access_lock);
7346 break;
7347 }
7348 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7349 if (data->type != QSEECOM_GENERIC) {
7350 pr_err("save part hash req: invalid handle (%d)\n",
7351 data->type);
7352 ret = -EINVAL;
7353 break;
7354 }
7355 data->released = true;
7356 mutex_lock(&app_access_lock);
7357 atomic_inc(&data->ioctl_count);
7358 ret = qseecom_save_partition_hash(argp);
7359 atomic_dec(&data->ioctl_count);
7360 mutex_unlock(&app_access_lock);
7361 break;
7362 }
7363 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7364 if (data->type != QSEECOM_GENERIC) {
7365 pr_err("ES activated req: invalid handle (%d)\n",
7366 data->type);
7367 ret = -EINVAL;
7368 break;
7369 }
7370 data->released = true;
7371 mutex_lock(&app_access_lock);
7372 atomic_inc(&data->ioctl_count);
7373 ret = qseecom_is_es_activated(argp);
7374 atomic_dec(&data->ioctl_count);
7375 mutex_unlock(&app_access_lock);
7376 break;
7377 }
7378 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7379 if (data->type != QSEECOM_GENERIC) {
7380 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7381 data->type);
7382 ret = -EINVAL;
7383 break;
7384 }
7385 data->released = true;
7386 mutex_lock(&app_access_lock);
7387 atomic_inc(&data->ioctl_count);
7388 ret = qseecom_mdtp_cipher_dip(argp);
7389 atomic_dec(&data->ioctl_count);
7390 mutex_unlock(&app_access_lock);
7391 break;
7392 }
7393 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7394 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7395 if ((data->listener.id == 0) ||
7396 (data->type != QSEECOM_LISTENER_SERVICE)) {
7397 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7398 data->type, data->listener.id);
7399 ret = -EINVAL;
7400 break;
7401 }
7402 atomic_inc(&data->ioctl_count);
7403 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7404 ret = qseecom_send_modfd_resp(data, argp);
7405 else
7406 ret = qseecom_send_modfd_resp_64(data, argp);
7407 atomic_dec(&data->ioctl_count);
7408 wake_up_all(&data->abort_wq);
7409 if (ret)
7410 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7411 __qseecom_clean_data_sglistinfo(data);
7412 break;
7413 }
7414 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7415 if ((data->client.app_id == 0) ||
7416 (data->type != QSEECOM_CLIENT_APP)) {
7417 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7418 data->type, data->client.app_id);
7419 ret = -EINVAL;
7420 break;
7421 }
7422 if (qseecom.qsee_version < QSEE_VERSION_40) {
7423 pr_err("GP feature unsupported: qsee ver %u\n",
7424 qseecom.qsee_version);
7425 return -EINVAL;
7426 }
7427 /* Only one client allowed here at a time */
7428 mutex_lock(&app_access_lock);
7429 atomic_inc(&data->ioctl_count);
7430 ret = qseecom_qteec_open_session(data, argp);
7431 atomic_dec(&data->ioctl_count);
7432 wake_up_all(&data->abort_wq);
7433 mutex_unlock(&app_access_lock);
7434 if (ret)
7435 pr_err("failed open_session_cmd: %d\n", ret);
7436 __qseecom_clean_data_sglistinfo(data);
7437 break;
7438 }
7439 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7440 if ((data->client.app_id == 0) ||
7441 (data->type != QSEECOM_CLIENT_APP)) {
7442 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7443 data->type, data->client.app_id);
7444 ret = -EINVAL;
7445 break;
7446 }
7447 if (qseecom.qsee_version < QSEE_VERSION_40) {
7448 pr_err("GP feature unsupported: qsee ver %u\n",
7449 qseecom.qsee_version);
7450 return -EINVAL;
7451 }
7452 /* Only one client allowed here at a time */
7453 mutex_lock(&app_access_lock);
7454 atomic_inc(&data->ioctl_count);
7455 ret = qseecom_qteec_close_session(data, argp);
7456 atomic_dec(&data->ioctl_count);
7457 wake_up_all(&data->abort_wq);
7458 mutex_unlock(&app_access_lock);
7459 if (ret)
7460 pr_err("failed close_session_cmd: %d\n", ret);
7461 break;
7462 }
7463 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7464 if ((data->client.app_id == 0) ||
7465 (data->type != QSEECOM_CLIENT_APP)) {
7466 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7467 data->type, data->client.app_id);
7468 ret = -EINVAL;
7469 break;
7470 }
7471 if (qseecom.qsee_version < QSEE_VERSION_40) {
7472 pr_err("GP feature unsupported: qsee ver %u\n",
7473 qseecom.qsee_version);
7474 return -EINVAL;
7475 }
7476 /* Only one client allowed here at a time */
7477 mutex_lock(&app_access_lock);
7478 atomic_inc(&data->ioctl_count);
7479 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7480 atomic_dec(&data->ioctl_count);
7481 wake_up_all(&data->abort_wq);
7482 mutex_unlock(&app_access_lock);
7483 if (ret)
7484 pr_err("failed Invoke cmd: %d\n", ret);
7485 __qseecom_clean_data_sglistinfo(data);
7486 break;
7487 }
7488 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7489 if ((data->client.app_id == 0) ||
7490 (data->type != QSEECOM_CLIENT_APP)) {
7491 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7492 data->type, data->client.app_id);
7493 ret = -EINVAL;
7494 break;
7495 }
7496 if (qseecom.qsee_version < QSEE_VERSION_40) {
7497 pr_err("GP feature unsupported: qsee ver %u\n",
7498 qseecom.qsee_version);
7499 return -EINVAL;
7500 }
7501 /* Only one client allowed here at a time */
7502 mutex_lock(&app_access_lock);
7503 atomic_inc(&data->ioctl_count);
7504 ret = qseecom_qteec_request_cancellation(data, argp);
7505 atomic_dec(&data->ioctl_count);
7506 wake_up_all(&data->abort_wq);
7507 mutex_unlock(&app_access_lock);
7508 if (ret)
7509 pr_err("failed request_cancellation: %d\n", ret);
7510 break;
7511 }
7512 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7513 atomic_inc(&data->ioctl_count);
7514 ret = qseecom_get_ce_info(data, argp);
7515 if (ret)
7516 pr_err("failed get fde ce pipe info: %d\n", ret);
7517 atomic_dec(&data->ioctl_count);
7518 break;
7519 }
7520 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7521 atomic_inc(&data->ioctl_count);
7522 ret = qseecom_free_ce_info(data, argp);
7523 if (ret)
7524 pr_err("failed get fde ce pipe info: %d\n", ret);
7525 atomic_dec(&data->ioctl_count);
7526 break;
7527 }
7528 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7529 atomic_inc(&data->ioctl_count);
7530 ret = qseecom_query_ce_info(data, argp);
7531 if (ret)
7532 pr_err("failed get fde ce pipe info: %d\n", ret);
7533 atomic_dec(&data->ioctl_count);
7534 break;
7535 }
7536 default:
7537 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7538 return -EINVAL;
7539 }
7540 return ret;
7541}
7542
7543static int qseecom_open(struct inode *inode, struct file *file)
7544{
7545 int ret = 0;
7546 struct qseecom_dev_handle *data;
7547
7548 data = kzalloc(sizeof(*data), GFP_KERNEL);
7549 if (!data)
7550 return -ENOMEM;
7551 file->private_data = data;
7552 data->abort = 0;
7553 data->type = QSEECOM_GENERIC;
7554 data->released = false;
7555 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7556 data->mode = INACTIVE;
7557 init_waitqueue_head(&data->abort_wq);
7558 atomic_set(&data->ioctl_count, 0);
7559 return ret;
7560}
7561
/*
 * qseecom_release() - character device release (last close) handler.
 *
 * Tears down whatever the fd represents, based on the handle type set by
 * earlier ioctls: unregisters a listener, unloads a client app, or unmaps
 * ION memory for secure-service/generic handles.  If the handle was
 * already released by an ioctl (data->released == true), the type-specific
 * teardown is skipped.  Finally, any bus-bandwidth or clock votes held by
 * this fd are dropped and the handle is freed.
 *
 * Returns the result of the type-specific teardown (0 if none ran).
 */
static int qseecom_release(struct inode *inode, struct file *file)
{
	struct qseecom_dev_handle *data = file->private_data;
	int ret = 0;

	if (data->released == false) {
		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
			data->type, data->mode, data);
		switch (data->type) {
		case QSEECOM_LISTENER_SERVICE:
			/* app_access_lock serializes against in-flight TZ calls */
			mutex_lock(&app_access_lock);
			ret = qseecom_unregister_listener(data);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_CLIENT_APP:
			mutex_lock(&app_access_lock);
			ret = qseecom_unload_app(data, true);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_SECURE_SERVICE:
		case QSEECOM_GENERIC:
			ret = qseecom_unmap_ion_allocated_memory(data);
			if (ret)
				pr_err("Ion Unmap failed\n");
			break;
		case QSEECOM_UNAVAILABLE_CLIENT_APP:
			/* app was never reachable; nothing to tear down */
			break;
		default:
			pr_err("Unsupported clnt_handle_type %d",
				data->type);
			break;
		}
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		if (data->mode != INACTIVE) {
			/* drop this fd's vote; scale down only if no one else votes */
			qseecom_unregister_bus_bandwidth_needs(data);
			if (qseecom.cumulative_mode == INACTIVE) {
				ret = __qseecom_set_msm_bus_request(INACTIVE);
				if (ret)
					pr_err("Fail to scale down bus\n");
			}
		}
		mutex_unlock(&qsee_bw_mutex);
	} else {
		/* no bus scaling: release any clock votes this fd still holds */
		if (data->fast_load_enabled == true)
			qsee_disable_clock_vote(data, CLK_SFPB);
		if (data->perf_enabled == true)
			qsee_disable_clock_vote(data, CLK_DFAB);
	}
	kfree(data);

	return ret;
}
7617
7618#ifdef CONFIG_COMPAT
7619#include "compat_qseecom.c"
7620#else
7621#define compat_qseecom_ioctl NULL
7622#endif
7623
/* File operations for the /dev/qseecom character device. */
static const struct file_operations qseecom_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qseecom_ioctl,
	.compat_ioctl = compat_qseecom_ioctl,
	.open = qseecom_open,
	.release = qseecom_release
};
7631
7632static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7633{
7634 int rc = 0;
7635 struct device *pdev;
7636 struct qseecom_clk *qclk;
7637 char *core_clk_src = NULL;
7638 char *core_clk = NULL;
7639 char *iface_clk = NULL;
7640 char *bus_clk = NULL;
7641
7642 switch (ce) {
7643 case CLK_QSEE: {
7644 core_clk_src = "core_clk_src";
7645 core_clk = "core_clk";
7646 iface_clk = "iface_clk";
7647 bus_clk = "bus_clk";
7648 qclk = &qseecom.qsee;
7649 qclk->instance = CLK_QSEE;
7650 break;
7651 };
7652 case CLK_CE_DRV: {
7653 core_clk_src = "ce_drv_core_clk_src";
7654 core_clk = "ce_drv_core_clk";
7655 iface_clk = "ce_drv_iface_clk";
7656 bus_clk = "ce_drv_bus_clk";
7657 qclk = &qseecom.ce_drv;
7658 qclk->instance = CLK_CE_DRV;
7659 break;
7660 };
7661 default:
7662 pr_err("Invalid ce hw instance: %d!\n", ce);
7663 return -EIO;
7664 }
7665
7666 if (qseecom.no_clock_support) {
7667 qclk->ce_core_clk = NULL;
7668 qclk->ce_clk = NULL;
7669 qclk->ce_bus_clk = NULL;
7670 qclk->ce_core_src_clk = NULL;
7671 return 0;
7672 }
7673
7674 pdev = qseecom.pdev;
7675
7676 /* Get CE3 src core clk. */
7677 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7678 if (!IS_ERR(qclk->ce_core_src_clk)) {
7679 rc = clk_set_rate(qclk->ce_core_src_clk,
7680 qseecom.ce_opp_freq_hz);
7681 if (rc) {
7682 clk_put(qclk->ce_core_src_clk);
7683 qclk->ce_core_src_clk = NULL;
7684 pr_err("Unable to set the core src clk @%uMhz.\n",
7685 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7686 return -EIO;
7687 }
7688 } else {
7689 pr_warn("Unable to get CE core src clk, set to NULL\n");
7690 qclk->ce_core_src_clk = NULL;
7691 }
7692
7693 /* Get CE core clk */
7694 qclk->ce_core_clk = clk_get(pdev, core_clk);
7695 if (IS_ERR(qclk->ce_core_clk)) {
7696 rc = PTR_ERR(qclk->ce_core_clk);
7697 pr_err("Unable to get CE core clk\n");
7698 if (qclk->ce_core_src_clk != NULL)
7699 clk_put(qclk->ce_core_src_clk);
7700 return -EIO;
7701 }
7702
7703 /* Get CE Interface clk */
7704 qclk->ce_clk = clk_get(pdev, iface_clk);
7705 if (IS_ERR(qclk->ce_clk)) {
7706 rc = PTR_ERR(qclk->ce_clk);
7707 pr_err("Unable to get CE interface clk\n");
7708 if (qclk->ce_core_src_clk != NULL)
7709 clk_put(qclk->ce_core_src_clk);
7710 clk_put(qclk->ce_core_clk);
7711 return -EIO;
7712 }
7713
7714 /* Get CE AXI clk */
7715 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7716 if (IS_ERR(qclk->ce_bus_clk)) {
7717 rc = PTR_ERR(qclk->ce_bus_clk);
7718 pr_err("Unable to get CE BUS interface clk\n");
7719 if (qclk->ce_core_src_clk != NULL)
7720 clk_put(qclk->ce_core_src_clk);
7721 clk_put(qclk->ce_core_clk);
7722 clk_put(qclk->ce_clk);
7723 return -EIO;
7724 }
7725
7726 return rc;
7727}
7728
7729static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7730{
7731 struct qseecom_clk *qclk;
7732
7733 if (ce == CLK_QSEE)
7734 qclk = &qseecom.qsee;
7735 else
7736 qclk = &qseecom.ce_drv;
7737
7738 if (qclk->ce_clk != NULL) {
7739 clk_put(qclk->ce_clk);
7740 qclk->ce_clk = NULL;
7741 }
7742 if (qclk->ce_core_clk != NULL) {
7743 clk_put(qclk->ce_core_clk);
7744 qclk->ce_core_clk = NULL;
7745 }
7746 if (qclk->ce_bus_clk != NULL) {
7747 clk_put(qclk->ce_bus_clk);
7748 qclk->ce_bus_clk = NULL;
7749 }
7750 if (qclk->ce_core_src_clk != NULL) {
7751 clk_put(qclk->ce_core_src_clk);
7752 qclk->ce_core_src_clk = NULL;
7753 }
7754 qclk->instance = CLK_INVALID;
7755}
7756
7757static int qseecom_retrieve_ce_data(struct platform_device *pdev)
7758{
7759 int rc = 0;
7760 uint32_t hlos_num_ce_hw_instances;
7761 uint32_t disk_encrypt_pipe;
7762 uint32_t file_encrypt_pipe;
7763 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT];
7764 int i;
7765 const int *tbl;
7766 int size;
7767 int entry;
7768 struct qseecom_crypto_info *pfde_tbl = NULL;
7769 struct qseecom_crypto_info *p;
7770 int tbl_size;
7771 int j;
7772 bool old_db = true;
7773 struct qseecom_ce_info_use *pce_info_use;
7774 uint32_t *unit_tbl = NULL;
7775 int total_units = 0;
7776 struct qseecom_ce_pipe_entry *pce_entry;
7777
7778 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
7779 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
7780
7781 if (of_property_read_u32((&pdev->dev)->of_node,
7782 "qcom,qsee-ce-hw-instance",
7783 &qseecom.ce_info.qsee_ce_hw_instance)) {
7784 pr_err("Fail to get qsee ce hw instance information.\n");
7785 rc = -EINVAL;
7786 goto out;
7787 } else {
7788 pr_debug("qsee-ce-hw-instance=0x%x\n",
7789 qseecom.ce_info.qsee_ce_hw_instance);
7790 }
7791
7792 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
7793 "qcom,support-fde");
7794 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
7795 "qcom,support-pfe");
7796
7797 if (!qseecom.support_pfe && !qseecom.support_fde) {
7798 pr_warn("Device does not support PFE/FDE");
7799 goto out;
7800 }
7801
7802 if (qseecom.support_fde)
7803 tbl = of_get_property((&pdev->dev)->of_node,
7804 "qcom,full-disk-encrypt-info", &size);
7805 else
7806 tbl = NULL;
7807 if (tbl) {
7808 old_db = false;
7809 if (size % sizeof(struct qseecom_crypto_info)) {
7810 pr_err("full-disk-encrypt-info tbl size(%d)\n",
7811 size);
7812 rc = -EINVAL;
7813 goto out;
7814 }
7815 tbl_size = size / sizeof
7816 (struct qseecom_crypto_info);
7817
7818 pfde_tbl = kzalloc(size, GFP_KERNEL);
7819 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
7820 total_units = 0;
7821
7822 if (!pfde_tbl || !unit_tbl) {
7823 pr_err("failed to alloc memory\n");
7824 rc = -ENOMEM;
7825 goto out;
7826 }
7827 if (of_property_read_u32_array((&pdev->dev)->of_node,
7828 "qcom,full-disk-encrypt-info",
7829 (u32 *)pfde_tbl, size/sizeof(u32))) {
7830 pr_err("failed to read full-disk-encrypt-info tbl\n");
7831 rc = -EINVAL;
7832 goto out;
7833 }
7834
7835 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7836 for (j = 0; j < total_units; j++) {
7837 if (p->unit_num == *(unit_tbl + j))
7838 break;
7839 }
7840 if (j == total_units) {
7841 *(unit_tbl + total_units) = p->unit_num;
7842 total_units++;
7843 }
7844 }
7845
7846 qseecom.ce_info.num_fde = total_units;
7847 pce_info_use = qseecom.ce_info.fde = kcalloc(
7848 total_units, sizeof(struct qseecom_ce_info_use),
7849 GFP_KERNEL);
7850 if (!pce_info_use) {
7851 pr_err("failed to alloc memory\n");
7852 rc = -ENOMEM;
7853 goto out;
7854 }
7855
7856 for (j = 0; j < total_units; j++, pce_info_use++) {
7857 pce_info_use->unit_num = *(unit_tbl + j);
7858 pce_info_use->alloc = false;
7859 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
7860 pce_info_use->num_ce_pipe_entries = 0;
7861 pce_info_use->ce_pipe_entry = NULL;
7862 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7863 if (p->unit_num == pce_info_use->unit_num)
7864 pce_info_use->num_ce_pipe_entries++;
7865 }
7866
7867 entry = pce_info_use->num_ce_pipe_entries;
7868 pce_entry = pce_info_use->ce_pipe_entry =
7869 kcalloc(entry,
7870 sizeof(struct qseecom_ce_pipe_entry),
7871 GFP_KERNEL);
7872 if (pce_entry == NULL) {
7873 pr_err("failed to alloc memory\n");
7874 rc = -ENOMEM;
7875 goto out;
7876 }
7877
7878 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7879 if (p->unit_num == pce_info_use->unit_num) {
7880 pce_entry->ce_num = p->ce;
7881 pce_entry->ce_pipe_pair =
7882 p->pipe_pair;
7883 pce_entry->valid = true;
7884 pce_entry++;
7885 }
7886 }
7887 }
7888 kfree(unit_tbl);
7889 unit_tbl = NULL;
7890 kfree(pfde_tbl);
7891 pfde_tbl = NULL;
7892 }
7893
7894 if (qseecom.support_pfe)
7895 tbl = of_get_property((&pdev->dev)->of_node,
7896 "qcom,per-file-encrypt-info", &size);
7897 else
7898 tbl = NULL;
7899 if (tbl) {
7900 old_db = false;
7901 if (size % sizeof(struct qseecom_crypto_info)) {
7902 pr_err("per-file-encrypt-info tbl size(%d)\n",
7903 size);
7904 rc = -EINVAL;
7905 goto out;
7906 }
7907 tbl_size = size / sizeof
7908 (struct qseecom_crypto_info);
7909
7910 pfde_tbl = kzalloc(size, GFP_KERNEL);
7911 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
7912 total_units = 0;
7913 if (!pfde_tbl || !unit_tbl) {
7914 pr_err("failed to alloc memory\n");
7915 rc = -ENOMEM;
7916 goto out;
7917 }
7918 if (of_property_read_u32_array((&pdev->dev)->of_node,
7919 "qcom,per-file-encrypt-info",
7920 (u32 *)pfde_tbl, size/sizeof(u32))) {
7921 pr_err("failed to read per-file-encrypt-info tbl\n");
7922 rc = -EINVAL;
7923 goto out;
7924 }
7925
7926 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7927 for (j = 0; j < total_units; j++) {
7928 if (p->unit_num == *(unit_tbl + j))
7929 break;
7930 }
7931 if (j == total_units) {
7932 *(unit_tbl + total_units) = p->unit_num;
7933 total_units++;
7934 }
7935 }
7936
7937 qseecom.ce_info.num_pfe = total_units;
7938 pce_info_use = qseecom.ce_info.pfe = kcalloc(
7939 total_units, sizeof(struct qseecom_ce_info_use),
7940 GFP_KERNEL);
7941 if (!pce_info_use) {
7942 pr_err("failed to alloc memory\n");
7943 rc = -ENOMEM;
7944 goto out;
7945 }
7946
7947 for (j = 0; j < total_units; j++, pce_info_use++) {
7948 pce_info_use->unit_num = *(unit_tbl + j);
7949 pce_info_use->alloc = false;
7950 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
7951 pce_info_use->num_ce_pipe_entries = 0;
7952 pce_info_use->ce_pipe_entry = NULL;
7953 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7954 if (p->unit_num == pce_info_use->unit_num)
7955 pce_info_use->num_ce_pipe_entries++;
7956 }
7957
7958 entry = pce_info_use->num_ce_pipe_entries;
7959 pce_entry = pce_info_use->ce_pipe_entry =
7960 kcalloc(entry,
7961 sizeof(struct qseecom_ce_pipe_entry),
7962 GFP_KERNEL);
7963 if (pce_entry == NULL) {
7964 pr_err("failed to alloc memory\n");
7965 rc = -ENOMEM;
7966 goto out;
7967 }
7968
7969 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7970 if (p->unit_num == pce_info_use->unit_num) {
7971 pce_entry->ce_num = p->ce;
7972 pce_entry->ce_pipe_pair =
7973 p->pipe_pair;
7974 pce_entry->valid = true;
7975 pce_entry++;
7976 }
7977 }
7978 }
7979 kfree(unit_tbl);
7980 unit_tbl = NULL;
7981 kfree(pfde_tbl);
7982 pfde_tbl = NULL;
7983 }
7984
7985 if (!old_db)
7986 goto out1;
7987
7988 if (of_property_read_bool((&pdev->dev)->of_node,
7989 "qcom,support-multiple-ce-hw-instance")) {
7990 if (of_property_read_u32((&pdev->dev)->of_node,
7991 "qcom,hlos-num-ce-hw-instances",
7992 &hlos_num_ce_hw_instances)) {
7993 pr_err("Fail: get hlos number of ce hw instance\n");
7994 rc = -EINVAL;
7995 goto out;
7996 }
7997 } else {
7998 hlos_num_ce_hw_instances = 1;
7999 }
8000
8001 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8002 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8003 MAX_CE_PIPE_PAIR_PER_UNIT);
8004 rc = -EINVAL;
8005 goto out;
8006 }
8007
8008 if (of_property_read_u32_array((&pdev->dev)->of_node,
8009 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8010 hlos_num_ce_hw_instances)) {
8011 pr_err("Fail: get hlos ce hw instance info\n");
8012 rc = -EINVAL;
8013 goto out;
8014 }
8015
8016 if (qseecom.support_fde) {
8017 pce_info_use = qseecom.ce_info.fde =
8018 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8019 if (!pce_info_use) {
8020 pr_err("failed to alloc memory\n");
8021 rc = -ENOMEM;
8022 goto out;
8023 }
8024 /* by default for old db */
8025 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8026 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8027 pce_info_use->alloc = false;
8028 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8029 pce_info_use->ce_pipe_entry = NULL;
8030 if (of_property_read_u32((&pdev->dev)->of_node,
8031 "qcom,disk-encrypt-pipe-pair",
8032 &disk_encrypt_pipe)) {
8033 pr_err("Fail to get FDE pipe information.\n");
8034 rc = -EINVAL;
8035 goto out;
8036 } else {
8037 pr_debug("disk-encrypt-pipe-pair=0x%x",
8038 disk_encrypt_pipe);
8039 }
8040 entry = pce_info_use->num_ce_pipe_entries =
8041 hlos_num_ce_hw_instances;
8042 pce_entry = pce_info_use->ce_pipe_entry =
8043 kcalloc(entry,
8044 sizeof(struct qseecom_ce_pipe_entry),
8045 GFP_KERNEL);
8046 if (pce_entry == NULL) {
8047 pr_err("failed to alloc memory\n");
8048 rc = -ENOMEM;
8049 goto out;
8050 }
8051 for (i = 0; i < entry; i++) {
8052 pce_entry->ce_num = hlos_ce_hw_instance[i];
8053 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8054 pce_entry->valid = 1;
8055 pce_entry++;
8056 }
8057 } else {
8058 pr_warn("Device does not support FDE");
8059 disk_encrypt_pipe = 0xff;
8060 }
8061 if (qseecom.support_pfe) {
8062 pce_info_use = qseecom.ce_info.pfe =
8063 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8064 if (!pce_info_use) {
8065 pr_err("failed to alloc memory\n");
8066 rc = -ENOMEM;
8067 goto out;
8068 }
8069 /* by default for old db */
8070 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8071 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8072 pce_info_use->alloc = false;
8073 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8074 pce_info_use->ce_pipe_entry = NULL;
8075
8076 if (of_property_read_u32((&pdev->dev)->of_node,
8077 "qcom,file-encrypt-pipe-pair",
8078 &file_encrypt_pipe)) {
8079 pr_err("Fail to get PFE pipe information.\n");
8080 rc = -EINVAL;
8081 goto out;
8082 } else {
8083 pr_debug("file-encrypt-pipe-pair=0x%x",
8084 file_encrypt_pipe);
8085 }
8086 entry = pce_info_use->num_ce_pipe_entries =
8087 hlos_num_ce_hw_instances;
8088 pce_entry = pce_info_use->ce_pipe_entry =
8089 kcalloc(entry,
8090 sizeof(struct qseecom_ce_pipe_entry),
8091 GFP_KERNEL);
8092 if (pce_entry == NULL) {
8093 pr_err("failed to alloc memory\n");
8094 rc = -ENOMEM;
8095 goto out;
8096 }
8097 for (i = 0; i < entry; i++) {
8098 pce_entry->ce_num = hlos_ce_hw_instance[i];
8099 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8100 pce_entry->valid = 1;
8101 pce_entry++;
8102 }
8103 } else {
8104 pr_warn("Device does not support PFE");
8105 file_encrypt_pipe = 0xff;
8106 }
8107
8108out1:
8109 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8110 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8111out:
8112 if (rc) {
8113 if (qseecom.ce_info.fde) {
8114 pce_info_use = qseecom.ce_info.fde;
8115 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8116 pce_entry = pce_info_use->ce_pipe_entry;
8117 kfree(pce_entry);
8118 pce_info_use++;
8119 }
8120 }
8121 kfree(qseecom.ce_info.fde);
8122 qseecom.ce_info.fde = NULL;
8123 if (qseecom.ce_info.pfe) {
8124 pce_info_use = qseecom.ce_info.pfe;
8125 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8126 pce_entry = pce_info_use->ce_pipe_entry;
8127 kfree(pce_entry);
8128 pce_info_use++;
8129 }
8130 }
8131 kfree(qseecom.ce_info.pfe);
8132 qseecom.ce_info.pfe = NULL;
8133 }
8134 kfree(unit_tbl);
8135 kfree(pfde_tbl);
8136 return rc;
8137}
8138
8139static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8140 void __user *argp)
8141{
8142 struct qseecom_ce_info_req req;
8143 struct qseecom_ce_info_req *pinfo = &req;
8144 int ret = 0;
8145 int i;
8146 unsigned int entries;
8147 struct qseecom_ce_info_use *pce_info_use, *p;
8148 int total = 0;
8149 bool found = false;
8150 struct qseecom_ce_pipe_entry *pce_entry;
8151
8152 ret = copy_from_user(pinfo, argp,
8153 sizeof(struct qseecom_ce_info_req));
8154 if (ret) {
8155 pr_err("copy_from_user failed\n");
8156 return ret;
8157 }
8158
8159 switch (pinfo->usage) {
8160 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8161 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8162 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8163 if (qseecom.support_fde) {
8164 p = qseecom.ce_info.fde;
8165 total = qseecom.ce_info.num_fde;
8166 } else {
8167 pr_err("system does not support fde\n");
8168 return -EINVAL;
8169 }
8170 break;
8171 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8172 if (qseecom.support_pfe) {
8173 p = qseecom.ce_info.pfe;
8174 total = qseecom.ce_info.num_pfe;
8175 } else {
8176 pr_err("system does not support pfe\n");
8177 return -EINVAL;
8178 }
8179 break;
8180 default:
8181 pr_err("unsupported usage %d\n", pinfo->usage);
8182 return -EINVAL;
8183 }
8184
8185 pce_info_use = NULL;
8186 for (i = 0; i < total; i++) {
8187 if (!p->alloc)
8188 pce_info_use = p;
8189 else if (!memcmp(p->handle, pinfo->handle,
8190 MAX_CE_INFO_HANDLE_SIZE)) {
8191 pce_info_use = p;
8192 found = true;
8193 break;
8194 }
8195 p++;
8196 }
8197
8198 if (pce_info_use == NULL)
8199 return -EBUSY;
8200
8201 pinfo->unit_num = pce_info_use->unit_num;
8202 if (!pce_info_use->alloc) {
8203 pce_info_use->alloc = true;
8204 memcpy(pce_info_use->handle,
8205 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8206 }
8207 if (pce_info_use->num_ce_pipe_entries >
8208 MAX_CE_PIPE_PAIR_PER_UNIT)
8209 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8210 else
8211 entries = pce_info_use->num_ce_pipe_entries;
8212 pinfo->num_ce_pipe_entries = entries;
8213 pce_entry = pce_info_use->ce_pipe_entry;
8214 for (i = 0; i < entries; i++, pce_entry++)
8215 pinfo->ce_pipe_entry[i] = *pce_entry;
8216 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8217 pinfo->ce_pipe_entry[i].valid = 0;
8218
8219 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8220 pr_err("copy_to_user failed\n");
8221 ret = -EFAULT;
8222 }
8223 return ret;
8224}
8225
8226static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8227 void __user *argp)
8228{
8229 struct qseecom_ce_info_req req;
8230 struct qseecom_ce_info_req *pinfo = &req;
8231 int ret = 0;
8232 struct qseecom_ce_info_use *p;
8233 int total = 0;
8234 int i;
8235 bool found = false;
8236
8237 ret = copy_from_user(pinfo, argp,
8238 sizeof(struct qseecom_ce_info_req));
8239 if (ret)
8240 return ret;
8241
8242 switch (pinfo->usage) {
8243 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8244 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8245 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8246 if (qseecom.support_fde) {
8247 p = qseecom.ce_info.fde;
8248 total = qseecom.ce_info.num_fde;
8249 } else {
8250 pr_err("system does not support fde\n");
8251 return -EINVAL;
8252 }
8253 break;
8254 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8255 if (qseecom.support_pfe) {
8256 p = qseecom.ce_info.pfe;
8257 total = qseecom.ce_info.num_pfe;
8258 } else {
8259 pr_err("system does not support pfe\n");
8260 return -EINVAL;
8261 }
8262 break;
8263 default:
8264 pr_err("unsupported usage %d\n", pinfo->usage);
8265 return -EINVAL;
8266 }
8267
8268 for (i = 0; i < total; i++) {
8269 if (p->alloc &&
8270 !memcmp(p->handle, pinfo->handle,
8271 MAX_CE_INFO_HANDLE_SIZE)) {
8272 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8273 p->alloc = false;
8274 found = true;
8275 break;
8276 }
8277 p++;
8278 }
8279 return ret;
8280}
8281
8282static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8283 void __user *argp)
8284{
8285 struct qseecom_ce_info_req req;
8286 struct qseecom_ce_info_req *pinfo = &req;
8287 int ret = 0;
8288 int i;
8289 unsigned int entries;
8290 struct qseecom_ce_info_use *pce_info_use, *p;
8291 int total = 0;
8292 bool found = false;
8293 struct qseecom_ce_pipe_entry *pce_entry;
8294
8295 ret = copy_from_user(pinfo, argp,
8296 sizeof(struct qseecom_ce_info_req));
8297 if (ret)
8298 return ret;
8299
8300 switch (pinfo->usage) {
8301 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8302 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8303 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8304 if (qseecom.support_fde) {
8305 p = qseecom.ce_info.fde;
8306 total = qseecom.ce_info.num_fde;
8307 } else {
8308 pr_err("system does not support fde\n");
8309 return -EINVAL;
8310 }
8311 break;
8312 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8313 if (qseecom.support_pfe) {
8314 p = qseecom.ce_info.pfe;
8315 total = qseecom.ce_info.num_pfe;
8316 } else {
8317 pr_err("system does not support pfe\n");
8318 return -EINVAL;
8319 }
8320 break;
8321 default:
8322 pr_err("unsupported usage %d\n", pinfo->usage);
8323 return -EINVAL;
8324 }
8325
8326 pce_info_use = NULL;
8327 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8328 pinfo->num_ce_pipe_entries = 0;
8329 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8330 pinfo->ce_pipe_entry[i].valid = 0;
8331
8332 for (i = 0; i < total; i++) {
8333
8334 if (p->alloc && !memcmp(p->handle,
8335 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8336 pce_info_use = p;
8337 found = true;
8338 break;
8339 }
8340 p++;
8341 }
8342 if (!pce_info_use)
8343 goto out;
8344 pinfo->unit_num = pce_info_use->unit_num;
8345 if (pce_info_use->num_ce_pipe_entries >
8346 MAX_CE_PIPE_PAIR_PER_UNIT)
8347 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8348 else
8349 entries = pce_info_use->num_ce_pipe_entries;
8350 pinfo->num_ce_pipe_entries = entries;
8351 pce_entry = pce_info_use->ce_pipe_entry;
8352 for (i = 0; i < entries; i++, pce_entry++)
8353 pinfo->ce_pipe_entry[i] = *pce_entry;
8354 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8355 pinfo->ce_pipe_entry[i].valid = 0;
8356out:
8357 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8358 pr_err("copy_to_user failed\n");
8359 ret = -EFAULT;
8360 }
8361 return ret;
8362}
8363
8364/*
8365 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8366 * then whitelist feature is not supported.
8367 */
8368static int qseecom_check_whitelist_feature(void)
8369{
8370 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8371
8372 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8373}
8374
8375static int qseecom_probe(struct platform_device *pdev)
8376{
8377 int rc;
8378 int i;
8379 uint32_t feature = 10;
8380 struct device *class_dev;
8381 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8382 struct qseecom_command_scm_resp resp;
8383 struct qseecom_ce_info_use *pce_info_use = NULL;
8384
8385 qseecom.qsee_bw_count = 0;
8386 qseecom.qsee_perf_client = 0;
8387 qseecom.qsee_sfpb_bw_count = 0;
8388
8389 qseecom.qsee.ce_core_clk = NULL;
8390 qseecom.qsee.ce_clk = NULL;
8391 qseecom.qsee.ce_core_src_clk = NULL;
8392 qseecom.qsee.ce_bus_clk = NULL;
8393
8394 qseecom.cumulative_mode = 0;
8395 qseecom.current_mode = INACTIVE;
8396 qseecom.support_bus_scaling = false;
8397 qseecom.support_fde = false;
8398 qseecom.support_pfe = false;
8399
8400 qseecom.ce_drv.ce_core_clk = NULL;
8401 qseecom.ce_drv.ce_clk = NULL;
8402 qseecom.ce_drv.ce_core_src_clk = NULL;
8403 qseecom.ce_drv.ce_bus_clk = NULL;
8404 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8405
8406 qseecom.app_block_ref_cnt = 0;
8407 init_waitqueue_head(&qseecom.app_block_wq);
8408 qseecom.whitelist_support = true;
8409
8410 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8411 if (rc < 0) {
8412 pr_err("alloc_chrdev_region failed %d\n", rc);
8413 return rc;
8414 }
8415
8416 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8417 if (IS_ERR(driver_class)) {
8418 rc = -ENOMEM;
8419 pr_err("class_create failed %d\n", rc);
8420 goto exit_unreg_chrdev_region;
8421 }
8422
8423 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8424 QSEECOM_DEV);
8425 if (IS_ERR(class_dev)) {
8426 pr_err("class_device_create failed %d\n", rc);
8427 rc = -ENOMEM;
8428 goto exit_destroy_class;
8429 }
8430
8431 cdev_init(&qseecom.cdev, &qseecom_fops);
8432 qseecom.cdev.owner = THIS_MODULE;
8433
8434 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8435 if (rc < 0) {
8436 pr_err("cdev_add failed %d\n", rc);
8437 goto exit_destroy_device;
8438 }
8439
8440 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
8441 spin_lock_init(&qseecom.registered_listener_list_lock);
8442 INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8443 spin_lock_init(&qseecom.registered_app_list_lock);
8444 INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8445 spin_lock_init(&qseecom.registered_kclient_list_lock);
8446 init_waitqueue_head(&qseecom.send_resp_wq);
8447 qseecom.send_resp_flag = 0;
8448
8449 qseecom.qsee_version = QSEEE_VERSION_00;
8450 rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8451 &resp, sizeof(resp));
8452 pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8453 if (rc) {
8454 pr_err("Failed to get QSEE version info %d\n", rc);
8455 goto exit_del_cdev;
8456 }
8457 qseecom.qsee_version = resp.result;
8458 qseecom.qseos_version = QSEOS_VERSION_14;
8459 qseecom.commonlib_loaded = false;
8460 qseecom.commonlib64_loaded = false;
8461 qseecom.pdev = class_dev;
8462 /* Create ION msm client */
8463 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8464 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8465 pr_err("Ion client cannot be created\n");
8466 rc = -ENOMEM;
8467 goto exit_del_cdev;
8468 }
8469
8470 /* register client for bus scaling */
8471 if (pdev->dev.of_node) {
8472 qseecom.pdev->of_node = pdev->dev.of_node;
8473 qseecom.support_bus_scaling =
8474 of_property_read_bool((&pdev->dev)->of_node,
8475 "qcom,support-bus-scaling");
8476 rc = qseecom_retrieve_ce_data(pdev);
8477 if (rc)
8478 goto exit_destroy_ion_client;
8479 qseecom.appsbl_qseecom_support =
8480 of_property_read_bool((&pdev->dev)->of_node,
8481 "qcom,appsbl-qseecom-support");
8482 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
8483 qseecom.appsbl_qseecom_support);
8484
8485 qseecom.commonlib64_loaded =
8486 of_property_read_bool((&pdev->dev)->of_node,
8487 "qcom,commonlib64-loaded-by-uefi");
8488 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
8489 qseecom.commonlib64_loaded);
8490 qseecom.fde_key_size =
8491 of_property_read_bool((&pdev->dev)->of_node,
8492 "qcom,fde-key-size");
8493 qseecom.no_clock_support =
8494 of_property_read_bool((&pdev->dev)->of_node,
8495 "qcom,no-clock-support");
8496 if (!qseecom.no_clock_support) {
8497 pr_info("qseecom clocks handled by other subsystem\n");
8498 } else {
8499 pr_info("no-clock-support=0x%x",
8500 qseecom.no_clock_support);
8501 }
8502
8503 if (of_property_read_u32((&pdev->dev)->of_node,
8504 "qcom,qsee-reentrancy-support",
8505 &qseecom.qsee_reentrancy_support)) {
8506 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
8507 qseecom.qsee_reentrancy_support = 0;
8508 } else {
8509 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
8510 qseecom.qsee_reentrancy_support);
8511 }
8512
8513 /*
8514 * The qseecom bus scaling flag can not be enabled when
8515 * crypto clock is not handled by HLOS.
8516 */
8517 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
8518 pr_err("support_bus_scaling flag can not be enabled.\n");
8519 rc = -EINVAL;
8520 goto exit_destroy_ion_client;
8521 }
8522
8523 if (of_property_read_u32((&pdev->dev)->of_node,
8524 "qcom,ce-opp-freq",
8525 &qseecom.ce_opp_freq_hz)) {
8526 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
8527 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
8528 }
8529 rc = __qseecom_init_clk(CLK_QSEE);
8530 if (rc)
8531 goto exit_destroy_ion_client;
8532
8533 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8534 (qseecom.support_pfe || qseecom.support_fde)) {
8535 rc = __qseecom_init_clk(CLK_CE_DRV);
8536 if (rc) {
8537 __qseecom_deinit_clk(CLK_QSEE);
8538 goto exit_destroy_ion_client;
8539 }
8540 } else {
8541 struct qseecom_clk *qclk;
8542
8543 qclk = &qseecom.qsee;
8544 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
8545 qseecom.ce_drv.ce_clk = qclk->ce_clk;
8546 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
8547 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
8548 }
8549
8550 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8551 msm_bus_cl_get_pdata(pdev);
8552 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
8553 (!qseecom.is_apps_region_protected &&
8554 !qseecom.appsbl_qseecom_support)) {
8555 struct resource *resource = NULL;
8556 struct qsee_apps_region_info_ireq req;
8557 struct qsee_apps_region_info_64bit_ireq req_64bit;
8558 struct qseecom_command_scm_resp resp;
8559 void *cmd_buf = NULL;
8560 size_t cmd_len;
8561
8562 resource = platform_get_resource_byname(pdev,
8563 IORESOURCE_MEM, "secapp-region");
8564 if (resource) {
8565 if (qseecom.qsee_version < QSEE_VERSION_40) {
8566 req.qsee_cmd_id =
8567 QSEOS_APP_REGION_NOTIFICATION;
8568 req.addr = (uint32_t)resource->start;
8569 req.size = resource_size(resource);
8570 cmd_buf = (void *)&req;
8571 cmd_len = sizeof(struct
8572 qsee_apps_region_info_ireq);
8573 pr_warn("secure app region addr=0x%x size=0x%x",
8574 req.addr, req.size);
8575 } else {
8576 req_64bit.qsee_cmd_id =
8577 QSEOS_APP_REGION_NOTIFICATION;
8578 req_64bit.addr = resource->start;
8579 req_64bit.size = resource_size(
8580 resource);
8581 cmd_buf = (void *)&req_64bit;
8582 cmd_len = sizeof(struct
8583 qsee_apps_region_info_64bit_ireq);
8584 pr_warn("secure app region addr=0x%llx size=0x%x",
8585 req_64bit.addr, req_64bit.size);
8586 }
8587 } else {
8588 pr_err("Fail to get secure app region info\n");
8589 rc = -EINVAL;
8590 goto exit_deinit_clock;
8591 }
8592 rc = __qseecom_enable_clk(CLK_QSEE);
8593 if (rc) {
8594 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
8595 rc = -EIO;
8596 goto exit_deinit_clock;
8597 }
8598 rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
8599 cmd_buf, cmd_len,
8600 &resp, sizeof(resp));
8601 __qseecom_disable_clk(CLK_QSEE);
8602 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
8603 pr_err("send secapp reg fail %d resp.res %d\n",
8604 rc, resp.result);
8605 rc = -EINVAL;
8606 goto exit_deinit_clock;
8607 }
8608 }
8609 /*
8610 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
8611 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
8612 * Pls add "qseecom.commonlib64_loaded = true" here too.
8613 */
8614 if (qseecom.is_apps_region_protected ||
8615 qseecom.appsbl_qseecom_support)
8616 qseecom.commonlib_loaded = true;
8617 } else {
8618 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8619 pdev->dev.platform_data;
8620 }
8621 if (qseecom.support_bus_scaling) {
8622 init_timer(&(qseecom.bw_scale_down_timer));
8623 INIT_WORK(&qseecom.bw_inactive_req_ws,
8624 qseecom_bw_inactive_req_work);
8625 qseecom.bw_scale_down_timer.function =
8626 qseecom_scale_bus_bandwidth_timer_callback;
8627 }
8628 qseecom.timer_running = false;
8629 qseecom.qsee_perf_client = msm_bus_scale_register_client(
8630 qseecom_platform_support);
8631
8632 qseecom.whitelist_support = qseecom_check_whitelist_feature();
8633 pr_warn("qseecom.whitelist_support = %d\n",
8634 qseecom.whitelist_support);
8635
8636 if (!qseecom.qsee_perf_client)
8637 pr_err("Unable to register bus client\n");
8638
8639 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8640 return 0;
8641
8642exit_deinit_clock:
8643 __qseecom_deinit_clk(CLK_QSEE);
8644 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8645 (qseecom.support_pfe || qseecom.support_fde))
8646 __qseecom_deinit_clk(CLK_CE_DRV);
8647exit_destroy_ion_client:
8648 if (qseecom.ce_info.fde) {
8649 pce_info_use = qseecom.ce_info.fde;
8650 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8651 kzfree(pce_info_use->ce_pipe_entry);
8652 pce_info_use++;
8653 }
8654 kfree(qseecom.ce_info.fde);
8655 }
8656 if (qseecom.ce_info.pfe) {
8657 pce_info_use = qseecom.ce_info.pfe;
8658 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8659 kzfree(pce_info_use->ce_pipe_entry);
8660 pce_info_use++;
8661 }
8662 kfree(qseecom.ce_info.pfe);
8663 }
8664 ion_client_destroy(qseecom.ion_clnt);
8665exit_del_cdev:
8666 cdev_del(&qseecom.cdev);
8667exit_destroy_device:
8668 device_destroy(driver_class, qseecom_device_no);
8669exit_destroy_class:
8670 class_destroy(driver_class);
8671exit_unreg_chrdev_region:
8672 unregister_chrdev_region(qseecom_device_no, 1);
8673 return rc;
8674}
8675
8676static int qseecom_remove(struct platform_device *pdev)
8677{
8678 struct qseecom_registered_kclient_list *kclient = NULL;
8679 unsigned long flags = 0;
8680 int ret = 0;
8681 int i;
8682 struct qseecom_ce_pipe_entry *pce_entry;
8683 struct qseecom_ce_info_use *pce_info_use;
8684
8685 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8686 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
8687
8688 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
8689 list) {
8690 if (!kclient)
8691 goto exit_irqrestore;
8692
8693 /* Break the loop if client handle is NULL */
8694 if (!kclient->handle)
8695 goto exit_free_kclient;
8696
8697 if (list_empty(&kclient->list))
8698 goto exit_free_kc_handle;
8699
8700 list_del(&kclient->list);
8701 mutex_lock(&app_access_lock);
8702 ret = qseecom_unload_app(kclient->handle->dev, false);
8703 mutex_unlock(&app_access_lock);
8704 if (!ret) {
8705 kzfree(kclient->handle->dev);
8706 kzfree(kclient->handle);
8707 kzfree(kclient);
8708 }
8709 }
8710
8711exit_free_kc_handle:
8712 kzfree(kclient->handle);
8713exit_free_kclient:
8714 kzfree(kclient);
8715exit_irqrestore:
8716 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
8717
8718 if (qseecom.qseos_version > QSEEE_VERSION_00)
8719 qseecom_unload_commonlib_image();
8720
8721 if (qseecom.qsee_perf_client)
8722 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
8723 0);
8724 if (pdev->dev.platform_data != NULL)
8725 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
8726
8727 if (qseecom.support_bus_scaling) {
8728 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8729 del_timer_sync(&qseecom.bw_scale_down_timer);
8730 }
8731
8732 if (qseecom.ce_info.fde) {
8733 pce_info_use = qseecom.ce_info.fde;
8734 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8735 pce_entry = pce_info_use->ce_pipe_entry;
8736 kfree(pce_entry);
8737 pce_info_use++;
8738 }
8739 }
8740 kfree(qseecom.ce_info.fde);
8741 if (qseecom.ce_info.pfe) {
8742 pce_info_use = qseecom.ce_info.pfe;
8743 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8744 pce_entry = pce_info_use->ce_pipe_entry;
8745 kfree(pce_entry);
8746 pce_info_use++;
8747 }
8748 }
8749 kfree(qseecom.ce_info.pfe);
8750
8751 /* register client for bus scaling */
8752 if (pdev->dev.of_node) {
8753 __qseecom_deinit_clk(CLK_QSEE);
8754 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8755 (qseecom.support_pfe || qseecom.support_fde))
8756 __qseecom_deinit_clk(CLK_CE_DRV);
8757 }
8758
8759 ion_client_destroy(qseecom.ion_clnt);
8760
8761 cdev_del(&qseecom.cdev);
8762
8763 device_destroy(driver_class, qseecom_device_no);
8764
8765 class_destroy(driver_class);
8766
8767 unregister_chrdev_region(qseecom_device_no, 1);
8768
8769 return ret;
8770}
8771
/*
 * PM suspend: mark the driver suspended, vote the bus to INACTIVE,
 * gate the CE clocks if anyone is holding them, and stop the
 * bandwidth scale-down timer/work.  Always returns 0.
 */
static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
	/* Nothing to gate when another subsystem owns the clocks. */
	if (qseecom.no_clock_support)
		return 0;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);

	/* Drop the bus vote to INACTIVE if we currently hold a vote. */
	if (qseecom.current_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, INACTIVE);
		if (ret)
			pr_err("Fail to scale down bus\n");
		else
			qseecom.current_mode = INACTIVE;
	}

	/* Gate whichever CE clocks are prepared (counted accesses). */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
		if (qclk->ce_core_clk != NULL)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
	}

	/*
	 * NOTE(review): the timer and bw_inactive_req_ws work are only
	 * initialized in probe when support_bus_scaling is set, yet they
	 * are cancelled here unconditionally -- confirm this is safe when
	 * bus scaling is disabled.
	 */
	del_timer_sync(&(qseecom.bw_scale_down_timer));
	qseecom.timer_running = false;

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	cancel_work_sync(&qseecom.bw_inactive_req_ws);

	return 0;
}
8812
/*
 * PM resume: restore the pre-suspend bus vote (capped at HIGH),
 * re-enable any CE clocks that were held at suspend time, and restart
 * the bandwidth scale-down timer.  On clock failure the already-enabled
 * clocks are unwound and -EIO is returned; the driver state is set to
 * READY in all cases.
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	/* Clocks owned elsewhere: just flip the state back to READY. */
	if (qseecom.no_clock_support)
		goto exit;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	/* Restore the cumulative vote, capped at HIGH. */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	if (qseecom.cumulative_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	/*
	 * Re-enable clocks in core -> iface -> bus order, unwinding in
	 * reverse through the error labels below on any failure.
	 */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}

	/* Re-arm the scale-down timer if clocks or a bus vote are live. */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;

ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
8892
8893static const struct of_device_id qseecom_match[] = {
8894 {
8895 .compatible = "qcom,qseecom",
8896 },
8897 {}
8898};
8899
8900static struct platform_driver qseecom_plat_driver = {
8901 .probe = qseecom_probe,
8902 .remove = qseecom_remove,
8903 .suspend = qseecom_suspend,
8904 .resume = qseecom_resume,
8905 .driver = {
8906 .name = "qseecom",
8907 .owner = THIS_MODULE,
8908 .of_match_table = qseecom_match,
8909 },
8910};
8911
8912static int qseecom_init(void)
8913{
8914 return platform_driver_register(&qseecom_plat_driver);
8915}
8916
8917static void qseecom_exit(void)
8918{
8919 platform_driver_unregister(&qseecom_plat_driver);
8920}
8921
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");

/* Register module entry/exit handlers with the kernel module loader. */
module_init(qseecom_init);
module_exit(qseecom_exit);