blob: ea5396fe67b55d3940f94d766a4778664b99f3d8 [file] [log] [blame]
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
Zhen Kong3d1d92f2018-02-02 17:21:04 -08004 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
#define QSEECOM_DEV			"qseecom"

/* QSEOS/QSEE version codes; >= QSEE_VERSION_40 selects 64-bit ireq layouts */
#define QSEOS_VERSION_14		0x14
#define QSEEE_VERSION_00		0x400000
#define QSEE_VERSION_01			0x401000
#define QSEE_VERSION_02			0x402000
#define QSEE_VERSION_03			0x403000
#define QSEE_VERSION_04			0x404000
#define QSEE_VERSION_05			0x405000
#define QSEE_VERSION_20			0x800000
#define QSEE_VERSION_40			0x1000000 /* TZ.BF.4.0 */

#define QSEE_CE_CLK_100MHZ		100000000
#define CE_CLK_DIV			1000000

#define QSEECOM_MAX_SG_ENTRY			4096
#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT	\
		(QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)

#define QSEECOM_INVALID_KEY_ID		0xff

/* Save partition image hash for authentication check */
#define SCM_SAVE_PARTITION_HASH_ID	0x01

/* Check if enterprise security is activate */
#define SCM_IS_ACTIVATED_ID		0x02

/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
#define SCM_MDTP_CIPHER_DIP		0x01

/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
#define MAX_DIP				0x20000

#define RPMB_SERVICE			0x2000
#define SSD_SERVICE			0x3000

#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT	2000
#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT	2000
#define TWO				2
#define QSEECOM_UFS_ICE_CE_NUM		10
#define QSEECOM_SDCC_ICE_CE_NUM		20
#define QSEECOM_ICE_FDE_KEY_INDEX	0

#define PHY_ADDR_4G			(1ULL<<32)

/* driver lifecycle states kept in qseecom.qseecom_state */
#define QSEECOM_STATE_NOT_READY		0
#define QSEECOM_STATE_SUSPEND		1
#define QSEECOM_STATE_READY		2
#define QSEECOM_ICE_FDE_KEY_SIZE_MASK	2

/*
 * default ce info unit to 0 for
 * services which
 * support only single instance.
 * Most of services are in this category.
 */
#define DEFAULT_CE_INFO_UNIT		0
#define DEFAULT_NUM_CE_INFO_UNIT	1
111
/* Bus/clock vote identifiers used by the bandwidth-scaling paths. */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};
116
117enum qseecom_ice_key_size_type {
118 QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
119 (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
120 QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
121 (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
122 QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
123 (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
124};
125
/* Discriminator for the handle union inside struct qseecom_dev_handle. */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,
	QSEECOM_LISTENER_SERVICE,
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};
133
/* Which crypto-engine clock set an operation targets. */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,
	CLK_CE_DRV,
	CLK_INVALID,
};
139
140static struct class *driver_class;
141static dev_t qseecom_device_no;
142
143static DEFINE_MUTEX(qsee_bw_mutex);
144static DEFINE_MUTEX(app_access_lock);
145static DEFINE_MUTEX(clk_access_lock);
146
/*
 * One scatter/gather descriptor passed to TZ; see the comment on
 * SGLISTINFO_SET_INDEX_FLAG for the bit layout of indexAndFlags.
 */
struct sglist_info {
	uint32_t indexAndFlags;
	uint32_t sizeOrCount;
};
151
/*
 * The 31th bit indicates only one or multiple physical address inside
 * the request buffer. If it is set, the index locates a single physical addr
 * inside the request buffer, and `sizeOrCount` is the size of the memory being
 * shared at that physical address.
 * Otherwise, the index locates an array of {start, len} pairs (a
 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
 * that array.
 *
 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
 *
 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
 */
/*
 * Fix: every macro argument is now parenthesized so expression arguments
 * (e.g. `a | b`) cannot be re-bound by operator precedence inside the
 * expansion.  Values produced for all existing call sites are unchanged.
 */
#define SGLISTINFO_SET_INDEX_FLAG(c, s, i)	\
	((uint32_t)((((c) & 1) << 31) | (((s) & 1) << 30) | \
	((i) & 0x3fffffff)))

#define SGLISTINFO_TABLE_SIZE	(sizeof(struct sglist_info) * MAX_ION_FD)

#define FEATURE_ID_WHITELIST	15	/*whitelist feature id*/

#define MAKE_WHITELIST_VERSION(major, minor, patch) \
	((((major) & 0x3FF) << 22) | (((minor) & 0x3FF) << 12) | \
	((patch) & 0xFFF))
175
176struct qseecom_registered_listener_list {
177 struct list_head list;
178 struct qseecom_register_listener_req svc;
179 void *user_virt_sb_base;
180 u8 *sb_virt;
181 phys_addr_t sb_phys;
182 size_t sb_length;
183 struct ion_handle *ihandle; /* Retrieve phy addr */
184 wait_queue_head_t rcv_req_wq;
185 int rcv_req_flag;
186 int send_resp_flag;
187 bool listener_in_use;
188 /* wq for thread blocked on this listener*/
189 wait_queue_head_t listener_block_app_wq;
190 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
191 uint32_t sglist_cnt;
Zhen Kong26e62742018-05-04 17:19:06 -0700192 int abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700193};
194
195struct qseecom_registered_app_list {
196 struct list_head list;
197 u32 app_id;
198 u32 ref_cnt;
199 char app_name[MAX_APP_NAME_SIZE];
200 u32 app_arch;
201 bool app_blocked;
Zhen Kongdea10592018-07-30 17:50:10 -0700202 u32 check_block;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700203 u32 blocked_on_listener_id;
204};
205
206struct qseecom_registered_kclient_list {
207 struct list_head list;
208 struct qseecom_handle *handle;
209};
210
211struct qseecom_ce_info_use {
212 unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
213 unsigned int unit_num;
214 unsigned int num_ce_pipe_entries;
215 struct qseecom_ce_pipe_entry *ce_pipe_entry;
216 bool alloc;
217 uint32_t type;
218};
219
220struct ce_hw_usage_info {
221 uint32_t qsee_ce_hw_instance;
222 uint32_t num_fde;
223 struct qseecom_ce_info_use *fde;
224 uint32_t num_pfe;
225 struct qseecom_ce_info_use *pfe;
226};
227
228struct qseecom_clk {
229 enum qseecom_ce_hw_instance instance;
230 struct clk *ce_core_clk;
231 struct clk *ce_clk;
232 struct clk *ce_core_src_clk;
233 struct clk *ce_bus_clk;
234 uint32_t clk_access_cnt;
235};
236
237struct qseecom_control {
238 struct ion_client *ion_clnt; /* Ion client */
239 struct list_head registered_listener_list_head;
240 spinlock_t registered_listener_list_lock;
241
242 struct list_head registered_app_list_head;
243 spinlock_t registered_app_list_lock;
244
245 struct list_head registered_kclient_list_head;
246 spinlock_t registered_kclient_list_lock;
247
248 wait_queue_head_t send_resp_wq;
249 int send_resp_flag;
250
251 uint32_t qseos_version;
252 uint32_t qsee_version;
253 struct device *pdev;
254 bool whitelist_support;
255 bool commonlib_loaded;
256 bool commonlib64_loaded;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700257 struct ce_hw_usage_info ce_info;
258
259 int qsee_bw_count;
260 int qsee_sfpb_bw_count;
261
262 uint32_t qsee_perf_client;
263 struct qseecom_clk qsee;
264 struct qseecom_clk ce_drv;
265
266 bool support_bus_scaling;
267 bool support_fde;
268 bool support_pfe;
269 bool fde_key_size;
270 uint32_t cumulative_mode;
271 enum qseecom_bandwidth_request_mode current_mode;
272 struct timer_list bw_scale_down_timer;
273 struct work_struct bw_inactive_req_ws;
274 struct cdev cdev;
275 bool timer_running;
276 bool no_clock_support;
277 unsigned int ce_opp_freq_hz;
278 bool appsbl_qseecom_support;
279 uint32_t qsee_reentrancy_support;
280
281 uint32_t app_block_ref_cnt;
282 wait_queue_head_t app_block_wq;
283 atomic_t qseecom_state;
284 int is_apps_region_protected;
Zhen Kong2f60f492017-06-29 15:22:14 -0700285 bool smcinvoke_support;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700286};
287
288struct qseecom_sec_buf_fd_info {
289 bool is_sec_buf_fd;
290 size_t size;
291 void *vbase;
292 dma_addr_t pbase;
293};
294
/* 32-bit {buffer, size} memory reference as exchanged with TZ. */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};
299
300struct qseecom_client_handle {
301 u32 app_id;
302 u8 *sb_virt;
303 phys_addr_t sb_phys;
304 unsigned long user_virt_sb_base;
305 size_t sb_length;
306 struct ion_handle *ihandle; /* Retrieve phy addr */
307 char app_name[MAX_APP_NAME_SIZE];
308 u32 app_arch;
309 struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
310};
311
312struct qseecom_listener_handle {
313 u32 id;
314};
315
316static struct qseecom_control qseecom;
317
318struct qseecom_dev_handle {
319 enum qseecom_client_handle_type type;
320 union {
321 struct qseecom_client_handle client;
322 struct qseecom_listener_handle listener;
323 };
324 bool released;
325 int abort;
326 wait_queue_head_t abort_wq;
327 atomic_t ioctl_count;
328 bool perf_enabled;
329 bool fast_load_enabled;
330 enum qseecom_bandwidth_request_mode mode;
331 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
332 uint32_t sglist_cnt;
333 bool use_legacy_cmd;
334};
335
336struct qseecom_key_id_usage_desc {
337 uint8_t desc[QSEECOM_KEY_ID_SIZE];
338};
339
/* Resolved CE unit / pipe-pair selection for a crypto request. */
struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};
345
346static struct qseecom_key_id_usage_desc key_id_array[] = {
347 {
348 .desc = "Undefined Usage Index",
349 },
350
351 {
352 .desc = "Full Disk Encryption",
353 },
354
355 {
356 .desc = "Per File Encryption",
357 },
358
359 {
360 .desc = "UFS ICE Full Disk Encryption",
361 },
362
363 {
364 .desc = "SDCC ICE Full Disk Encryption",
365 },
366};
367
368/* Function proto types */
369static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
370static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
371static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
372static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
373static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
374static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
375 char *cmnlib_name);
376static int qseecom_enable_ice_setup(int usage);
377static int qseecom_disable_ice_setup(int usage);
378static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
379static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
380 void __user *argp);
381static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
382 void __user *argp);
383static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
384 void __user *argp);
385
386static int get_qseecom_keymaster_status(char *str)
387{
388 get_option(&str, &qseecom.is_apps_region_protected);
389 return 1;
390}
391__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
392
393static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
394 const void *req_buf, void *resp_buf)
395{
396 int ret = 0;
397 uint32_t smc_id = 0;
398 uint32_t qseos_cmd_id = 0;
399 struct scm_desc desc = {0};
400 struct qseecom_command_scm_resp *scm_resp = NULL;
401
402 if (!req_buf || !resp_buf) {
403 pr_err("Invalid buffer pointer\n");
404 return -EINVAL;
405 }
406 qseos_cmd_id = *(uint32_t *)req_buf;
407 scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
408
409 switch (svc_id) {
410 case 6: {
411 if (tz_cmd_id == 3) {
412 smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
413 desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
414 desc.args[0] = *(uint32_t *)req_buf;
415 } else {
416 pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
417 svc_id, tz_cmd_id);
418 return -EINVAL;
419 }
420 ret = scm_call2(smc_id, &desc);
421 break;
422 }
423 case SCM_SVC_ES: {
424 switch (tz_cmd_id) {
425 case SCM_SAVE_PARTITION_HASH_ID: {
426 u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
427 struct qseecom_save_partition_hash_req *p_hash_req =
428 (struct qseecom_save_partition_hash_req *)
429 req_buf;
430 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
431
432 if (!tzbuf)
433 return -ENOMEM;
434 memset(tzbuf, 0, tzbuflen);
435 memcpy(tzbuf, p_hash_req->digest,
436 SHA256_DIGEST_LENGTH);
437 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
438 smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
439 desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
440 desc.args[0] = p_hash_req->partition_id;
441 desc.args[1] = virt_to_phys(tzbuf);
442 desc.args[2] = SHA256_DIGEST_LENGTH;
443 ret = scm_call2(smc_id, &desc);
444 kzfree(tzbuf);
445 break;
446 }
447 default: {
448 pr_err("tz_cmd_id %d is not supported by scm_call2\n",
449 tz_cmd_id);
450 ret = -EINVAL;
451 break;
452 }
453 } /* end of switch (tz_cmd_id) */
454 break;
455 } /* end of case SCM_SVC_ES */
456 case SCM_SVC_TZSCHEDULER: {
457 switch (qseos_cmd_id) {
458 case QSEOS_APP_START_COMMAND: {
459 struct qseecom_load_app_ireq *req;
460 struct qseecom_load_app_64bit_ireq *req_64bit;
461
462 smc_id = TZ_OS_APP_START_ID;
463 desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
464 if (qseecom.qsee_version < QSEE_VERSION_40) {
465 req = (struct qseecom_load_app_ireq *)req_buf;
466 desc.args[0] = req->mdt_len;
467 desc.args[1] = req->img_len;
468 desc.args[2] = req->phy_addr;
469 } else {
470 req_64bit =
471 (struct qseecom_load_app_64bit_ireq *)
472 req_buf;
473 desc.args[0] = req_64bit->mdt_len;
474 desc.args[1] = req_64bit->img_len;
475 desc.args[2] = req_64bit->phy_addr;
476 }
477 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
478 ret = scm_call2(smc_id, &desc);
479 break;
480 }
481 case QSEOS_APP_SHUTDOWN_COMMAND: {
482 struct qseecom_unload_app_ireq *req;
483
484 req = (struct qseecom_unload_app_ireq *)req_buf;
485 smc_id = TZ_OS_APP_SHUTDOWN_ID;
486 desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
487 desc.args[0] = req->app_id;
488 ret = scm_call2(smc_id, &desc);
489 break;
490 }
491 case QSEOS_APP_LOOKUP_COMMAND: {
492 struct qseecom_check_app_ireq *req;
493 u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
494 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
495
496 if (!tzbuf)
497 return -ENOMEM;
498 req = (struct qseecom_check_app_ireq *)req_buf;
499 pr_debug("Lookup app_name = %s\n", req->app_name);
500 strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
501 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
502 smc_id = TZ_OS_APP_LOOKUP_ID;
503 desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
504 desc.args[0] = virt_to_phys(tzbuf);
505 desc.args[1] = strlen(req->app_name);
506 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
507 ret = scm_call2(smc_id, &desc);
508 kzfree(tzbuf);
509 break;
510 }
511 case QSEOS_APP_REGION_NOTIFICATION: {
512 struct qsee_apps_region_info_ireq *req;
513 struct qsee_apps_region_info_64bit_ireq *req_64bit;
514
515 smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
516 desc.arginfo =
517 TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
518 if (qseecom.qsee_version < QSEE_VERSION_40) {
519 req = (struct qsee_apps_region_info_ireq *)
520 req_buf;
521 desc.args[0] = req->addr;
522 desc.args[1] = req->size;
523 } else {
524 req_64bit =
525 (struct qsee_apps_region_info_64bit_ireq *)
526 req_buf;
527 desc.args[0] = req_64bit->addr;
528 desc.args[1] = req_64bit->size;
529 }
530 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
531 ret = scm_call2(smc_id, &desc);
532 break;
533 }
534 case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
535 struct qseecom_load_lib_image_ireq *req;
536 struct qseecom_load_lib_image_64bit_ireq *req_64bit;
537
538 smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
539 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
540 if (qseecom.qsee_version < QSEE_VERSION_40) {
541 req = (struct qseecom_load_lib_image_ireq *)
542 req_buf;
543 desc.args[0] = req->mdt_len;
544 desc.args[1] = req->img_len;
545 desc.args[2] = req->phy_addr;
546 } else {
547 req_64bit =
548 (struct qseecom_load_lib_image_64bit_ireq *)
549 req_buf;
550 desc.args[0] = req_64bit->mdt_len;
551 desc.args[1] = req_64bit->img_len;
552 desc.args[2] = req_64bit->phy_addr;
553 }
554 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
555 ret = scm_call2(smc_id, &desc);
556 break;
557 }
558 case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
559 smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
560 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
561 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
562 ret = scm_call2(smc_id, &desc);
563 break;
564 }
565 case QSEOS_REGISTER_LISTENER: {
566 struct qseecom_register_listener_ireq *req;
567 struct qseecom_register_listener_64bit_ireq *req_64bit;
568
569 desc.arginfo =
570 TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
571 if (qseecom.qsee_version < QSEE_VERSION_40) {
572 req = (struct qseecom_register_listener_ireq *)
573 req_buf;
574 desc.args[0] = req->listener_id;
575 desc.args[1] = req->sb_ptr;
576 desc.args[2] = req->sb_len;
577 } else {
578 req_64bit =
579 (struct qseecom_register_listener_64bit_ireq *)
580 req_buf;
581 desc.args[0] = req_64bit->listener_id;
582 desc.args[1] = req_64bit->sb_ptr;
583 desc.args[2] = req_64bit->sb_len;
584 }
Zhen Kong2f60f492017-06-29 15:22:14 -0700585 qseecom.smcinvoke_support = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700586 smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
587 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
588 ret = scm_call2(smc_id, &desc);
589 if (ret) {
Zhen Kong2f60f492017-06-29 15:22:14 -0700590 qseecom.smcinvoke_support = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700591 smc_id = TZ_OS_REGISTER_LISTENER_ID;
592 __qseecom_reentrancy_check_if_no_app_blocked(
593 smc_id);
594 ret = scm_call2(smc_id, &desc);
595 }
596 break;
597 }
598 case QSEOS_DEREGISTER_LISTENER: {
599 struct qseecom_unregister_listener_ireq *req;
600
601 req = (struct qseecom_unregister_listener_ireq *)
602 req_buf;
603 smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
604 desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
605 desc.args[0] = req->listener_id;
606 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
607 ret = scm_call2(smc_id, &desc);
608 break;
609 }
610 case QSEOS_LISTENER_DATA_RSP_COMMAND: {
611 struct qseecom_client_listener_data_irsp *req;
612
613 req = (struct qseecom_client_listener_data_irsp *)
614 req_buf;
615 smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
616 desc.arginfo =
617 TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
618 desc.args[0] = req->listener_id;
619 desc.args[1] = req->status;
620 ret = scm_call2(smc_id, &desc);
621 break;
622 }
623 case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
624 struct qseecom_client_listener_data_irsp *req;
625 struct qseecom_client_listener_data_64bit_irsp *req_64;
626
627 smc_id =
628 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
629 desc.arginfo =
630 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
631 if (qseecom.qsee_version < QSEE_VERSION_40) {
632 req =
633 (struct qseecom_client_listener_data_irsp *)
634 req_buf;
635 desc.args[0] = req->listener_id;
636 desc.args[1] = req->status;
637 desc.args[2] = req->sglistinfo_ptr;
638 desc.args[3] = req->sglistinfo_len;
639 } else {
640 req_64 =
641 (struct qseecom_client_listener_data_64bit_irsp *)
642 req_buf;
643 desc.args[0] = req_64->listener_id;
644 desc.args[1] = req_64->status;
645 desc.args[2] = req_64->sglistinfo_ptr;
646 desc.args[3] = req_64->sglistinfo_len;
647 }
648 ret = scm_call2(smc_id, &desc);
649 break;
650 }
651 case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
652 struct qseecom_load_app_ireq *req;
653 struct qseecom_load_app_64bit_ireq *req_64bit;
654
655 smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
656 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
657 if (qseecom.qsee_version < QSEE_VERSION_40) {
658 req = (struct qseecom_load_app_ireq *)req_buf;
659 desc.args[0] = req->mdt_len;
660 desc.args[1] = req->img_len;
661 desc.args[2] = req->phy_addr;
662 } else {
663 req_64bit =
664 (struct qseecom_load_app_64bit_ireq *)req_buf;
665 desc.args[0] = req_64bit->mdt_len;
666 desc.args[1] = req_64bit->img_len;
667 desc.args[2] = req_64bit->phy_addr;
668 }
669 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
670 ret = scm_call2(smc_id, &desc);
671 break;
672 }
673 case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
674 smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
675 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
676 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
677 ret = scm_call2(smc_id, &desc);
678 break;
679 }
680
681 case QSEOS_CLIENT_SEND_DATA_COMMAND: {
682 struct qseecom_client_send_data_ireq *req;
683 struct qseecom_client_send_data_64bit_ireq *req_64bit;
684
685 smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
686 desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
687 if (qseecom.qsee_version < QSEE_VERSION_40) {
688 req = (struct qseecom_client_send_data_ireq *)
689 req_buf;
690 desc.args[0] = req->app_id;
691 desc.args[1] = req->req_ptr;
692 desc.args[2] = req->req_len;
693 desc.args[3] = req->rsp_ptr;
694 desc.args[4] = req->rsp_len;
695 } else {
696 req_64bit =
697 (struct qseecom_client_send_data_64bit_ireq *)
698 req_buf;
699 desc.args[0] = req_64bit->app_id;
700 desc.args[1] = req_64bit->req_ptr;
701 desc.args[2] = req_64bit->req_len;
702 desc.args[3] = req_64bit->rsp_ptr;
703 desc.args[4] = req_64bit->rsp_len;
704 }
705 ret = scm_call2(smc_id, &desc);
706 break;
707 }
708 case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
709 struct qseecom_client_send_data_ireq *req;
710 struct qseecom_client_send_data_64bit_ireq *req_64bit;
711
712 smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
713 desc.arginfo =
714 TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
715 if (qseecom.qsee_version < QSEE_VERSION_40) {
716 req = (struct qseecom_client_send_data_ireq *)
717 req_buf;
718 desc.args[0] = req->app_id;
719 desc.args[1] = req->req_ptr;
720 desc.args[2] = req->req_len;
721 desc.args[3] = req->rsp_ptr;
722 desc.args[4] = req->rsp_len;
723 desc.args[5] = req->sglistinfo_ptr;
724 desc.args[6] = req->sglistinfo_len;
725 } else {
726 req_64bit =
727 (struct qseecom_client_send_data_64bit_ireq *)
728 req_buf;
729 desc.args[0] = req_64bit->app_id;
730 desc.args[1] = req_64bit->req_ptr;
731 desc.args[2] = req_64bit->req_len;
732 desc.args[3] = req_64bit->rsp_ptr;
733 desc.args[4] = req_64bit->rsp_len;
734 desc.args[5] = req_64bit->sglistinfo_ptr;
735 desc.args[6] = req_64bit->sglistinfo_len;
736 }
737 ret = scm_call2(smc_id, &desc);
738 break;
739 }
740 case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
741 struct qseecom_client_send_service_ireq *req;
742
743 req = (struct qseecom_client_send_service_ireq *)
744 req_buf;
745 smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
746 desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
747 desc.args[0] = req->key_type;
748 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
749 ret = scm_call2(smc_id, &desc);
750 break;
751 }
752 case QSEOS_RPMB_ERASE_COMMAND: {
753 smc_id = TZ_OS_RPMB_ERASE_ID;
754 desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
755 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
756 ret = scm_call2(smc_id, &desc);
757 break;
758 }
759 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
760 smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
761 desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
762 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
763 ret = scm_call2(smc_id, &desc);
764 break;
765 }
766 case QSEOS_GENERATE_KEY: {
767 u32 tzbuflen = PAGE_ALIGN(sizeof
768 (struct qseecom_key_generate_ireq) -
769 sizeof(uint32_t));
770 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
771
772 if (!tzbuf)
773 return -ENOMEM;
774 memset(tzbuf, 0, tzbuflen);
775 memcpy(tzbuf, req_buf + sizeof(uint32_t),
776 (sizeof(struct qseecom_key_generate_ireq) -
777 sizeof(uint32_t)));
778 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
779 smc_id = TZ_OS_KS_GEN_KEY_ID;
780 desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
781 desc.args[0] = virt_to_phys(tzbuf);
782 desc.args[1] = tzbuflen;
783 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
784 ret = scm_call2(smc_id, &desc);
785 kzfree(tzbuf);
786 break;
787 }
788 case QSEOS_DELETE_KEY: {
789 u32 tzbuflen = PAGE_ALIGN(sizeof
790 (struct qseecom_key_delete_ireq) -
791 sizeof(uint32_t));
792 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
793
794 if (!tzbuf)
795 return -ENOMEM;
796 memset(tzbuf, 0, tzbuflen);
797 memcpy(tzbuf, req_buf + sizeof(uint32_t),
798 (sizeof(struct qseecom_key_delete_ireq) -
799 sizeof(uint32_t)));
800 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
801 smc_id = TZ_OS_KS_DEL_KEY_ID;
802 desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
803 desc.args[0] = virt_to_phys(tzbuf);
804 desc.args[1] = tzbuflen;
805 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
806 ret = scm_call2(smc_id, &desc);
807 kzfree(tzbuf);
808 break;
809 }
810 case QSEOS_SET_KEY: {
811 u32 tzbuflen = PAGE_ALIGN(sizeof
812 (struct qseecom_key_select_ireq) -
813 sizeof(uint32_t));
814 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
815
816 if (!tzbuf)
817 return -ENOMEM;
818 memset(tzbuf, 0, tzbuflen);
819 memcpy(tzbuf, req_buf + sizeof(uint32_t),
820 (sizeof(struct qseecom_key_select_ireq) -
821 sizeof(uint32_t)));
822 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
823 smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
824 desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
825 desc.args[0] = virt_to_phys(tzbuf);
826 desc.args[1] = tzbuflen;
827 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
828 ret = scm_call2(smc_id, &desc);
829 kzfree(tzbuf);
830 break;
831 }
832 case QSEOS_UPDATE_KEY_USERINFO: {
833 u32 tzbuflen = PAGE_ALIGN(sizeof
834 (struct qseecom_key_userinfo_update_ireq) -
835 sizeof(uint32_t));
836 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
837
838 if (!tzbuf)
839 return -ENOMEM;
840 memset(tzbuf, 0, tzbuflen);
841 memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
842 (struct qseecom_key_userinfo_update_ireq) -
843 sizeof(uint32_t)));
844 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
845 smc_id = TZ_OS_KS_UPDATE_KEY_ID;
846 desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
847 desc.args[0] = virt_to_phys(tzbuf);
848 desc.args[1] = tzbuflen;
849 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
850 ret = scm_call2(smc_id, &desc);
851 kzfree(tzbuf);
852 break;
853 }
854 case QSEOS_TEE_OPEN_SESSION: {
855 struct qseecom_qteec_ireq *req;
856 struct qseecom_qteec_64bit_ireq *req_64bit;
857
858 smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
859 desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
860 if (qseecom.qsee_version < QSEE_VERSION_40) {
861 req = (struct qseecom_qteec_ireq *)req_buf;
862 desc.args[0] = req->app_id;
863 desc.args[1] = req->req_ptr;
864 desc.args[2] = req->req_len;
865 desc.args[3] = req->resp_ptr;
866 desc.args[4] = req->resp_len;
867 } else {
868 req_64bit = (struct qseecom_qteec_64bit_ireq *)
869 req_buf;
870 desc.args[0] = req_64bit->app_id;
871 desc.args[1] = req_64bit->req_ptr;
872 desc.args[2] = req_64bit->req_len;
873 desc.args[3] = req_64bit->resp_ptr;
874 desc.args[4] = req_64bit->resp_len;
875 }
876 ret = scm_call2(smc_id, &desc);
877 break;
878 }
879 case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
880 struct qseecom_qteec_ireq *req;
881 struct qseecom_qteec_64bit_ireq *req_64bit;
882
883 smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
884 desc.arginfo =
885 TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
886 if (qseecom.qsee_version < QSEE_VERSION_40) {
887 req = (struct qseecom_qteec_ireq *)req_buf;
888 desc.args[0] = req->app_id;
889 desc.args[1] = req->req_ptr;
890 desc.args[2] = req->req_len;
891 desc.args[3] = req->resp_ptr;
892 desc.args[4] = req->resp_len;
893 desc.args[5] = req->sglistinfo_ptr;
894 desc.args[6] = req->sglistinfo_len;
895 } else {
896 req_64bit = (struct qseecom_qteec_64bit_ireq *)
897 req_buf;
898 desc.args[0] = req_64bit->app_id;
899 desc.args[1] = req_64bit->req_ptr;
900 desc.args[2] = req_64bit->req_len;
901 desc.args[3] = req_64bit->resp_ptr;
902 desc.args[4] = req_64bit->resp_len;
903 desc.args[5] = req_64bit->sglistinfo_ptr;
904 desc.args[6] = req_64bit->sglistinfo_len;
905 }
906 ret = scm_call2(smc_id, &desc);
907 break;
908 }
909 case QSEOS_TEE_INVOKE_COMMAND: {
910 struct qseecom_qteec_ireq *req;
911 struct qseecom_qteec_64bit_ireq *req_64bit;
912
913 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
914 desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
915 if (qseecom.qsee_version < QSEE_VERSION_40) {
916 req = (struct qseecom_qteec_ireq *)req_buf;
917 desc.args[0] = req->app_id;
918 desc.args[1] = req->req_ptr;
919 desc.args[2] = req->req_len;
920 desc.args[3] = req->resp_ptr;
921 desc.args[4] = req->resp_len;
922 } else {
923 req_64bit = (struct qseecom_qteec_64bit_ireq *)
924 req_buf;
925 desc.args[0] = req_64bit->app_id;
926 desc.args[1] = req_64bit->req_ptr;
927 desc.args[2] = req_64bit->req_len;
928 desc.args[3] = req_64bit->resp_ptr;
929 desc.args[4] = req_64bit->resp_len;
930 }
931 ret = scm_call2(smc_id, &desc);
932 break;
933 }
934 case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
935 struct qseecom_qteec_ireq *req;
936 struct qseecom_qteec_64bit_ireq *req_64bit;
937
938 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
939 desc.arginfo =
940 TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
941 if (qseecom.qsee_version < QSEE_VERSION_40) {
942 req = (struct qseecom_qteec_ireq *)req_buf;
943 desc.args[0] = req->app_id;
944 desc.args[1] = req->req_ptr;
945 desc.args[2] = req->req_len;
946 desc.args[3] = req->resp_ptr;
947 desc.args[4] = req->resp_len;
948 desc.args[5] = req->sglistinfo_ptr;
949 desc.args[6] = req->sglistinfo_len;
950 } else {
951 req_64bit = (struct qseecom_qteec_64bit_ireq *)
952 req_buf;
953 desc.args[0] = req_64bit->app_id;
954 desc.args[1] = req_64bit->req_ptr;
955 desc.args[2] = req_64bit->req_len;
956 desc.args[3] = req_64bit->resp_ptr;
957 desc.args[4] = req_64bit->resp_len;
958 desc.args[5] = req_64bit->sglistinfo_ptr;
959 desc.args[6] = req_64bit->sglistinfo_len;
960 }
961 ret = scm_call2(smc_id, &desc);
962 break;
963 }
964 case QSEOS_TEE_CLOSE_SESSION: {
965 struct qseecom_qteec_ireq *req;
966 struct qseecom_qteec_64bit_ireq *req_64bit;
967
968 smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
969 desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
970 if (qseecom.qsee_version < QSEE_VERSION_40) {
971 req = (struct qseecom_qteec_ireq *)req_buf;
972 desc.args[0] = req->app_id;
973 desc.args[1] = req->req_ptr;
974 desc.args[2] = req->req_len;
975 desc.args[3] = req->resp_ptr;
976 desc.args[4] = req->resp_len;
977 } else {
978 req_64bit = (struct qseecom_qteec_64bit_ireq *)
979 req_buf;
980 desc.args[0] = req_64bit->app_id;
981 desc.args[1] = req_64bit->req_ptr;
982 desc.args[2] = req_64bit->req_len;
983 desc.args[3] = req_64bit->resp_ptr;
984 desc.args[4] = req_64bit->resp_len;
985 }
986 ret = scm_call2(smc_id, &desc);
987 break;
988 }
989 case QSEOS_TEE_REQUEST_CANCELLATION: {
990 struct qseecom_qteec_ireq *req;
991 struct qseecom_qteec_64bit_ireq *req_64bit;
992
993 smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
994 desc.arginfo =
995 TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
996 if (qseecom.qsee_version < QSEE_VERSION_40) {
997 req = (struct qseecom_qteec_ireq *)req_buf;
998 desc.args[0] = req->app_id;
999 desc.args[1] = req->req_ptr;
1000 desc.args[2] = req->req_len;
1001 desc.args[3] = req->resp_ptr;
1002 desc.args[4] = req->resp_len;
1003 } else {
1004 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1005 req_buf;
1006 desc.args[0] = req_64bit->app_id;
1007 desc.args[1] = req_64bit->req_ptr;
1008 desc.args[2] = req_64bit->req_len;
1009 desc.args[3] = req_64bit->resp_ptr;
1010 desc.args[4] = req_64bit->resp_len;
1011 }
1012 ret = scm_call2(smc_id, &desc);
1013 break;
1014 }
1015 case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
1016 struct qseecom_continue_blocked_request_ireq *req =
1017 (struct qseecom_continue_blocked_request_ireq *)
1018 req_buf;
Zhen Kong2f60f492017-06-29 15:22:14 -07001019 if (qseecom.smcinvoke_support)
1020 smc_id =
1021 TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
1022 else
1023 smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001024 desc.arginfo =
1025 TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
Zhen Kong2f60f492017-06-29 15:22:14 -07001026 desc.args[0] = req->app_or_session_id;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001027 ret = scm_call2(smc_id, &desc);
1028 break;
1029 }
1030 default: {
1031 pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
1032 qseos_cmd_id);
1033 ret = -EINVAL;
1034 break;
1035 }
1036 } /*end of switch (qsee_cmd_id) */
1037 break;
1038 } /*end of case SCM_SVC_TZSCHEDULER*/
1039 default: {
1040 pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
1041 svc_id);
1042 ret = -EINVAL;
1043 break;
1044 }
1045 } /*end of switch svc_id */
1046 scm_resp->result = desc.ret[0];
1047 scm_resp->resp_type = desc.ret[1];
1048 scm_resp->data = desc.ret[2];
1049 pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
1050 svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
1051 pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
1052 scm_resp->result, scm_resp->resp_type, scm_resp->data);
1053 return ret;
1054}
1055
1056
1057static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1058 size_t cmd_len, void *resp_buf, size_t resp_len)
1059{
1060 if (!is_scm_armv8())
1061 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1062 resp_buf, resp_len);
1063 else
1064 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1065}
1066
1067static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1068 struct qseecom_register_listener_req *svc)
1069{
1070 struct qseecom_registered_listener_list *ptr;
1071 int unique = 1;
1072 unsigned long flags;
1073
1074 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1075 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1076 if (ptr->svc.listener_id == svc->listener_id) {
1077 pr_err("Service id: %u is already registered\n",
1078 ptr->svc.listener_id);
1079 unique = 0;
1080 break;
1081 }
1082 }
1083 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1084 return unique;
1085}
1086
1087static struct qseecom_registered_listener_list *__qseecom_find_svc(
1088 int32_t listener_id)
1089{
1090 struct qseecom_registered_listener_list *entry = NULL;
1091 unsigned long flags;
1092
1093 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1094 list_for_each_entry(entry,
1095 &qseecom.registered_listener_list_head, list) {
1096 if (entry->svc.listener_id == listener_id)
1097 break;
1098 }
1099 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1100
1101 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1102 pr_err("Service id: %u is not found\n", listener_id);
1103 return NULL;
1104 }
1105
1106 return entry;
1107}
1108
1109static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1110 struct qseecom_dev_handle *handle,
1111 struct qseecom_register_listener_req *listener)
1112{
1113 int ret = 0;
1114 struct qseecom_register_listener_ireq req;
1115 struct qseecom_register_listener_64bit_ireq req_64bit;
1116 struct qseecom_command_scm_resp resp;
1117 ion_phys_addr_t pa;
1118 void *cmd_buf = NULL;
1119 size_t cmd_len;
1120
1121 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001122 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001123 listener->ifd_data_fd);
1124 if (IS_ERR_OR_NULL(svc->ihandle)) {
1125 pr_err("Ion client could not retrieve the handle\n");
1126 return -ENOMEM;
1127 }
1128
1129 /* Get the physical address of the ION BUF */
1130 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1131 if (ret) {
1132 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1133 ret);
1134 return ret;
1135 }
1136 /* Populate the structure for sending scm call to load image */
1137 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1138 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1139 pr_err("ION memory mapping for listener shared buffer failed\n");
1140 return -ENOMEM;
1141 }
1142 svc->sb_phys = (phys_addr_t)pa;
1143
1144 if (qseecom.qsee_version < QSEE_VERSION_40) {
1145 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1146 req.listener_id = svc->svc.listener_id;
1147 req.sb_len = svc->sb_length;
1148 req.sb_ptr = (uint32_t)svc->sb_phys;
1149 cmd_buf = (void *)&req;
1150 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1151 } else {
1152 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1153 req_64bit.listener_id = svc->svc.listener_id;
1154 req_64bit.sb_len = svc->sb_length;
1155 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1156 cmd_buf = (void *)&req_64bit;
1157 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1158 }
1159
1160 resp.result = QSEOS_RESULT_INCOMPLETE;
1161
1162 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1163 &resp, sizeof(resp));
1164 if (ret) {
1165 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1166 return -EINVAL;
1167 }
1168
1169 if (resp.result != QSEOS_RESULT_SUCCESS) {
1170 pr_err("Error SB registration req: resp.result = %d\n",
1171 resp.result);
1172 return -EPERM;
1173 }
1174 return 0;
1175}
1176
1177static int qseecom_register_listener(struct qseecom_dev_handle *data,
1178 void __user *argp)
1179{
1180 int ret = 0;
1181 unsigned long flags;
1182 struct qseecom_register_listener_req rcvd_lstnr;
1183 struct qseecom_registered_listener_list *new_entry;
1184
1185 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1186 if (ret) {
1187 pr_err("copy_from_user failed\n");
1188 return ret;
1189 }
1190 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1191 rcvd_lstnr.sb_size))
1192 return -EFAULT;
1193
Zhen Kong3c674612018-09-06 22:51:27 -07001194 data->listener.id = rcvd_lstnr.listener_id;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001195 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001196 pr_err("Service %d is not unique and failed to register\n",
1197 rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001198 data->released = true;
1199 return -EBUSY;
1200 }
1201
1202 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1203 if (!new_entry)
1204 return -ENOMEM;
1205 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1206 new_entry->rcv_req_flag = 0;
1207
1208 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1209 new_entry->sb_length = rcvd_lstnr.sb_size;
1210 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1211 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001212 pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
1213 rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001214 kzfree(new_entry);
1215 return -ENOMEM;
1216 }
1217
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001218 init_waitqueue_head(&new_entry->rcv_req_wq);
1219 init_waitqueue_head(&new_entry->listener_block_app_wq);
1220 new_entry->send_resp_flag = 0;
1221 new_entry->listener_in_use = false;
1222 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1223 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1224 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1225
Zhen Kong3c674612018-09-06 22:51:27 -07001226 pr_warn("Service %d is registered\n", rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001227 return ret;
1228}
1229
Zhen Kong26e62742018-05-04 17:19:06 -07001230static void __qseecom_listener_abort_all(int abort)
1231{
1232 struct qseecom_registered_listener_list *entry = NULL;
1233 unsigned long flags;
1234
1235 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1236 list_for_each_entry(entry,
1237 &qseecom.registered_listener_list_head, list) {
1238 pr_debug("set abort %d for listener %d\n",
1239 abort, entry->svc.listener_id);
1240 entry->abort = abort;
1241 }
1242 if (abort)
1243 wake_up_interruptible_all(&qseecom.send_resp_wq);
1244 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1245}
1246
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001247static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
1248{
1249 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001250 struct qseecom_register_listener_ireq req;
1251 struct qseecom_registered_listener_list *ptr_svc = NULL;
1252 struct qseecom_command_scm_resp resp;
1253 struct ion_handle *ihandle = NULL; /* Retrieve phy addr */
1254
Zhen Kong3c674612018-09-06 22:51:27 -07001255 ptr_svc = __qseecom_find_svc(data->listener.id);
1256 if (!ptr_svc) {
1257 pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
1258 return -ENODATA;
1259 }
1260
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001261 req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
1262 req.listener_id = data->listener.id;
1263 resp.result = QSEOS_RESULT_INCOMPLETE;
1264
1265 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
1266 sizeof(req), &resp, sizeof(resp));
1267 if (ret) {
1268 pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
1269 ret, data->listener.id);
Zhen Kong3c674612018-09-06 22:51:27 -07001270 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001271 }
1272
1273 if (resp.result != QSEOS_RESULT_SUCCESS) {
1274 pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
1275 resp.result, data->listener.id);
Zhen Kong3c674612018-09-06 22:51:27 -07001276 ret = -EPERM;
1277 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001278 }
1279
1280 data->abort = 1;
Zhen Kong3c674612018-09-06 22:51:27 -07001281 ptr_svc->abort = 1;
1282 wake_up_all(&ptr_svc->rcv_req_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001283
1284 while (atomic_read(&data->ioctl_count) > 1) {
1285 if (wait_event_freezable(data->abort_wq,
1286 atomic_read(&data->ioctl_count) <= 1)) {
1287 pr_err("Interrupted from abort\n");
1288 ret = -ERESTARTSYS;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001289 }
1290 }
1291
Zhen Kong3c674612018-09-06 22:51:27 -07001292exit:
1293 if (ptr_svc->sb_virt) {
1294 ihandle = ptr_svc->ihandle;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001295 if (!IS_ERR_OR_NULL(ihandle)) {
1296 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
1297 ion_free(qseecom.ion_clnt, ihandle);
1298 }
1299 }
Zhen Kong3c674612018-09-06 22:51:27 -07001300 list_del(&ptr_svc->list);
1301 kzfree(ptr_svc);
1302
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001303 data->released = true;
Zhen Kong3c674612018-09-06 22:51:27 -07001304 pr_warn("Service %d is unregistered\n", data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001305 return ret;
1306}
1307
/*
 * Apply a bus-bandwidth mode: manage the QSEE core source clock
 * (disable on INACTIVE, enable otherwise) and, if the mode changed,
 * issue the msm_bus scaling request. On a failed bus request the clock
 * change is rolled back.
 *
 * NOTE(review): qseecom.current_mode is updated even when
 * msm_bus_scale_client_update_request() failed, so the cached mode can
 * disagree with the actual bus vote — confirm whether this is intended.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	/* Only talk to the bus driver when the mode actually changes */
	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* Roll back the clock state change made above */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		qseecom.current_mode = mode;
	}
	return ret;
}
1344
/*
 * Workqueue handler scheduled by the bw_scale_down_timer: drop the bus
 * vote to INACTIVE if the timer is still considered running, then clear
 * the timer_running flag.
 *
 * NOTE(review): app_access_lock is taken before qsee_bw_mutex —
 * presumably to match the lock ordering used on the ioctl paths; verify
 * against the other call sites.
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
1357
/*
 * Timer expiry callback for bw_scale_down_timer. Runs in timer (atomic)
 * context, so it only queues bw_inactive_req_ws; the mutex-taking
 * bandwidth drop happens later in process context.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1362
1363static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1364{
1365 struct qseecom_clk *qclk;
1366 int ret = 0;
1367
1368 mutex_lock(&clk_access_lock);
1369 if (ce == CLK_QSEE)
1370 qclk = &qseecom.qsee;
1371 else
1372 qclk = &qseecom.ce_drv;
1373
1374 if (qclk->clk_access_cnt > 2) {
1375 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1376 ret = -EINVAL;
1377 goto err_dec_ref_cnt;
1378 }
1379 if (qclk->clk_access_cnt == 2)
1380 qclk->clk_access_cnt--;
1381
1382err_dec_ref_cnt:
1383 mutex_unlock(&clk_access_lock);
1384 return ret;
1385}
1386
1387
1388static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1389{
1390 int32_t ret = 0;
1391 int32_t request_mode = INACTIVE;
1392
1393 mutex_lock(&qsee_bw_mutex);
1394 if (mode == 0) {
1395 if (qseecom.cumulative_mode > MEDIUM)
1396 request_mode = HIGH;
1397 else
1398 request_mode = qseecom.cumulative_mode;
1399 } else {
1400 request_mode = mode;
1401 }
1402
1403 ret = __qseecom_set_msm_bus_request(request_mode);
1404 if (ret) {
1405 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1406 ret, request_mode);
1407 goto err_scale_timer;
1408 }
1409
1410 if (qseecom.timer_running) {
1411 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1412 if (ret) {
1413 pr_err("Failed to decrease clk ref count.\n");
1414 goto err_scale_timer;
1415 }
1416 del_timer_sync(&(qseecom.bw_scale_down_timer));
1417 qseecom.timer_running = false;
1418 }
1419err_scale_timer:
1420 mutex_unlock(&qsee_bw_mutex);
1421 return ret;
1422}
1423
1424
1425static int qseecom_unregister_bus_bandwidth_needs(
1426 struct qseecom_dev_handle *data)
1427{
1428 int32_t ret = 0;
1429
1430 qseecom.cumulative_mode -= data->mode;
1431 data->mode = INACTIVE;
1432
1433 return ret;
1434}
1435
1436static int __qseecom_register_bus_bandwidth_needs(
1437 struct qseecom_dev_handle *data, uint32_t request_mode)
1438{
1439 int32_t ret = 0;
1440
1441 if (data->mode == INACTIVE) {
1442 qseecom.cumulative_mode += request_mode;
1443 data->mode = request_mode;
1444 } else {
1445 if (data->mode != request_mode) {
1446 qseecom.cumulative_mode -= data->mode;
1447 qseecom.cumulative_mode += request_mode;
1448 data->mode = request_mode;
1449 }
1450 }
1451 return ret;
1452}
1453
1454static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1455{
1456 int ret = 0;
1457
1458 ret = qsee_vote_for_clock(data, CLK_DFAB);
1459 if (ret) {
1460 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1461 goto perf_enable_exit;
1462 }
1463 ret = qsee_vote_for_clock(data, CLK_SFPB);
1464 if (ret) {
1465 qsee_disable_clock_vote(data, CLK_DFAB);
1466 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1467 goto perf_enable_exit;
1468 }
1469
1470perf_enable_exit:
1471 return ret;
1472}
1473
1474static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1475 void __user *argp)
1476{
1477 int32_t ret = 0;
1478 int32_t req_mode;
1479
1480 if (qseecom.no_clock_support)
1481 return 0;
1482
1483 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1484 if (ret) {
1485 pr_err("copy_from_user failed\n");
1486 return ret;
1487 }
1488 if (req_mode > HIGH) {
1489 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1490 return -EINVAL;
1491 }
1492
1493 /*
1494 * Register bus bandwidth needs if bus scaling feature is enabled;
1495 * otherwise, qseecom enable/disable clocks for the client directly.
1496 */
1497 if (qseecom.support_bus_scaling) {
1498 mutex_lock(&qsee_bw_mutex);
1499 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1500 mutex_unlock(&qsee_bw_mutex);
1501 } else {
1502 pr_debug("Bus scaling feature is NOT enabled\n");
1503 pr_debug("request bandwidth mode %d for the client\n",
1504 req_mode);
1505 if (req_mode != INACTIVE) {
1506 ret = qseecom_perf_enable(data);
1507 if (ret)
1508 pr_err("Failed to vote for clock with err %d\n",
1509 ret);
1510 } else {
1511 qsee_disable_clock_vote(data, CLK_DFAB);
1512 qsee_disable_clock_vote(data, CLK_SFPB);
1513 }
1514 }
1515 return ret;
1516}
1517
1518static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1519{
1520 if (qseecom.no_clock_support)
1521 return;
1522
1523 mutex_lock(&qsee_bw_mutex);
1524 qseecom.bw_scale_down_timer.expires = jiffies +
1525 msecs_to_jiffies(duration);
1526 mod_timer(&(qseecom.bw_scale_down_timer),
1527 qseecom.bw_scale_down_timer.expires);
1528 qseecom.timer_running = true;
1529 mutex_unlock(&qsee_bw_mutex);
1530}
1531
1532static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1533{
1534 if (!qseecom.support_bus_scaling)
1535 qsee_disable_clock_vote(data, CLK_SFPB);
1536 else
1537 __qseecom_add_bw_scale_down_timer(
1538 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1539}
1540
1541static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1542{
1543 int ret = 0;
1544
1545 if (qseecom.support_bus_scaling) {
1546 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1547 if (ret)
1548 pr_err("Failed to set bw MEDIUM.\n");
1549 } else {
1550 ret = qsee_vote_for_clock(data, CLK_SFPB);
1551 if (ret)
1552 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1553 }
1554 return ret;
1555}
1556
1557static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1558 void __user *argp)
1559{
1560 ion_phys_addr_t pa;
1561 int32_t ret;
1562 struct qseecom_set_sb_mem_param_req req;
1563 size_t len;
1564
1565 /* Copy the relevant information needed for loading the image */
1566 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1567 return -EFAULT;
1568
1569 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1570 (req.sb_len == 0)) {
1571 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1572 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1573 return -EFAULT;
1574 }
1575 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1576 req.sb_len))
1577 return -EFAULT;
1578
1579 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001580 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001581 req.ifd_data_fd);
1582 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1583 pr_err("Ion client could not retrieve the handle\n");
1584 return -ENOMEM;
1585 }
1586 /* Get the physical address of the ION BUF */
1587 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1588 if (ret) {
1589
1590 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1591 ret);
1592 return ret;
1593 }
1594
1595 if (len < req.sb_len) {
1596 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1597 req.sb_len, len);
1598 return -EINVAL;
1599 }
1600 /* Populate the structure for sending scm call to load image */
1601 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1602 data->client.ihandle);
1603 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1604 pr_err("ION memory mapping for client shared buf failed\n");
1605 return -ENOMEM;
1606 }
1607 data->client.sb_phys = (phys_addr_t)pa;
1608 data->client.sb_length = req.sb_len;
1609 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1610 return 0;
1611}
1612
Zhen Kong26e62742018-05-04 17:19:06 -07001613static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1614 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001615{
1616 int ret;
1617
1618 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001619 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001620}
1621
1622static int __qseecom_reentrancy_listener_has_sent_rsp(
1623 struct qseecom_dev_handle *data,
1624 struct qseecom_registered_listener_list *ptr_svc)
1625{
1626 int ret;
1627
1628 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001629 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001630}
1631
1632static void __qseecom_clean_listener_sglistinfo(
1633 struct qseecom_registered_listener_list *ptr_svc)
1634{
1635 if (ptr_svc->sglist_cnt) {
1636 memset(ptr_svc->sglistinfo_ptr, 0,
1637 SGLISTINFO_TABLE_SIZE);
1638 ptr_svc->sglist_cnt = 0;
1639 }
1640}
1641
Zhen Kong25731112018-09-20 13:10:03 -07001642/* wake up listener receive request wq retry delay (ms) and max attemp count */
1643#define QSEECOM_WAKE_LISTENER_RCVWQ_DELAY 10
1644#define QSEECOM_WAKE_LISTENER_RCVWQ_MAX_ATTEMP 3
1645
1646static int __qseecom_retry_wake_up_listener_rcv_wq(
1647 struct qseecom_registered_listener_list *ptr_svc)
1648{
1649 int retry = 0;
1650
1651 while (ptr_svc->rcv_req_flag == 1 &&
1652 retry++ < QSEECOM_WAKE_LISTENER_RCVWQ_MAX_ATTEMP) {
1653 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1654 msleep(QSEECOM_WAKE_LISTENER_RCVWQ_DELAY);
1655 }
1656 return ptr_svc->rcv_req_flag == 1;
1657}
1658
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001659static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1660 struct qseecom_command_scm_resp *resp)
1661{
1662 int ret = 0;
1663 int rc = 0;
1664 uint32_t lstnr;
1665 unsigned long flags;
Zhen Kong7d500032018-08-06 16:58:31 -07001666 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
1667 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
1668 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001669 struct qseecom_registered_listener_list *ptr_svc = NULL;
1670 sigset_t new_sigset;
1671 sigset_t old_sigset;
1672 uint32_t status;
1673 void *cmd_buf = NULL;
1674 size_t cmd_len;
1675 struct sglist_info *table = NULL;
1676
1677 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1678 lstnr = resp->data;
1679 /*
1680 * Wake up blocking lsitener service with the lstnr id
1681 */
1682 spin_lock_irqsave(&qseecom.registered_listener_list_lock,
1683 flags);
1684 list_for_each_entry(ptr_svc,
1685 &qseecom.registered_listener_list_head, list) {
1686 if (ptr_svc->svc.listener_id == lstnr) {
1687 ptr_svc->listener_in_use = true;
1688 ptr_svc->rcv_req_flag = 1;
1689 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1690 break;
1691 }
1692 }
1693 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
1694 flags);
1695
1696 if (ptr_svc == NULL) {
1697 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07001698 rc = -EINVAL;
1699 status = QSEOS_RESULT_FAILURE;
1700 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001701 }
1702
1703 if (!ptr_svc->ihandle) {
1704 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07001705 rc = -EINVAL;
1706 status = QSEOS_RESULT_FAILURE;
1707 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001708 }
1709
1710 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07001711 pr_err("Service %d does not exist\n",
1712 lstnr);
1713 rc = -ERESTARTSYS;
1714 ptr_svc = NULL;
1715 status = QSEOS_RESULT_FAILURE;
1716 goto err_resp;
1717 }
1718
1719 if (ptr_svc->abort == 1) {
1720 pr_err("Service %d abort %d\n",
1721 lstnr, ptr_svc->abort);
1722 rc = -ENODEV;
1723 status = QSEOS_RESULT_FAILURE;
1724 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001725 }
Zhen Kong25731112018-09-20 13:10:03 -07001726
1727 if (ptr_svc->rcv_req_flag == 1 &&
1728 __qseecom_retry_wake_up_listener_rcv_wq(ptr_svc)) {
1729 pr_err("Service %d is not ready to receive request\n",
1730 lstnr);
1731 rc = -ENOENT;
1732 status = QSEOS_RESULT_FAILURE;
1733 goto err_resp;
1734 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001735 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1736
1737 /* initialize the new signal mask with all signals*/
1738 sigfillset(&new_sigset);
1739 /* block all signals */
1740 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1741
1742 do {
1743 /*
1744 * When reentrancy is not supported, check global
1745 * send_resp_flag; otherwise, check this listener's
1746 * send_resp_flag.
1747 */
1748 if (!qseecom.qsee_reentrancy_support &&
1749 !wait_event_freezable(qseecom.send_resp_wq,
Zhen Kong26e62742018-05-04 17:19:06 -07001750 __qseecom_listener_has_sent_rsp(
1751 data, ptr_svc))) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001752 break;
1753 }
1754
1755 if (qseecom.qsee_reentrancy_support &&
1756 !wait_event_freezable(qseecom.send_resp_wq,
1757 __qseecom_reentrancy_listener_has_sent_rsp(
1758 data, ptr_svc))) {
1759 break;
1760 }
1761 } while (1);
1762
1763 /* restore signal mask */
1764 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07001765 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001766 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
1767 data->client.app_id, lstnr, ret);
1768 rc = -ENODEV;
1769 status = QSEOS_RESULT_FAILURE;
1770 } else {
1771 status = QSEOS_RESULT_SUCCESS;
1772 }
Zhen Kong26e62742018-05-04 17:19:06 -07001773err_resp:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001774 qseecom.send_resp_flag = 0;
Zhen Kong7d500032018-08-06 16:58:31 -07001775 if (ptr_svc) {
1776 ptr_svc->send_resp_flag = 0;
1777 table = ptr_svc->sglistinfo_ptr;
1778 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001779 if (qseecom.qsee_version < QSEE_VERSION_40) {
1780 send_data_rsp.listener_id = lstnr;
1781 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001782 if (table) {
1783 send_data_rsp.sglistinfo_ptr =
1784 (uint32_t)virt_to_phys(table);
1785 send_data_rsp.sglistinfo_len =
1786 SGLISTINFO_TABLE_SIZE;
1787 dmac_flush_range((void *)table,
1788 (void *)table + SGLISTINFO_TABLE_SIZE);
1789 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001790 cmd_buf = (void *)&send_data_rsp;
1791 cmd_len = sizeof(send_data_rsp);
1792 } else {
1793 send_data_rsp_64bit.listener_id = lstnr;
1794 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001795 if (table) {
1796 send_data_rsp_64bit.sglistinfo_ptr =
1797 virt_to_phys(table);
1798 send_data_rsp_64bit.sglistinfo_len =
1799 SGLISTINFO_TABLE_SIZE;
1800 dmac_flush_range((void *)table,
1801 (void *)table + SGLISTINFO_TABLE_SIZE);
1802 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001803 cmd_buf = (void *)&send_data_rsp_64bit;
1804 cmd_len = sizeof(send_data_rsp_64bit);
1805 }
Zhen Kong7d500032018-08-06 16:58:31 -07001806 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001807 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1808 else
1809 *(uint32_t *)cmd_buf =
1810 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
1811 if (ptr_svc) {
1812 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1813 ptr_svc->ihandle,
1814 ptr_svc->sb_virt, ptr_svc->sb_length,
1815 ION_IOC_CLEAN_INV_CACHES);
1816 if (ret) {
1817 pr_err("cache operation failed %d\n", ret);
1818 return ret;
1819 }
1820 }
1821
1822 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1823 ret = __qseecom_enable_clk(CLK_QSEE);
1824 if (ret)
1825 return ret;
1826 }
1827
1828 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1829 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07001830 if (ptr_svc) {
1831 ptr_svc->listener_in_use = false;
1832 __qseecom_clean_listener_sglistinfo(ptr_svc);
1833 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001834 if (ret) {
1835 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1836 ret, data->client.app_id);
1837 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1838 __qseecom_disable_clk(CLK_QSEE);
1839 return ret;
1840 }
Zhen Kong26e62742018-05-04 17:19:06 -07001841 pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
1842 status, resp->result, data->client.app_id, lstnr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001843 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1844 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1845 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1846 resp->result, data->client.app_id, lstnr);
1847 ret = -EINVAL;
1848 }
1849 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1850 __qseecom_disable_clk(CLK_QSEE);
1851
1852 }
1853 if (rc)
1854 return rc;
1855
1856 return ret;
1857}
1858
Zhen Konga91aaf02018-02-02 17:21:04 -08001859static int __qseecom_process_reentrancy_blocked_on_listener(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001860 struct qseecom_command_scm_resp *resp,
1861 struct qseecom_registered_app_list *ptr_app,
1862 struct qseecom_dev_handle *data)
1863{
1864 struct qseecom_registered_listener_list *list_ptr;
1865 int ret = 0;
1866 struct qseecom_continue_blocked_request_ireq ireq;
1867 struct qseecom_command_scm_resp continue_resp;
Zhen Konga91aaf02018-02-02 17:21:04 -08001868 unsigned int session_id;
Zhen Kong3d1d92f2018-02-02 17:21:04 -08001869 sigset_t new_sigset;
1870 sigset_t old_sigset;
Zhen Konga91aaf02018-02-02 17:21:04 -08001871 unsigned long flags;
1872 bool found_app = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001873
1874 if (!resp || !data) {
1875 pr_err("invalid resp or data pointer\n");
1876 ret = -EINVAL;
1877 goto exit;
1878 }
1879
1880 /* find app_id & img_name from list */
1881 if (!ptr_app) {
1882 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
1883 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
1884 list) {
1885 if ((ptr_app->app_id == data->client.app_id) &&
1886 (!strcmp(ptr_app->app_name,
1887 data->client.app_name))) {
1888 found_app = true;
1889 break;
1890 }
1891 }
1892 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
1893 flags);
1894 if (!found_app) {
1895 pr_err("app_id %d (%s) is not found\n",
1896 data->client.app_id,
1897 (char *)data->client.app_name);
1898 ret = -ENOENT;
1899 goto exit;
1900 }
1901 }
1902
Zhen Kongd8cc0052017-11-13 15:13:31 -08001903 do {
Zhen Konga91aaf02018-02-02 17:21:04 -08001904 session_id = resp->resp_type;
1905 list_ptr = __qseecom_find_svc(resp->data);
1906 if (!list_ptr) {
1907 pr_err("Invalid listener ID %d\n", resp->data);
1908 ret = -ENODATA;
Zhen Konge7f525f2017-12-01 18:26:25 -08001909 goto exit;
1910 }
Zhen Konga91aaf02018-02-02 17:21:04 -08001911 ptr_app->blocked_on_listener_id = resp->data;
1912
1913 pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
1914 resp->data, list_ptr->listener_in_use,
1915 session_id, data->client.app_id);
1916
1917 /* sleep until listener is available */
1918 sigfillset(&new_sigset);
1919 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1920
1921 do {
1922 qseecom.app_block_ref_cnt++;
1923 ptr_app->app_blocked = true;
1924 mutex_unlock(&app_access_lock);
1925 wait_event_freezable(
1926 list_ptr->listener_block_app_wq,
1927 !list_ptr->listener_in_use);
1928 mutex_lock(&app_access_lock);
1929 ptr_app->app_blocked = false;
1930 qseecom.app_block_ref_cnt--;
1931 } while (list_ptr->listener_in_use);
1932
1933 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
1934
1935 ptr_app->blocked_on_listener_id = 0;
1936 pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
1937 resp->data, session_id, data->client.app_id);
1938
1939 /* notify TZ that listener is available */
1940 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
1941
1942 if (qseecom.smcinvoke_support)
1943 ireq.app_or_session_id = session_id;
1944 else
1945 ireq.app_or_session_id = data->client.app_id;
1946
1947 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1948 &ireq, sizeof(ireq),
1949 &continue_resp, sizeof(continue_resp));
1950 if (ret && qseecom.smcinvoke_support) {
1951 /* retry with legacy cmd */
1952 qseecom.smcinvoke_support = false;
1953 ireq.app_or_session_id = data->client.app_id;
1954 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1955 &ireq, sizeof(ireq),
1956 &continue_resp, sizeof(continue_resp));
1957 qseecom.smcinvoke_support = true;
1958 if (ret) {
1959 pr_err("unblock app %d or session %d fail\n",
1960 data->client.app_id, session_id);
1961 goto exit;
1962 }
1963 }
1964 resp->result = continue_resp.result;
1965 resp->resp_type = continue_resp.resp_type;
1966 resp->data = continue_resp.data;
1967 pr_debug("unblock resp = %d\n", resp->result);
1968 } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
1969
1970 if (resp->result != QSEOS_RESULT_INCOMPLETE) {
1971 pr_err("Unexpected unblock resp %d\n", resp->result);
1972 ret = -EINVAL;
Zhen Kong2f60f492017-06-29 15:22:14 -07001973 }
Zhen Kong2f60f492017-06-29 15:22:14 -07001974exit:
1975 return ret;
1976}
1977
/*
 * __qseecom_reentrancy_process_incomplete_cmd() - service listener requests
 * for the reentrant QSEE path until TZ stops returning INCOMPLETE.
 *
 * While @resp->result is QSEOS_RESULT_INCOMPLETE, TZ is asking for a
 * listener service identified by @resp->data.  Each iteration:
 *   1. wakes the matching registered listener and marks it in use,
 *   2. blocks all signals, drops app_access_lock, and freezably waits for
 *      the listener's response,
 *   3. sends the listener status back to TZ via a
 *      QSEOS_LISTENER_DATA_RSP_COMMAND(_WHITELIST) scm call, which returns
 *      the next @resp.
 *
 * Listener-side failures are recorded in @rc and reported to TZ as
 * QSEOS_RESULT_FAILURE, but the loop still sends the response so TZ is not
 * left waiting.  Returns @rc if a listener error occurred, otherwise the
 * scm-call/bookkeeping result in @ret (0 on success).
 *
 * Must be called with app_access_lock held; the lock is temporarily
 * released around the listener wait and around blocked-app handling.
 */
static int __qseecom_reentrancy_process_incomplete_cmd(
					struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;				/* scm-call / local status */
	int rc = 0;				/* sticky listener-side error */
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
									= {0};
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;			/* status reported back to TZ */
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/*
		 * NOTE(review): after list_for_each_entry() without a hit,
		 * ptr_svc points at the list head container, not NULL, so
		 * this check looks unreachable; the listener_id mismatch
		 * check below is what actually catches "not found" — confirm.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_err("Service %d does not exist\n",
						lstnr);
			rc = -ERESTARTSYS;
			ptr_svc = NULL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->abort == 1) {
			pr_err("Service %d abort %d\n",
						lstnr, ptr_svc->abort);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		/* listener has not picked up a previous request yet; poke it */
		if (ptr_svc->rcv_req_flag == 1 &&
			__qseecom_retry_wake_up_listener_rcv_wq(ptr_svc)) {
			pr_err("Service %d is not ready to receive request\n",
					lstnr);
			rc = -ENOENT;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);

		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		/* unlock mutex btw waking listener and sleep-wait */
		mutex_unlock(&app_access_lock);
		do {
			if (!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);
		/* lock mutex again after resp sent */
		mutex_lock(&app_access_lock);
		ptr_svc->send_resp_flag = 0;
		qseecom.send_resp_flag = 0;

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort || ptr_svc->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
err_resp:
		/* build the listener response for TZ, 32- or 64-bit layout */
		if (ptr_svc)
			table = ptr_svc->sglistinfo_ptr;
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			if (table) {
				send_data_rsp.sglistinfo_ptr =
					(uint32_t)virt_to_phys(table);
				send_data_rsp.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				/* TZ reads the table from memory, not cache */
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			if (table) {
				send_data_rsp_64bit.sglistinfo_ptr =
					virt_to_phys(table);
				send_data_rsp_64bit.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		if (qseecom.whitelist_support == false || table == NULL)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			/* make the listener's shared buffer visible to TZ */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}
		if (lstnr == RPMB_SERVICE) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		if (ptr_svc) {
			ptr_svc->listener_in_use = false;
			__qseecom_clean_listener_sglistinfo(ptr_svc);
			wake_up_interruptible(&ptr_svc->listener_block_app_wq);
		}

		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			goto exit;
		}

		switch (resp->result) {
		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
				lstnr, data->client.app_id, resp->data);
			if (lstnr == resp->data) {
				pr_err("lstnr %d should not be blocked!\n",
					lstnr);
				ret = -EINVAL;
				goto exit;
			}
			ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, NULL, data);
			if (ret) {
				pr_err("failed to process App(%d) %s blocked on listener %d\n",
					data->client.app_id,
					data->client.app_name, resp->data);
				goto exit;
			}
			/* fall through: unblocked resp is SUCCESS/INCOMPLETE */
		case QSEOS_RESULT_SUCCESS:
		case QSEOS_RESULT_INCOMPLETE:
			break;
		default:
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
			goto exit;
		}
exit:
		if (lstnr == RPMB_SERVICE)
			__qseecom_disable_clk(CLK_QSEE);

	}
	/* a listener-side error takes precedence over the scm result */
	if (rc)
		return rc;

	return ret;
}
2189
2190/*
2191 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2192 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2193 * So, needs to first check if no app blocked before sending OS level scm call,
2194 * then wait until all apps are unblocked.
2195 */
static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
{
	sigset_t new_sigset, old_sigset;

	/*
	 * Only OS-level (Trusted-OS owner) calls in reentrancy phases 1-2
	 * need this gate; phase 3 supports OS-cmd reentrancy natively.
	 */
	if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
		qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
		IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
		/* thread sleep until this app unblocked */
		while (qseecom.app_block_ref_cnt > 0) {
			/* block all signals so the freezable wait is not
			 * interrupted, and drop app_access_lock so blocked
			 * apps can make progress and wake us
			 */
			sigfillset(&new_sigset);
			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
			mutex_unlock(&app_access_lock);
			do {
				if (!wait_event_freezable(qseecom.app_block_wq,
					(qseecom.app_block_ref_cnt == 0)))
					break;
			} while (1);
			/* retake the lock before re-checking the ref count */
			mutex_lock(&app_access_lock);
			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		}
	}
}
2218
2219/*
2220 * scm_call of send data will fail if this TA is blocked or there are more
2221 * than one TA requesting listener services; So, first check to see if need
2222 * to wait.
2223 */
static void __qseecom_reentrancy_check_if_this_app_blocked(
			struct qseecom_registered_app_list *ptr_app)
{
	sigset_t new_sigset, old_sigset;

	if (qseecom.qsee_reentrancy_support) {
		/* mark this app as parked in the blocked-check so unload
		 * logic (see qseecom_unload_app) treats it as busy
		 */
		ptr_app->check_block++;
		/* wait while this TA is blocked or another TA already
		 * holds a listener (ref_cnt > 1)
		 */
		while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
			/* thread sleep until this app unblocked */
			sigfillset(&new_sigset);
			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
			/* drop app_access_lock so the blocking app/listener
			 * can finish and wake app_block_wq
			 */
			mutex_unlock(&app_access_lock);
			do {
				if (!wait_event_freezable(qseecom.app_block_wq,
					(!ptr_app->app_blocked &&
					qseecom.app_block_ref_cnt <= 1)))
					break;
			} while (1);
			mutex_lock(&app_access_lock);
			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		}
		ptr_app->check_block--;
	}
}
2248
2249static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2250 uint32_t *app_id)
2251{
2252 int32_t ret;
2253 struct qseecom_command_scm_resp resp;
2254 bool found_app = false;
2255 struct qseecom_registered_app_list *entry = NULL;
2256 unsigned long flags = 0;
2257
2258 if (!app_id) {
2259 pr_err("Null pointer to app_id\n");
2260 return -EINVAL;
2261 }
2262 *app_id = 0;
2263
2264 /* check if app exists and has been registered locally */
2265 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2266 list_for_each_entry(entry,
2267 &qseecom.registered_app_list_head, list) {
2268 if (!strcmp(entry->app_name, req.app_name)) {
2269 found_app = true;
2270 break;
2271 }
2272 }
2273 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2274 if (found_app) {
2275 pr_debug("Found app with id %d\n", entry->app_id);
2276 *app_id = entry->app_id;
2277 return 0;
2278 }
2279
2280 memset((void *)&resp, 0, sizeof(resp));
2281
2282 /* SCM_CALL to check if app_id for the mentioned app exists */
2283 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2284 sizeof(struct qseecom_check_app_ireq),
2285 &resp, sizeof(resp));
2286 if (ret) {
2287 pr_err("scm_call to check if app is already loaded failed\n");
2288 return -EINVAL;
2289 }
2290
2291 if (resp.result == QSEOS_RESULT_FAILURE)
2292 return 0;
2293
2294 switch (resp.resp_type) {
2295 /*qsee returned listener type response */
2296 case QSEOS_LISTENER_ID:
2297 pr_err("resp type is of listener type instead of app");
2298 return -EINVAL;
2299 case QSEOS_APP_ID:
2300 *app_id = resp.data;
2301 return 0;
2302 default:
2303 pr_err("invalid resp type (%d) from qsee",
2304 resp.resp_type);
2305 return -ENODEV;
2306 }
2307}
2308
2309static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2310{
2311 struct qseecom_registered_app_list *entry = NULL;
2312 unsigned long flags = 0;
2313 u32 app_id = 0;
2314 struct ion_handle *ihandle; /* Ion handle */
2315 struct qseecom_load_img_req load_img_req;
2316 int32_t ret = 0;
2317 ion_phys_addr_t pa = 0;
2318 size_t len;
2319 struct qseecom_command_scm_resp resp;
2320 struct qseecom_check_app_ireq req;
2321 struct qseecom_load_app_ireq load_req;
2322 struct qseecom_load_app_64bit_ireq load_req_64bit;
2323 void *cmd_buf = NULL;
2324 size_t cmd_len;
2325 bool first_time = false;
2326
2327 /* Copy the relevant information needed for loading the image */
2328 if (copy_from_user(&load_img_req,
2329 (void __user *)argp,
2330 sizeof(struct qseecom_load_img_req))) {
2331 pr_err("copy_from_user failed\n");
2332 return -EFAULT;
2333 }
2334
2335 /* Check and load cmnlib */
2336 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2337 if (!qseecom.commonlib_loaded &&
2338 load_img_req.app_arch == ELFCLASS32) {
2339 ret = qseecom_load_commonlib_image(data, "cmnlib");
2340 if (ret) {
2341 pr_err("failed to load cmnlib\n");
2342 return -EIO;
2343 }
2344 qseecom.commonlib_loaded = true;
2345 pr_debug("cmnlib is loaded\n");
2346 }
2347
2348 if (!qseecom.commonlib64_loaded &&
2349 load_img_req.app_arch == ELFCLASS64) {
2350 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2351 if (ret) {
2352 pr_err("failed to load cmnlib64\n");
2353 return -EIO;
2354 }
2355 qseecom.commonlib64_loaded = true;
2356 pr_debug("cmnlib64 is loaded\n");
2357 }
2358 }
2359
2360 if (qseecom.support_bus_scaling) {
2361 mutex_lock(&qsee_bw_mutex);
2362 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2363 mutex_unlock(&qsee_bw_mutex);
2364 if (ret)
2365 return ret;
2366 }
2367
2368 /* Vote for the SFPB clock */
2369 ret = __qseecom_enable_clk_scale_up(data);
2370 if (ret)
2371 goto enable_clk_err;
2372
2373 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2374 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2375 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2376
2377 ret = __qseecom_check_app_exists(req, &app_id);
2378 if (ret < 0)
2379 goto loadapp_err;
2380
2381 if (app_id) {
2382 pr_debug("App id %d (%s) already exists\n", app_id,
2383 (char *)(req.app_name));
2384 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2385 list_for_each_entry(entry,
2386 &qseecom.registered_app_list_head, list){
2387 if (entry->app_id == app_id) {
2388 entry->ref_cnt++;
2389 break;
2390 }
2391 }
2392 spin_unlock_irqrestore(
2393 &qseecom.registered_app_list_lock, flags);
2394 ret = 0;
2395 } else {
2396 first_time = true;
2397 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2398 (char *)(load_img_req.img_name));
2399 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002400 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002401 load_img_req.ifd_data_fd);
2402 if (IS_ERR_OR_NULL(ihandle)) {
2403 pr_err("Ion client could not retrieve the handle\n");
2404 ret = -ENOMEM;
2405 goto loadapp_err;
2406 }
2407
2408 /* Get the physical address of the ION BUF */
2409 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2410 if (ret) {
2411 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2412 ret);
2413 goto loadapp_err;
2414 }
2415 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2416 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2417 len, load_img_req.mdt_len,
2418 load_img_req.img_len);
2419 ret = -EINVAL;
2420 goto loadapp_err;
2421 }
2422 /* Populate the structure for sending scm call to load image */
2423 if (qseecom.qsee_version < QSEE_VERSION_40) {
2424 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2425 load_req.mdt_len = load_img_req.mdt_len;
2426 load_req.img_len = load_img_req.img_len;
2427 strlcpy(load_req.app_name, load_img_req.img_name,
2428 MAX_APP_NAME_SIZE);
2429 load_req.phy_addr = (uint32_t)pa;
2430 cmd_buf = (void *)&load_req;
2431 cmd_len = sizeof(struct qseecom_load_app_ireq);
2432 } else {
2433 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2434 load_req_64bit.mdt_len = load_img_req.mdt_len;
2435 load_req_64bit.img_len = load_img_req.img_len;
2436 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2437 MAX_APP_NAME_SIZE);
2438 load_req_64bit.phy_addr = (uint64_t)pa;
2439 cmd_buf = (void *)&load_req_64bit;
2440 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2441 }
2442
2443 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2444 ION_IOC_CLEAN_INV_CACHES);
2445 if (ret) {
2446 pr_err("cache operation failed %d\n", ret);
2447 goto loadapp_err;
2448 }
2449
2450 /* SCM_CALL to load the app and get the app_id back */
2451 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2452 cmd_len, &resp, sizeof(resp));
2453 if (ret) {
2454 pr_err("scm_call to load app failed\n");
2455 if (!IS_ERR_OR_NULL(ihandle))
2456 ion_free(qseecom.ion_clnt, ihandle);
2457 ret = -EINVAL;
2458 goto loadapp_err;
2459 }
2460
2461 if (resp.result == QSEOS_RESULT_FAILURE) {
2462 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2463 if (!IS_ERR_OR_NULL(ihandle))
2464 ion_free(qseecom.ion_clnt, ihandle);
2465 ret = -EFAULT;
2466 goto loadapp_err;
2467 }
2468
2469 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2470 ret = __qseecom_process_incomplete_cmd(data, &resp);
2471 if (ret) {
2472 pr_err("process_incomplete_cmd failed err: %d\n",
2473 ret);
2474 if (!IS_ERR_OR_NULL(ihandle))
2475 ion_free(qseecom.ion_clnt, ihandle);
2476 ret = -EFAULT;
2477 goto loadapp_err;
2478 }
2479 }
2480
2481 if (resp.result != QSEOS_RESULT_SUCCESS) {
2482 pr_err("scm_call failed resp.result unknown, %d\n",
2483 resp.result);
2484 if (!IS_ERR_OR_NULL(ihandle))
2485 ion_free(qseecom.ion_clnt, ihandle);
2486 ret = -EFAULT;
2487 goto loadapp_err;
2488 }
2489
2490 app_id = resp.data;
2491
2492 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2493 if (!entry) {
2494 ret = -ENOMEM;
2495 goto loadapp_err;
2496 }
2497 entry->app_id = app_id;
2498 entry->ref_cnt = 1;
2499 entry->app_arch = load_img_req.app_arch;
2500 /*
2501 * keymaster app may be first loaded as "keymaste" by qseecomd,
2502 * and then used as "keymaster" on some targets. To avoid app
2503 * name checking error, register "keymaster" into app_list and
2504 * thread private data.
2505 */
2506 if (!strcmp(load_img_req.img_name, "keymaste"))
2507 strlcpy(entry->app_name, "keymaster",
2508 MAX_APP_NAME_SIZE);
2509 else
2510 strlcpy(entry->app_name, load_img_req.img_name,
2511 MAX_APP_NAME_SIZE);
2512 entry->app_blocked = false;
2513 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002514 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002515
2516 /* Deallocate the handle */
2517 if (!IS_ERR_OR_NULL(ihandle))
2518 ion_free(qseecom.ion_clnt, ihandle);
2519
2520 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2521 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2522 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2523 flags);
2524
2525 pr_warn("App with id %u (%s) now loaded\n", app_id,
2526 (char *)(load_img_req.img_name));
2527 }
2528 data->client.app_id = app_id;
2529 data->client.app_arch = load_img_req.app_arch;
2530 if (!strcmp(load_img_req.img_name, "keymaste"))
2531 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2532 else
2533 strlcpy(data->client.app_name, load_img_req.img_name,
2534 MAX_APP_NAME_SIZE);
2535 load_img_req.app_id = app_id;
2536 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2537 pr_err("copy_to_user failed\n");
2538 ret = -EFAULT;
2539 if (first_time == true) {
2540 spin_lock_irqsave(
2541 &qseecom.registered_app_list_lock, flags);
2542 list_del(&entry->list);
2543 spin_unlock_irqrestore(
2544 &qseecom.registered_app_list_lock, flags);
2545 kzfree(entry);
2546 }
2547 }
2548
2549loadapp_err:
2550 __qseecom_disable_clk_scale_down(data);
2551enable_clk_err:
2552 if (qseecom.support_bus_scaling) {
2553 mutex_lock(&qsee_bw_mutex);
2554 qseecom_unregister_bus_bandwidth_needs(data);
2555 mutex_unlock(&qsee_bw_mutex);
2556 }
2557 return ret;
2558}
2559
2560static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2561{
2562 int ret = 1; /* Set unload app */
2563
2564 wake_up_all(&qseecom.send_resp_wq);
2565 if (qseecom.qsee_reentrancy_support)
2566 mutex_unlock(&app_access_lock);
2567 while (atomic_read(&data->ioctl_count) > 1) {
2568 if (wait_event_freezable(data->abort_wq,
2569 atomic_read(&data->ioctl_count) <= 1)) {
2570 pr_err("Interrupted from abort\n");
2571 ret = -ERESTARTSYS;
2572 break;
2573 }
2574 }
2575 if (qseecom.qsee_reentrancy_support)
2576 mutex_lock(&app_access_lock);
2577 return ret;
2578}
2579
2580static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2581{
2582 int ret = 0;
2583
2584 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2585 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2586 ion_free(qseecom.ion_clnt, data->client.ihandle);
2587 data->client.ihandle = NULL;
2588 }
2589 return ret;
2590}
2591
/*
 * qseecom_unload_app() - drop a reference to (and possibly unload) the TA
 * bound to @data.
 *
 * @app_crash: true when the client died; forces ref_cnt to 0 and an unload
 *             unless the app is currently blocked on a listener.
 *
 * The app is actually unloaded from TZ (QSEOS_APP_SHUTDOWN_COMMAND) only
 * when it crashes or when this is the last reference.  The keymaster app
 * is never unloaded.  In all cases the client's ION mapping is released
 * and @data marked released.  Returns 0 or a negative errno.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;
	bool found_app = false;
	bool found_dead_app = false;	/* same app_id, different name */

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* matches both "keymaste" and "keymaster" by prefix */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/* don't force-unload an app that is
					 * blocked on (or checking for) a
					 * listener; treat as normal release
					 */
					if (ptr_app->app_blocked ||
						ptr_app->check_block)
						app_crash = false;
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for sending scm call to load image */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
								req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
					data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
					data->client.app_id);
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			/* TZ needs listener services to finish the unload */
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
									ret);
				goto unload_exit;
			}
		}
	}

unload_exit:
	/*
	 * Bookkeeping runs even when the scm unload failed: the local ref
	 * count is dropped and, when an unload was decided, the list entry
	 * is removed regardless of the TZ result.
	 */
	if (found_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2715
2716static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2717 unsigned long virt)
2718{
2719 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2720}
2721
2722static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2723 unsigned long virt)
2724{
2725 return (uintptr_t)data->client.sb_virt +
2726 (virt - data->client.user_virt_sb_base);
2727}
2728
2729int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2730 struct qseecom_send_svc_cmd_req *req_ptr,
2731 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2732{
2733 int ret = 0;
2734 void *req_buf = NULL;
2735
2736 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2737 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2738 req_ptr, send_svc_ireq_ptr);
2739 return -EINVAL;
2740 }
2741
2742 /* Clients need to ensure req_buf is at base offset of shared buffer */
2743 if ((uintptr_t)req_ptr->cmd_req_buf !=
2744 data_ptr->client.user_virt_sb_base) {
2745 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2746 return -EINVAL;
2747 }
2748
2749 if (data_ptr->client.sb_length <
2750 sizeof(struct qseecom_rpmb_provision_key)) {
2751 pr_err("shared buffer is too small to hold key type\n");
2752 return -EINVAL;
2753 }
2754 req_buf = data_ptr->client.sb_virt;
2755
2756 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2757 send_svc_ireq_ptr->key_type =
2758 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2759 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2760 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2761 data_ptr, (uintptr_t)req_ptr->resp_buf));
2762 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2763
2764 return ret;
2765}
2766
2767int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2768 struct qseecom_send_svc_cmd_req *req_ptr,
2769 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2770{
2771 int ret = 0;
2772 uint32_t reqd_len_sb_in = 0;
2773
2774 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2775 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2776 req_ptr, send_svc_ireq_ptr);
2777 return -EINVAL;
2778 }
2779
2780 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2781 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2782 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2783 pr_err("Required: %u, Available: %zu\n",
2784 reqd_len_sb_in, data_ptr->client.sb_length);
2785 return -ENOMEM;
2786 }
2787
2788 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2789 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2790 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2791 data_ptr, (uintptr_t)req_ptr->resp_buf));
2792 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2793
2794 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2795 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2796
2797
2798 return ret;
2799}
2800
/*
 * __validate_send_service_cmd_inputs() - sanity-check a service command
 * request from userspace against the client's registered shared buffer.
 *
 * Verifies non-NULL pointers, an initialized client/shared buffer, that
 * both cmd and resp buffers lie entirely within the shared buffer, and
 * that none of the pointer+length arithmetic can overflow.  The checks
 * run on untrusted ioctl input, so every addition is overflow-guarded
 * before the corresponding range check.
 *
 * Returns 0 when the request is valid, -EINVAL on any invalid field,
 * -ENOMEM when cmd+resp do not fit in the shared buffer.
 */
static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_svc_cmd_req *req)
{
	if (!req || !req->resp_buf || !req->cmd_req_buf) {
		pr_err("req or cmd buffer or response buffer is null\n");
		return -EINVAL;
	}

	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->client.sb_virt == NULL) {
		pr_err("sb_virt null\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base == 0) {
		pr_err("user_virt_sb_base is null\n");
		return -EINVAL;
	}

	if (data->client.sb_length == 0) {
		pr_err("sb_length is 0\n");
		return -EINVAL;
	}

	/* both buffers must start inside [sb_base, sb_base + sb_length) */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* overflow guards must precede the sums they protect */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* both buffers must also END inside the shared buffer */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
2885
2886static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
2887 void __user *argp)
2888{
2889 int ret = 0;
2890 struct qseecom_client_send_service_ireq send_svc_ireq;
2891 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
2892 struct qseecom_command_scm_resp resp;
2893 struct qseecom_send_svc_cmd_req req;
2894 void *send_req_ptr;
2895 size_t req_buf_size;
2896
2897 /*struct qseecom_command_scm_resp resp;*/
2898
2899 if (copy_from_user(&req,
2900 (void __user *)argp,
2901 sizeof(req))) {
2902 pr_err("copy_from_user failed\n");
2903 return -EFAULT;
2904 }
2905
2906 if (__validate_send_service_cmd_inputs(data, &req))
2907 return -EINVAL;
2908
2909 data->type = QSEECOM_SECURE_SERVICE;
2910
2911 switch (req.cmd_id) {
2912 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
2913 case QSEOS_RPMB_ERASE_COMMAND:
2914 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
2915 send_req_ptr = &send_svc_ireq;
2916 req_buf_size = sizeof(send_svc_ireq);
2917 if (__qseecom_process_rpmb_svc_cmd(data, &req,
2918 send_req_ptr))
2919 return -EINVAL;
2920 break;
2921 case QSEOS_FSM_LTEOTA_REQ_CMD:
2922 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
2923 case QSEOS_FSM_IKE_REQ_CMD:
2924 case QSEOS_FSM_IKE_REQ_RSP_CMD:
2925 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
2926 case QSEOS_FSM_OEM_FUSE_READ_ROW:
2927 case QSEOS_FSM_ENCFS_REQ_CMD:
2928 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
2929 send_req_ptr = &send_fsm_key_svc_ireq;
2930 req_buf_size = sizeof(send_fsm_key_svc_ireq);
2931 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
2932 send_req_ptr))
2933 return -EINVAL;
2934 break;
2935 default:
2936 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
2937 return -EINVAL;
2938 }
2939
2940 if (qseecom.support_bus_scaling) {
2941 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
2942 if (ret) {
2943 pr_err("Fail to set bw HIGH\n");
2944 return ret;
2945 }
2946 } else {
2947 ret = qseecom_perf_enable(data);
2948 if (ret) {
2949 pr_err("Failed to vote for clocks with err %d\n", ret);
2950 goto exit;
2951 }
2952 }
2953
2954 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
2955 data->client.sb_virt, data->client.sb_length,
2956 ION_IOC_CLEAN_INV_CACHES);
2957 if (ret) {
2958 pr_err("cache operation failed %d\n", ret);
2959 goto exit;
2960 }
2961 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2962 (const void *)send_req_ptr,
2963 req_buf_size, &resp, sizeof(resp));
2964 if (ret) {
2965 pr_err("qseecom_scm_call failed with err: %d\n", ret);
2966 if (!qseecom.support_bus_scaling) {
2967 qsee_disable_clock_vote(data, CLK_DFAB);
2968 qsee_disable_clock_vote(data, CLK_SFPB);
2969 } else {
2970 __qseecom_add_bw_scale_down_timer(
2971 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
2972 }
2973 goto exit;
2974 }
2975 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
2976 data->client.sb_virt, data->client.sb_length,
2977 ION_IOC_INV_CACHES);
2978 if (ret) {
2979 pr_err("cache operation failed %d\n", ret);
2980 goto exit;
2981 }
2982 switch (resp.result) {
2983 case QSEOS_RESULT_SUCCESS:
2984 break;
2985 case QSEOS_RESULT_INCOMPLETE:
2986 pr_debug("qseos_result_incomplete\n");
2987 ret = __qseecom_process_incomplete_cmd(data, &resp);
2988 if (ret) {
2989 pr_err("process_incomplete_cmd fail with result: %d\n",
2990 resp.result);
2991 }
2992 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
2993 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05302994 if (put_user(resp.result,
2995 (uint32_t __user *)req.resp_buf)) {
2996 ret = -EINVAL;
2997 goto exit;
2998 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002999 ret = 0;
3000 }
3001 break;
3002 case QSEOS_RESULT_FAILURE:
3003 pr_err("scm call failed with resp.result: %d\n", resp.result);
3004 ret = -EINVAL;
3005 break;
3006 default:
3007 pr_err("Response result %d not supported\n",
3008 resp.result);
3009 ret = -EINVAL;
3010 break;
3011 }
3012 if (!qseecom.support_bus_scaling) {
3013 qsee_disable_clock_vote(data, CLK_DFAB);
3014 qsee_disable_clock_vote(data, CLK_SFPB);
3015 } else {
3016 __qseecom_add_bw_scale_down_timer(
3017 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3018 }
3019
3020exit:
3021 return ret;
3022}
3023
/*
 * Sanity-check a send-cmd request against the client's shared buffer.
 * Both the command buffer and (if non-NULL) the response buffer must lie
 * entirely inside [user_virt_sb_base, user_virt_sb_base + sb_length), and
 * their combined length must fit in the shared buffer.  Every addition
 * used in a range comparison is preceded by an explicit overflow check so
 * the comparisons below cannot wrap — the order of these checks is
 * load-bearing.
 *
 * Returns 0 if valid, -EINVAL on a bad pointer/length/overflow, -ENOMEM
 * if the buffers do not fit in the shared buffer.
 */
static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)

{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}
	/* resp_buf may be NULL only when no response bytes are expected. */
	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
						(req->cmd_req_buf == NULL)) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* Buffer start addresses must fall inside the shared buffer. */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* Guard the cmd_req_len + resp_len addition below. */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* Guard the pointer + length additions in the range checks below. */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Finally, both buffer end addresses must stay inside the region. */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
3093
/*
 * Process the TZ response for a command issued on a reentrancy-capable
 * QSEE: if the app is blocked on a listener, unblock it first, then drive
 * the still-incomplete command to completion.
 *
 * Returns 0 on success, or a negative errno from the helpers / -EINVAL
 * on an unrecognized result code.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
			resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/*
		 * Intentional fall-through: once the listener has unblocked
		 * the app, the original command is still incomplete and is
		 * resumed by the INCOMPLETE handling below.
		 */

	case QSEOS_RESULT_INCOMPLETE:
		/*
		 * Mark the app blocked while the incomplete command is
		 * resumed, so concurrent senders wait on app_block_wq.
		 */
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
				ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
				resp->result);
		return -EINVAL;
	}
}
3132
/*
 * Core send-cmd path: look up the registered app, build the 32-bit or
 * 64-bit ireq (chosen by QSEE version), maintain cache coherency of the
 * shared buffer around the SCM call, and process the TZ response.
 *
 * Assumes req has already been validated by __validate_send_cmd_inputs()
 * and that cmd_req_buf/resp_buf were converted to kernel virtual
 * addresses by the caller (modfd path) or point into the shared buffer.
 *
 * Returns 0 on success; -ENOENT if the app is not registered; -EFAULT if
 * a 32-bit app's physical buffers exceed 4 GB; otherwise an error from
 * the cache-op/SCM/response helpers.
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	int ret2 = 0;
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* Legacy (pre-4.0) QSEE takes 32-bit pointers; newer takes 64-bit. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* Flush the sglist table so TZ reads current contents. */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* First field of both ireq layouts is the command id. */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* Clean the request/response region before handing it to TZ. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id  = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/*
	 * Invalidate the whole shared buffer even on error paths so stale
	 * cache lines never mask TZ's writes; ret2 keeps the original error.
	 */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret2);
		return ret2;
	}
	return ret;
}
3270
3271static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3272{
3273 int ret = 0;
3274 struct qseecom_send_cmd_req req;
3275
3276 ret = copy_from_user(&req, argp, sizeof(req));
3277 if (ret) {
3278 pr_err("copy_from_user failed\n");
3279 return ret;
3280 }
3281
3282 if (__validate_send_cmd_inputs(data, &req))
3283 return -EINVAL;
3284
3285 ret = __qseecom_send_cmd(data, &req);
3286
3287 if (ret)
3288 return ret;
3289
3290 return ret;
3291}
3292
3293int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3294 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3295 struct qseecom_dev_handle *data, int i) {
3296
3297 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3298 (req->ifd_data[i].fd > 0)) {
3299 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3300 (req->ifd_data[i].cmd_buf_offset >
3301 req->cmd_req_len - sizeof(uint32_t))) {
3302 pr_err("Invalid offset (req len) 0x%x\n",
3303 req->ifd_data[i].cmd_buf_offset);
3304 return -EINVAL;
3305 }
3306 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3307 (lstnr_resp->ifd_data[i].fd > 0)) {
3308 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3309 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3310 lstnr_resp->resp_len - sizeof(uint32_t))) {
3311 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3312 lstnr_resp->ifd_data[i].cmd_buf_offset);
3313 return -EINVAL;
3314 }
3315 }
3316 return 0;
3317}
3318
/*
 * Patch (or, on cleanup, un-patch) ion-fd physical addresses into the
 * command buffer (client app) or listener response buffer, using 32-bit
 * sg entries.  For each valid fd: import the ion handle, fetch its sg
 * table, write either a single phys addr or an array of
 * qseecom_sg_entry at cmd_buf_offset, do the cache maintenance, record
 * the sglist info for whitelist support, and free the handle.
 *
 * cleanup=false populates; cleanup=true zeroes the patched fields and
 * invalidates caches after the TZ call.
 *
 * Returns 0 on success; -EFAULT/-EINVAL on bad handle type or msg;
 * -ENOMEM on any per-fd failure (note: even validation failures map to
 * -ENOMEM via the err path — kept as-is for compatibility).
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* msg is either a modfd cmd req (app) or a listener response. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Resolve the ion handle and the patch target ("field"). */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		/* 32-bit path has no V2 list buffer: hard cap on nents. */
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single segment: patch one 32-bit phys addr. */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* Multiple segments: patch an sg-entry array. */
			struct qseecom_sg_entry *update;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
						(req->cmd_req_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
				(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			/* Post-TZ: invalidate so the CPU sees TZ writes. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Pre-TZ: clean, then record sglist info for TZ. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3535
3536static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3537 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3538{
3539 struct scatterlist *sg = sg_ptr->sgl;
3540 struct qseecom_sg_entry_64bit *sg_entry;
3541 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3542 void *buf;
3543 uint i;
3544 size_t size;
3545 dma_addr_t coh_pmem;
3546
3547 if (fd_idx >= MAX_ION_FD) {
3548 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3549 return -ENOMEM;
3550 }
3551 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3552 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3553 /* Allocate a contiguous kernel buffer */
3554 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3555 size = (size + PAGE_SIZE) & PAGE_MASK;
3556 buf = dma_alloc_coherent(qseecom.pdev,
3557 size, &coh_pmem, GFP_KERNEL);
3558 if (buf == NULL) {
3559 pr_err("failed to alloc memory for sg buf\n");
3560 return -ENOMEM;
3561 }
3562 /* update qseecom_sg_list_buf_hdr_64bit */
3563 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3564 buf_hdr->new_buf_phys_addr = coh_pmem;
3565 buf_hdr->nents_total = sg_ptr->nents;
3566 /* save the left sg entries into new allocated buf */
3567 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3568 for (i = 0; i < sg_ptr->nents; i++) {
3569 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3570 sg_entry->len = sg->length;
3571 sg_entry++;
3572 sg = sg_next(sg);
3573 }
3574
3575 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3576 data->client.sec_buf_fd[fd_idx].vbase = buf;
3577 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3578 data->client.sec_buf_fd[fd_idx].size = size;
3579
3580 return 0;
3581}
3582
/*
 * 64-bit counterpart of __qseecom_update_cmd_buf(): patch ion-fd physical
 * addresses into the command/listener buffer using 64-bit sg entries.
 * Unlike the 32-bit path, an fd with more than QSEECOM_MAX_SG_ENTRY
 * segments is supported via a side "V2" sg-list buffer allocated by
 * __qseecom_allocate_sg_list_buffer() (freed here during cleanup).
 *
 * cleanup=false populates before the TZ call; cleanup=true zeroes the
 * patched fields, invalidates caches, and releases side buffers after it.
 *
 * Returns 0 on success; -EFAULT/-EINVAL on bad handle type or msg;
 * -ENOMEM on any per-fd failure (the err path also frees all tracked
 * sec_buf_fd side buffers).
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* msg is either a modfd cmd req (app) or a listener response. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Resolve the ion handle and the patch target ("field"). */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		/* Too many segments: fall back to the V2 side buffer. */
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			/* Only the header lives in the cmd buffer. */
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single segment: patch one 64-bit phys addr. */
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			/* Multiple segments: patch a 64-bit sg-entry array. */
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
				(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
						(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			/* Post-TZ: invalidate so the CPU sees TZ writes. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Pre-TZ: clean, then record sglist info for TZ. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* Release every tracked V2 side buffer before failing. */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3781
3782static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3783 void __user *argp,
3784 bool is_64bit_addr)
3785{
3786 int ret = 0;
3787 int i;
3788 struct qseecom_send_modfd_cmd_req req;
3789 struct qseecom_send_cmd_req send_cmd_req;
3790
3791 ret = copy_from_user(&req, argp, sizeof(req));
3792 if (ret) {
3793 pr_err("copy_from_user failed\n");
3794 return ret;
3795 }
3796
3797 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3798 send_cmd_req.cmd_req_len = req.cmd_req_len;
3799 send_cmd_req.resp_buf = req.resp_buf;
3800 send_cmd_req.resp_len = req.resp_len;
3801
3802 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3803 return -EINVAL;
3804
3805 /* validate offsets */
3806 for (i = 0; i < MAX_ION_FD; i++) {
3807 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3808 pr_err("Invalid offset %d = 0x%x\n",
3809 i, req.ifd_data[i].cmd_buf_offset);
3810 return -EINVAL;
3811 }
3812 }
3813 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3814 (uintptr_t)req.cmd_req_buf);
3815 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3816 (uintptr_t)req.resp_buf);
3817
3818 if (!is_64bit_addr) {
3819 ret = __qseecom_update_cmd_buf(&req, false, data);
3820 if (ret)
3821 return ret;
3822 ret = __qseecom_send_cmd(data, &send_cmd_req);
3823 if (ret)
3824 return ret;
3825 ret = __qseecom_update_cmd_buf(&req, true, data);
3826 if (ret)
3827 return ret;
3828 } else {
3829 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3830 if (ret)
3831 return ret;
3832 ret = __qseecom_send_cmd(data, &send_cmd_req);
3833 if (ret)
3834 return ret;
3835 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3836 if (ret)
3837 return ret;
3838 }
3839
3840 return ret;
3841}
3842
/* ioctl entry: modfd send-cmd using 32-bit sg entries (legacy layout). */
static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, false);
}
3848
/* ioctl entry: modfd send-cmd using 64-bit sg entries. */
static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, true);
}
3854
3855
3856
3857static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3858 struct qseecom_registered_listener_list *svc)
3859{
3860 int ret;
3861
3862 ret = (svc->rcv_req_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07003863 return ret || data->abort || svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003864}
3865
/*
 * Block the calling listener thread until TZ posts a request for it
 * (rcv_req_flag set), an abort is requested, or the wait is interrupted.
 *
 * Returns 0 when a request is pending (rcv_req_flag is consumed),
 * -ENODATA for an unknown listener id, -ERESTARTSYS if interrupted by a
 * signal, -ENODEV if the listener or client handle is being aborted.
 */
static int qseecom_receive_req(struct qseecom_dev_handle *data)
{
	int ret = 0;
	struct qseecom_registered_listener_list *this_lstnr;

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (!this_lstnr) {
		pr_err("Invalid listener ID\n");
		return -ENODATA;
	}

	while (1) {
		/*
		 * Freezable wait: returns nonzero only when interrupted by
		 * a signal; suspend/freeze cycles do not abort the wait.
		 */
		if (wait_event_freezable(this_lstnr->rcv_req_wq,
				__qseecom_listener_has_rcvd_req(data,
				this_lstnr))) {
			pr_warn("Interrupted: exiting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			/* woken up for different reason */
			return -ERESTARTSYS;
		}

		/* Abort may also satisfy the predicate — check it first. */
		if (data->abort || this_lstnr->abort) {
			pr_err("Aborting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			return -ENODEV;
		}
		/* Consume the pending-request flag before returning. */
		this_lstnr->rcv_req_flag = 0;
		break;
	}
	return ret;
}
3897
3898static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3899{
3900 unsigned char app_arch = 0;
3901 struct elf32_hdr *ehdr;
3902 struct elf64_hdr *ehdr64;
3903
3904 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3905
3906 switch (app_arch) {
3907 case ELFCLASS32: {
3908 ehdr = (struct elf32_hdr *)fw_entry->data;
3909 if (fw_entry->size < sizeof(*ehdr)) {
3910 pr_err("%s: Not big enough to be an elf32 header\n",
3911 qseecom.pdev->init_name);
3912 return false;
3913 }
3914 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3915 pr_err("%s: Not an elf32 header\n",
3916 qseecom.pdev->init_name);
3917 return false;
3918 }
3919 if (ehdr->e_phnum == 0) {
3920 pr_err("%s: No loadable segments\n",
3921 qseecom.pdev->init_name);
3922 return false;
3923 }
3924 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3925 sizeof(struct elf32_hdr) > fw_entry->size) {
3926 pr_err("%s: Program headers not within mdt\n",
3927 qseecom.pdev->init_name);
3928 return false;
3929 }
3930 break;
3931 }
3932 case ELFCLASS64: {
3933 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3934 if (fw_entry->size < sizeof(*ehdr64)) {
3935 pr_err("%s: Not big enough to be an elf64 header\n",
3936 qseecom.pdev->init_name);
3937 return false;
3938 }
3939 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3940 pr_err("%s: Not an elf64 header\n",
3941 qseecom.pdev->init_name);
3942 return false;
3943 }
3944 if (ehdr64->e_phnum == 0) {
3945 pr_err("%s: No loadable segments\n",
3946 qseecom.pdev->init_name);
3947 return false;
3948 }
3949 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3950 sizeof(struct elf64_hdr) > fw_entry->size) {
3951 pr_err("%s: Program headers not within mdt\n",
3952 qseecom.pdev->init_name);
3953 return false;
3954 }
3955 break;
3956 }
3957 default: {
3958 pr_err("QSEE app arch %u is not supported\n", app_arch);
3959 return false;
3960 }
3961 }
3962 return true;
3963}
3964
/*
 * Compute the total size of a split firmware image for @appname.
 *
 * Loads "<appname>.mdt", validates it as an ELF image, then sums the
 * sizes of the mdt plus every "<appname>.bNN" blob (one per program
 * header) into *fw_size, with overflow checking. *app_arch receives the
 * ELF class byte (ELFCLASS32/ELFCLASS64).
 *
 * Returns 0 on success; on failure returns a negative errno (or the
 * initial -1 sentinel) with *fw_size set to 0.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with request_firmware\n");
		ret = -EIO;
		goto err;
	}
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	/* One .bNN blob per ELF program header. */
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;
	/*
	 * num_images >= 1 here: __qseecom_is_fw_image_valid() rejects
	 * e_phnum == 0, so the loop always runs and leaves ret == 0 from
	 * the last successful request_firmware().
	 */
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		/* Guard the uint32_t accumulator against wrap-around. */
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}

	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
4027
/*
 * Concatenate the split firmware image for @appname into @img_data.
 *
 * Copies "<appname>.mdt" first, then each "<appname>.bNN" blob (one per
 * ELF program header) back to back. @fw_size is the capacity of
 * @img_data (as computed by __qseecom_get_fw_size()); every copy is
 * bounds- and overflow-checked against it. On success load_req->mdt_len
 * holds the mdt size and load_req->img_len the total bytes copied.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
					uint32_t fw_size,
					struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;	/* write cursor into img_data */
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}

	load_req->img_len = fw_entry->size;
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/

	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, app_arch);
		ret = -EIO;
		goto err;
	}
	release_firmware(fw_entry);
	fw_entry = NULL;
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		/* Check both uint32_t overflow and buffer capacity. */
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	release_firmware(fw_entry);
	return ret;
}
4099
/*
 * Allocate a @fw_size ION buffer from the QSEECOM TA heap, kernel-map it
 * and resolve its physical address.
 *
 * On success returns 0 and fills *pihandle (ION handle), *data (kernel
 * virtual address) and *paddr (physical address). On failure returns a
 * negative errno with all resources released.
 *
 * The allocation is retried up to QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP
 * times with a sleep between attempts. The unlock/lock pair around the
 * sleep implies the caller holds app_access_lock; it is dropped so other
 * qseecom work can make progress while this thread sleeps.
 */
static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
{
	size_t len = 0;
	int ret = 0;
	ion_phys_addr_t pa;
	struct ion_handle *ihandle = NULL;
	u8 *img_data = NULL;
	int retry = 0;

	do {
		/* First pass skips the sleep (retry++ evaluates to 0). */
		if (retry++) {
			mutex_unlock(&app_access_lock);
			msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
			mutex_lock(&app_access_lock);
		}
		ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
			SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), 0);
	} while (IS_ERR_OR_NULL(ihandle) &&
		(retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));

	if (IS_ERR_OR_NULL(ihandle)) {
		pr_err("ION alloc failed\n");
		return -ENOMEM;
	}
	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
					ihandle);

	if (IS_ERR_OR_NULL(img_data)) {
		pr_err("ION memory mapping for image loading failed\n");
		ret = -ENOMEM;
		goto exit_ion_free;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
	if (ret) {
		pr_err("physical memory retrieval failure\n");
		ret = -EIO;
		goto exit_ion_unmap_kernel;
	}

	*pihandle = ihandle;
	*data = img_data;
	*paddr = pa;
	return ret;

exit_ion_unmap_kernel:
	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
exit_ion_free:
	ion_free(qseecom.ion_clnt, ihandle);
	ihandle = NULL;
	return ret;
}
4153
4154static void __qseecom_free_img_data(struct ion_handle **ihandle)
4155{
4156 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4157 ion_free(qseecom.ion_clnt, *ihandle);
4158 *ihandle = NULL;
4159}
4160
4161static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4162 uint32_t *app_id)
4163{
4164 int ret = -1;
4165 uint32_t fw_size = 0;
4166 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4167 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4168 struct qseecom_command_scm_resp resp;
4169 u8 *img_data = NULL;
4170 ion_phys_addr_t pa = 0;
4171 struct ion_handle *ihandle = NULL;
4172 void *cmd_buf = NULL;
4173 size_t cmd_len;
4174 uint32_t app_arch = 0;
4175
4176 if (!data || !appname || !app_id) {
4177 pr_err("Null pointer to data or appname or appid\n");
4178 return -EINVAL;
4179 }
4180 *app_id = 0;
4181 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4182 return -EIO;
4183 data->client.app_arch = app_arch;
4184
4185 /* Check and load cmnlib */
4186 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4187 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4188 ret = qseecom_load_commonlib_image(data, "cmnlib");
4189 if (ret) {
4190 pr_err("failed to load cmnlib\n");
4191 return -EIO;
4192 }
4193 qseecom.commonlib_loaded = true;
4194 pr_debug("cmnlib is loaded\n");
4195 }
4196
4197 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4198 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4199 if (ret) {
4200 pr_err("failed to load cmnlib64\n");
4201 return -EIO;
4202 }
4203 qseecom.commonlib64_loaded = true;
4204 pr_debug("cmnlib64 is loaded\n");
4205 }
4206 }
4207
4208 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4209 if (ret)
4210 return ret;
4211
4212 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4213 if (ret) {
4214 ret = -EIO;
4215 goto exit_free_img_data;
4216 }
4217
4218 /* Populate the load_req parameters */
4219 if (qseecom.qsee_version < QSEE_VERSION_40) {
4220 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4221 load_req.mdt_len = load_req.mdt_len;
4222 load_req.img_len = load_req.img_len;
4223 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4224 load_req.phy_addr = (uint32_t)pa;
4225 cmd_buf = (void *)&load_req;
4226 cmd_len = sizeof(struct qseecom_load_app_ireq);
4227 } else {
4228 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4229 load_req_64bit.mdt_len = load_req.mdt_len;
4230 load_req_64bit.img_len = load_req.img_len;
4231 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4232 load_req_64bit.phy_addr = (uint64_t)pa;
4233 cmd_buf = (void *)&load_req_64bit;
4234 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4235 }
4236
4237 if (qseecom.support_bus_scaling) {
4238 mutex_lock(&qsee_bw_mutex);
4239 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4240 mutex_unlock(&qsee_bw_mutex);
4241 if (ret) {
4242 ret = -EIO;
4243 goto exit_free_img_data;
4244 }
4245 }
4246
4247 ret = __qseecom_enable_clk_scale_up(data);
4248 if (ret) {
4249 ret = -EIO;
4250 goto exit_unregister_bus_bw_need;
4251 }
4252
4253 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4254 img_data, fw_size,
4255 ION_IOC_CLEAN_INV_CACHES);
4256 if (ret) {
4257 pr_err("cache operation failed %d\n", ret);
4258 goto exit_disable_clk_vote;
4259 }
4260
4261 /* SCM_CALL to load the image */
4262 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4263 &resp, sizeof(resp));
4264 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004265 pr_err("scm_call to load failed : ret %d, result %x\n",
4266 ret, resp.result);
4267 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4268 ret = -EEXIST;
4269 else
4270 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004271 goto exit_disable_clk_vote;
4272 }
4273
4274 switch (resp.result) {
4275 case QSEOS_RESULT_SUCCESS:
4276 *app_id = resp.data;
4277 break;
4278 case QSEOS_RESULT_INCOMPLETE:
4279 ret = __qseecom_process_incomplete_cmd(data, &resp);
4280 if (ret)
4281 pr_err("process_incomplete_cmd FAILED\n");
4282 else
4283 *app_id = resp.data;
4284 break;
4285 case QSEOS_RESULT_FAILURE:
4286 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4287 break;
4288 default:
4289 pr_err("scm call return unknown response %d\n", resp.result);
4290 ret = -EINVAL;
4291 break;
4292 }
4293
4294exit_disable_clk_vote:
4295 __qseecom_disable_clk_scale_down(data);
4296
4297exit_unregister_bus_bw_need:
4298 if (qseecom.support_bus_scaling) {
4299 mutex_lock(&qsee_bw_mutex);
4300 qseecom_unregister_bus_bandwidth_needs(data);
4301 mutex_unlock(&qsee_bw_mutex);
4302 }
4303
4304exit_free_img_data:
4305 __qseecom_free_img_data(&ihandle);
4306 return ret;
4307}
4308
/*
 * Load a QSEE common library image (@cmnlib_name, e.g. "cmnlib" or
 * "cmnlib64") into the secure side.
 *
 * Stages the split firmware image in an ION buffer, votes bandwidth and
 * clocks, flushes caches, and issues QSEOS_LOAD_SERV_IMAGE_COMMAND via
 * SCM (32- or 64-bit request layout depending on qsee_version).
 *
 * Returns 0 on success, negative errno on failure; all votes and the
 * ION buffer are released on every path via the goto cleanup chain.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;
	struct ion_handle *cmnlib_ion_handle = NULL;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
				cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	/* Fills load_req.mdt_len/img_len from the firmware files. */
	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* Make the staged image visible to the secure side. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener services before completing the load. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&cmnlib_ion_handle);
	return ret;
}
4429
4430static int qseecom_unload_commonlib_image(void)
4431{
4432 int ret = -EINVAL;
4433 struct qseecom_unload_lib_image_ireq unload_req = {0};
4434 struct qseecom_command_scm_resp resp;
4435
4436 /* Populate the remaining parameters */
4437 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4438
4439 /* SCM_CALL to load the image */
4440 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4441 sizeof(struct qseecom_unload_lib_image_ireq),
4442 &resp, sizeof(resp));
4443 if (ret) {
4444 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4445 ret = -EIO;
4446 } else {
4447 switch (resp.result) {
4448 case QSEOS_RESULT_SUCCESS:
4449 break;
4450 case QSEOS_RESULT_FAILURE:
4451 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4452 break;
4453 default:
4454 pr_err("scm call return unknown response %d\n",
4455 resp.result);
4456 ret = -EINVAL;
4457 break;
4458 }
4459 }
4460
4461 return ret;
4462}
4463
4464int qseecom_start_app(struct qseecom_handle **handle,
4465 char *app_name, uint32_t size)
4466{
4467 int32_t ret = 0;
4468 unsigned long flags = 0;
4469 struct qseecom_dev_handle *data = NULL;
4470 struct qseecom_check_app_ireq app_ireq;
4471 struct qseecom_registered_app_list *entry = NULL;
4472 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4473 bool found_app = false;
4474 size_t len;
4475 ion_phys_addr_t pa;
4476 uint32_t fw_size, app_arch;
4477 uint32_t app_id = 0;
4478
4479 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4480 pr_err("Not allowed to be called in %d state\n",
4481 atomic_read(&qseecom.qseecom_state));
4482 return -EPERM;
4483 }
4484 if (!app_name) {
4485 pr_err("failed to get the app name\n");
4486 return -EINVAL;
4487 }
4488
Zhen Kong64a6d7282017-06-16 11:55:07 -07004489 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004490 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004491 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004492 return -EINVAL;
4493 }
4494
4495 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4496 if (!(*handle))
4497 return -ENOMEM;
4498
4499 data = kzalloc(sizeof(*data), GFP_KERNEL);
4500 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304501 ret = -ENOMEM;
4502 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004503 }
4504 data->abort = 0;
4505 data->type = QSEECOM_CLIENT_APP;
4506 data->released = false;
4507 data->client.sb_length = size;
4508 data->client.user_virt_sb_base = 0;
4509 data->client.ihandle = NULL;
4510
4511 init_waitqueue_head(&data->abort_wq);
4512
4513 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4514 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4515 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4516 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304517 ret = -ENOMEM;
4518 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004519 }
4520 mutex_lock(&app_access_lock);
4521
Zhen Kong5d02be92018-05-29 16:17:29 -07004522recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004523 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4524 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4525 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4526 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304527 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004528
4529 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4530 if (app_id) {
4531 pr_warn("App id %d for [%s] app exists\n", app_id,
4532 (char *)app_ireq.app_name);
4533 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4534 list_for_each_entry(entry,
4535 &qseecom.registered_app_list_head, list){
4536 if (entry->app_id == app_id) {
4537 entry->ref_cnt++;
4538 found_app = true;
4539 break;
4540 }
4541 }
4542 spin_unlock_irqrestore(
4543 &qseecom.registered_app_list_lock, flags);
4544 if (!found_app)
4545 pr_warn("App_id %d [%s] was loaded but not registered\n",
4546 ret, (char *)app_ireq.app_name);
4547 } else {
4548 /* load the app and get the app_id */
4549 pr_debug("%s: Loading app for the first time'\n",
4550 qseecom.pdev->init_name);
4551 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004552 if (ret == -EEXIST) {
4553 pr_err("recheck if TA %s is loaded\n", app_name);
4554 goto recheck;
4555 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304556 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004557 }
4558 data->client.app_id = app_id;
4559 if (!found_app) {
4560 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4561 if (!entry) {
4562 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304563 ret = -ENOMEM;
4564 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004565 }
4566 entry->app_id = app_id;
4567 entry->ref_cnt = 1;
4568 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4569 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4570 ret = -EIO;
Zhen Konga6e3f512017-01-20 12:22:23 -08004571 kfree(entry);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304572 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004573 }
4574 entry->app_arch = app_arch;
4575 entry->app_blocked = false;
4576 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004577 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004578 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4579 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4580 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4581 flags);
4582 }
4583
4584 /* Get the physical address of the ION BUF */
4585 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4586 if (ret) {
4587 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4588 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304589 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004590 }
4591
4592 /* Populate the structure for sending scm call to load image */
4593 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4594 data->client.ihandle);
4595 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4596 pr_err("ION memory mapping for client shared buf failed\n");
4597 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304598 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004599 }
4600 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4601 data->client.sb_phys = (phys_addr_t)pa;
4602 (*handle)->dev = (void *)data;
4603 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4604 (*handle)->sbuf_len = data->client.sb_length;
4605
4606 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4607 if (!kclient_entry) {
4608 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304609 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004610 }
4611 kclient_entry->handle = *handle;
4612
4613 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4614 list_add_tail(&kclient_entry->list,
4615 &qseecom.registered_kclient_list_head);
4616 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4617
4618 mutex_unlock(&app_access_lock);
4619 return 0;
4620
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304621exit_ion_unmap_kernel:
4622 if (!IS_ERR_OR_NULL(data->client.ihandle))
4623 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4624exit_entry_free:
4625 kfree(entry);
4626exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004627 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304628 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4629 ion_free(qseecom.ion_clnt, data->client.ihandle);
4630 data->client.ihandle = NULL;
4631 }
4632exit_data_free:
4633 kfree(data);
4634exit_handle_free:
4635 if (*handle) {
4636 kfree(*handle);
4637 *handle = NULL;
4638 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004639 return ret;
4640}
4641EXPORT_SYMBOL(qseecom_start_app);
4642
/*
 * Kernel-client API: unload the app behind *handle and release the
 * handle.
 *
 * The kclient entry is removed from the registered list under the
 * spinlock, then the app is unloaded under app_access_lock. Only when
 * the unload succeeds (ret == 0) are the dev handle, the qseecom_handle
 * and the list entry freed (kzfree: zeroed before free) and *handle
 * cleared; on failure the caller's handle is left intact.
 *
 * Returns 0 on success, -EPERM when the driver is not READY, -EINVAL
 * for a bad/unknown handle, or the qseecom_unload_app() error.
 */
int qseecom_shutdown_app(struct qseecom_handle **handle)
{
	int ret = -EINVAL;
	struct qseecom_dev_handle *data;

	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	bool found_handle = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
				atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if ((handle == NULL)  || (*handle == NULL)) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data =	(struct qseecom_dev_handle *) ((*handle)->dev);
	mutex_lock(&app_access_lock);

	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
				list) {
		if (kclient->handle == (*handle)) {
			/* Unlink now; memory is freed after a clean unload. */
			list_del(&kclient->list);
			found_handle = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
	if (!found_handle)
		pr_err("Unable to find the handle, exiting\n");
	else
		ret = qseecom_unload_app(data, false);

	mutex_unlock(&app_access_lock);
	if (ret == 0) {
		kzfree(data);
		kzfree(*handle);
		kzfree(kclient);
		*handle = NULL;
	}

	return ret;
}
EXPORT_SYMBOL(qseecom_shutdown_app);
4691
/*
 * Kernel-client API: send a command to the trusted app behind @handle.
 *
 * @send_buf/@sbuf_len and @resp_buf/@rbuf_len must lie inside the
 * handle's shared buffer (enforced by __validate_send_cmd_inputs()).
 * Bus bandwidth and, when HLOS owns the crypto clock, the CE clock are
 * voted around the underlying __qseecom_send_cmd() call.
 *
 * Returns 0 on success, -EPERM when the driver is not READY, -EINVAL on
 * bad arguments, or the error from the send/bandwidth path.
 */
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
				atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
						ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		perf_enabled = true;
	}
	/* The "securemm" app requires the legacy command format. */
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req);
	data->use_legacy_cmd = false;
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	/* Drop the clock vote taken above for this call only. */
	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
EXPORT_SYMBOL(qseecom_send_command);
4771
4772int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4773{
4774 int ret = 0;
4775
4776 if ((handle == NULL) || (handle->dev == NULL)) {
4777 pr_err("No valid kernel client\n");
4778 return -EINVAL;
4779 }
4780 if (high) {
4781 if (qseecom.support_bus_scaling) {
4782 mutex_lock(&qsee_bw_mutex);
4783 __qseecom_register_bus_bandwidth_needs(handle->dev,
4784 HIGH);
4785 mutex_unlock(&qsee_bw_mutex);
4786 } else {
4787 ret = qseecom_perf_enable(handle->dev);
4788 if (ret)
4789 pr_err("Failed to vote for clock with err %d\n",
4790 ret);
4791 }
4792 } else {
4793 if (!qseecom.support_bus_scaling) {
4794 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4795 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4796 } else {
4797 mutex_lock(&qsee_bw_mutex);
4798 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4799 mutex_unlock(&qsee_bw_mutex);
4800 }
4801 }
4802 return ret;
4803}
4804EXPORT_SYMBOL(qseecom_set_bandwidth);
4805
/*
 * Entry point for the smcinvoke driver: service a listener request that
 * TZ reported through an smcinvoke SCM response.
 *
 * The SCM response registers are unpacked into a qseecom resp
 * (desc->ret[0]=result/cmd, ret[1]=session id, ret[2]=listener id) and
 * processed with dummy app/private-data records carrying the session id
 * as app_id, via either the reentrancy path or the legacy
 * incomplete-command path. The (possibly updated) resp fields are packed
 * back into desc->ret[] for the caller on return — even on failure.
 *
 * Returns 0 on success, negative errno on failure (also logged).
 */
int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
{
	struct qseecom_registered_app_list dummy_app_entry = { {0} };
	struct qseecom_dev_handle dummy_private_data = {0};
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	if (!desc) {
		pr_err("desc is NULL\n");
		return -EINVAL;
	}

	resp.result = desc->ret[0];	/*req_cmd*/
	resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
	resp.data = desc->ret[2];	/*listener_id*/

	/* The session id doubles as the app id for the dummy records. */
	dummy_private_data.client.app_id = desc->ret[1];
	dummy_app_entry.app_id = desc->ret[1];

	mutex_lock(&app_access_lock);
	if (qseecom.qsee_reentrancy_support)
		ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
					&dummy_private_data);
	else
		ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
					&resp);
	mutex_unlock(&app_access_lock);
	if (ret)
		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
			(int)desc->ret[0], (int)desc->ret[2],
			(int)desc->ret[1], ret);
	/* Hand the processed response back through the SCM registers. */
	desc->ret[0] = resp.result;
	desc->ret[1] = resp.resp_type;
	desc->ret[2] = resp.data;
	return ret;
}
EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4843
/*
 * Signal that a listener response is ready: set the global response flag
 * and wake any thread sleeping on send_resp_wq. Always returns 0.
 */
static int qseecom_send_resp(void)
{
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4850
4851static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4852{
4853 struct qseecom_registered_listener_list *this_lstnr = NULL;
4854
4855 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4856 this_lstnr = __qseecom_find_svc(data->listener.id);
4857 if (this_lstnr == NULL)
4858 return -EINVAL;
4859 qseecom.send_resp_flag = 1;
4860 this_lstnr->send_resp_flag = 1;
4861 wake_up_interruptible(&qseecom.send_resp_wq);
4862 return 0;
4863}
4864
/*
 * Sanity-check a modified-fd listener response coming from user space
 * before it is consumed.
 *
 * Verifies, in order: non-NULL arguments, a non-NULL response buffer,
 * a response length within the listener's shared buffer size, absence
 * of pointer/length integer overflow, containment of the whole
 * [resp_buf_ptr, resp_buf_ptr + resp_len) range inside the listener's
 * user-space shared buffer mapping, and that every ion-fd patch offset
 * lies inside the response buffer.
 *
 * Returns 0 when all checks pass, -EINVAL otherwise.
 */
static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
			struct qseecom_send_modfd_listener_resp *resp,
			struct qseecom_registered_listener_list *this_lstnr)
{
	int i;

	if (!data || !resp || !this_lstnr) {
		pr_err("listener handle or resp msg is null\n");
		return -EINVAL;
	}

	if (resp->resp_buf_ptr == NULL) {
		pr_err("resp buffer is null\n");
		return -EINVAL;
	}
	/* validate resp buf length */
	if ((resp->resp_len == 0) ||
			(resp->resp_len > this_lstnr->sb_length)) {
		pr_err("resp buf length %d not valid\n", resp->resp_len);
		return -EINVAL;
	}

	/*
	 * Overflow checks must come first so the range comparisons below
	 * cannot be defeated by a wrapped pointer + length sum.
	 */
	if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)this_lstnr->user_virt_sb_base >
					(ULONG_MAX - this_lstnr->sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* validate resp buf: entire range must be inside the shared buffer */
	if (((uintptr_t)resp->resp_buf_ptr <
		(uintptr_t)this_lstnr->user_virt_sb_base) ||
		((uintptr_t)resp->resp_buf_ptr >=
		((uintptr_t)this_lstnr->user_virt_sb_base +
			this_lstnr->sb_length)) ||
		(((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
		((uintptr_t)this_lstnr->user_virt_sb_base +
			this_lstnr->sb_length))) {
		pr_err("resp buf is out of shared buffer region\n");
		return -EINVAL;
	}

	/* validate offsets: each patch point must fall inside the resp buf */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
			pr_err("Invalid offset %d = 0x%x\n",
				i, resp->ifd_data[i].cmd_buf_offset);
			return -EINVAL;
		}
	}

	return 0;
}
4920
4921static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4922 void __user *argp, bool is_64bit_addr)
4923{
4924 struct qseecom_send_modfd_listener_resp resp;
4925 struct qseecom_registered_listener_list *this_lstnr = NULL;
4926
4927 if (copy_from_user(&resp, argp, sizeof(resp))) {
4928 pr_err("copy_from_user failed");
4929 return -EINVAL;
4930 }
4931
4932 this_lstnr = __qseecom_find_svc(data->listener.id);
4933 if (this_lstnr == NULL)
4934 return -EINVAL;
4935
4936 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
4937 return -EINVAL;
4938
4939 resp.resp_buf_ptr = this_lstnr->sb_virt +
4940 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
4941
4942 if (!is_64bit_addr)
4943 __qseecom_update_cmd_buf(&resp, false, data);
4944 else
4945 __qseecom_update_cmd_buf_64(&resp, false, data);
4946 qseecom.send_resp_flag = 1;
4947 this_lstnr->send_resp_flag = 1;
4948 wake_up_interruptible(&qseecom.send_resp_wq);
4949 return 0;
4950}
4951
/* Ioctl helper: modified-fd listener response with 32-bit addresses. */
static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
						void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, false);
}
4957
/* Ioctl helper: modified-fd listener response with 64-bit addresses. */
static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
						void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, true);
}
4963
4964static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
4965 void __user *argp)
4966{
4967 struct qseecom_qseos_version_req req;
4968
4969 if (copy_from_user(&req, argp, sizeof(req))) {
4970 pr_err("copy_from_user failed");
4971 return -EINVAL;
4972 }
4973 req.qseos_version = qseecom.qseos_version;
4974 if (copy_to_user(argp, &req, sizeof(req))) {
4975 pr_err("copy_to_user failed");
4976 return -EINVAL;
4977 }
4978 return 0;
4979}
4980
/*
 * Take one reference on the crypto-engine clocks for @ce (CLK_QSEE or
 * CLK_CE_DRV).  The first reference actually prepares/enables the core,
 * interface, and bus clocks in that order, unwinding on partial failure;
 * later references just bump the count under clk_access_lock.
 *
 * Returns 0 on success, -EINVAL for an unknown @ce, and -EIO when the
 * count is saturated or any clk_prepare_enable() fails.
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	/* Clocks already on: just take another reference. */
	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

/* Unwind in reverse order of enabling; err paths collapse to -EIO. */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
5048
5049static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5050{
5051 struct qseecom_clk *qclk;
5052
5053 if (qseecom.no_clock_support)
5054 return;
5055
5056 if (ce == CLK_QSEE)
5057 qclk = &qseecom.qsee;
5058 else
5059 qclk = &qseecom.ce_drv;
5060
5061 mutex_lock(&clk_access_lock);
5062
5063 if (qclk->clk_access_cnt == 0) {
5064 mutex_unlock(&clk_access_lock);
5065 return;
5066 }
5067
5068 if (qclk->clk_access_cnt == 1) {
5069 if (qclk->ce_clk != NULL)
5070 clk_disable_unprepare(qclk->ce_clk);
5071 if (qclk->ce_core_clk != NULL)
5072 clk_disable_unprepare(qclk->ce_core_clk);
5073 if (qclk->ce_bus_clk != NULL)
5074 clk_disable_unprepare(qclk->ce_bus_clk);
5075 }
5076 qclk->clk_access_cnt--;
5077 mutex_unlock(&clk_access_lock);
5078}
5079
/*
 * Take a bandwidth vote of class @clk_type (CLK_DFAB or CLK_SFPB) on
 * behalf of the client @data.
 *
 * The first vote of a class issues a msm_bus_scale request: level 3 when
 * the other class already holds votes (both active), otherwise level 1
 * (DFAB) or 2 (SFPB), enabling the QSEE clocks first when this driver
 * owns the CE source clock.  Later votes only bump the counter.  On
 * success the matching per-client flag (perf_enabled/fast_load_enabled)
 * is set so the vote can be torn down when the client goes away.
 *
 * Returns 0 on success or the bus-scaling/clock error code.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			/* First DFAB vote: 3 = both classes, 1 = DFAB only */
			if (qseecom.qsee_sfpb_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					/* Roll back the clock on failure */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			/* First SFPB vote: 3 = both classes, 2 = SFPB only */
			if (qseecom.qsee_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					/* Roll back the clock on failure */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}

			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
5163
5164static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5165 int32_t clk_type)
5166{
5167 int32_t ret = 0;
5168 struct qseecom_clk *qclk;
5169
5170 qclk = &qseecom.qsee;
5171
5172 if (qseecom.no_clock_support)
5173 return;
5174 if (!qseecom.qsee_perf_client)
5175 return;
5176
5177 switch (clk_type) {
5178 case CLK_DFAB:
5179 mutex_lock(&qsee_bw_mutex);
5180 if (qseecom.qsee_bw_count == 0) {
5181 pr_err("Client error.Extra call to disable DFAB clk\n");
5182 mutex_unlock(&qsee_bw_mutex);
5183 return;
5184 }
5185
5186 if (qseecom.qsee_bw_count == 1) {
5187 if (qseecom.qsee_sfpb_bw_count > 0)
5188 ret = msm_bus_scale_client_update_request(
5189 qseecom.qsee_perf_client, 2);
5190 else {
5191 ret = msm_bus_scale_client_update_request(
5192 qseecom.qsee_perf_client, 0);
5193 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5194 __qseecom_disable_clk(CLK_QSEE);
5195 }
5196 if (ret)
5197 pr_err("SFPB Bandwidth req fail (%d)\n",
5198 ret);
5199 else {
5200 qseecom.qsee_bw_count--;
5201 data->perf_enabled = false;
5202 }
5203 } else {
5204 qseecom.qsee_bw_count--;
5205 data->perf_enabled = false;
5206 }
5207 mutex_unlock(&qsee_bw_mutex);
5208 break;
5209 case CLK_SFPB:
5210 mutex_lock(&qsee_bw_mutex);
5211 if (qseecom.qsee_sfpb_bw_count == 0) {
5212 pr_err("Client error.Extra call to disable SFPB clk\n");
5213 mutex_unlock(&qsee_bw_mutex);
5214 return;
5215 }
5216 if (qseecom.qsee_sfpb_bw_count == 1) {
5217 if (qseecom.qsee_bw_count > 0)
5218 ret = msm_bus_scale_client_update_request(
5219 qseecom.qsee_perf_client, 1);
5220 else {
5221 ret = msm_bus_scale_client_update_request(
5222 qseecom.qsee_perf_client, 0);
5223 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5224 __qseecom_disable_clk(CLK_QSEE);
5225 }
5226 if (ret)
5227 pr_err("SFPB Bandwidth req fail (%d)\n",
5228 ret);
5229 else {
5230 qseecom.qsee_sfpb_bw_count--;
5231 data->fast_load_enabled = false;
5232 }
5233 } else {
5234 qseecom.qsee_sfpb_bw_count--;
5235 data->fast_load_enabled = false;
5236 }
5237 mutex_unlock(&qsee_bw_mutex);
5238 break;
5239 default:
5240 pr_err("Clock type not defined\n");
5241 break;
5242 }
5243
5244}
5245
5246static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5247 void __user *argp)
5248{
5249 struct ion_handle *ihandle; /* Ion handle */
5250 struct qseecom_load_img_req load_img_req;
5251 int uret = 0;
5252 int ret;
5253 ion_phys_addr_t pa = 0;
5254 size_t len;
5255 struct qseecom_load_app_ireq load_req;
5256 struct qseecom_load_app_64bit_ireq load_req_64bit;
5257 struct qseecom_command_scm_resp resp;
5258 void *cmd_buf = NULL;
5259 size_t cmd_len;
5260 /* Copy the relevant information needed for loading the image */
5261 if (copy_from_user(&load_img_req,
5262 (void __user *)argp,
5263 sizeof(struct qseecom_load_img_req))) {
5264 pr_err("copy_from_user failed\n");
5265 return -EFAULT;
5266 }
5267
5268 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005269 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005270 load_img_req.ifd_data_fd);
5271 if (IS_ERR_OR_NULL(ihandle)) {
5272 pr_err("Ion client could not retrieve the handle\n");
5273 return -ENOMEM;
5274 }
5275
5276 /* Get the physical address of the ION BUF */
5277 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5278 if (ret) {
5279 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5280 ret);
5281 return ret;
5282 }
5283 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5284 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5285 len, load_img_req.mdt_len,
5286 load_img_req.img_len);
5287 return ret;
5288 }
5289 /* Populate the structure for sending scm call to load image */
5290 if (qseecom.qsee_version < QSEE_VERSION_40) {
5291 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5292 load_req.mdt_len = load_img_req.mdt_len;
5293 load_req.img_len = load_img_req.img_len;
5294 load_req.phy_addr = (uint32_t)pa;
5295 cmd_buf = (void *)&load_req;
5296 cmd_len = sizeof(struct qseecom_load_app_ireq);
5297 } else {
5298 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5299 load_req_64bit.mdt_len = load_img_req.mdt_len;
5300 load_req_64bit.img_len = load_img_req.img_len;
5301 load_req_64bit.phy_addr = (uint64_t)pa;
5302 cmd_buf = (void *)&load_req_64bit;
5303 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5304 }
5305
5306 if (qseecom.support_bus_scaling) {
5307 mutex_lock(&qsee_bw_mutex);
5308 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5309 mutex_unlock(&qsee_bw_mutex);
5310 if (ret) {
5311 ret = -EIO;
5312 goto exit_cpu_restore;
5313 }
5314 }
5315
5316 /* Vote for the SFPB clock */
5317 ret = __qseecom_enable_clk_scale_up(data);
5318 if (ret) {
5319 ret = -EIO;
5320 goto exit_register_bus_bandwidth_needs;
5321 }
5322 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5323 ION_IOC_CLEAN_INV_CACHES);
5324 if (ret) {
5325 pr_err("cache operation failed %d\n", ret);
5326 goto exit_disable_clock;
5327 }
5328 /* SCM_CALL to load the external elf */
5329 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5330 &resp, sizeof(resp));
5331 if (ret) {
5332 pr_err("scm_call to load failed : ret %d\n",
5333 ret);
5334 ret = -EFAULT;
5335 goto exit_disable_clock;
5336 }
5337
5338 switch (resp.result) {
5339 case QSEOS_RESULT_SUCCESS:
5340 break;
5341 case QSEOS_RESULT_INCOMPLETE:
5342 pr_err("%s: qseos result incomplete\n", __func__);
5343 ret = __qseecom_process_incomplete_cmd(data, &resp);
5344 if (ret)
5345 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5346 break;
5347 case QSEOS_RESULT_FAILURE:
5348 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5349 ret = -EFAULT;
5350 break;
5351 default:
5352 pr_err("scm_call response result %d not supported\n",
5353 resp.result);
5354 ret = -EFAULT;
5355 break;
5356 }
5357
5358exit_disable_clock:
5359 __qseecom_disable_clk_scale_down(data);
5360
5361exit_register_bus_bandwidth_needs:
5362 if (qseecom.support_bus_scaling) {
5363 mutex_lock(&qsee_bw_mutex);
5364 uret = qseecom_unregister_bus_bandwidth_needs(data);
5365 mutex_unlock(&qsee_bw_mutex);
5366 if (uret)
5367 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5368 uret, ret);
5369 }
5370
5371exit_cpu_restore:
5372 /* Deallocate the handle */
5373 if (!IS_ERR_OR_NULL(ihandle))
5374 ion_free(qseecom.ion_clnt, ihandle);
5375 return ret;
5376}
5377
5378static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5379{
5380 int ret = 0;
5381 struct qseecom_command_scm_resp resp;
5382 struct qseecom_unload_app_ireq req;
5383
5384 /* unavailable client app */
5385 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5386
5387 /* Populate the structure for sending scm call to unload image */
5388 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5389
5390 /* SCM_CALL to unload the external elf */
5391 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5392 sizeof(struct qseecom_unload_app_ireq),
5393 &resp, sizeof(resp));
5394 if (ret) {
5395 pr_err("scm_call to unload failed : ret %d\n",
5396 ret);
5397 ret = -EFAULT;
5398 goto qseecom_unload_external_elf_scm_err;
5399 }
5400 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5401 ret = __qseecom_process_incomplete_cmd(data, &resp);
5402 if (ret)
5403 pr_err("process_incomplete_cmd fail err: %d\n",
5404 ret);
5405 } else {
5406 if (resp.result != QSEOS_RESULT_SUCCESS) {
5407 pr_err("scm_call to unload image failed resp.result =%d\n",
5408 resp.result);
5409 ret = -EFAULT;
5410 }
5411 }
5412
5413qseecom_unload_external_elf_scm_err:
5414
5415 return ret;
5416}
5417
5418static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5419 void __user *argp)
5420{
5421
5422 int32_t ret;
5423 struct qseecom_qseos_app_load_query query_req;
5424 struct qseecom_check_app_ireq req;
5425 struct qseecom_registered_app_list *entry = NULL;
5426 unsigned long flags = 0;
5427 uint32_t app_arch = 0, app_id = 0;
5428 bool found_app = false;
5429
5430 /* Copy the relevant information needed for loading the image */
5431 if (copy_from_user(&query_req,
5432 (void __user *)argp,
5433 sizeof(struct qseecom_qseos_app_load_query))) {
5434 pr_err("copy_from_user failed\n");
5435 return -EFAULT;
5436 }
5437
5438 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5439 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5440 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5441
5442 ret = __qseecom_check_app_exists(req, &app_id);
5443 if (ret) {
5444 pr_err(" scm call to check if app is loaded failed");
5445 return ret; /* scm call failed */
5446 }
5447 if (app_id) {
5448 pr_debug("App id %d (%s) already exists\n", app_id,
5449 (char *)(req.app_name));
5450 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5451 list_for_each_entry(entry,
5452 &qseecom.registered_app_list_head, list){
5453 if (entry->app_id == app_id) {
5454 app_arch = entry->app_arch;
5455 entry->ref_cnt++;
5456 found_app = true;
5457 break;
5458 }
5459 }
5460 spin_unlock_irqrestore(
5461 &qseecom.registered_app_list_lock, flags);
5462 data->client.app_id = app_id;
5463 query_req.app_id = app_id;
5464 if (app_arch) {
5465 data->client.app_arch = app_arch;
5466 query_req.app_arch = app_arch;
5467 } else {
5468 data->client.app_arch = 0;
5469 query_req.app_arch = 0;
5470 }
5471 strlcpy(data->client.app_name, query_req.app_name,
5472 MAX_APP_NAME_SIZE);
5473 /*
5474 * If app was loaded by appsbl before and was not registered,
5475 * regiser this app now.
5476 */
5477 if (!found_app) {
5478 pr_debug("Register app %d [%s] which was loaded before\n",
5479 ret, (char *)query_req.app_name);
5480 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5481 if (!entry) {
5482 pr_err("kmalloc for app entry failed\n");
5483 return -ENOMEM;
5484 }
5485 entry->app_id = app_id;
5486 entry->ref_cnt = 1;
5487 entry->app_arch = data->client.app_arch;
5488 strlcpy(entry->app_name, data->client.app_name,
5489 MAX_APP_NAME_SIZE);
5490 entry->app_blocked = false;
5491 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005492 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005493 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5494 flags);
5495 list_add_tail(&entry->list,
5496 &qseecom.registered_app_list_head);
5497 spin_unlock_irqrestore(
5498 &qseecom.registered_app_list_lock, flags);
5499 }
5500 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5501 pr_err("copy_to_user failed\n");
5502 return -EFAULT;
5503 }
5504 return -EEXIST; /* app already loaded */
5505 } else {
5506 return 0; /* app not loaded */
5507 }
5508}
5509
5510static int __qseecom_get_ce_pipe_info(
5511 enum qseecom_key_management_usage_type usage,
5512 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5513{
5514 int ret = -EINVAL;
5515 int i, j;
5516 struct qseecom_ce_info_use *p = NULL;
5517 int total = 0;
5518 struct qseecom_ce_pipe_entry *pcepipe;
5519
5520 switch (usage) {
5521 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5522 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5523 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5524 if (qseecom.support_fde) {
5525 p = qseecom.ce_info.fde;
5526 total = qseecom.ce_info.num_fde;
5527 } else {
5528 pr_err("system does not support fde\n");
5529 return -EINVAL;
5530 }
5531 break;
5532 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5533 if (qseecom.support_pfe) {
5534 p = qseecom.ce_info.pfe;
5535 total = qseecom.ce_info.num_pfe;
5536 } else {
5537 pr_err("system does not support pfe\n");
5538 return -EINVAL;
5539 }
5540 break;
5541 default:
5542 pr_err("unsupported usage %d\n", usage);
5543 return -EINVAL;
5544 }
5545
5546 for (j = 0; j < total; j++) {
5547 if (p->unit_num == unit) {
5548 pcepipe = p->ce_pipe_entry;
5549 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5550 (*ce_hw)[i] = pcepipe->ce_num;
5551 *pipe = pcepipe->ce_pipe_pair;
5552 pcepipe++;
5553 }
5554 ret = 0;
5555 break;
5556 }
5557 p++;
5558 }
5559 return ret;
5560}
5561
/*
 * Issue the QSEOS_GENERATE_KEY SCM call described by @ireq with the QSEE
 * clocks held, completing any INCOMPLETE response.  A "key ID exists"
 * result (whether reported directly, via -EINVAL, or after incomplete
 * processing) is treated as success since the key is already present.
 *
 * Returns 0 on success/already-exists, -EFAULT on usage or SCM errors,
 * -EINVAL on a TZ failure result, or the clock-enable error.
 */
static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_generate_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_generate_ireq),
		&resp, sizeof(resp));
	if (ret) {
		/* TZ signals "already exists" through -EINVAL + result code */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
			pr_debug("Key ID exists.\n");
			ret = 0;
		} else {
			pr_err("scm call to generate key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto generate_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
		pr_debug("Key ID exists.\n");
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
				pr_debug("Key ID exists.\n");
				ret = 0;
			} else {
				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			}
		}
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("gen key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
generate_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5621
/*
 * Issue the key-delete SCM call described by @ireq with the QSEE clocks
 * held, completing any INCOMPLETE response.  A "max password attempts"
 * result is mapped to -ERANGE so callers can distinguish it.
 *
 * Returns 0 on success, -EFAULT on usage/SCM errors, -ERANGE on max
 * attempts, -EINVAL on a TZ failure result, or the clock-enable error.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* TZ signals "max attempts" through -EINVAL + result code */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5682
5683static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5684 enum qseecom_key_management_usage_type usage,
5685 struct qseecom_key_select_ireq *ireq)
5686{
5687 struct qseecom_command_scm_resp resp;
5688 int ret;
5689
5690 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5691 usage >= QSEOS_KM_USAGE_MAX) {
5692 pr_err("Error:: unsupported usage %d\n", usage);
5693 return -EFAULT;
5694 }
5695 ret = __qseecom_enable_clk(CLK_QSEE);
5696 if (ret)
5697 return ret;
5698
5699 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5700 ret = __qseecom_enable_clk(CLK_CE_DRV);
5701 if (ret)
5702 return ret;
5703 }
5704
5705 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5706 ireq, sizeof(struct qseecom_key_select_ireq),
5707 &resp, sizeof(struct qseecom_command_scm_resp));
5708 if (ret) {
5709 if (ret == -EINVAL &&
5710 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5711 pr_debug("Max attempts to input password reached.\n");
5712 ret = -ERANGE;
5713 } else if (ret == -EINVAL &&
5714 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5715 pr_debug("Set Key operation under processing...\n");
5716 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5717 } else {
5718 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5719 ret);
5720 ret = -EFAULT;
5721 }
5722 goto set_key_exit;
5723 }
5724
5725 switch (resp.result) {
5726 case QSEOS_RESULT_SUCCESS:
5727 break;
5728 case QSEOS_RESULT_INCOMPLETE:
5729 ret = __qseecom_process_incomplete_cmd(data, &resp);
5730 if (ret) {
5731 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5732 resp.result);
5733 if (resp.result ==
5734 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5735 pr_debug("Set Key operation under processing...\n");
5736 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5737 }
5738 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5739 pr_debug("Max attempts to input password reached.\n");
5740 ret = -ERANGE;
5741 }
5742 }
5743 break;
5744 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5745 pr_debug("Max attempts to input password reached.\n");
5746 ret = -ERANGE;
5747 break;
5748 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5749 pr_debug("Set Key operation under processing...\n");
5750 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5751 break;
5752 case QSEOS_RESULT_FAILURE:
5753 default:
5754 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5755 ret = -EINVAL;
5756 break;
5757 }
5758set_key_exit:
5759 __qseecom_disable_clk(CLK_QSEE);
5760 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5761 __qseecom_disable_clk(CLK_CE_DRV);
5762 return ret;
5763}
5764
/*
 * Issue the key user-info update SCM call described by @ireq with the
 * QSEE clocks held, completing any INCOMPLETE response.  A pending TZ
 * operation is surfaced as QSEOS_RESULT_FAIL_PENDING_OPERATION (positive)
 * so callers can retry after a delay.
 *
 * Returns 0 on success, -EFAULT on usage/SCM errors,
 * QSEOS_RESULT_FAIL_PENDING_OPERATION when TZ is still busy, -EINVAL on
 * a TZ failure result, or the clock-enable error.
 */
static int __qseecom_update_current_key_user_info(
		struct qseecom_dev_handle *data,
		enum qseecom_key_management_usage_type usage,
		struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* TZ signals "pending" through -EINVAL + result code */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
				ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		/* Pending takes precedence over the incomplete-cmd result */
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5826
5827
5828static int qseecom_enable_ice_setup(int usage)
5829{
5830 int ret = 0;
5831
5832 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5833 ret = qcom_ice_setup_ice_hw("ufs", true);
5834 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5835 ret = qcom_ice_setup_ice_hw("sdcc", true);
5836
5837 return ret;
5838}
5839
5840static int qseecom_disable_ice_setup(int usage)
5841{
5842 int ret = 0;
5843
5844 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5845 ret = qcom_ice_setup_ice_hw("ufs", false);
5846 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5847 ret = qcom_ice_setup_ice_hw("sdcc", false);
5848
5849 return ret;
5850}
5851
5852static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5853{
5854 struct qseecom_ce_info_use *pce_info_use, *p;
5855 int total = 0;
5856 int i;
5857
5858 switch (usage) {
5859 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5860 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5861 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5862 p = qseecom.ce_info.fde;
5863 total = qseecom.ce_info.num_fde;
5864 break;
5865 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5866 p = qseecom.ce_info.pfe;
5867 total = qseecom.ce_info.num_pfe;
5868 break;
5869 default:
5870 pr_err("unsupported usage %d\n", usage);
5871 return -EINVAL;
5872 }
5873
5874 pce_info_use = NULL;
5875
5876 for (i = 0; i < total; i++) {
5877 if (p->unit_num == unit) {
5878 pce_info_use = p;
5879 break;
5880 }
5881 p++;
5882 }
5883 if (!pce_info_use) {
5884 pr_err("can not find %d\n", unit);
5885 return -EINVAL;
5886 }
5887 return pce_info_use->num_ce_pipe_entries;
5888}
5889
5890static int qseecom_create_key(struct qseecom_dev_handle *data,
5891 void __user *argp)
5892{
5893 int i;
5894 uint32_t *ce_hw = NULL;
5895 uint32_t pipe = 0;
5896 int ret = 0;
5897 uint32_t flags = 0;
5898 struct qseecom_create_key_req create_key_req;
5899 struct qseecom_key_generate_ireq generate_key_ireq;
5900 struct qseecom_key_select_ireq set_key_ireq;
5901 uint32_t entries = 0;
5902
5903 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5904 if (ret) {
5905 pr_err("copy_from_user failed\n");
5906 return ret;
5907 }
5908
5909 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5910 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5911 pr_err("unsupported usage %d\n", create_key_req.usage);
5912 ret = -EFAULT;
5913 return ret;
5914 }
5915 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5916 create_key_req.usage);
5917 if (entries <= 0) {
5918 pr_err("no ce instance for usage %d instance %d\n",
5919 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5920 ret = -EINVAL;
5921 return ret;
5922 }
5923
5924 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5925 if (!ce_hw) {
5926 ret = -ENOMEM;
5927 return ret;
5928 }
5929 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5930 DEFAULT_CE_INFO_UNIT);
5931 if (ret) {
5932 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5933 ret = -EINVAL;
5934 goto free_buf;
5935 }
5936
5937 if (qseecom.fde_key_size)
5938 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5939 else
5940 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5941
5942 generate_key_ireq.flags = flags;
5943 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5944 memset((void *)generate_key_ireq.key_id,
5945 0, QSEECOM_KEY_ID_SIZE);
5946 memset((void *)generate_key_ireq.hash32,
5947 0, QSEECOM_HASH_SIZE);
5948 memcpy((void *)generate_key_ireq.key_id,
5949 (void *)key_id_array[create_key_req.usage].desc,
5950 QSEECOM_KEY_ID_SIZE);
5951 memcpy((void *)generate_key_ireq.hash32,
5952 (void *)create_key_req.hash32,
5953 QSEECOM_HASH_SIZE);
5954
5955 ret = __qseecom_generate_and_save_key(data,
5956 create_key_req.usage, &generate_key_ireq);
5957 if (ret) {
5958 pr_err("Failed to generate key on storage: %d\n", ret);
5959 goto free_buf;
5960 }
5961
5962 for (i = 0; i < entries; i++) {
5963 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5964 if (create_key_req.usage ==
5965 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5966 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5967 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5968
5969 } else if (create_key_req.usage ==
5970 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5971 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5972 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5973
5974 } else {
5975 set_key_ireq.ce = ce_hw[i];
5976 set_key_ireq.pipe = pipe;
5977 }
5978 set_key_ireq.flags = flags;
5979
5980 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5981 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5982 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5983 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5984 memcpy((void *)set_key_ireq.key_id,
5985 (void *)key_id_array[create_key_req.usage].desc,
5986 QSEECOM_KEY_ID_SIZE);
5987 memcpy((void *)set_key_ireq.hash32,
5988 (void *)create_key_req.hash32,
5989 QSEECOM_HASH_SIZE);
5990 /*
5991 * It will return false if it is GPCE based crypto instance or
5992 * ICE is setup properly
5993 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07005994 ret = qseecom_enable_ice_setup(create_key_req.usage);
5995 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005996 goto free_buf;
5997
5998 do {
5999 ret = __qseecom_set_clear_ce_key(data,
6000 create_key_req.usage,
6001 &set_key_ireq);
6002 /*
6003 * wait a little before calling scm again to let other
6004 * processes run
6005 */
6006 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6007 msleep(50);
6008
6009 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6010
6011 qseecom_disable_ice_setup(create_key_req.usage);
6012
6013 if (ret) {
6014 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6015 pipe, ce_hw[i], ret);
6016 goto free_buf;
6017 } else {
6018 pr_err("Set the key successfully\n");
6019 if ((create_key_req.usage ==
6020 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6021 (create_key_req.usage ==
6022 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6023 goto free_buf;
6024 }
6025 }
6026
6027free_buf:
6028 kzfree(ce_hw);
6029 return ret;
6030}
6031
6032static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6033 void __user *argp)
6034{
6035 uint32_t *ce_hw = NULL;
6036 uint32_t pipe = 0;
6037 int ret = 0;
6038 uint32_t flags = 0;
6039 int i, j;
6040 struct qseecom_wipe_key_req wipe_key_req;
6041 struct qseecom_key_delete_ireq delete_key_ireq;
6042 struct qseecom_key_select_ireq clear_key_ireq;
6043 uint32_t entries = 0;
6044
6045 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6046 if (ret) {
6047 pr_err("copy_from_user failed\n");
6048 return ret;
6049 }
6050
6051 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6052 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6053 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6054 ret = -EFAULT;
6055 return ret;
6056 }
6057
6058 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6059 wipe_key_req.usage);
6060 if (entries <= 0) {
6061 pr_err("no ce instance for usage %d instance %d\n",
6062 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6063 ret = -EINVAL;
6064 return ret;
6065 }
6066
6067 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6068 if (!ce_hw) {
6069 ret = -ENOMEM;
6070 return ret;
6071 }
6072
6073 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6074 DEFAULT_CE_INFO_UNIT);
6075 if (ret) {
6076 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6077 ret = -EINVAL;
6078 goto free_buf;
6079 }
6080
6081 if (wipe_key_req.wipe_key_flag) {
6082 delete_key_ireq.flags = flags;
6083 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6084 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6085 memcpy((void *)delete_key_ireq.key_id,
6086 (void *)key_id_array[wipe_key_req.usage].desc,
6087 QSEECOM_KEY_ID_SIZE);
6088 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6089
6090 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6091 &delete_key_ireq);
6092 if (ret) {
6093 pr_err("Failed to delete key from ssd storage: %d\n",
6094 ret);
6095 ret = -EFAULT;
6096 goto free_buf;
6097 }
6098 }
6099
6100 for (j = 0; j < entries; j++) {
6101 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6102 if (wipe_key_req.usage ==
6103 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6104 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6105 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6106 } else if (wipe_key_req.usage ==
6107 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6108 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6109 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6110 } else {
6111 clear_key_ireq.ce = ce_hw[j];
6112 clear_key_ireq.pipe = pipe;
6113 }
6114 clear_key_ireq.flags = flags;
6115 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6116 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6117 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6118 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6119
6120 /*
6121 * It will return false if it is GPCE based crypto instance or
6122 * ICE is setup properly
6123 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006124 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6125 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006126 goto free_buf;
6127
6128 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6129 &clear_key_ireq);
6130
6131 qseecom_disable_ice_setup(wipe_key_req.usage);
6132
6133 if (ret) {
6134 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6135 pipe, ce_hw[j], ret);
6136 ret = -EFAULT;
6137 goto free_buf;
6138 }
6139 }
6140
6141free_buf:
6142 kzfree(ce_hw);
6143 return ret;
6144}
6145
6146static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6147 void __user *argp)
6148{
6149 int ret = 0;
6150 uint32_t flags = 0;
6151 struct qseecom_update_key_userinfo_req update_key_req;
6152 struct qseecom_key_userinfo_update_ireq ireq;
6153
6154 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6155 if (ret) {
6156 pr_err("copy_from_user failed\n");
6157 return ret;
6158 }
6159
6160 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6161 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6162 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6163 return -EFAULT;
6164 }
6165
6166 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6167
6168 if (qseecom.fde_key_size)
6169 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6170 else
6171 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6172
6173 ireq.flags = flags;
6174 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6175 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6176 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6177 memcpy((void *)ireq.key_id,
6178 (void *)key_id_array[update_key_req.usage].desc,
6179 QSEECOM_KEY_ID_SIZE);
6180 memcpy((void *)ireq.current_hash32,
6181 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6182 memcpy((void *)ireq.new_hash32,
6183 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6184
6185 do {
6186 ret = __qseecom_update_current_key_user_info(data,
6187 update_key_req.usage,
6188 &ireq);
6189 /*
6190 * wait a little before calling scm again to let other
6191 * processes run
6192 */
6193 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6194 msleep(50);
6195
6196 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6197 if (ret) {
6198 pr_err("Failed to update key info: %d\n", ret);
6199 return ret;
6200 }
6201 return ret;
6202
6203}
6204static int qseecom_is_es_activated(void __user *argp)
6205{
Zhen Kong26e62742018-05-04 17:19:06 -07006206 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006207 struct qseecom_command_scm_resp resp;
6208 int ret;
6209
6210 if (qseecom.qsee_version < QSEE_VERSION_04) {
6211 pr_err("invalid qsee version\n");
6212 return -ENODEV;
6213 }
6214
6215 if (argp == NULL) {
6216 pr_err("arg is null\n");
6217 return -EINVAL;
6218 }
6219
6220 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6221 &req, sizeof(req), &resp, sizeof(resp));
6222 if (ret) {
6223 pr_err("scm_call failed\n");
6224 return ret;
6225 }
6226
6227 req.is_activated = resp.result;
6228 ret = copy_to_user(argp, &req, sizeof(req));
6229 if (ret) {
6230 pr_err("copy_to_user failed\n");
6231 return ret;
6232 }
6233
6234 return 0;
6235}
6236
6237static int qseecom_save_partition_hash(void __user *argp)
6238{
6239 struct qseecom_save_partition_hash_req req;
6240 struct qseecom_command_scm_resp resp;
6241 int ret;
6242
6243 memset(&resp, 0x00, sizeof(resp));
6244
6245 if (qseecom.qsee_version < QSEE_VERSION_04) {
6246 pr_err("invalid qsee version\n");
6247 return -ENODEV;
6248 }
6249
6250 if (argp == NULL) {
6251 pr_err("arg is null\n");
6252 return -EINVAL;
6253 }
6254
6255 ret = copy_from_user(&req, argp, sizeof(req));
6256 if (ret) {
6257 pr_err("copy_from_user failed\n");
6258 return ret;
6259 }
6260
6261 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6262 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6263 if (ret) {
6264 pr_err("qseecom_scm_call failed\n");
6265 return ret;
6266 }
6267
6268 return 0;
6269}
6270
/*
 * qseecom_mdtp_cipher_dip() - cipher an MDTP Device Integrity Partition
 * (DIP) blob through a TZ SCM call.
 *
 * Copies the user input into a page-aligned kernel buffer, hands physical
 * addresses of the in/out buffers to TZ, then copies the result back to
 * the user's output buffer.  Uses the do { } while (0) + break idiom so
 * that all exits fall through to the common kzfree() cleanup.
 *
 * Returns 0 on success or a negative errno on failure.
 * NOTE(review): on copy_from_user/copy_to_user failure the positive
 * uncopied-byte count is propagated as the return value — confirm whether
 * callers expect a negative errno here.
 */
static int qseecom_mdtp_cipher_dip(void __user *argp)
{
	struct qseecom_mdtp_cipher_dip_req req;
	u32 tzbuflenin, tzbuflenout;
	char *tzbufin = NULL, *tzbufout = NULL;
	struct scm_desc desc = {0};
	int ret;

	do {
		/* Copy the parameters from userspace */
		if (argp == NULL) {
			pr_err("arg is null\n");
			ret = -EINVAL;
			break;
		}

		ret = copy_from_user(&req, argp, sizeof(req));
		if (ret) {
			pr_err("copy_from_user failed, ret= %d\n", ret);
			break;
		}

		/* Bound both buffer sizes by MAX_DIP; direction is 0 or 1 */
		if (req.in_buf == NULL || req.out_buf == NULL ||
			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
				req.direction > 1) {
			pr_err("invalid parameters\n");
			ret = -EINVAL;
			break;
		}

		/* Copy the input buffer from userspace to kernel space */
		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
		if (!tzbufin) {
			pr_err("error allocating in buffer\n");
			ret = -ENOMEM;
			break;
		}

		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
		if (ret) {
			pr_err("copy_from_user failed, ret=%d\n", ret);
			break;
		}

		/* Make the data visible to TZ, which accesses it by phys addr */
		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);

		/* Prepare the output buffer in kernel space */
		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
		if (!tzbufout) {
			pr_err("error allocating out buffer\n");
			ret = -ENOMEM;
			break;
		}

		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);

		/* Send the command to TZ */
		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
		desc.args[0] = virt_to_phys(tzbufin);
		desc.args[1] = req.in_buf_size;
		desc.args[2] = virt_to_phys(tzbufout);
		desc.args[3] = req.out_buf_size;
		desc.args[4] = req.direction;

		/* QSEE clock must be on around the SCM call */
		ret = __qseecom_enable_clk(CLK_QSEE);
		if (ret)
			break;

		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);

		__qseecom_disable_clk(CLK_QSEE);

		if (ret) {
			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
				ret);
			break;
		}

		/* Copy the output buffer from kernel space to userspace */
		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
		if (ret) {
			pr_err("copy_to_user failed, ret=%d\n", ret);
			break;
		}
	} while (0);

	/* kzfree(NULL) is a no-op, so unconditional cleanup is safe */
	kzfree(tzbufin);
	kzfree(tzbufout);

	return ret;
}
6366
6367static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6368 struct qseecom_qteec_req *req)
6369{
6370 if (!data || !data->client.ihandle) {
6371 pr_err("Client or client handle is not initialized\n");
6372 return -EINVAL;
6373 }
6374
6375 if (data->type != QSEECOM_CLIENT_APP)
6376 return -EFAULT;
6377
6378 if (req->req_len > UINT_MAX - req->resp_len) {
6379 pr_err("Integer overflow detected in req_len & rsp_len\n");
6380 return -EINVAL;
6381 }
6382
6383 if (req->req_len + req->resp_len > data->client.sb_length) {
6384 pr_debug("Not enough memory to fit cmd_buf.\n");
6385 pr_debug("resp_buf. Required: %u, Available: %zu\n",
6386 (req->req_len + req->resp_len), data->client.sb_length);
6387 return -ENOMEM;
6388 }
6389
6390 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6391 pr_err("cmd buffer or response buffer is null\n");
6392 return -EINVAL;
6393 }
6394 if (((uintptr_t)req->req_ptr <
6395 data->client.user_virt_sb_base) ||
6396 ((uintptr_t)req->req_ptr >=
6397 (data->client.user_virt_sb_base + data->client.sb_length))) {
6398 pr_err("cmd buffer address not within shared bufffer\n");
6399 return -EINVAL;
6400 }
6401
6402 if (((uintptr_t)req->resp_ptr <
6403 data->client.user_virt_sb_base) ||
6404 ((uintptr_t)req->resp_ptr >=
6405 (data->client.user_virt_sb_base + data->client.sb_length))) {
6406 pr_err("response buffer address not within shared bufffer\n");
6407 return -EINVAL;
6408 }
6409
6410 if ((req->req_len == 0) || (req->resp_len == 0)) {
6411 pr_err("cmd buf lengtgh/response buf length not valid\n");
6412 return -EINVAL;
6413 }
6414
6415 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6416 pr_err("Integer overflow in req_len & req_ptr\n");
6417 return -EINVAL;
6418 }
6419
6420 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6421 pr_err("Integer overflow in resp_len & resp_ptr\n");
6422 return -EINVAL;
6423 }
6424
6425 if (data->client.user_virt_sb_base >
6426 (ULONG_MAX - data->client.sb_length)) {
6427 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6428 return -EINVAL;
6429 }
6430 if ((((uintptr_t)req->req_ptr + req->req_len) >
6431 ((uintptr_t)data->client.user_virt_sb_base +
6432 data->client.sb_length)) ||
6433 (((uintptr_t)req->resp_ptr + req->resp_len) >
6434 ((uintptr_t)data->client.user_virt_sb_base +
6435 data->client.sb_length))) {
6436 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6437 return -EINVAL;
6438 }
6439 return 0;
6440}
6441
6442static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6443 uint32_t fd_idx, struct sg_table *sg_ptr)
6444{
6445 struct scatterlist *sg = sg_ptr->sgl;
6446 struct qseecom_sg_entry *sg_entry;
6447 void *buf;
6448 uint i;
6449 size_t size;
6450 dma_addr_t coh_pmem;
6451
6452 if (fd_idx >= MAX_ION_FD) {
6453 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6454 return -ENOMEM;
6455 }
6456 /*
6457 * Allocate a buffer, populate it with number of entry plus
6458 * each sg entry's phy addr and length; then return the
6459 * phy_addr of the buffer.
6460 */
6461 size = sizeof(uint32_t) +
6462 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6463 size = (size + PAGE_SIZE) & PAGE_MASK;
6464 buf = dma_alloc_coherent(qseecom.pdev,
6465 size, &coh_pmem, GFP_KERNEL);
6466 if (buf == NULL) {
6467 pr_err("failed to alloc memory for sg buf\n");
6468 return -ENOMEM;
6469 }
6470 *(uint32_t *)buf = sg_ptr->nents;
6471 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6472 for (i = 0; i < sg_ptr->nents; i++) {
6473 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6474 sg_entry->len = sg->length;
6475 sg_entry++;
6476 sg = sg_next(sg);
6477 }
6478 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6479 data->client.sec_buf_fd[fd_idx].vbase = buf;
6480 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6481 data->client.sec_buf_fd[fd_idx].size = size;
6482 return 0;
6483}
6484
6485static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6486 struct qseecom_dev_handle *data, bool cleanup)
6487{
6488 struct ion_handle *ihandle;
6489 int ret = 0;
6490 int i = 0;
6491 uint32_t *update;
6492 struct sg_table *sg_ptr = NULL;
6493 struct scatterlist *sg;
6494 struct qseecom_param_memref *memref;
6495
6496 if (req == NULL) {
6497 pr_err("Invalid address\n");
6498 return -EINVAL;
6499 }
6500 for (i = 0; i < MAX_ION_FD; i++) {
6501 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006502 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006503 req->ifd_data[i].fd);
6504 if (IS_ERR_OR_NULL(ihandle)) {
6505 pr_err("Ion client can't retrieve the handle\n");
6506 return -ENOMEM;
6507 }
6508 if ((req->req_len < sizeof(uint32_t)) ||
6509 (req->ifd_data[i].cmd_buf_offset >
6510 req->req_len - sizeof(uint32_t))) {
6511 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6512 req->req_len,
6513 req->ifd_data[i].cmd_buf_offset);
6514 return -EINVAL;
6515 }
6516 update = (uint32_t *)((char *) req->req_ptr +
6517 req->ifd_data[i].cmd_buf_offset);
6518 if (!update) {
6519 pr_err("update pointer is NULL\n");
6520 return -EINVAL;
6521 }
6522 } else {
6523 continue;
6524 }
6525 /* Populate the cmd data structure with the phys_addr */
6526 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6527 if (IS_ERR_OR_NULL(sg_ptr)) {
6528 pr_err("IOn client could not retrieve sg table\n");
6529 goto err;
6530 }
6531 sg = sg_ptr->sgl;
6532 if (sg == NULL) {
6533 pr_err("sg is NULL\n");
6534 goto err;
6535 }
6536 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6537 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6538 sg_ptr->nents, sg->length);
6539 goto err;
6540 }
6541 /* clean up buf for pre-allocated fd */
6542 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6543 (*update)) {
6544 if (data->client.sec_buf_fd[i].vbase)
6545 dma_free_coherent(qseecom.pdev,
6546 data->client.sec_buf_fd[i].size,
6547 data->client.sec_buf_fd[i].vbase,
6548 data->client.sec_buf_fd[i].pbase);
6549 memset((void *)update, 0,
6550 sizeof(struct qseecom_param_memref));
6551 memset(&(data->client.sec_buf_fd[i]), 0,
6552 sizeof(struct qseecom_sec_buf_fd_info));
6553 goto clean;
6554 }
6555
6556 if (*update == 0) {
6557 /* update buf for pre-allocated fd from secure heap*/
6558 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6559 sg_ptr);
6560 if (ret) {
6561 pr_err("Failed to handle buf for fd[%d]\n", i);
6562 goto err;
6563 }
6564 memref = (struct qseecom_param_memref *)update;
6565 memref->buffer =
6566 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6567 memref->size =
6568 (uint32_t)(data->client.sec_buf_fd[i].size);
6569 } else {
6570 /* update buf for fd from non-secure qseecom heap */
6571 if (sg_ptr->nents != 1) {
6572 pr_err("Num of scat entr (%d) invalid\n",
6573 sg_ptr->nents);
6574 goto err;
6575 }
6576 if (cleanup)
6577 *update = 0;
6578 else
6579 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6580 }
6581clean:
6582 if (cleanup) {
6583 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6584 ihandle, NULL, sg->length,
6585 ION_IOC_INV_CACHES);
6586 if (ret) {
6587 pr_err("cache operation failed %d\n", ret);
6588 goto err;
6589 }
6590 } else {
6591 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6592 ihandle, NULL, sg->length,
6593 ION_IOC_CLEAN_INV_CACHES);
6594 if (ret) {
6595 pr_err("cache operation failed %d\n", ret);
6596 goto err;
6597 }
6598 data->sglistinfo_ptr[i].indexAndFlags =
6599 SGLISTINFO_SET_INDEX_FLAG(
6600 (sg_ptr->nents == 1), 0,
6601 req->ifd_data[i].cmd_buf_offset);
6602 data->sglistinfo_ptr[i].sizeOrCount =
6603 (sg_ptr->nents == 1) ?
6604 sg->length : sg_ptr->nents;
6605 data->sglist_cnt = i + 1;
6606 }
6607 /* Deallocate the handle */
6608 if (!IS_ERR_OR_NULL(ihandle))
6609 ion_free(qseecom.ion_clnt, ihandle);
6610 }
6611 return ret;
6612err:
6613 if (!IS_ERR_OR_NULL(ihandle))
6614 ion_free(qseecom.ion_clnt, ihandle);
6615 return -ENOMEM;
6616}
6617
/*
 * __qseecom_qteec_issue_cmd() - send a QTEEC command (open session,
 * close session, request cancellation, ...) for the calling app to TZ.
 *
 * Validates the request against the client's shared buffer, translates
 * the user-space req/resp pointers, builds a 32-bit or 64-bit ireq
 * depending on the QSEE version, performs the SCM call, then handles
 * reentrancy/incomplete results and always re-invalidates caches and
 * cleans up mod-fd buffers on exit (ret2 preserves the first error).
 *
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req, uint32_t cmd_id)
{
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int ret2 = 0;		/* used on the exit path so 'ret' survives */
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = __qseecom_qteec_validate_msg(data, req);
	if (ret)
		return ret;

	/*
	 * Save the original user-space pointers before they are
	 * overwritten with kernel virtual addresses below; the physical
	 * addresses passed to TZ are derived from these originals.
	 */
	req_ptr = req->req_ptr;
	resp_ptr = req->resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->req_ptr);
	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->resp_ptr);

	/* Only these two commands carry mod-fd data that must be patched */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, false);
		if (ret)
			return ret;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req->req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req->resp_len;
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req->req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req->resp_len;
		/* A 32-bit TA cannot address buffers at or above 4 GB */
		if ((data->client.app_arch == ELFCLASS32) &&
			((ireq_64bit.req_ptr >=
				PHY_ADDR_4G - ireq_64bit.req_len) ||
			(ireq_64bit.resp_ptr >=
				PHY_ADDR_4G - ireq_64bit.resp_len))){
			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
				data->client.app_name, data->client.app_id);
			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
				ireq_64bit.req_ptr, ireq_64bit.req_len,
				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
			return -EFAULT;
		}
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
	}
	if (qseecom.whitelist_support == true
		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
	else
		*(uint32_t *)cmd_buf = cmd_id;

	reqd_len_sb_in = req->req_len + req->resp_len;
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/* Cleanup runs on all paths; ret2 keeps the original 'ret' intact */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
			data->client.sb_virt, data->client.sb_length,
			ION_IOC_INV_CACHES);
	if (ret2) {
		/* NOTE(review): logs 'ret' but the failing op set 'ret2' */
		pr_err("cache operation failed %d\n", ret);
		return ret2;
	}

	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret2 = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, true);
		if (ret2)
			return ret2;
	}
	return ret;
}
6780
6781static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6782 void __user *argp)
6783{
6784 struct qseecom_qteec_modfd_req req;
6785 int ret = 0;
6786
6787 ret = copy_from_user(&req, argp,
6788 sizeof(struct qseecom_qteec_modfd_req));
6789 if (ret) {
6790 pr_err("copy_from_user failed\n");
6791 return ret;
6792 }
6793 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6794 QSEOS_TEE_OPEN_SESSION);
6795
6796 return ret;
6797}
6798
6799static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6800 void __user *argp)
6801{
6802 struct qseecom_qteec_req req;
6803 int ret = 0;
6804
6805 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6806 if (ret) {
6807 pr_err("copy_from_user failed\n");
6808 return ret;
6809 }
6810 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6811 return ret;
6812}
6813
/*
 * qseecom_qteec_invoke_modfd_cmd() - handle the QTEEC invoke-command
 * ioctl with embedded ion-fd memory references.
 *
 * Copies and validates the request, patches in DMA addresses for each
 * ion fd, builds a 32-bit or 64-bit ireq depending on QSEE version,
 * issues the SCM call, then unpatches the buffers and invalidates the
 * shared-buffer cache.
 *
 * Returns 0 on success or a negative errno.
 * NOTE(review): unlike __qseecom_qteec_issue_cmd(), the error paths after
 * __qseecom_update_qteec_req_buf(..., false) return without the cleanup
 * pass (..., true) — confirm whether that asymmetry is intended.
 */
static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	struct qseecom_qteec_modfd_req req;
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int i = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	/* originals of the user-space pointers, kept for phys translation */
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = copy_from_user(&req, argp,
			sizeof(struct qseecom_qteec_modfd_req));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}
	ret = __qseecom_qteec_validate_msg(data,
					(struct qseecom_qteec_req *)(&req));
	if (ret)
		return ret;
	req_ptr = req.req_ptr;
	resp_ptr = req.resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (req.ifd_data[i].fd) {
			if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
				return -EINVAL;
		}
	}
	req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.req_ptr);
	req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.resp_ptr);
	ret = __qseecom_update_qteec_req_buf(&req, data, false);
	if (ret)
		return ret;

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req.req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req.req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
	}
	reqd_len_sb_in = req.req_len + req.resp_len;
	if (qseecom.whitelist_support == true)
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
	else
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;

	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* Unpatch the fd references and invalidate the shared buffer */
	ret = __qseecom_update_qteec_req_buf(&req, data, true);
	if (ret)
		return ret;

	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}
	return 0;
}
6964
6965static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6966 void __user *argp)
6967{
6968 struct qseecom_qteec_modfd_req req;
6969 int ret = 0;
6970
6971 ret = copy_from_user(&req, argp,
6972 sizeof(struct qseecom_qteec_modfd_req));
6973 if (ret) {
6974 pr_err("copy_from_user failed\n");
6975 return ret;
6976 }
6977 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6978 QSEOS_TEE_REQUEST_CANCELLATION);
6979
6980 return ret;
6981}
6982
6983static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6984{
6985 if (data->sglist_cnt) {
6986 memset(data->sglistinfo_ptr, 0,
6987 SGLISTINFO_TABLE_SIZE);
6988 data->sglist_cnt = 0;
6989 }
6990}
6991
/*
 * Main ioctl dispatcher for /dev/qseecom.
 *
 * Conventions used by (most of) the cases below:
 *  - data->type records what this fd has been used as (generic, listener,
 *    client app, secure service, ...); each case validates the type before
 *    acting, and some cases promote the type.
 *  - data->ioctl_count is incremented around each operation and
 *    data->abort_wq is woken on exit so an abort path can wait for
 *    in-flight ioctls to drain.
 *  - app_access_lock serializes TZ access; commands that talk to QSEE
 *    take it around the call.
 *  - perf_enabled tracks a clock vote taken on behalf of a client that
 *    did not vote itself; it is released after the command completes.
 *
 * Returns 0 or a negative errno.
 */
static inline long qseecom_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	struct qseecom_dev_handle *data = file->private_data;
	void __user *argp = (void __user *) arg;
	bool perf_enabled = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	if (data->abort) {
		pr_err("Aborting qseecom driver\n");
		return -ENODEV;
	}

	switch (cmd) {
	case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
		/* Only a fresh (generic) fd may become a listener. */
		if (data->type != QSEECOM_GENERIC) {
			pr_err("reg lstnr req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		pr_debug("ioctl register_listener_req()\n");
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		data->type = QSEECOM_LISTENER_SERVICE;
		ret = qseecom_register_listener(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed qseecom_register_listener: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SET_ICE_INFO: {
		/* Pass the FDE flag straight through to the ICE driver. */
		struct qseecom_ice_data_t ice_data;

		ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
		if (ret) {
			pr_err("copy_from_user failed\n");
			return -EFAULT;
		}
		qcom_ice_set_fde_flag(ice_data.flag);
		break;
	}

	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
						data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		pr_debug("ioctl unregister_listener_req()\n");
		/*
		 * Abort all listeners while unregistering so no one is
		 * left blocked on this listener; cleared again below.
		 */
		__qseecom_listener_abort_all(1);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unregister_listener(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		__qseecom_listener_abort_all(0);
		if (ret)
			pr_err("failed qseecom_unregister_listener: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_CMD_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		if (qseecom.support_bus_scaling) {
			/* register bus bw in case the client doesn't do it */
			if (!data->mode) {
				mutex_lock(&qsee_bw_mutex);
				__qseecom_register_bus_bandwidth_needs(
								data, HIGH);
				mutex_unlock(&qsee_bw_mutex);
			}
			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
			if (ret) {
				pr_err("Failed to set bw.\n");
				ret = -EINVAL;
				mutex_unlock(&app_access_lock);
				break;
			}
		}
		/*
		 * On targets where crypto clock is handled by HLOS,
		 * if clk_access_cnt is zero and perf_enabled is false,
		 * then the crypto clock was not enabled before sending cmd to
		 * tz, qseecom will enable the clock to avoid service failure.
		 */
		if (!qseecom.no_clock_support &&
			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
			pr_debug("ce clock is not enabled!\n");
			ret = qseecom_perf_enable(data);
			if (ret) {
				pr_err("Failed to vote for clock with err %d\n",
						ret);
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
			perf_enabled = true;
		}
		atomic_inc(&data->ioctl_count);
		ret = qseecom_send_cmd(data, argp);
		if (qseecom.support_bus_scaling)
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		/* Drop the clock vote we took on the client's behalf. */
		if (perf_enabled) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		}
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed qseecom_send_cmd: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
	case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
		/* Same flow as SEND_CMD_REQ but with ion fd translation. */
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		if (qseecom.support_bus_scaling) {
			if (!data->mode) {
				mutex_lock(&qsee_bw_mutex);
				__qseecom_register_bus_bandwidth_needs(
								data, HIGH);
				mutex_unlock(&qsee_bw_mutex);
			}
			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
			if (ret) {
				pr_err("Failed to set bw.\n");
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
		}
		/*
		 * On targets where crypto clock is handled by HLOS,
		 * if clk_access_cnt is zero and perf_enabled is false,
		 * then the crypto clock was not enabled before sending cmd to
		 * tz, qseecom will enable the clock to avoid service failure.
		 */
		if (!qseecom.no_clock_support &&
			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
			pr_debug("ce clock is not enabled!\n");
			ret = qseecom_perf_enable(data);
			if (ret) {
				pr_err("Failed to vote for clock with err %d\n",
						ret);
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
			perf_enabled = true;
		}
		atomic_inc(&data->ioctl_count);
		if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
			ret = qseecom_send_modfd_cmd(data, argp);
		else
			ret = qseecom_send_modfd_cmd_64(data, argp);
		if (qseecom.support_bus_scaling)
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		if (perf_enabled) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		}
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed qseecom_send_cmd: %d\n", ret);
		/* sglist table was populated for this cmd; clear it. */
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_IOCTL_RECEIVE_REQ: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("receive req: invalid handle (%d), lid(%d)\n",
						data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		ret = qseecom_receive_req(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		/* -ERESTARTSYS is a normal signal-interrupted wait. */
		if (ret && (ret != -ERESTARTSYS))
			pr_err("failed qseecom_receive_req: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_RESP_REQ: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("send resp req: invalid handle (%d), lid(%d)\n",
						data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		if (!qseecom.qsee_reentrancy_support)
			ret = qseecom_send_resp();
		else
			ret = qseecom_reentrancy_send_resp(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		if (ret)
			pr_err("failed qseecom_send_resp: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
		if ((data->type != QSEECOM_CLIENT_APP) &&
			(data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_SECURE_SERVICE)) {
			pr_err("set mem param req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_set_client_mem_param(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed Qqseecom_set_mem_param request: %d\n",
								ret);
		break;
	}
	case QSEECOM_IOCTL_LOAD_APP_REQ: {
		if ((data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("load app req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		/* Loading an app promotes the fd to a client-app handle. */
		data->type = QSEECOM_CLIENT_APP;
		pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_load_app(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed load_app request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unload_app(data, false);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed unload_app request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_get_qseos_version(data, argp);
		if (ret)
			pr_err("qseecom_get_qseos_version: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
		if ((data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("perf enable req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if ((data->type == QSEECOM_CLIENT_APP) &&
			(data->client.app_id == 0)) {
			pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		/* Either register a bus-bw need or vote clocks directly. */
		if (qseecom.support_bus_scaling) {
			mutex_lock(&qsee_bw_mutex);
			__qseecom_register_bus_bandwidth_needs(data, HIGH);
			mutex_unlock(&qsee_bw_mutex);
		} else {
			ret = qseecom_perf_enable(data);
			if (ret)
				pr_err("Fail to vote for clocks %d\n", ret);
		}
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
		if ((data->type != QSEECOM_SECURE_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("perf disable req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if ((data->type == QSEECOM_CLIENT_APP) &&
			(data->client.app_id == 0)) {
			pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			mutex_lock(&qsee_bw_mutex);
			qseecom_unregister_bus_bandwidth_needs(data);
			mutex_unlock(&qsee_bw_mutex);
		}
		atomic_dec(&data->ioctl_count);
		break;
	}

	case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
		/* If crypto clock is not handled by HLOS, return directly. */
		if (qseecom.no_clock_support) {
			pr_debug("crypto clock is not handled by HLOS\n");
			break;
		}
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		ret = qseecom_scale_bus_bandwidth(data, argp);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("load ext elf req: invalid client handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		/* External-elf handles are torn down via their own ioctl. */
		data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_load_external_elf(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed load_external_elf request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
		if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
			pr_err("unload ext elf req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unload_external_elf(data);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed unload_app request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
		data->type = QSEECOM_CLIENT_APP;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
		ret = qseecom_query_app_loaded(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("send cmd svc req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->type = QSEECOM_SECURE_SERVICE;
		if (qseecom.qsee_version < QSEE_VERSION_03) {
			pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_send_service_cmd(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_CREATE_KEY_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("create key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Create Key feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_create_key(data, argp);
		if (ret)
			pr_err("failed to create encryption key: %d\n", ret);

		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_WIPE_KEY_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("wipe key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Wipe Key feature unsupported in qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_wipe_key(data, argp);
		if (ret)
			pr_err("failed to wipe encryption key: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("update key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Update Key feature unsupported in qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_update_key_user_info(data, argp);
		if (ret)
			pr_err("failed to update key user info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("save part hash req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_save_partition_hash(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("ES activated req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_is_es_activated(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_mdtp_cipher_dip(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SEND_MODFD_RESP:
	case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("receive req: invalid handle (%d), lid(%d)\n",
						data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
			ret = qseecom_send_modfd_resp(data, argp);
		else
			ret = qseecom_send_modfd_resp_64(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		if (ret)
			pr_err("failed qseecom_send_mod_resp: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
		/* GlobalPlatform TEE ops require QSEE >= 4.0. */
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Open session: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_open_session(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed open_session_cmd: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Close session: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_close_session(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed close_session_cmd: %d\n", ret);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed Invoke cmd: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_request_cancellation(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed request_cancellation: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_get_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_free_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_query_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	default:
		pr_err("Invalid IOCTL: 0x%x\n", cmd);
		return -EINVAL;
	}
	return ret;
}
7695
7696static int qseecom_open(struct inode *inode, struct file *file)
7697{
7698 int ret = 0;
7699 struct qseecom_dev_handle *data;
7700
7701 data = kzalloc(sizeof(*data), GFP_KERNEL);
7702 if (!data)
7703 return -ENOMEM;
7704 file->private_data = data;
7705 data->abort = 0;
7706 data->type = QSEECOM_GENERIC;
7707 data->released = false;
7708 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7709 data->mode = INACTIVE;
7710 init_waitqueue_head(&data->abort_wq);
7711 atomic_set(&data->ioctl_count, 0);
7712 return ret;
7713}
7714
/*
 * Release a /dev/qseecom fd: tear down whatever role the handle took on
 * (listener, client app, mapped ion memory), drop any outstanding bus
 * bandwidth / clock votes, and free the handle.
 *
 * Handles that already completed their lifecycle via an ioctl set
 * data->released and skip the per-type cleanup here.
 */
static int qseecom_release(struct inode *inode, struct file *file)
{
	struct qseecom_dev_handle *data = file->private_data;
	int ret = 0;

	if (data->released == false) {
		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
			data->type, data->mode, data);
		switch (data->type) {
		case QSEECOM_LISTENER_SERVICE:
			pr_warn("release lsnr svc %d\n", data->listener.id);
			/*
			 * Abort all listeners around unregistration so
			 * nothing stays blocked on this dying listener.
			 */
			__qseecom_listener_abort_all(1);
			mutex_lock(&app_access_lock);
			ret = qseecom_unregister_listener(data);
			mutex_unlock(&app_access_lock);
			__qseecom_listener_abort_all(0);
			break;
		case QSEECOM_CLIENT_APP:
			mutex_lock(&app_access_lock);
			/* true => this is an app-crash/close unload path */
			ret = qseecom_unload_app(data, true);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_SECURE_SERVICE:
		case QSEECOM_GENERIC:
			ret = qseecom_unmap_ion_allocated_memory(data);
			if (ret)
				pr_err("Ion Unmap failed\n");
			break;
		case QSEECOM_UNAVAILABLE_CLIENT_APP:
			break;
		default:
			pr_err("Unsupported clnt_handle_type %d",
				data->type);
			break;
		}
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		if (data->mode != INACTIVE) {
			qseecom_unregister_bus_bandwidth_needs(data);
			/* Scale the bus down if no one else needs it. */
			if (qseecom.cumulative_mode == INACTIVE) {
				ret = __qseecom_set_msm_bus_request(INACTIVE);
				if (ret)
					pr_err("Fail to scale down bus\n");
			}
		}
		mutex_unlock(&qsee_bw_mutex);
	} else {
		/* Drop any clock votes this handle still holds. */
		if (data->fast_load_enabled == true)
			qsee_disable_clock_vote(data, CLK_SFPB);
		if (data->perf_enabled == true)
			qsee_disable_clock_vote(data, CLK_DFAB);
	}
	kfree(data);

	return ret;
}
7773
7774#ifdef CONFIG_COMPAT
7775#include "compat_qseecom.c"
7776#else
7777#define compat_qseecom_ioctl NULL
7778#endif
7779
/* Character device file operations for /dev/qseecom. */
static const struct file_operations qseecom_fops = {
		.owner = THIS_MODULE,
		.unlocked_ioctl = qseecom_ioctl,
		.compat_ioctl = compat_qseecom_ioctl,
		.open = qseecom_open,
		.release = qseecom_release
};
7787
7788static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7789{
7790 int rc = 0;
7791 struct device *pdev;
7792 struct qseecom_clk *qclk;
7793 char *core_clk_src = NULL;
7794 char *core_clk = NULL;
7795 char *iface_clk = NULL;
7796 char *bus_clk = NULL;
7797
7798 switch (ce) {
7799 case CLK_QSEE: {
7800 core_clk_src = "core_clk_src";
7801 core_clk = "core_clk";
7802 iface_clk = "iface_clk";
7803 bus_clk = "bus_clk";
7804 qclk = &qseecom.qsee;
7805 qclk->instance = CLK_QSEE;
7806 break;
7807 };
7808 case CLK_CE_DRV: {
7809 core_clk_src = "ce_drv_core_clk_src";
7810 core_clk = "ce_drv_core_clk";
7811 iface_clk = "ce_drv_iface_clk";
7812 bus_clk = "ce_drv_bus_clk";
7813 qclk = &qseecom.ce_drv;
7814 qclk->instance = CLK_CE_DRV;
7815 break;
7816 };
7817 default:
7818 pr_err("Invalid ce hw instance: %d!\n", ce);
7819 return -EIO;
7820 }
7821
7822 if (qseecom.no_clock_support) {
7823 qclk->ce_core_clk = NULL;
7824 qclk->ce_clk = NULL;
7825 qclk->ce_bus_clk = NULL;
7826 qclk->ce_core_src_clk = NULL;
7827 return 0;
7828 }
7829
7830 pdev = qseecom.pdev;
7831
7832 /* Get CE3 src core clk. */
7833 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7834 if (!IS_ERR(qclk->ce_core_src_clk)) {
7835 rc = clk_set_rate(qclk->ce_core_src_clk,
7836 qseecom.ce_opp_freq_hz);
7837 if (rc) {
7838 clk_put(qclk->ce_core_src_clk);
7839 qclk->ce_core_src_clk = NULL;
7840 pr_err("Unable to set the core src clk @%uMhz.\n",
7841 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7842 return -EIO;
7843 }
7844 } else {
7845 pr_warn("Unable to get CE core src clk, set to NULL\n");
7846 qclk->ce_core_src_clk = NULL;
7847 }
7848
7849 /* Get CE core clk */
7850 qclk->ce_core_clk = clk_get(pdev, core_clk);
7851 if (IS_ERR(qclk->ce_core_clk)) {
7852 rc = PTR_ERR(qclk->ce_core_clk);
7853 pr_err("Unable to get CE core clk\n");
7854 if (qclk->ce_core_src_clk != NULL)
7855 clk_put(qclk->ce_core_src_clk);
7856 return -EIO;
7857 }
7858
7859 /* Get CE Interface clk */
7860 qclk->ce_clk = clk_get(pdev, iface_clk);
7861 if (IS_ERR(qclk->ce_clk)) {
7862 rc = PTR_ERR(qclk->ce_clk);
7863 pr_err("Unable to get CE interface clk\n");
7864 if (qclk->ce_core_src_clk != NULL)
7865 clk_put(qclk->ce_core_src_clk);
7866 clk_put(qclk->ce_core_clk);
7867 return -EIO;
7868 }
7869
7870 /* Get CE AXI clk */
7871 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7872 if (IS_ERR(qclk->ce_bus_clk)) {
7873 rc = PTR_ERR(qclk->ce_bus_clk);
7874 pr_err("Unable to get CE BUS interface clk\n");
7875 if (qclk->ce_core_src_clk != NULL)
7876 clk_put(qclk->ce_core_src_clk);
7877 clk_put(qclk->ce_core_clk);
7878 clk_put(qclk->ce_clk);
7879 return -EIO;
7880 }
7881
7882 return rc;
7883}
7884
7885static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7886{
7887 struct qseecom_clk *qclk;
7888
7889 if (ce == CLK_QSEE)
7890 qclk = &qseecom.qsee;
7891 else
7892 qclk = &qseecom.ce_drv;
7893
7894 if (qclk->ce_clk != NULL) {
7895 clk_put(qclk->ce_clk);
7896 qclk->ce_clk = NULL;
7897 }
7898 if (qclk->ce_core_clk != NULL) {
7899 clk_put(qclk->ce_core_clk);
7900 qclk->ce_core_clk = NULL;
7901 }
7902 if (qclk->ce_bus_clk != NULL) {
7903 clk_put(qclk->ce_bus_clk);
7904 qclk->ce_bus_clk = NULL;
7905 }
7906 if (qclk->ce_core_src_clk != NULL) {
7907 clk_put(qclk->ce_core_src_clk);
7908 qclk->ce_core_src_clk = NULL;
7909 }
7910 qclk->instance = CLK_INVALID;
7911}
7912
/*
 * qseecom_retrieve_ce_data() - parse crypto-engine (CE) configuration from
 * the device tree and populate qseecom.ce_info for FDE (full-disk
 * encryption) and PFE (per-file encryption) usage.
 *
 * Two device-tree layouts are supported:
 *  - new format: "qcom,full-disk-encrypt-info" / "qcom,per-file-encrypt-info"
 *    tables of qseecom_crypto_info entries grouped into units;
 *  - legacy format (old_db stays true): single pipe-pair properties
 *    "qcom,disk-encrypt-pipe-pair" / "qcom,file-encrypt-pipe-pair", with
 *    optionally multiple HLOS CE HW instances.
 *
 * Return: 0 on success or negative errno; on failure all partially
 * allocated ce_info tables are freed before returning.
 */
static int qseecom_retrieve_ce_data(struct platform_device *pdev)
{
	int rc = 0;
	uint32_t hlos_num_ce_hw_instances;
	uint32_t disk_encrypt_pipe;
	uint32_t file_encrypt_pipe;
	/* zero-initialized so hlos_ce_hw_instance[0] is defined at out1 */
	uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
	int i;
	const int *tbl;
	int size;
	int entry;
	struct qseecom_crypto_info *pfde_tbl = NULL;
	struct qseecom_crypto_info *p;
	int tbl_size;
	int j;
	bool old_db = true;	/* cleared when a new-format table is found */
	struct qseecom_ce_info_use *pce_info_use;
	uint32_t *unit_tbl = NULL;
	int total_units = 0;
	struct qseecom_ce_pipe_entry *pce_entry;

	qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
	qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;

	/* The QSEE CE HW instance is mandatory. */
	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,qsee-ce-hw-instance",
				&qseecom.ce_info.qsee_ce_hw_instance)) {
		pr_err("Fail to get qsee ce hw instance information.\n");
		rc = -EINVAL;
		goto out;
	} else {
		pr_debug("qsee-ce-hw-instance=0x%x\n",
			qseecom.ce_info.qsee_ce_hw_instance);
	}

	qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-fde");
	qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-pfe");

	if (!qseecom.support_pfe && !qseecom.support_fde) {
		pr_warn("Device does not support PFE/FDE");
		goto out;
	}

	/* --- New-format FDE table (qcom,full-disk-encrypt-info) --- */
	if (qseecom.support_fde)
		tbl = of_get_property((&pdev->dev)->of_node,
			"qcom,full-disk-encrypt-info", &size);
	else
		tbl = NULL;
	if (tbl) {
		old_db = false;
		if (size % sizeof(struct qseecom_crypto_info)) {
			pr_err("full-disk-encrypt-info tbl size(%d)\n",
				size);
			rc = -EINVAL;
			goto out;
		}
		tbl_size = size / sizeof
				(struct qseecom_crypto_info);

		pfde_tbl = kzalloc(size, GFP_KERNEL);
		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
		total_units = 0;

		if (!pfde_tbl || !unit_tbl) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,full-disk-encrypt-info",
			(u32 *)pfde_tbl, size/sizeof(u32))) {
			pr_err("failed to read full-disk-encrypt-info tbl\n");
			rc = -EINVAL;
			goto out;
		}

		/* Collect the distinct unit numbers used by the table. */
		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
			for (j = 0; j < total_units; j++) {
				if (p->unit_num == *(unit_tbl + j))
					break;
			}
			if (j == total_units) {
				*(unit_tbl + total_units) = p->unit_num;
				total_units++;
			}
		}

		qseecom.ce_info.num_fde = total_units;
		pce_info_use = qseecom.ce_info.fde = kcalloc(
			total_units, sizeof(struct qseecom_ce_info_use),
				GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}

		/* Build one qseecom_ce_info_use per unit with its pipes. */
		for (j = 0; j < total_units; j++, pce_info_use++) {
			pce_info_use->unit_num = *(unit_tbl + j);
			pce_info_use->alloc = false;
			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
			pce_info_use->num_ce_pipe_entries = 0;
			pce_info_use->ce_pipe_entry = NULL;
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num)
					pce_info_use->num_ce_pipe_entries++;
			}

			entry = pce_info_use->num_ce_pipe_entries;
			pce_entry = pce_info_use->ce_pipe_entry =
				kcalloc(entry,
					sizeof(struct qseecom_ce_pipe_entry),
					GFP_KERNEL);
			if (pce_entry == NULL) {
				pr_err("failed to alloc memory\n");
				rc = -ENOMEM;
				goto out;
			}

			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num) {
					pce_entry->ce_num = p->ce;
					pce_entry->ce_pipe_pair =
							p->pipe_pair;
					pce_entry->valid = true;
					pce_entry++;
				}
			}
		}
		kfree(unit_tbl);
		unit_tbl = NULL;
		kfree(pfde_tbl);
		pfde_tbl = NULL;
	}

	/* --- New-format PFE table (qcom,per-file-encrypt-info) --- */
	if (qseecom.support_pfe)
		tbl = of_get_property((&pdev->dev)->of_node,
			"qcom,per-file-encrypt-info", &size);
	else
		tbl = NULL;
	if (tbl) {
		old_db = false;
		if (size % sizeof(struct qseecom_crypto_info)) {
			pr_err("per-file-encrypt-info tbl size(%d)\n",
				size);
			rc = -EINVAL;
			goto out;
		}
		tbl_size = size / sizeof
				(struct qseecom_crypto_info);

		pfde_tbl = kzalloc(size, GFP_KERNEL);
		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
		total_units = 0;
		if (!pfde_tbl || !unit_tbl) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,per-file-encrypt-info",
			(u32 *)pfde_tbl, size/sizeof(u32))) {
			pr_err("failed to read per-file-encrypt-info tbl\n");
			rc = -EINVAL;
			goto out;
		}

		/* Collect the distinct unit numbers used by the table. */
		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
			for (j = 0; j < total_units; j++) {
				if (p->unit_num == *(unit_tbl + j))
					break;
			}
			if (j == total_units) {
				*(unit_tbl + total_units) = p->unit_num;
				total_units++;
			}
		}

		qseecom.ce_info.num_pfe = total_units;
		pce_info_use = qseecom.ce_info.pfe = kcalloc(
			total_units, sizeof(struct qseecom_ce_info_use),
				GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}

		/* Build one qseecom_ce_info_use per unit with its pipes. */
		for (j = 0; j < total_units; j++, pce_info_use++) {
			pce_info_use->unit_num = *(unit_tbl + j);
			pce_info_use->alloc = false;
			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
			pce_info_use->num_ce_pipe_entries = 0;
			pce_info_use->ce_pipe_entry = NULL;
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num)
					pce_info_use->num_ce_pipe_entries++;
			}

			entry = pce_info_use->num_ce_pipe_entries;
			pce_entry = pce_info_use->ce_pipe_entry =
				kcalloc(entry,
					sizeof(struct qseecom_ce_pipe_entry),
					GFP_KERNEL);
			if (pce_entry == NULL) {
				pr_err("failed to alloc memory\n");
				rc = -ENOMEM;
				goto out;
			}

			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num) {
					pce_entry->ce_num = p->ce;
					pce_entry->ce_pipe_pair =
							p->pipe_pair;
					pce_entry->valid = true;
					pce_entry++;
				}
			}
		}
		kfree(unit_tbl);
		unit_tbl = NULL;
		kfree(pfde_tbl);
		pfde_tbl = NULL;
	}

	/* New-format tables found: skip the legacy parsing entirely. */
	if (!old_db)
		goto out1;

	/* --- Legacy (old_db) format below --- */
	if (of_property_read_bool((&pdev->dev)->of_node,
			"qcom,support-multiple-ce-hw-instance")) {
		if (of_property_read_u32((&pdev->dev)->of_node,
			"qcom,hlos-num-ce-hw-instances",
				&hlos_num_ce_hw_instances)) {
			pr_err("Fail: get hlos number of ce hw instance\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		hlos_num_ce_hw_instances = 1;
	}

	if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
		pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
			MAX_CE_PIPE_PAIR_PER_UNIT);
		rc = -EINVAL;
		goto out;
	}

	if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
			hlos_num_ce_hw_instances)) {
		pr_err("Fail: get hlos ce hw instance info\n");
		rc = -EINVAL;
		goto out;
	}

	if (qseecom.support_fde) {
		/* Single FDE unit sharing one pipe pair across instances. */
		pce_info_use = qseecom.ce_info.fde =
			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* by default for old db */
		qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
		pce_info_use->alloc = false;
		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
		pce_info_use->ce_pipe_entry = NULL;
		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,disk-encrypt-pipe-pair",
				&disk_encrypt_pipe)) {
			pr_err("Fail to get FDE pipe information.\n");
			rc = -EINVAL;
				goto out;
		} else {
			pr_debug("disk-encrypt-pipe-pair=0x%x",
				disk_encrypt_pipe);
		}
		entry = pce_info_use->num_ce_pipe_entries =
				hlos_num_ce_hw_instances;
		pce_entry = pce_info_use->ce_pipe_entry =
			kcalloc(entry,
				sizeof(struct qseecom_ce_pipe_entry),
				GFP_KERNEL);
		if (pce_entry == NULL) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0; i < entry; i++) {
			pce_entry->ce_num = hlos_ce_hw_instance[i];
			pce_entry->ce_pipe_pair = disk_encrypt_pipe;
			pce_entry->valid = 1;
			pce_entry++;
		}
	} else {
		pr_warn("Device does not support FDE");
		disk_encrypt_pipe = 0xff;
	}
	if (qseecom.support_pfe) {
		/* Single PFE unit sharing one pipe pair across instances. */
		pce_info_use = qseecom.ce_info.pfe =
			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* by default for old db */
		qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
		pce_info_use->alloc = false;
		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
		pce_info_use->ce_pipe_entry = NULL;

		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,file-encrypt-pipe-pair",
				&file_encrypt_pipe)) {
			pr_err("Fail to get PFE pipe information.\n");
			rc = -EINVAL;
			goto out;
		} else {
			pr_debug("file-encrypt-pipe-pair=0x%x",
				file_encrypt_pipe);
		}
		entry = pce_info_use->num_ce_pipe_entries =
				hlos_num_ce_hw_instances;
		pce_entry = pce_info_use->ce_pipe_entry =
			kcalloc(entry,
				sizeof(struct qseecom_ce_pipe_entry),
				GFP_KERNEL);
		if (pce_entry == NULL) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0; i < entry; i++) {
			pce_entry->ce_num = hlos_ce_hw_instance[i];
			pce_entry->ce_pipe_pair = file_encrypt_pipe;
			pce_entry->valid = 1;
			pce_entry++;
		}
	} else {
		pr_warn("Device does not support PFE");
		file_encrypt_pipe = 0xff;
	}

out1:
	/* hlos_ce_hw_instance[0] is 0 when only new-format tables exist */
	qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
	qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
out:
	if (rc) {
		/*
		 * Unwind all partial allocations; kcalloc() zeroed the
		 * unit arrays, so unset ce_pipe_entry is NULL and
		 * kfree(NULL) is a no-op.
		 */
		if (qseecom.ce_info.fde) {
			pce_info_use = qseecom.ce_info.fde;
			for (i = 0; i < qseecom.ce_info.num_fde; i++) {
				pce_entry = pce_info_use->ce_pipe_entry;
				kfree(pce_entry);
				pce_info_use++;
			}
		}
		kfree(qseecom.ce_info.fde);
		qseecom.ce_info.fde = NULL;
		if (qseecom.ce_info.pfe) {
			pce_info_use = qseecom.ce_info.pfe;
			for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
				pce_entry = pce_info_use->ce_pipe_entry;
				kfree(pce_entry);
				pce_info_use++;
			}
		}
		kfree(qseecom.ce_info.pfe);
		qseecom.ce_info.pfe = NULL;
	}
	kfree(unit_tbl);
	kfree(pfde_tbl);
	return rc;
}
8294
8295static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8296 void __user *argp)
8297{
8298 struct qseecom_ce_info_req req;
8299 struct qseecom_ce_info_req *pinfo = &req;
8300 int ret = 0;
8301 int i;
8302 unsigned int entries;
8303 struct qseecom_ce_info_use *pce_info_use, *p;
8304 int total = 0;
8305 bool found = false;
8306 struct qseecom_ce_pipe_entry *pce_entry;
8307
8308 ret = copy_from_user(pinfo, argp,
8309 sizeof(struct qseecom_ce_info_req));
8310 if (ret) {
8311 pr_err("copy_from_user failed\n");
8312 return ret;
8313 }
8314
8315 switch (pinfo->usage) {
8316 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8317 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8318 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8319 if (qseecom.support_fde) {
8320 p = qseecom.ce_info.fde;
8321 total = qseecom.ce_info.num_fde;
8322 } else {
8323 pr_err("system does not support fde\n");
8324 return -EINVAL;
8325 }
8326 break;
8327 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8328 if (qseecom.support_pfe) {
8329 p = qseecom.ce_info.pfe;
8330 total = qseecom.ce_info.num_pfe;
8331 } else {
8332 pr_err("system does not support pfe\n");
8333 return -EINVAL;
8334 }
8335 break;
8336 default:
8337 pr_err("unsupported usage %d\n", pinfo->usage);
8338 return -EINVAL;
8339 }
8340
8341 pce_info_use = NULL;
8342 for (i = 0; i < total; i++) {
8343 if (!p->alloc)
8344 pce_info_use = p;
8345 else if (!memcmp(p->handle, pinfo->handle,
8346 MAX_CE_INFO_HANDLE_SIZE)) {
8347 pce_info_use = p;
8348 found = true;
8349 break;
8350 }
8351 p++;
8352 }
8353
8354 if (pce_info_use == NULL)
8355 return -EBUSY;
8356
8357 pinfo->unit_num = pce_info_use->unit_num;
8358 if (!pce_info_use->alloc) {
8359 pce_info_use->alloc = true;
8360 memcpy(pce_info_use->handle,
8361 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8362 }
8363 if (pce_info_use->num_ce_pipe_entries >
8364 MAX_CE_PIPE_PAIR_PER_UNIT)
8365 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8366 else
8367 entries = pce_info_use->num_ce_pipe_entries;
8368 pinfo->num_ce_pipe_entries = entries;
8369 pce_entry = pce_info_use->ce_pipe_entry;
8370 for (i = 0; i < entries; i++, pce_entry++)
8371 pinfo->ce_pipe_entry[i] = *pce_entry;
8372 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8373 pinfo->ce_pipe_entry[i].valid = 0;
8374
8375 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8376 pr_err("copy_to_user failed\n");
8377 ret = -EFAULT;
8378 }
8379 return ret;
8380}
8381
8382static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8383 void __user *argp)
8384{
8385 struct qseecom_ce_info_req req;
8386 struct qseecom_ce_info_req *pinfo = &req;
8387 int ret = 0;
8388 struct qseecom_ce_info_use *p;
8389 int total = 0;
8390 int i;
8391 bool found = false;
8392
8393 ret = copy_from_user(pinfo, argp,
8394 sizeof(struct qseecom_ce_info_req));
8395 if (ret)
8396 return ret;
8397
8398 switch (pinfo->usage) {
8399 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8400 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8401 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8402 if (qseecom.support_fde) {
8403 p = qseecom.ce_info.fde;
8404 total = qseecom.ce_info.num_fde;
8405 } else {
8406 pr_err("system does not support fde\n");
8407 return -EINVAL;
8408 }
8409 break;
8410 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8411 if (qseecom.support_pfe) {
8412 p = qseecom.ce_info.pfe;
8413 total = qseecom.ce_info.num_pfe;
8414 } else {
8415 pr_err("system does not support pfe\n");
8416 return -EINVAL;
8417 }
8418 break;
8419 default:
8420 pr_err("unsupported usage %d\n", pinfo->usage);
8421 return -EINVAL;
8422 }
8423
8424 for (i = 0; i < total; i++) {
8425 if (p->alloc &&
8426 !memcmp(p->handle, pinfo->handle,
8427 MAX_CE_INFO_HANDLE_SIZE)) {
8428 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8429 p->alloc = false;
8430 found = true;
8431 break;
8432 }
8433 p++;
8434 }
8435 return ret;
8436}
8437
8438static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8439 void __user *argp)
8440{
8441 struct qseecom_ce_info_req req;
8442 struct qseecom_ce_info_req *pinfo = &req;
8443 int ret = 0;
8444 int i;
8445 unsigned int entries;
8446 struct qseecom_ce_info_use *pce_info_use, *p;
8447 int total = 0;
8448 bool found = false;
8449 struct qseecom_ce_pipe_entry *pce_entry;
8450
8451 ret = copy_from_user(pinfo, argp,
8452 sizeof(struct qseecom_ce_info_req));
8453 if (ret)
8454 return ret;
8455
8456 switch (pinfo->usage) {
8457 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8458 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8459 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8460 if (qseecom.support_fde) {
8461 p = qseecom.ce_info.fde;
8462 total = qseecom.ce_info.num_fde;
8463 } else {
8464 pr_err("system does not support fde\n");
8465 return -EINVAL;
8466 }
8467 break;
8468 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8469 if (qseecom.support_pfe) {
8470 p = qseecom.ce_info.pfe;
8471 total = qseecom.ce_info.num_pfe;
8472 } else {
8473 pr_err("system does not support pfe\n");
8474 return -EINVAL;
8475 }
8476 break;
8477 default:
8478 pr_err("unsupported usage %d\n", pinfo->usage);
8479 return -EINVAL;
8480 }
8481
8482 pce_info_use = NULL;
8483 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8484 pinfo->num_ce_pipe_entries = 0;
8485 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8486 pinfo->ce_pipe_entry[i].valid = 0;
8487
8488 for (i = 0; i < total; i++) {
8489
8490 if (p->alloc && !memcmp(p->handle,
8491 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8492 pce_info_use = p;
8493 found = true;
8494 break;
8495 }
8496 p++;
8497 }
8498 if (!pce_info_use)
8499 goto out;
8500 pinfo->unit_num = pce_info_use->unit_num;
8501 if (pce_info_use->num_ce_pipe_entries >
8502 MAX_CE_PIPE_PAIR_PER_UNIT)
8503 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8504 else
8505 entries = pce_info_use->num_ce_pipe_entries;
8506 pinfo->num_ce_pipe_entries = entries;
8507 pce_entry = pce_info_use->ce_pipe_entry;
8508 for (i = 0; i < entries; i++, pce_entry++)
8509 pinfo->ce_pipe_entry[i] = *pce_entry;
8510 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8511 pinfo->ce_pipe_entry[i].valid = 0;
8512out:
8513 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8514 pr_err("copy_to_user failed\n");
8515 ret = -EFAULT;
8516 }
8517 return ret;
8518}
8519
8520/*
8521 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8522 * then whitelist feature is not supported.
8523 */
8524static int qseecom_check_whitelist_feature(void)
8525{
8526 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8527
8528 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8529}
8530
/*
 * qseecom_probe() - platform driver probe.
 *
 * Initializes global qseecom state, creates the character device
 * (/dev/qseecom), queries the QSEE version from TZ, creates the ION
 * client, parses the device tree (CE configuration, bus scaling,
 * clocks), notifies TZ of the secure-app region when required, and
 * registers the bus-scaling client. On any failure the goto chain at
 * the bottom unwinds everything acquired so far.
 *
 * Return: 0 on success or a negative errno.
 */
static int qseecom_probe(struct platform_device *pdev)
{
	int rc;
	int i;
	uint32_t feature = 10;
	struct device *class_dev;
	struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
	struct qseecom_command_scm_resp resp;
	struct qseecom_ce_info_use *pce_info_use = NULL;

	/* Reset all global driver state before any resource is acquired. */
	qseecom.qsee_bw_count = 0;
	qseecom.qsee_perf_client = 0;
	qseecom.qsee_sfpb_bw_count = 0;

	qseecom.qsee.ce_core_clk = NULL;
	qseecom.qsee.ce_clk = NULL;
	qseecom.qsee.ce_core_src_clk = NULL;
	qseecom.qsee.ce_bus_clk = NULL;

	qseecom.cumulative_mode = 0;
	qseecom.current_mode = INACTIVE;
	qseecom.support_bus_scaling = false;
	qseecom.support_fde = false;
	qseecom.support_pfe = false;

	qseecom.ce_drv.ce_core_clk = NULL;
	qseecom.ce_drv.ce_clk = NULL;
	qseecom.ce_drv.ce_core_src_clk = NULL;
	qseecom.ce_drv.ce_bus_clk = NULL;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);

	qseecom.app_block_ref_cnt = 0;
	init_waitqueue_head(&qseecom.app_block_wq);
	qseecom.whitelist_support = true;

	/* Character device /dev/qseecom. */
	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
			QSEECOM_DEV);
	if (IS_ERR(class_dev)) {
		/* NOTE(review): rc is still the cdev rc here, not the
		 * device_create error - the log value is stale. */
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&qseecom.cdev, &qseecom_fops);
	qseecom.cdev.owner = THIS_MODULE;

	rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}

	/* Listener/app/kernel-client bookkeeping. */
	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
	spin_lock_init(&qseecom.registered_listener_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
	spin_lock_init(&qseecom.registered_app_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
	spin_lock_init(&qseecom.registered_kclient_list_lock);
	init_waitqueue_head(&qseecom.send_resp_wq);
	qseecom.send_resp_flag = 0;

	/* Query QSEE version from TZ via SCM. */
	qseecom.qsee_version = QSEEE_VERSION_00;
	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
		&resp, sizeof(resp));
	/* NOTE(review): resp.result is printed before rc is checked; on
	 * SCM failure this may log an uninitialized value - confirm. */
	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
	if (rc) {
		pr_err("Failed to get QSEE version info %d\n", rc);
		goto exit_del_cdev;
	}
	qseecom.qsee_version = resp.result;
	qseecom.qseos_version = QSEOS_VERSION_14;
	qseecom.commonlib_loaded = false;
	qseecom.commonlib64_loaded = false;
	qseecom.pdev = class_dev;
	/* Create ION msm client */
	qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
	if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
		pr_err("Ion client cannot be created\n");
		rc = -ENOMEM;
		goto exit_del_cdev;
	}

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		qseecom.pdev->of_node = pdev->dev.of_node;
		qseecom.support_bus_scaling =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,support-bus-scaling");
		rc = qseecom_retrieve_ce_data(pdev);
		if (rc)
			goto exit_destroy_ion_client;
		qseecom.appsbl_qseecom_support =
			of_property_read_bool((&pdev->dev)->of_node,
						"qcom,appsbl-qseecom-support");
		pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
				qseecom.appsbl_qseecom_support);

		qseecom.commonlib64_loaded =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,commonlib64-loaded-by-uefi");
		pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
				qseecom.commonlib64_loaded);
		qseecom.fde_key_size =
			of_property_read_bool((&pdev->dev)->of_node,
						"qcom,fde-key-size");
		qseecom.no_clock_support =
			of_property_read_bool((&pdev->dev)->of_node,
						"qcom,no-clock-support");
		/* NOTE(review): this condition/message pair looks
		 * inverted (message about clocks handled elsewhere is
		 * printed when no-clock-support is absent) - confirm
		 * against driver history before changing. */
		if (!qseecom.no_clock_support) {
			pr_info("qseecom clocks handled by other subsystem\n");
		} else {
			pr_info("no-clock-support=0x%x",
			qseecom.no_clock_support);
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,qsee-reentrancy-support",
					&qseecom.qsee_reentrancy_support)) {
			pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
			qseecom.qsee_reentrancy_support = 0;
		} else {
			pr_warn("qseecom.qsee_reentrancy_support = %d\n",
				qseecom.qsee_reentrancy_support);
		}

		/*
		 * The qseecom bus scaling flag can not be enabled when
		 * crypto clock is not handled by HLOS.
		 */
		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
			pr_err("support_bus_scaling flag can not be enabled.\n");
			rc = -EINVAL;
			goto exit_destroy_ion_client;
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,ce-opp-freq",
					&qseecom.ce_opp_freq_hz)) {
			pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
			qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
		}
		rc = __qseecom_init_clk(CLK_QSEE);
		if (rc)
			goto exit_destroy_ion_client;

		/* Separate CE driver clocks only when the HLOS CE HW
		 * instance differs from the QSEE one. */
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde)) {
			rc = __qseecom_init_clk(CLK_CE_DRV);
			if (rc) {
				__qseecom_deinit_clk(CLK_QSEE);
				goto exit_destroy_ion_client;
			}
		} else {
			struct qseecom_clk *qclk;

			qclk = &qseecom.qsee;
			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
			qseecom.ce_drv.ce_clk = qclk->ce_clk;
			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
		}

		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						msm_bus_cl_get_pdata(pdev);
		/* Notify TZ of the secure app region unless appsbl/UEFI
		 * already did so. */
		if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
				(!qseecom.is_apps_region_protected &&
				!qseecom.appsbl_qseecom_support)) {
			struct resource *resource = NULL;
			struct qsee_apps_region_info_ireq req;
			struct qsee_apps_region_info_64bit_ireq req_64bit;
			struct qseecom_command_scm_resp resp;
			void *cmd_buf = NULL;
			size_t cmd_len;

			resource = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "secapp-region");
			if (resource) {
				/* 32-bit vs 64-bit request layout depends
				 * on the QSEE version. */
				if (qseecom.qsee_version < QSEE_VERSION_40) {
					req.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req.addr = (uint32_t)resource->start;
					req.size = resource_size(resource);
					cmd_buf = (void *)&req;
					cmd_len = sizeof(struct
						qsee_apps_region_info_ireq);
					pr_warn("secure app region addr=0x%x size=0x%x",
							req.addr, req.size);
				} else {
					req_64bit.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req_64bit.addr = resource->start;
					req_64bit.size = resource_size(
							resource);
					cmd_buf = (void *)&req_64bit;
					cmd_len = sizeof(struct
					qsee_apps_region_info_64bit_ireq);
					pr_warn("secure app region addr=0x%llx size=0x%x",
						req_64bit.addr, req_64bit.size);
				}
			} else {
				pr_err("Fail to get secure app region info\n");
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
			rc = __qseecom_enable_clk(CLK_QSEE);
			if (rc) {
				pr_err("CLK_QSEE enabling failed (%d)\n", rc);
				rc = -EIO;
				goto exit_deinit_clock;
			}
			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len,
					&resp, sizeof(resp));
			__qseecom_disable_clk(CLK_QSEE);
			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
				pr_err("send secapp reg fail %d resp.res %d\n",
							rc, resp.result);
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
		}
		/*
		 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
		 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
		 * Pls add "qseecom.commonlib64_loaded = true" here too.
		 */
		if (qseecom.is_apps_region_protected ||
					qseecom.appsbl_qseecom_support)
			qseecom.commonlib_loaded = true;
	} else {
		/* Non-DT platform: bus data comes from platform_data. */
		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						pdev->dev.platform_data;
	}
	if (qseecom.support_bus_scaling) {
		init_timer(&(qseecom.bw_scale_down_timer));
		INIT_WORK(&qseecom.bw_inactive_req_ws,
				qseecom_bw_inactive_req_work);
		qseecom.bw_scale_down_timer.function =
				qseecom_scale_bus_bandwidth_timer_callback;
	}
	qseecom.timer_running = false;
	qseecom.qsee_perf_client = msm_bus_scale_register_client(
					qseecom_platform_support);

	qseecom.whitelist_support = qseecom_check_whitelist_feature();
	pr_warn("qseecom.whitelist_support = %d\n",
				qseecom.whitelist_support);

	if (!qseecom.qsee_perf_client)
		pr_err("Unable to register bus client\n");

	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return 0;

exit_deinit_clock:
	__qseecom_deinit_clk(CLK_QSEE);
	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
		(qseecom.support_pfe || qseecom.support_fde))
		__qseecom_deinit_clk(CLK_CE_DRV);
exit_destroy_ion_client:
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.fde);
	}
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.pfe);
	}
	ion_client_destroy(qseecom.ion_clnt);
exit_del_cdev:
	cdev_del(&qseecom.cdev);
exit_destroy_device:
	device_destroy(driver_class, qseecom_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qseecom_device_no, 1);
	return rc;
}
8831
/*
 * qseecom_remove() - platform driver remove.
 *
 * Marks the driver not-ready, unloads every registered kernel client's
 * TA, unloads the common library, releases bus-scaling, CE info tables,
 * clocks, the ION client and the character device - the reverse of
 * qseecom_probe().
 *
 * Return: last qseecom_unload_app() status (0 when nothing failed).
 */
static int qseecom_remove(struct platform_device *pdev)
{
	struct qseecom_registered_kclient_list *kclient = NULL;
	/* _safe iteration: entries are deleted while walking the list. */
	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
	unsigned long flags = 0;
	int ret = 0;
	int i;
	struct qseecom_ce_pipe_entry *pce_entry;
	struct qseecom_ce_info_use *pce_info_use;

	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);

	/*
	 * NOTE(review): mutex_lock(&app_access_lock) below is taken while
	 * holding a spinlock with IRQs disabled - sleeping in atomic
	 * context. Known issue in this driver version; confirm against
	 * upstream fixes before relying on this path.
	 */
	list_for_each_entry_safe(kclient, kclient_tmp,
		&qseecom.registered_kclient_list_head, list) {

		/* Break the loop if client handle is NULL */
		if (!kclient->handle) {
			list_del(&kclient->list);
			kzfree(kclient);
			break;
		}

		list_del(&kclient->list);
		mutex_lock(&app_access_lock);
		ret = qseecom_unload_app(kclient->handle->dev, false);
		mutex_unlock(&app_access_lock);
		/* On unload failure the kclient is intentionally leaked
		 * rather than freed while the TA may still be live. */
		if (!ret) {
			kzfree(kclient->handle->dev);
			kzfree(kclient->handle);
			kzfree(kclient);
		}
	}

	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);

	if (qseecom.qseos_version > QSEEE_VERSION_00)
		qseecom_unload_commonlib_image();

	/* Drop any outstanding bus bandwidth vote before unregistering. */
	if (qseecom.qsee_perf_client)
		msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
									0);
	if (pdev->dev.platform_data != NULL)
		msm_bus_scale_unregister_client(qseecom.qsee_perf_client);

	if (qseecom.support_bus_scaling) {
		cancel_work_sync(&qseecom.bw_inactive_req_ws);
		del_timer_sync(&qseecom.bw_scale_down_timer);
	}

	/* Free the CE info tables built by qseecom_retrieve_ce_data(). */
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.fde);
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.pfe);

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		__qseecom_deinit_clk(CLK_QSEE);
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde))
			__qseecom_deinit_clk(CLK_CE_DRV);
	}

	ion_client_destroy(qseecom.ion_clnt);

	cdev_del(&qseecom.cdev);

	device_destroy(driver_class, qseecom_device_no);

	class_destroy(driver_class);

	unregister_chrdev_region(qseecom_device_no, 1);

	return ret;
}
8921
8922static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
8923{
8924 int ret = 0;
8925 struct qseecom_clk *qclk;
8926
8927 qclk = &qseecom.qsee;
8928 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
8929 if (qseecom.no_clock_support)
8930 return 0;
8931
8932 mutex_lock(&qsee_bw_mutex);
8933 mutex_lock(&clk_access_lock);
8934
8935 if (qseecom.current_mode != INACTIVE) {
8936 ret = msm_bus_scale_client_update_request(
8937 qseecom.qsee_perf_client, INACTIVE);
8938 if (ret)
8939 pr_err("Fail to scale down bus\n");
8940 else
8941 qseecom.current_mode = INACTIVE;
8942 }
8943
8944 if (qclk->clk_access_cnt) {
8945 if (qclk->ce_clk != NULL)
8946 clk_disable_unprepare(qclk->ce_clk);
8947 if (qclk->ce_core_clk != NULL)
8948 clk_disable_unprepare(qclk->ce_core_clk);
8949 if (qclk->ce_bus_clk != NULL)
8950 clk_disable_unprepare(qclk->ce_bus_clk);
8951 }
8952
8953 del_timer_sync(&(qseecom.bw_scale_down_timer));
8954 qseecom.timer_running = false;
8955
8956 mutex_unlock(&clk_access_lock);
8957 mutex_unlock(&qsee_bw_mutex);
8958 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8959
8960 return 0;
8961}
8962
/*
 * qseecom_resume() - legacy platform resume callback.
 *
 * Restores the bus bandwidth vote recorded in cumulative_mode,
 * re-enables any CE clocks that were held at suspend time, and restarts
 * the bandwidth scale-down timer. On clock failure the goto chain
 * unwinds the clocks enabled so far and returns -EIO; the driver is
 * marked READY in every case.
 *
 * Return: 0 on success, -EIO when re-enabling a CE clock fails.
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qseecom.no_clock_support)
		goto exit;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	/* Clamp the restored vote to HIGH. */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	if (qseecom.cumulative_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	/* Re-enable the clocks gated in qseecom_suspend(), in order:
	 * core -> iface -> bus; each failure unwinds the earlier ones. */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}

	/* Re-arm the scale-down timer if anything is active again. */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;

ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
9042
/* Device-tree match table: binds this driver to "qcom,qseecom" nodes. */
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}
};
9049
/* Platform driver definition; uses the legacy suspend/resume hooks. */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.owner = THIS_MODULE,
		.of_match_table = qseecom_match,
	},
};
9061
/* Module init: register the qseecom platform driver. */
static int qseecom_init(void)
{
	return platform_driver_register(&qseecom_plat_driver);
}
9066
/* Module exit: unregister the qseecom platform driver. */
static void qseecom_exit(void)
{
	platform_driver_unregister(&qseecom_plat_driver);
}
9071
/* Module metadata and entry/exit point registration. */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");

module_init(qseecom_init);
module_exit(qseecom_exit);