blob: 22248da1df72fb3d9c9c18d3e2bb1cfd17e6632a [file] [log] [blame]
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
Zhen Kong3d1d92f2018-02-02 17:21:04 -08004 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
/* Device node name and QSEE/QSEOS firmware version codes. */
#define QSEECOM_DEV			"qseecom"
#define QSEOS_VERSION_14		0x14
#define QSEEE_VERSION_00		0x400000
#define QSEE_VERSION_01			0x401000
#define QSEE_VERSION_02			0x402000
#define QSEE_VERSION_03			0x403000
#define QSEE_VERSION_04			0x404000
#define QSEE_VERSION_05			0x405000
#define QSEE_VERSION_20			0x800000
#define QSEE_VERSION_40			0x1000000  /* TZ.BF.4.0 */

#define QSEE_CE_CLK_100MHZ		100000000
#define CE_CLK_DIV			1000000

/* Upper bound on scatter/gather entries marshalled to TZ. */
#define QSEECOM_MAX_SG_ENTRY			4096
#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
		(QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)

#define QSEECOM_INVALID_KEY_ID 0xff

/* Save partition image hash for authentication check */
#define SCM_SAVE_PARTITION_HASH_ID	0x01

/* Check if enterprise security is activate */
#define SCM_IS_ACTIVATED_ID		0x02

/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
#define SCM_MDTP_CIPHER_DIP		0x01

/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
#define MAX_DIP			0x20000

#define RPMB_SERVICE			0x2000
#define SSD_SERVICE			0x3000

#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT	2000
#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT	2000
#define TWO 2
#define QSEECOM_UFS_ICE_CE_NUM	10
#define QSEECOM_SDCC_ICE_CE_NUM	20
#define QSEECOM_ICE_FDE_KEY_INDEX	0

#define PHY_ADDR_4G	(1ULL<<32)

/* Driver lifecycle states kept in qseecom.qseecom_state. */
#define QSEECOM_STATE_NOT_READY		0
#define QSEECOM_STATE_SUSPEND		1
#define QSEECOM_STATE_READY		2
/* Bit position of the FDE key-size field (see enum below). */
#define QSEECOM_ICE_FDE_KEY_SIZE_MASK	2

/*
 * Default ce info unit to 0 for services which support only a single
 * instance. Most services are in this category.
 */
#define DEFAULT_CE_INFO_UNIT	0
#define DEFAULT_NUM_CE_INFO_UNIT	1

/* Clock identifiers used by the bandwidth-voting code. */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};

/* FDE key sizes, encoded at bit QSEECOM_ICE_FDE_KEY_SIZE_MASK. */
enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};

/* What kind of client a qseecom device handle represents. */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,
	QSEECOM_LISTENER_SERVICE,
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};

/* Crypto-engine clock instances managed by this driver. */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,
	CLK_CE_DRV,
	CLK_INVALID,
};
139
140static struct class *driver_class;
141static dev_t qseecom_device_no;
142
143static DEFINE_MUTEX(qsee_bw_mutex);
144static DEFINE_MUTEX(app_access_lock);
145static DEFINE_MUTEX(clk_access_lock);
146
147struct sglist_info {
148 uint32_t indexAndFlags;
149 uint32_t sizeOrCount;
150};
151
152/*
153 * The 31th bit indicates only one or multiple physical address inside
154 * the request buffer. If it is set, the index locates a single physical addr
155 * inside the request buffer, and `sizeOrCount` is the size of the memory being
156 * shared at that physical address.
157 * Otherwise, the index locates an array of {start, len} pairs (a
158 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
159 * that array.
160 *
161 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
162 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
163 *
164 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
165 */
166#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
167 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
168
169#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
170
171#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
172
173#define MAKE_WHITELIST_VERSION(major, minor, patch) \
174 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
175
176struct qseecom_registered_listener_list {
177 struct list_head list;
178 struct qseecom_register_listener_req svc;
179 void *user_virt_sb_base;
180 u8 *sb_virt;
181 phys_addr_t sb_phys;
182 size_t sb_length;
183 struct ion_handle *ihandle; /* Retrieve phy addr */
184 wait_queue_head_t rcv_req_wq;
185 int rcv_req_flag;
186 int send_resp_flag;
187 bool listener_in_use;
188 /* wq for thread blocked on this listener*/
189 wait_queue_head_t listener_block_app_wq;
190 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
191 uint32_t sglist_cnt;
Zhen Kong26e62742018-05-04 17:19:06 -0700192 int abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700193};
194
195struct qseecom_registered_app_list {
196 struct list_head list;
197 u32 app_id;
198 u32 ref_cnt;
199 char app_name[MAX_APP_NAME_SIZE];
200 u32 app_arch;
201 bool app_blocked;
Zhen Kongdea10592018-07-30 17:50:10 -0700202 u32 check_block;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700203 u32 blocked_on_listener_id;
204};
205
206struct qseecom_registered_kclient_list {
207 struct list_head list;
208 struct qseecom_handle *handle;
209};
210
211struct qseecom_ce_info_use {
212 unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
213 unsigned int unit_num;
214 unsigned int num_ce_pipe_entries;
215 struct qseecom_ce_pipe_entry *ce_pipe_entry;
216 bool alloc;
217 uint32_t type;
218};
219
220struct ce_hw_usage_info {
221 uint32_t qsee_ce_hw_instance;
222 uint32_t num_fde;
223 struct qseecom_ce_info_use *fde;
224 uint32_t num_pfe;
225 struct qseecom_ce_info_use *pfe;
226};
227
228struct qseecom_clk {
229 enum qseecom_ce_hw_instance instance;
230 struct clk *ce_core_clk;
231 struct clk *ce_clk;
232 struct clk *ce_core_src_clk;
233 struct clk *ce_bus_clk;
234 uint32_t clk_access_cnt;
235};
236
237struct qseecom_control {
238 struct ion_client *ion_clnt; /* Ion client */
239 struct list_head registered_listener_list_head;
240 spinlock_t registered_listener_list_lock;
241
242 struct list_head registered_app_list_head;
243 spinlock_t registered_app_list_lock;
244
245 struct list_head registered_kclient_list_head;
246 spinlock_t registered_kclient_list_lock;
247
248 wait_queue_head_t send_resp_wq;
249 int send_resp_flag;
250
251 uint32_t qseos_version;
252 uint32_t qsee_version;
253 struct device *pdev;
254 bool whitelist_support;
255 bool commonlib_loaded;
256 bool commonlib64_loaded;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700257 struct ce_hw_usage_info ce_info;
258
259 int qsee_bw_count;
260 int qsee_sfpb_bw_count;
261
262 uint32_t qsee_perf_client;
263 struct qseecom_clk qsee;
264 struct qseecom_clk ce_drv;
265
266 bool support_bus_scaling;
267 bool support_fde;
268 bool support_pfe;
269 bool fde_key_size;
270 uint32_t cumulative_mode;
271 enum qseecom_bandwidth_request_mode current_mode;
272 struct timer_list bw_scale_down_timer;
273 struct work_struct bw_inactive_req_ws;
274 struct cdev cdev;
275 bool timer_running;
276 bool no_clock_support;
277 unsigned int ce_opp_freq_hz;
278 bool appsbl_qseecom_support;
279 uint32_t qsee_reentrancy_support;
280
281 uint32_t app_block_ref_cnt;
282 wait_queue_head_t app_block_wq;
283 atomic_t qseecom_state;
284 int is_apps_region_protected;
Zhen Kong2f60f492017-06-29 15:22:14 -0700285 bool smcinvoke_support;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700286};
287
288struct qseecom_sec_buf_fd_info {
289 bool is_sec_buf_fd;
290 size_t size;
291 void *vbase;
292 dma_addr_t pbase;
293};
294
295struct qseecom_param_memref {
296 uint32_t buffer;
297 uint32_t size;
298};
299
300struct qseecom_client_handle {
301 u32 app_id;
302 u8 *sb_virt;
303 phys_addr_t sb_phys;
304 unsigned long user_virt_sb_base;
305 size_t sb_length;
306 struct ion_handle *ihandle; /* Retrieve phy addr */
307 char app_name[MAX_APP_NAME_SIZE];
308 u32 app_arch;
309 struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
310};
311
312struct qseecom_listener_handle {
313 u32 id;
314};
315
316static struct qseecom_control qseecom;
317
318struct qseecom_dev_handle {
319 enum qseecom_client_handle_type type;
320 union {
321 struct qseecom_client_handle client;
322 struct qseecom_listener_handle listener;
323 };
324 bool released;
325 int abort;
326 wait_queue_head_t abort_wq;
327 atomic_t ioctl_count;
328 bool perf_enabled;
329 bool fast_load_enabled;
330 enum qseecom_bandwidth_request_mode mode;
331 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
332 uint32_t sglist_cnt;
333 bool use_legacy_cmd;
334};
335
336struct qseecom_key_id_usage_desc {
337 uint8_t desc[QSEECOM_KEY_ID_SIZE];
338};
339
340struct qseecom_crypto_info {
341 unsigned int unit_num;
342 unsigned int ce;
343 unsigned int pipe_pair;
344};
345
346static struct qseecom_key_id_usage_desc key_id_array[] = {
347 {
348 .desc = "Undefined Usage Index",
349 },
350
351 {
352 .desc = "Full Disk Encryption",
353 },
354
355 {
356 .desc = "Per File Encryption",
357 },
358
359 {
360 .desc = "UFS ICE Full Disk Encryption",
361 },
362
363 {
364 .desc = "SDCC ICE Full Disk Encryption",
365 },
366};
367
368/* Function proto types */
369static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
370static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
371static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
372static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
373static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
374static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
375 char *cmnlib_name);
376static int qseecom_enable_ice_setup(int usage);
377static int qseecom_disable_ice_setup(int usage);
378static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
379static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
380 void __user *argp);
381static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
382 void __user *argp);
383static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
384 void __user *argp);
385
386static int get_qseecom_keymaster_status(char *str)
387{
388 get_option(&str, &qseecom.is_apps_region_protected);
389 return 1;
390}
391__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
392
393static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
394 const void *req_buf, void *resp_buf)
395{
396 int ret = 0;
397 uint32_t smc_id = 0;
398 uint32_t qseos_cmd_id = 0;
399 struct scm_desc desc = {0};
400 struct qseecom_command_scm_resp *scm_resp = NULL;
401
402 if (!req_buf || !resp_buf) {
403 pr_err("Invalid buffer pointer\n");
404 return -EINVAL;
405 }
406 qseos_cmd_id = *(uint32_t *)req_buf;
407 scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
408
409 switch (svc_id) {
410 case 6: {
411 if (tz_cmd_id == 3) {
412 smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
413 desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
414 desc.args[0] = *(uint32_t *)req_buf;
415 } else {
416 pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
417 svc_id, tz_cmd_id);
418 return -EINVAL;
419 }
420 ret = scm_call2(smc_id, &desc);
421 break;
422 }
423 case SCM_SVC_ES: {
424 switch (tz_cmd_id) {
425 case SCM_SAVE_PARTITION_HASH_ID: {
426 u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
427 struct qseecom_save_partition_hash_req *p_hash_req =
428 (struct qseecom_save_partition_hash_req *)
429 req_buf;
430 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
431
432 if (!tzbuf)
433 return -ENOMEM;
434 memset(tzbuf, 0, tzbuflen);
435 memcpy(tzbuf, p_hash_req->digest,
436 SHA256_DIGEST_LENGTH);
437 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
438 smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
439 desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
440 desc.args[0] = p_hash_req->partition_id;
441 desc.args[1] = virt_to_phys(tzbuf);
442 desc.args[2] = SHA256_DIGEST_LENGTH;
443 ret = scm_call2(smc_id, &desc);
444 kzfree(tzbuf);
445 break;
446 }
447 default: {
448 pr_err("tz_cmd_id %d is not supported by scm_call2\n",
449 tz_cmd_id);
450 ret = -EINVAL;
451 break;
452 }
453 } /* end of switch (tz_cmd_id) */
454 break;
455 } /* end of case SCM_SVC_ES */
456 case SCM_SVC_TZSCHEDULER: {
457 switch (qseos_cmd_id) {
458 case QSEOS_APP_START_COMMAND: {
459 struct qseecom_load_app_ireq *req;
460 struct qseecom_load_app_64bit_ireq *req_64bit;
461
462 smc_id = TZ_OS_APP_START_ID;
463 desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
464 if (qseecom.qsee_version < QSEE_VERSION_40) {
465 req = (struct qseecom_load_app_ireq *)req_buf;
466 desc.args[0] = req->mdt_len;
467 desc.args[1] = req->img_len;
468 desc.args[2] = req->phy_addr;
469 } else {
470 req_64bit =
471 (struct qseecom_load_app_64bit_ireq *)
472 req_buf;
473 desc.args[0] = req_64bit->mdt_len;
474 desc.args[1] = req_64bit->img_len;
475 desc.args[2] = req_64bit->phy_addr;
476 }
477 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
478 ret = scm_call2(smc_id, &desc);
479 break;
480 }
481 case QSEOS_APP_SHUTDOWN_COMMAND: {
482 struct qseecom_unload_app_ireq *req;
483
484 req = (struct qseecom_unload_app_ireq *)req_buf;
485 smc_id = TZ_OS_APP_SHUTDOWN_ID;
486 desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
487 desc.args[0] = req->app_id;
488 ret = scm_call2(smc_id, &desc);
489 break;
490 }
491 case QSEOS_APP_LOOKUP_COMMAND: {
492 struct qseecom_check_app_ireq *req;
493 u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
494 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
495
496 if (!tzbuf)
497 return -ENOMEM;
498 req = (struct qseecom_check_app_ireq *)req_buf;
499 pr_debug("Lookup app_name = %s\n", req->app_name);
500 strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
501 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
502 smc_id = TZ_OS_APP_LOOKUP_ID;
503 desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
504 desc.args[0] = virt_to_phys(tzbuf);
505 desc.args[1] = strlen(req->app_name);
506 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
507 ret = scm_call2(smc_id, &desc);
508 kzfree(tzbuf);
509 break;
510 }
511 case QSEOS_APP_REGION_NOTIFICATION: {
512 struct qsee_apps_region_info_ireq *req;
513 struct qsee_apps_region_info_64bit_ireq *req_64bit;
514
515 smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
516 desc.arginfo =
517 TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
518 if (qseecom.qsee_version < QSEE_VERSION_40) {
519 req = (struct qsee_apps_region_info_ireq *)
520 req_buf;
521 desc.args[0] = req->addr;
522 desc.args[1] = req->size;
523 } else {
524 req_64bit =
525 (struct qsee_apps_region_info_64bit_ireq *)
526 req_buf;
527 desc.args[0] = req_64bit->addr;
528 desc.args[1] = req_64bit->size;
529 }
530 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
531 ret = scm_call2(smc_id, &desc);
532 break;
533 }
534 case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
535 struct qseecom_load_lib_image_ireq *req;
536 struct qseecom_load_lib_image_64bit_ireq *req_64bit;
537
538 smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
539 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
540 if (qseecom.qsee_version < QSEE_VERSION_40) {
541 req = (struct qseecom_load_lib_image_ireq *)
542 req_buf;
543 desc.args[0] = req->mdt_len;
544 desc.args[1] = req->img_len;
545 desc.args[2] = req->phy_addr;
546 } else {
547 req_64bit =
548 (struct qseecom_load_lib_image_64bit_ireq *)
549 req_buf;
550 desc.args[0] = req_64bit->mdt_len;
551 desc.args[1] = req_64bit->img_len;
552 desc.args[2] = req_64bit->phy_addr;
553 }
554 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
555 ret = scm_call2(smc_id, &desc);
556 break;
557 }
558 case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
559 smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
560 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
561 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
562 ret = scm_call2(smc_id, &desc);
563 break;
564 }
565 case QSEOS_REGISTER_LISTENER: {
566 struct qseecom_register_listener_ireq *req;
567 struct qseecom_register_listener_64bit_ireq *req_64bit;
568
569 desc.arginfo =
570 TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
571 if (qseecom.qsee_version < QSEE_VERSION_40) {
572 req = (struct qseecom_register_listener_ireq *)
573 req_buf;
574 desc.args[0] = req->listener_id;
575 desc.args[1] = req->sb_ptr;
576 desc.args[2] = req->sb_len;
577 } else {
578 req_64bit =
579 (struct qseecom_register_listener_64bit_ireq *)
580 req_buf;
581 desc.args[0] = req_64bit->listener_id;
582 desc.args[1] = req_64bit->sb_ptr;
583 desc.args[2] = req_64bit->sb_len;
584 }
Zhen Kong2f60f492017-06-29 15:22:14 -0700585 qseecom.smcinvoke_support = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700586 smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
587 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
588 ret = scm_call2(smc_id, &desc);
589 if (ret) {
Zhen Kong2f60f492017-06-29 15:22:14 -0700590 qseecom.smcinvoke_support = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700591 smc_id = TZ_OS_REGISTER_LISTENER_ID;
592 __qseecom_reentrancy_check_if_no_app_blocked(
593 smc_id);
594 ret = scm_call2(smc_id, &desc);
595 }
596 break;
597 }
598 case QSEOS_DEREGISTER_LISTENER: {
599 struct qseecom_unregister_listener_ireq *req;
600
601 req = (struct qseecom_unregister_listener_ireq *)
602 req_buf;
603 smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
604 desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
605 desc.args[0] = req->listener_id;
606 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
607 ret = scm_call2(smc_id, &desc);
608 break;
609 }
610 case QSEOS_LISTENER_DATA_RSP_COMMAND: {
611 struct qseecom_client_listener_data_irsp *req;
612
613 req = (struct qseecom_client_listener_data_irsp *)
614 req_buf;
615 smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
616 desc.arginfo =
617 TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
618 desc.args[0] = req->listener_id;
619 desc.args[1] = req->status;
620 ret = scm_call2(smc_id, &desc);
621 break;
622 }
623 case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
624 struct qseecom_client_listener_data_irsp *req;
625 struct qseecom_client_listener_data_64bit_irsp *req_64;
626
627 smc_id =
628 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
629 desc.arginfo =
630 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
631 if (qseecom.qsee_version < QSEE_VERSION_40) {
632 req =
633 (struct qseecom_client_listener_data_irsp *)
634 req_buf;
635 desc.args[0] = req->listener_id;
636 desc.args[1] = req->status;
637 desc.args[2] = req->sglistinfo_ptr;
638 desc.args[3] = req->sglistinfo_len;
639 } else {
640 req_64 =
641 (struct qseecom_client_listener_data_64bit_irsp *)
642 req_buf;
643 desc.args[0] = req_64->listener_id;
644 desc.args[1] = req_64->status;
645 desc.args[2] = req_64->sglistinfo_ptr;
646 desc.args[3] = req_64->sglistinfo_len;
647 }
648 ret = scm_call2(smc_id, &desc);
649 break;
650 }
651 case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
652 struct qseecom_load_app_ireq *req;
653 struct qseecom_load_app_64bit_ireq *req_64bit;
654
655 smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
656 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
657 if (qseecom.qsee_version < QSEE_VERSION_40) {
658 req = (struct qseecom_load_app_ireq *)req_buf;
659 desc.args[0] = req->mdt_len;
660 desc.args[1] = req->img_len;
661 desc.args[2] = req->phy_addr;
662 } else {
663 req_64bit =
664 (struct qseecom_load_app_64bit_ireq *)req_buf;
665 desc.args[0] = req_64bit->mdt_len;
666 desc.args[1] = req_64bit->img_len;
667 desc.args[2] = req_64bit->phy_addr;
668 }
669 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
670 ret = scm_call2(smc_id, &desc);
671 break;
672 }
673 case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
674 smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
675 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
676 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
677 ret = scm_call2(smc_id, &desc);
678 break;
679 }
680
681 case QSEOS_CLIENT_SEND_DATA_COMMAND: {
682 struct qseecom_client_send_data_ireq *req;
683 struct qseecom_client_send_data_64bit_ireq *req_64bit;
684
685 smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
686 desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
687 if (qseecom.qsee_version < QSEE_VERSION_40) {
688 req = (struct qseecom_client_send_data_ireq *)
689 req_buf;
690 desc.args[0] = req->app_id;
691 desc.args[1] = req->req_ptr;
692 desc.args[2] = req->req_len;
693 desc.args[3] = req->rsp_ptr;
694 desc.args[4] = req->rsp_len;
695 } else {
696 req_64bit =
697 (struct qseecom_client_send_data_64bit_ireq *)
698 req_buf;
699 desc.args[0] = req_64bit->app_id;
700 desc.args[1] = req_64bit->req_ptr;
701 desc.args[2] = req_64bit->req_len;
702 desc.args[3] = req_64bit->rsp_ptr;
703 desc.args[4] = req_64bit->rsp_len;
704 }
705 ret = scm_call2(smc_id, &desc);
706 break;
707 }
708 case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
709 struct qseecom_client_send_data_ireq *req;
710 struct qseecom_client_send_data_64bit_ireq *req_64bit;
711
712 smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
713 desc.arginfo =
714 TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
715 if (qseecom.qsee_version < QSEE_VERSION_40) {
716 req = (struct qseecom_client_send_data_ireq *)
717 req_buf;
718 desc.args[0] = req->app_id;
719 desc.args[1] = req->req_ptr;
720 desc.args[2] = req->req_len;
721 desc.args[3] = req->rsp_ptr;
722 desc.args[4] = req->rsp_len;
723 desc.args[5] = req->sglistinfo_ptr;
724 desc.args[6] = req->sglistinfo_len;
725 } else {
726 req_64bit =
727 (struct qseecom_client_send_data_64bit_ireq *)
728 req_buf;
729 desc.args[0] = req_64bit->app_id;
730 desc.args[1] = req_64bit->req_ptr;
731 desc.args[2] = req_64bit->req_len;
732 desc.args[3] = req_64bit->rsp_ptr;
733 desc.args[4] = req_64bit->rsp_len;
734 desc.args[5] = req_64bit->sglistinfo_ptr;
735 desc.args[6] = req_64bit->sglistinfo_len;
736 }
737 ret = scm_call2(smc_id, &desc);
738 break;
739 }
740 case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
741 struct qseecom_client_send_service_ireq *req;
742
743 req = (struct qseecom_client_send_service_ireq *)
744 req_buf;
745 smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
746 desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
747 desc.args[0] = req->key_type;
748 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
749 ret = scm_call2(smc_id, &desc);
750 break;
751 }
752 case QSEOS_RPMB_ERASE_COMMAND: {
753 smc_id = TZ_OS_RPMB_ERASE_ID;
754 desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
755 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
756 ret = scm_call2(smc_id, &desc);
757 break;
758 }
759 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
760 smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
761 desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
762 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
763 ret = scm_call2(smc_id, &desc);
764 break;
765 }
766 case QSEOS_GENERATE_KEY: {
767 u32 tzbuflen = PAGE_ALIGN(sizeof
768 (struct qseecom_key_generate_ireq) -
769 sizeof(uint32_t));
770 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
771
772 if (!tzbuf)
773 return -ENOMEM;
774 memset(tzbuf, 0, tzbuflen);
775 memcpy(tzbuf, req_buf + sizeof(uint32_t),
776 (sizeof(struct qseecom_key_generate_ireq) -
777 sizeof(uint32_t)));
778 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
779 smc_id = TZ_OS_KS_GEN_KEY_ID;
780 desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
781 desc.args[0] = virt_to_phys(tzbuf);
782 desc.args[1] = tzbuflen;
783 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
784 ret = scm_call2(smc_id, &desc);
785 kzfree(tzbuf);
786 break;
787 }
788 case QSEOS_DELETE_KEY: {
789 u32 tzbuflen = PAGE_ALIGN(sizeof
790 (struct qseecom_key_delete_ireq) -
791 sizeof(uint32_t));
792 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
793
794 if (!tzbuf)
795 return -ENOMEM;
796 memset(tzbuf, 0, tzbuflen);
797 memcpy(tzbuf, req_buf + sizeof(uint32_t),
798 (sizeof(struct qseecom_key_delete_ireq) -
799 sizeof(uint32_t)));
800 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
801 smc_id = TZ_OS_KS_DEL_KEY_ID;
802 desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
803 desc.args[0] = virt_to_phys(tzbuf);
804 desc.args[1] = tzbuflen;
805 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
806 ret = scm_call2(smc_id, &desc);
807 kzfree(tzbuf);
808 break;
809 }
810 case QSEOS_SET_KEY: {
811 u32 tzbuflen = PAGE_ALIGN(sizeof
812 (struct qseecom_key_select_ireq) -
813 sizeof(uint32_t));
814 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
815
816 if (!tzbuf)
817 return -ENOMEM;
818 memset(tzbuf, 0, tzbuflen);
819 memcpy(tzbuf, req_buf + sizeof(uint32_t),
820 (sizeof(struct qseecom_key_select_ireq) -
821 sizeof(uint32_t)));
822 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
823 smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
824 desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
825 desc.args[0] = virt_to_phys(tzbuf);
826 desc.args[1] = tzbuflen;
827 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
828 ret = scm_call2(smc_id, &desc);
829 kzfree(tzbuf);
830 break;
831 }
832 case QSEOS_UPDATE_KEY_USERINFO: {
833 u32 tzbuflen = PAGE_ALIGN(sizeof
834 (struct qseecom_key_userinfo_update_ireq) -
835 sizeof(uint32_t));
836 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
837
838 if (!tzbuf)
839 return -ENOMEM;
840 memset(tzbuf, 0, tzbuflen);
841 memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
842 (struct qseecom_key_userinfo_update_ireq) -
843 sizeof(uint32_t)));
844 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
845 smc_id = TZ_OS_KS_UPDATE_KEY_ID;
846 desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
847 desc.args[0] = virt_to_phys(tzbuf);
848 desc.args[1] = tzbuflen;
849 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
850 ret = scm_call2(smc_id, &desc);
851 kzfree(tzbuf);
852 break;
853 }
854 case QSEOS_TEE_OPEN_SESSION: {
855 struct qseecom_qteec_ireq *req;
856 struct qseecom_qteec_64bit_ireq *req_64bit;
857
858 smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
859 desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
860 if (qseecom.qsee_version < QSEE_VERSION_40) {
861 req = (struct qseecom_qteec_ireq *)req_buf;
862 desc.args[0] = req->app_id;
863 desc.args[1] = req->req_ptr;
864 desc.args[2] = req->req_len;
865 desc.args[3] = req->resp_ptr;
866 desc.args[4] = req->resp_len;
867 } else {
868 req_64bit = (struct qseecom_qteec_64bit_ireq *)
869 req_buf;
870 desc.args[0] = req_64bit->app_id;
871 desc.args[1] = req_64bit->req_ptr;
872 desc.args[2] = req_64bit->req_len;
873 desc.args[3] = req_64bit->resp_ptr;
874 desc.args[4] = req_64bit->resp_len;
875 }
876 ret = scm_call2(smc_id, &desc);
877 break;
878 }
879 case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
880 struct qseecom_qteec_ireq *req;
881 struct qseecom_qteec_64bit_ireq *req_64bit;
882
883 smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
884 desc.arginfo =
885 TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
886 if (qseecom.qsee_version < QSEE_VERSION_40) {
887 req = (struct qseecom_qteec_ireq *)req_buf;
888 desc.args[0] = req->app_id;
889 desc.args[1] = req->req_ptr;
890 desc.args[2] = req->req_len;
891 desc.args[3] = req->resp_ptr;
892 desc.args[4] = req->resp_len;
893 desc.args[5] = req->sglistinfo_ptr;
894 desc.args[6] = req->sglistinfo_len;
895 } else {
896 req_64bit = (struct qseecom_qteec_64bit_ireq *)
897 req_buf;
898 desc.args[0] = req_64bit->app_id;
899 desc.args[1] = req_64bit->req_ptr;
900 desc.args[2] = req_64bit->req_len;
901 desc.args[3] = req_64bit->resp_ptr;
902 desc.args[4] = req_64bit->resp_len;
903 desc.args[5] = req_64bit->sglistinfo_ptr;
904 desc.args[6] = req_64bit->sglistinfo_len;
905 }
906 ret = scm_call2(smc_id, &desc);
907 break;
908 }
909 case QSEOS_TEE_INVOKE_COMMAND: {
910 struct qseecom_qteec_ireq *req;
911 struct qseecom_qteec_64bit_ireq *req_64bit;
912
913 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
914 desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
915 if (qseecom.qsee_version < QSEE_VERSION_40) {
916 req = (struct qseecom_qteec_ireq *)req_buf;
917 desc.args[0] = req->app_id;
918 desc.args[1] = req->req_ptr;
919 desc.args[2] = req->req_len;
920 desc.args[3] = req->resp_ptr;
921 desc.args[4] = req->resp_len;
922 } else {
923 req_64bit = (struct qseecom_qteec_64bit_ireq *)
924 req_buf;
925 desc.args[0] = req_64bit->app_id;
926 desc.args[1] = req_64bit->req_ptr;
927 desc.args[2] = req_64bit->req_len;
928 desc.args[3] = req_64bit->resp_ptr;
929 desc.args[4] = req_64bit->resp_len;
930 }
931 ret = scm_call2(smc_id, &desc);
932 break;
933 }
934 case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
935 struct qseecom_qteec_ireq *req;
936 struct qseecom_qteec_64bit_ireq *req_64bit;
937
938 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
939 desc.arginfo =
940 TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
941 if (qseecom.qsee_version < QSEE_VERSION_40) {
942 req = (struct qseecom_qteec_ireq *)req_buf;
943 desc.args[0] = req->app_id;
944 desc.args[1] = req->req_ptr;
945 desc.args[2] = req->req_len;
946 desc.args[3] = req->resp_ptr;
947 desc.args[4] = req->resp_len;
948 desc.args[5] = req->sglistinfo_ptr;
949 desc.args[6] = req->sglistinfo_len;
950 } else {
951 req_64bit = (struct qseecom_qteec_64bit_ireq *)
952 req_buf;
953 desc.args[0] = req_64bit->app_id;
954 desc.args[1] = req_64bit->req_ptr;
955 desc.args[2] = req_64bit->req_len;
956 desc.args[3] = req_64bit->resp_ptr;
957 desc.args[4] = req_64bit->resp_len;
958 desc.args[5] = req_64bit->sglistinfo_ptr;
959 desc.args[6] = req_64bit->sglistinfo_len;
960 }
961 ret = scm_call2(smc_id, &desc);
962 break;
963 }
964 case QSEOS_TEE_CLOSE_SESSION: {
965 struct qseecom_qteec_ireq *req;
966 struct qseecom_qteec_64bit_ireq *req_64bit;
967
968 smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
969 desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
970 if (qseecom.qsee_version < QSEE_VERSION_40) {
971 req = (struct qseecom_qteec_ireq *)req_buf;
972 desc.args[0] = req->app_id;
973 desc.args[1] = req->req_ptr;
974 desc.args[2] = req->req_len;
975 desc.args[3] = req->resp_ptr;
976 desc.args[4] = req->resp_len;
977 } else {
978 req_64bit = (struct qseecom_qteec_64bit_ireq *)
979 req_buf;
980 desc.args[0] = req_64bit->app_id;
981 desc.args[1] = req_64bit->req_ptr;
982 desc.args[2] = req_64bit->req_len;
983 desc.args[3] = req_64bit->resp_ptr;
984 desc.args[4] = req_64bit->resp_len;
985 }
986 ret = scm_call2(smc_id, &desc);
987 break;
988 }
989 case QSEOS_TEE_REQUEST_CANCELLATION: {
990 struct qseecom_qteec_ireq *req;
991 struct qseecom_qteec_64bit_ireq *req_64bit;
992
993 smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
994 desc.arginfo =
995 TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
996 if (qseecom.qsee_version < QSEE_VERSION_40) {
997 req = (struct qseecom_qteec_ireq *)req_buf;
998 desc.args[0] = req->app_id;
999 desc.args[1] = req->req_ptr;
1000 desc.args[2] = req->req_len;
1001 desc.args[3] = req->resp_ptr;
1002 desc.args[4] = req->resp_len;
1003 } else {
1004 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1005 req_buf;
1006 desc.args[0] = req_64bit->app_id;
1007 desc.args[1] = req_64bit->req_ptr;
1008 desc.args[2] = req_64bit->req_len;
1009 desc.args[3] = req_64bit->resp_ptr;
1010 desc.args[4] = req_64bit->resp_len;
1011 }
1012 ret = scm_call2(smc_id, &desc);
1013 break;
1014 }
1015 case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
1016 struct qseecom_continue_blocked_request_ireq *req =
1017 (struct qseecom_continue_blocked_request_ireq *)
1018 req_buf;
Zhen Kong2f60f492017-06-29 15:22:14 -07001019 if (qseecom.smcinvoke_support)
1020 smc_id =
1021 TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
1022 else
1023 smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001024 desc.arginfo =
1025 TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
Zhen Kong2f60f492017-06-29 15:22:14 -07001026 desc.args[0] = req->app_or_session_id;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001027 ret = scm_call2(smc_id, &desc);
1028 break;
1029 }
1030 default: {
1031 pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
1032 qseos_cmd_id);
1033 ret = -EINVAL;
1034 break;
1035 }
1036 } /*end of switch (qsee_cmd_id) */
1037 break;
1038 } /*end of case SCM_SVC_TZSCHEDULER*/
1039 default: {
1040 pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
1041 svc_id);
1042 ret = -EINVAL;
1043 break;
1044 }
1045 } /*end of switch svc_id */
1046 scm_resp->result = desc.ret[0];
1047 scm_resp->resp_type = desc.ret[1];
1048 scm_resp->data = desc.ret[2];
1049 pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
1050 svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
1051 pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
1052 scm_resp->result, scm_resp->resp_type, scm_resp->data);
1053 return ret;
1054}
1055
1056
1057static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1058 size_t cmd_len, void *resp_buf, size_t resp_len)
1059{
1060 if (!is_scm_armv8())
1061 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1062 resp_buf, resp_len);
1063 else
1064 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1065}
1066
1067static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1068 struct qseecom_register_listener_req *svc)
1069{
1070 struct qseecom_registered_listener_list *ptr;
1071 int unique = 1;
1072 unsigned long flags;
1073
1074 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1075 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1076 if (ptr->svc.listener_id == svc->listener_id) {
1077 pr_err("Service id: %u is already registered\n",
1078 ptr->svc.listener_id);
1079 unique = 0;
1080 break;
1081 }
1082 }
1083 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1084 return unique;
1085}
1086
1087static struct qseecom_registered_listener_list *__qseecom_find_svc(
1088 int32_t listener_id)
1089{
1090 struct qseecom_registered_listener_list *entry = NULL;
1091 unsigned long flags;
1092
1093 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1094 list_for_each_entry(entry,
1095 &qseecom.registered_listener_list_head, list) {
1096 if (entry->svc.listener_id == listener_id)
1097 break;
1098 }
1099 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1100
1101 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1102 pr_err("Service id: %u is not found\n", listener_id);
1103 return NULL;
1104 }
1105
1106 return entry;
1107}
1108
1109static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1110 struct qseecom_dev_handle *handle,
1111 struct qseecom_register_listener_req *listener)
1112{
1113 int ret = 0;
1114 struct qseecom_register_listener_ireq req;
1115 struct qseecom_register_listener_64bit_ireq req_64bit;
1116 struct qseecom_command_scm_resp resp;
1117 ion_phys_addr_t pa;
1118 void *cmd_buf = NULL;
1119 size_t cmd_len;
1120
1121 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001122 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001123 listener->ifd_data_fd);
1124 if (IS_ERR_OR_NULL(svc->ihandle)) {
1125 pr_err("Ion client could not retrieve the handle\n");
1126 return -ENOMEM;
1127 }
1128
1129 /* Get the physical address of the ION BUF */
1130 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1131 if (ret) {
1132 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1133 ret);
1134 return ret;
1135 }
1136 /* Populate the structure for sending scm call to load image */
1137 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1138 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1139 pr_err("ION memory mapping for listener shared buffer failed\n");
1140 return -ENOMEM;
1141 }
1142 svc->sb_phys = (phys_addr_t)pa;
1143
1144 if (qseecom.qsee_version < QSEE_VERSION_40) {
1145 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1146 req.listener_id = svc->svc.listener_id;
1147 req.sb_len = svc->sb_length;
1148 req.sb_ptr = (uint32_t)svc->sb_phys;
1149 cmd_buf = (void *)&req;
1150 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1151 } else {
1152 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1153 req_64bit.listener_id = svc->svc.listener_id;
1154 req_64bit.sb_len = svc->sb_length;
1155 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1156 cmd_buf = (void *)&req_64bit;
1157 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1158 }
1159
1160 resp.result = QSEOS_RESULT_INCOMPLETE;
1161
1162 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1163 &resp, sizeof(resp));
1164 if (ret) {
1165 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1166 return -EINVAL;
1167 }
1168
1169 if (resp.result != QSEOS_RESULT_SUCCESS) {
1170 pr_err("Error SB registration req: resp.result = %d\n",
1171 resp.result);
1172 return -EPERM;
1173 }
1174 return 0;
1175}
1176
1177static int qseecom_register_listener(struct qseecom_dev_handle *data,
1178 void __user *argp)
1179{
1180 int ret = 0;
1181 unsigned long flags;
1182 struct qseecom_register_listener_req rcvd_lstnr;
1183 struct qseecom_registered_listener_list *new_entry;
1184
1185 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1186 if (ret) {
1187 pr_err("copy_from_user failed\n");
1188 return ret;
1189 }
1190 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1191 rcvd_lstnr.sb_size))
1192 return -EFAULT;
1193
Zhen Kong3c674612018-09-06 22:51:27 -07001194 data->listener.id = rcvd_lstnr.listener_id;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001195 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001196 pr_err("Service %d is not unique and failed to register\n",
1197 rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001198 data->released = true;
1199 return -EBUSY;
1200 }
1201
1202 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1203 if (!new_entry)
1204 return -ENOMEM;
1205 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1206 new_entry->rcv_req_flag = 0;
1207
1208 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1209 new_entry->sb_length = rcvd_lstnr.sb_size;
1210 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1211 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001212 pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
1213 rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001214 kzfree(new_entry);
1215 return -ENOMEM;
1216 }
1217
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001218 init_waitqueue_head(&new_entry->rcv_req_wq);
1219 init_waitqueue_head(&new_entry->listener_block_app_wq);
1220 new_entry->send_resp_flag = 0;
1221 new_entry->listener_in_use = false;
1222 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1223 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1224 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1225
Zhen Kong3c674612018-09-06 22:51:27 -07001226 pr_warn("Service %d is registered\n", rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001227 return ret;
1228}
1229
Zhen Kong26e62742018-05-04 17:19:06 -07001230static void __qseecom_listener_abort_all(int abort)
1231{
1232 struct qseecom_registered_listener_list *entry = NULL;
1233 unsigned long flags;
1234
1235 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1236 list_for_each_entry(entry,
1237 &qseecom.registered_listener_list_head, list) {
1238 pr_debug("set abort %d for listener %d\n",
1239 abort, entry->svc.listener_id);
1240 entry->abort = abort;
1241 }
1242 if (abort)
1243 wake_up_interruptible_all(&qseecom.send_resp_wq);
1244 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1245}
1246
/*
 * Deregister the listener bound to @data: tell TZ via
 * QSEOS_DEREGISTER_LISTENER, abort any in-flight waiters, then tear
 * down the shared buffer and remove the entry from the global list.
 * The entry is freed even when the scm call fails (both error paths
 * jump to "exit").  Returns 0 or a negative errno.
 */
static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	int ret = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL; /* Retrieve phy addr */

	ptr_svc = __qseecom_find_svc(data->listener.id);
	if (!ptr_svc) {
		pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
		return -ENODATA;
	}

	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
					sizeof(req), &resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		goto exit;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		ret = -EPERM;
		goto exit;
	}

	/* Flag abort on both the client handle and the listener entry,
	 * then kick any thread sleeping in the listener's rcv-req wait.
	 */
	data->abort = 1;
	ptr_svc->abort = 1;
	wake_up_all(&ptr_svc->rcv_req_wq);

	/* Wait for all other in-flight ioctls on this handle to drain
	 * (ioctl_count > 1 means someone besides us is still inside).
	 * NOTE(review): when the freezable wait is interrupted, the loop
	 * records -ERESTARTSYS but keeps spinning until the count drops —
	 * confirm this busy-retry on signal is intentional.
	 */
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
		}
	}

exit:
	/* Unmap and free the listener's shared ION buffer, if mapped */
	if (ptr_svc->sb_virt) {
		ihandle = ptr_svc->ihandle;
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	list_del(&ptr_svc->list);
	kzfree(ptr_svc);

	data->released = true;
	pr_warn("Service %d is unregistered\n", data->listener.id);
	return ret;
}
1307
/*
 * Apply bus-bandwidth mode @mode: toggle the QSEE core clock to match
 * (off for INACTIVE, on otherwise) and, if the mode actually changed,
 * issue the msm_bus scaling request.  On a failed bus request the clock
 * state is rolled back to its previous setting.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	/* Only manage the core clock when one was actually provisioned */
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* Roll the clock back to its pre-call state */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		/* NOTE(review): current_mode is recorded even when the bus
		 * request above failed — confirm this is intended and not a
		 * state-tracking bug.
		 */
		qseecom.current_mode = mode;
	}
	return ret;
}
1344
/*
 * Workqueue handler scheduled by the bw-scale-down timer callback:
 * drops the bus vote to INACTIVE once the inactivity timeout fires.
 * Lock order (app_access_lock, then qsee_bw_mutex) matches the other
 * bandwidth paths; timer_running is re-checked under the mutex because
 * a new vote may have re-armed things between timer fire and this work
 * running.
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
1357
/*
 * Timer callback for bw_scale_down_timer.  Runs in atomic (timer)
 * context, so it only schedules the work item that performs the
 * sleeping bus/clock operations.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1362
1363static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1364{
1365 struct qseecom_clk *qclk;
1366 int ret = 0;
1367
1368 mutex_lock(&clk_access_lock);
1369 if (ce == CLK_QSEE)
1370 qclk = &qseecom.qsee;
1371 else
1372 qclk = &qseecom.ce_drv;
1373
1374 if (qclk->clk_access_cnt > 2) {
1375 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1376 ret = -EINVAL;
1377 goto err_dec_ref_cnt;
1378 }
1379 if (qclk->clk_access_cnt == 2)
1380 qclk->clk_access_cnt--;
1381
1382err_dec_ref_cnt:
1383 mutex_unlock(&clk_access_lock);
1384 return ret;
1385}
1386
1387
1388static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1389{
1390 int32_t ret = 0;
1391 int32_t request_mode = INACTIVE;
1392
1393 mutex_lock(&qsee_bw_mutex);
1394 if (mode == 0) {
1395 if (qseecom.cumulative_mode > MEDIUM)
1396 request_mode = HIGH;
1397 else
1398 request_mode = qseecom.cumulative_mode;
1399 } else {
1400 request_mode = mode;
1401 }
1402
1403 ret = __qseecom_set_msm_bus_request(request_mode);
1404 if (ret) {
1405 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1406 ret, request_mode);
1407 goto err_scale_timer;
1408 }
1409
1410 if (qseecom.timer_running) {
1411 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1412 if (ret) {
1413 pr_err("Failed to decrease clk ref count.\n");
1414 goto err_scale_timer;
1415 }
1416 del_timer_sync(&(qseecom.bw_scale_down_timer));
1417 qseecom.timer_running = false;
1418 }
1419err_scale_timer:
1420 mutex_unlock(&qsee_bw_mutex);
1421 return ret;
1422}
1423
1424
1425static int qseecom_unregister_bus_bandwidth_needs(
1426 struct qseecom_dev_handle *data)
1427{
1428 int32_t ret = 0;
1429
1430 qseecom.cumulative_mode -= data->mode;
1431 data->mode = INACTIVE;
1432
1433 return ret;
1434}
1435
1436static int __qseecom_register_bus_bandwidth_needs(
1437 struct qseecom_dev_handle *data, uint32_t request_mode)
1438{
1439 int32_t ret = 0;
1440
1441 if (data->mode == INACTIVE) {
1442 qseecom.cumulative_mode += request_mode;
1443 data->mode = request_mode;
1444 } else {
1445 if (data->mode != request_mode) {
1446 qseecom.cumulative_mode -= data->mode;
1447 qseecom.cumulative_mode += request_mode;
1448 data->mode = request_mode;
1449 }
1450 }
1451 return ret;
1452}
1453
1454static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1455{
1456 int ret = 0;
1457
1458 ret = qsee_vote_for_clock(data, CLK_DFAB);
1459 if (ret) {
1460 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1461 goto perf_enable_exit;
1462 }
1463 ret = qsee_vote_for_clock(data, CLK_SFPB);
1464 if (ret) {
1465 qsee_disable_clock_vote(data, CLK_DFAB);
1466 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1467 goto perf_enable_exit;
1468 }
1469
1470perf_enable_exit:
1471 return ret;
1472}
1473
1474static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1475 void __user *argp)
1476{
1477 int32_t ret = 0;
1478 int32_t req_mode;
1479
1480 if (qseecom.no_clock_support)
1481 return 0;
1482
1483 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1484 if (ret) {
1485 pr_err("copy_from_user failed\n");
1486 return ret;
1487 }
1488 if (req_mode > HIGH) {
1489 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1490 return -EINVAL;
1491 }
1492
1493 /*
1494 * Register bus bandwidth needs if bus scaling feature is enabled;
1495 * otherwise, qseecom enable/disable clocks for the client directly.
1496 */
1497 if (qseecom.support_bus_scaling) {
1498 mutex_lock(&qsee_bw_mutex);
1499 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1500 mutex_unlock(&qsee_bw_mutex);
1501 } else {
1502 pr_debug("Bus scaling feature is NOT enabled\n");
1503 pr_debug("request bandwidth mode %d for the client\n",
1504 req_mode);
1505 if (req_mode != INACTIVE) {
1506 ret = qseecom_perf_enable(data);
1507 if (ret)
1508 pr_err("Failed to vote for clock with err %d\n",
1509 ret);
1510 } else {
1511 qsee_disable_clock_vote(data, CLK_DFAB);
1512 qsee_disable_clock_vote(data, CLK_SFPB);
1513 }
1514 }
1515 return ret;
1516}
1517
1518static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1519{
1520 if (qseecom.no_clock_support)
1521 return;
1522
1523 mutex_lock(&qsee_bw_mutex);
1524 qseecom.bw_scale_down_timer.expires = jiffies +
1525 msecs_to_jiffies(duration);
1526 mod_timer(&(qseecom.bw_scale_down_timer),
1527 qseecom.bw_scale_down_timer.expires);
1528 qseecom.timer_running = true;
1529 mutex_unlock(&qsee_bw_mutex);
1530}
1531
1532static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1533{
1534 if (!qseecom.support_bus_scaling)
1535 qsee_disable_clock_vote(data, CLK_SFPB);
1536 else
1537 __qseecom_add_bw_scale_down_timer(
1538 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1539}
1540
1541static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1542{
1543 int ret = 0;
1544
1545 if (qseecom.support_bus_scaling) {
1546 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1547 if (ret)
1548 pr_err("Failed to set bw MEDIUM.\n");
1549 } else {
1550 ret = qsee_vote_for_clock(data, CLK_SFPB);
1551 if (ret)
1552 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1553 }
1554 return ret;
1555}
1556
1557static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1558 void __user *argp)
1559{
1560 ion_phys_addr_t pa;
1561 int32_t ret;
1562 struct qseecom_set_sb_mem_param_req req;
1563 size_t len;
1564
1565 /* Copy the relevant information needed for loading the image */
1566 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1567 return -EFAULT;
1568
1569 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1570 (req.sb_len == 0)) {
1571 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1572 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1573 return -EFAULT;
1574 }
1575 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1576 req.sb_len))
1577 return -EFAULT;
1578
1579 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001580 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001581 req.ifd_data_fd);
1582 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1583 pr_err("Ion client could not retrieve the handle\n");
1584 return -ENOMEM;
1585 }
1586 /* Get the physical address of the ION BUF */
1587 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1588 if (ret) {
1589
1590 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1591 ret);
1592 return ret;
1593 }
1594
1595 if (len < req.sb_len) {
1596 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1597 req.sb_len, len);
1598 return -EINVAL;
1599 }
1600 /* Populate the structure for sending scm call to load image */
1601 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1602 data->client.ihandle);
1603 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1604 pr_err("ION memory mapping for client shared buf failed\n");
1605 return -ENOMEM;
1606 }
1607 data->client.sb_phys = (phys_addr_t)pa;
1608 data->client.sb_length = req.sb_len;
1609 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1610 return 0;
1611}
1612
Zhen Kong26e62742018-05-04 17:19:06 -07001613static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1614 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001615{
1616 int ret;
1617
1618 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001619 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001620}
1621
1622static int __qseecom_reentrancy_listener_has_sent_rsp(
1623 struct qseecom_dev_handle *data,
1624 struct qseecom_registered_listener_list *ptr_svc)
1625{
1626 int ret;
1627
1628 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001629 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001630}
1631
1632static void __qseecom_clean_listener_sglistinfo(
1633 struct qseecom_registered_listener_list *ptr_svc)
1634{
1635 if (ptr_svc->sglist_cnt) {
1636 memset(ptr_svc->sglistinfo_ptr, 0,
1637 SGLISTINFO_TABLE_SIZE);
1638 ptr_svc->sglist_cnt = 0;
1639 }
1640}
1641
/* wake up listener receive request wq retry delay (ms) and max attempt count */
1643#define QSEECOM_WAKE_LISTENER_RCVWQ_DELAY 10
1644#define QSEECOM_WAKE_LISTENER_RCVWQ_MAX_ATTEMP 3
1645
1646static int __qseecom_retry_wake_up_listener_rcv_wq(
1647 struct qseecom_registered_listener_list *ptr_svc)
1648{
1649 int retry = 0;
1650
1651 while (ptr_svc->rcv_req_flag == 1 &&
1652 retry++ < QSEECOM_WAKE_LISTENER_RCVWQ_MAX_ATTEMP) {
1653 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1654 msleep(QSEECOM_WAKE_LISTENER_RCVWQ_DELAY);
1655 }
1656 return ptr_svc->rcv_req_flag == 1;
1657}
1658
/*
 * Service a QSEOS_RESULT_INCOMPLETE response: TZ is asking for a
 * userspace listener (id in resp->data).  Wake that listener, block
 * (with all signals masked) until it posts its response or an abort is
 * flagged, then send the listener's status back to TZ and loop while
 * TZ keeps returning INCOMPLETE.  Returns 0, or the first recorded
 * error (rc takes precedence over ret at the end).
 */
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
									= {0};
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/* NOTE(review): after list_for_each_entry() the cursor is
		 * never NULL (it points at the head container when no match
		 * was found) — the not-found case is actually caught by the
		 * listener_id mismatch check below; confirm.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_err("Service %d does not exist\n",
				lstnr);
			rc = -ERESTARTSYS;
			ptr_svc = NULL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->abort == 1) {
			pr_err("Service %d abort %d\n",
				lstnr, ptr_svc->abort);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		/* Listener never consumed the previous request: retry the
		 * wake a few times before giving up on it.
		 */
		if (ptr_svc->rcv_req_flag == 1 &&
			__qseecom_retry_wake_up_listener_rcv_wq(ptr_svc)) {
			pr_err("Service %d is not ready to receive request\n",
					lstnr);
			rc = -ENOENT;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);
		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		do {
			/*
			 * When reentrancy is not supported, check global
			 * send_resp_flag; otherwise, check this listener's
			 * send_resp_flag.
			 */
			if (!qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}

			if (qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort || ptr_svc->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
err_resp:
		/* Even on the error paths above, TZ still expects a
		 * listener-data response carrying the failure status.
		 */
		qseecom.send_resp_flag = 0;
		if (ptr_svc) {
			ptr_svc->send_resp_flag = 0;
			table = ptr_svc->sglistinfo_ptr;
		}
		/* Build the 32-bit or 64-bit response per QSEE version */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id  = lstnr;
			send_data_rsp.status = status;
			if (table) {
				send_data_rsp.sglistinfo_ptr =
					(uint32_t)virt_to_phys(table);
				send_data_rsp.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id  = lstnr;
			send_data_rsp_64bit.status = status;
			if (table) {
				send_data_rsp_64bit.sglistinfo_ptr =
					virt_to_phys(table);
				send_data_rsp_64bit.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* First u32 of either struct is the command id */
		if (qseecom.whitelist_support == false || table == NULL)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		/* Flush the listener's shared buffer so TZ sees its writes */
		if (ptr_svc && ptr_svc->ihandle) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}

		/* RPMB/SSD listeners need the QSEE clock on for the call */
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		if (ptr_svc) {
			ptr_svc->listener_in_use = false;
			__qseecom_clean_listener_sglistinfo(ptr_svc);
		}
		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
				__qseecom_disable_clk(CLK_QSEE);
			return ret;
		}
		pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
			status, resp->result, data->client.app_id, lstnr);
		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
		}
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
			__qseecom_disable_clk(CLK_QSEE);

	}
	/* An earlier listener-lookup/abort error wins over the scm result */
	if (rc)
		return rc;

	return ret;
}
1858
/*
 * Handle QSEOS_RESULT_BLOCKED_ON_LISTENER in the reentrancy path: the
 * TA session (resp->resp_type) is blocked because the listener in
 * resp->data is busy.  Sleep (signals masked, app_access_lock dropped)
 * until the listener frees up, then send
 * QSEOS_CONTINUE_BLOCKED_REQ_COMMAND to TZ, repeating while TZ reports
 * the session still blocked.  On success resp is overwritten with the
 * continued call's result, which is expected to be INCOMPLETE.
 * @ptr_app may be NULL, in which case it is looked up from
 * data->client.{app_id,app_name}.
 */
static int __qseecom_process_reentrancy_blocked_on_listener(
				struct qseecom_command_scm_resp *resp,
				struct qseecom_registered_app_list *ptr_app,
				struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	unsigned int session_id;
	sigset_t new_sigset;
	sigset_t old_sigset;
	unsigned long flags;
	bool found_app = false;

	if (!resp || !data) {
		pr_err("invalid resp or data pointer\n");
		ret = -EINVAL;
		goto exit;
	}

	/* find app_id & img_name from list */
	if (!ptr_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
			if ((ptr_app->app_id == data->client.app_id) &&
				(!strcmp(ptr_app->app_name,
						data->client.app_name))) {
				found_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
					flags);
		if (!found_app) {
			pr_err("app_id %d (%s) is not found\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -ENOENT;
			goto exit;
		}
	}

	do {
		session_id = resp->resp_type;
		list_ptr = __qseecom_find_svc(resp->data);
		if (!list_ptr) {
			pr_err("Invalid listener ID %d\n", resp->data);
			ret = -ENODATA;
			goto exit;
		}
		ptr_app->blocked_on_listener_id = resp->data;

		pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
			resp->data, list_ptr->listener_in_use,
			session_id, data->client.app_id);

		/* sleep until listener is available */
		sigfillset(&new_sigset);
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		/* app_access_lock is dropped across the wait so the busy
		 * listener can actually be serviced; the block ref count
		 * and app_blocked flag cover the unlocked window.
		 */
		do {
			qseecom.app_block_ref_cnt++;
			ptr_app->app_blocked = true;
			mutex_unlock(&app_access_lock);
			wait_event_freezable(
				list_ptr->listener_block_app_wq,
				!list_ptr->listener_in_use);
			mutex_lock(&app_access_lock);
			ptr_app->app_blocked = false;
			qseecom.app_block_ref_cnt--;
		} while (list_ptr->listener_in_use);

		sigprocmask(SIG_SETMASK, &old_sigset, NULL);

		ptr_app->blocked_on_listener_id = 0;
		pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
			resp->data, session_id, data->client.app_id);

		/* notify TZ that listener is available */
		ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;

		/* smcinvoke TZ identifies the blocked entity by session id;
		 * legacy TZ expects the app id instead.
		 */
		if (qseecom.smcinvoke_support)
			ireq.app_or_session_id = session_id;
		else
			ireq.app_or_session_id = data->client.app_id;

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
		if (ret && qseecom.smcinvoke_support) {
			/* retry with legacy cmd */
			qseecom.smcinvoke_support = false;
			ireq.app_or_session_id = data->client.app_id;
			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				&ireq, sizeof(ireq),
				&continue_resp, sizeof(continue_resp));
			qseecom.smcinvoke_support = true;
			if (ret) {
				pr_err("unblock app %d or session %d fail\n",
					data->client.app_id, session_id);
				goto exit;
			}
		}
		resp->result = continue_resp.result;
		resp->resp_type = continue_resp.resp_type;
		resp->data = continue_resp.data;
		pr_debug("unblock resp = %d\n", resp->result);
	} while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);

	if (resp->result != QSEOS_RESULT_INCOMPLETE) {
		pr_err("Unexpected unblock resp %d\n", resp->result);
		ret = -EINVAL;
	}
exit:
	return ret;
}
1977
/*
 * Reentrancy-aware handler for QSEOS_RESULT_INCOMPLETE responses: QSEE is
 * asking for listener (svc) service.  For each incomplete response this
 * wakes the matching listener, sleeps (with the global app_access_lock
 * dropped and all signals blocked) until the listener posts its response,
 * then sends that response back to QSEE via scm call, looping while QSEE
 * keeps returning INCOMPLETE.
 *
 * @data: client handle whose command triggered the listener request.
 * @resp: in/out scm response; updated with each listener round-trip.
 *
 * Returns 0 on success; a negative errno if the listener is missing,
 * aborted, or the scm call fails.  Note 'rc' records listener-lookup
 * failures that must still be reported to QSEE (as QSEOS_RESULT_FAILURE)
 * before being returned to the caller.
 */
static int __qseecom_reentrancy_process_incomplete_cmd(
					struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
									= {0};
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
		/* resp->data carries the listener id QSEE wants serviced */
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/*
		 * NOTE(review): if the list is non-empty and no entry matched,
		 * list_for_each_entry leaves ptr_svc pointing at the head
		 * container, not NULL; the listener_id re-check below is what
		 * actually catches that case -- confirm before relying on this
		 * NULL test alone.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		/* Loop fell through without a match (see note above) */
		if (ptr_svc->svc.listener_id != lstnr) {
			pr_err("Service %d does not exist\n",
						lstnr);
			rc = -ERESTARTSYS;
			ptr_svc = NULL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->abort == 1) {
			pr_err("Service %d abort %d\n",
						lstnr, ptr_svc->abort);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		/* Listener never drained its previous request; give up */
		if (ptr_svc->rcv_req_flag == 1 &&
			__qseecom_retry_wake_up_listener_rcv_wq(ptr_svc)) {
			pr_err("Service %d is not ready to receive request\n",
					lstnr);
			rc = -ENOENT;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);

		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		/* unlock mutex btw waking listener and sleep-wait */
		mutex_unlock(&app_access_lock);
		do {
			if (!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);
		/* lock mutex again after resp sent */
		mutex_lock(&app_access_lock);
		ptr_svc->send_resp_flag = 0;
		qseecom.send_resp_flag = 0;

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort || ptr_svc->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
err_resp:
		/*
		 * Even on lookup failure we must answer QSEE, so the response
		 * is built unconditionally; 'table' stays NULL when there is
		 * no valid listener (skips the sglist fields below).
		 */
		if (ptr_svc)
			table = ptr_svc->sglistinfo_ptr;
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			if (table) {
				send_data_rsp.sglistinfo_ptr =
					(uint32_t)virt_to_phys(table);
				send_data_rsp.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			if (table) {
				send_data_rsp_64bit.sglistinfo_ptr =
					virt_to_phys(table);
				send_data_rsp_64bit.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* First word of either response struct is the command id */
		if (qseecom.whitelist_support == false || table == NULL)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc && ptr_svc->ihandle) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}
		/* RPMB listener traffic needs the QSEE clock voted on */
		if (lstnr == RPMB_SERVICE) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		if (ptr_svc) {
			ptr_svc->listener_in_use = false;
			__qseecom_clean_listener_sglistinfo(ptr_svc);
			wake_up_interruptible(&ptr_svc->listener_block_app_wq);
		}

		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			goto exit;
		}

		switch (resp->result) {
		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
				lstnr, data->client.app_id, resp->data);
			if (lstnr == resp->data) {
				pr_err("lstnr %d should not be blocked!\n",
					lstnr);
				ret = -EINVAL;
				goto exit;
			}
			ret = __qseecom_process_reentrancy_blocked_on_listener(
				resp, NULL, data);
			if (ret) {
				pr_err("failed to process App(%d) %s blocked on listener %d\n",
					data->client.app_id,
					data->client.app_name, resp->data);
				goto exit;
			}
			/* fall through: unblock succeeded, treat as success */
		case QSEOS_RESULT_SUCCESS:
		case QSEOS_RESULT_INCOMPLETE:
			break;
		default:
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
			goto exit;
		}
exit:
		if (lstnr == RPMB_SERVICE)
			__qseecom_disable_clk(CLK_QSEE);

	}
	/* Listener-side failures (rc) take precedence over scm status */
	if (rc)
		return rc;

	return ret;
}
2189
/*
 * QSEE does not support reentrancy of OS-level commands until RE phase-3,
 * and an OS-level scm_call will fail while any TZ app is blocked.  So,
 * before sending an OS-level scm call, first check whether any app is
 * blocked and, if so, wait until all apps are unblocked.
 */
2196static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2197{
2198 sigset_t new_sigset, old_sigset;
2199
2200 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2201 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2202 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2203 /* thread sleep until this app unblocked */
2204 while (qseecom.app_block_ref_cnt > 0) {
2205 sigfillset(&new_sigset);
2206 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2207 mutex_unlock(&app_access_lock);
2208 do {
2209 if (!wait_event_freezable(qseecom.app_block_wq,
2210 (qseecom.app_block_ref_cnt == 0)))
2211 break;
2212 } while (1);
2213 mutex_lock(&app_access_lock);
2214 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2215 }
2216 }
2217}
2218
2219/*
2220 * scm_call of send data will fail if this TA is blocked or there are more
2221 * than one TA requesting listener services; So, first check to see if need
2222 * to wait.
2223 */
2224static void __qseecom_reentrancy_check_if_this_app_blocked(
2225 struct qseecom_registered_app_list *ptr_app)
2226{
2227 sigset_t new_sigset, old_sigset;
2228
2229 if (qseecom.qsee_reentrancy_support) {
Zhen Kongdea10592018-07-30 17:50:10 -07002230 ptr_app->check_block++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002231 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2232 /* thread sleep until this app unblocked */
2233 sigfillset(&new_sigset);
2234 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2235 mutex_unlock(&app_access_lock);
2236 do {
2237 if (!wait_event_freezable(qseecom.app_block_wq,
2238 (!ptr_app->app_blocked &&
2239 qseecom.app_block_ref_cnt <= 1)))
2240 break;
2241 } while (1);
2242 mutex_lock(&app_access_lock);
2243 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2244 }
Zhen Kongdea10592018-07-30 17:50:10 -07002245 ptr_app->check_block--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002246 }
2247}
2248
2249static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2250 uint32_t *app_id)
2251{
2252 int32_t ret;
2253 struct qseecom_command_scm_resp resp;
2254 bool found_app = false;
2255 struct qseecom_registered_app_list *entry = NULL;
2256 unsigned long flags = 0;
2257
2258 if (!app_id) {
2259 pr_err("Null pointer to app_id\n");
2260 return -EINVAL;
2261 }
2262 *app_id = 0;
2263
2264 /* check if app exists and has been registered locally */
2265 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2266 list_for_each_entry(entry,
2267 &qseecom.registered_app_list_head, list) {
2268 if (!strcmp(entry->app_name, req.app_name)) {
2269 found_app = true;
2270 break;
2271 }
2272 }
2273 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2274 if (found_app) {
2275 pr_debug("Found app with id %d\n", entry->app_id);
2276 *app_id = entry->app_id;
2277 return 0;
2278 }
2279
2280 memset((void *)&resp, 0, sizeof(resp));
2281
2282 /* SCM_CALL to check if app_id for the mentioned app exists */
2283 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2284 sizeof(struct qseecom_check_app_ireq),
2285 &resp, sizeof(resp));
2286 if (ret) {
2287 pr_err("scm_call to check if app is already loaded failed\n");
2288 return -EINVAL;
2289 }
2290
2291 if (resp.result == QSEOS_RESULT_FAILURE)
2292 return 0;
2293
2294 switch (resp.resp_type) {
2295 /*qsee returned listener type response */
2296 case QSEOS_LISTENER_ID:
2297 pr_err("resp type is of listener type instead of app");
2298 return -EINVAL;
2299 case QSEOS_APP_ID:
2300 *app_id = resp.data;
2301 return 0;
2302 default:
2303 pr_err("invalid resp type (%d) from qsee",
2304 resp.resp_type);
2305 return -ENODEV;
2306 }
2307}
2308
2309static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2310{
2311 struct qseecom_registered_app_list *entry = NULL;
2312 unsigned long flags = 0;
2313 u32 app_id = 0;
2314 struct ion_handle *ihandle; /* Ion handle */
2315 struct qseecom_load_img_req load_img_req;
2316 int32_t ret = 0;
2317 ion_phys_addr_t pa = 0;
2318 size_t len;
2319 struct qseecom_command_scm_resp resp;
2320 struct qseecom_check_app_ireq req;
2321 struct qseecom_load_app_ireq load_req;
2322 struct qseecom_load_app_64bit_ireq load_req_64bit;
2323 void *cmd_buf = NULL;
2324 size_t cmd_len;
2325 bool first_time = false;
2326
2327 /* Copy the relevant information needed for loading the image */
2328 if (copy_from_user(&load_img_req,
2329 (void __user *)argp,
2330 sizeof(struct qseecom_load_img_req))) {
2331 pr_err("copy_from_user failed\n");
2332 return -EFAULT;
2333 }
2334
2335 /* Check and load cmnlib */
2336 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2337 if (!qseecom.commonlib_loaded &&
2338 load_img_req.app_arch == ELFCLASS32) {
2339 ret = qseecom_load_commonlib_image(data, "cmnlib");
2340 if (ret) {
2341 pr_err("failed to load cmnlib\n");
2342 return -EIO;
2343 }
2344 qseecom.commonlib_loaded = true;
2345 pr_debug("cmnlib is loaded\n");
2346 }
2347
2348 if (!qseecom.commonlib64_loaded &&
2349 load_img_req.app_arch == ELFCLASS64) {
2350 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2351 if (ret) {
2352 pr_err("failed to load cmnlib64\n");
2353 return -EIO;
2354 }
2355 qseecom.commonlib64_loaded = true;
2356 pr_debug("cmnlib64 is loaded\n");
2357 }
2358 }
2359
2360 if (qseecom.support_bus_scaling) {
2361 mutex_lock(&qsee_bw_mutex);
2362 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2363 mutex_unlock(&qsee_bw_mutex);
2364 if (ret)
2365 return ret;
2366 }
2367
2368 /* Vote for the SFPB clock */
2369 ret = __qseecom_enable_clk_scale_up(data);
2370 if (ret)
2371 goto enable_clk_err;
2372
2373 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2374 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2375 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2376
2377 ret = __qseecom_check_app_exists(req, &app_id);
2378 if (ret < 0)
2379 goto loadapp_err;
2380
2381 if (app_id) {
2382 pr_debug("App id %d (%s) already exists\n", app_id,
2383 (char *)(req.app_name));
2384 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2385 list_for_each_entry(entry,
2386 &qseecom.registered_app_list_head, list){
2387 if (entry->app_id == app_id) {
2388 entry->ref_cnt++;
2389 break;
2390 }
2391 }
2392 spin_unlock_irqrestore(
2393 &qseecom.registered_app_list_lock, flags);
2394 ret = 0;
2395 } else {
2396 first_time = true;
2397 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2398 (char *)(load_img_req.img_name));
2399 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002400 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002401 load_img_req.ifd_data_fd);
2402 if (IS_ERR_OR_NULL(ihandle)) {
2403 pr_err("Ion client could not retrieve the handle\n");
2404 ret = -ENOMEM;
2405 goto loadapp_err;
2406 }
2407
2408 /* Get the physical address of the ION BUF */
2409 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2410 if (ret) {
2411 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2412 ret);
2413 goto loadapp_err;
2414 }
2415 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2416 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2417 len, load_img_req.mdt_len,
2418 load_img_req.img_len);
2419 ret = -EINVAL;
2420 goto loadapp_err;
2421 }
2422 /* Populate the structure for sending scm call to load image */
2423 if (qseecom.qsee_version < QSEE_VERSION_40) {
2424 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2425 load_req.mdt_len = load_img_req.mdt_len;
2426 load_req.img_len = load_img_req.img_len;
2427 strlcpy(load_req.app_name, load_img_req.img_name,
2428 MAX_APP_NAME_SIZE);
2429 load_req.phy_addr = (uint32_t)pa;
2430 cmd_buf = (void *)&load_req;
2431 cmd_len = sizeof(struct qseecom_load_app_ireq);
2432 } else {
2433 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2434 load_req_64bit.mdt_len = load_img_req.mdt_len;
2435 load_req_64bit.img_len = load_img_req.img_len;
2436 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2437 MAX_APP_NAME_SIZE);
2438 load_req_64bit.phy_addr = (uint64_t)pa;
2439 cmd_buf = (void *)&load_req_64bit;
2440 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2441 }
2442
2443 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2444 ION_IOC_CLEAN_INV_CACHES);
2445 if (ret) {
2446 pr_err("cache operation failed %d\n", ret);
2447 goto loadapp_err;
2448 }
2449
2450 /* SCM_CALL to load the app and get the app_id back */
2451 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2452 cmd_len, &resp, sizeof(resp));
2453 if (ret) {
2454 pr_err("scm_call to load app failed\n");
2455 if (!IS_ERR_OR_NULL(ihandle))
2456 ion_free(qseecom.ion_clnt, ihandle);
2457 ret = -EINVAL;
2458 goto loadapp_err;
2459 }
2460
2461 if (resp.result == QSEOS_RESULT_FAILURE) {
2462 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2463 if (!IS_ERR_OR_NULL(ihandle))
2464 ion_free(qseecom.ion_clnt, ihandle);
2465 ret = -EFAULT;
2466 goto loadapp_err;
2467 }
2468
2469 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2470 ret = __qseecom_process_incomplete_cmd(data, &resp);
2471 if (ret) {
2472 pr_err("process_incomplete_cmd failed err: %d\n",
2473 ret);
2474 if (!IS_ERR_OR_NULL(ihandle))
2475 ion_free(qseecom.ion_clnt, ihandle);
2476 ret = -EFAULT;
2477 goto loadapp_err;
2478 }
2479 }
2480
2481 if (resp.result != QSEOS_RESULT_SUCCESS) {
2482 pr_err("scm_call failed resp.result unknown, %d\n",
2483 resp.result);
2484 if (!IS_ERR_OR_NULL(ihandle))
2485 ion_free(qseecom.ion_clnt, ihandle);
2486 ret = -EFAULT;
2487 goto loadapp_err;
2488 }
2489
2490 app_id = resp.data;
2491
2492 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2493 if (!entry) {
2494 ret = -ENOMEM;
2495 goto loadapp_err;
2496 }
2497 entry->app_id = app_id;
2498 entry->ref_cnt = 1;
2499 entry->app_arch = load_img_req.app_arch;
2500 /*
2501 * keymaster app may be first loaded as "keymaste" by qseecomd,
2502 * and then used as "keymaster" on some targets. To avoid app
2503 * name checking error, register "keymaster" into app_list and
2504 * thread private data.
2505 */
2506 if (!strcmp(load_img_req.img_name, "keymaste"))
2507 strlcpy(entry->app_name, "keymaster",
2508 MAX_APP_NAME_SIZE);
2509 else
2510 strlcpy(entry->app_name, load_img_req.img_name,
2511 MAX_APP_NAME_SIZE);
2512 entry->app_blocked = false;
2513 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002514 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002515
2516 /* Deallocate the handle */
2517 if (!IS_ERR_OR_NULL(ihandle))
2518 ion_free(qseecom.ion_clnt, ihandle);
2519
2520 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2521 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2522 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2523 flags);
2524
2525 pr_warn("App with id %u (%s) now loaded\n", app_id,
2526 (char *)(load_img_req.img_name));
2527 }
2528 data->client.app_id = app_id;
2529 data->client.app_arch = load_img_req.app_arch;
2530 if (!strcmp(load_img_req.img_name, "keymaste"))
2531 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2532 else
2533 strlcpy(data->client.app_name, load_img_req.img_name,
2534 MAX_APP_NAME_SIZE);
2535 load_img_req.app_id = app_id;
2536 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2537 pr_err("copy_to_user failed\n");
2538 ret = -EFAULT;
2539 if (first_time == true) {
2540 spin_lock_irqsave(
2541 &qseecom.registered_app_list_lock, flags);
2542 list_del(&entry->list);
2543 spin_unlock_irqrestore(
2544 &qseecom.registered_app_list_lock, flags);
2545 kzfree(entry);
2546 }
2547 }
2548
2549loadapp_err:
2550 __qseecom_disable_clk_scale_down(data);
2551enable_clk_err:
2552 if (qseecom.support_bus_scaling) {
2553 mutex_lock(&qsee_bw_mutex);
2554 qseecom_unregister_bus_bandwidth_needs(data);
2555 mutex_unlock(&qsee_bw_mutex);
2556 }
2557 return ret;
2558}
2559
2560static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2561{
2562 int ret = 1; /* Set unload app */
2563
2564 wake_up_all(&qseecom.send_resp_wq);
2565 if (qseecom.qsee_reentrancy_support)
2566 mutex_unlock(&app_access_lock);
2567 while (atomic_read(&data->ioctl_count) > 1) {
2568 if (wait_event_freezable(data->abort_wq,
2569 atomic_read(&data->ioctl_count) <= 1)) {
2570 pr_err("Interrupted from abort\n");
2571 ret = -ERESTARTSYS;
2572 break;
2573 }
2574 }
2575 if (qseecom.qsee_reentrancy_support)
2576 mutex_lock(&app_access_lock);
2577 return ret;
2578}
2579
2580static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2581{
2582 int ret = 0;
2583
2584 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2585 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2586 ion_free(qseecom.ion_clnt, data->client.ihandle);
2587 data->client.ihandle = NULL;
2588 }
2589 return ret;
2590}
2591
/*
 * Unload the client's trusted app from QSEE (or just drop a reference).
 *
 * @data:      client handle; identifies the app by id and name.
 * @app_crash: true when called from a crash/abnormal-release path; forces
 *             unload regardless of ref count, unless the app is currently
 *             blocked (see app_blocked/check_block test below).
 *
 * The keymaster app is never unloaded from TZ.  Reference counting and
 * list removal happen at unload_exit so they run for both the success and
 * error paths once the app entry was found.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;
	bool found_app = false;
	bool found_dead_app = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* Matches both "keymaste" and "keymaster" (prefix compare) */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	/* Drain this client's ioctls, then wait for reentrancy quiescence */
	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/*
					 * Never force-unload an app that is
					 * blocked on (or checking for) a
					 * listener, even on crash.
					 */
					if (ptr_app->app_blocked ||
						ptr_app->check_block)
						app_crash = false;
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				/* Same id, different name: stale entry */
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for sending scm call to load image */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
								req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
					data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
					data->client.app_id);
		/* Unload may require servicing listeners before completing */
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
									ret);
				goto unload_exit;
			}
		}
	}

unload_exit:
	/* Ref-count bookkeeping runs even on error, once the app was found */
	if (found_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2715
2716static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2717 unsigned long virt)
2718{
2719 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2720}
2721
2722static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2723 unsigned long virt)
2724{
2725 return (uintptr_t)data->client.sb_virt +
2726 (virt - data->client.user_virt_sb_base);
2727}
2728
2729int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2730 struct qseecom_send_svc_cmd_req *req_ptr,
2731 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2732{
2733 int ret = 0;
2734 void *req_buf = NULL;
2735
2736 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2737 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2738 req_ptr, send_svc_ireq_ptr);
2739 return -EINVAL;
2740 }
2741
2742 /* Clients need to ensure req_buf is at base offset of shared buffer */
2743 if ((uintptr_t)req_ptr->cmd_req_buf !=
2744 data_ptr->client.user_virt_sb_base) {
2745 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2746 return -EINVAL;
2747 }
2748
2749 if (data_ptr->client.sb_length <
2750 sizeof(struct qseecom_rpmb_provision_key)) {
2751 pr_err("shared buffer is too small to hold key type\n");
2752 return -EINVAL;
2753 }
2754 req_buf = data_ptr->client.sb_virt;
2755
2756 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2757 send_svc_ireq_ptr->key_type =
2758 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2759 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2760 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2761 data_ptr, (uintptr_t)req_ptr->resp_buf));
2762 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2763
2764 return ret;
2765}
2766
2767int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2768 struct qseecom_send_svc_cmd_req *req_ptr,
2769 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2770{
2771 int ret = 0;
2772 uint32_t reqd_len_sb_in = 0;
2773
2774 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2775 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2776 req_ptr, send_svc_ireq_ptr);
2777 return -EINVAL;
2778 }
2779
2780 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2781 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2782 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2783 pr_err("Required: %u, Available: %zu\n",
2784 reqd_len_sb_in, data_ptr->client.sb_length);
2785 return -ENOMEM;
2786 }
2787
2788 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2789 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2790 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2791 data_ptr, (uintptr_t)req_ptr->resp_buf));
2792 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2793
2794 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2795 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2796
2797
2798 return ret;
2799}
2800
/*
 * Validate a send-service-cmd request from userspace before it is used to
 * build an scm call: both buffers must lie entirely within the client's
 * shared buffer, lengths must be non-zero and fit, and every pointer/length
 * combination is checked for integer overflow before the range checks that
 * depend on it.  Returns 0 if valid, -EINVAL/-ENOMEM otherwise.
 */
static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_svc_cmd_req *req)
{
	if (!req || !req->resp_buf || !req->cmd_req_buf) {
		pr_err("req or cmd buffer or response buffer is null\n");
		return -EINVAL;
	}

	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->client.sb_virt == NULL) {
		pr_err("sb_virt null\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base == 0) {
		pr_err("user_virt_sb_base is null\n");
		return -EINVAL;
	}

	if (data->client.sb_length == 0) {
		pr_err("sb_length is 0\n");
		return -EINVAL;
	}

	/* Both buffer start addresses must fall inside the shared buffer */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* Overflow checks precede the arithmetic they guard */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Finally: both [buf, buf+len) ranges must end inside the buffer */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
2885
/*
 * Handle a secure-service command request from user space: copy in the
 * request descriptor, translate it into the secure-world request layout
 * (RPMB or FSM-key variants), vote for bus/clock resources, and issue
 * the SCM call to TrustZone.
 *
 * @data: per-fd client handle; its type is switched to
 *        QSEECOM_SECURE_SERVICE for the duration of the call.
 * @argp: user pointer to a struct qseecom_send_svc_cmd_req.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	int ret = 0;
	struct qseecom_client_send_service_ireq send_svc_ireq;
	struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
	struct qseecom_command_scm_resp resp;
	struct qseecom_send_svc_cmd_req req;
	void *send_req_ptr;
	size_t req_buf_size;

	if (copy_from_user(&req,
				(void __user *)argp,
				sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}

	/* Buffers and lengths must lie within the client's shared buffer */
	if (__validate_send_service_cmd_inputs(data, &req))
		return -EINVAL;

	data->type = QSEECOM_SECURE_SERVICE;

	/* Pick the secure-world request layout that matches the command */
	switch (req.cmd_id) {
	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
	case QSEOS_RPMB_ERASE_COMMAND:
	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
		send_req_ptr = &send_svc_ireq;
		req_buf_size = sizeof(send_svc_ireq);
		if (__qseecom_process_rpmb_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	case QSEOS_FSM_LTEOTA_REQ_CMD:
	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
	case QSEOS_FSM_IKE_REQ_CMD:
	case QSEOS_FSM_IKE_REQ_RSP_CMD:
	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
	case QSEOS_FSM_OEM_FUSE_READ_ROW:
	case QSEOS_FSM_ENCFS_REQ_CMD:
	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
		send_req_ptr = &send_fsm_key_svc_ireq;
		req_buf_size = sizeof(send_fsm_key_svc_ireq);
		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	default:
		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
		return -EINVAL;
	}

	/* Vote for bandwidth (scaling builds) or clocks before the call */
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
		if (ret) {
			pr_err("Fail to set bw HIGH\n");
			return ret;
		}
	} else {
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clocks with err %d\n", ret);
			goto exit;
		}
	}

	/* Flush the shared buffer so TZ sees the request contents */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				(const void *)send_req_ptr,
				req_buf_size, &resp, sizeof(resp));
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		/* Drop the votes taken above before bailing out */
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		}
		goto exit;
	}
	/* Invalidate so the CPU sees the response TZ wrote */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		pr_debug("qseos_result_incomplete\n");
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd fail with result: %d\n",
				resp.result);
		}
		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
			/*
			 * For the RPMB status query the raw result code is
			 * the answer: report it to the caller and treat the
			 * command as handled (ret deliberately reset to 0).
			 */
			pr_warn("RPMB key status is 0x%x\n", resp.result);
			if (put_user(resp.result,
				(uint32_t __user *)req.resp_buf)) {
				ret = -EINVAL;
				goto exit;
			}
			ret = 0;
		}
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with resp.result: %d\n", resp.result);
		ret = -EINVAL;
		break;
	default:
		pr_err("Response result %d not supported\n",
						resp.result);
		ret = -EINVAL;
		break;
	}
	/* Release the bandwidth/clock votes taken before the SCM call */
	if (!qseecom.support_bus_scaling) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	} else {
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
	}

exit:
	return ret;
}
3023
3024static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
3025 struct qseecom_send_cmd_req *req)
3026
3027{
3028 if (!data || !data->client.ihandle) {
3029 pr_err("Client or client handle is not initialized\n");
3030 return -EINVAL;
3031 }
3032 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
3033 (req->cmd_req_buf == NULL)) {
3034 pr_err("cmd buffer or response buffer is null\n");
3035 return -EINVAL;
3036 }
3037 if (((uintptr_t)req->cmd_req_buf <
3038 data->client.user_virt_sb_base) ||
3039 ((uintptr_t)req->cmd_req_buf >=
3040 (data->client.user_virt_sb_base + data->client.sb_length))) {
3041 pr_err("cmd buffer address not within shared bufffer\n");
3042 return -EINVAL;
3043 }
3044 if (((uintptr_t)req->resp_buf <
3045 data->client.user_virt_sb_base) ||
3046 ((uintptr_t)req->resp_buf >=
3047 (data->client.user_virt_sb_base + data->client.sb_length))) {
3048 pr_err("response buffer address not within shared bufffer\n");
3049 return -EINVAL;
3050 }
3051 if ((req->cmd_req_len == 0) ||
3052 (req->cmd_req_len > data->client.sb_length) ||
3053 (req->resp_len > data->client.sb_length)) {
3054 pr_err("cmd buf length or response buf length not valid\n");
3055 return -EINVAL;
3056 }
3057 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3058 pr_err("Integer overflow detected in req_len & rsp_len\n");
3059 return -EINVAL;
3060 }
3061
3062 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3063 pr_debug("Not enough memory to fit cmd_buf.\n");
3064 pr_debug("resp_buf. Required: %u, Available: %zu\n",
3065 (req->cmd_req_len + req->resp_len),
3066 data->client.sb_length);
3067 return -ENOMEM;
3068 }
3069 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3070 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3071 return -EINVAL;
3072 }
3073 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3074 pr_err("Integer overflow in resp_len & resp_buf\n");
3075 return -EINVAL;
3076 }
3077 if (data->client.user_virt_sb_base >
3078 (ULONG_MAX - data->client.sb_length)) {
3079 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3080 return -EINVAL;
3081 }
3082 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3083 ((uintptr_t)data->client.user_virt_sb_base +
3084 data->client.sb_length)) ||
3085 (((uintptr_t)req->resp_buf + req->resp_len) >
3086 ((uintptr_t)data->client.user_virt_sb_base +
3087 data->client.sb_length))) {
3088 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3089 return -EINVAL;
3090 }
3091 return 0;
3092}
3093
/*
 * Post-process an SCM response when QSEE reentrancy is supported.
 *
 * BLOCKED_ON_LISTENER: unblock the app via the listener path, then fall
 * through to finish the now-incomplete command. INCOMPLETE: mark the app
 * blocked while the incomplete command is driven to completion, then wake
 * any waiters. SUCCESS: nothing to do.
 *
 * Returns 0 on success or a negative errno.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/* fall through: once unblocked, complete the pending cmd */

	case QSEOS_RESULT_INCOMPLETE:
		/* Block new cmds to this app while we drain the incomplete one */
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
				ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
						resp->result);
		return -EINVAL;
	}
}
3132
/*
 * Send a pre-validated command to the trusted application identified by
 * @data: look the app up in the registered-app list, build the 32- or
 * 64-bit secure-world request (depending on QSEE version), flush the
 * shared buffer, issue the SCM call, and process the response.
 *
 * @req must already have passed __validate_send_cmd_inputs().
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	int ret2 = 0;
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy QSEE: 32-bit physical addresses in the request */
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* sglist table is read by TZ via its physical address */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* First field of both ireq layouts is the command id */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* Flush request+response region so TZ reads current data */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
			ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/*
	 * Always invalidate the shared buffer on the way out so the CPU
	 * sees whatever TZ wrote, even when the command itself failed.
	 */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret2);
		return ret2;
	}
	return ret;
}
3270
3271static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3272{
3273 int ret = 0;
3274 struct qseecom_send_cmd_req req;
3275
3276 ret = copy_from_user(&req, argp, sizeof(req));
3277 if (ret) {
3278 pr_err("copy_from_user failed\n");
3279 return ret;
3280 }
3281
3282 if (__validate_send_cmd_inputs(data, &req))
3283 return -EINVAL;
3284
3285 ret = __qseecom_send_cmd(data, &req);
3286
3287 if (ret)
3288 return ret;
3289
3290 return ret;
3291}
3292
3293int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3294 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3295 struct qseecom_dev_handle *data, int i) {
3296
3297 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3298 (req->ifd_data[i].fd > 0)) {
3299 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3300 (req->ifd_data[i].cmd_buf_offset >
3301 req->cmd_req_len - sizeof(uint32_t))) {
3302 pr_err("Invalid offset (req len) 0x%x\n",
3303 req->ifd_data[i].cmd_buf_offset);
3304 return -EINVAL;
3305 }
3306 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3307 (lstnr_resp->ifd_data[i].fd > 0)) {
3308 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3309 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3310 lstnr_resp->resp_len - sizeof(uint32_t))) {
3311 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3312 lstnr_resp->ifd_data[i].cmd_buf_offset);
3313 return -EINVAL;
3314 }
3315 }
3316 return 0;
3317}
3318
/*
 * Patch (or, with @cleanup, un-patch) ion-fd backed physical addresses
 * into a modfd command/response buffer using 32-bit sg entries.
 *
 * For each populated ifd_data[] slot: import the ion buffer, write its
 * DMA address (single entry) or an sg-entry array (multiple entries) at
 * cmd_buf_offset, perform the matching cache operation, and record the
 * patched offsets in the caller's sglistinfo table for the whitelist
 * SCM path. With @cleanup true the patched fields are zeroed instead.
 *
 * Returns 0 on success; -ENOMEM (or -EFAULT/-EINVAL for bad callers)
 * on failure. NOTE(review): the err path returns -ENOMEM even for
 * cache-op failures — pre-existing behavior, kept as-is.
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* msg is a cmd req for client apps, a listener resp for listeners */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Locate the field to patch for this fd, if any */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: write one 32-bit phys addr */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: write an sg-entry array */
			struct qseecom_sg_entry *update;
			int j = 0;

			/* Offset must leave room for the whole entry array */
			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			/* Un-patch path: just invalidate the ion buffer */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Patch path: flush, then record sglist metadata */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				/* Listener: offset is relative to its SB */
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3535
3536static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3537 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3538{
3539 struct scatterlist *sg = sg_ptr->sgl;
3540 struct qseecom_sg_entry_64bit *sg_entry;
3541 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3542 void *buf;
3543 uint i;
3544 size_t size;
3545 dma_addr_t coh_pmem;
3546
3547 if (fd_idx >= MAX_ION_FD) {
3548 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3549 return -ENOMEM;
3550 }
3551 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3552 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3553 /* Allocate a contiguous kernel buffer */
3554 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3555 size = (size + PAGE_SIZE) & PAGE_MASK;
3556 buf = dma_alloc_coherent(qseecom.pdev,
3557 size, &coh_pmem, GFP_KERNEL);
3558 if (buf == NULL) {
3559 pr_err("failed to alloc memory for sg buf\n");
3560 return -ENOMEM;
3561 }
3562 /* update qseecom_sg_list_buf_hdr_64bit */
3563 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3564 buf_hdr->new_buf_phys_addr = coh_pmem;
3565 buf_hdr->nents_total = sg_ptr->nents;
3566 /* save the left sg entries into new allocated buf */
3567 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3568 for (i = 0; i < sg_ptr->nents; i++) {
3569 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3570 sg_entry->len = sg->length;
3571 sg_entry++;
3572 sg = sg_next(sg);
3573 }
3574
3575 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3576 data->client.sec_buf_fd[fd_idx].vbase = buf;
3577 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3578 data->client.sec_buf_fd[fd_idx].size = size;
3579
3580 return 0;
3581}
3582
/*
 * 64-bit variant of __qseecom_update_cmd_buf(): patch (or un-patch with
 * @cleanup) ion-fd backed physical addresses into a modfd buffer using
 * 64-bit sg entries.
 *
 * Differences from the 32-bit path: when nents exceeds
 * QSEECOM_MAX_SG_ENTRY the entry list is spilled into a DMA-coherent
 * side buffer via __qseecom_allocate_sg_list_buffer() (freed again on
 * the cleanup pass), and addresses are written as 64-bit values.
 *
 * Returns 0 on success; -ENOMEM (or -EFAULT/-EINVAL for bad callers)
 * on failure. On error every recorded sec_buf_fd allocation is freed.
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* msg is a cmd req for client apps, a listener resp for listeners */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Locate the field to patch for this fd, if any */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			/* Too many entries: spill them to a side buffer */
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: write one 64-bit phys addr */
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			/* Offset must leave room for the whole entry array */
			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
						(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
						(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			/* Un-patch path: just invalidate the ion buffer */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Patch path: flush, then record sglist metadata */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				/* Listener: offset is relative to its SB */
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* Free every spilled sg-list side buffer recorded so far */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3781
3782static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3783 void __user *argp,
3784 bool is_64bit_addr)
3785{
3786 int ret = 0;
3787 int i;
3788 struct qseecom_send_modfd_cmd_req req;
3789 struct qseecom_send_cmd_req send_cmd_req;
3790
3791 ret = copy_from_user(&req, argp, sizeof(req));
3792 if (ret) {
3793 pr_err("copy_from_user failed\n");
3794 return ret;
3795 }
3796
3797 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3798 send_cmd_req.cmd_req_len = req.cmd_req_len;
3799 send_cmd_req.resp_buf = req.resp_buf;
3800 send_cmd_req.resp_len = req.resp_len;
3801
3802 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3803 return -EINVAL;
3804
3805 /* validate offsets */
3806 for (i = 0; i < MAX_ION_FD; i++) {
3807 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3808 pr_err("Invalid offset %d = 0x%x\n",
3809 i, req.ifd_data[i].cmd_buf_offset);
3810 return -EINVAL;
3811 }
3812 }
3813 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3814 (uintptr_t)req.cmd_req_buf);
3815 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3816 (uintptr_t)req.resp_buf);
3817
3818 if (!is_64bit_addr) {
3819 ret = __qseecom_update_cmd_buf(&req, false, data);
3820 if (ret)
3821 return ret;
3822 ret = __qseecom_send_cmd(data, &send_cmd_req);
3823 if (ret)
3824 return ret;
3825 ret = __qseecom_update_cmd_buf(&req, true, data);
3826 if (ret)
3827 return ret;
3828 } else {
3829 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3830 if (ret)
3831 return ret;
3832 ret = __qseecom_send_cmd(data, &send_cmd_req);
3833 if (ret)
3834 return ret;
3835 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3836 if (ret)
3837 return ret;
3838 }
3839
3840 return ret;
3841}
3842
/* Ioctl entry: modfd send with the 32-bit sg-entry layout. */
static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, false);
}
3848
/* Ioctl entry: modfd send with the 64-bit sg-entry layout. */
static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, true);
}
3854
3855
3856
3857static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3858 struct qseecom_registered_listener_list *svc)
3859{
3860 int ret;
3861
3862 ret = (svc->rcv_req_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07003863 return ret || data->abort || svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003864}
3865
/*
 * Block the calling listener until a request arrives from the secure
 * world (freezable sleep, so suspend is not blocked).
 *
 * Returns 0 when a request is pending (rcv_req_flag consumed),
 * -ENODATA for an unknown listener id, -ERESTARTSYS if the wait was
 * interrupted by a signal, or -ENODEV if the listener or device handle
 * was aborted while waiting.
 */
static int qseecom_receive_req(struct qseecom_dev_handle *data)
{
	int ret = 0;
	struct qseecom_registered_listener_list *this_lstnr;

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (!this_lstnr) {
		pr_err("Invalid listener ID\n");
		return -ENODATA;
	}

	while (1) {
		if (wait_event_freezable(this_lstnr->rcv_req_wq,
				__qseecom_listener_has_rcvd_req(data,
				this_lstnr))) {
			pr_warn("Interrupted: exiting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			/* woken up for different reason */
			return -ERESTARTSYS;
		}

		/* Abort may also satisfy the wait predicate: check it first */
		if (data->abort || this_lstnr->abort) {
			pr_err("Aborting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			return -ENODEV;
		}
		this_lstnr->rcv_req_flag = 0;
		break;
	}
	return ret;
}
3897
3898static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3899{
3900 unsigned char app_arch = 0;
3901 struct elf32_hdr *ehdr;
3902 struct elf64_hdr *ehdr64;
3903
3904 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3905
3906 switch (app_arch) {
3907 case ELFCLASS32: {
3908 ehdr = (struct elf32_hdr *)fw_entry->data;
3909 if (fw_entry->size < sizeof(*ehdr)) {
3910 pr_err("%s: Not big enough to be an elf32 header\n",
3911 qseecom.pdev->init_name);
3912 return false;
3913 }
3914 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3915 pr_err("%s: Not an elf32 header\n",
3916 qseecom.pdev->init_name);
3917 return false;
3918 }
3919 if (ehdr->e_phnum == 0) {
3920 pr_err("%s: No loadable segments\n",
3921 qseecom.pdev->init_name);
3922 return false;
3923 }
3924 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3925 sizeof(struct elf32_hdr) > fw_entry->size) {
3926 pr_err("%s: Program headers not within mdt\n",
3927 qseecom.pdev->init_name);
3928 return false;
3929 }
3930 break;
3931 }
3932 case ELFCLASS64: {
3933 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3934 if (fw_entry->size < sizeof(*ehdr64)) {
3935 pr_err("%s: Not big enough to be an elf64 header\n",
3936 qseecom.pdev->init_name);
3937 return false;
3938 }
3939 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3940 pr_err("%s: Not an elf64 header\n",
3941 qseecom.pdev->init_name);
3942 return false;
3943 }
3944 if (ehdr64->e_phnum == 0) {
3945 pr_err("%s: No loadable segments\n",
3946 qseecom.pdev->init_name);
3947 return false;
3948 }
3949 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3950 sizeof(struct elf64_hdr) > fw_entry->size) {
3951 pr_err("%s: Program headers not within mdt\n",
3952 qseecom.pdev->init_name);
3953 return false;
3954 }
3955 break;
3956 }
3957 default: {
3958 pr_err("QSEE app arch %u is not supported\n", app_arch);
3959 return false;
3960 }
3961 }
3962 return true;
3963}
3964
/*
 * Compute the total byte size of a split TA image: "<appname>.mdt"
 * plus one "<appname>.bNN" blob per ELF program header.  Also reports
 * the image's ELF class (ELFCLASS32/ELFCLASS64) via *app_arch.
 *
 * Returns 0 on success; a negative errno on request/validation
 * failure, in which case *fw_size is reset to 0.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	/* The .mdt carries the ELF headers describing the split image */
	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with request_firmware\n");
		ret = -EIO;
		goto err;
	}
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	/* One ".bNN" segment blob exists per ELF program header */
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
			appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* Accumulate each segment's size, guarding the u32 running total */
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}

	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
4027
/*
 * Copy the split TA image ("<appname>.mdt" followed by each
 * "<appname>.bNN" blob) into the caller-supplied buffer img_data of
 * capacity fw_size bytes (as sized by __qseecom_get_fw_size()), and
 * fill in load_req->mdt_len / load_req->img_len accordingly.
 *
 * Returns 0 on success, a negative errno on request/size failure.
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
		uint32_t fw_size,
		struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}

	load_req->img_len = fw_entry->size;
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/

	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	/* One ".bNN" blob is appended per ELF program header */
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
			appname, app_arch);
		ret = -EIO;
		goto err;
	}
	release_firmware(fw_entry);
	fw_entry = NULL;
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		/* Reject both u32 overflow and overrun of the dest buffer */
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	release_firmware(fw_entry);
	return ret;
}
4099
/*
 * Allocate an ION buffer of fw_size bytes from the QSEECOM TA heap,
 * map it into the kernel, and return its handle, kernel address, and
 * physical address.  The allocation is retried up to
 * QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP times with a delay in between.
 *
 * NOTE(review): the retry path unlocks and re-locks app_access_lock
 * around the sleep, so the caller must hold app_access_lock.
 *
 * Returns 0 on success; -ENOMEM/-EIO on failure (nothing leaked).
 */
static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
{
	size_t len = 0;
	int ret = 0;
	ion_phys_addr_t pa;
	struct ion_handle *ihandle = NULL;
	u8 *img_data = NULL;
	int retry = 0;

	do {
		/* On a retry, sleep without holding the global app lock */
		if (retry++) {
			mutex_unlock(&app_access_lock);
			msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
			mutex_lock(&app_access_lock);
		}
		ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
			SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), 0);
	} while (IS_ERR_OR_NULL(ihandle) &&
		(retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));

	if (IS_ERR_OR_NULL(ihandle)) {
		pr_err("ION alloc failed\n");
		return -ENOMEM;
	}
	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
					ihandle);

	if (IS_ERR_OR_NULL(img_data)) {
		pr_err("ION memory mapping for image loading failed\n");
		ret = -ENOMEM;
		goto exit_ion_free;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
	if (ret) {
		pr_err("physical memory retrieval failure\n");
		ret = -EIO;
		goto exit_ion_unmap_kernel;
	}

	*pihandle = ihandle;
	*data = img_data;
	*paddr = pa;
	return ret;

exit_ion_unmap_kernel:
	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
exit_ion_free:
	ion_free(qseecom.ion_clnt, ihandle);
	ihandle = NULL;
	return ret;
}
4153
4154static void __qseecom_free_img_data(struct ion_handle **ihandle)
4155{
4156 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4157 ion_free(qseecom.ion_clnt, *ihandle);
4158 *ihandle = NULL;
4159}
4160
4161static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4162 uint32_t *app_id)
4163{
4164 int ret = -1;
4165 uint32_t fw_size = 0;
4166 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4167 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4168 struct qseecom_command_scm_resp resp;
4169 u8 *img_data = NULL;
4170 ion_phys_addr_t pa = 0;
4171 struct ion_handle *ihandle = NULL;
4172 void *cmd_buf = NULL;
4173 size_t cmd_len;
4174 uint32_t app_arch = 0;
4175
4176 if (!data || !appname || !app_id) {
4177 pr_err("Null pointer to data or appname or appid\n");
4178 return -EINVAL;
4179 }
4180 *app_id = 0;
4181 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4182 return -EIO;
4183 data->client.app_arch = app_arch;
4184
4185 /* Check and load cmnlib */
4186 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4187 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4188 ret = qseecom_load_commonlib_image(data, "cmnlib");
4189 if (ret) {
4190 pr_err("failed to load cmnlib\n");
4191 return -EIO;
4192 }
4193 qseecom.commonlib_loaded = true;
4194 pr_debug("cmnlib is loaded\n");
4195 }
4196
4197 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4198 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4199 if (ret) {
4200 pr_err("failed to load cmnlib64\n");
4201 return -EIO;
4202 }
4203 qseecom.commonlib64_loaded = true;
4204 pr_debug("cmnlib64 is loaded\n");
4205 }
4206 }
4207
4208 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4209 if (ret)
4210 return ret;
4211
4212 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4213 if (ret) {
4214 ret = -EIO;
4215 goto exit_free_img_data;
4216 }
4217
4218 /* Populate the load_req parameters */
4219 if (qseecom.qsee_version < QSEE_VERSION_40) {
4220 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4221 load_req.mdt_len = load_req.mdt_len;
4222 load_req.img_len = load_req.img_len;
4223 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4224 load_req.phy_addr = (uint32_t)pa;
4225 cmd_buf = (void *)&load_req;
4226 cmd_len = sizeof(struct qseecom_load_app_ireq);
4227 } else {
4228 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4229 load_req_64bit.mdt_len = load_req.mdt_len;
4230 load_req_64bit.img_len = load_req.img_len;
4231 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4232 load_req_64bit.phy_addr = (uint64_t)pa;
4233 cmd_buf = (void *)&load_req_64bit;
4234 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4235 }
4236
4237 if (qseecom.support_bus_scaling) {
4238 mutex_lock(&qsee_bw_mutex);
4239 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4240 mutex_unlock(&qsee_bw_mutex);
4241 if (ret) {
4242 ret = -EIO;
4243 goto exit_free_img_data;
4244 }
4245 }
4246
4247 ret = __qseecom_enable_clk_scale_up(data);
4248 if (ret) {
4249 ret = -EIO;
4250 goto exit_unregister_bus_bw_need;
4251 }
4252
4253 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4254 img_data, fw_size,
4255 ION_IOC_CLEAN_INV_CACHES);
4256 if (ret) {
4257 pr_err("cache operation failed %d\n", ret);
4258 goto exit_disable_clk_vote;
4259 }
4260
4261 /* SCM_CALL to load the image */
4262 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4263 &resp, sizeof(resp));
4264 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004265 pr_err("scm_call to load failed : ret %d, result %x\n",
4266 ret, resp.result);
4267 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4268 ret = -EEXIST;
4269 else
4270 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004271 goto exit_disable_clk_vote;
4272 }
4273
4274 switch (resp.result) {
4275 case QSEOS_RESULT_SUCCESS:
4276 *app_id = resp.data;
4277 break;
4278 case QSEOS_RESULT_INCOMPLETE:
4279 ret = __qseecom_process_incomplete_cmd(data, &resp);
4280 if (ret)
4281 pr_err("process_incomplete_cmd FAILED\n");
4282 else
4283 *app_id = resp.data;
4284 break;
4285 case QSEOS_RESULT_FAILURE:
4286 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4287 break;
4288 default:
4289 pr_err("scm call return unknown response %d\n", resp.result);
4290 ret = -EINVAL;
4291 break;
4292 }
4293
4294exit_disable_clk_vote:
4295 __qseecom_disable_clk_scale_down(data);
4296
4297exit_unregister_bus_bw_need:
4298 if (qseecom.support_bus_scaling) {
4299 mutex_lock(&qsee_bw_mutex);
4300 qseecom_unregister_bus_bandwidth_needs(data);
4301 mutex_unlock(&qsee_bw_mutex);
4302 }
4303
4304exit_free_img_data:
4305 __qseecom_free_img_data(&ihandle);
4306 return ret;
4307}
4308
/*
 * Load a QSEE common library image ("cmnlib" or "cmnlib64") into TZ:
 * read the split firmware into a temporary ION buffer and issue the
 * QSEOS_LOAD_SERV_IMAGE scm call.  The ION buffer is always released
 * before returning; callers track the loaded state themselves.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;
	struct ion_handle *cmnlib_ion_handle = NULL;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	/* Fills in load_req.mdt_len and load_req.img_len */
	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* Flush the image out of cache before TZ reads it by phys addr */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener service(s) before it can finish */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&cmnlib_ion_handle);
	return ret;
}
4429
4430static int qseecom_unload_commonlib_image(void)
4431{
4432 int ret = -EINVAL;
4433 struct qseecom_unload_lib_image_ireq unload_req = {0};
4434 struct qseecom_command_scm_resp resp;
4435
4436 /* Populate the remaining parameters */
4437 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4438
4439 /* SCM_CALL to load the image */
4440 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4441 sizeof(struct qseecom_unload_lib_image_ireq),
4442 &resp, sizeof(resp));
4443 if (ret) {
4444 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4445 ret = -EIO;
4446 } else {
4447 switch (resp.result) {
4448 case QSEOS_RESULT_SUCCESS:
4449 break;
4450 case QSEOS_RESULT_FAILURE:
4451 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4452 break;
4453 default:
4454 pr_err("scm call return unknown response %d\n",
4455 resp.result);
4456 ret = -EINVAL;
4457 break;
4458 }
4459 }
4460
4461 return ret;
4462}
4463
4464int qseecom_start_app(struct qseecom_handle **handle,
4465 char *app_name, uint32_t size)
4466{
4467 int32_t ret = 0;
4468 unsigned long flags = 0;
4469 struct qseecom_dev_handle *data = NULL;
4470 struct qseecom_check_app_ireq app_ireq;
4471 struct qseecom_registered_app_list *entry = NULL;
4472 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4473 bool found_app = false;
4474 size_t len;
4475 ion_phys_addr_t pa;
4476 uint32_t fw_size, app_arch;
4477 uint32_t app_id = 0;
4478
4479 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4480 pr_err("Not allowed to be called in %d state\n",
4481 atomic_read(&qseecom.qseecom_state));
4482 return -EPERM;
4483 }
4484 if (!app_name) {
4485 pr_err("failed to get the app name\n");
4486 return -EINVAL;
4487 }
4488
Zhen Kong64a6d7282017-06-16 11:55:07 -07004489 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004490 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004491 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004492 return -EINVAL;
4493 }
4494
4495 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4496 if (!(*handle))
4497 return -ENOMEM;
4498
4499 data = kzalloc(sizeof(*data), GFP_KERNEL);
4500 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304501 ret = -ENOMEM;
4502 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004503 }
4504 data->abort = 0;
4505 data->type = QSEECOM_CLIENT_APP;
4506 data->released = false;
4507 data->client.sb_length = size;
4508 data->client.user_virt_sb_base = 0;
4509 data->client.ihandle = NULL;
4510
4511 init_waitqueue_head(&data->abort_wq);
4512
4513 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4514 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4515 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4516 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304517 ret = -ENOMEM;
4518 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004519 }
4520 mutex_lock(&app_access_lock);
4521
Zhen Kong5d02be92018-05-29 16:17:29 -07004522recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004523 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4524 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4525 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4526 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304527 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004528
4529 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4530 if (app_id) {
4531 pr_warn("App id %d for [%s] app exists\n", app_id,
4532 (char *)app_ireq.app_name);
4533 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4534 list_for_each_entry(entry,
4535 &qseecom.registered_app_list_head, list){
4536 if (entry->app_id == app_id) {
4537 entry->ref_cnt++;
4538 found_app = true;
4539 break;
4540 }
4541 }
4542 spin_unlock_irqrestore(
4543 &qseecom.registered_app_list_lock, flags);
4544 if (!found_app)
4545 pr_warn("App_id %d [%s] was loaded but not registered\n",
4546 ret, (char *)app_ireq.app_name);
4547 } else {
4548 /* load the app and get the app_id */
4549 pr_debug("%s: Loading app for the first time'\n",
4550 qseecom.pdev->init_name);
4551 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004552 if (ret == -EEXIST) {
4553 pr_err("recheck if TA %s is loaded\n", app_name);
4554 goto recheck;
4555 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304556 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004557 }
4558 data->client.app_id = app_id;
4559 if (!found_app) {
4560 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4561 if (!entry) {
4562 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304563 ret = -ENOMEM;
4564 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004565 }
4566 entry->app_id = app_id;
4567 entry->ref_cnt = 1;
4568 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4569 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4570 ret = -EIO;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304571 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004572 }
4573 entry->app_arch = app_arch;
4574 entry->app_blocked = false;
4575 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004576 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004577 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4578 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4579 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4580 flags);
4581 }
4582
4583 /* Get the physical address of the ION BUF */
4584 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4585 if (ret) {
4586 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4587 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304588 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004589 }
4590
4591 /* Populate the structure for sending scm call to load image */
4592 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4593 data->client.ihandle);
4594 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4595 pr_err("ION memory mapping for client shared buf failed\n");
4596 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304597 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004598 }
4599 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4600 data->client.sb_phys = (phys_addr_t)pa;
4601 (*handle)->dev = (void *)data;
4602 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4603 (*handle)->sbuf_len = data->client.sb_length;
4604
4605 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4606 if (!kclient_entry) {
4607 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304608 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004609 }
4610 kclient_entry->handle = *handle;
4611
4612 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4613 list_add_tail(&kclient_entry->list,
4614 &qseecom.registered_kclient_list_head);
4615 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4616
4617 mutex_unlock(&app_access_lock);
4618 return 0;
4619
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304620exit_ion_unmap_kernel:
4621 if (!IS_ERR_OR_NULL(data->client.ihandle))
4622 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4623exit_entry_free:
4624 kfree(entry);
4625exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004626 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304627 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4628 ion_free(qseecom.ion_clnt, data->client.ihandle);
4629 data->client.ihandle = NULL;
4630 }
4631exit_data_free:
4632 kfree(data);
4633exit_handle_free:
4634 if (*handle) {
4635 kfree(*handle);
4636 *handle = NULL;
4637 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004638 return ret;
4639}
4640EXPORT_SYMBOL(qseecom_start_app);
4641
/*
 * Kernel-client API: tear down a session opened by qseecom_start_app().
 * Unregisters the handle from the kclient list, unloads the TA (or
 * drops its refcount inside qseecom_unload_app), and frees the handle.
 *
 * On success *handle is set to NULL.  Returns 0 on success, -EINVAL if
 * the handle is unknown, -EPERM in the wrong driver state, or the
 * error from qseecom_unload_app.
 */
int qseecom_shutdown_app(struct qseecom_handle **handle)
{
	int ret = -EINVAL;
	struct qseecom_dev_handle *data;

	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	bool found_handle = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
				atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if ((handle == NULL) || (*handle == NULL)) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = (struct qseecom_dev_handle *) ((*handle)->dev);
	mutex_lock(&app_access_lock);

	/* Unlink the matching kclient entry while holding the list lock */
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
				list) {
		if (kclient->handle == (*handle)) {
			list_del(&kclient->list);
			found_handle = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
	if (!found_handle)
		pr_err("Unable to find the handle, exiting\n");
	else
		ret = qseecom_unload_app(data, false);

	mutex_unlock(&app_access_lock);
	/* Free the bookkeeping only if the unload actually succeeded */
	if (ret == 0) {
		kzfree(data);
		kzfree(*handle);
		kzfree(kclient);
		*handle = NULL;
	}

	return ret;
}
4689EXPORT_SYMBOL(qseecom_shutdown_app);
4690
/*
 * Kernel-client API: send a command to the TA behind @handle.
 * send_buf/resp_buf must lie inside the handle's shared buffer (this
 * is enforced by __validate_send_cmd_inputs).
 *
 * Takes app_access_lock for the duration of the TZ call, and votes for
 * bus bandwidth / crypto clocks around it as the target requires.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
				atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
				ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		/* Remember to drop the vote we took on behalf of the caller */
		perf_enabled = true;
	}
	/* "securemm" TA requires the legacy command format */
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req);
	data->use_legacy_cmd = false;
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
4769EXPORT_SYMBOL(qseecom_send_command);
4770
4771int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4772{
4773 int ret = 0;
4774
4775 if ((handle == NULL) || (handle->dev == NULL)) {
4776 pr_err("No valid kernel client\n");
4777 return -EINVAL;
4778 }
4779 if (high) {
4780 if (qseecom.support_bus_scaling) {
4781 mutex_lock(&qsee_bw_mutex);
4782 __qseecom_register_bus_bandwidth_needs(handle->dev,
4783 HIGH);
4784 mutex_unlock(&qsee_bw_mutex);
4785 } else {
4786 ret = qseecom_perf_enable(handle->dev);
4787 if (ret)
4788 pr_err("Failed to vote for clock with err %d\n",
4789 ret);
4790 }
4791 } else {
4792 if (!qseecom.support_bus_scaling) {
4793 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4794 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4795 } else {
4796 mutex_lock(&qsee_bw_mutex);
4797 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4798 mutex_unlock(&qsee_bw_mutex);
4799 }
4800 }
4801 return ret;
4802}
4803EXPORT_SYMBOL(qseecom_set_bandwidth);
4804
/*
 * Entry point for the smcinvoke driver: service a listener request that
 * arrived through an smcinvoke scm descriptor rather than a qseecom
 * client.  A dummy app entry / private data pair is synthesized from
 * desc->ret[] so the normal incomplete/reentrancy machinery can run,
 * and the resulting response is written back into desc->ret[].
 *
 * Returns 0 on success, negative errno otherwise.
 */
int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
{
	struct qseecom_registered_app_list dummy_app_entry = { {0} };
	struct qseecom_dev_handle dummy_private_data = {0};
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	if (!desc) {
		pr_err("desc is NULL\n");
		return -EINVAL;
	}

	resp.result = desc->ret[0];	/*req_cmd*/
	resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
	resp.data = desc->ret[2];	/*listener_id*/

	dummy_private_data.client.app_id = desc->ret[1];
	dummy_app_entry.app_id = desc->ret[1];

	mutex_lock(&app_access_lock);
	if (qseecom.qsee_reentrancy_support)
		ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
					&dummy_private_data);
	else
		ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
					&resp);
	mutex_unlock(&app_access_lock);
	if (ret)
		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
			(int)desc->ret[0], (int)desc->ret[2],
			(int)desc->ret[1], ret);
	/* Hand the (possibly updated) response back to smcinvoke */
	desc->ret[0] = resp.result;
	desc->ret[1] = resp.resp_type;
	desc->ret[2] = resp.data;
	return ret;
}
4841EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4842
/*
 * qseecom_send_resp() - legacy (non-reentrant) listener response path.
 *
 * Sets the global response flag before waking the wait queue so a waiter
 * that races with the wakeup still observes the flag. Always returns 0.
 */
static int qseecom_send_resp(void)
{
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4849
/*
 * qseecom_reentrancy_send_resp() - listener response path when QSEE
 * reentrancy is supported.
 *
 * Looks up the listener registered for data->listener.id and sets both the
 * global and the per-listener response flags before waking the shared wait
 * queue (flags must be set first so a racing waiter sees them).
 *
 * Returns 0 on success, -EINVAL if the listener id is not registered.
 */
static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *this_lstnr = NULL;

	pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (this_lstnr == NULL)
		return -EINVAL;
	qseecom.send_resp_flag = 1;
	this_lstnr->send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4863
4864static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
4865 struct qseecom_send_modfd_listener_resp *resp,
4866 struct qseecom_registered_listener_list *this_lstnr)
4867{
4868 int i;
4869
4870 if (!data || !resp || !this_lstnr) {
4871 pr_err("listener handle or resp msg is null\n");
4872 return -EINVAL;
4873 }
4874
4875 if (resp->resp_buf_ptr == NULL) {
4876 pr_err("resp buffer is null\n");
4877 return -EINVAL;
4878 }
4879 /* validate resp buf length */
4880 if ((resp->resp_len == 0) ||
4881 (resp->resp_len > this_lstnr->sb_length)) {
4882 pr_err("resp buf length %d not valid\n", resp->resp_len);
4883 return -EINVAL;
4884 }
4885
4886 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
4887 pr_err("Integer overflow in resp_len & resp_buf\n");
4888 return -EINVAL;
4889 }
4890 if ((uintptr_t)this_lstnr->user_virt_sb_base >
4891 (ULONG_MAX - this_lstnr->sb_length)) {
4892 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
4893 return -EINVAL;
4894 }
4895 /* validate resp buf */
4896 if (((uintptr_t)resp->resp_buf_ptr <
4897 (uintptr_t)this_lstnr->user_virt_sb_base) ||
4898 ((uintptr_t)resp->resp_buf_ptr >=
4899 ((uintptr_t)this_lstnr->user_virt_sb_base +
4900 this_lstnr->sb_length)) ||
4901 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
4902 ((uintptr_t)this_lstnr->user_virt_sb_base +
4903 this_lstnr->sb_length))) {
4904 pr_err("resp buf is out of shared buffer region\n");
4905 return -EINVAL;
4906 }
4907
4908 /* validate offsets */
4909 for (i = 0; i < MAX_ION_FD; i++) {
4910 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
4911 pr_err("Invalid offset %d = 0x%x\n",
4912 i, resp->ifd_data[i].cmd_buf_offset);
4913 return -EINVAL;
4914 }
4915 }
4916
4917 return 0;
4918}
4919
/*
 * __qseecom_send_modfd_resp() - common worker for the 32-bit and 64-bit
 * "send modified-fd listener response" ioctls.
 *
 * Copies the response descriptor from user space, validates it against the
 * listener's shared buffer, translates resp_buf_ptr from the user-space
 * virtual address to the kernel mapping of the shared buffer, patches the
 * ion-fd buffers into the command, then raises the response flags and wakes
 * the listener wait queue (flags before wakeup, so waiters see them).
 *
 * Returns 0 on success, -EINVAL on copy/lookup/validation failure.
 */
static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp, bool is_64bit_addr)
{
	struct qseecom_send_modfd_listener_resp resp;
	struct qseecom_registered_listener_list *this_lstnr = NULL;

	if (copy_from_user(&resp, argp, sizeof(resp))) {
		pr_err("copy_from_user failed");
		return -EINVAL;
	}

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (this_lstnr == NULL)
		return -EINVAL;

	if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
		return -EINVAL;

	/* rebase the user-space pointer onto the kernel shared-buffer map */
	resp.resp_buf_ptr = this_lstnr->sb_virt +
		(uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);

	if (!is_64bit_addr)
		__qseecom_update_cmd_buf(&resp, false, data);
	else
		__qseecom_update_cmd_buf_64(&resp, false, data);
	qseecom.send_resp_flag = 1;
	this_lstnr->send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4950
/* 32-bit-address variant of the modified-fd listener response ioctl. */
static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, false);
}
4956
/* 64-bit-address variant of the modified-fd listener response ioctl. */
static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, true);
}
4962
4963static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
4964 void __user *argp)
4965{
4966 struct qseecom_qseos_version_req req;
4967
4968 if (copy_from_user(&req, argp, sizeof(req))) {
4969 pr_err("copy_from_user failed");
4970 return -EINVAL;
4971 }
4972 req.qseos_version = qseecom.qseos_version;
4973 if (copy_to_user(argp, &req, sizeof(req))) {
4974 pr_err("copy_to_user failed");
4975 return -EINVAL;
4976 }
4977 return 0;
4978}
4979
/*
 * __qseecom_enable_clk() - take a reference on the crypto-engine clocks
 * for the given CE instance (CLK_QSEE or CLK_CE_DRV).
 *
 * Reference-counted under clk_access_lock: only the 0 -> 1 transition
 * actually prepares/enables the core, interface and bus clocks, in that
 * order. On partial failure the already-enabled clocks are unwound in
 * reverse via the goto chain.
 *
 * Returns 0 on success (or when clock support is absent), -EINVAL for an
 * unknown CE instance, -EIO when any clk_prepare_enable() fails or the
 * refcount would overflow.
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	/* already enabled: just bump the refcount */
	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

/* unwind in reverse order of enabling */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
5047
/*
 * __qseecom_disable_clk() - drop one reference on the CE clocks for the
 * given instance; the hardware clocks are only gated on the 1 -> 0
 * transition. A call with a zero refcount is silently ignored.
 */
static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
{
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	else
		qclk = &qseecom.ce_drv;

	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == 0) {
		mutex_unlock(&clk_access_lock);
		return;
	}

	/* last user: actually gate the clocks */
	if (qclk->clk_access_cnt == 1) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
		if (qclk->ce_core_clk != NULL)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
	}
	qclk->clk_access_cnt--;
	mutex_unlock(&clk_access_lock);
}
5078
/*
 * qsee_vote_for_clock() - take a bandwidth vote (CLK_DFAB) or fast-load
 * vote (CLK_SFPB) on behalf of @data.
 *
 * Votes are counted in qseecom.qsee_bw_count / qsee_sfpb_bw_count under
 * qsee_bw_mutex. The first vote of either kind selects the bus-scale
 * level: 1 = DFAB only, 2 = SFPB only, 3 = both kinds active; the CE
 * source clock is also enabled with the first vote when present, and
 * released again if the bus request fails. On success the handle is
 * marked (perf_enabled / fast_load_enabled) so the vote can be dropped
 * when the handle is released.
 *
 * Returns 0 on success or when no perf client / clock support exists,
 * otherwise the msm_bus_scale error code.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			/* first DFAB vote: pick level 3 if SFPB already on */
			if (qseecom.qsee_sfpb_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			/* first SFPB vote: pick level 3 if DFAB already on */
			if (qseecom.qsee_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}

			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
5162
5163static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5164 int32_t clk_type)
5165{
5166 int32_t ret = 0;
5167 struct qseecom_clk *qclk;
5168
5169 qclk = &qseecom.qsee;
5170
5171 if (qseecom.no_clock_support)
5172 return;
5173 if (!qseecom.qsee_perf_client)
5174 return;
5175
5176 switch (clk_type) {
5177 case CLK_DFAB:
5178 mutex_lock(&qsee_bw_mutex);
5179 if (qseecom.qsee_bw_count == 0) {
5180 pr_err("Client error.Extra call to disable DFAB clk\n");
5181 mutex_unlock(&qsee_bw_mutex);
5182 return;
5183 }
5184
5185 if (qseecom.qsee_bw_count == 1) {
5186 if (qseecom.qsee_sfpb_bw_count > 0)
5187 ret = msm_bus_scale_client_update_request(
5188 qseecom.qsee_perf_client, 2);
5189 else {
5190 ret = msm_bus_scale_client_update_request(
5191 qseecom.qsee_perf_client, 0);
5192 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5193 __qseecom_disable_clk(CLK_QSEE);
5194 }
5195 if (ret)
5196 pr_err("SFPB Bandwidth req fail (%d)\n",
5197 ret);
5198 else {
5199 qseecom.qsee_bw_count--;
5200 data->perf_enabled = false;
5201 }
5202 } else {
5203 qseecom.qsee_bw_count--;
5204 data->perf_enabled = false;
5205 }
5206 mutex_unlock(&qsee_bw_mutex);
5207 break;
5208 case CLK_SFPB:
5209 mutex_lock(&qsee_bw_mutex);
5210 if (qseecom.qsee_sfpb_bw_count == 0) {
5211 pr_err("Client error.Extra call to disable SFPB clk\n");
5212 mutex_unlock(&qsee_bw_mutex);
5213 return;
5214 }
5215 if (qseecom.qsee_sfpb_bw_count == 1) {
5216 if (qseecom.qsee_bw_count > 0)
5217 ret = msm_bus_scale_client_update_request(
5218 qseecom.qsee_perf_client, 1);
5219 else {
5220 ret = msm_bus_scale_client_update_request(
5221 qseecom.qsee_perf_client, 0);
5222 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5223 __qseecom_disable_clk(CLK_QSEE);
5224 }
5225 if (ret)
5226 pr_err("SFPB Bandwidth req fail (%d)\n",
5227 ret);
5228 else {
5229 qseecom.qsee_sfpb_bw_count--;
5230 data->fast_load_enabled = false;
5231 }
5232 } else {
5233 qseecom.qsee_sfpb_bw_count--;
5234 data->fast_load_enabled = false;
5235 }
5236 mutex_unlock(&qsee_bw_mutex);
5237 break;
5238 default:
5239 pr_err("Clock type not defined\n");
5240 break;
5241 }
5242
5243}
5244
5245static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5246 void __user *argp)
5247{
5248 struct ion_handle *ihandle; /* Ion handle */
5249 struct qseecom_load_img_req load_img_req;
5250 int uret = 0;
5251 int ret;
5252 ion_phys_addr_t pa = 0;
5253 size_t len;
5254 struct qseecom_load_app_ireq load_req;
5255 struct qseecom_load_app_64bit_ireq load_req_64bit;
5256 struct qseecom_command_scm_resp resp;
5257 void *cmd_buf = NULL;
5258 size_t cmd_len;
5259 /* Copy the relevant information needed for loading the image */
5260 if (copy_from_user(&load_img_req,
5261 (void __user *)argp,
5262 sizeof(struct qseecom_load_img_req))) {
5263 pr_err("copy_from_user failed\n");
5264 return -EFAULT;
5265 }
5266
5267 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005268 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005269 load_img_req.ifd_data_fd);
5270 if (IS_ERR_OR_NULL(ihandle)) {
5271 pr_err("Ion client could not retrieve the handle\n");
5272 return -ENOMEM;
5273 }
5274
5275 /* Get the physical address of the ION BUF */
5276 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5277 if (ret) {
5278 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5279 ret);
5280 return ret;
5281 }
5282 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5283 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5284 len, load_img_req.mdt_len,
5285 load_img_req.img_len);
5286 return ret;
5287 }
5288 /* Populate the structure for sending scm call to load image */
5289 if (qseecom.qsee_version < QSEE_VERSION_40) {
5290 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5291 load_req.mdt_len = load_img_req.mdt_len;
5292 load_req.img_len = load_img_req.img_len;
5293 load_req.phy_addr = (uint32_t)pa;
5294 cmd_buf = (void *)&load_req;
5295 cmd_len = sizeof(struct qseecom_load_app_ireq);
5296 } else {
5297 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5298 load_req_64bit.mdt_len = load_img_req.mdt_len;
5299 load_req_64bit.img_len = load_img_req.img_len;
5300 load_req_64bit.phy_addr = (uint64_t)pa;
5301 cmd_buf = (void *)&load_req_64bit;
5302 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5303 }
5304
5305 if (qseecom.support_bus_scaling) {
5306 mutex_lock(&qsee_bw_mutex);
5307 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5308 mutex_unlock(&qsee_bw_mutex);
5309 if (ret) {
5310 ret = -EIO;
5311 goto exit_cpu_restore;
5312 }
5313 }
5314
5315 /* Vote for the SFPB clock */
5316 ret = __qseecom_enable_clk_scale_up(data);
5317 if (ret) {
5318 ret = -EIO;
5319 goto exit_register_bus_bandwidth_needs;
5320 }
5321 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5322 ION_IOC_CLEAN_INV_CACHES);
5323 if (ret) {
5324 pr_err("cache operation failed %d\n", ret);
5325 goto exit_disable_clock;
5326 }
5327 /* SCM_CALL to load the external elf */
5328 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5329 &resp, sizeof(resp));
5330 if (ret) {
5331 pr_err("scm_call to load failed : ret %d\n",
5332 ret);
5333 ret = -EFAULT;
5334 goto exit_disable_clock;
5335 }
5336
5337 switch (resp.result) {
5338 case QSEOS_RESULT_SUCCESS:
5339 break;
5340 case QSEOS_RESULT_INCOMPLETE:
5341 pr_err("%s: qseos result incomplete\n", __func__);
5342 ret = __qseecom_process_incomplete_cmd(data, &resp);
5343 if (ret)
5344 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5345 break;
5346 case QSEOS_RESULT_FAILURE:
5347 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5348 ret = -EFAULT;
5349 break;
5350 default:
5351 pr_err("scm_call response result %d not supported\n",
5352 resp.result);
5353 ret = -EFAULT;
5354 break;
5355 }
5356
5357exit_disable_clock:
5358 __qseecom_disable_clk_scale_down(data);
5359
5360exit_register_bus_bandwidth_needs:
5361 if (qseecom.support_bus_scaling) {
5362 mutex_lock(&qsee_bw_mutex);
5363 uret = qseecom_unregister_bus_bandwidth_needs(data);
5364 mutex_unlock(&qsee_bw_mutex);
5365 if (uret)
5366 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5367 uret, ret);
5368 }
5369
5370exit_cpu_restore:
5371 /* Deallocate the handle */
5372 if (!IS_ERR_OR_NULL(ihandle))
5373 ion_free(qseecom.ion_clnt, ihandle);
5374 return ret;
5375}
5376
/*
 * qseecom_unload_external_elf() - ask QSEE to unload the previously loaded
 * external ELF image.
 *
 * Marks the handle as an unavailable client app, issues the unload SCM
 * call, and drives the INCOMPLETE-response machinery if TZ needs a
 * listener round-trip before completing.
 *
 * Returns 0 on success, -EFAULT on SCM or TZ-reported failure.
 */
static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
{
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_unload_app_ireq req;

	/* unavailable client app */
	data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;

	/* Populate the structure for sending scm call to unload image */
	req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;

	/* SCM_CALL to unload the external elf */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
			sizeof(struct qseecom_unload_app_ireq),
			&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to unload failed : ret %d\n",
				ret);
		ret = -EFAULT;
		goto qseecom_unload_external_elf_scm_err;
	}
	if (resp.result == QSEOS_RESULT_INCOMPLETE) {
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret)
			pr_err("process_incomplete_cmd fail err: %d\n",
					ret);
	} else {
		if (resp.result != QSEOS_RESULT_SUCCESS) {
			pr_err("scm_call to unload image failed resp.result =%d\n",
						resp.result);
			ret = -EFAULT;
		}
	}

qseecom_unload_external_elf_scm_err:

	return ret;
}
5416
5417static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5418 void __user *argp)
5419{
5420
5421 int32_t ret;
5422 struct qseecom_qseos_app_load_query query_req;
5423 struct qseecom_check_app_ireq req;
5424 struct qseecom_registered_app_list *entry = NULL;
5425 unsigned long flags = 0;
5426 uint32_t app_arch = 0, app_id = 0;
5427 bool found_app = false;
5428
5429 /* Copy the relevant information needed for loading the image */
5430 if (copy_from_user(&query_req,
5431 (void __user *)argp,
5432 sizeof(struct qseecom_qseos_app_load_query))) {
5433 pr_err("copy_from_user failed\n");
5434 return -EFAULT;
5435 }
5436
5437 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5438 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5439 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5440
5441 ret = __qseecom_check_app_exists(req, &app_id);
5442 if (ret) {
5443 pr_err(" scm call to check if app is loaded failed");
5444 return ret; /* scm call failed */
5445 }
5446 if (app_id) {
5447 pr_debug("App id %d (%s) already exists\n", app_id,
5448 (char *)(req.app_name));
5449 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5450 list_for_each_entry(entry,
5451 &qseecom.registered_app_list_head, list){
5452 if (entry->app_id == app_id) {
5453 app_arch = entry->app_arch;
5454 entry->ref_cnt++;
5455 found_app = true;
5456 break;
5457 }
5458 }
5459 spin_unlock_irqrestore(
5460 &qseecom.registered_app_list_lock, flags);
5461 data->client.app_id = app_id;
5462 query_req.app_id = app_id;
5463 if (app_arch) {
5464 data->client.app_arch = app_arch;
5465 query_req.app_arch = app_arch;
5466 } else {
5467 data->client.app_arch = 0;
5468 query_req.app_arch = 0;
5469 }
5470 strlcpy(data->client.app_name, query_req.app_name,
5471 MAX_APP_NAME_SIZE);
5472 /*
5473 * If app was loaded by appsbl before and was not registered,
5474 * regiser this app now.
5475 */
5476 if (!found_app) {
5477 pr_debug("Register app %d [%s] which was loaded before\n",
5478 ret, (char *)query_req.app_name);
5479 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5480 if (!entry) {
5481 pr_err("kmalloc for app entry failed\n");
5482 return -ENOMEM;
5483 }
5484 entry->app_id = app_id;
5485 entry->ref_cnt = 1;
5486 entry->app_arch = data->client.app_arch;
5487 strlcpy(entry->app_name, data->client.app_name,
5488 MAX_APP_NAME_SIZE);
5489 entry->app_blocked = false;
5490 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005491 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005492 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5493 flags);
5494 list_add_tail(&entry->list,
5495 &qseecom.registered_app_list_head);
5496 spin_unlock_irqrestore(
5497 &qseecom.registered_app_list_lock, flags);
5498 }
5499 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5500 pr_err("copy_to_user failed\n");
5501 return -EFAULT;
5502 }
5503 return -EEXIST; /* app already loaded */
5504 } else {
5505 return 0; /* app not loaded */
5506 }
5507}
5508
5509static int __qseecom_get_ce_pipe_info(
5510 enum qseecom_key_management_usage_type usage,
5511 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5512{
5513 int ret = -EINVAL;
5514 int i, j;
5515 struct qseecom_ce_info_use *p = NULL;
5516 int total = 0;
5517 struct qseecom_ce_pipe_entry *pcepipe;
5518
5519 switch (usage) {
5520 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5521 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5522 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5523 if (qseecom.support_fde) {
5524 p = qseecom.ce_info.fde;
5525 total = qseecom.ce_info.num_fde;
5526 } else {
5527 pr_err("system does not support fde\n");
5528 return -EINVAL;
5529 }
5530 break;
5531 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5532 if (qseecom.support_pfe) {
5533 p = qseecom.ce_info.pfe;
5534 total = qseecom.ce_info.num_pfe;
5535 } else {
5536 pr_err("system does not support pfe\n");
5537 return -EINVAL;
5538 }
5539 break;
5540 default:
5541 pr_err("unsupported usage %d\n", usage);
5542 return -EINVAL;
5543 }
5544
5545 for (j = 0; j < total; j++) {
5546 if (p->unit_num == unit) {
5547 pcepipe = p->ce_pipe_entry;
5548 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5549 (*ce_hw)[i] = pcepipe->ce_num;
5550 *pipe = pcepipe->ce_pipe_pair;
5551 pcepipe++;
5552 }
5553 ret = 0;
5554 break;
5555 }
5556 p++;
5557 }
5558 return ret;
5559}
5560
/*
 * __qseecom_generate_and_save_key() - SCM call asking TZ to generate and
 * persist a key for the given usage.
 *
 * CLK_QSEE is voted on for the duration of the call. An existing key id
 * (QSEOS_RESULT_FAIL_KEY_ID_EXISTS, reported either directly, via -EINVAL
 * from the SCM layer, or from the INCOMPLETE path) is treated as success.
 *
 * Returns 0 on success / key-exists, -EFAULT on bad usage or SCM failure,
 * -EINVAL on a TZ-reported failure.
 */
static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_generate_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_generate_ireq),
		&resp, sizeof(resp));
	if (ret) {
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
			pr_debug("Key ID exists.\n");
			ret = 0;
		} else {
			pr_err("scm call to generate key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto generate_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
		pr_debug("Key ID exists.\n");
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
				pr_debug("Key ID exists.\n");
				ret = 0;
			} else {
				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			}
		}
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("gen key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
generate_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5620
/*
 * __qseecom_delete_saved_key() - SCM call asking TZ to delete a stored key.
 *
 * CLK_QSEE is voted on for the duration of the call.
 * QSEOS_RESULT_FAIL_MAX_ATTEMPT (password retry limit hit) maps to
 * -ERANGE on every path it can surface.
 *
 * Returns 0 on success, -ERANGE on max attempts, -EFAULT on bad usage or
 * SCM failure, -EINVAL on a TZ-reported failure.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5681
5682static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5683 enum qseecom_key_management_usage_type usage,
5684 struct qseecom_key_select_ireq *ireq)
5685{
5686 struct qseecom_command_scm_resp resp;
5687 int ret;
5688
5689 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5690 usage >= QSEOS_KM_USAGE_MAX) {
5691 pr_err("Error:: unsupported usage %d\n", usage);
5692 return -EFAULT;
5693 }
5694 ret = __qseecom_enable_clk(CLK_QSEE);
5695 if (ret)
5696 return ret;
5697
5698 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5699 ret = __qseecom_enable_clk(CLK_CE_DRV);
5700 if (ret)
5701 return ret;
5702 }
5703
5704 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5705 ireq, sizeof(struct qseecom_key_select_ireq),
5706 &resp, sizeof(struct qseecom_command_scm_resp));
5707 if (ret) {
5708 if (ret == -EINVAL &&
5709 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5710 pr_debug("Max attempts to input password reached.\n");
5711 ret = -ERANGE;
5712 } else if (ret == -EINVAL &&
5713 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5714 pr_debug("Set Key operation under processing...\n");
5715 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5716 } else {
5717 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5718 ret);
5719 ret = -EFAULT;
5720 }
5721 goto set_key_exit;
5722 }
5723
5724 switch (resp.result) {
5725 case QSEOS_RESULT_SUCCESS:
5726 break;
5727 case QSEOS_RESULT_INCOMPLETE:
5728 ret = __qseecom_process_incomplete_cmd(data, &resp);
5729 if (ret) {
5730 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5731 resp.result);
5732 if (resp.result ==
5733 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5734 pr_debug("Set Key operation under processing...\n");
5735 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5736 }
5737 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5738 pr_debug("Max attempts to input password reached.\n");
5739 ret = -ERANGE;
5740 }
5741 }
5742 break;
5743 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5744 pr_debug("Max attempts to input password reached.\n");
5745 ret = -ERANGE;
5746 break;
5747 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5748 pr_debug("Set Key operation under processing...\n");
5749 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5750 break;
5751 case QSEOS_RESULT_FAILURE:
5752 default:
5753 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5754 ret = -EINVAL;
5755 break;
5756 }
5757set_key_exit:
5758 __qseecom_disable_clk(CLK_QSEE);
5759 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5760 __qseecom_disable_clk(CLK_CE_DRV);
5761 return ret;
5762}
5763
/*
 * __qseecom_update_current_key_user_info() - SCM call to update the
 * user-info (password hash) bound to the current key.
 *
 * CLK_QSEE is voted on for the duration of the call.
 * QSEOS_RESULT_FAIL_PENDING_OPERATION is returned as-is so the caller can
 * back off and retry.
 *
 * Returns 0 on success, -EFAULT on bad usage or SCM failure, -EINVAL on a
 * TZ-reported failure, or the pending-operation code.
 */
static int __qseecom_update_current_key_user_info(
			struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
				ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		/* pending-operation takes precedence over the generic error */
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5825
5826
5827static int qseecom_enable_ice_setup(int usage)
5828{
5829 int ret = 0;
5830
5831 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5832 ret = qcom_ice_setup_ice_hw("ufs", true);
5833 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5834 ret = qcom_ice_setup_ice_hw("sdcc", true);
5835
5836 return ret;
5837}
5838
5839static int qseecom_disable_ice_setup(int usage)
5840{
5841 int ret = 0;
5842
5843 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5844 ret = qcom_ice_setup_ice_hw("ufs", false);
5845 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5846 ret = qcom_ice_setup_ice_hw("sdcc", false);
5847
5848 return ret;
5849}
5850
5851static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5852{
5853 struct qseecom_ce_info_use *pce_info_use, *p;
5854 int total = 0;
5855 int i;
5856
5857 switch (usage) {
5858 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5859 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5860 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5861 p = qseecom.ce_info.fde;
5862 total = qseecom.ce_info.num_fde;
5863 break;
5864 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5865 p = qseecom.ce_info.pfe;
5866 total = qseecom.ce_info.num_pfe;
5867 break;
5868 default:
5869 pr_err("unsupported usage %d\n", usage);
5870 return -EINVAL;
5871 }
5872
5873 pce_info_use = NULL;
5874
5875 for (i = 0; i < total; i++) {
5876 if (p->unit_num == unit) {
5877 pce_info_use = p;
5878 break;
5879 }
5880 p++;
5881 }
5882 if (!pce_info_use) {
5883 pr_err("can not find %d\n", unit);
5884 return -EINVAL;
5885 }
5886 return pce_info_use->num_ce_pipe_entries;
5887}
5888
5889static int qseecom_create_key(struct qseecom_dev_handle *data,
5890 void __user *argp)
5891{
5892 int i;
5893 uint32_t *ce_hw = NULL;
5894 uint32_t pipe = 0;
5895 int ret = 0;
5896 uint32_t flags = 0;
5897 struct qseecom_create_key_req create_key_req;
5898 struct qseecom_key_generate_ireq generate_key_ireq;
5899 struct qseecom_key_select_ireq set_key_ireq;
5900 uint32_t entries = 0;
5901
5902 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5903 if (ret) {
5904 pr_err("copy_from_user failed\n");
5905 return ret;
5906 }
5907
5908 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5909 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5910 pr_err("unsupported usage %d\n", create_key_req.usage);
5911 ret = -EFAULT;
5912 return ret;
5913 }
5914 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5915 create_key_req.usage);
5916 if (entries <= 0) {
5917 pr_err("no ce instance for usage %d instance %d\n",
5918 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5919 ret = -EINVAL;
5920 return ret;
5921 }
5922
5923 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5924 if (!ce_hw) {
5925 ret = -ENOMEM;
5926 return ret;
5927 }
5928 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5929 DEFAULT_CE_INFO_UNIT);
5930 if (ret) {
5931 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5932 ret = -EINVAL;
5933 goto free_buf;
5934 }
5935
5936 if (qseecom.fde_key_size)
5937 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5938 else
5939 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5940
5941 generate_key_ireq.flags = flags;
5942 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5943 memset((void *)generate_key_ireq.key_id,
5944 0, QSEECOM_KEY_ID_SIZE);
5945 memset((void *)generate_key_ireq.hash32,
5946 0, QSEECOM_HASH_SIZE);
5947 memcpy((void *)generate_key_ireq.key_id,
5948 (void *)key_id_array[create_key_req.usage].desc,
5949 QSEECOM_KEY_ID_SIZE);
5950 memcpy((void *)generate_key_ireq.hash32,
5951 (void *)create_key_req.hash32,
5952 QSEECOM_HASH_SIZE);
5953
5954 ret = __qseecom_generate_and_save_key(data,
5955 create_key_req.usage, &generate_key_ireq);
5956 if (ret) {
5957 pr_err("Failed to generate key on storage: %d\n", ret);
5958 goto free_buf;
5959 }
5960
5961 for (i = 0; i < entries; i++) {
5962 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5963 if (create_key_req.usage ==
5964 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5965 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5966 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5967
5968 } else if (create_key_req.usage ==
5969 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5970 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5971 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5972
5973 } else {
5974 set_key_ireq.ce = ce_hw[i];
5975 set_key_ireq.pipe = pipe;
5976 }
5977 set_key_ireq.flags = flags;
5978
5979 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5980 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5981 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5982 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5983 memcpy((void *)set_key_ireq.key_id,
5984 (void *)key_id_array[create_key_req.usage].desc,
5985 QSEECOM_KEY_ID_SIZE);
5986 memcpy((void *)set_key_ireq.hash32,
5987 (void *)create_key_req.hash32,
5988 QSEECOM_HASH_SIZE);
5989 /*
5990 * It will return false if it is GPCE based crypto instance or
5991 * ICE is setup properly
5992 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07005993 ret = qseecom_enable_ice_setup(create_key_req.usage);
5994 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005995 goto free_buf;
5996
5997 do {
5998 ret = __qseecom_set_clear_ce_key(data,
5999 create_key_req.usage,
6000 &set_key_ireq);
6001 /*
6002 * wait a little before calling scm again to let other
6003 * processes run
6004 */
6005 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6006 msleep(50);
6007
6008 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6009
6010 qseecom_disable_ice_setup(create_key_req.usage);
6011
6012 if (ret) {
6013 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6014 pipe, ce_hw[i], ret);
6015 goto free_buf;
6016 } else {
6017 pr_err("Set the key successfully\n");
6018 if ((create_key_req.usage ==
6019 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6020 (create_key_req.usage ==
6021 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6022 goto free_buf;
6023 }
6024 }
6025
6026free_buf:
6027 kzfree(ce_hw);
6028 return ret;
6029}
6030
6031static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6032 void __user *argp)
6033{
6034 uint32_t *ce_hw = NULL;
6035 uint32_t pipe = 0;
6036 int ret = 0;
6037 uint32_t flags = 0;
6038 int i, j;
6039 struct qseecom_wipe_key_req wipe_key_req;
6040 struct qseecom_key_delete_ireq delete_key_ireq;
6041 struct qseecom_key_select_ireq clear_key_ireq;
6042 uint32_t entries = 0;
6043
6044 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6045 if (ret) {
6046 pr_err("copy_from_user failed\n");
6047 return ret;
6048 }
6049
6050 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6051 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6052 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6053 ret = -EFAULT;
6054 return ret;
6055 }
6056
6057 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6058 wipe_key_req.usage);
6059 if (entries <= 0) {
6060 pr_err("no ce instance for usage %d instance %d\n",
6061 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6062 ret = -EINVAL;
6063 return ret;
6064 }
6065
6066 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6067 if (!ce_hw) {
6068 ret = -ENOMEM;
6069 return ret;
6070 }
6071
6072 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6073 DEFAULT_CE_INFO_UNIT);
6074 if (ret) {
6075 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6076 ret = -EINVAL;
6077 goto free_buf;
6078 }
6079
6080 if (wipe_key_req.wipe_key_flag) {
6081 delete_key_ireq.flags = flags;
6082 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6083 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6084 memcpy((void *)delete_key_ireq.key_id,
6085 (void *)key_id_array[wipe_key_req.usage].desc,
6086 QSEECOM_KEY_ID_SIZE);
6087 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6088
6089 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6090 &delete_key_ireq);
6091 if (ret) {
6092 pr_err("Failed to delete key from ssd storage: %d\n",
6093 ret);
6094 ret = -EFAULT;
6095 goto free_buf;
6096 }
6097 }
6098
6099 for (j = 0; j < entries; j++) {
6100 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6101 if (wipe_key_req.usage ==
6102 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6103 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6104 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6105 } else if (wipe_key_req.usage ==
6106 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6107 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6108 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6109 } else {
6110 clear_key_ireq.ce = ce_hw[j];
6111 clear_key_ireq.pipe = pipe;
6112 }
6113 clear_key_ireq.flags = flags;
6114 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6115 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6116 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6117 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6118
6119 /*
6120 * It will return false if it is GPCE based crypto instance or
6121 * ICE is setup properly
6122 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006123 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6124 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006125 goto free_buf;
6126
6127 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6128 &clear_key_ireq);
6129
6130 qseecom_disable_ice_setup(wipe_key_req.usage);
6131
6132 if (ret) {
6133 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6134 pipe, ce_hw[j], ret);
6135 ret = -EFAULT;
6136 goto free_buf;
6137 }
6138 }
6139
6140free_buf:
6141 kzfree(ce_hw);
6142 return ret;
6143}
6144
6145static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6146 void __user *argp)
6147{
6148 int ret = 0;
6149 uint32_t flags = 0;
6150 struct qseecom_update_key_userinfo_req update_key_req;
6151 struct qseecom_key_userinfo_update_ireq ireq;
6152
6153 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6154 if (ret) {
6155 pr_err("copy_from_user failed\n");
6156 return ret;
6157 }
6158
6159 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6160 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6161 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6162 return -EFAULT;
6163 }
6164
6165 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6166
6167 if (qseecom.fde_key_size)
6168 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6169 else
6170 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6171
6172 ireq.flags = flags;
6173 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6174 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6175 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6176 memcpy((void *)ireq.key_id,
6177 (void *)key_id_array[update_key_req.usage].desc,
6178 QSEECOM_KEY_ID_SIZE);
6179 memcpy((void *)ireq.current_hash32,
6180 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6181 memcpy((void *)ireq.new_hash32,
6182 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6183
6184 do {
6185 ret = __qseecom_update_current_key_user_info(data,
6186 update_key_req.usage,
6187 &ireq);
6188 /*
6189 * wait a little before calling scm again to let other
6190 * processes run
6191 */
6192 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6193 msleep(50);
6194
6195 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6196 if (ret) {
6197 pr_err("Failed to update key info: %d\n", ret);
6198 return ret;
6199 }
6200 return ret;
6201
6202}
6203static int qseecom_is_es_activated(void __user *argp)
6204{
Zhen Kong26e62742018-05-04 17:19:06 -07006205 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006206 struct qseecom_command_scm_resp resp;
6207 int ret;
6208
6209 if (qseecom.qsee_version < QSEE_VERSION_04) {
6210 pr_err("invalid qsee version\n");
6211 return -ENODEV;
6212 }
6213
6214 if (argp == NULL) {
6215 pr_err("arg is null\n");
6216 return -EINVAL;
6217 }
6218
6219 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6220 &req, sizeof(req), &resp, sizeof(resp));
6221 if (ret) {
6222 pr_err("scm_call failed\n");
6223 return ret;
6224 }
6225
6226 req.is_activated = resp.result;
6227 ret = copy_to_user(argp, &req, sizeof(req));
6228 if (ret) {
6229 pr_err("copy_to_user failed\n");
6230 return ret;
6231 }
6232
6233 return 0;
6234}
6235
6236static int qseecom_save_partition_hash(void __user *argp)
6237{
6238 struct qseecom_save_partition_hash_req req;
6239 struct qseecom_command_scm_resp resp;
6240 int ret;
6241
6242 memset(&resp, 0x00, sizeof(resp));
6243
6244 if (qseecom.qsee_version < QSEE_VERSION_04) {
6245 pr_err("invalid qsee version\n");
6246 return -ENODEV;
6247 }
6248
6249 if (argp == NULL) {
6250 pr_err("arg is null\n");
6251 return -EINVAL;
6252 }
6253
6254 ret = copy_from_user(&req, argp, sizeof(req));
6255 if (ret) {
6256 pr_err("copy_from_user failed\n");
6257 return ret;
6258 }
6259
6260 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6261 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6262 if (ret) {
6263 pr_err("qseecom_scm_call failed\n");
6264 return ret;
6265 }
6266
6267 return 0;
6268}
6269
6270static int qseecom_mdtp_cipher_dip(void __user *argp)
6271{
6272 struct qseecom_mdtp_cipher_dip_req req;
6273 u32 tzbuflenin, tzbuflenout;
6274 char *tzbufin = NULL, *tzbufout = NULL;
6275 struct scm_desc desc = {0};
6276 int ret;
6277
6278 do {
6279 /* Copy the parameters from userspace */
6280 if (argp == NULL) {
6281 pr_err("arg is null\n");
6282 ret = -EINVAL;
6283 break;
6284 }
6285
6286 ret = copy_from_user(&req, argp, sizeof(req));
6287 if (ret) {
6288 pr_err("copy_from_user failed, ret= %d\n", ret);
6289 break;
6290 }
6291
6292 if (req.in_buf == NULL || req.out_buf == NULL ||
6293 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6294 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6295 req.direction > 1) {
6296 pr_err("invalid parameters\n");
6297 ret = -EINVAL;
6298 break;
6299 }
6300
6301 /* Copy the input buffer from userspace to kernel space */
6302 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6303 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6304 if (!tzbufin) {
6305 pr_err("error allocating in buffer\n");
6306 ret = -ENOMEM;
6307 break;
6308 }
6309
6310 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6311 if (ret) {
6312 pr_err("copy_from_user failed, ret=%d\n", ret);
6313 break;
6314 }
6315
6316 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6317
6318 /* Prepare the output buffer in kernel space */
6319 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6320 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6321 if (!tzbufout) {
6322 pr_err("error allocating out buffer\n");
6323 ret = -ENOMEM;
6324 break;
6325 }
6326
6327 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6328
6329 /* Send the command to TZ */
6330 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6331 desc.args[0] = virt_to_phys(tzbufin);
6332 desc.args[1] = req.in_buf_size;
6333 desc.args[2] = virt_to_phys(tzbufout);
6334 desc.args[3] = req.out_buf_size;
6335 desc.args[4] = req.direction;
6336
6337 ret = __qseecom_enable_clk(CLK_QSEE);
6338 if (ret)
6339 break;
6340
6341 ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);
6342
6343 __qseecom_disable_clk(CLK_QSEE);
6344
6345 if (ret) {
6346 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6347 ret);
6348 break;
6349 }
6350
6351 /* Copy the output buffer from kernel space to userspace */
6352 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6353 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6354 if (ret) {
6355 pr_err("copy_to_user failed, ret=%d\n", ret);
6356 break;
6357 }
6358 } while (0);
6359
6360 kzfree(tzbufin);
6361 kzfree(tzbufout);
6362
6363 return ret;
6364}
6365
/*
 * Sanity-check a QTEE command/response descriptor against the client's
 * registered shared buffer: both buffers must be non-NULL and non-empty,
 * lie entirely inside [user_virt_sb_base, user_virt_sb_base + sb_length),
 * and every pointer/length sum must be free of integer overflow.
 *
 * Returns 0 if the message is usable, negative errno otherwise. The
 * checks are ordered so that overflow tests precede any arithmetic that
 * relies on them.
 */
static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
			struct qseecom_qteec_req *req)
{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	/* QTEE requests are only valid on client-app handles */
	if (data->type != QSEECOM_CLIENT_APP)
		return -EFAULT;

	/* guard the req_len + resp_len sums used below against wrap-around */
	if (req->req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if (req->req_len + req->resp_len > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->req_len + req->resp_len), data->client.sb_length);
		return -ENOMEM;
	}

	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* request buffer must start inside the shared buffer */
	if (((uintptr_t)req->req_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->req_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	/* response buffer must start inside the shared buffer */
	if (((uintptr_t)req->resp_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if ((req->req_len == 0) || (req->resp_len == 0)) {
		pr_err("cmd buf lengtgh/response buf length not valid\n");
		return -EINVAL;
	}

	/* ptr + len computed below must not wrap the address space */
	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
		pr_err("Integer overflow in req_len & req_ptr\n");
		return -EINVAL;
	}

	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_ptr\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base >
			(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* finally, both buffers must END inside the shared buffer too */
	if ((((uintptr_t)req->req_ptr + req->req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
			data->client.sb_length)) ||
		(((uintptr_t)req->resp_ptr + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
			data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
6440
/*
 * Build a DMA-coherent descriptor for a pre-allocated ion fd: the buffer
 * holds the scatterlist entry count followed by one {phys_addr, len} pair
 * per entry. The descriptor's virtual/physical address and size are
 * recorded in data->client.sec_buf_fd[fd_idx] so the caller can pass the
 * physical address to TZ and free the buffer after the call completes.
 *
 * Returns 0 on success, -ENOMEM on bad index or allocation failure.
 */
static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
			uint32_t fd_idx, struct sg_table *sg_ptr)
{
	struct scatterlist *sg = sg_ptr->sgl;
	struct qseecom_sg_entry *sg_entry;
	void *buf;
	uint i;
	size_t size;
	dma_addr_t coh_pmem;

	if (fd_idx >= MAX_ION_FD) {
		pr_err("fd_idx [%d] is invalid\n", fd_idx);
		return -ENOMEM;
	}
	/*
	 * Allocate a buffer, populate it with number of entry plus
	 * each sg entry's phy addr and length; then return the
	 * phy_addr of the buffer.
	 */
	size = sizeof(uint32_t) +
		sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
	/*
	 * NOTE(review): this rounds up by a full extra page when size is
	 * already page-aligned; PAGE_ALIGN(size) would be the usual idiom.
	 * Confirm nothing relies on the extra slack before changing.
	 */
	size = (size + PAGE_SIZE) & PAGE_MASK;
	buf = dma_alloc_coherent(qseecom.pdev,
			size, &coh_pmem, GFP_KERNEL);
	if (buf == NULL) {
		pr_err("failed to alloc memory for sg buf\n");
		return -ENOMEM;
	}
	/* layout: [nents][entry 0][entry 1]... */
	*(uint32_t *)buf = sg_ptr->nents;
	sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
	for (i = 0; i < sg_ptr->nents; i++) {
		sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
		sg_entry->len = sg->length;
		sg_entry++;
		sg = sg_next(sg);
	}
	/* remember the descriptor so cleanup can dma_free_coherent() it */
	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
	data->client.sec_buf_fd[fd_idx].vbase = buf;
	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
	data->client.sec_buf_fd[fd_idx].size = size;
	return 0;
}
6483
/*
 * Patch (cleanup == false) or un-patch (cleanup == true) the memref slots
 * inside a QTEE modfd request buffer. For each valid ion fd, the 32-bit
 * word at cmd_buf_offset is rewritten:
 *   - 0 on entry: a pre-allocated secure-heap descriptor is built via
 *     __qseecom_qteec_handle_pre_alc_fd() and its phys addr/size stored;
 *   - non-zero: the word is replaced by the single sg entry's dma address.
 * On cleanup the transformation is reversed and any pre-allocated
 * descriptor is freed. Caches are cleaned before the TZ call and
 * invalidated after it, and sglistinfo entries are recorded for
 * whitelist-capable TZ.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
			struct qseecom_dev_handle *data, bool cleanup)
{
	struct ion_handle *ihandle;
	int ret = 0;
	int i = 0;
	uint32_t *update;
	struct sg_table *sg_ptr = NULL;
	struct scatterlist *sg;
	struct qseecom_param_memref *memref;

	if (req == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	for (i = 0; i < MAX_ION_FD; i++) {
		if (req->ifd_data[i].fd > 0) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			/* offset must leave room for a full 32-bit word */
			if ((req->req_len < sizeof(uint32_t)) ||
				(req->ifd_data[i].cmd_buf_offset >
				req->req_len - sizeof(uint32_t))) {
				pr_err("Invalid offset/req len 0x%x/0x%x\n",
					req->req_len,
					req->ifd_data[i].cmd_buf_offset);
				return -EINVAL;
			}
			update = (uint32_t *)((char *) req->req_ptr +
				req->ifd_data[i].cmd_buf_offset);
			if (!update) {
				pr_err("update pointer is NULL\n");
				return -EINVAL;
			}
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg == NULL) {
			pr_err("sg is NULL\n");
			goto err;
		}
		if ((sg_ptr->nents == 0) || (sg->length == 0)) {
			pr_err("Num of scat entr (%d)or length(%d) invalid\n",
					sg_ptr->nents, sg->length);
			goto err;
		}
		/* clean up buf for pre-allocated fd */
		if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
			(*update)) {
			if (data->client.sec_buf_fd[i].vbase)
				dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			/* zero the memref slot and forget the descriptor */
			memset((void *)update, 0,
				sizeof(struct qseecom_param_memref));
			memset(&(data->client.sec_buf_fd[i]), 0,
				sizeof(struct qseecom_sec_buf_fd_info));
			goto clean;
		}

		if (*update == 0) {
			/* update buf for pre-allocated fd from secure heap*/
			ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
				sg_ptr);
			if (ret) {
				pr_err("Failed to handle buf for fd[%d]\n", i);
				goto err;
			}
			memref = (struct qseecom_param_memref *)update;
			memref->buffer =
				(uint32_t)(data->client.sec_buf_fd[i].pbase);
			memref->size =
				(uint32_t)(data->client.sec_buf_fd[i].size);
		} else {
			/* update buf for fd from non-secure qseecom heap */
			if (sg_ptr->nents != 1) {
				pr_err("Num of scat entr (%d) invalid\n",
					sg_ptr->nents);
				goto err;
			}
			if (cleanup)
				*update = 0;
			else
				*update = (uint32_t)sg_dma_address(sg_ptr->sgl);
		}
clean:
		if (cleanup) {
			/* TZ wrote the buffer: invalidate so CPU sees it */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
				ihandle, NULL, sg->length,
				ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* CPU wrote the buffer: clean it out for TZ */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
				ihandle, NULL, sg->length,
				ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* record this buffer for the TZ whitelist table */
			data->sglistinfo_ptr[i].indexAndFlags =
				SGLISTINFO_SET_INDEX_FLAG(
				(sg_ptr->nents == 1), 0,
				req->ifd_data[i].cmd_buf_offset);
			data->sglistinfo_ptr[i].sizeOrCount =
				(sg_ptr->nents == 1) ?
				sg->length : sg_ptr->nents;
			data->sglist_cnt = i + 1;
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
}
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
6616
/*
 * Common path for QTEE session commands (open/close/cancel): validates the
 * request, looks up the registered app, converts the user-virtual req/resp
 * pointers to kernel/physical addresses, builds the 32- or 64-bit ireq
 * depending on QSEE version, performs the cache maintenance and SCM call,
 * and processes incomplete/reentrancy results. Open/cancel commands also
 * pass through __qseecom_update_qteec_req_buf() to patch ion-fd memrefs
 * before the call and restore them afterwards.
 *
 * Returns 0 on success, negative errno otherwise. ret2 tracks the
 * post-call cleanup status separately so a cleanup failure is reported
 * even when the command itself failed.
 */
static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req, uint32_t cmd_id)
{
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int ret2 = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = __qseecom_qteec_validate_msg(data, req);
	if (ret)
		return ret;

	/* keep the original user-virtual pointers for the phys conversion */
	req_ptr = req->req_ptr;
	resp_ptr = req->resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* req now carries kernel-virtual pointers for in-place patching */
	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->req_ptr);
	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->resp_ptr);

	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, false);
		if (ret)
			return ret;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req->req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req->resp_len;
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req->req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req->resp_len;
		/* 32-bit TAs cannot address buffers at/above the 4G line */
		if ((data->client.app_arch == ELFCLASS32) &&
			((ireq_64bit.req_ptr >=
				PHY_ADDR_4G - ireq_64bit.req_len) ||
			(ireq_64bit.resp_ptr >=
				PHY_ADDR_4G - ireq_64bit.resp_len))){
			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
				data->client.app_name, data->client.app_id);
			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
				ireq_64bit.req_ptr, ireq_64bit.req_len,
				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
			return -EFAULT;
		}
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
	}
	if (qseecom.whitelist_support == true
		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
	else
		*(uint32_t *)cmd_buf = cmd_id;

	reqd_len_sb_in = req->req_len + req->resp_len;
	/* clean caches so TZ sees the CPU-written request bytes */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
						resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/* invalidate caches so the CPU sees TZ-written response bytes */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
			data->client.sb_virt, data->client.sb_length,
			ION_IOC_INV_CACHES);
	if (ret2) {
		/* NOTE(review): logs 'ret' but the failing status is 'ret2' */
		pr_err("cache operation failed %d\n", ret);
		return ret2;
	}

	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret2 = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, true);
		if (ret2)
			return ret2;
	}
	return ret;
}
6779
6780static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6781 void __user *argp)
6782{
6783 struct qseecom_qteec_modfd_req req;
6784 int ret = 0;
6785
6786 ret = copy_from_user(&req, argp,
6787 sizeof(struct qseecom_qteec_modfd_req));
6788 if (ret) {
6789 pr_err("copy_from_user failed\n");
6790 return ret;
6791 }
6792 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6793 QSEOS_TEE_OPEN_SESSION);
6794
6795 return ret;
6796}
6797
6798static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6799 void __user *argp)
6800{
6801 struct qseecom_qteec_req req;
6802 int ret = 0;
6803
6804 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6805 if (ret) {
6806 pr_err("copy_from_user failed\n");
6807 return ret;
6808 }
6809 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6810 return ret;
6811}
6812
6813static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6814 void __user *argp)
6815{
6816 struct qseecom_qteec_modfd_req req;
6817 struct qseecom_command_scm_resp resp;
6818 struct qseecom_qteec_ireq ireq;
6819 struct qseecom_qteec_64bit_ireq ireq_64bit;
6820 struct qseecom_registered_app_list *ptr_app;
6821 bool found_app = false;
6822 unsigned long flags;
6823 int ret = 0;
6824 int i = 0;
6825 uint32_t reqd_len_sb_in = 0;
6826 void *cmd_buf = NULL;
6827 size_t cmd_len;
6828 struct sglist_info *table = data->sglistinfo_ptr;
6829 void *req_ptr = NULL;
6830 void *resp_ptr = NULL;
6831
6832 ret = copy_from_user(&req, argp,
6833 sizeof(struct qseecom_qteec_modfd_req));
6834 if (ret) {
6835 pr_err("copy_from_user failed\n");
6836 return ret;
6837 }
6838 ret = __qseecom_qteec_validate_msg(data,
6839 (struct qseecom_qteec_req *)(&req));
6840 if (ret)
6841 return ret;
6842 req_ptr = req.req_ptr;
6843 resp_ptr = req.resp_ptr;
6844
6845 /* find app_id & img_name from list */
6846 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6847 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6848 list) {
6849 if ((ptr_app->app_id == data->client.app_id) &&
6850 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6851 found_app = true;
6852 break;
6853 }
6854 }
6855 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6856 if (!found_app) {
6857 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6858 (char *)data->client.app_name);
6859 return -ENOENT;
6860 }
6861
6862 /* validate offsets */
6863 for (i = 0; i < MAX_ION_FD; i++) {
6864 if (req.ifd_data[i].fd) {
6865 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
6866 return -EINVAL;
6867 }
6868 }
6869 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6870 (uintptr_t)req.req_ptr);
6871 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6872 (uintptr_t)req.resp_ptr);
6873 ret = __qseecom_update_qteec_req_buf(&req, data, false);
6874 if (ret)
6875 return ret;
6876
6877 if (qseecom.qsee_version < QSEE_VERSION_40) {
6878 ireq.app_id = data->client.app_id;
6879 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6880 (uintptr_t)req_ptr);
6881 ireq.req_len = req.req_len;
6882 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6883 (uintptr_t)resp_ptr);
6884 ireq.resp_len = req.resp_len;
6885 cmd_buf = (void *)&ireq;
6886 cmd_len = sizeof(struct qseecom_qteec_ireq);
6887 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6888 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6889 dmac_flush_range((void *)table,
6890 (void *)table + SGLISTINFO_TABLE_SIZE);
6891 } else {
6892 ireq_64bit.app_id = data->client.app_id;
6893 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6894 (uintptr_t)req_ptr);
6895 ireq_64bit.req_len = req.req_len;
6896 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6897 (uintptr_t)resp_ptr);
6898 ireq_64bit.resp_len = req.resp_len;
6899 cmd_buf = (void *)&ireq_64bit;
6900 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6901 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6902 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6903 dmac_flush_range((void *)table,
6904 (void *)table + SGLISTINFO_TABLE_SIZE);
6905 }
6906 reqd_len_sb_in = req.req_len + req.resp_len;
6907 if (qseecom.whitelist_support == true)
6908 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
6909 else
6910 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
6911
6912 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6913 data->client.sb_virt,
6914 reqd_len_sb_in,
6915 ION_IOC_CLEAN_INV_CACHES);
6916 if (ret) {
6917 pr_err("cache operation failed %d\n", ret);
6918 return ret;
6919 }
6920
6921 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6922
6923 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6924 cmd_buf, cmd_len,
6925 &resp, sizeof(resp));
6926 if (ret) {
6927 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6928 ret, data->client.app_id);
6929 return ret;
6930 }
6931
6932 if (qseecom.qsee_reentrancy_support) {
6933 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
6934 } else {
6935 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6936 ret = __qseecom_process_incomplete_cmd(data, &resp);
6937 if (ret) {
6938 pr_err("process_incomplete_cmd failed err: %d\n",
6939 ret);
6940 return ret;
6941 }
6942 } else {
6943 if (resp.result != QSEOS_RESULT_SUCCESS) {
6944 pr_err("Response result %d not supported\n",
6945 resp.result);
6946 ret = -EINVAL;
6947 }
6948 }
6949 }
6950 ret = __qseecom_update_qteec_req_buf(&req, data, true);
6951 if (ret)
6952 return ret;
6953
6954 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6955 data->client.sb_virt, data->client.sb_length,
6956 ION_IOC_INV_CACHES);
6957 if (ret) {
6958 pr_err("cache operation failed %d\n", ret);
6959 return ret;
6960 }
6961 return 0;
6962}
6963
6964static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6965 void __user *argp)
6966{
6967 struct qseecom_qteec_modfd_req req;
6968 int ret = 0;
6969
6970 ret = copy_from_user(&req, argp,
6971 sizeof(struct qseecom_qteec_modfd_req));
6972 if (ret) {
6973 pr_err("copy_from_user failed\n");
6974 return ret;
6975 }
6976 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6977 QSEOS_TEE_REQUEST_CANCELLATION);
6978
6979 return ret;
6980}
6981
6982static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6983{
6984 if (data->sglist_cnt) {
6985 memset(data->sglistinfo_ptr, 0,
6986 SGLISTINFO_TABLE_SIZE);
6987 data->sglist_cnt = 0;
6988 }
6989}
6990
6991static inline long qseecom_ioctl(struct file *file,
6992 unsigned int cmd, unsigned long arg)
6993{
6994 int ret = 0;
6995 struct qseecom_dev_handle *data = file->private_data;
6996 void __user *argp = (void __user *) arg;
6997 bool perf_enabled = false;
6998
6999 if (!data) {
7000 pr_err("Invalid/uninitialized device handle\n");
7001 return -EINVAL;
7002 }
7003
7004 if (data->abort) {
7005 pr_err("Aborting qseecom driver\n");
7006 return -ENODEV;
7007 }
7008
7009 switch (cmd) {
7010 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
7011 if (data->type != QSEECOM_GENERIC) {
7012 pr_err("reg lstnr req: invalid handle (%d)\n",
7013 data->type);
7014 ret = -EINVAL;
7015 break;
7016 }
7017 pr_debug("ioctl register_listener_req()\n");
7018 mutex_lock(&app_access_lock);
7019 atomic_inc(&data->ioctl_count);
7020 data->type = QSEECOM_LISTENER_SERVICE;
7021 ret = qseecom_register_listener(data, argp);
7022 atomic_dec(&data->ioctl_count);
7023 wake_up_all(&data->abort_wq);
7024 mutex_unlock(&app_access_lock);
7025 if (ret)
7026 pr_err("failed qseecom_register_listener: %d\n", ret);
7027 break;
7028 }
Neeraj Sonib30ac1f2018-04-17 14:48:42 +05307029 case QSEECOM_IOCTL_SET_ICE_INFO: {
7030 struct qseecom_ice_data_t ice_data;
7031
7032 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
7033 if (ret) {
7034 pr_err("copy_from_user failed\n");
7035 return -EFAULT;
7036 }
7037 qcom_ice_set_fde_flag(ice_data.flag);
7038 break;
7039 }
7040
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007041 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7042 if ((data->listener.id == 0) ||
7043 (data->type != QSEECOM_LISTENER_SERVICE)) {
7044 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7045 data->type, data->listener.id);
7046 ret = -EINVAL;
7047 break;
7048 }
7049 pr_debug("ioctl unregister_listener_req()\n");
Zhen Kong26e62742018-05-04 17:19:06 -07007050 __qseecom_listener_abort_all(1);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007051 mutex_lock(&app_access_lock);
7052 atomic_inc(&data->ioctl_count);
7053 ret = qseecom_unregister_listener(data);
7054 atomic_dec(&data->ioctl_count);
7055 wake_up_all(&data->abort_wq);
7056 mutex_unlock(&app_access_lock);
Zhen Kong26e62742018-05-04 17:19:06 -07007057 __qseecom_listener_abort_all(0);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007058 if (ret)
7059 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7060 break;
7061 }
7062 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7063 if ((data->client.app_id == 0) ||
7064 (data->type != QSEECOM_CLIENT_APP)) {
7065 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7066 data->type, data->client.app_id);
7067 ret = -EINVAL;
7068 break;
7069 }
7070 /* Only one client allowed here at a time */
7071 mutex_lock(&app_access_lock);
7072 if (qseecom.support_bus_scaling) {
7073 /* register bus bw in case the client doesn't do it */
7074 if (!data->mode) {
7075 mutex_lock(&qsee_bw_mutex);
7076 __qseecom_register_bus_bandwidth_needs(
7077 data, HIGH);
7078 mutex_unlock(&qsee_bw_mutex);
7079 }
7080 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7081 if (ret) {
7082 pr_err("Failed to set bw.\n");
7083 ret = -EINVAL;
7084 mutex_unlock(&app_access_lock);
7085 break;
7086 }
7087 }
7088 /*
7089 * On targets where crypto clock is handled by HLOS,
7090 * if clk_access_cnt is zero and perf_enabled is false,
7091 * then the crypto clock was not enabled before sending cmd to
7092 * tz, qseecom will enable the clock to avoid service failure.
7093 */
7094 if (!qseecom.no_clock_support &&
7095 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7096 pr_debug("ce clock is not enabled!\n");
7097 ret = qseecom_perf_enable(data);
7098 if (ret) {
7099 pr_err("Failed to vote for clock with err %d\n",
7100 ret);
7101 mutex_unlock(&app_access_lock);
7102 ret = -EINVAL;
7103 break;
7104 }
7105 perf_enabled = true;
7106 }
7107 atomic_inc(&data->ioctl_count);
7108 ret = qseecom_send_cmd(data, argp);
7109 if (qseecom.support_bus_scaling)
7110 __qseecom_add_bw_scale_down_timer(
7111 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7112 if (perf_enabled) {
7113 qsee_disable_clock_vote(data, CLK_DFAB);
7114 qsee_disable_clock_vote(data, CLK_SFPB);
7115 }
7116 atomic_dec(&data->ioctl_count);
7117 wake_up_all(&data->abort_wq);
7118 mutex_unlock(&app_access_lock);
7119 if (ret)
7120 pr_err("failed qseecom_send_cmd: %d\n", ret);
7121 break;
7122 }
7123 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7124 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7125 if ((data->client.app_id == 0) ||
7126 (data->type != QSEECOM_CLIENT_APP)) {
7127 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7128 data->type, data->client.app_id);
7129 ret = -EINVAL;
7130 break;
7131 }
7132 /* Only one client allowed here at a time */
7133 mutex_lock(&app_access_lock);
7134 if (qseecom.support_bus_scaling) {
7135 if (!data->mode) {
7136 mutex_lock(&qsee_bw_mutex);
7137 __qseecom_register_bus_bandwidth_needs(
7138 data, HIGH);
7139 mutex_unlock(&qsee_bw_mutex);
7140 }
7141 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7142 if (ret) {
7143 pr_err("Failed to set bw.\n");
7144 mutex_unlock(&app_access_lock);
7145 ret = -EINVAL;
7146 break;
7147 }
7148 }
7149 /*
7150 * On targets where crypto clock is handled by HLOS,
7151 * if clk_access_cnt is zero and perf_enabled is false,
7152 * then the crypto clock was not enabled before sending cmd to
7153 * tz, qseecom will enable the clock to avoid service failure.
7154 */
7155 if (!qseecom.no_clock_support &&
7156 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7157 pr_debug("ce clock is not enabled!\n");
7158 ret = qseecom_perf_enable(data);
7159 if (ret) {
7160 pr_err("Failed to vote for clock with err %d\n",
7161 ret);
7162 mutex_unlock(&app_access_lock);
7163 ret = -EINVAL;
7164 break;
7165 }
7166 perf_enabled = true;
7167 }
7168 atomic_inc(&data->ioctl_count);
7169 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7170 ret = qseecom_send_modfd_cmd(data, argp);
7171 else
7172 ret = qseecom_send_modfd_cmd_64(data, argp);
7173 if (qseecom.support_bus_scaling)
7174 __qseecom_add_bw_scale_down_timer(
7175 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7176 if (perf_enabled) {
7177 qsee_disable_clock_vote(data, CLK_DFAB);
7178 qsee_disable_clock_vote(data, CLK_SFPB);
7179 }
7180 atomic_dec(&data->ioctl_count);
7181 wake_up_all(&data->abort_wq);
7182 mutex_unlock(&app_access_lock);
7183 if (ret)
7184 pr_err("failed qseecom_send_cmd: %d\n", ret);
7185 __qseecom_clean_data_sglistinfo(data);
7186 break;
7187 }
7188 case QSEECOM_IOCTL_RECEIVE_REQ: {
7189 if ((data->listener.id == 0) ||
7190 (data->type != QSEECOM_LISTENER_SERVICE)) {
7191 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7192 data->type, data->listener.id);
7193 ret = -EINVAL;
7194 break;
7195 }
7196 atomic_inc(&data->ioctl_count);
7197 ret = qseecom_receive_req(data);
7198 atomic_dec(&data->ioctl_count);
7199 wake_up_all(&data->abort_wq);
7200 if (ret && (ret != -ERESTARTSYS))
7201 pr_err("failed qseecom_receive_req: %d\n", ret);
7202 break;
7203 }
7204 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7205 if ((data->listener.id == 0) ||
7206 (data->type != QSEECOM_LISTENER_SERVICE)) {
7207 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7208 data->type, data->listener.id);
7209 ret = -EINVAL;
7210 break;
7211 }
7212 atomic_inc(&data->ioctl_count);
7213 if (!qseecom.qsee_reentrancy_support)
7214 ret = qseecom_send_resp();
7215 else
7216 ret = qseecom_reentrancy_send_resp(data);
7217 atomic_dec(&data->ioctl_count);
7218 wake_up_all(&data->abort_wq);
7219 if (ret)
7220 pr_err("failed qseecom_send_resp: %d\n", ret);
7221 break;
7222 }
7223 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7224 if ((data->type != QSEECOM_CLIENT_APP) &&
7225 (data->type != QSEECOM_GENERIC) &&
7226 (data->type != QSEECOM_SECURE_SERVICE)) {
7227 pr_err("set mem param req: invalid handle (%d)\n",
7228 data->type);
7229 ret = -EINVAL;
7230 break;
7231 }
7232 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7233 mutex_lock(&app_access_lock);
7234 atomic_inc(&data->ioctl_count);
7235 ret = qseecom_set_client_mem_param(data, argp);
7236 atomic_dec(&data->ioctl_count);
7237 mutex_unlock(&app_access_lock);
7238 if (ret)
7239 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7240 ret);
7241 break;
7242 }
7243 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7244 if ((data->type != QSEECOM_GENERIC) &&
7245 (data->type != QSEECOM_CLIENT_APP)) {
7246 pr_err("load app req: invalid handle (%d)\n",
7247 data->type);
7248 ret = -EINVAL;
7249 break;
7250 }
7251 data->type = QSEECOM_CLIENT_APP;
7252 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7253 mutex_lock(&app_access_lock);
7254 atomic_inc(&data->ioctl_count);
7255 ret = qseecom_load_app(data, argp);
7256 atomic_dec(&data->ioctl_count);
7257 mutex_unlock(&app_access_lock);
7258 if (ret)
7259 pr_err("failed load_app request: %d\n", ret);
7260 break;
7261 }
7262 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7263 if ((data->client.app_id == 0) ||
7264 (data->type != QSEECOM_CLIENT_APP)) {
7265 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7266 data->type, data->client.app_id);
7267 ret = -EINVAL;
7268 break;
7269 }
7270 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7271 mutex_lock(&app_access_lock);
7272 atomic_inc(&data->ioctl_count);
7273 ret = qseecom_unload_app(data, false);
7274 atomic_dec(&data->ioctl_count);
7275 mutex_unlock(&app_access_lock);
7276 if (ret)
7277 pr_err("failed unload_app request: %d\n", ret);
7278 break;
7279 }
7280 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7281 atomic_inc(&data->ioctl_count);
7282 ret = qseecom_get_qseos_version(data, argp);
7283 if (ret)
7284 pr_err("qseecom_get_qseos_version: %d\n", ret);
7285 atomic_dec(&data->ioctl_count);
7286 break;
7287 }
7288 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7289 if ((data->type != QSEECOM_GENERIC) &&
7290 (data->type != QSEECOM_CLIENT_APP)) {
7291 pr_err("perf enable req: invalid handle (%d)\n",
7292 data->type);
7293 ret = -EINVAL;
7294 break;
7295 }
7296 if ((data->type == QSEECOM_CLIENT_APP) &&
7297 (data->client.app_id == 0)) {
7298 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7299 data->type, data->client.app_id);
7300 ret = -EINVAL;
7301 break;
7302 }
7303 atomic_inc(&data->ioctl_count);
7304 if (qseecom.support_bus_scaling) {
7305 mutex_lock(&qsee_bw_mutex);
7306 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7307 mutex_unlock(&qsee_bw_mutex);
7308 } else {
7309 ret = qseecom_perf_enable(data);
7310 if (ret)
7311 pr_err("Fail to vote for clocks %d\n", ret);
7312 }
7313 atomic_dec(&data->ioctl_count);
7314 break;
7315 }
7316 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7317 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7318 (data->type != QSEECOM_CLIENT_APP)) {
7319 pr_err("perf disable req: invalid handle (%d)\n",
7320 data->type);
7321 ret = -EINVAL;
7322 break;
7323 }
7324 if ((data->type == QSEECOM_CLIENT_APP) &&
7325 (data->client.app_id == 0)) {
7326 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7327 data->type, data->client.app_id);
7328 ret = -EINVAL;
7329 break;
7330 }
7331 atomic_inc(&data->ioctl_count);
7332 if (!qseecom.support_bus_scaling) {
7333 qsee_disable_clock_vote(data, CLK_DFAB);
7334 qsee_disable_clock_vote(data, CLK_SFPB);
7335 } else {
7336 mutex_lock(&qsee_bw_mutex);
7337 qseecom_unregister_bus_bandwidth_needs(data);
7338 mutex_unlock(&qsee_bw_mutex);
7339 }
7340 atomic_dec(&data->ioctl_count);
7341 break;
7342 }
7343
7344 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7345 /* If crypto clock is not handled by HLOS, return directly. */
7346 if (qseecom.no_clock_support) {
7347 pr_debug("crypto clock is not handled by HLOS\n");
7348 break;
7349 }
7350 if ((data->client.app_id == 0) ||
7351 (data->type != QSEECOM_CLIENT_APP)) {
7352 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7353 data->type, data->client.app_id);
7354 ret = -EINVAL;
7355 break;
7356 }
7357 atomic_inc(&data->ioctl_count);
7358 ret = qseecom_scale_bus_bandwidth(data, argp);
7359 atomic_dec(&data->ioctl_count);
7360 break;
7361 }
7362 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7363 if (data->type != QSEECOM_GENERIC) {
7364 pr_err("load ext elf req: invalid client handle (%d)\n",
7365 data->type);
7366 ret = -EINVAL;
7367 break;
7368 }
7369 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7370 data->released = true;
7371 mutex_lock(&app_access_lock);
7372 atomic_inc(&data->ioctl_count);
7373 ret = qseecom_load_external_elf(data, argp);
7374 atomic_dec(&data->ioctl_count);
7375 mutex_unlock(&app_access_lock);
7376 if (ret)
7377 pr_err("failed load_external_elf request: %d\n", ret);
7378 break;
7379 }
7380 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7381 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7382 pr_err("unload ext elf req: invalid handle (%d)\n",
7383 data->type);
7384 ret = -EINVAL;
7385 break;
7386 }
7387 data->released = true;
7388 mutex_lock(&app_access_lock);
7389 atomic_inc(&data->ioctl_count);
7390 ret = qseecom_unload_external_elf(data);
7391 atomic_dec(&data->ioctl_count);
7392 mutex_unlock(&app_access_lock);
7393 if (ret)
7394 pr_err("failed unload_app request: %d\n", ret);
7395 break;
7396 }
7397 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7398 data->type = QSEECOM_CLIENT_APP;
7399 mutex_lock(&app_access_lock);
7400 atomic_inc(&data->ioctl_count);
7401 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7402 ret = qseecom_query_app_loaded(data, argp);
7403 atomic_dec(&data->ioctl_count);
7404 mutex_unlock(&app_access_lock);
7405 break;
7406 }
7407 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7408 if (data->type != QSEECOM_GENERIC) {
7409 pr_err("send cmd svc req: invalid handle (%d)\n",
7410 data->type);
7411 ret = -EINVAL;
7412 break;
7413 }
7414 data->type = QSEECOM_SECURE_SERVICE;
7415 if (qseecom.qsee_version < QSEE_VERSION_03) {
7416 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7417 qseecom.qsee_version);
7418 return -EINVAL;
7419 }
7420 mutex_lock(&app_access_lock);
7421 atomic_inc(&data->ioctl_count);
7422 ret = qseecom_send_service_cmd(data, argp);
7423 atomic_dec(&data->ioctl_count);
7424 mutex_unlock(&app_access_lock);
7425 break;
7426 }
7427 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7428 if (!(qseecom.support_pfe || qseecom.support_fde))
7429 pr_err("Features requiring key init not supported\n");
7430 if (data->type != QSEECOM_GENERIC) {
7431 pr_err("create key req: invalid handle (%d)\n",
7432 data->type);
7433 ret = -EINVAL;
7434 break;
7435 }
7436 if (qseecom.qsee_version < QSEE_VERSION_05) {
7437 pr_err("Create Key feature unsupported: qsee ver %u\n",
7438 qseecom.qsee_version);
7439 return -EINVAL;
7440 }
7441 data->released = true;
7442 mutex_lock(&app_access_lock);
7443 atomic_inc(&data->ioctl_count);
7444 ret = qseecom_create_key(data, argp);
7445 if (ret)
7446 pr_err("failed to create encryption key: %d\n", ret);
7447
7448 atomic_dec(&data->ioctl_count);
7449 mutex_unlock(&app_access_lock);
7450 break;
7451 }
7452 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7453 if (!(qseecom.support_pfe || qseecom.support_fde))
7454 pr_err("Features requiring key init not supported\n");
7455 if (data->type != QSEECOM_GENERIC) {
7456 pr_err("wipe key req: invalid handle (%d)\n",
7457 data->type);
7458 ret = -EINVAL;
7459 break;
7460 }
7461 if (qseecom.qsee_version < QSEE_VERSION_05) {
7462 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7463 qseecom.qsee_version);
7464 return -EINVAL;
7465 }
7466 data->released = true;
7467 mutex_lock(&app_access_lock);
7468 atomic_inc(&data->ioctl_count);
7469 ret = qseecom_wipe_key(data, argp);
7470 if (ret)
7471 pr_err("failed to wipe encryption key: %d\n", ret);
7472 atomic_dec(&data->ioctl_count);
7473 mutex_unlock(&app_access_lock);
7474 break;
7475 }
7476 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7477 if (!(qseecom.support_pfe || qseecom.support_fde))
7478 pr_err("Features requiring key init not supported\n");
7479 if (data->type != QSEECOM_GENERIC) {
7480 pr_err("update key req: invalid handle (%d)\n",
7481 data->type);
7482 ret = -EINVAL;
7483 break;
7484 }
7485 if (qseecom.qsee_version < QSEE_VERSION_05) {
7486 pr_err("Update Key feature unsupported in qsee ver %u\n",
7487 qseecom.qsee_version);
7488 return -EINVAL;
7489 }
7490 data->released = true;
7491 mutex_lock(&app_access_lock);
7492 atomic_inc(&data->ioctl_count);
7493 ret = qseecom_update_key_user_info(data, argp);
7494 if (ret)
7495 pr_err("failed to update key user info: %d\n", ret);
7496 atomic_dec(&data->ioctl_count);
7497 mutex_unlock(&app_access_lock);
7498 break;
7499 }
7500 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7501 if (data->type != QSEECOM_GENERIC) {
7502 pr_err("save part hash req: invalid handle (%d)\n",
7503 data->type);
7504 ret = -EINVAL;
7505 break;
7506 }
7507 data->released = true;
7508 mutex_lock(&app_access_lock);
7509 atomic_inc(&data->ioctl_count);
7510 ret = qseecom_save_partition_hash(argp);
7511 atomic_dec(&data->ioctl_count);
7512 mutex_unlock(&app_access_lock);
7513 break;
7514 }
7515 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7516 if (data->type != QSEECOM_GENERIC) {
7517 pr_err("ES activated req: invalid handle (%d)\n",
7518 data->type);
7519 ret = -EINVAL;
7520 break;
7521 }
7522 data->released = true;
7523 mutex_lock(&app_access_lock);
7524 atomic_inc(&data->ioctl_count);
7525 ret = qseecom_is_es_activated(argp);
7526 atomic_dec(&data->ioctl_count);
7527 mutex_unlock(&app_access_lock);
7528 break;
7529 }
7530 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7531 if (data->type != QSEECOM_GENERIC) {
7532 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7533 data->type);
7534 ret = -EINVAL;
7535 break;
7536 }
7537 data->released = true;
7538 mutex_lock(&app_access_lock);
7539 atomic_inc(&data->ioctl_count);
7540 ret = qseecom_mdtp_cipher_dip(argp);
7541 atomic_dec(&data->ioctl_count);
7542 mutex_unlock(&app_access_lock);
7543 break;
7544 }
7545 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7546 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7547 if ((data->listener.id == 0) ||
7548 (data->type != QSEECOM_LISTENER_SERVICE)) {
7549 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7550 data->type, data->listener.id);
7551 ret = -EINVAL;
7552 break;
7553 }
7554 atomic_inc(&data->ioctl_count);
7555 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7556 ret = qseecom_send_modfd_resp(data, argp);
7557 else
7558 ret = qseecom_send_modfd_resp_64(data, argp);
7559 atomic_dec(&data->ioctl_count);
7560 wake_up_all(&data->abort_wq);
7561 if (ret)
7562 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7563 __qseecom_clean_data_sglistinfo(data);
7564 break;
7565 }
7566 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7567 if ((data->client.app_id == 0) ||
7568 (data->type != QSEECOM_CLIENT_APP)) {
7569 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7570 data->type, data->client.app_id);
7571 ret = -EINVAL;
7572 break;
7573 }
7574 if (qseecom.qsee_version < QSEE_VERSION_40) {
7575 pr_err("GP feature unsupported: qsee ver %u\n",
7576 qseecom.qsee_version);
7577 return -EINVAL;
7578 }
7579 /* Only one client allowed here at a time */
7580 mutex_lock(&app_access_lock);
7581 atomic_inc(&data->ioctl_count);
7582 ret = qseecom_qteec_open_session(data, argp);
7583 atomic_dec(&data->ioctl_count);
7584 wake_up_all(&data->abort_wq);
7585 mutex_unlock(&app_access_lock);
7586 if (ret)
7587 pr_err("failed open_session_cmd: %d\n", ret);
7588 __qseecom_clean_data_sglistinfo(data);
7589 break;
7590 }
7591 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7592 if ((data->client.app_id == 0) ||
7593 (data->type != QSEECOM_CLIENT_APP)) {
7594 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7595 data->type, data->client.app_id);
7596 ret = -EINVAL;
7597 break;
7598 }
7599 if (qseecom.qsee_version < QSEE_VERSION_40) {
7600 pr_err("GP feature unsupported: qsee ver %u\n",
7601 qseecom.qsee_version);
7602 return -EINVAL;
7603 }
7604 /* Only one client allowed here at a time */
7605 mutex_lock(&app_access_lock);
7606 atomic_inc(&data->ioctl_count);
7607 ret = qseecom_qteec_close_session(data, argp);
7608 atomic_dec(&data->ioctl_count);
7609 wake_up_all(&data->abort_wq);
7610 mutex_unlock(&app_access_lock);
7611 if (ret)
7612 pr_err("failed close_session_cmd: %d\n", ret);
7613 break;
7614 }
7615 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7616 if ((data->client.app_id == 0) ||
7617 (data->type != QSEECOM_CLIENT_APP)) {
7618 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7619 data->type, data->client.app_id);
7620 ret = -EINVAL;
7621 break;
7622 }
7623 if (qseecom.qsee_version < QSEE_VERSION_40) {
7624 pr_err("GP feature unsupported: qsee ver %u\n",
7625 qseecom.qsee_version);
7626 return -EINVAL;
7627 }
7628 /* Only one client allowed here at a time */
7629 mutex_lock(&app_access_lock);
7630 atomic_inc(&data->ioctl_count);
7631 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7632 atomic_dec(&data->ioctl_count);
7633 wake_up_all(&data->abort_wq);
7634 mutex_unlock(&app_access_lock);
7635 if (ret)
7636 pr_err("failed Invoke cmd: %d\n", ret);
7637 __qseecom_clean_data_sglistinfo(data);
7638 break;
7639 }
7640 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7641 if ((data->client.app_id == 0) ||
7642 (data->type != QSEECOM_CLIENT_APP)) {
7643 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7644 data->type, data->client.app_id);
7645 ret = -EINVAL;
7646 break;
7647 }
7648 if (qseecom.qsee_version < QSEE_VERSION_40) {
7649 pr_err("GP feature unsupported: qsee ver %u\n",
7650 qseecom.qsee_version);
7651 return -EINVAL;
7652 }
7653 /* Only one client allowed here at a time */
7654 mutex_lock(&app_access_lock);
7655 atomic_inc(&data->ioctl_count);
7656 ret = qseecom_qteec_request_cancellation(data, argp);
7657 atomic_dec(&data->ioctl_count);
7658 wake_up_all(&data->abort_wq);
7659 mutex_unlock(&app_access_lock);
7660 if (ret)
7661 pr_err("failed request_cancellation: %d\n", ret);
7662 break;
7663 }
7664 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7665 atomic_inc(&data->ioctl_count);
7666 ret = qseecom_get_ce_info(data, argp);
7667 if (ret)
7668 pr_err("failed get fde ce pipe info: %d\n", ret);
7669 atomic_dec(&data->ioctl_count);
7670 break;
7671 }
7672 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7673 atomic_inc(&data->ioctl_count);
7674 ret = qseecom_free_ce_info(data, argp);
7675 if (ret)
7676 pr_err("failed get fde ce pipe info: %d\n", ret);
7677 atomic_dec(&data->ioctl_count);
7678 break;
7679 }
7680 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7681 atomic_inc(&data->ioctl_count);
7682 ret = qseecom_query_ce_info(data, argp);
7683 if (ret)
7684 pr_err("failed get fde ce pipe info: %d\n", ret);
7685 atomic_dec(&data->ioctl_count);
7686 break;
7687 }
7688 default:
7689 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7690 return -EINVAL;
7691 }
7692 return ret;
7693}
7694
7695static int qseecom_open(struct inode *inode, struct file *file)
7696{
7697 int ret = 0;
7698 struct qseecom_dev_handle *data;
7699
7700 data = kzalloc(sizeof(*data), GFP_KERNEL);
7701 if (!data)
7702 return -ENOMEM;
7703 file->private_data = data;
7704 data->abort = 0;
7705 data->type = QSEECOM_GENERIC;
7706 data->released = false;
7707 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7708 data->mode = INACTIVE;
7709 init_waitqueue_head(&data->abort_wq);
7710 atomic_set(&data->ioctl_count, 0);
7711 return ret;
7712}
7713
/*
 * qseecom_release() - file_operations .release handler for /dev/qseecom.
 * @inode: unused.
 * @file:  closing file; ->private_data is the per-fd handle to tear down.
 *
 * If the handle was not already released by an ioctl (data->released),
 * performs role-specific cleanup: unregisters listeners (with the
 * listener-abort flag raised around app_access_lock, mirroring the
 * UNREGISTER_LISTENER ioctl), unloads client apps, or unmaps ION memory
 * for generic/secure-service handles.  Then drops any outstanding bus
 * bandwidth or clock votes and frees the handle.
 *
 * Return: 0 on success or the last cleanup error encountered.
 */
static int qseecom_release(struct inode *inode, struct file *file)
{
	struct qseecom_dev_handle *data = file->private_data;
	int ret = 0;

	if (data->released == false) {
		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
			data->type, data->mode, data);
		switch (data->type) {
		case QSEECOM_LISTENER_SERVICE:
			pr_warn("release lsnr svc %d\n", data->listener.id);
			/* Flag abort before taking the lock; clear after. */
			__qseecom_listener_abort_all(1);
			mutex_lock(&app_access_lock);
			ret = qseecom_unregister_listener(data);
			mutex_unlock(&app_access_lock);
			__qseecom_listener_abort_all(0);
			break;
		case QSEECOM_CLIENT_APP:
			mutex_lock(&app_access_lock);
			/* true => app is being unloaded on fd close */
			ret = qseecom_unload_app(data, true);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_SECURE_SERVICE:
		case QSEECOM_GENERIC:
			ret = qseecom_unmap_ion_allocated_memory(data);
			if (ret)
				pr_err("Ion Unmap failed\n");
			break;
		case QSEECOM_UNAVAILABLE_CLIENT_APP:
			break;
		default:
			pr_err("Unsupported clnt_handle_type %d",
				data->type);
			break;
		}
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		if (data->mode != INACTIVE) {
			qseecom_unregister_bus_bandwidth_needs(data);
			/* Scale the bus down only when no one else votes. */
			if (qseecom.cumulative_mode == INACTIVE) {
				ret = __qseecom_set_msm_bus_request(INACTIVE);
				if (ret)
					pr_err("Fail to scale down bus\n");
			}
		}
		mutex_unlock(&qsee_bw_mutex);
	} else {
		if (data->fast_load_enabled == true)
			qsee_disable_clock_vote(data, CLK_SFPB);
		if (data->perf_enabled == true)
			qsee_disable_clock_vote(data, CLK_DFAB);
	}
	kfree(data);

	return ret;
}
7772
7773#ifdef CONFIG_COMPAT
7774#include "compat_qseecom.c"
7775#else
7776#define compat_qseecom_ioctl NULL
7777#endif
7778
/* Character-device entry points for /dev/qseecom; compat_qseecom_ioctl
 * is NULL when CONFIG_COMPAT is disabled (see the #ifdef block above).
 */
static const struct file_operations qseecom_fops = {
		.owner = THIS_MODULE,
		.unlocked_ioctl = qseecom_ioctl,
		.compat_ioctl = compat_qseecom_ioctl,
		.open = qseecom_open,
		.release = qseecom_release
};
7786
7787static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7788{
7789 int rc = 0;
7790 struct device *pdev;
7791 struct qseecom_clk *qclk;
7792 char *core_clk_src = NULL;
7793 char *core_clk = NULL;
7794 char *iface_clk = NULL;
7795 char *bus_clk = NULL;
7796
7797 switch (ce) {
7798 case CLK_QSEE: {
7799 core_clk_src = "core_clk_src";
7800 core_clk = "core_clk";
7801 iface_clk = "iface_clk";
7802 bus_clk = "bus_clk";
7803 qclk = &qseecom.qsee;
7804 qclk->instance = CLK_QSEE;
7805 break;
7806 };
7807 case CLK_CE_DRV: {
7808 core_clk_src = "ce_drv_core_clk_src";
7809 core_clk = "ce_drv_core_clk";
7810 iface_clk = "ce_drv_iface_clk";
7811 bus_clk = "ce_drv_bus_clk";
7812 qclk = &qseecom.ce_drv;
7813 qclk->instance = CLK_CE_DRV;
7814 break;
7815 };
7816 default:
7817 pr_err("Invalid ce hw instance: %d!\n", ce);
7818 return -EIO;
7819 }
7820
7821 if (qseecom.no_clock_support) {
7822 qclk->ce_core_clk = NULL;
7823 qclk->ce_clk = NULL;
7824 qclk->ce_bus_clk = NULL;
7825 qclk->ce_core_src_clk = NULL;
7826 return 0;
7827 }
7828
7829 pdev = qseecom.pdev;
7830
7831 /* Get CE3 src core clk. */
7832 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7833 if (!IS_ERR(qclk->ce_core_src_clk)) {
7834 rc = clk_set_rate(qclk->ce_core_src_clk,
7835 qseecom.ce_opp_freq_hz);
7836 if (rc) {
7837 clk_put(qclk->ce_core_src_clk);
7838 qclk->ce_core_src_clk = NULL;
7839 pr_err("Unable to set the core src clk @%uMhz.\n",
7840 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7841 return -EIO;
7842 }
7843 } else {
7844 pr_warn("Unable to get CE core src clk, set to NULL\n");
7845 qclk->ce_core_src_clk = NULL;
7846 }
7847
7848 /* Get CE core clk */
7849 qclk->ce_core_clk = clk_get(pdev, core_clk);
7850 if (IS_ERR(qclk->ce_core_clk)) {
7851 rc = PTR_ERR(qclk->ce_core_clk);
7852 pr_err("Unable to get CE core clk\n");
7853 if (qclk->ce_core_src_clk != NULL)
7854 clk_put(qclk->ce_core_src_clk);
7855 return -EIO;
7856 }
7857
7858 /* Get CE Interface clk */
7859 qclk->ce_clk = clk_get(pdev, iface_clk);
7860 if (IS_ERR(qclk->ce_clk)) {
7861 rc = PTR_ERR(qclk->ce_clk);
7862 pr_err("Unable to get CE interface clk\n");
7863 if (qclk->ce_core_src_clk != NULL)
7864 clk_put(qclk->ce_core_src_clk);
7865 clk_put(qclk->ce_core_clk);
7866 return -EIO;
7867 }
7868
7869 /* Get CE AXI clk */
7870 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7871 if (IS_ERR(qclk->ce_bus_clk)) {
7872 rc = PTR_ERR(qclk->ce_bus_clk);
7873 pr_err("Unable to get CE BUS interface clk\n");
7874 if (qclk->ce_core_src_clk != NULL)
7875 clk_put(qclk->ce_core_src_clk);
7876 clk_put(qclk->ce_core_clk);
7877 clk_put(qclk->ce_clk);
7878 return -EIO;
7879 }
7880
7881 return rc;
7882}
7883
7884static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7885{
7886 struct qseecom_clk *qclk;
7887
7888 if (ce == CLK_QSEE)
7889 qclk = &qseecom.qsee;
7890 else
7891 qclk = &qseecom.ce_drv;
7892
7893 if (qclk->ce_clk != NULL) {
7894 clk_put(qclk->ce_clk);
7895 qclk->ce_clk = NULL;
7896 }
7897 if (qclk->ce_core_clk != NULL) {
7898 clk_put(qclk->ce_core_clk);
7899 qclk->ce_core_clk = NULL;
7900 }
7901 if (qclk->ce_bus_clk != NULL) {
7902 clk_put(qclk->ce_bus_clk);
7903 qclk->ce_bus_clk = NULL;
7904 }
7905 if (qclk->ce_core_src_clk != NULL) {
7906 clk_put(qclk->ce_core_src_clk);
7907 qclk->ce_core_src_clk = NULL;
7908 }
7909 qclk->instance = CLK_INVALID;
7910}
7911
/*
 * qseecom_retrieve_ce_data() - parse crypto-engine (CE) configuration from
 * the device tree and populate qseecom.ce_info.
 *
 * Two DT layouts are supported:
 *  - new layout: "qcom,full-disk-encrypt-info" / "qcom,per-file-encrypt-info"
 *    tables of struct qseecom_crypto_info entries, grouped into units by
 *    their unit_num field;
 *  - legacy layout (old_db stays true): single pipe-pair properties
 *    "qcom,disk-encrypt-pipe-pair" / "qcom,file-encrypt-pipe-pair" plus an
 *    optional list of HLOS CE HW instances.
 *
 * On any failure, everything allocated here (including the per-unit
 * ce_pipe_entry arrays) is freed in the "out" unwind and a negative errno
 * is returned; on success the caller owns qseecom.ce_info.{fde,pfe}.
 */
static int qseecom_retrieve_ce_data(struct platform_device *pdev)
{
	int rc = 0;
	uint32_t hlos_num_ce_hw_instances;
	uint32_t disk_encrypt_pipe;
	uint32_t file_encrypt_pipe;
	uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
	int i;
	const int *tbl;
	int size;
	int entry;
	struct qseecom_crypto_info *pfde_tbl = NULL;
	struct qseecom_crypto_info *p;
	int tbl_size;
	int j;
	bool old_db = true;	/* flips to false if either new-layout table exists */
	struct qseecom_ce_info_use *pce_info_use;
	uint32_t *unit_tbl = NULL;	/* scratch list of distinct unit numbers */
	int total_units = 0;
	struct qseecom_ce_pipe_entry *pce_entry;

	qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
	qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;

	/* CE HW instance used by QSEE itself is mandatory. */
	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,qsee-ce-hw-instance",
				&qseecom.ce_info.qsee_ce_hw_instance)) {
		pr_err("Fail to get qsee ce hw instance information.\n");
		rc = -EINVAL;
		goto out;
	} else {
		pr_debug("qsee-ce-hw-instance=0x%x\n",
			qseecom.ce_info.qsee_ce_hw_instance);
	}

	qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-fde");
	qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-pfe");

	if (!qseecom.support_pfe && !qseecom.support_fde) {
		pr_warn("Device does not support PFE/FDE");
		goto out;
	}

	/* ---- FDE, new device-tree layout ---- */
	if (qseecom.support_fde)
		tbl = of_get_property((&pdev->dev)->of_node,
				"qcom,full-disk-encrypt-info", &size);
	else
		tbl = NULL;
	if (tbl) {
		old_db = false;
		/* Property must be a whole number of crypto_info records. */
		if (size % sizeof(struct qseecom_crypto_info)) {
			pr_err("full-disk-encrypt-info tbl size(%d)\n",
				size);
			rc = -EINVAL;
			goto out;
		}
		tbl_size = size / sizeof
				(struct qseecom_crypto_info);

		pfde_tbl = kzalloc(size, GFP_KERNEL);
		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
		total_units = 0;

		if (!pfde_tbl || !unit_tbl) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		if (of_property_read_u32_array((&pdev->dev)->of_node,
				"qcom,full-disk-encrypt-info",
				(u32 *)pfde_tbl, size/sizeof(u32))) {
			pr_err("failed to read full-disk-encrypt-info tbl\n");
			rc = -EINVAL;
			goto out;
		}

		/* Collect the distinct unit numbers present in the table. */
		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
			for (j = 0; j < total_units; j++) {
				if (p->unit_num == *(unit_tbl + j))
					break;
			}
			if (j == total_units) {
				*(unit_tbl + total_units) = p->unit_num;
				total_units++;
			}
		}

		qseecom.ce_info.num_fde = total_units;
		pce_info_use = qseecom.ce_info.fde = kcalloc(
			total_units, sizeof(struct qseecom_ce_info_use),
				GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}

		/* One ce_info_use per unit; copy its pipe-pair entries in. */
		for (j = 0; j < total_units; j++, pce_info_use++) {
			pce_info_use->unit_num = *(unit_tbl + j);
			pce_info_use->alloc = false;
			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
			pce_info_use->num_ce_pipe_entries = 0;
			pce_info_use->ce_pipe_entry = NULL;
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num)
					pce_info_use->num_ce_pipe_entries++;
			}

			entry = pce_info_use->num_ce_pipe_entries;
			pce_entry = pce_info_use->ce_pipe_entry =
				kcalloc(entry,
					sizeof(struct qseecom_ce_pipe_entry),
					GFP_KERNEL);
			if (pce_entry == NULL) {
				pr_err("failed to alloc memory\n");
				rc = -ENOMEM;
				goto out;
			}

			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num) {
					pce_entry->ce_num = p->ce;
					pce_entry->ce_pipe_pair =
							p->pipe_pair;
					pce_entry->valid = true;
					pce_entry++;
				}
			}
		}
		kfree(unit_tbl);
		unit_tbl = NULL;
		kfree(pfde_tbl);
		pfde_tbl = NULL;
	}

	/* ---- PFE, new device-tree layout (same algorithm as FDE above) ---- */
	if (qseecom.support_pfe)
		tbl = of_get_property((&pdev->dev)->of_node,
				"qcom,per-file-encrypt-info", &size);
	else
		tbl = NULL;
	if (tbl) {
		old_db = false;
		if (size % sizeof(struct qseecom_crypto_info)) {
			pr_err("per-file-encrypt-info tbl size(%d)\n",
				size);
			rc = -EINVAL;
			goto out;
		}
		tbl_size = size / sizeof
				(struct qseecom_crypto_info);

		pfde_tbl = kzalloc(size, GFP_KERNEL);
		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
		total_units = 0;
		if (!pfde_tbl || !unit_tbl) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		if (of_property_read_u32_array((&pdev->dev)->of_node,
				"qcom,per-file-encrypt-info",
				(u32 *)pfde_tbl, size/sizeof(u32))) {
			pr_err("failed to read per-file-encrypt-info tbl\n");
			rc = -EINVAL;
			goto out;
		}

		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
			for (j = 0; j < total_units; j++) {
				if (p->unit_num == *(unit_tbl + j))
					break;
			}
			if (j == total_units) {
				*(unit_tbl + total_units) = p->unit_num;
				total_units++;
			}
		}

		qseecom.ce_info.num_pfe = total_units;
		pce_info_use = qseecom.ce_info.pfe = kcalloc(
			total_units, sizeof(struct qseecom_ce_info_use),
				GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}

		for (j = 0; j < total_units; j++, pce_info_use++) {
			pce_info_use->unit_num = *(unit_tbl + j);
			pce_info_use->alloc = false;
			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
			pce_info_use->num_ce_pipe_entries = 0;
			pce_info_use->ce_pipe_entry = NULL;
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num)
					pce_info_use->num_ce_pipe_entries++;
			}

			entry = pce_info_use->num_ce_pipe_entries;
			pce_entry = pce_info_use->ce_pipe_entry =
				kcalloc(entry,
					sizeof(struct qseecom_ce_pipe_entry),
					GFP_KERNEL);
			if (pce_entry == NULL) {
				pr_err("failed to alloc memory\n");
				rc = -ENOMEM;
				goto out;
			}

			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num) {
					pce_entry->ce_num = p->ce;
					pce_entry->ce_pipe_pair =
							p->pipe_pair;
					pce_entry->valid = true;
					pce_entry++;
				}
			}
		}
		kfree(unit_tbl);
		unit_tbl = NULL;
		kfree(pfde_tbl);
		pfde_tbl = NULL;
	}

	/* New layout handled above: skip all legacy parsing. */
	if (!old_db)
		goto out1;

	/* ---- legacy layout ---- */
	if (of_property_read_bool((&pdev->dev)->of_node,
				"qcom,support-multiple-ce-hw-instance")) {
		if (of_property_read_u32((&pdev->dev)->of_node,
			"qcom,hlos-num-ce-hw-instances",
				&hlos_num_ce_hw_instances)) {
			pr_err("Fail: get hlos number of ce hw instance\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		hlos_num_ce_hw_instances = 1;
	}

	/* Bounded by the fixed-size hlos_ce_hw_instance[] array above. */
	if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
		pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
			MAX_CE_PIPE_PAIR_PER_UNIT);
		rc = -EINVAL;
		goto out;
	}

	if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
			hlos_num_ce_hw_instances)) {
		pr_err("Fail: get hlos ce hw instance info\n");
		rc = -EINVAL;
		goto out;
	}

	if (qseecom.support_fde) {
		/* Legacy FDE: one default unit, one pipe pair per HLOS CE. */
		pce_info_use = qseecom.ce_info.fde =
			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* by default for old db */
		qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
		pce_info_use->alloc = false;
		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
		pce_info_use->ce_pipe_entry = NULL;
		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,disk-encrypt-pipe-pair",
				&disk_encrypt_pipe)) {
			pr_err("Fail to get FDE pipe information.\n");
			rc = -EINVAL;
				goto out;
		} else {
			pr_debug("disk-encrypt-pipe-pair=0x%x",
				disk_encrypt_pipe);
		}
		entry = pce_info_use->num_ce_pipe_entries =
				hlos_num_ce_hw_instances;
		pce_entry = pce_info_use->ce_pipe_entry =
			kcalloc(entry,
				sizeof(struct qseecom_ce_pipe_entry),
				GFP_KERNEL);
		if (pce_entry == NULL) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0; i < entry; i++) {
			pce_entry->ce_num = hlos_ce_hw_instance[i];
			pce_entry->ce_pipe_pair = disk_encrypt_pipe;
			pce_entry->valid = 1;
			pce_entry++;
		}
	} else {
		pr_warn("Device does not support FDE");
		disk_encrypt_pipe = 0xff;	/* sentinel; not read afterwards */
	}
	if (qseecom.support_pfe) {
		/* Legacy PFE mirrors the legacy FDE setup above. */
		pce_info_use = qseecom.ce_info.pfe =
			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* by default for old db */
		qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
		pce_info_use->alloc = false;
		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
		pce_info_use->ce_pipe_entry = NULL;

		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,file-encrypt-pipe-pair",
				&file_encrypt_pipe)) {
			pr_err("Fail to get PFE pipe information.\n");
			rc = -EINVAL;
			goto out;
		} else {
			pr_debug("file-encrypt-pipe-pair=0x%x",
				file_encrypt_pipe);
		}
		entry = pce_info_use->num_ce_pipe_entries =
				hlos_num_ce_hw_instances;
		pce_entry = pce_info_use->ce_pipe_entry =
			kcalloc(entry,
				sizeof(struct qseecom_ce_pipe_entry),
				GFP_KERNEL);
		if (pce_entry == NULL) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0; i < entry; i++) {
			pce_entry->ce_num = hlos_ce_hw_instance[i];
			pce_entry->ce_pipe_pair = file_encrypt_pipe;
			pce_entry->valid = 1;
			pce_entry++;
		}
	} else {
		pr_warn("Device does not support PFE");
		file_encrypt_pipe = 0xff;	/* sentinel; not read afterwards */
	}

out1:
	/*
	 * NOTE(review): on the new-layout path hlos_ce_hw_instance[] is still
	 * all-zero here, so ce_drv.instance becomes 0 -- presumably intended
	 * as "no legacy HLOS CE instance"; confirm against consumers.
	 */
	qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
	qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
out:
	if (rc) {
		/* Unwind every per-unit allocation made on either path. */
		if (qseecom.ce_info.fde) {
			pce_info_use = qseecom.ce_info.fde;
			for (i = 0; i < qseecom.ce_info.num_fde; i++) {
				pce_entry = pce_info_use->ce_pipe_entry;
				kfree(pce_entry);
				pce_info_use++;
			}
		}
		kfree(qseecom.ce_info.fde);
		qseecom.ce_info.fde = NULL;
		if (qseecom.ce_info.pfe) {
			pce_info_use = qseecom.ce_info.pfe;
			for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
				pce_entry = pce_info_use->ce_pipe_entry;
				kfree(pce_entry);
				pce_info_use++;
			}
		}
		kfree(qseecom.ce_info.pfe);
		qseecom.ce_info.pfe = NULL;
	}
	/* kfree(NULL) is a no-op: safe whether or not the tables were used. */
	kfree(unit_tbl);
	kfree(pfde_tbl);
	return rc;
}
8293
8294static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8295 void __user *argp)
8296{
8297 struct qseecom_ce_info_req req;
8298 struct qseecom_ce_info_req *pinfo = &req;
8299 int ret = 0;
8300 int i;
8301 unsigned int entries;
8302 struct qseecom_ce_info_use *pce_info_use, *p;
8303 int total = 0;
8304 bool found = false;
8305 struct qseecom_ce_pipe_entry *pce_entry;
8306
8307 ret = copy_from_user(pinfo, argp,
8308 sizeof(struct qseecom_ce_info_req));
8309 if (ret) {
8310 pr_err("copy_from_user failed\n");
8311 return ret;
8312 }
8313
8314 switch (pinfo->usage) {
8315 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8316 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8317 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8318 if (qseecom.support_fde) {
8319 p = qseecom.ce_info.fde;
8320 total = qseecom.ce_info.num_fde;
8321 } else {
8322 pr_err("system does not support fde\n");
8323 return -EINVAL;
8324 }
8325 break;
8326 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8327 if (qseecom.support_pfe) {
8328 p = qseecom.ce_info.pfe;
8329 total = qseecom.ce_info.num_pfe;
8330 } else {
8331 pr_err("system does not support pfe\n");
8332 return -EINVAL;
8333 }
8334 break;
8335 default:
8336 pr_err("unsupported usage %d\n", pinfo->usage);
8337 return -EINVAL;
8338 }
8339
8340 pce_info_use = NULL;
8341 for (i = 0; i < total; i++) {
8342 if (!p->alloc)
8343 pce_info_use = p;
8344 else if (!memcmp(p->handle, pinfo->handle,
8345 MAX_CE_INFO_HANDLE_SIZE)) {
8346 pce_info_use = p;
8347 found = true;
8348 break;
8349 }
8350 p++;
8351 }
8352
8353 if (pce_info_use == NULL)
8354 return -EBUSY;
8355
8356 pinfo->unit_num = pce_info_use->unit_num;
8357 if (!pce_info_use->alloc) {
8358 pce_info_use->alloc = true;
8359 memcpy(pce_info_use->handle,
8360 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8361 }
8362 if (pce_info_use->num_ce_pipe_entries >
8363 MAX_CE_PIPE_PAIR_PER_UNIT)
8364 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8365 else
8366 entries = pce_info_use->num_ce_pipe_entries;
8367 pinfo->num_ce_pipe_entries = entries;
8368 pce_entry = pce_info_use->ce_pipe_entry;
8369 for (i = 0; i < entries; i++, pce_entry++)
8370 pinfo->ce_pipe_entry[i] = *pce_entry;
8371 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8372 pinfo->ce_pipe_entry[i].valid = 0;
8373
8374 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8375 pr_err("copy_to_user failed\n");
8376 ret = -EFAULT;
8377 }
8378 return ret;
8379}
8380
8381static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8382 void __user *argp)
8383{
8384 struct qseecom_ce_info_req req;
8385 struct qseecom_ce_info_req *pinfo = &req;
8386 int ret = 0;
8387 struct qseecom_ce_info_use *p;
8388 int total = 0;
8389 int i;
8390 bool found = false;
8391
8392 ret = copy_from_user(pinfo, argp,
8393 sizeof(struct qseecom_ce_info_req));
8394 if (ret)
8395 return ret;
8396
8397 switch (pinfo->usage) {
8398 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8399 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8400 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8401 if (qseecom.support_fde) {
8402 p = qseecom.ce_info.fde;
8403 total = qseecom.ce_info.num_fde;
8404 } else {
8405 pr_err("system does not support fde\n");
8406 return -EINVAL;
8407 }
8408 break;
8409 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8410 if (qseecom.support_pfe) {
8411 p = qseecom.ce_info.pfe;
8412 total = qseecom.ce_info.num_pfe;
8413 } else {
8414 pr_err("system does not support pfe\n");
8415 return -EINVAL;
8416 }
8417 break;
8418 default:
8419 pr_err("unsupported usage %d\n", pinfo->usage);
8420 return -EINVAL;
8421 }
8422
8423 for (i = 0; i < total; i++) {
8424 if (p->alloc &&
8425 !memcmp(p->handle, pinfo->handle,
8426 MAX_CE_INFO_HANDLE_SIZE)) {
8427 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8428 p->alloc = false;
8429 found = true;
8430 break;
8431 }
8432 p++;
8433 }
8434 return ret;
8435}
8436
8437static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8438 void __user *argp)
8439{
8440 struct qseecom_ce_info_req req;
8441 struct qseecom_ce_info_req *pinfo = &req;
8442 int ret = 0;
8443 int i;
8444 unsigned int entries;
8445 struct qseecom_ce_info_use *pce_info_use, *p;
8446 int total = 0;
8447 bool found = false;
8448 struct qseecom_ce_pipe_entry *pce_entry;
8449
8450 ret = copy_from_user(pinfo, argp,
8451 sizeof(struct qseecom_ce_info_req));
8452 if (ret)
8453 return ret;
8454
8455 switch (pinfo->usage) {
8456 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8457 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8458 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8459 if (qseecom.support_fde) {
8460 p = qseecom.ce_info.fde;
8461 total = qseecom.ce_info.num_fde;
8462 } else {
8463 pr_err("system does not support fde\n");
8464 return -EINVAL;
8465 }
8466 break;
8467 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8468 if (qseecom.support_pfe) {
8469 p = qseecom.ce_info.pfe;
8470 total = qseecom.ce_info.num_pfe;
8471 } else {
8472 pr_err("system does not support pfe\n");
8473 return -EINVAL;
8474 }
8475 break;
8476 default:
8477 pr_err("unsupported usage %d\n", pinfo->usage);
8478 return -EINVAL;
8479 }
8480
8481 pce_info_use = NULL;
8482 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8483 pinfo->num_ce_pipe_entries = 0;
8484 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8485 pinfo->ce_pipe_entry[i].valid = 0;
8486
8487 for (i = 0; i < total; i++) {
8488
8489 if (p->alloc && !memcmp(p->handle,
8490 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8491 pce_info_use = p;
8492 found = true;
8493 break;
8494 }
8495 p++;
8496 }
8497 if (!pce_info_use)
8498 goto out;
8499 pinfo->unit_num = pce_info_use->unit_num;
8500 if (pce_info_use->num_ce_pipe_entries >
8501 MAX_CE_PIPE_PAIR_PER_UNIT)
8502 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8503 else
8504 entries = pce_info_use->num_ce_pipe_entries;
8505 pinfo->num_ce_pipe_entries = entries;
8506 pce_entry = pce_info_use->ce_pipe_entry;
8507 for (i = 0; i < entries; i++, pce_entry++)
8508 pinfo->ce_pipe_entry[i] = *pce_entry;
8509 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8510 pinfo->ce_pipe_entry[i].valid = 0;
8511out:
8512 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8513 pr_err("copy_to_user failed\n");
8514 ret = -EFAULT;
8515 }
8516 return ret;
8517}
8518
8519/*
8520 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8521 * then whitelist feature is not supported.
8522 */
8523static int qseecom_check_whitelist_feature(void)
8524{
8525 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8526
8527 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8528}
8529
/*
 * qseecom_probe() - platform-driver probe for the qseecom device.
 *
 * Sets up, in order: the /dev/qseecom character device, internal lists and
 * locks, a QSEE version query over SCM, the ION client, device-tree driven
 * configuration (CE data, clocks, bus scaling), optional secure-app-region
 * notification to TZ, and finally marks the driver READY.
 *
 * Each failure point unwinds everything set up before it via the goto
 * ladder at the bottom. Returns 0 on success, negative errno on failure.
 */
static int qseecom_probe(struct platform_device *pdev)
{
	int rc;
	int i;
	uint32_t feature = 10;	/* argument for the QSEE version SCM query below */
	struct device *class_dev;
	struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
	struct qseecom_command_scm_resp resp;
	struct qseecom_ce_info_use *pce_info_use = NULL;

	/* Reset global driver state before any setup work. */
	qseecom.qsee_bw_count = 0;
	qseecom.qsee_perf_client = 0;
	qseecom.qsee_sfpb_bw_count = 0;

	qseecom.qsee.ce_core_clk = NULL;
	qseecom.qsee.ce_clk = NULL;
	qseecom.qsee.ce_core_src_clk = NULL;
	qseecom.qsee.ce_bus_clk = NULL;

	qseecom.cumulative_mode = 0;
	qseecom.current_mode = INACTIVE;
	qseecom.support_bus_scaling = false;
	qseecom.support_fde = false;
	qseecom.support_pfe = false;

	qseecom.ce_drv.ce_core_clk = NULL;
	qseecom.ce_drv.ce_clk = NULL;
	qseecom.ce_drv.ce_core_src_clk = NULL;
	qseecom.ce_drv.ce_bus_clk = NULL;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);

	qseecom.app_block_ref_cnt = 0;
	init_waitqueue_head(&qseecom.app_block_wq);
	qseecom.whitelist_support = true;

	/* Character device: region, class, device node, cdev. */
	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
			QSEECOM_DEV);
	if (IS_ERR(class_dev)) {
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&qseecom.cdev, &qseecom_fops);
	qseecom.cdev.owner = THIS_MODULE;

	rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}

	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
	spin_lock_init(&qseecom.registered_listener_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
	spin_lock_init(&qseecom.registered_app_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
	spin_lock_init(&qseecom.registered_kclient_list_lock);
	init_waitqueue_head(&qseecom.send_resp_wq);
	qseecom.send_resp_flag = 0;

	/* Ask TZ for its QSEE version via SCM. */
	qseecom.qsee_version = QSEEE_VERSION_00;
	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
		&resp, sizeof(resp));
	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
	if (rc) {
		pr_err("Failed to get QSEE version info %d\n", rc);
		goto exit_del_cdev;
	}
	qseecom.qsee_version = resp.result;
	qseecom.qseos_version = QSEOS_VERSION_14;
	qseecom.commonlib_loaded = false;
	qseecom.commonlib64_loaded = false;
	qseecom.pdev = class_dev;
	/* Create ION msm client */
	qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
	if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
		pr_err("Ion client cannot be created\n");
		rc = -ENOMEM;
		goto exit_del_cdev;
	}

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		/* Device-tree path: read all qcom,* configuration flags. */
		qseecom.pdev->of_node = pdev->dev.of_node;
		qseecom.support_bus_scaling =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-bus-scaling");
		rc = qseecom_retrieve_ce_data(pdev);
		if (rc)
			goto exit_destroy_ion_client;
		qseecom.appsbl_qseecom_support =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,appsbl-qseecom-support");
		pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
				qseecom.appsbl_qseecom_support);

		qseecom.commonlib64_loaded =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,commonlib64-loaded-by-uefi");
		pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
				qseecom.commonlib64_loaded);
		qseecom.fde_key_size =
			of_property_read_bool((&pdev->dev)->of_node,
						"qcom,fde-key-size");
		qseecom.no_clock_support =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,no-clock-support");
		/*
		 * NOTE(review): this branch looks inverted -- the "handled by
		 * other subsystem" message prints when no-clock-support is
		 * absent (i.e. HLOS does manage the clocks). Log-only, but
		 * worth confirming against the intended DT semantics.
		 */
		if (!qseecom.no_clock_support) {
			pr_info("qseecom clocks handled by other subsystem\n");
		} else {
			pr_info("no-clock-support=0x%x",
						qseecom.no_clock_support);
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,qsee-reentrancy-support",
					&qseecom.qsee_reentrancy_support)) {
			pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
			qseecom.qsee_reentrancy_support = 0;
		} else {
			pr_warn("qseecom.qsee_reentrancy_support = %d\n",
				qseecom.qsee_reentrancy_support);
		}

		/*
		 * The qseecom bus scaling flag can not be enabled when
		 * crypto clock is not handled by HLOS.
		 */
		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
			pr_err("support_bus_scaling flag can not be enabled.\n");
			rc = -EINVAL;
			goto exit_destroy_ion_client;
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,ce-opp-freq",
					&qseecom.ce_opp_freq_hz)) {
			pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
			qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
		}
		rc = __qseecom_init_clk(CLK_QSEE);
		if (rc)
			goto exit_destroy_ion_client;

		/*
		 * When FDE/PFE use a different CE instance than QSEE, that
		 * instance needs its own clocks; otherwise share QSEE's.
		 */
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde)) {
			rc = __qseecom_init_clk(CLK_CE_DRV);
			if (rc) {
				__qseecom_deinit_clk(CLK_QSEE);
				goto exit_destroy_ion_client;
			}
		} else {
			struct qseecom_clk *qclk;

			qclk = &qseecom.qsee;
			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
			qseecom.ce_drv.ce_clk = qclk->ce_clk;
			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
		}

		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						msm_bus_cl_get_pdata(pdev);
		/*
		 * QSEE >= 2 and neither the bootloader nor TZ has already
		 * registered the secure app region: notify TZ of it now.
		 */
		if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
			(!qseecom.is_apps_region_protected &&
			!qseecom.appsbl_qseecom_support)) {
			struct resource *resource = NULL;
			struct qsee_apps_region_info_ireq req;
			struct qsee_apps_region_info_64bit_ireq req_64bit;
			struct qseecom_command_scm_resp resp;
			void *cmd_buf = NULL;
			size_t cmd_len;

			resource = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "secapp-region");
			if (resource) {
				/* 32-bit vs 64-bit request layout by QSEE version. */
				if (qseecom.qsee_version < QSEE_VERSION_40) {
					req.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req.addr = (uint32_t)resource->start;
					req.size = resource_size(resource);
					cmd_buf = (void *)&req;
					cmd_len = sizeof(struct
						qsee_apps_region_info_ireq);
					pr_warn("secure app region addr=0x%x size=0x%x",
							req.addr, req.size);
				} else {
					req_64bit.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req_64bit.addr = resource->start;
					req_64bit.size = resource_size(
							resource);
					cmd_buf = (void *)&req_64bit;
					cmd_len = sizeof(struct
						qsee_apps_region_info_64bit_ireq);
					pr_warn("secure app region addr=0x%llx size=0x%x",
						req_64bit.addr, req_64bit.size);
				}
			} else {
				pr_err("Fail to get secure app region info\n");
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
			/* CE clock must be on around the SCM call. */
			rc = __qseecom_enable_clk(CLK_QSEE);
			if (rc) {
				pr_err("CLK_QSEE enabling failed (%d)\n", rc);
				rc = -EIO;
				goto exit_deinit_clock;
			}
			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len,
					&resp, sizeof(resp));
			__qseecom_disable_clk(CLK_QSEE);
			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
				pr_err("send secapp reg fail %d resp.res %d\n",
							rc, resp.result);
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
		}
		/*
		 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
		 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
		 * Pls add "qseecom.commonlib64_loaded = true" here too.
		 */
		if (qseecom.is_apps_region_protected ||
					qseecom.appsbl_qseecom_support)
			qseecom.commonlib_loaded = true;
	} else {
		/* Legacy (non-DT) path: bus data comes from platform_data. */
		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						pdev->dev.platform_data;
	}
	if (qseecom.support_bus_scaling) {
		init_timer(&(qseecom.bw_scale_down_timer));
		INIT_WORK(&qseecom.bw_inactive_req_ws,
				qseecom_bw_inactive_req_work);
		qseecom.bw_scale_down_timer.function =
				qseecom_scale_bus_bandwidth_timer_callback;
	}
	qseecom.timer_running = false;
	qseecom.qsee_perf_client = msm_bus_scale_register_client(
					qseecom_platform_support);

	qseecom.whitelist_support = qseecom_check_whitelist_feature();
	pr_warn("qseecom.whitelist_support = %d\n",
				qseecom.whitelist_support);

	if (!qseecom.qsee_perf_client)
		pr_err("Unable to register bus client\n");

	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return 0;

exit_deinit_clock:
	__qseecom_deinit_clk(CLK_QSEE);
	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
		(qseecom.support_pfe || qseecom.support_fde))
		__qseecom_deinit_clk(CLK_CE_DRV);
exit_destroy_ion_client:
	/* Free the per-unit CE tables built by qseecom_retrieve_ce_data(). */
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.fde);
	}
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.pfe);
	}
	ion_client_destroy(qseecom.ion_clnt);
exit_del_cdev:
	cdev_del(&qseecom.cdev);
exit_destroy_device:
	device_destroy(driver_class, qseecom_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qseecom_device_no, 1);
	return rc;
}
8830
/*
 * qseecom_remove() - platform-driver remove; tears down everything probe
 * set up, in reverse order.
 *
 * Unloads every registered kernel-client app, then the common library,
 * releases bus-scaling and timer resources, frees the CE info tables, the
 * clocks, the ION client, and finally the character device.
 */
static int qseecom_remove(struct platform_device *pdev)
{
	struct qseecom_registered_kclient_list *kclient = NULL;
	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
	unsigned long flags = 0;
	int ret = 0;
	int i;
	struct qseecom_ce_pipe_entry *pce_entry;
	struct qseecom_ce_info_use *pce_info_use;

	/* Block new ioctl activity before tearing anything down. */
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);

	/* _safe variant: entries are deleted while walking the list. */
	list_for_each_entry_safe(kclient, kclient_tmp,
		&qseecom.registered_kclient_list_head, list) {

		/* Break the loop if client handle is NULL */
		if (!kclient->handle) {
			list_del(&kclient->list);
			kzfree(kclient);
			break;
		}

		list_del(&kclient->list);
		mutex_lock(&app_access_lock);
		ret = qseecom_unload_app(kclient->handle->dev, false);
		mutex_unlock(&app_access_lock);
		/* Only free the handle if the TZ app actually unloaded. */
		if (!ret) {
			kzfree(kclient->handle->dev);
			kzfree(kclient->handle);
			kzfree(kclient);
		}
	}

	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);

	if (qseecom.qseos_version > QSEEE_VERSION_00)
		qseecom_unload_commonlib_image();

	/* Drop any outstanding bus-bandwidth vote, then the client itself. */
	if (qseecom.qsee_perf_client)
		msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
									0);
	if (pdev->dev.platform_data != NULL)
		msm_bus_scale_unregister_client(qseecom.qsee_perf_client);

	if (qseecom.support_bus_scaling) {
		cancel_work_sync(&qseecom.bw_inactive_req_ws);
		del_timer_sync(&qseecom.bw_scale_down_timer);
	}

	/* Free per-unit pipe-entry arrays, then the unit arrays themselves. */
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.fde);
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.pfe);

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		__qseecom_deinit_clk(CLK_QSEE);
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde))
			__qseecom_deinit_clk(CLK_CE_DRV);
	}

	ion_client_destroy(qseecom.ion_clnt);

	cdev_del(&qseecom.cdev);

	device_destroy(driver_class, qseecom_device_no);

	class_destroy(driver_class);

	unregister_chrdev_region(qseecom_device_no, 1);

	return ret;
}
8920
/*
 * qseecom_suspend() - legacy platform-PM suspend hook.
 *
 * Marks the driver SUSPENDed, drops the bus-bandwidth vote to INACTIVE,
 * gates the QSEE CE clocks if any client still holds them, and stops the
 * bandwidth scale-down timer. Lock order (qsee_bw_mutex before
 * clk_access_lock) matches the rest of the driver. Always returns 0.
 */
static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
	/* Nothing to gate when HLOS does not own the clocks. */
	if (qseecom.no_clock_support)
		return 0;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);

	if (qseecom.current_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, INACTIVE);
		if (ret)
			pr_err("Fail to scale down bus\n");
		else
			qseecom.current_mode = INACTIVE;
	}

	/* Clients still hold the clocks: gate them for suspend anyway. */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
		if (qclk->ce_core_clk != NULL)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
	}

	del_timer_sync(&(qseecom.bw_scale_down_timer));
	qseecom.timer_running = false;

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	cancel_work_sync(&qseecom.bw_inactive_req_ws);

	return 0;
}
8961
/*
 * qseecom_resume() - legacy platform-PM resume hook; undoes
 * qseecom_suspend().
 *
 * Restores the pre-suspend bus-bandwidth vote (capped at HIGH), re-enables
 * any CE clocks that clients were holding, and re-arms the bandwidth
 * scale-down timer. On a clock failure the already-enabled clocks are
 * unwound (ce_bus -> ce -> core order via the error labels) and -EIO is
 * returned; the driver state is set READY in all cases.
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qseecom.no_clock_support)
		goto exit;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	/* Cap the restored vote at HIGH. */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	if (qseecom.cumulative_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	/* Re-enable clocks that were gated with clients still holding them. */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}

	/* Re-arm the scale-down timer if anything is active again. */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;

ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	/*
	 * NOTE(review): state is set READY even on the -EIO path --
	 * presumably deliberate so the device stays usable; confirm.
	 */
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
9041
/* Device-tree match table: binds this driver to "qcom,qseecom" nodes. */
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}
};
9048
/* Platform driver descriptor; uses the legacy suspend/resume callbacks. */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.owner = THIS_MODULE,
		.of_match_table = qseecom_match,
	},
};
9060
/* Module init: register the platform driver; probe runs on DT match. */
static int qseecom_init(void)
{
	return platform_driver_register(&qseecom_plat_driver);
}
9065
/* Module exit: unregister the platform driver (invokes qseecom_remove). */
static void qseecom_exit(void)
{
	platform_driver_unregister(&qseecom_plat_driver);
}
9070
9071MODULE_LICENSE("GPL v2");
9072MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9073
9074module_init(qseecom_init);
9075module_exit(qseecom_exit);