blob: 63184bfaff56229ed4d68f377a59cc12260041dd [file] [log] [blame]
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
Zhen Kong3d1d92f2018-02-02 17:21:04 -08004 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
54#define QSEECOM_DEV "qseecom"
55#define QSEOS_VERSION_14 0x14
56#define QSEEE_VERSION_00 0x400000
57#define QSEE_VERSION_01 0x401000
58#define QSEE_VERSION_02 0x402000
59#define QSEE_VERSION_03 0x403000
60#define QSEE_VERSION_04 0x404000
61#define QSEE_VERSION_05 0x405000
62#define QSEE_VERSION_20 0x800000
63#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
64
65#define QSEE_CE_CLK_100MHZ 100000000
66#define CE_CLK_DIV 1000000
67
Mohamed Sunfeer105a07b2018-08-29 13:52:40 +053068#define QSEECOM_MAX_SG_ENTRY 4096
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070069#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
70 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
71
72#define QSEECOM_INVALID_KEY_ID 0xff
73
74/* Save partition image hash for authentication check */
75#define SCM_SAVE_PARTITION_HASH_ID 0x01
76
77/* Check if enterprise security is activate */
78#define SCM_IS_ACTIVATED_ID 0x02
79
80/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
81#define SCM_MDTP_CIPHER_DIP 0x01
82
83/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
84#define MAX_DIP 0x20000
85
86#define RPMB_SERVICE 0x2000
87#define SSD_SERVICE 0x3000
88
89#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
90#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
91#define TWO 2
92#define QSEECOM_UFS_ICE_CE_NUM 10
93#define QSEECOM_SDCC_ICE_CE_NUM 20
94#define QSEECOM_ICE_FDE_KEY_INDEX 0
95
96#define PHY_ADDR_4G (1ULL<<32)
97
98#define QSEECOM_STATE_NOT_READY 0
99#define QSEECOM_STATE_SUSPEND 1
100#define QSEECOM_STATE_READY 2
101#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
102
103/*
104 * default ce info unit to 0 for
105 * services which
106 * support only single instance.
107 * Most of services are in this category.
108 */
109#define DEFAULT_CE_INFO_UNIT 0
110#define DEFAULT_NUM_CE_INFO_UNIT 1
111
/*
 * Bus/clock identifiers used by the bandwidth vote helpers
 * (qsee_vote_for_clock()/qsee_disable_clock_vote() take one of these
 * as their int32_t argument).  DFAB/SFPB are MSM bus names.
 */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};
116
/*
 * ICE full-disk-encryption key-size selector, encoded as a field that
 * starts at bit QSEECOM_ICE_FDE_KEY_SIZE_MASK (= 2) of a flags word.
 * NOTE(review): despite its name, QSEECOM_ICE_FDE_KEY_SIZE_MASK is used
 * here as a shift amount, not a mask.
 */
enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};
125
/*
 * What a qseecom device handle represents; stored in
 * struct qseecom_dev_handle::type and used to pick between the
 * client/listener members of its union.
 */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,		/* handle bound to a loaded TZ app */
	QSEECOM_LISTENER_SERVICE,	/* handle registered as a listener */
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,		/* not yet bound to app or listener */
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};
133
/*
 * Which crypto-engine clock bank is being operated on; indexes the
 * qseecom.qsee / qseecom.ce_drv clock sets (see struct qseecom_clk).
 */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,	/* CE instance owned by QSEE */
	CLK_CE_DRV,	/* CE instance owned by the driver */
	CLK_INVALID,
};
139
140static struct class *driver_class;
141static dev_t qseecom_device_no;
142
143static DEFINE_MUTEX(qsee_bw_mutex);
144static DEFINE_MUTEX(app_access_lock);
145static DEFINE_MUTEX(clk_access_lock);
146
/*
 * One scatter/gather descriptor entry shared with TZ.  The bit layout
 * of indexAndFlags and the two meanings of sizeOrCount are described
 * in the comment block that follows this definition.
 */
struct sglist_info {
	uint32_t indexAndFlags;
	uint32_t sizeOrCount;
};
151
152/*
153 * The 31th bit indicates only one or multiple physical address inside
154 * the request buffer. If it is set, the index locates a single physical addr
155 * inside the request buffer, and `sizeOrCount` is the size of the memory being
156 * shared at that physical address.
157 * Otherwise, the index locates an array of {start, len} pairs (a
158 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
159 * that array.
160 *
161 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
162 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
163 *
164 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
165 */
/*
 * Pack the "single physical address" flag (c), the 64-bit-entry flag
 * (s) and the request-buffer offset (i) into one 32-bit descriptor
 * word (see the layout comment above).  Operands are widened to
 * uint32_t before shifting: the original `(c & 1) << 31` left-shifted
 * a set bit into the sign bit of a signed int, which is undefined
 * behavior in C (CERT INT32-C).  The produced value is unchanged.
 */
#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
	((((uint32_t)(c) & 1U) << 31) | (((uint32_t)(s) & 1U) << 30) | \
	((uint32_t)(i) & 0x3fffffff))
168
169#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
170
171#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
172
/*
 * Build a packed whitelist feature version word:
 * major in bits [31:22], minor in bits [21:12], patch in bits [11:0].
 * Operands are widened to uint32_t before shifting: the original
 * `(major & 0x3FF) << 22` could overflow a signed int (undefined
 * behavior, CERT INT32-C) whenever major has its high bits set.
 * The produced value is unchanged.
 */
#define MAKE_WHITELIST_VERSION(major, minor, patch) \
	((((uint32_t)(major) & 0x3FF) << 22) | \
	(((uint32_t)(minor) & 0x3FF) << 12) | ((uint32_t)(patch) & 0xFFF))
175
/*
 * Per-listener bookkeeping; one node per registered listener service,
 * linked on qseecom.registered_listener_list_head.
 */
struct qseecom_registered_listener_list {
	struct list_head list;
	struct qseecom_register_listener_req svc;	/* registration request copy */
	void *user_virt_sb_base;	/* userspace base of the shared buffer */
	u8 *sb_virt;			/* kernel va of the shared buffer */
	phys_addr_t sb_phys;		/* physical addr of the shared buffer */
	size_t sb_length;		/* shared buffer size */
	struct ion_handle *ihandle; /* Retrieve phy addr */
	wait_queue_head_t rcv_req_wq;	/* woken when a request arrives */
	/* rcv_req_flag: -1: not ready; 0: ready and empty; 1: received req */
	int rcv_req_flag;
	int send_resp_flag;
	bool listener_in_use;	/* currently servicing a request */
	/* wq for thread blocked on this listener*/
	wait_queue_head_t listener_block_app_wq;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];	/* sg table shared with TZ */
	uint32_t sglist_cnt;	/* valid entries in sglistinfo_ptr */
	int abort;		/* nonzero: unblock waiters and bail out */
};
195
/*
 * Per-TZ-app bookkeeping; one node per loaded trusted application,
 * linked on qseecom.registered_app_list_head.
 */
struct qseecom_registered_app_list {
	struct list_head list;
	u32 app_id;			/* id assigned by QSEE */
	u32 ref_cnt;			/* presumably counts concurrent loaders -- TODO confirm */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;			/* ELF class of the app image (32/64) -- TODO confirm */
	bool app_blocked;		/* app blocked on a listener */
	u32 check_block;
	u32 blocked_on_listener_id;	/* listener the app is waiting for */
};
206
/*
 * Tracks handles handed out to in-kernel clients (qseecom_kernel.h
 * API), linked on qseecom.registered_kclient_list_head.
 */
struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};
211
/*
 * Describes one usable crypto-engine "unit" (a set of CE pipe pairs)
 * for FDE or PFE; see struct ce_hw_usage_info.
 */
struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
	unsigned int unit_num;
	unsigned int num_ce_pipe_entries;	/* length of ce_pipe_entry[] */
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;		/* unit currently allocated to a client */
	uint32_t type;
};
220
/*
 * Crypto-engine topology parsed from platform data: which CE instance
 * QSEE owns and the unit tables for full-disk (fde) and per-file (pfe)
 * encryption.
 */
struct ce_hw_usage_info {
	uint32_t qsee_ce_hw_instance;
	uint32_t num_fde;			/* entries in fde[] */
	struct qseecom_ce_info_use *fde;
	uint32_t num_pfe;			/* entries in pfe[] */
	struct qseecom_ce_info_use *pfe;
};
228
/*
 * Clock handles for one crypto-engine instance plus a usage count
 * (clk_access_cnt is guarded by clk_access_lock elsewhere in the
 * driver -- TODO(review): confirm against __qseecom_enable_clk).
 */
struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;
};
237
/*
 * Global driver state (single instance: the file-scope `qseecom`).
 * Groups the three registration lists with their locks, version info
 * discovered from TZ, clock/bandwidth-scaling state, and the
 * reentrancy/listener-blocking machinery.
 */
struct qseecom_control {
	struct ion_client *ion_clnt; /* Ion client */
	struct list_head registered_listener_list_head;
	spinlock_t registered_listener_list_lock;

	struct list_head registered_app_list_head;
	spinlock_t registered_app_list_lock;

	struct list_head registered_kclient_list_head;
	spinlock_t registered_kclient_list_lock;

	wait_queue_head_t send_resp_wq;
	int send_resp_flag;

	uint32_t qseos_version;
	uint32_t qsee_version;		/* compared against QSEE_VERSION_* */
	struct device *pdev;
	bool whitelist_support;
	bool commonlib_loaded;		/* 32-bit common library loaded */
	bool commonlib64_loaded;	/* 64-bit common library loaded */
	struct ce_hw_usage_info ce_info;

	int qsee_bw_count;		/* outstanding bandwidth votes */
	int qsee_sfpb_bw_count;		/* outstanding SFPB votes */

	uint32_t qsee_perf_client;	/* msm-bus client id -- TODO confirm */
	struct qseecom_clk qsee;
	struct qseecom_clk ce_drv;

	bool support_bus_scaling;
	bool support_fde;		/* full-disk encryption supported */
	bool support_pfe;		/* per-file encryption supported */
	bool fde_key_size;
	uint32_t cumulative_mode;
	enum qseecom_bandwidth_request_mode current_mode;
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;		/* bw_scale_down_timer armed */
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;

	uint32_t app_block_ref_cnt;	/* apps currently blocked on listeners */
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;		/* QSEECOM_STATE_* */
	int is_apps_region_protected;	/* set from androidboot.keymaster= */
	bool smcinvoke_support;		/* TZ accepted smcinvoke listener SMC */
};
288
/*
 * Bookkeeping for a DMA allocation backing a secure-buffer ion fd, so
 * it can be freed after the command completes.
 */
struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;	/* this slot holds a secure-buffer fd */
	size_t size;
	void *vbase;		/* cpu va of the DMA allocation */
	dma_addr_t pbase;	/* bus/dma address of the allocation */
};
295
/*
 * 32-bit {buffer, size} memory reference as laid out in a marshalled
 * request buffer (fields are fixed-width on purpose; do not widen).
 */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};
300
/*
 * Per-open-fd state for a handle bound to a TZ client app
 * (qseecom_dev_handle.type == QSEECOM_CLIENT_APP).
 */
struct qseecom_client_handle {
	u32 app_id;			/* id of the bound TZ app */
	u8 *sb_virt;			/* kernel va of the shared buffer */
	phys_addr_t sb_phys;		/* physical addr of the shared buffer */
	unsigned long user_virt_sb_base; /* userspace base of the shared buffer */
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
};
312
/* Per-open-fd state for a listener handle: just its listener id. */
struct qseecom_listener_handle {
	u32 id;
};
316
317static struct qseecom_control qseecom;
318
/*
 * State attached to each open of the qseecom device node.  `type`
 * selects which member of the union is valid.
 */
struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;
		struct qseecom_listener_handle listener;
	};
	bool released;
	int abort;			/* nonzero: abort in-flight operations */
	wait_queue_head_t abort_wq;
	atomic_t ioctl_count;		/* ioctls currently executing on this fd */
	bool perf_enabled;
	bool fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];	/* sg table shared with TZ */
	uint32_t sglist_cnt;		/* valid entries in sglistinfo_ptr */
	bool use_legacy_cmd;
};
336
/* Human-readable description of one key-id usage (see key_id_array). */
struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};
340
/* Identifies one crypto-engine unit: CE instance and pipe pair. */
struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};
346
/*
 * Descriptions for each key usage, indexed by usage id (index 0 is the
 * "undefined" sentinel; presumably the indices match the TZ key-usage
 * enum -- TODO(review): confirm against qseecomi.h).
 */
static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},

	{
		.desc = "Full Disk Encryption",
	},

	{
		.desc = "Per File Encryption",
	},

	{
		.desc = "UFS ICE Full Disk Encryption",
	},

	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};
368
369/* Function proto types */
370static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
371static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
372static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
373static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
374static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
375static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
376 char *cmnlib_name);
377static int qseecom_enable_ice_setup(int usage);
378static int qseecom_disable_ice_setup(int usage);
379static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
380static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
381 void __user *argp);
382static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
383 void __user *argp);
384static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
385 void __user *argp);
386
/*
 * Early-param handler for "androidboot.keymaster=".  Parses the
 * option's integer value into qseecom.is_apps_region_protected
 * (whether the bootloader already protected the QSEE apps region --
 * TODO(review): confirm semantics against the bootloader).
 * Returns 1 to tell the __setup machinery the option was handled.
 */
static int get_qseecom_keymaster_status(char *str)
{
	get_option(&str, &qseecom.is_apps_region_protected);
	return 1;
}
__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
393
394static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
395 const void *req_buf, void *resp_buf)
396{
397 int ret = 0;
398 uint32_t smc_id = 0;
399 uint32_t qseos_cmd_id = 0;
400 struct scm_desc desc = {0};
401 struct qseecom_command_scm_resp *scm_resp = NULL;
402
403 if (!req_buf || !resp_buf) {
404 pr_err("Invalid buffer pointer\n");
405 return -EINVAL;
406 }
407 qseos_cmd_id = *(uint32_t *)req_buf;
408 scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
409
410 switch (svc_id) {
411 case 6: {
412 if (tz_cmd_id == 3) {
413 smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
414 desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
415 desc.args[0] = *(uint32_t *)req_buf;
416 } else {
417 pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
418 svc_id, tz_cmd_id);
419 return -EINVAL;
420 }
421 ret = scm_call2(smc_id, &desc);
422 break;
423 }
424 case SCM_SVC_ES: {
425 switch (tz_cmd_id) {
426 case SCM_SAVE_PARTITION_HASH_ID: {
427 u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
428 struct qseecom_save_partition_hash_req *p_hash_req =
429 (struct qseecom_save_partition_hash_req *)
430 req_buf;
431 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
432
433 if (!tzbuf)
434 return -ENOMEM;
435 memset(tzbuf, 0, tzbuflen);
436 memcpy(tzbuf, p_hash_req->digest,
437 SHA256_DIGEST_LENGTH);
438 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
439 smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
440 desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
441 desc.args[0] = p_hash_req->partition_id;
442 desc.args[1] = virt_to_phys(tzbuf);
443 desc.args[2] = SHA256_DIGEST_LENGTH;
444 ret = scm_call2(smc_id, &desc);
445 kzfree(tzbuf);
446 break;
447 }
448 default: {
449 pr_err("tz_cmd_id %d is not supported by scm_call2\n",
450 tz_cmd_id);
451 ret = -EINVAL;
452 break;
453 }
454 } /* end of switch (tz_cmd_id) */
455 break;
456 } /* end of case SCM_SVC_ES */
457 case SCM_SVC_TZSCHEDULER: {
458 switch (qseos_cmd_id) {
459 case QSEOS_APP_START_COMMAND: {
460 struct qseecom_load_app_ireq *req;
461 struct qseecom_load_app_64bit_ireq *req_64bit;
462
463 smc_id = TZ_OS_APP_START_ID;
464 desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
465 if (qseecom.qsee_version < QSEE_VERSION_40) {
466 req = (struct qseecom_load_app_ireq *)req_buf;
467 desc.args[0] = req->mdt_len;
468 desc.args[1] = req->img_len;
469 desc.args[2] = req->phy_addr;
470 } else {
471 req_64bit =
472 (struct qseecom_load_app_64bit_ireq *)
473 req_buf;
474 desc.args[0] = req_64bit->mdt_len;
475 desc.args[1] = req_64bit->img_len;
476 desc.args[2] = req_64bit->phy_addr;
477 }
478 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
479 ret = scm_call2(smc_id, &desc);
480 break;
481 }
482 case QSEOS_APP_SHUTDOWN_COMMAND: {
483 struct qseecom_unload_app_ireq *req;
484
485 req = (struct qseecom_unload_app_ireq *)req_buf;
486 smc_id = TZ_OS_APP_SHUTDOWN_ID;
487 desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
488 desc.args[0] = req->app_id;
489 ret = scm_call2(smc_id, &desc);
490 break;
491 }
492 case QSEOS_APP_LOOKUP_COMMAND: {
493 struct qseecom_check_app_ireq *req;
494 u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
495 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
496
497 if (!tzbuf)
498 return -ENOMEM;
499 req = (struct qseecom_check_app_ireq *)req_buf;
500 pr_debug("Lookup app_name = %s\n", req->app_name);
501 strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
502 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
503 smc_id = TZ_OS_APP_LOOKUP_ID;
504 desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
505 desc.args[0] = virt_to_phys(tzbuf);
506 desc.args[1] = strlen(req->app_name);
507 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
508 ret = scm_call2(smc_id, &desc);
509 kzfree(tzbuf);
510 break;
511 }
512 case QSEOS_APP_REGION_NOTIFICATION: {
513 struct qsee_apps_region_info_ireq *req;
514 struct qsee_apps_region_info_64bit_ireq *req_64bit;
515
516 smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
517 desc.arginfo =
518 TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
519 if (qseecom.qsee_version < QSEE_VERSION_40) {
520 req = (struct qsee_apps_region_info_ireq *)
521 req_buf;
522 desc.args[0] = req->addr;
523 desc.args[1] = req->size;
524 } else {
525 req_64bit =
526 (struct qsee_apps_region_info_64bit_ireq *)
527 req_buf;
528 desc.args[0] = req_64bit->addr;
529 desc.args[1] = req_64bit->size;
530 }
531 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
532 ret = scm_call2(smc_id, &desc);
533 break;
534 }
535 case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
536 struct qseecom_load_lib_image_ireq *req;
537 struct qseecom_load_lib_image_64bit_ireq *req_64bit;
538
539 smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
540 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
541 if (qseecom.qsee_version < QSEE_VERSION_40) {
542 req = (struct qseecom_load_lib_image_ireq *)
543 req_buf;
544 desc.args[0] = req->mdt_len;
545 desc.args[1] = req->img_len;
546 desc.args[2] = req->phy_addr;
547 } else {
548 req_64bit =
549 (struct qseecom_load_lib_image_64bit_ireq *)
550 req_buf;
551 desc.args[0] = req_64bit->mdt_len;
552 desc.args[1] = req_64bit->img_len;
553 desc.args[2] = req_64bit->phy_addr;
554 }
555 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
556 ret = scm_call2(smc_id, &desc);
557 break;
558 }
559 case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
560 smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
561 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
562 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
563 ret = scm_call2(smc_id, &desc);
564 break;
565 }
566 case QSEOS_REGISTER_LISTENER: {
567 struct qseecom_register_listener_ireq *req;
568 struct qseecom_register_listener_64bit_ireq *req_64bit;
569
570 desc.arginfo =
571 TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
572 if (qseecom.qsee_version < QSEE_VERSION_40) {
573 req = (struct qseecom_register_listener_ireq *)
574 req_buf;
575 desc.args[0] = req->listener_id;
576 desc.args[1] = req->sb_ptr;
577 desc.args[2] = req->sb_len;
578 } else {
579 req_64bit =
580 (struct qseecom_register_listener_64bit_ireq *)
581 req_buf;
582 desc.args[0] = req_64bit->listener_id;
583 desc.args[1] = req_64bit->sb_ptr;
584 desc.args[2] = req_64bit->sb_len;
585 }
Zhen Kong2f60f492017-06-29 15:22:14 -0700586 qseecom.smcinvoke_support = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700587 smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
588 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
589 ret = scm_call2(smc_id, &desc);
590 if (ret) {
Zhen Kong2f60f492017-06-29 15:22:14 -0700591 qseecom.smcinvoke_support = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700592 smc_id = TZ_OS_REGISTER_LISTENER_ID;
593 __qseecom_reentrancy_check_if_no_app_blocked(
594 smc_id);
595 ret = scm_call2(smc_id, &desc);
596 }
597 break;
598 }
599 case QSEOS_DEREGISTER_LISTENER: {
600 struct qseecom_unregister_listener_ireq *req;
601
602 req = (struct qseecom_unregister_listener_ireq *)
603 req_buf;
604 smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
605 desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
606 desc.args[0] = req->listener_id;
607 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
608 ret = scm_call2(smc_id, &desc);
609 break;
610 }
611 case QSEOS_LISTENER_DATA_RSP_COMMAND: {
612 struct qseecom_client_listener_data_irsp *req;
613
614 req = (struct qseecom_client_listener_data_irsp *)
615 req_buf;
616 smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
617 desc.arginfo =
618 TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
619 desc.args[0] = req->listener_id;
620 desc.args[1] = req->status;
621 ret = scm_call2(smc_id, &desc);
622 break;
623 }
624 case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
625 struct qseecom_client_listener_data_irsp *req;
626 struct qseecom_client_listener_data_64bit_irsp *req_64;
627
628 smc_id =
629 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
630 desc.arginfo =
631 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
632 if (qseecom.qsee_version < QSEE_VERSION_40) {
633 req =
634 (struct qseecom_client_listener_data_irsp *)
635 req_buf;
636 desc.args[0] = req->listener_id;
637 desc.args[1] = req->status;
638 desc.args[2] = req->sglistinfo_ptr;
639 desc.args[3] = req->sglistinfo_len;
640 } else {
641 req_64 =
642 (struct qseecom_client_listener_data_64bit_irsp *)
643 req_buf;
644 desc.args[0] = req_64->listener_id;
645 desc.args[1] = req_64->status;
646 desc.args[2] = req_64->sglistinfo_ptr;
647 desc.args[3] = req_64->sglistinfo_len;
648 }
649 ret = scm_call2(smc_id, &desc);
650 break;
651 }
652 case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
653 struct qseecom_load_app_ireq *req;
654 struct qseecom_load_app_64bit_ireq *req_64bit;
655
656 smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
657 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
658 if (qseecom.qsee_version < QSEE_VERSION_40) {
659 req = (struct qseecom_load_app_ireq *)req_buf;
660 desc.args[0] = req->mdt_len;
661 desc.args[1] = req->img_len;
662 desc.args[2] = req->phy_addr;
663 } else {
664 req_64bit =
665 (struct qseecom_load_app_64bit_ireq *)req_buf;
666 desc.args[0] = req_64bit->mdt_len;
667 desc.args[1] = req_64bit->img_len;
668 desc.args[2] = req_64bit->phy_addr;
669 }
670 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
671 ret = scm_call2(smc_id, &desc);
672 break;
673 }
674 case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
675 smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
676 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
677 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
678 ret = scm_call2(smc_id, &desc);
679 break;
680 }
681
682 case QSEOS_CLIENT_SEND_DATA_COMMAND: {
683 struct qseecom_client_send_data_ireq *req;
684 struct qseecom_client_send_data_64bit_ireq *req_64bit;
685
686 smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
687 desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
688 if (qseecom.qsee_version < QSEE_VERSION_40) {
689 req = (struct qseecom_client_send_data_ireq *)
690 req_buf;
691 desc.args[0] = req->app_id;
692 desc.args[1] = req->req_ptr;
693 desc.args[2] = req->req_len;
694 desc.args[3] = req->rsp_ptr;
695 desc.args[4] = req->rsp_len;
696 } else {
697 req_64bit =
698 (struct qseecom_client_send_data_64bit_ireq *)
699 req_buf;
700 desc.args[0] = req_64bit->app_id;
701 desc.args[1] = req_64bit->req_ptr;
702 desc.args[2] = req_64bit->req_len;
703 desc.args[3] = req_64bit->rsp_ptr;
704 desc.args[4] = req_64bit->rsp_len;
705 }
706 ret = scm_call2(smc_id, &desc);
707 break;
708 }
709 case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
710 struct qseecom_client_send_data_ireq *req;
711 struct qseecom_client_send_data_64bit_ireq *req_64bit;
712
713 smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
714 desc.arginfo =
715 TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
716 if (qseecom.qsee_version < QSEE_VERSION_40) {
717 req = (struct qseecom_client_send_data_ireq *)
718 req_buf;
719 desc.args[0] = req->app_id;
720 desc.args[1] = req->req_ptr;
721 desc.args[2] = req->req_len;
722 desc.args[3] = req->rsp_ptr;
723 desc.args[4] = req->rsp_len;
724 desc.args[5] = req->sglistinfo_ptr;
725 desc.args[6] = req->sglistinfo_len;
726 } else {
727 req_64bit =
728 (struct qseecom_client_send_data_64bit_ireq *)
729 req_buf;
730 desc.args[0] = req_64bit->app_id;
731 desc.args[1] = req_64bit->req_ptr;
732 desc.args[2] = req_64bit->req_len;
733 desc.args[3] = req_64bit->rsp_ptr;
734 desc.args[4] = req_64bit->rsp_len;
735 desc.args[5] = req_64bit->sglistinfo_ptr;
736 desc.args[6] = req_64bit->sglistinfo_len;
737 }
738 ret = scm_call2(smc_id, &desc);
739 break;
740 }
741 case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
742 struct qseecom_client_send_service_ireq *req;
743
744 req = (struct qseecom_client_send_service_ireq *)
745 req_buf;
746 smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
747 desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
748 desc.args[0] = req->key_type;
749 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
750 ret = scm_call2(smc_id, &desc);
751 break;
752 }
753 case QSEOS_RPMB_ERASE_COMMAND: {
754 smc_id = TZ_OS_RPMB_ERASE_ID;
755 desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
756 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
757 ret = scm_call2(smc_id, &desc);
758 break;
759 }
760 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
761 smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
762 desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
763 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
764 ret = scm_call2(smc_id, &desc);
765 break;
766 }
767 case QSEOS_GENERATE_KEY: {
768 u32 tzbuflen = PAGE_ALIGN(sizeof
769 (struct qseecom_key_generate_ireq) -
770 sizeof(uint32_t));
771 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
772
773 if (!tzbuf)
774 return -ENOMEM;
775 memset(tzbuf, 0, tzbuflen);
776 memcpy(tzbuf, req_buf + sizeof(uint32_t),
777 (sizeof(struct qseecom_key_generate_ireq) -
778 sizeof(uint32_t)));
779 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
780 smc_id = TZ_OS_KS_GEN_KEY_ID;
781 desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
782 desc.args[0] = virt_to_phys(tzbuf);
783 desc.args[1] = tzbuflen;
784 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
785 ret = scm_call2(smc_id, &desc);
786 kzfree(tzbuf);
787 break;
788 }
789 case QSEOS_DELETE_KEY: {
790 u32 tzbuflen = PAGE_ALIGN(sizeof
791 (struct qseecom_key_delete_ireq) -
792 sizeof(uint32_t));
793 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
794
795 if (!tzbuf)
796 return -ENOMEM;
797 memset(tzbuf, 0, tzbuflen);
798 memcpy(tzbuf, req_buf + sizeof(uint32_t),
799 (sizeof(struct qseecom_key_delete_ireq) -
800 sizeof(uint32_t)));
801 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
802 smc_id = TZ_OS_KS_DEL_KEY_ID;
803 desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
804 desc.args[0] = virt_to_phys(tzbuf);
805 desc.args[1] = tzbuflen;
806 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
807 ret = scm_call2(smc_id, &desc);
808 kzfree(tzbuf);
809 break;
810 }
811 case QSEOS_SET_KEY: {
812 u32 tzbuflen = PAGE_ALIGN(sizeof
813 (struct qseecom_key_select_ireq) -
814 sizeof(uint32_t));
815 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
816
817 if (!tzbuf)
818 return -ENOMEM;
819 memset(tzbuf, 0, tzbuflen);
820 memcpy(tzbuf, req_buf + sizeof(uint32_t),
821 (sizeof(struct qseecom_key_select_ireq) -
822 sizeof(uint32_t)));
823 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
824 smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
825 desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
826 desc.args[0] = virt_to_phys(tzbuf);
827 desc.args[1] = tzbuflen;
828 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
829 ret = scm_call2(smc_id, &desc);
830 kzfree(tzbuf);
831 break;
832 }
833 case QSEOS_UPDATE_KEY_USERINFO: {
834 u32 tzbuflen = PAGE_ALIGN(sizeof
835 (struct qseecom_key_userinfo_update_ireq) -
836 sizeof(uint32_t));
837 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
838
839 if (!tzbuf)
840 return -ENOMEM;
841 memset(tzbuf, 0, tzbuflen);
842 memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
843 (struct qseecom_key_userinfo_update_ireq) -
844 sizeof(uint32_t)));
845 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
846 smc_id = TZ_OS_KS_UPDATE_KEY_ID;
847 desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
848 desc.args[0] = virt_to_phys(tzbuf);
849 desc.args[1] = tzbuflen;
850 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
851 ret = scm_call2(smc_id, &desc);
852 kzfree(tzbuf);
853 break;
854 }
855 case QSEOS_TEE_OPEN_SESSION: {
856 struct qseecom_qteec_ireq *req;
857 struct qseecom_qteec_64bit_ireq *req_64bit;
858
859 smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
860 desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
861 if (qseecom.qsee_version < QSEE_VERSION_40) {
862 req = (struct qseecom_qteec_ireq *)req_buf;
863 desc.args[0] = req->app_id;
864 desc.args[1] = req->req_ptr;
865 desc.args[2] = req->req_len;
866 desc.args[3] = req->resp_ptr;
867 desc.args[4] = req->resp_len;
868 } else {
869 req_64bit = (struct qseecom_qteec_64bit_ireq *)
870 req_buf;
871 desc.args[0] = req_64bit->app_id;
872 desc.args[1] = req_64bit->req_ptr;
873 desc.args[2] = req_64bit->req_len;
874 desc.args[3] = req_64bit->resp_ptr;
875 desc.args[4] = req_64bit->resp_len;
876 }
877 ret = scm_call2(smc_id, &desc);
878 break;
879 }
880 case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
881 struct qseecom_qteec_ireq *req;
882 struct qseecom_qteec_64bit_ireq *req_64bit;
883
884 smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
885 desc.arginfo =
886 TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
887 if (qseecom.qsee_version < QSEE_VERSION_40) {
888 req = (struct qseecom_qteec_ireq *)req_buf;
889 desc.args[0] = req->app_id;
890 desc.args[1] = req->req_ptr;
891 desc.args[2] = req->req_len;
892 desc.args[3] = req->resp_ptr;
893 desc.args[4] = req->resp_len;
894 desc.args[5] = req->sglistinfo_ptr;
895 desc.args[6] = req->sglistinfo_len;
896 } else {
897 req_64bit = (struct qseecom_qteec_64bit_ireq *)
898 req_buf;
899 desc.args[0] = req_64bit->app_id;
900 desc.args[1] = req_64bit->req_ptr;
901 desc.args[2] = req_64bit->req_len;
902 desc.args[3] = req_64bit->resp_ptr;
903 desc.args[4] = req_64bit->resp_len;
904 desc.args[5] = req_64bit->sglistinfo_ptr;
905 desc.args[6] = req_64bit->sglistinfo_len;
906 }
907 ret = scm_call2(smc_id, &desc);
908 break;
909 }
910 case QSEOS_TEE_INVOKE_COMMAND: {
911 struct qseecom_qteec_ireq *req;
912 struct qseecom_qteec_64bit_ireq *req_64bit;
913
914 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
915 desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
916 if (qseecom.qsee_version < QSEE_VERSION_40) {
917 req = (struct qseecom_qteec_ireq *)req_buf;
918 desc.args[0] = req->app_id;
919 desc.args[1] = req->req_ptr;
920 desc.args[2] = req->req_len;
921 desc.args[3] = req->resp_ptr;
922 desc.args[4] = req->resp_len;
923 } else {
924 req_64bit = (struct qseecom_qteec_64bit_ireq *)
925 req_buf;
926 desc.args[0] = req_64bit->app_id;
927 desc.args[1] = req_64bit->req_ptr;
928 desc.args[2] = req_64bit->req_len;
929 desc.args[3] = req_64bit->resp_ptr;
930 desc.args[4] = req_64bit->resp_len;
931 }
932 ret = scm_call2(smc_id, &desc);
933 break;
934 }
935 case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
936 struct qseecom_qteec_ireq *req;
937 struct qseecom_qteec_64bit_ireq *req_64bit;
938
939 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
940 desc.arginfo =
941 TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
942 if (qseecom.qsee_version < QSEE_VERSION_40) {
943 req = (struct qseecom_qteec_ireq *)req_buf;
944 desc.args[0] = req->app_id;
945 desc.args[1] = req->req_ptr;
946 desc.args[2] = req->req_len;
947 desc.args[3] = req->resp_ptr;
948 desc.args[4] = req->resp_len;
949 desc.args[5] = req->sglistinfo_ptr;
950 desc.args[6] = req->sglistinfo_len;
951 } else {
952 req_64bit = (struct qseecom_qteec_64bit_ireq *)
953 req_buf;
954 desc.args[0] = req_64bit->app_id;
955 desc.args[1] = req_64bit->req_ptr;
956 desc.args[2] = req_64bit->req_len;
957 desc.args[3] = req_64bit->resp_ptr;
958 desc.args[4] = req_64bit->resp_len;
959 desc.args[5] = req_64bit->sglistinfo_ptr;
960 desc.args[6] = req_64bit->sglistinfo_len;
961 }
962 ret = scm_call2(smc_id, &desc);
963 break;
964 }
965 case QSEOS_TEE_CLOSE_SESSION: {
966 struct qseecom_qteec_ireq *req;
967 struct qseecom_qteec_64bit_ireq *req_64bit;
968
969 smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
970 desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
971 if (qseecom.qsee_version < QSEE_VERSION_40) {
972 req = (struct qseecom_qteec_ireq *)req_buf;
973 desc.args[0] = req->app_id;
974 desc.args[1] = req->req_ptr;
975 desc.args[2] = req->req_len;
976 desc.args[3] = req->resp_ptr;
977 desc.args[4] = req->resp_len;
978 } else {
979 req_64bit = (struct qseecom_qteec_64bit_ireq *)
980 req_buf;
981 desc.args[0] = req_64bit->app_id;
982 desc.args[1] = req_64bit->req_ptr;
983 desc.args[2] = req_64bit->req_len;
984 desc.args[3] = req_64bit->resp_ptr;
985 desc.args[4] = req_64bit->resp_len;
986 }
987 ret = scm_call2(smc_id, &desc);
988 break;
989 }
990 case QSEOS_TEE_REQUEST_CANCELLATION: {
991 struct qseecom_qteec_ireq *req;
992 struct qseecom_qteec_64bit_ireq *req_64bit;
993
994 smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
995 desc.arginfo =
996 TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
997 if (qseecom.qsee_version < QSEE_VERSION_40) {
998 req = (struct qseecom_qteec_ireq *)req_buf;
999 desc.args[0] = req->app_id;
1000 desc.args[1] = req->req_ptr;
1001 desc.args[2] = req->req_len;
1002 desc.args[3] = req->resp_ptr;
1003 desc.args[4] = req->resp_len;
1004 } else {
1005 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1006 req_buf;
1007 desc.args[0] = req_64bit->app_id;
1008 desc.args[1] = req_64bit->req_ptr;
1009 desc.args[2] = req_64bit->req_len;
1010 desc.args[3] = req_64bit->resp_ptr;
1011 desc.args[4] = req_64bit->resp_len;
1012 }
1013 ret = scm_call2(smc_id, &desc);
1014 break;
1015 }
1016 case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
1017 struct qseecom_continue_blocked_request_ireq *req =
1018 (struct qseecom_continue_blocked_request_ireq *)
1019 req_buf;
Zhen Kong2f60f492017-06-29 15:22:14 -07001020 if (qseecom.smcinvoke_support)
1021 smc_id =
1022 TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
1023 else
1024 smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001025 desc.arginfo =
1026 TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
Zhen Kong2f60f492017-06-29 15:22:14 -07001027 desc.args[0] = req->app_or_session_id;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001028 ret = scm_call2(smc_id, &desc);
1029 break;
1030 }
1031 default: {
1032 pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
1033 qseos_cmd_id);
1034 ret = -EINVAL;
1035 break;
1036 }
1037 } /*end of switch (qsee_cmd_id) */
1038 break;
1039 } /*end of case SCM_SVC_TZSCHEDULER*/
1040 default: {
1041 pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
1042 svc_id);
1043 ret = -EINVAL;
1044 break;
1045 }
1046 } /*end of switch svc_id */
1047 scm_resp->result = desc.ret[0];
1048 scm_resp->resp_type = desc.ret[1];
1049 scm_resp->data = desc.ret[2];
1050 pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
1051 svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
1052 pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
1053 scm_resp->result, scm_resp->resp_type, scm_resp->data);
1054 return ret;
1055}
1056
1057
1058static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1059 size_t cmd_len, void *resp_buf, size_t resp_len)
1060{
1061 if (!is_scm_armv8())
1062 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1063 resp_buf, resp_len);
1064 else
1065 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1066}
1067
1068static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1069 struct qseecom_register_listener_req *svc)
1070{
1071 struct qseecom_registered_listener_list *ptr;
1072 int unique = 1;
1073 unsigned long flags;
1074
1075 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1076 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1077 if (ptr->svc.listener_id == svc->listener_id) {
1078 pr_err("Service id: %u is already registered\n",
1079 ptr->svc.listener_id);
1080 unique = 0;
1081 break;
1082 }
1083 }
1084 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1085 return unique;
1086}
1087
1088static struct qseecom_registered_listener_list *__qseecom_find_svc(
1089 int32_t listener_id)
1090{
1091 struct qseecom_registered_listener_list *entry = NULL;
1092 unsigned long flags;
1093
1094 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1095 list_for_each_entry(entry,
1096 &qseecom.registered_listener_list_head, list) {
1097 if (entry->svc.listener_id == listener_id)
1098 break;
1099 }
1100 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1101
1102 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1103 pr_err("Service id: %u is not found\n", listener_id);
1104 return NULL;
1105 }
1106
1107 return entry;
1108}
1109
1110static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1111 struct qseecom_dev_handle *handle,
1112 struct qseecom_register_listener_req *listener)
1113{
1114 int ret = 0;
1115 struct qseecom_register_listener_ireq req;
1116 struct qseecom_register_listener_64bit_ireq req_64bit;
1117 struct qseecom_command_scm_resp resp;
1118 ion_phys_addr_t pa;
1119 void *cmd_buf = NULL;
1120 size_t cmd_len;
1121
1122 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001123 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001124 listener->ifd_data_fd);
1125 if (IS_ERR_OR_NULL(svc->ihandle)) {
1126 pr_err("Ion client could not retrieve the handle\n");
1127 return -ENOMEM;
1128 }
1129
1130 /* Get the physical address of the ION BUF */
1131 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1132 if (ret) {
1133 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1134 ret);
1135 return ret;
1136 }
1137 /* Populate the structure for sending scm call to load image */
1138 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1139 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1140 pr_err("ION memory mapping for listener shared buffer failed\n");
1141 return -ENOMEM;
1142 }
1143 svc->sb_phys = (phys_addr_t)pa;
1144
1145 if (qseecom.qsee_version < QSEE_VERSION_40) {
1146 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1147 req.listener_id = svc->svc.listener_id;
1148 req.sb_len = svc->sb_length;
1149 req.sb_ptr = (uint32_t)svc->sb_phys;
1150 cmd_buf = (void *)&req;
1151 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1152 } else {
1153 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1154 req_64bit.listener_id = svc->svc.listener_id;
1155 req_64bit.sb_len = svc->sb_length;
1156 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1157 cmd_buf = (void *)&req_64bit;
1158 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1159 }
1160
1161 resp.result = QSEOS_RESULT_INCOMPLETE;
1162
1163 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1164 &resp, sizeof(resp));
1165 if (ret) {
1166 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1167 return -EINVAL;
1168 }
1169
1170 if (resp.result != QSEOS_RESULT_SUCCESS) {
1171 pr_err("Error SB registration req: resp.result = %d\n",
1172 resp.result);
1173 return -EPERM;
1174 }
1175 return 0;
1176}
1177
1178static int qseecom_register_listener(struct qseecom_dev_handle *data,
1179 void __user *argp)
1180{
1181 int ret = 0;
1182 unsigned long flags;
1183 struct qseecom_register_listener_req rcvd_lstnr;
1184 struct qseecom_registered_listener_list *new_entry;
1185
1186 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1187 if (ret) {
1188 pr_err("copy_from_user failed\n");
1189 return ret;
1190 }
1191 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1192 rcvd_lstnr.sb_size))
1193 return -EFAULT;
1194
Zhen Kong3c674612018-09-06 22:51:27 -07001195 data->listener.id = rcvd_lstnr.listener_id;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001196 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001197 pr_err("Service %d is not unique and failed to register\n",
1198 rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001199 data->released = true;
1200 return -EBUSY;
1201 }
1202
1203 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1204 if (!new_entry)
1205 return -ENOMEM;
1206 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
Zhen Kongf5087172018-10-11 17:22:05 -07001207 new_entry->rcv_req_flag = -1;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001208
1209 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1210 new_entry->sb_length = rcvd_lstnr.sb_size;
1211 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1212 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001213 pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
1214 rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001215 kzfree(new_entry);
1216 return -ENOMEM;
1217 }
1218
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001219 init_waitqueue_head(&new_entry->rcv_req_wq);
1220 init_waitqueue_head(&new_entry->listener_block_app_wq);
1221 new_entry->send_resp_flag = 0;
1222 new_entry->listener_in_use = false;
1223 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1224 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1225 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1226
Zhen Kong3c674612018-09-06 22:51:27 -07001227 pr_warn("Service %d is registered\n", rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001228 return ret;
1229}
1230
Zhen Kong26e62742018-05-04 17:19:06 -07001231static void __qseecom_listener_abort_all(int abort)
1232{
1233 struct qseecom_registered_listener_list *entry = NULL;
1234 unsigned long flags;
1235
1236 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1237 list_for_each_entry(entry,
1238 &qseecom.registered_listener_list_head, list) {
1239 pr_debug("set abort %d for listener %d\n",
1240 abort, entry->svc.listener_id);
1241 entry->abort = abort;
1242 }
1243 if (abort)
1244 wake_up_interruptible_all(&qseecom.send_resp_wq);
1245 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1246}
1247
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001248static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
1249{
1250 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001251 struct qseecom_register_listener_ireq req;
1252 struct qseecom_registered_listener_list *ptr_svc = NULL;
1253 struct qseecom_command_scm_resp resp;
1254 struct ion_handle *ihandle = NULL; /* Retrieve phy addr */
1255
Zhen Kong3c674612018-09-06 22:51:27 -07001256 ptr_svc = __qseecom_find_svc(data->listener.id);
1257 if (!ptr_svc) {
1258 pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
1259 return -ENODATA;
1260 }
1261
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001262 req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
1263 req.listener_id = data->listener.id;
1264 resp.result = QSEOS_RESULT_INCOMPLETE;
1265
1266 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
1267 sizeof(req), &resp, sizeof(resp));
1268 if (ret) {
1269 pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
1270 ret, data->listener.id);
Zhen Kong3c674612018-09-06 22:51:27 -07001271 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001272 }
1273
1274 if (resp.result != QSEOS_RESULT_SUCCESS) {
1275 pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
1276 resp.result, data->listener.id);
Zhen Kong3c674612018-09-06 22:51:27 -07001277 ret = -EPERM;
1278 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001279 }
1280
1281 data->abort = 1;
Zhen Kong3c674612018-09-06 22:51:27 -07001282 ptr_svc->abort = 1;
1283 wake_up_all(&ptr_svc->rcv_req_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001284
1285 while (atomic_read(&data->ioctl_count) > 1) {
1286 if (wait_event_freezable(data->abort_wq,
1287 atomic_read(&data->ioctl_count) <= 1)) {
1288 pr_err("Interrupted from abort\n");
1289 ret = -ERESTARTSYS;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001290 }
1291 }
1292
Zhen Kong3c674612018-09-06 22:51:27 -07001293exit:
1294 if (ptr_svc->sb_virt) {
1295 ihandle = ptr_svc->ihandle;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001296 if (!IS_ERR_OR_NULL(ihandle)) {
1297 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
1298 ion_free(qseecom.ion_clnt, ihandle);
1299 }
1300 }
Zhen Kong3c674612018-09-06 22:51:27 -07001301 list_del(&ptr_svc->list);
1302 kzfree(ptr_svc);
1303
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001304 data->released = true;
Zhen Kong3c674612018-09-06 22:51:27 -07001305 pr_warn("Service %d is unregistered\n", data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001306 return ret;
1307}
1308
/*
 * Vote the QSEE bus bandwidth to @mode through the msm bus-scaling
 * client, enabling/disabling the QSEE core clock first when one exists
 * (INACTIVE drops the clock, any active mode takes it).  On a failed
 * bus request the clock change is rolled back.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	/* Only issue a bus request when the clock step succeeded and the
	 * requested mode actually differs from the current one.
	 */
	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* Roll the clock back to its previous state */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		/* NOTE(review): current_mode is updated even when the bus
		 * request above failed, leaving the cached mode out of sync
		 * with the actual vote — confirm this is intentional.
		 */
		qseecom.current_mode = mode;
	}
	return ret;
}
1345
/*
 * Workqueue handler scheduled by the bw scale-down timer: drop the bus
 * bandwidth vote to INACTIVE if the timer is still marked running.
 * Locks are taken in the same order used elsewhere in this driver
 * (app_access_lock, then qsee_bw_mutex) to avoid deadlock.
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	/* timer_running may have been cleared by a racing scale-up request */
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
1358
/*
 * Timer callback for bw_scale_down_timer: runs in (soft)irq context, so
 * it only schedules the workqueue item that does the actual (sleeping)
 * bandwidth scale-down.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1363
1364static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1365{
1366 struct qseecom_clk *qclk;
1367 int ret = 0;
1368
1369 mutex_lock(&clk_access_lock);
1370 if (ce == CLK_QSEE)
1371 qclk = &qseecom.qsee;
1372 else
1373 qclk = &qseecom.ce_drv;
1374
1375 if (qclk->clk_access_cnt > 2) {
1376 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1377 ret = -EINVAL;
1378 goto err_dec_ref_cnt;
1379 }
1380 if (qclk->clk_access_cnt == 2)
1381 qclk->clk_access_cnt--;
1382
1383err_dec_ref_cnt:
1384 mutex_unlock(&clk_access_lock);
1385 return ret;
1386}
1387
1388
1389static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1390{
1391 int32_t ret = 0;
1392 int32_t request_mode = INACTIVE;
1393
1394 mutex_lock(&qsee_bw_mutex);
1395 if (mode == 0) {
1396 if (qseecom.cumulative_mode > MEDIUM)
1397 request_mode = HIGH;
1398 else
1399 request_mode = qseecom.cumulative_mode;
1400 } else {
1401 request_mode = mode;
1402 }
1403
1404 ret = __qseecom_set_msm_bus_request(request_mode);
1405 if (ret) {
1406 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1407 ret, request_mode);
1408 goto err_scale_timer;
1409 }
1410
1411 if (qseecom.timer_running) {
1412 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1413 if (ret) {
1414 pr_err("Failed to decrease clk ref count.\n");
1415 goto err_scale_timer;
1416 }
1417 del_timer_sync(&(qseecom.bw_scale_down_timer));
1418 qseecom.timer_running = false;
1419 }
1420err_scale_timer:
1421 mutex_unlock(&qsee_bw_mutex);
1422 return ret;
1423}
1424
1425
1426static int qseecom_unregister_bus_bandwidth_needs(
1427 struct qseecom_dev_handle *data)
1428{
1429 int32_t ret = 0;
1430
1431 qseecom.cumulative_mode -= data->mode;
1432 data->mode = INACTIVE;
1433
1434 return ret;
1435}
1436
1437static int __qseecom_register_bus_bandwidth_needs(
1438 struct qseecom_dev_handle *data, uint32_t request_mode)
1439{
1440 int32_t ret = 0;
1441
1442 if (data->mode == INACTIVE) {
1443 qseecom.cumulative_mode += request_mode;
1444 data->mode = request_mode;
1445 } else {
1446 if (data->mode != request_mode) {
1447 qseecom.cumulative_mode -= data->mode;
1448 qseecom.cumulative_mode += request_mode;
1449 data->mode = request_mode;
1450 }
1451 }
1452 return ret;
1453}
1454
1455static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1456{
1457 int ret = 0;
1458
1459 ret = qsee_vote_for_clock(data, CLK_DFAB);
1460 if (ret) {
1461 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1462 goto perf_enable_exit;
1463 }
1464 ret = qsee_vote_for_clock(data, CLK_SFPB);
1465 if (ret) {
1466 qsee_disable_clock_vote(data, CLK_DFAB);
1467 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1468 goto perf_enable_exit;
1469 }
1470
1471perf_enable_exit:
1472 return ret;
1473}
1474
1475static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1476 void __user *argp)
1477{
1478 int32_t ret = 0;
1479 int32_t req_mode;
1480
1481 if (qseecom.no_clock_support)
1482 return 0;
1483
1484 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1485 if (ret) {
1486 pr_err("copy_from_user failed\n");
1487 return ret;
1488 }
1489 if (req_mode > HIGH) {
1490 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1491 return -EINVAL;
1492 }
1493
1494 /*
1495 * Register bus bandwidth needs if bus scaling feature is enabled;
1496 * otherwise, qseecom enable/disable clocks for the client directly.
1497 */
1498 if (qseecom.support_bus_scaling) {
1499 mutex_lock(&qsee_bw_mutex);
1500 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1501 mutex_unlock(&qsee_bw_mutex);
1502 } else {
1503 pr_debug("Bus scaling feature is NOT enabled\n");
1504 pr_debug("request bandwidth mode %d for the client\n",
1505 req_mode);
1506 if (req_mode != INACTIVE) {
1507 ret = qseecom_perf_enable(data);
1508 if (ret)
1509 pr_err("Failed to vote for clock with err %d\n",
1510 ret);
1511 } else {
1512 qsee_disable_clock_vote(data, CLK_DFAB);
1513 qsee_disable_clock_vote(data, CLK_SFPB);
1514 }
1515 }
1516 return ret;
1517}
1518
1519static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1520{
1521 if (qseecom.no_clock_support)
1522 return;
1523
1524 mutex_lock(&qsee_bw_mutex);
1525 qseecom.bw_scale_down_timer.expires = jiffies +
1526 msecs_to_jiffies(duration);
1527 mod_timer(&(qseecom.bw_scale_down_timer),
1528 qseecom.bw_scale_down_timer.expires);
1529 qseecom.timer_running = true;
1530 mutex_unlock(&qsee_bw_mutex);
1531}
1532
1533static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1534{
1535 if (!qseecom.support_bus_scaling)
1536 qsee_disable_clock_vote(data, CLK_SFPB);
1537 else
1538 __qseecom_add_bw_scale_down_timer(
1539 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1540}
1541
1542static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1543{
1544 int ret = 0;
1545
1546 if (qseecom.support_bus_scaling) {
1547 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1548 if (ret)
1549 pr_err("Failed to set bw MEDIUM.\n");
1550 } else {
1551 ret = qsee_vote_for_clock(data, CLK_SFPB);
1552 if (ret)
1553 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1554 }
1555 return ret;
1556}
1557
1558static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1559 void __user *argp)
1560{
1561 ion_phys_addr_t pa;
1562 int32_t ret;
1563 struct qseecom_set_sb_mem_param_req req;
1564 size_t len;
1565
1566 /* Copy the relevant information needed for loading the image */
1567 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1568 return -EFAULT;
1569
1570 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1571 (req.sb_len == 0)) {
1572 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1573 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1574 return -EFAULT;
1575 }
1576 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1577 req.sb_len))
1578 return -EFAULT;
1579
1580 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001581 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001582 req.ifd_data_fd);
1583 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1584 pr_err("Ion client could not retrieve the handle\n");
1585 return -ENOMEM;
1586 }
1587 /* Get the physical address of the ION BUF */
1588 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1589 if (ret) {
1590
1591 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1592 ret);
1593 return ret;
1594 }
1595
1596 if (len < req.sb_len) {
1597 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1598 req.sb_len, len);
1599 return -EINVAL;
1600 }
1601 /* Populate the structure for sending scm call to load image */
1602 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1603 data->client.ihandle);
1604 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1605 pr_err("ION memory mapping for client shared buf failed\n");
1606 return -ENOMEM;
1607 }
1608 data->client.sb_phys = (phys_addr_t)pa;
1609 data->client.sb_length = req.sb_len;
1610 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1611 return 0;
1612}
1613
Zhen Kong26e62742018-05-04 17:19:06 -07001614static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1615 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001616{
1617 int ret;
1618
1619 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001620 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001621}
1622
1623static int __qseecom_reentrancy_listener_has_sent_rsp(
1624 struct qseecom_dev_handle *data,
1625 struct qseecom_registered_listener_list *ptr_svc)
1626{
1627 int ret;
1628
1629 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001630 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001631}
1632
1633static void __qseecom_clean_listener_sglistinfo(
1634 struct qseecom_registered_listener_list *ptr_svc)
1635{
1636 if (ptr_svc->sglist_cnt) {
1637 memset(ptr_svc->sglistinfo_ptr, 0,
1638 SGLISTINFO_TABLE_SIZE);
1639 ptr_svc->sglist_cnt = 0;
1640 }
1641}
1642
/* wait listener retry delay (ms) and max attempt count */
1644#define QSEECOM_WAIT_LISTENER_DELAY 10
1645#define QSEECOM_WAIT_LISTENER_MAX_ATTEMP 3
Zhen Kong25731112018-09-20 13:10:03 -07001646
Zhen Kongf5087172018-10-11 17:22:05 -07001647static int __is_listener_rcv_wq_not_ready(
Zhen Kong25731112018-09-20 13:10:03 -07001648 struct qseecom_registered_listener_list *ptr_svc)
1649{
1650 int retry = 0;
1651
Zhen Kongf5087172018-10-11 17:22:05 -07001652 while (ptr_svc->rcv_req_flag == -1 &&
1653 retry++ < QSEECOM_WAIT_LISTENER_MAX_ATTEMP) {
1654 msleep(QSEECOM_WAIT_LISTENER_DELAY);
Zhen Kong25731112018-09-20 13:10:03 -07001655 }
Zhen Kongf5087172018-10-11 17:22:05 -07001656 return ptr_svc->rcv_req_flag == -1;
Zhen Kong25731112018-09-20 13:10:03 -07001657}
1658
/*
 * Service QSEOS_RESULT_INCOMPLETE responses from TZ: each iteration TZ
 * has asked us to route a request to the listener whose id is in
 * resp->data.  We wake that listener, block (signals masked) until it
 * posts its response, then send the response back to TZ via an SCM
 * call, looping until TZ stops returning INCOMPLETE.
 *
 * @data: client handle on whose behalf the original command ran.
 * @resp: in/out SCM response; updated by each listener-response call.
 *
 * Returns 0 on success; a negative errno when the listener was missing,
 * not ready, aborted, or the SCM call failed.  Note 'rc' records
 * listener-lookup failures while 'ret' records SCM/cache failures; a
 * nonzero rc takes precedence at the end.
 */
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
									= {0};
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				/* skip waking a listener whose thread has
				 * not yet started waiting for requests
				 */
				if (__is_listener_rcv_wq_not_ready(ptr_svc))
					break;
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		/* list_for_each_entry leaves ptr_svc pointing past the last
		 * entry when no match was found, so re-check the id here
		 */
		if (ptr_svc->svc.listener_id != lstnr) {
			pr_err("Service %d does not exist\n",
						lstnr);
			rc = -ERESTARTSYS;
			ptr_svc = NULL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->abort == 1) {
			pr_err("Service %d abort %d\n",
						lstnr, ptr_svc->abort);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		/* rcv_req_flag still -1 means the retry loop above timed out */
		if (ptr_svc->rcv_req_flag == -1) {
			pr_err("Service %d is not ready to receive request\n",
					lstnr);
			rc = -ENOENT;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;

		}

		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);
		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		do {
			/*
			 * When reentrancy is not supported, check global
			 * send_resp_flag; otherwise, check this listener's
			 * send_resp_flag.
			 */
			if (!qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}

			if (qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort || ptr_svc->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
err_resp:
		/* Even on lookup failure, a response (with FAILURE status)
		 * must still be sent to TZ to unwind the INCOMPLETE state.
		 */
		qseecom.send_resp_flag = 0;
		if (ptr_svc) {
			ptr_svc->send_resp_flag = 0;
			table = ptr_svc->sglistinfo_ptr;
		}
		/* Build the 32- or 64-bit listener-response request
		 * depending on the QSEE version.
		 */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			if (table) {
				send_data_rsp.sglistinfo_ptr =
					(uint32_t)virt_to_phys(table);
				send_data_rsp.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			if (table) {
				send_data_rsp_64bit.sglistinfo_ptr =
					virt_to_phys(table);
				send_data_rsp_64bit.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* Command id lives in the first word of either layout */
		if (qseecom.whitelist_support == false || table == NULL)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		/* Make the listener's shared-buffer contents visible to TZ */
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}

		/* RPMB/SSD listeners need the QSEE clock held across the call */
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		if (ptr_svc) {
			ptr_svc->listener_in_use = false;
			__qseecom_clean_listener_sglistinfo(ptr_svc);
		}
		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
				__qseecom_disable_clk(CLK_QSEE);
			return ret;
		}
		pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
			status, resp->result, data->client.app_id, lstnr);
		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
		}
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
			__qseecom_disable_clk(CLK_QSEE);

	}
	if (rc)
		return rc;

	return ret;
}
1861
Zhen Konga91aaf02018-02-02 17:21:04 -08001862static int __qseecom_process_reentrancy_blocked_on_listener(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001863 struct qseecom_command_scm_resp *resp,
1864 struct qseecom_registered_app_list *ptr_app,
1865 struct qseecom_dev_handle *data)
1866{
1867 struct qseecom_registered_listener_list *list_ptr;
1868 int ret = 0;
1869 struct qseecom_continue_blocked_request_ireq ireq;
1870 struct qseecom_command_scm_resp continue_resp;
Zhen Konga91aaf02018-02-02 17:21:04 -08001871 unsigned int session_id;
Zhen Kong3d1d92f2018-02-02 17:21:04 -08001872 sigset_t new_sigset;
1873 sigset_t old_sigset;
Zhen Konga91aaf02018-02-02 17:21:04 -08001874 unsigned long flags;
1875 bool found_app = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001876
1877 if (!resp || !data) {
1878 pr_err("invalid resp or data pointer\n");
1879 ret = -EINVAL;
1880 goto exit;
1881 }
1882
1883 /* find app_id & img_name from list */
1884 if (!ptr_app) {
1885 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
1886 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
1887 list) {
1888 if ((ptr_app->app_id == data->client.app_id) &&
1889 (!strcmp(ptr_app->app_name,
1890 data->client.app_name))) {
1891 found_app = true;
1892 break;
1893 }
1894 }
1895 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
1896 flags);
1897 if (!found_app) {
1898 pr_err("app_id %d (%s) is not found\n",
1899 data->client.app_id,
1900 (char *)data->client.app_name);
1901 ret = -ENOENT;
1902 goto exit;
1903 }
1904 }
1905
Zhen Kongd8cc0052017-11-13 15:13:31 -08001906 do {
Zhen Konga91aaf02018-02-02 17:21:04 -08001907 session_id = resp->resp_type;
1908 list_ptr = __qseecom_find_svc(resp->data);
1909 if (!list_ptr) {
1910 pr_err("Invalid listener ID %d\n", resp->data);
1911 ret = -ENODATA;
Zhen Konge7f525f2017-12-01 18:26:25 -08001912 goto exit;
1913 }
Zhen Konga91aaf02018-02-02 17:21:04 -08001914 ptr_app->blocked_on_listener_id = resp->data;
1915
1916 pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
1917 resp->data, list_ptr->listener_in_use,
1918 session_id, data->client.app_id);
1919
1920 /* sleep until listener is available */
1921 sigfillset(&new_sigset);
1922 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1923
1924 do {
1925 qseecom.app_block_ref_cnt++;
1926 ptr_app->app_blocked = true;
1927 mutex_unlock(&app_access_lock);
1928 wait_event_freezable(
1929 list_ptr->listener_block_app_wq,
1930 !list_ptr->listener_in_use);
1931 mutex_lock(&app_access_lock);
1932 ptr_app->app_blocked = false;
1933 qseecom.app_block_ref_cnt--;
1934 } while (list_ptr->listener_in_use);
1935
1936 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
1937
1938 ptr_app->blocked_on_listener_id = 0;
1939 pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
1940 resp->data, session_id, data->client.app_id);
1941
1942 /* notify TZ that listener is available */
1943 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
1944
1945 if (qseecom.smcinvoke_support)
1946 ireq.app_or_session_id = session_id;
1947 else
1948 ireq.app_or_session_id = data->client.app_id;
1949
1950 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1951 &ireq, sizeof(ireq),
1952 &continue_resp, sizeof(continue_resp));
1953 if (ret && qseecom.smcinvoke_support) {
1954 /* retry with legacy cmd */
1955 qseecom.smcinvoke_support = false;
1956 ireq.app_or_session_id = data->client.app_id;
1957 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1958 &ireq, sizeof(ireq),
1959 &continue_resp, sizeof(continue_resp));
1960 qseecom.smcinvoke_support = true;
1961 if (ret) {
1962 pr_err("unblock app %d or session %d fail\n",
1963 data->client.app_id, session_id);
1964 goto exit;
1965 }
1966 }
1967 resp->result = continue_resp.result;
1968 resp->resp_type = continue_resp.resp_type;
1969 resp->data = continue_resp.data;
1970 pr_debug("unblock resp = %d\n", resp->result);
1971 } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
1972
1973 if (resp->result != QSEOS_RESULT_INCOMPLETE) {
1974 pr_err("Unexpected unblock resp %d\n", resp->result);
1975 ret = -EINVAL;
Zhen Kong2f60f492017-06-29 15:22:14 -07001976 }
Zhen Kong2f60f492017-06-29 15:22:14 -07001977exit:
1978 return ret;
1979}
1980
/*
 * Service QSEOS_RESULT_INCOMPLETE responses for a reentrant TZ app: each
 * INCOMPLETE result names a listener (resp->data) that must handle a request
 * before the TA can continue.  Wake that listener, wait (signals blocked,
 * app_access_lock dropped) for its response, then send the listener response
 * back to TZ and loop while TZ keeps returning INCOMPLETE.
 *
 * On listener lookup/abort failures a QSEOS_RESULT_FAILURE response is still
 * sent to TZ (err_resp path) so the TA is not left stuck, and the local error
 * is reported via rc.  Returns 0 on success, negative errno otherwise.
 * Must be called with app_access_lock held.
 */
static int __qseecom_reentrancy_process_incomplete_cmd(
					struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;		/* sticky local error, reported after loop */
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
									= {0};
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				if (__is_listener_rcv_wq_not_ready(ptr_svc))
					break;
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/*
		 * Validation failures below still fall into err_resp so a
		 * FAILURE status is returned to TZ for this listener request.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_err("Service %d does not exist\n",
				lstnr);
			rc = -ERESTARTSYS;
			ptr_svc = NULL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->abort == 1) {
			pr_err("Service %d abort %d\n",
				lstnr, ptr_svc->abort);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		/* rcv_req_flag == -1: listener rcv wq was not ready above */
		if (ptr_svc->rcv_req_flag == -1) {
			pr_err("Service %d is not ready to receive request\n",
				lstnr);
			rc = -ENOENT;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;

		}

		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);

		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		/* unlock mutex btw waking listener and sleep-wait */
		mutex_unlock(&app_access_lock);
		do {
			if (!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);
		/* lock mutex again after resp sent */
		mutex_lock(&app_access_lock);
		ptr_svc->send_resp_flag = 0;
		qseecom.send_resp_flag = 0;

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort || ptr_svc->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
err_resp:
		/* Build the listener response (32- or 64-bit ABI) for TZ. */
		if (ptr_svc)
			table = ptr_svc->sglistinfo_ptr;
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			if (table) {
				send_data_rsp.sglistinfo_ptr =
					(uint32_t)virt_to_phys(table);
				send_data_rsp.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				/* TZ reads the table via phys addr: flush it */
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			if (table) {
				send_data_rsp_64bit.sglistinfo_ptr =
					virt_to_phys(table);
				send_data_rsp_64bit.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* First word of either rsp struct is the qsee command id. */
		if (qseecom.whitelist_support == false || table == NULL)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}
		if (lstnr == RPMB_SERVICE) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		if (ptr_svc) {
			ptr_svc->listener_in_use = false;
			__qseecom_clean_listener_sglistinfo(ptr_svc);
			wake_up_interruptible(&ptr_svc->listener_block_app_wq);
		}

		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			goto exit;
		}

		switch (resp->result) {
		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
				lstnr, data->client.app_id, resp->data);
			if (lstnr == resp->data) {
				pr_err("lstnr %d should not be blocked!\n",
					lstnr);
				ret = -EINVAL;
				goto exit;
			}
			ret = __qseecom_process_reentrancy_blocked_on_listener(
				resp, NULL, data);
			if (ret) {
				pr_err("failed to process App(%d) %s blocked on listener %d\n",
					data->client.app_id,
					data->client.app_name, resp->data);
				goto exit;
			}
			/* fall through: unblock updated *resp; loop re-checks */
		case QSEOS_RESULT_SUCCESS:
		case QSEOS_RESULT_INCOMPLETE:
			break;
		default:
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
			goto exit;
		}
exit:
		if (lstnr == RPMB_SERVICE)
			__qseecom_disable_clk(CLK_QSEE);

	}
	/* a validation error takes precedence over the final scm result */
	if (rc)
		return rc;

	return ret;
}
2195
2196/*
2197 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2198 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2199 * So, needs to first check if no app blocked before sending OS level scm call,
2200 * then wait until all apps are unblocked.
2201 */
static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
{
	sigset_t new_sigset, old_sigset;

	/*
	 * Only gate Trusted-OS-owned calls, and only in reentrancy phases
	 * 1/2 (phase 0 has no reentrancy; phase 3 TZ tolerates blocked apps).
	 */
	if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
		qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
		IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
		/* thread sleep until this app unblocked */
		while (qseecom.app_block_ref_cnt > 0) {
			/* block all signals so the wait is not interrupted */
			sigfillset(&new_sigset);
			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
			/* drop the global lock so blocked apps can resume */
			mutex_unlock(&app_access_lock);
			do {
				if (!wait_event_freezable(qseecom.app_block_wq,
					(qseecom.app_block_ref_cnt == 0)))
					break;
			} while (1);
			mutex_lock(&app_access_lock);
			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
			/* loop: ref_cnt may have risen again while unlocked */
		}
	}
}
2224
2225/*
2226 * scm_call of send data will fail if this TA is blocked or there are more
2227 * than one TA requesting listener services; So, first check to see if need
2228 * to wait.
2229 */
static void __qseecom_reentrancy_check_if_this_app_blocked(
			struct qseecom_registered_app_list *ptr_app)
{
	sigset_t new_sigset, old_sigset;

	if (qseecom.qsee_reentrancy_support) {
		/*
		 * check_block marks this app as wait-listed;
		 * qseecom_unload_app() checks it to avoid force-unloading
		 * an app that is parked here.
		 */
		ptr_app->check_block++;
		while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
			/* thread sleep until this app unblocked */
			sigfillset(&new_sigset);
			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
			/* drop the global lock while sleeping */
			mutex_unlock(&app_access_lock);
			do {
				if (!wait_event_freezable(qseecom.app_block_wq,
					(!ptr_app->app_blocked &&
					qseecom.app_block_ref_cnt <= 1)))
					break;
			} while (1);
			mutex_lock(&app_access_lock);
			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		}
		ptr_app->check_block--;
	}
}
2254
2255static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2256 uint32_t *app_id)
2257{
2258 int32_t ret;
2259 struct qseecom_command_scm_resp resp;
2260 bool found_app = false;
2261 struct qseecom_registered_app_list *entry = NULL;
2262 unsigned long flags = 0;
2263
2264 if (!app_id) {
2265 pr_err("Null pointer to app_id\n");
2266 return -EINVAL;
2267 }
2268 *app_id = 0;
2269
2270 /* check if app exists and has been registered locally */
2271 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2272 list_for_each_entry(entry,
2273 &qseecom.registered_app_list_head, list) {
2274 if (!strcmp(entry->app_name, req.app_name)) {
2275 found_app = true;
2276 break;
2277 }
2278 }
2279 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2280 if (found_app) {
2281 pr_debug("Found app with id %d\n", entry->app_id);
2282 *app_id = entry->app_id;
2283 return 0;
2284 }
2285
2286 memset((void *)&resp, 0, sizeof(resp));
2287
2288 /* SCM_CALL to check if app_id for the mentioned app exists */
2289 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2290 sizeof(struct qseecom_check_app_ireq),
2291 &resp, sizeof(resp));
2292 if (ret) {
2293 pr_err("scm_call to check if app is already loaded failed\n");
2294 return -EINVAL;
2295 }
2296
2297 if (resp.result == QSEOS_RESULT_FAILURE)
2298 return 0;
2299
2300 switch (resp.resp_type) {
2301 /*qsee returned listener type response */
2302 case QSEOS_LISTENER_ID:
2303 pr_err("resp type is of listener type instead of app");
2304 return -EINVAL;
2305 case QSEOS_APP_ID:
2306 *app_id = resp.data;
2307 return 0;
2308 default:
2309 pr_err("invalid resp type (%d) from qsee",
2310 resp.resp_type);
2311 return -ENODEV;
2312 }
2313}
2314
2315static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2316{
2317 struct qseecom_registered_app_list *entry = NULL;
2318 unsigned long flags = 0;
2319 u32 app_id = 0;
2320 struct ion_handle *ihandle; /* Ion handle */
2321 struct qseecom_load_img_req load_img_req;
2322 int32_t ret = 0;
2323 ion_phys_addr_t pa = 0;
2324 size_t len;
2325 struct qseecom_command_scm_resp resp;
2326 struct qseecom_check_app_ireq req;
2327 struct qseecom_load_app_ireq load_req;
2328 struct qseecom_load_app_64bit_ireq load_req_64bit;
2329 void *cmd_buf = NULL;
2330 size_t cmd_len;
2331 bool first_time = false;
2332
2333 /* Copy the relevant information needed for loading the image */
2334 if (copy_from_user(&load_img_req,
2335 (void __user *)argp,
2336 sizeof(struct qseecom_load_img_req))) {
2337 pr_err("copy_from_user failed\n");
2338 return -EFAULT;
2339 }
2340
2341 /* Check and load cmnlib */
2342 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2343 if (!qseecom.commonlib_loaded &&
2344 load_img_req.app_arch == ELFCLASS32) {
2345 ret = qseecom_load_commonlib_image(data, "cmnlib");
2346 if (ret) {
2347 pr_err("failed to load cmnlib\n");
2348 return -EIO;
2349 }
2350 qseecom.commonlib_loaded = true;
2351 pr_debug("cmnlib is loaded\n");
2352 }
2353
2354 if (!qseecom.commonlib64_loaded &&
2355 load_img_req.app_arch == ELFCLASS64) {
2356 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2357 if (ret) {
2358 pr_err("failed to load cmnlib64\n");
2359 return -EIO;
2360 }
2361 qseecom.commonlib64_loaded = true;
2362 pr_debug("cmnlib64 is loaded\n");
2363 }
2364 }
2365
2366 if (qseecom.support_bus_scaling) {
2367 mutex_lock(&qsee_bw_mutex);
2368 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2369 mutex_unlock(&qsee_bw_mutex);
2370 if (ret)
2371 return ret;
2372 }
2373
2374 /* Vote for the SFPB clock */
2375 ret = __qseecom_enable_clk_scale_up(data);
2376 if (ret)
2377 goto enable_clk_err;
2378
2379 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2380 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2381 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2382
2383 ret = __qseecom_check_app_exists(req, &app_id);
2384 if (ret < 0)
2385 goto loadapp_err;
2386
2387 if (app_id) {
2388 pr_debug("App id %d (%s) already exists\n", app_id,
2389 (char *)(req.app_name));
2390 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2391 list_for_each_entry(entry,
2392 &qseecom.registered_app_list_head, list){
2393 if (entry->app_id == app_id) {
2394 entry->ref_cnt++;
2395 break;
2396 }
2397 }
2398 spin_unlock_irqrestore(
2399 &qseecom.registered_app_list_lock, flags);
2400 ret = 0;
2401 } else {
2402 first_time = true;
2403 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2404 (char *)(load_img_req.img_name));
2405 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002406 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002407 load_img_req.ifd_data_fd);
2408 if (IS_ERR_OR_NULL(ihandle)) {
2409 pr_err("Ion client could not retrieve the handle\n");
2410 ret = -ENOMEM;
2411 goto loadapp_err;
2412 }
2413
2414 /* Get the physical address of the ION BUF */
2415 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2416 if (ret) {
2417 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2418 ret);
2419 goto loadapp_err;
2420 }
2421 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2422 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2423 len, load_img_req.mdt_len,
2424 load_img_req.img_len);
2425 ret = -EINVAL;
2426 goto loadapp_err;
2427 }
2428 /* Populate the structure for sending scm call to load image */
2429 if (qseecom.qsee_version < QSEE_VERSION_40) {
2430 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2431 load_req.mdt_len = load_img_req.mdt_len;
2432 load_req.img_len = load_img_req.img_len;
2433 strlcpy(load_req.app_name, load_img_req.img_name,
2434 MAX_APP_NAME_SIZE);
2435 load_req.phy_addr = (uint32_t)pa;
2436 cmd_buf = (void *)&load_req;
2437 cmd_len = sizeof(struct qseecom_load_app_ireq);
2438 } else {
2439 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2440 load_req_64bit.mdt_len = load_img_req.mdt_len;
2441 load_req_64bit.img_len = load_img_req.img_len;
2442 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2443 MAX_APP_NAME_SIZE);
2444 load_req_64bit.phy_addr = (uint64_t)pa;
2445 cmd_buf = (void *)&load_req_64bit;
2446 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2447 }
2448
2449 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2450 ION_IOC_CLEAN_INV_CACHES);
2451 if (ret) {
2452 pr_err("cache operation failed %d\n", ret);
2453 goto loadapp_err;
2454 }
2455
2456 /* SCM_CALL to load the app and get the app_id back */
2457 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2458 cmd_len, &resp, sizeof(resp));
2459 if (ret) {
2460 pr_err("scm_call to load app failed\n");
2461 if (!IS_ERR_OR_NULL(ihandle))
2462 ion_free(qseecom.ion_clnt, ihandle);
2463 ret = -EINVAL;
2464 goto loadapp_err;
2465 }
2466
2467 if (resp.result == QSEOS_RESULT_FAILURE) {
2468 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2469 if (!IS_ERR_OR_NULL(ihandle))
2470 ion_free(qseecom.ion_clnt, ihandle);
2471 ret = -EFAULT;
2472 goto loadapp_err;
2473 }
2474
2475 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2476 ret = __qseecom_process_incomplete_cmd(data, &resp);
2477 if (ret) {
2478 pr_err("process_incomplete_cmd failed err: %d\n",
2479 ret);
2480 if (!IS_ERR_OR_NULL(ihandle))
2481 ion_free(qseecom.ion_clnt, ihandle);
2482 ret = -EFAULT;
2483 goto loadapp_err;
2484 }
2485 }
2486
2487 if (resp.result != QSEOS_RESULT_SUCCESS) {
2488 pr_err("scm_call failed resp.result unknown, %d\n",
2489 resp.result);
2490 if (!IS_ERR_OR_NULL(ihandle))
2491 ion_free(qseecom.ion_clnt, ihandle);
2492 ret = -EFAULT;
2493 goto loadapp_err;
2494 }
2495
2496 app_id = resp.data;
2497
2498 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2499 if (!entry) {
2500 ret = -ENOMEM;
2501 goto loadapp_err;
2502 }
2503 entry->app_id = app_id;
2504 entry->ref_cnt = 1;
2505 entry->app_arch = load_img_req.app_arch;
2506 /*
2507 * keymaster app may be first loaded as "keymaste" by qseecomd,
2508 * and then used as "keymaster" on some targets. To avoid app
2509 * name checking error, register "keymaster" into app_list and
2510 * thread private data.
2511 */
2512 if (!strcmp(load_img_req.img_name, "keymaste"))
2513 strlcpy(entry->app_name, "keymaster",
2514 MAX_APP_NAME_SIZE);
2515 else
2516 strlcpy(entry->app_name, load_img_req.img_name,
2517 MAX_APP_NAME_SIZE);
2518 entry->app_blocked = false;
2519 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002520 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002521
2522 /* Deallocate the handle */
2523 if (!IS_ERR_OR_NULL(ihandle))
2524 ion_free(qseecom.ion_clnt, ihandle);
2525
2526 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2527 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2528 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2529 flags);
2530
2531 pr_warn("App with id %u (%s) now loaded\n", app_id,
2532 (char *)(load_img_req.img_name));
2533 }
2534 data->client.app_id = app_id;
2535 data->client.app_arch = load_img_req.app_arch;
2536 if (!strcmp(load_img_req.img_name, "keymaste"))
2537 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2538 else
2539 strlcpy(data->client.app_name, load_img_req.img_name,
2540 MAX_APP_NAME_SIZE);
2541 load_img_req.app_id = app_id;
2542 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2543 pr_err("copy_to_user failed\n");
2544 ret = -EFAULT;
2545 if (first_time == true) {
2546 spin_lock_irqsave(
2547 &qseecom.registered_app_list_lock, flags);
2548 list_del(&entry->list);
2549 spin_unlock_irqrestore(
2550 &qseecom.registered_app_list_lock, flags);
2551 kzfree(entry);
2552 }
2553 }
2554
2555loadapp_err:
2556 __qseecom_disable_clk_scale_down(data);
2557enable_clk_err:
2558 if (qseecom.support_bus_scaling) {
2559 mutex_lock(&qsee_bw_mutex);
2560 qseecom_unregister_bus_bandwidth_needs(data);
2561 mutex_unlock(&qsee_bw_mutex);
2562 }
2563 return ret;
2564}
2565
/*
 * Wait for all other in-flight ioctls on this client to drain
 * (ioctl_count drops to 1, i.e. only the caller remains) before the
 * app is unloaded.
 *
 * Returns 1 ("set unload app") normally, or -ERESTARTSYS if the
 * freezable wait was interrupted.
 */
static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
{
	int ret = 1;	/* Set unload app */

	/* kick waiters so they can observe the abort and bail out */
	wake_up_all(&qseecom.send_resp_wq);
	/*
	 * With reentrancy support the caller holds app_access_lock;
	 * drop it while sleeping so other ioctls can complete.
	 */
	if (qseecom.qsee_reentrancy_support)
		mutex_unlock(&app_access_lock);
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
					atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			break;
		}
	}
	if (qseecom.qsee_reentrancy_support)
		mutex_lock(&app_access_lock);
	return ret;
}
2585
2586static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2587{
2588 int ret = 0;
2589
2590 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2591 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2592 ion_free(qseecom.ion_clnt, data->client.ihandle);
2593 data->client.ihandle = NULL;
2594 }
2595 return ret;
2596}
2597
/*
 * Unload a trusted app from QSEE (or just drop a reference when other
 * clients still use it).  keymaster is special-cased and never unloaded.
 *
 * @data:      client handle naming the app (app_id/app_name).
 * @app_crash: true when called from a crash/cleanup path; forces ref_cnt
 *             to 0 — unless the app is blocked/wait-listed, in which case
 *             the forced unload is suppressed.
 *
 * Returns 0 on success, negative errno on failure.  The client's shared
 * buffer is unmapped and data->released set in all cases.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;
	bool found_app = false;
	bool found_dead_app = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* "keymaste"/"keymaster" apps are kept resident in TZ */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	/* drain this client's other ioctls, then wait for TZ to be idle */
	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/*
					 * Never force-unload an app that is
					 * blocked on a listener or parked in
					 * the check_block wait.
					 */
					if (ptr_app->app_blocked ||
						ptr_app->check_block)
						app_crash = false;
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				/* same id but different name: stale entry */
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
				flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for sending scm call to load image */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
				req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
				data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
				data->client.app_id);
		/* INCOMPLETE: app still owes listener work before shutdown */
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
					ret);
				goto unload_exit;
			}
		}
	}

unload_exit:
	/* fix up the local registration to match what happened above */
	if (found_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
				flags1);
	}
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2721
2722static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2723 unsigned long virt)
2724{
2725 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2726}
2727
2728static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2729 unsigned long virt)
2730{
2731 return (uintptr_t)data->client.sb_virt +
2732 (virt - data->client.user_virt_sb_base);
2733}
2734
2735int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2736 struct qseecom_send_svc_cmd_req *req_ptr,
2737 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2738{
2739 int ret = 0;
2740 void *req_buf = NULL;
2741
2742 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2743 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2744 req_ptr, send_svc_ireq_ptr);
2745 return -EINVAL;
2746 }
2747
2748 /* Clients need to ensure req_buf is at base offset of shared buffer */
2749 if ((uintptr_t)req_ptr->cmd_req_buf !=
2750 data_ptr->client.user_virt_sb_base) {
2751 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2752 return -EINVAL;
2753 }
2754
2755 if (data_ptr->client.sb_length <
2756 sizeof(struct qseecom_rpmb_provision_key)) {
2757 pr_err("shared buffer is too small to hold key type\n");
2758 return -EINVAL;
2759 }
2760 req_buf = data_ptr->client.sb_virt;
2761
2762 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2763 send_svc_ireq_ptr->key_type =
2764 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2765 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2766 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2767 data_ptr, (uintptr_t)req_ptr->resp_buf));
2768 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2769
2770 return ret;
2771}
2772
2773int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2774 struct qseecom_send_svc_cmd_req *req_ptr,
2775 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2776{
2777 int ret = 0;
2778 uint32_t reqd_len_sb_in = 0;
2779
2780 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2781 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2782 req_ptr, send_svc_ireq_ptr);
2783 return -EINVAL;
2784 }
2785
2786 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2787 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2788 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2789 pr_err("Required: %u, Available: %zu\n",
2790 reqd_len_sb_in, data_ptr->client.sb_length);
2791 return -ENOMEM;
2792 }
2793
2794 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2795 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2796 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2797 data_ptr, (uintptr_t)req_ptr->resp_buf));
2798 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2799
2800 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2801 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2802
2803
2804 return ret;
2805}
2806
/*
 * Validate a send-service-cmd request: both the command and response
 * buffers must be user virtual addresses lying entirely within this
 * client's registered shared buffer, with non-zero lengths and no
 * integer overflow in any pointer+length combination.
 *
 * Returns 0 when valid, -EINVAL on bad input, -ENOMEM when the shared
 * buffer is too small for cmd + resp.
 */
static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_svc_cmd_req *req)
{
	if (!req || !req->resp_buf || !req->cmd_req_buf) {
		pr_err("req or cmd buffer or response buffer is null\n");
		return -EINVAL;
	}

	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->client.sb_virt == NULL) {
		pr_err("sb_virt null\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base == 0) {
		pr_err("user_virt_sb_base is null\n");
		return -EINVAL;
	}

	if (data->client.sb_length == 0) {
		pr_err("sb_length is 0\n");
		return -EINVAL;
	}

	/* both user buffers must start inside the shared buffer window */
	if (((uintptr_t)req->cmd_req_buf <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* guard the sum below against u32 wrap-around */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* guard the pointer+length end-address checks against wrap-around */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* both buffers must also END inside the shared buffer window */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
2891
2892static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
2893 void __user *argp)
2894{
2895 int ret = 0;
2896 struct qseecom_client_send_service_ireq send_svc_ireq;
2897 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
2898 struct qseecom_command_scm_resp resp;
2899 struct qseecom_send_svc_cmd_req req;
2900 void *send_req_ptr;
2901 size_t req_buf_size;
2902
2903 /*struct qseecom_command_scm_resp resp;*/
2904
2905 if (copy_from_user(&req,
2906 (void __user *)argp,
2907 sizeof(req))) {
2908 pr_err("copy_from_user failed\n");
2909 return -EFAULT;
2910 }
2911
2912 if (__validate_send_service_cmd_inputs(data, &req))
2913 return -EINVAL;
2914
2915 data->type = QSEECOM_SECURE_SERVICE;
2916
2917 switch (req.cmd_id) {
2918 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
2919 case QSEOS_RPMB_ERASE_COMMAND:
2920 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
2921 send_req_ptr = &send_svc_ireq;
2922 req_buf_size = sizeof(send_svc_ireq);
2923 if (__qseecom_process_rpmb_svc_cmd(data, &req,
2924 send_req_ptr))
2925 return -EINVAL;
2926 break;
2927 case QSEOS_FSM_LTEOTA_REQ_CMD:
2928 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
2929 case QSEOS_FSM_IKE_REQ_CMD:
2930 case QSEOS_FSM_IKE_REQ_RSP_CMD:
2931 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
2932 case QSEOS_FSM_OEM_FUSE_READ_ROW:
2933 case QSEOS_FSM_ENCFS_REQ_CMD:
2934 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
2935 send_req_ptr = &send_fsm_key_svc_ireq;
2936 req_buf_size = sizeof(send_fsm_key_svc_ireq);
2937 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
2938 send_req_ptr))
2939 return -EINVAL;
2940 break;
2941 default:
2942 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
2943 return -EINVAL;
2944 }
2945
2946 if (qseecom.support_bus_scaling) {
2947 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
2948 if (ret) {
2949 pr_err("Fail to set bw HIGH\n");
2950 return ret;
2951 }
2952 } else {
2953 ret = qseecom_perf_enable(data);
2954 if (ret) {
2955 pr_err("Failed to vote for clocks with err %d\n", ret);
2956 goto exit;
2957 }
2958 }
2959
2960 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
2961 data->client.sb_virt, data->client.sb_length,
2962 ION_IOC_CLEAN_INV_CACHES);
2963 if (ret) {
2964 pr_err("cache operation failed %d\n", ret);
2965 goto exit;
2966 }
2967 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2968 (const void *)send_req_ptr,
2969 req_buf_size, &resp, sizeof(resp));
2970 if (ret) {
2971 pr_err("qseecom_scm_call failed with err: %d\n", ret);
2972 if (!qseecom.support_bus_scaling) {
2973 qsee_disable_clock_vote(data, CLK_DFAB);
2974 qsee_disable_clock_vote(data, CLK_SFPB);
2975 } else {
2976 __qseecom_add_bw_scale_down_timer(
2977 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
2978 }
2979 goto exit;
2980 }
2981 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
2982 data->client.sb_virt, data->client.sb_length,
2983 ION_IOC_INV_CACHES);
2984 if (ret) {
2985 pr_err("cache operation failed %d\n", ret);
2986 goto exit;
2987 }
2988 switch (resp.result) {
2989 case QSEOS_RESULT_SUCCESS:
2990 break;
2991 case QSEOS_RESULT_INCOMPLETE:
2992 pr_debug("qseos_result_incomplete\n");
2993 ret = __qseecom_process_incomplete_cmd(data, &resp);
2994 if (ret) {
2995 pr_err("process_incomplete_cmd fail with result: %d\n",
2996 resp.result);
2997 }
2998 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
2999 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05303000 if (put_user(resp.result,
3001 (uint32_t __user *)req.resp_buf)) {
3002 ret = -EINVAL;
3003 goto exit;
3004 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003005 ret = 0;
3006 }
3007 break;
3008 case QSEOS_RESULT_FAILURE:
3009 pr_err("scm call failed with resp.result: %d\n", resp.result);
3010 ret = -EINVAL;
3011 break;
3012 default:
3013 pr_err("Response result %d not supported\n",
3014 resp.result);
3015 ret = -EINVAL;
3016 break;
3017 }
3018 if (!qseecom.support_bus_scaling) {
3019 qsee_disable_clock_vote(data, CLK_DFAB);
3020 qsee_disable_clock_vote(data, CLK_SFPB);
3021 } else {
3022 __qseecom_add_bw_scale_down_timer(
3023 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3024 }
3025
3026exit:
3027 return ret;
3028}
3029
3030static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
3031 struct qseecom_send_cmd_req *req)
3032
3033{
3034 if (!data || !data->client.ihandle) {
3035 pr_err("Client or client handle is not initialized\n");
3036 return -EINVAL;
3037 }
3038 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
3039 (req->cmd_req_buf == NULL)) {
3040 pr_err("cmd buffer or response buffer is null\n");
3041 return -EINVAL;
3042 }
3043 if (((uintptr_t)req->cmd_req_buf <
3044 data->client.user_virt_sb_base) ||
3045 ((uintptr_t)req->cmd_req_buf >=
3046 (data->client.user_virt_sb_base + data->client.sb_length))) {
3047 pr_err("cmd buffer address not within shared bufffer\n");
3048 return -EINVAL;
3049 }
3050 if (((uintptr_t)req->resp_buf <
3051 data->client.user_virt_sb_base) ||
3052 ((uintptr_t)req->resp_buf >=
3053 (data->client.user_virt_sb_base + data->client.sb_length))) {
3054 pr_err("response buffer address not within shared bufffer\n");
3055 return -EINVAL;
3056 }
3057 if ((req->cmd_req_len == 0) ||
3058 (req->cmd_req_len > data->client.sb_length) ||
3059 (req->resp_len > data->client.sb_length)) {
3060 pr_err("cmd buf length or response buf length not valid\n");
3061 return -EINVAL;
3062 }
3063 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3064 pr_err("Integer overflow detected in req_len & rsp_len\n");
3065 return -EINVAL;
3066 }
3067
3068 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3069 pr_debug("Not enough memory to fit cmd_buf.\n");
3070 pr_debug("resp_buf. Required: %u, Available: %zu\n",
3071 (req->cmd_req_len + req->resp_len),
3072 data->client.sb_length);
3073 return -ENOMEM;
3074 }
3075 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3076 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3077 return -EINVAL;
3078 }
3079 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3080 pr_err("Integer overflow in resp_len & resp_buf\n");
3081 return -EINVAL;
3082 }
3083 if (data->client.user_virt_sb_base >
3084 (ULONG_MAX - data->client.sb_length)) {
3085 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3086 return -EINVAL;
3087 }
3088 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3089 ((uintptr_t)data->client.user_virt_sb_base +
3090 data->client.sb_length)) ||
3091 (((uintptr_t)req->resp_buf + req->resp_len) >
3092 ((uintptr_t)data->client.user_virt_sb_base +
3093 data->client.sb_length))) {
3094 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3095 return -EINVAL;
3096 }
3097 return 0;
3098}
3099
/*
 * Post-process an SCM response when QSEE reentrancy is supported.
 *
 * BLOCKED_ON_LISTENER: the app is waiting on a listener; after the
 * blocked state is resolved, execution deliberately falls through to the
 * INCOMPLETE handling so the pending command is driven to completion.
 * INCOMPLETE: re-enter TZ until the command finishes, tracking the app's
 * blocked state and waking any waiters.
 *
 * Returns 0 on success or a negative errno from the helpers.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
			resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/* fall through: continue processing the unblocked command */

	case QSEOS_RESULT_INCOMPLETE:
		/* Mark app blocked while re-entering TZ to finish the cmd. */
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
				ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
						resp->result);
		return -EINVAL;
	}
}
3138
/*
 * Send a client command to its trusted app over an SCM call.
 *
 * Looks up the registered app by id/name, translates the user-space
 * cmd/resp buffer addresses into physical addresses inside the shared
 * buffer, picks the 32-bit or 64-bit request layout based on the QSEE
 * version, performs the required cache maintenance around the SCM call,
 * and post-processes INCOMPLETE / reentrancy responses.
 *
 * Returns 0 on success or a negative errno; req must already have been
 * validated by __validate_send_cmd_inputs().
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	int ret2 = 0;
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy QSEE: 32-bit physical addresses in the request. */
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* Flush the sglist table so TZ reads current contents. */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* First field of both ireq layouts is the command id. */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* Flush cmd+resp region of the shared buffer before the call. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
			ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/* Invalidate even on failure so the CPU sees TZ's writes. */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret2);
		return ret2;
	}
	return ret;
}
3276
3277static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3278{
3279 int ret = 0;
3280 struct qseecom_send_cmd_req req;
3281
3282 ret = copy_from_user(&req, argp, sizeof(req));
3283 if (ret) {
3284 pr_err("copy_from_user failed\n");
3285 return ret;
3286 }
3287
3288 if (__validate_send_cmd_inputs(data, &req))
3289 return -EINVAL;
3290
3291 ret = __qseecom_send_cmd(data, &req);
3292
3293 if (ret)
3294 return ret;
3295
3296 return ret;
3297}
3298
3299int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3300 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3301 struct qseecom_dev_handle *data, int i) {
3302
3303 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3304 (req->ifd_data[i].fd > 0)) {
3305 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3306 (req->ifd_data[i].cmd_buf_offset >
3307 req->cmd_req_len - sizeof(uint32_t))) {
3308 pr_err("Invalid offset (req len) 0x%x\n",
3309 req->ifd_data[i].cmd_buf_offset);
3310 return -EINVAL;
3311 }
3312 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3313 (lstnr_resp->ifd_data[i].fd > 0)) {
3314 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3315 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3316 lstnr_resp->resp_len - sizeof(uint32_t))) {
3317 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3318 lstnr_resp->ifd_data[i].cmd_buf_offset);
3319 return -EINVAL;
3320 }
3321 }
3322 return 0;
3323}
3324
/*
 * Patch (or, on cleanup, zero) the physical addresses of ION buffers
 * referenced by fd into a modified command buffer, using 32-bit
 * qseecom_sg_entry records.
 *
 * For each valid fd in the request/listener-response, the ION buffer's
 * sg table is resolved and either a single 32-bit phys addr (one sg
 * entry) or an array of qseecom_sg_entry (multiple entries) is written
 * at cmd_buf_offset. When !cleanup, the buffer is cleaned/invalidated
 * and the per-fd sglistinfo bookkeeping is recorded; when cleanup, the
 * entries are zeroed and the cache invalidated.
 *
 * Returns 0 on success, -ENOMEM on any failure (all error paths funnel
 * through 'err' which frees the current ION handle).
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* msg is a listener response or a client request per data->type. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* fd <= 0 means slot unused; skip it. */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch one 32-bit phys addr in place. */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: write a qseecom_sg_entry array. */
			struct qseecom_sg_entry *update;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
						(req->cmd_req_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record per-fd sglist info for the whitelist path. */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3541
3542static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3543 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3544{
3545 struct scatterlist *sg = sg_ptr->sgl;
3546 struct qseecom_sg_entry_64bit *sg_entry;
3547 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3548 void *buf;
3549 uint i;
3550 size_t size;
3551 dma_addr_t coh_pmem;
3552
3553 if (fd_idx >= MAX_ION_FD) {
3554 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3555 return -ENOMEM;
3556 }
3557 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3558 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3559 /* Allocate a contiguous kernel buffer */
3560 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3561 size = (size + PAGE_SIZE) & PAGE_MASK;
3562 buf = dma_alloc_coherent(qseecom.pdev,
3563 size, &coh_pmem, GFP_KERNEL);
3564 if (buf == NULL) {
3565 pr_err("failed to alloc memory for sg buf\n");
3566 return -ENOMEM;
3567 }
3568 /* update qseecom_sg_list_buf_hdr_64bit */
3569 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3570 buf_hdr->new_buf_phys_addr = coh_pmem;
3571 buf_hdr->nents_total = sg_ptr->nents;
3572 /* save the left sg entries into new allocated buf */
3573 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3574 for (i = 0; i < sg_ptr->nents; i++) {
3575 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3576 sg_entry->len = sg->length;
3577 sg_entry++;
3578 sg = sg_next(sg);
3579 }
3580
3581 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3582 data->client.sec_buf_fd[fd_idx].vbase = buf;
3583 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3584 data->client.sec_buf_fd[fd_idx].size = size;
3585
3586 return 0;
3587}
3588
/*
 * 64-bit counterpart of __qseecom_update_cmd_buf(): patch (or zero, on
 * cleanup) the physical addresses of fd-referenced ION buffers into a
 * modified command buffer using 64-bit sg-entry records.
 *
 * Unlike the 32-bit variant, an sg table larger than QSEECOM_MAX_SG_ENTRY
 * is not an error: the entries are spilled into a side buffer via
 * __qseecom_allocate_sg_list_buffer() (freed again on cleanup).
 *
 * Returns 0 on success, -ENOMEM on failure; the error path releases
 * every tracked sec_buf_fd allocation and the current ION handle.
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* msg is a listener response or a client request per data->type. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* fd <= 0 means slot unused; skip it. */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			/* Too many entries: spill into (or free) side buf. */
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch one 64-bit phys addr in place. */
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: write qseecom_sg_entry_64bit[]. */
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
					req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
				(lstnr_resp->ifd_data[i].cmd_buf_offset >
					(lstnr_resp->resp_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
						(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record per-fd sglist info for the whitelist path. */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* Release every spilled sg-list buffer and the current handle. */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3787
3788static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3789 void __user *argp,
3790 bool is_64bit_addr)
3791{
3792 int ret = 0;
3793 int i;
3794 struct qseecom_send_modfd_cmd_req req;
3795 struct qseecom_send_cmd_req send_cmd_req;
3796
3797 ret = copy_from_user(&req, argp, sizeof(req));
3798 if (ret) {
3799 pr_err("copy_from_user failed\n");
3800 return ret;
3801 }
3802
3803 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3804 send_cmd_req.cmd_req_len = req.cmd_req_len;
3805 send_cmd_req.resp_buf = req.resp_buf;
3806 send_cmd_req.resp_len = req.resp_len;
3807
3808 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3809 return -EINVAL;
3810
3811 /* validate offsets */
3812 for (i = 0; i < MAX_ION_FD; i++) {
3813 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3814 pr_err("Invalid offset %d = 0x%x\n",
3815 i, req.ifd_data[i].cmd_buf_offset);
3816 return -EINVAL;
3817 }
3818 }
3819 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3820 (uintptr_t)req.cmd_req_buf);
3821 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3822 (uintptr_t)req.resp_buf);
3823
3824 if (!is_64bit_addr) {
3825 ret = __qseecom_update_cmd_buf(&req, false, data);
3826 if (ret)
3827 return ret;
3828 ret = __qseecom_send_cmd(data, &send_cmd_req);
3829 if (ret)
3830 return ret;
3831 ret = __qseecom_update_cmd_buf(&req, true, data);
3832 if (ret)
3833 return ret;
3834 } else {
3835 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3836 if (ret)
3837 return ret;
3838 ret = __qseecom_send_cmd(data, &send_cmd_req);
3839 if (ret)
3840 return ret;
3841 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3842 if (ret)
3843 return ret;
3844 }
3845
3846 return ret;
3847}
3848
/* Modified-fd send using the 32-bit sg-entry layout. */
static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, false);
}
3854
/* ioctl entry: send-modfd-cmd using 64-bit ION buffer addressing. */
static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, true);
}
3860
3861
3862
3863static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3864 struct qseecom_registered_listener_list *svc)
3865{
3866 int ret;
3867
Zhen Kongf5087172018-10-11 17:22:05 -07003868 ret = (svc->rcv_req_flag == 1);
Zhen Kong26e62742018-05-04 17:19:06 -07003869 return ret || data->abort || svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003870}
3871
/*
 * Block the listener service identified by data->listener.id until a
 * request arrives from TZ, or until the listener/client is aborted.
 *
 * Return: 0 when a request is pending, -ENODATA for an unknown listener,
 * -ERESTARTSYS if interrupted by a signal, -ENODEV on abort.
 */
static int qseecom_receive_req(struct qseecom_dev_handle *data)
{
	int ret = 0;
	struct qseecom_registered_listener_list *this_lstnr;

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (!this_lstnr) {
		pr_err("Invalid listener ID\n");
		return -ENODATA;
	}
	/* discard any stale request indication before waiting */
	this_lstnr->rcv_req_flag = 0;

	while (1) {
		if (wait_event_freezable(this_lstnr->rcv_req_wq,
				__qseecom_listener_has_rcvd_req(data,
				this_lstnr))) {
			pr_warn("Interrupted: exiting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			/* woken up for different reason */
			return -ERESTARTSYS;
		}

		/* the wake-up may have been an abort, not a request */
		if (data->abort || this_lstnr->abort) {
			pr_err("Aborting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			return -ENODEV;
		}
		/* consume the request flag and hand it to the caller */
		this_lstnr->rcv_req_flag = 0;
		break;
	}
	return ret;
}
3904
3905static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3906{
3907 unsigned char app_arch = 0;
3908 struct elf32_hdr *ehdr;
3909 struct elf64_hdr *ehdr64;
3910
3911 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3912
3913 switch (app_arch) {
3914 case ELFCLASS32: {
3915 ehdr = (struct elf32_hdr *)fw_entry->data;
3916 if (fw_entry->size < sizeof(*ehdr)) {
3917 pr_err("%s: Not big enough to be an elf32 header\n",
3918 qseecom.pdev->init_name);
3919 return false;
3920 }
3921 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3922 pr_err("%s: Not an elf32 header\n",
3923 qseecom.pdev->init_name);
3924 return false;
3925 }
3926 if (ehdr->e_phnum == 0) {
3927 pr_err("%s: No loadable segments\n",
3928 qseecom.pdev->init_name);
3929 return false;
3930 }
3931 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3932 sizeof(struct elf32_hdr) > fw_entry->size) {
3933 pr_err("%s: Program headers not within mdt\n",
3934 qseecom.pdev->init_name);
3935 return false;
3936 }
3937 break;
3938 }
3939 case ELFCLASS64: {
3940 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3941 if (fw_entry->size < sizeof(*ehdr64)) {
3942 pr_err("%s: Not big enough to be an elf64 header\n",
3943 qseecom.pdev->init_name);
3944 return false;
3945 }
3946 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3947 pr_err("%s: Not an elf64 header\n",
3948 qseecom.pdev->init_name);
3949 return false;
3950 }
3951 if (ehdr64->e_phnum == 0) {
3952 pr_err("%s: No loadable segments\n",
3953 qseecom.pdev->init_name);
3954 return false;
3955 }
3956 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3957 sizeof(struct elf64_hdr) > fw_entry->size) {
3958 pr_err("%s: Program headers not within mdt\n",
3959 qseecom.pdev->init_name);
3960 return false;
3961 }
3962 break;
3963 }
3964 default: {
3965 pr_err("QSEE app arch %u is not supported\n", app_arch);
3966 return false;
3967 }
3968 }
3969 return true;
3970}
3971
/*
 * Compute the total size of a split firmware image (<appname>.mdt plus
 * <appname>.b00..bNN blobs) and report its ELF class.
 *
 * @appname:  base name of the firmware files
 * @fw_size:  out - summed size of all image pieces (0 on failure)
 * @app_arch: out - ELFCLASS32 or ELFCLASS64 from the mdt header
 *
 * Return: 0 on success (the final request_firmware() result), negative
 * errno on failure.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with request_firmware\n");
		ret = -EIO;
		goto err;
	}
	/* validate the ELF header before trusting e_phnum below */
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* one .bNN blob per program header; sum their sizes */
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		/* guard the 32-bit total against wrap-around */
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}

	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
4034
/*
 * Concatenate the split firmware image (<appname>.mdt + .b00..bNN) into
 * the caller-provided buffer and fill in the load request lengths.
 *
 * @appname:  base name of the firmware files
 * @img_data: destination buffer of at least fw_size bytes
 * @fw_size:  capacity of img_data (as computed by __qseecom_get_fw_size)
 * @load_req: out - mdt_len and img_len are populated
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
					uint32_t fw_size,
					struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}

	load_req->img_len = fw_entry->size;
	/* the mdt alone must fit in the destination buffer */
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/

	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, app_arch);
		ret = -EIO;
		goto err;
	}
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* append each .bNN blob, rechecking the running total every pass */
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		/* reject both u32 overflow and buffer overrun */
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	release_firmware(fw_entry);
	return ret;
}
4106
/*
 * Allocate and kernel-map an ION buffer from the TA heap for a firmware
 * image, retrying on allocation failure.
 *
 * Called with app_access_lock held; the retry path drops and re-takes the
 * lock around the msleep() so other clients are not stalled while we wait
 * for the heap to free up.
 *
 * @pihandle: out - the allocated ION handle
 * @data:     out - kernel virtual address of the buffer
 * @fw_size:  requested buffer size in bytes
 * @paddr:    out - physical address of the buffer
 *
 * Return: 0 on success, -ENOMEM/-EIO on failure (resources released).
 */
static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
{
	size_t len = 0;
	int ret = 0;
	ion_phys_addr_t pa;
	struct ion_handle *ihandle = NULL;
	u8 *img_data = NULL;
	int retry = 0;

	do {
		/* first pass skips the sleep; later passes back off */
		if (retry++) {
			mutex_unlock(&app_access_lock);
			msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
			mutex_lock(&app_access_lock);
		}
		ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
			SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), 0);
	} while (IS_ERR_OR_NULL(ihandle) &&
			(retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));

	if (IS_ERR_OR_NULL(ihandle)) {
		pr_err("ION alloc failed\n");
		return -ENOMEM;
	}
	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
					ihandle);

	if (IS_ERR_OR_NULL(img_data)) {
		pr_err("ION memory mapping for image loading failed\n");
		ret = -ENOMEM;
		goto exit_ion_free;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
	if (ret) {
		pr_err("physical memory retrieval failure\n");
		ret = -EIO;
		goto exit_ion_unmap_kernel;
	}

	*pihandle = ihandle;
	*data = img_data;
	*paddr = pa;
	return ret;

exit_ion_unmap_kernel:
	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
exit_ion_free:
	ion_free(qseecom.ion_clnt, ihandle);
	ihandle = NULL;
	return ret;
}
4160
4161static void __qseecom_free_img_data(struct ion_handle **ihandle)
4162{
4163 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4164 ion_free(qseecom.ion_clnt, *ihandle);
4165 *ihandle = NULL;
4166}
4167
/*
 * Load a trusted app's firmware image into QSEE and return its app id.
 *
 * Steps: size the split image, load the matching common library if not
 * yet resident, stage the image in an ION buffer, vote for bus/clocks,
 * flush the cache, issue QSEOS_APP_START_COMMAND via SCM, then unwind the
 * votes and free the staging buffer.
 *
 * Called with app_access_lock held (required by the ION allocation retry
 * path and the SCM sequencing).
 *
 * @data:    client handle (used for bw/clock voting bookkeeping)
 * @appname: TA name; also the firmware file base name
 * @app_id:  out - app id assigned by QSEE
 *
 * Return: 0 on success; -EEXIST if QSEE reports the app already loaded
 * (caller may re-lookup); other negative errno on failure.
 */
static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
					uint32_t *app_id)
{
	int ret = -1;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	struct ion_handle *ihandle = NULL;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;

	if (!data || !appname || !app_id) {
		pr_err("Null pointer to data or appname or appid\n");
		return -EINVAL;
	}
	*app_id = 0;
	if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
		return -EIO;
	data->client.app_arch = app_arch;

	/* Check and load cmnlib */
	if (qseecom.qsee_version > QSEEE_VERSION_00) {
		if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
			ret = qseecom_load_commonlib_image(data, "cmnlib");
			if (ret) {
				pr_err("failed to load cmnlib\n");
				return -EIO;
			}
			qseecom.commonlib_loaded = true;
			pr_debug("cmnlib is loaded\n");
		}

		if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
			ret = qseecom_load_commonlib_image(data, "cmnlib64");
			if (ret) {
				pr_err("failed to load cmnlib64\n");
				return -EIO;
			}
			qseecom.commonlib64_loaded = true;
			pr_debug("cmnlib64 is loaded\n");
		}
	}

	ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
	if (ret)
		return ret;

	ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}

	/* Populate the load_req parameters */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
		/* NOTE(review): the two self-assignments below are no-ops;
		 * mdt_len/img_len were already set by __qseecom_get_fw_data.
		 */
		load_req.mdt_len = load_req.mdt_len;
		load_req.img_len = load_req.img_len;
		strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
		load_req.phy_addr = (uint32_t)pa;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_app_ireq);
	} else {
		load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
		load_req_64bit.mdt_len = load_req.mdt_len;
		load_req_64bit.img_len = load_req.img_len;
		strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
		load_req_64bit.phy_addr = (uint64_t)pa;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* TZ reads the image directly, so flush our writes first */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
			&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d, result %x\n",
			ret, resp.result);
		/* -EEXIST lets the caller re-run the app lookup */
		if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
			ret = -EEXIST;
		else
			ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		*app_id = resp.data;
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener servicing before the load completes */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret)
			pr_err("process_incomplete_cmd FAILED\n");
		else
			*app_id = resp.data;
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&ihandle);
	return ret;
}
4315
/*
 * Load a QSEE common library image ("cmnlib" or "cmnlib64") into TZ.
 *
 * Mirrors __qseecom_load_fw(): stage the image in an ION buffer, vote for
 * bus bandwidth and the SFPB clock, flush the cache, then issue
 * QSEOS_LOAD_SERV_IMAGE_COMMAND via SCM and unwind in reverse order.
 *
 * @data:        client handle (for bw/clock voting bookkeeping)
 * @cmnlib_name: library name; also the firmware file base name
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;
	struct ion_handle *cmnlib_ion_handle = NULL;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	/* pick the 32/64-bit request layout based on the QSEE version */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* TZ reads the staged image directly; flush our writes first */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener servicing before the load completes */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&cmnlib_ion_handle);
	return ret;
}
4436
4437static int qseecom_unload_commonlib_image(void)
4438{
4439 int ret = -EINVAL;
4440 struct qseecom_unload_lib_image_ireq unload_req = {0};
4441 struct qseecom_command_scm_resp resp;
4442
4443 /* Populate the remaining parameters */
4444 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4445
4446 /* SCM_CALL to load the image */
4447 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4448 sizeof(struct qseecom_unload_lib_image_ireq),
4449 &resp, sizeof(resp));
4450 if (ret) {
4451 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4452 ret = -EIO;
4453 } else {
4454 switch (resp.result) {
4455 case QSEOS_RESULT_SUCCESS:
4456 break;
4457 case QSEOS_RESULT_FAILURE:
4458 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4459 break;
4460 default:
4461 pr_err("scm call return unknown response %d\n",
4462 resp.result);
4463 ret = -EINVAL;
4464 break;
4465 }
4466 }
4467
4468 return ret;
4469}
4470
4471int qseecom_start_app(struct qseecom_handle **handle,
4472 char *app_name, uint32_t size)
4473{
4474 int32_t ret = 0;
4475 unsigned long flags = 0;
4476 struct qseecom_dev_handle *data = NULL;
4477 struct qseecom_check_app_ireq app_ireq;
4478 struct qseecom_registered_app_list *entry = NULL;
4479 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4480 bool found_app = false;
4481 size_t len;
4482 ion_phys_addr_t pa;
4483 uint32_t fw_size, app_arch;
4484 uint32_t app_id = 0;
4485
4486 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4487 pr_err("Not allowed to be called in %d state\n",
4488 atomic_read(&qseecom.qseecom_state));
4489 return -EPERM;
4490 }
4491 if (!app_name) {
4492 pr_err("failed to get the app name\n");
4493 return -EINVAL;
4494 }
4495
Zhen Kong64a6d7282017-06-16 11:55:07 -07004496 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004497 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004498 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004499 return -EINVAL;
4500 }
4501
4502 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4503 if (!(*handle))
4504 return -ENOMEM;
4505
4506 data = kzalloc(sizeof(*data), GFP_KERNEL);
4507 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304508 ret = -ENOMEM;
4509 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004510 }
4511 data->abort = 0;
4512 data->type = QSEECOM_CLIENT_APP;
4513 data->released = false;
4514 data->client.sb_length = size;
4515 data->client.user_virt_sb_base = 0;
4516 data->client.ihandle = NULL;
4517
4518 init_waitqueue_head(&data->abort_wq);
4519
4520 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4521 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4522 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4523 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304524 ret = -ENOMEM;
4525 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004526 }
4527 mutex_lock(&app_access_lock);
4528
Zhen Kong5d02be92018-05-29 16:17:29 -07004529recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004530 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4531 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4532 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4533 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304534 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004535
4536 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4537 if (app_id) {
4538 pr_warn("App id %d for [%s] app exists\n", app_id,
4539 (char *)app_ireq.app_name);
4540 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4541 list_for_each_entry(entry,
4542 &qseecom.registered_app_list_head, list){
4543 if (entry->app_id == app_id) {
4544 entry->ref_cnt++;
4545 found_app = true;
4546 break;
4547 }
4548 }
4549 spin_unlock_irqrestore(
4550 &qseecom.registered_app_list_lock, flags);
4551 if (!found_app)
4552 pr_warn("App_id %d [%s] was loaded but not registered\n",
4553 ret, (char *)app_ireq.app_name);
4554 } else {
4555 /* load the app and get the app_id */
4556 pr_debug("%s: Loading app for the first time'\n",
4557 qseecom.pdev->init_name);
4558 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004559 if (ret == -EEXIST) {
4560 pr_err("recheck if TA %s is loaded\n", app_name);
4561 goto recheck;
4562 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304563 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004564 }
4565 data->client.app_id = app_id;
4566 if (!found_app) {
4567 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4568 if (!entry) {
4569 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304570 ret = -ENOMEM;
4571 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004572 }
4573 entry->app_id = app_id;
4574 entry->ref_cnt = 1;
4575 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4576 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4577 ret = -EIO;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304578 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004579 }
4580 entry->app_arch = app_arch;
4581 entry->app_blocked = false;
4582 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004583 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004584 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4585 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4586 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4587 flags);
4588 }
4589
4590 /* Get the physical address of the ION BUF */
4591 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4592 if (ret) {
4593 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4594 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304595 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004596 }
4597
4598 /* Populate the structure for sending scm call to load image */
4599 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4600 data->client.ihandle);
4601 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4602 pr_err("ION memory mapping for client shared buf failed\n");
4603 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304604 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004605 }
4606 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4607 data->client.sb_phys = (phys_addr_t)pa;
4608 (*handle)->dev = (void *)data;
4609 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4610 (*handle)->sbuf_len = data->client.sb_length;
4611
4612 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4613 if (!kclient_entry) {
4614 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304615 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004616 }
4617 kclient_entry->handle = *handle;
4618
4619 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4620 list_add_tail(&kclient_entry->list,
4621 &qseecom.registered_kclient_list_head);
4622 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4623
4624 mutex_unlock(&app_access_lock);
4625 return 0;
4626
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304627exit_ion_unmap_kernel:
4628 if (!IS_ERR_OR_NULL(data->client.ihandle))
4629 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4630exit_entry_free:
4631 kfree(entry);
4632exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004633 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304634 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4635 ion_free(qseecom.ion_clnt, data->client.ihandle);
4636 data->client.ihandle = NULL;
4637 }
4638exit_data_free:
4639 kfree(data);
4640exit_handle_free:
4641 if (*handle) {
4642 kfree(*handle);
4643 *handle = NULL;
4644 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004645 return ret;
4646}
4647EXPORT_SYMBOL(qseecom_start_app);
4648
/*
 * Kernel-client API: shut down a trusted app started by
 * qseecom_start_app() and release the handle.
 *
 * Removes the kclient entry from the registered list, unloads the app,
 * and on success frees the handle and clears *handle.
 *
 * Return: 0 on success; -EINVAL if the handle is NULL or not found;
 * otherwise the qseecom_unload_app() error (handle left intact).
 */
int qseecom_shutdown_app(struct qseecom_handle **handle)
{
	int ret = -EINVAL;
	struct qseecom_dev_handle *data;

	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	bool found_handle = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
			atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if ((handle == NULL)  || (*handle == NULL)) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data =	(struct qseecom_dev_handle *) ((*handle)->dev);
	mutex_lock(&app_access_lock);

	/* unlink this kernel client's entry under the list lock */
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
				list) {
		if (kclient->handle == (*handle)) {
			list_del(&kclient->list);
			found_handle = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
	if (!found_handle)
		pr_err("Unable to find the handle, exiting\n");
	else
		ret = qseecom_unload_app(data, false);

	mutex_unlock(&app_access_lock);
	/* free everything only after a clean unload */
	if (ret == 0) {
		kzfree(data);
		kzfree(*handle);
		kzfree(kclient);
		*handle = NULL;
	}

	return ret;
}
4696EXPORT_SYMBOL(qseecom_shutdown_app);
4697
/*
 * Kernel-client API: send a command to a started trusted app through the
 * handle's shared buffer.
 *
 * Handles bus-bandwidth scaling and, on targets where HLOS owns the
 * crypto clock, temporarily votes the clock on if nothing else has.
 *
 * @handle:   handle returned by qseecom_start_app()
 * @send_buf: command buffer (inside the shared buffer)
 * @sbuf_len: command buffer length
 * @resp_buf: response buffer (inside the shared buffer)
 * @rbuf_len: response buffer length
 *
 * Return: 0 on success, negative errno on failure.
 */
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
			atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
						ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		/* remember to drop our temporary clock votes afterwards */
		perf_enabled = true;
	}
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req);
	data->use_legacy_cmd = false;
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
4776EXPORT_SYMBOL(qseecom_send_command);
4777
4778int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4779{
4780 int ret = 0;
4781
4782 if ((handle == NULL) || (handle->dev == NULL)) {
4783 pr_err("No valid kernel client\n");
4784 return -EINVAL;
4785 }
4786 if (high) {
4787 if (qseecom.support_bus_scaling) {
4788 mutex_lock(&qsee_bw_mutex);
4789 __qseecom_register_bus_bandwidth_needs(handle->dev,
4790 HIGH);
4791 mutex_unlock(&qsee_bw_mutex);
4792 } else {
4793 ret = qseecom_perf_enable(handle->dev);
4794 if (ret)
4795 pr_err("Failed to vote for clock with err %d\n",
4796 ret);
4797 }
4798 } else {
4799 if (!qseecom.support_bus_scaling) {
4800 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4801 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4802 } else {
4803 mutex_lock(&qsee_bw_mutex);
4804 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4805 mutex_unlock(&qsee_bw_mutex);
4806 }
4807 }
4808 return ret;
4809}
4810EXPORT_SYMBOL(qseecom_set_bandwidth);
4811
/*
 * Entry point for the smcinvoke driver: service a listener request that
 * TZ returned through an smcinvoke SCM call rather than a qseecom one.
 *
 * Unpacks the SCM return words into a qseecom response, runs the normal
 * listener processing with dummy app/client records, then packs the
 * final response back into desc->ret[] for the caller.
 *
 * @desc: SCM descriptor; ret[0]=result/req_cmd, ret[1]=session id,
 *        ret[2]=listener id (per the comments below)
 *
 * Return: 0 on success, negative errno on failure.
 */
int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
{
	struct qseecom_registered_app_list dummy_app_entry = { {0} };
	struct qseecom_dev_handle dummy_private_data = {0};
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	if (!desc) {
		pr_err("desc is NULL\n");
		return -EINVAL;
	}

	resp.result = desc->ret[0];	/*req_cmd*/
	resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
	resp.data = desc->ret[2];	/*listener_id*/

	/* stand-in records so the shared processing paths can run */
	dummy_private_data.client.app_id = desc->ret[1];
	dummy_app_entry.app_id = desc->ret[1];

	mutex_lock(&app_access_lock);
	if (qseecom.qsee_reentrancy_support)
		ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
					&dummy_private_data);
	else
		ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
					&resp);
	mutex_unlock(&app_access_lock);
	if (ret)
		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
			(int)desc->ret[0], (int)desc->ret[2],
			(int)desc->ret[1], ret);
	/* hand the processed response back to the smcinvoke caller */
	desc->ret[0] = resp.result;
	desc->ret[1] = resp.resp_type;
	desc->ret[2] = resp.data;
	return ret;
}
4848EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4849
/*
 * qseecom_send_resp() - signal that a listener response is ready
 * (non-reentrant path).
 *
 * Sets the global send_resp_flag and wakes any thread sleeping on
 * send_resp_wq. Always returns 0.
 */
static int qseecom_send_resp(void)
{
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4856
4857static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4858{
4859 struct qseecom_registered_listener_list *this_lstnr = NULL;
4860
4861 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4862 this_lstnr = __qseecom_find_svc(data->listener.id);
4863 if (this_lstnr == NULL)
4864 return -EINVAL;
4865 qseecom.send_resp_flag = 1;
4866 this_lstnr->send_resp_flag = 1;
4867 wake_up_interruptible(&qseecom.send_resp_wq);
4868 return 0;
4869}
4870
/*
 * __validate_send_modfd_resp_inputs() - sanity-check a listener's modified
 * response before it is copied into the shared buffer.
 * @data:       listener device handle (only checked for NULL here)
 * @resp:       user-supplied response, already copied into kernel space
 * @this_lstnr: registered listener owning the shared buffer
 *
 * Verifies that the response buffer pointer and length fall entirely
 * inside the listener's shared buffer, that the pointer arithmetic used
 * for the range checks cannot overflow, and that every ion fd
 * cmd_buf_offset lies within the response. Returns 0 when everything
 * checks out, -EINVAL otherwise.
 */
static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
			struct qseecom_send_modfd_listener_resp *resp,
			struct qseecom_registered_listener_list *this_lstnr)
{
	int i;

	if (!data || !resp || !this_lstnr) {
		pr_err("listener handle or resp msg is null\n");
		return -EINVAL;
	}

	if (resp->resp_buf_ptr == NULL) {
		pr_err("resp buffer is null\n");
		return -EINVAL;
	}
	/* validate resp buf length */
	if ((resp->resp_len == 0) ||
			(resp->resp_len > this_lstnr->sb_length)) {
		pr_err("resp buf length %d not valid\n", resp->resp_len);
		return -EINVAL;
	}

	/* overflow checks must precede the range checks below */
	if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)this_lstnr->user_virt_sb_base >
					(ULONG_MAX - this_lstnr->sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* validate resp buf: must start and end inside the shared buffer */
	if (((uintptr_t)resp->resp_buf_ptr <
		(uintptr_t)this_lstnr->user_virt_sb_base) ||
		((uintptr_t)resp->resp_buf_ptr >=
		((uintptr_t)this_lstnr->user_virt_sb_base +
			this_lstnr->sb_length)) ||
		(((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
		((uintptr_t)this_lstnr->user_virt_sb_base +
			this_lstnr->sb_length))) {
		pr_err("resp buf is out of shared buffer region\n");
		return -EINVAL;
	}

	/* validate offsets: each modified fd slot must point into the resp */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
			pr_err("Invalid offset %d = 0x%x\n",
				i, resp->ifd_data[i].cmd_buf_offset);
			return -EINVAL;
		}
	}

	return 0;
}
4926
4927static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4928 void __user *argp, bool is_64bit_addr)
4929{
4930 struct qseecom_send_modfd_listener_resp resp;
4931 struct qseecom_registered_listener_list *this_lstnr = NULL;
4932
4933 if (copy_from_user(&resp, argp, sizeof(resp))) {
4934 pr_err("copy_from_user failed");
4935 return -EINVAL;
4936 }
4937
4938 this_lstnr = __qseecom_find_svc(data->listener.id);
4939 if (this_lstnr == NULL)
4940 return -EINVAL;
4941
4942 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
4943 return -EINVAL;
4944
4945 resp.resp_buf_ptr = this_lstnr->sb_virt +
4946 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
4947
4948 if (!is_64bit_addr)
4949 __qseecom_update_cmd_buf(&resp, false, data);
4950 else
4951 __qseecom_update_cmd_buf_64(&resp, false, data);
4952 qseecom.send_resp_flag = 1;
4953 this_lstnr->send_resp_flag = 1;
4954 wake_up_interruptible(&qseecom.send_resp_wq);
4955 return 0;
4956}
4957
/* 32-bit address variant of the modfd listener response ioctl. */
static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, false);
}
4963
/* 64-bit address variant of the modfd listener response ioctl. */
static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, true);
}
4969
4970static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
4971 void __user *argp)
4972{
4973 struct qseecom_qseos_version_req req;
4974
4975 if (copy_from_user(&req, argp, sizeof(req))) {
4976 pr_err("copy_from_user failed");
4977 return -EINVAL;
4978 }
4979 req.qseos_version = qseecom.qseos_version;
4980 if (copy_to_user(argp, &req, sizeof(req))) {
4981 pr_err("copy_to_user failed");
4982 return -EINVAL;
4983 }
4984 return 0;
4985}
4986
/*
 * __qseecom_enable_clk() - take a reference on the CE clocks for @ce.
 * @ce: CLK_QSEE or CLK_CE_DRV; selects which qseecom_clk bundle to use.
 *
 * Reference counted under clk_access_lock: only the 0 -> 1 transition
 * actually prepares/enables the core, interface and bus clocks, in that
 * order; on a partial failure the already-enabled clocks are rolled back
 * in reverse via the error labels. Returns 0 on success, -EINVAL for an
 * unknown @ce, -EIO on an enable failure or counter saturation. No-op
 * returning 0 when no_clock_support is set.
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	/* already enabled by a previous caller: just take a reference */
	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

/* rollback in reverse order of the enables above */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
5054
5055static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5056{
5057 struct qseecom_clk *qclk;
5058
5059 if (qseecom.no_clock_support)
5060 return;
5061
5062 if (ce == CLK_QSEE)
5063 qclk = &qseecom.qsee;
5064 else
5065 qclk = &qseecom.ce_drv;
5066
5067 mutex_lock(&clk_access_lock);
5068
5069 if (qclk->clk_access_cnt == 0) {
5070 mutex_unlock(&clk_access_lock);
5071 return;
5072 }
5073
5074 if (qclk->clk_access_cnt == 1) {
5075 if (qclk->ce_clk != NULL)
5076 clk_disable_unprepare(qclk->ce_clk);
5077 if (qclk->ce_core_clk != NULL)
5078 clk_disable_unprepare(qclk->ce_core_clk);
5079 if (qclk->ce_bus_clk != NULL)
5080 clk_disable_unprepare(qclk->ce_bus_clk);
5081 }
5082 qclk->clk_access_cnt--;
5083 mutex_unlock(&clk_access_lock);
5084}
5085
/*
 * qsee_vote_for_clock() - take a bandwidth/clock vote on behalf of @data.
 * @data:     client handle; its perf_enabled / fast_load_enabled flag is
 *            set so the vote can be dropped on release.
 * @clk_type: CLK_DFAB (bus bandwidth) or CLK_SFPB (fast load).
 *
 * Votes are counted under qsee_bw_mutex. Only the first vote of a kind
 * raises the msm bus scale request: level 1 for DFAB alone, 2 for SFPB
 * alone, 3 when both kinds are active. When a CE core source clock
 * exists it is enabled together with the first request and rolled back
 * if the bus request fails. Returns 0 on success or the bus-scale/clock
 * error code; no-op returning 0 without a perf client or with
 * no_clock_support set.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			/* SFPB already voted: bump to the combined level 3 */
			if (qseecom.qsee_sfpb_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			/* DFAB already voted: bump to the combined level 3 */
			if (qseecom.qsee_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}

			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
5169
5170static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5171 int32_t clk_type)
5172{
5173 int32_t ret = 0;
5174 struct qseecom_clk *qclk;
5175
5176 qclk = &qseecom.qsee;
5177
5178 if (qseecom.no_clock_support)
5179 return;
5180 if (!qseecom.qsee_perf_client)
5181 return;
5182
5183 switch (clk_type) {
5184 case CLK_DFAB:
5185 mutex_lock(&qsee_bw_mutex);
5186 if (qseecom.qsee_bw_count == 0) {
5187 pr_err("Client error.Extra call to disable DFAB clk\n");
5188 mutex_unlock(&qsee_bw_mutex);
5189 return;
5190 }
5191
5192 if (qseecom.qsee_bw_count == 1) {
5193 if (qseecom.qsee_sfpb_bw_count > 0)
5194 ret = msm_bus_scale_client_update_request(
5195 qseecom.qsee_perf_client, 2);
5196 else {
5197 ret = msm_bus_scale_client_update_request(
5198 qseecom.qsee_perf_client, 0);
5199 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5200 __qseecom_disable_clk(CLK_QSEE);
5201 }
5202 if (ret)
5203 pr_err("SFPB Bandwidth req fail (%d)\n",
5204 ret);
5205 else {
5206 qseecom.qsee_bw_count--;
5207 data->perf_enabled = false;
5208 }
5209 } else {
5210 qseecom.qsee_bw_count--;
5211 data->perf_enabled = false;
5212 }
5213 mutex_unlock(&qsee_bw_mutex);
5214 break;
5215 case CLK_SFPB:
5216 mutex_lock(&qsee_bw_mutex);
5217 if (qseecom.qsee_sfpb_bw_count == 0) {
5218 pr_err("Client error.Extra call to disable SFPB clk\n");
5219 mutex_unlock(&qsee_bw_mutex);
5220 return;
5221 }
5222 if (qseecom.qsee_sfpb_bw_count == 1) {
5223 if (qseecom.qsee_bw_count > 0)
5224 ret = msm_bus_scale_client_update_request(
5225 qseecom.qsee_perf_client, 1);
5226 else {
5227 ret = msm_bus_scale_client_update_request(
5228 qseecom.qsee_perf_client, 0);
5229 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5230 __qseecom_disable_clk(CLK_QSEE);
5231 }
5232 if (ret)
5233 pr_err("SFPB Bandwidth req fail (%d)\n",
5234 ret);
5235 else {
5236 qseecom.qsee_sfpb_bw_count--;
5237 data->fast_load_enabled = false;
5238 }
5239 } else {
5240 qseecom.qsee_sfpb_bw_count--;
5241 data->fast_load_enabled = false;
5242 }
5243 mutex_unlock(&qsee_bw_mutex);
5244 break;
5245 default:
5246 pr_err("Clock type not defined\n");
5247 break;
5248 }
5249
5250}
5251
5252static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5253 void __user *argp)
5254{
5255 struct ion_handle *ihandle; /* Ion handle */
5256 struct qseecom_load_img_req load_img_req;
5257 int uret = 0;
5258 int ret;
5259 ion_phys_addr_t pa = 0;
5260 size_t len;
5261 struct qseecom_load_app_ireq load_req;
5262 struct qseecom_load_app_64bit_ireq load_req_64bit;
5263 struct qseecom_command_scm_resp resp;
5264 void *cmd_buf = NULL;
5265 size_t cmd_len;
5266 /* Copy the relevant information needed for loading the image */
5267 if (copy_from_user(&load_img_req,
5268 (void __user *)argp,
5269 sizeof(struct qseecom_load_img_req))) {
5270 pr_err("copy_from_user failed\n");
5271 return -EFAULT;
5272 }
5273
5274 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005275 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005276 load_img_req.ifd_data_fd);
5277 if (IS_ERR_OR_NULL(ihandle)) {
5278 pr_err("Ion client could not retrieve the handle\n");
5279 return -ENOMEM;
5280 }
5281
5282 /* Get the physical address of the ION BUF */
5283 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5284 if (ret) {
5285 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5286 ret);
5287 return ret;
5288 }
5289 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5290 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5291 len, load_img_req.mdt_len,
5292 load_img_req.img_len);
5293 return ret;
5294 }
5295 /* Populate the structure for sending scm call to load image */
5296 if (qseecom.qsee_version < QSEE_VERSION_40) {
5297 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5298 load_req.mdt_len = load_img_req.mdt_len;
5299 load_req.img_len = load_img_req.img_len;
5300 load_req.phy_addr = (uint32_t)pa;
5301 cmd_buf = (void *)&load_req;
5302 cmd_len = sizeof(struct qseecom_load_app_ireq);
5303 } else {
5304 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5305 load_req_64bit.mdt_len = load_img_req.mdt_len;
5306 load_req_64bit.img_len = load_img_req.img_len;
5307 load_req_64bit.phy_addr = (uint64_t)pa;
5308 cmd_buf = (void *)&load_req_64bit;
5309 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5310 }
5311
5312 if (qseecom.support_bus_scaling) {
5313 mutex_lock(&qsee_bw_mutex);
5314 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5315 mutex_unlock(&qsee_bw_mutex);
5316 if (ret) {
5317 ret = -EIO;
5318 goto exit_cpu_restore;
5319 }
5320 }
5321
5322 /* Vote for the SFPB clock */
5323 ret = __qseecom_enable_clk_scale_up(data);
5324 if (ret) {
5325 ret = -EIO;
5326 goto exit_register_bus_bandwidth_needs;
5327 }
5328 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5329 ION_IOC_CLEAN_INV_CACHES);
5330 if (ret) {
5331 pr_err("cache operation failed %d\n", ret);
5332 goto exit_disable_clock;
5333 }
5334 /* SCM_CALL to load the external elf */
5335 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5336 &resp, sizeof(resp));
5337 if (ret) {
5338 pr_err("scm_call to load failed : ret %d\n",
5339 ret);
5340 ret = -EFAULT;
5341 goto exit_disable_clock;
5342 }
5343
5344 switch (resp.result) {
5345 case QSEOS_RESULT_SUCCESS:
5346 break;
5347 case QSEOS_RESULT_INCOMPLETE:
5348 pr_err("%s: qseos result incomplete\n", __func__);
5349 ret = __qseecom_process_incomplete_cmd(data, &resp);
5350 if (ret)
5351 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5352 break;
5353 case QSEOS_RESULT_FAILURE:
5354 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5355 ret = -EFAULT;
5356 break;
5357 default:
5358 pr_err("scm_call response result %d not supported\n",
5359 resp.result);
5360 ret = -EFAULT;
5361 break;
5362 }
5363
5364exit_disable_clock:
5365 __qseecom_disable_clk_scale_down(data);
5366
5367exit_register_bus_bandwidth_needs:
5368 if (qseecom.support_bus_scaling) {
5369 mutex_lock(&qsee_bw_mutex);
5370 uret = qseecom_unregister_bus_bandwidth_needs(data);
5371 mutex_unlock(&qsee_bw_mutex);
5372 if (uret)
5373 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5374 uret, ret);
5375 }
5376
5377exit_cpu_restore:
5378 /* Deallocate the handle */
5379 if (!IS_ERR_OR_NULL(ihandle))
5380 ion_free(qseecom.ion_clnt, ihandle);
5381 return ret;
5382}
5383
5384static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5385{
5386 int ret = 0;
5387 struct qseecom_command_scm_resp resp;
5388 struct qseecom_unload_app_ireq req;
5389
5390 /* unavailable client app */
5391 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5392
5393 /* Populate the structure for sending scm call to unload image */
5394 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5395
5396 /* SCM_CALL to unload the external elf */
5397 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5398 sizeof(struct qseecom_unload_app_ireq),
5399 &resp, sizeof(resp));
5400 if (ret) {
5401 pr_err("scm_call to unload failed : ret %d\n",
5402 ret);
5403 ret = -EFAULT;
5404 goto qseecom_unload_external_elf_scm_err;
5405 }
5406 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5407 ret = __qseecom_process_incomplete_cmd(data, &resp);
5408 if (ret)
5409 pr_err("process_incomplete_cmd fail err: %d\n",
5410 ret);
5411 } else {
5412 if (resp.result != QSEOS_RESULT_SUCCESS) {
5413 pr_err("scm_call to unload image failed resp.result =%d\n",
5414 resp.result);
5415 ret = -EFAULT;
5416 }
5417 }
5418
5419qseecom_unload_external_elf_scm_err:
5420
5421 return ret;
5422}
5423
5424static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5425 void __user *argp)
5426{
5427
5428 int32_t ret;
5429 struct qseecom_qseos_app_load_query query_req;
5430 struct qseecom_check_app_ireq req;
5431 struct qseecom_registered_app_list *entry = NULL;
5432 unsigned long flags = 0;
5433 uint32_t app_arch = 0, app_id = 0;
5434 bool found_app = false;
5435
5436 /* Copy the relevant information needed for loading the image */
5437 if (copy_from_user(&query_req,
5438 (void __user *)argp,
5439 sizeof(struct qseecom_qseos_app_load_query))) {
5440 pr_err("copy_from_user failed\n");
5441 return -EFAULT;
5442 }
5443
5444 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5445 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5446 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5447
5448 ret = __qseecom_check_app_exists(req, &app_id);
5449 if (ret) {
5450 pr_err(" scm call to check if app is loaded failed");
5451 return ret; /* scm call failed */
5452 }
5453 if (app_id) {
5454 pr_debug("App id %d (%s) already exists\n", app_id,
5455 (char *)(req.app_name));
5456 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5457 list_for_each_entry(entry,
5458 &qseecom.registered_app_list_head, list){
5459 if (entry->app_id == app_id) {
5460 app_arch = entry->app_arch;
5461 entry->ref_cnt++;
5462 found_app = true;
5463 break;
5464 }
5465 }
5466 spin_unlock_irqrestore(
5467 &qseecom.registered_app_list_lock, flags);
5468 data->client.app_id = app_id;
5469 query_req.app_id = app_id;
5470 if (app_arch) {
5471 data->client.app_arch = app_arch;
5472 query_req.app_arch = app_arch;
5473 } else {
5474 data->client.app_arch = 0;
5475 query_req.app_arch = 0;
5476 }
5477 strlcpy(data->client.app_name, query_req.app_name,
5478 MAX_APP_NAME_SIZE);
5479 /*
5480 * If app was loaded by appsbl before and was not registered,
5481 * regiser this app now.
5482 */
5483 if (!found_app) {
5484 pr_debug("Register app %d [%s] which was loaded before\n",
5485 ret, (char *)query_req.app_name);
5486 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5487 if (!entry) {
5488 pr_err("kmalloc for app entry failed\n");
5489 return -ENOMEM;
5490 }
5491 entry->app_id = app_id;
5492 entry->ref_cnt = 1;
5493 entry->app_arch = data->client.app_arch;
5494 strlcpy(entry->app_name, data->client.app_name,
5495 MAX_APP_NAME_SIZE);
5496 entry->app_blocked = false;
5497 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005498 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005499 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5500 flags);
5501 list_add_tail(&entry->list,
5502 &qseecom.registered_app_list_head);
5503 spin_unlock_irqrestore(
5504 &qseecom.registered_app_list_lock, flags);
5505 }
5506 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5507 pr_err("copy_to_user failed\n");
5508 return -EFAULT;
5509 }
5510 return -EEXIST; /* app already loaded */
5511 } else {
5512 return 0; /* app not loaded */
5513 }
5514}
5515
5516static int __qseecom_get_ce_pipe_info(
5517 enum qseecom_key_management_usage_type usage,
5518 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5519{
5520 int ret = -EINVAL;
5521 int i, j;
5522 struct qseecom_ce_info_use *p = NULL;
5523 int total = 0;
5524 struct qseecom_ce_pipe_entry *pcepipe;
5525
5526 switch (usage) {
5527 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5528 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5529 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5530 if (qseecom.support_fde) {
5531 p = qseecom.ce_info.fde;
5532 total = qseecom.ce_info.num_fde;
5533 } else {
5534 pr_err("system does not support fde\n");
5535 return -EINVAL;
5536 }
5537 break;
5538 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5539 if (qseecom.support_pfe) {
5540 p = qseecom.ce_info.pfe;
5541 total = qseecom.ce_info.num_pfe;
5542 } else {
5543 pr_err("system does not support pfe\n");
5544 return -EINVAL;
5545 }
5546 break;
5547 default:
5548 pr_err("unsupported usage %d\n", usage);
5549 return -EINVAL;
5550 }
5551
5552 for (j = 0; j < total; j++) {
5553 if (p->unit_num == unit) {
5554 pcepipe = p->ce_pipe_entry;
5555 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5556 (*ce_hw)[i] = pcepipe->ce_num;
5557 *pipe = pcepipe->ce_pipe_pair;
5558 pcepipe++;
5559 }
5560 ret = 0;
5561 break;
5562 }
5563 p++;
5564 }
5565 return ret;
5566}
5567
/*
 * __qseecom_generate_and_save_key() - ask TZ to generate and persist a key.
 * @data:  client handle used for incomplete-cmd processing
 * @usage: key-management usage, validated against the supported range
 * @ireq:  prepared QSEOS_GENERATE_KEY request
 *
 * Holds the CLK_QSEE vote across the scm call.
 * QSEOS_RESULT_FAIL_KEY_ID_EXISTS is treated as success (the key is
 * already present). Returns 0 on success, -EFAULT/-EINVAL otherwise.
 */
static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_generate_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_generate_ireq),
		&resp, sizeof(resp));
	if (ret) {
		/* "key exists" surfaces as -EINVAL from scm; not an error */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
			pr_debug("Key ID exists.\n");
			ret = 0;
		} else {
			pr_err("scm call to generate key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto generate_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
		pr_debug("Key ID exists.\n");
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
				pr_debug("Key ID exists.\n");
				ret = 0;
			} else {
				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			}
		}
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("gen key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
generate_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5627
/*
 * __qseecom_delete_saved_key() - ask TZ to delete a stored key.
 * @data:  client handle used for incomplete-cmd processing
 * @usage: key-management usage, validated against the supported range
 * @ireq:  prepared key-delete request
 *
 * Holds the CLK_QSEE vote across the scm call. A
 * QSEOS_RESULT_FAIL_MAX_ATTEMPT response (password retry limit hit) is
 * mapped to -ERANGE. Returns 0 on success, -EFAULT/-EINVAL/-ERANGE on
 * failure.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* max-attempt surfaces as -EINVAL from scm; map to -ERANGE */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5688
5689static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5690 enum qseecom_key_management_usage_type usage,
5691 struct qseecom_key_select_ireq *ireq)
5692{
5693 struct qseecom_command_scm_resp resp;
5694 int ret;
5695
5696 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5697 usage >= QSEOS_KM_USAGE_MAX) {
5698 pr_err("Error:: unsupported usage %d\n", usage);
5699 return -EFAULT;
5700 }
5701 ret = __qseecom_enable_clk(CLK_QSEE);
5702 if (ret)
5703 return ret;
5704
5705 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5706 ret = __qseecom_enable_clk(CLK_CE_DRV);
5707 if (ret)
5708 return ret;
5709 }
5710
5711 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5712 ireq, sizeof(struct qseecom_key_select_ireq),
5713 &resp, sizeof(struct qseecom_command_scm_resp));
5714 if (ret) {
5715 if (ret == -EINVAL &&
5716 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5717 pr_debug("Max attempts to input password reached.\n");
5718 ret = -ERANGE;
5719 } else if (ret == -EINVAL &&
5720 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5721 pr_debug("Set Key operation under processing...\n");
5722 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5723 } else {
5724 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5725 ret);
5726 ret = -EFAULT;
5727 }
5728 goto set_key_exit;
5729 }
5730
5731 switch (resp.result) {
5732 case QSEOS_RESULT_SUCCESS:
5733 break;
5734 case QSEOS_RESULT_INCOMPLETE:
5735 ret = __qseecom_process_incomplete_cmd(data, &resp);
5736 if (ret) {
5737 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5738 resp.result);
5739 if (resp.result ==
5740 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5741 pr_debug("Set Key operation under processing...\n");
5742 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5743 }
5744 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5745 pr_debug("Max attempts to input password reached.\n");
5746 ret = -ERANGE;
5747 }
5748 }
5749 break;
5750 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5751 pr_debug("Max attempts to input password reached.\n");
5752 ret = -ERANGE;
5753 break;
5754 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5755 pr_debug("Set Key operation under processing...\n");
5756 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5757 break;
5758 case QSEOS_RESULT_FAILURE:
5759 default:
5760 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5761 ret = -EINVAL;
5762 break;
5763 }
5764set_key_exit:
5765 __qseecom_disable_clk(CLK_QSEE);
5766 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5767 __qseecom_disable_clk(CLK_CE_DRV);
5768 return ret;
5769}
5770
/*
 * __qseecom_update_current_key_user_info() - update the user info (hash)
 * bound to an existing key in TZ.
 * @data:  client handle used for incomplete-cmd processing
 * @usage: key-management usage, validated against the supported range
 * @ireq:  prepared userinfo-update request
 *
 * Holds the CLK_QSEE vote across the scm call. Passes
 * QSEOS_RESULT_FAIL_PENDING_OPERATION through so the caller can retry.
 * Returns 0 on success or a negative errno / pending-operation code.
 */
static int __qseecom_update_current_key_user_info(
			struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
					ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		/*
		 * NOTE(review): the pending-operation check below runs after
		 * (and may override) the incomplete-cmd result; the pr_err
		 * then reports whichever ret survived. Order is intentional
		 * here — pending takes precedence over the cmd outcome.
		 */
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5832
5833
5834static int qseecom_enable_ice_setup(int usage)
5835{
5836 int ret = 0;
5837
5838 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5839 ret = qcom_ice_setup_ice_hw("ufs", true);
5840 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5841 ret = qcom_ice_setup_ice_hw("sdcc", true);
5842
5843 return ret;
5844}
5845
5846static int qseecom_disable_ice_setup(int usage)
5847{
5848 int ret = 0;
5849
5850 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5851 ret = qcom_ice_setup_ice_hw("ufs", false);
5852 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5853 ret = qcom_ice_setup_ice_hw("sdcc", false);
5854
5855 return ret;
5856}
5857
5858static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5859{
5860 struct qseecom_ce_info_use *pce_info_use, *p;
5861 int total = 0;
5862 int i;
5863
5864 switch (usage) {
5865 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5866 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5867 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5868 p = qseecom.ce_info.fde;
5869 total = qseecom.ce_info.num_fde;
5870 break;
5871 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5872 p = qseecom.ce_info.pfe;
5873 total = qseecom.ce_info.num_pfe;
5874 break;
5875 default:
5876 pr_err("unsupported usage %d\n", usage);
5877 return -EINVAL;
5878 }
5879
5880 pce_info_use = NULL;
5881
5882 for (i = 0; i < total; i++) {
5883 if (p->unit_num == unit) {
5884 pce_info_use = p;
5885 break;
5886 }
5887 p++;
5888 }
5889 if (!pce_info_use) {
5890 pr_err("can not find %d\n", unit);
5891 return -EINVAL;
5892 }
5893 return pce_info_use->num_ce_pipe_entries;
5894}
5895
5896static int qseecom_create_key(struct qseecom_dev_handle *data,
5897 void __user *argp)
5898{
5899 int i;
5900 uint32_t *ce_hw = NULL;
5901 uint32_t pipe = 0;
5902 int ret = 0;
5903 uint32_t flags = 0;
5904 struct qseecom_create_key_req create_key_req;
5905 struct qseecom_key_generate_ireq generate_key_ireq;
5906 struct qseecom_key_select_ireq set_key_ireq;
5907 uint32_t entries = 0;
5908
5909 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5910 if (ret) {
5911 pr_err("copy_from_user failed\n");
5912 return ret;
5913 }
5914
5915 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5916 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5917 pr_err("unsupported usage %d\n", create_key_req.usage);
5918 ret = -EFAULT;
5919 return ret;
5920 }
5921 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5922 create_key_req.usage);
5923 if (entries <= 0) {
5924 pr_err("no ce instance for usage %d instance %d\n",
5925 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5926 ret = -EINVAL;
5927 return ret;
5928 }
5929
5930 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5931 if (!ce_hw) {
5932 ret = -ENOMEM;
5933 return ret;
5934 }
5935 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5936 DEFAULT_CE_INFO_UNIT);
5937 if (ret) {
5938 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5939 ret = -EINVAL;
5940 goto free_buf;
5941 }
5942
5943 if (qseecom.fde_key_size)
5944 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5945 else
5946 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5947
5948 generate_key_ireq.flags = flags;
5949 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5950 memset((void *)generate_key_ireq.key_id,
5951 0, QSEECOM_KEY_ID_SIZE);
5952 memset((void *)generate_key_ireq.hash32,
5953 0, QSEECOM_HASH_SIZE);
5954 memcpy((void *)generate_key_ireq.key_id,
5955 (void *)key_id_array[create_key_req.usage].desc,
5956 QSEECOM_KEY_ID_SIZE);
5957 memcpy((void *)generate_key_ireq.hash32,
5958 (void *)create_key_req.hash32,
5959 QSEECOM_HASH_SIZE);
5960
5961 ret = __qseecom_generate_and_save_key(data,
5962 create_key_req.usage, &generate_key_ireq);
5963 if (ret) {
5964 pr_err("Failed to generate key on storage: %d\n", ret);
5965 goto free_buf;
5966 }
5967
5968 for (i = 0; i < entries; i++) {
5969 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5970 if (create_key_req.usage ==
5971 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5972 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5973 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5974
5975 } else if (create_key_req.usage ==
5976 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5977 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5978 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5979
5980 } else {
5981 set_key_ireq.ce = ce_hw[i];
5982 set_key_ireq.pipe = pipe;
5983 }
5984 set_key_ireq.flags = flags;
5985
5986 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5987 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5988 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5989 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5990 memcpy((void *)set_key_ireq.key_id,
5991 (void *)key_id_array[create_key_req.usage].desc,
5992 QSEECOM_KEY_ID_SIZE);
5993 memcpy((void *)set_key_ireq.hash32,
5994 (void *)create_key_req.hash32,
5995 QSEECOM_HASH_SIZE);
5996 /*
5997 * It will return false if it is GPCE based crypto instance or
5998 * ICE is setup properly
5999 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006000 ret = qseecom_enable_ice_setup(create_key_req.usage);
6001 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006002 goto free_buf;
6003
6004 do {
6005 ret = __qseecom_set_clear_ce_key(data,
6006 create_key_req.usage,
6007 &set_key_ireq);
6008 /*
6009 * wait a little before calling scm again to let other
6010 * processes run
6011 */
6012 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6013 msleep(50);
6014
6015 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6016
6017 qseecom_disable_ice_setup(create_key_req.usage);
6018
6019 if (ret) {
6020 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6021 pipe, ce_hw[i], ret);
6022 goto free_buf;
6023 } else {
6024 pr_err("Set the key successfully\n");
6025 if ((create_key_req.usage ==
6026 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6027 (create_key_req.usage ==
6028 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6029 goto free_buf;
6030 }
6031 }
6032
6033free_buf:
6034 kzfree(ce_hw);
6035 return ret;
6036}
6037
6038static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6039 void __user *argp)
6040{
6041 uint32_t *ce_hw = NULL;
6042 uint32_t pipe = 0;
6043 int ret = 0;
6044 uint32_t flags = 0;
6045 int i, j;
6046 struct qseecom_wipe_key_req wipe_key_req;
6047 struct qseecom_key_delete_ireq delete_key_ireq;
6048 struct qseecom_key_select_ireq clear_key_ireq;
6049 uint32_t entries = 0;
6050
6051 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6052 if (ret) {
6053 pr_err("copy_from_user failed\n");
6054 return ret;
6055 }
6056
6057 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6058 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6059 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6060 ret = -EFAULT;
6061 return ret;
6062 }
6063
6064 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6065 wipe_key_req.usage);
6066 if (entries <= 0) {
6067 pr_err("no ce instance for usage %d instance %d\n",
6068 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6069 ret = -EINVAL;
6070 return ret;
6071 }
6072
6073 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6074 if (!ce_hw) {
6075 ret = -ENOMEM;
6076 return ret;
6077 }
6078
6079 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6080 DEFAULT_CE_INFO_UNIT);
6081 if (ret) {
6082 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6083 ret = -EINVAL;
6084 goto free_buf;
6085 }
6086
6087 if (wipe_key_req.wipe_key_flag) {
6088 delete_key_ireq.flags = flags;
6089 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6090 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6091 memcpy((void *)delete_key_ireq.key_id,
6092 (void *)key_id_array[wipe_key_req.usage].desc,
6093 QSEECOM_KEY_ID_SIZE);
6094 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6095
6096 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6097 &delete_key_ireq);
6098 if (ret) {
6099 pr_err("Failed to delete key from ssd storage: %d\n",
6100 ret);
6101 ret = -EFAULT;
6102 goto free_buf;
6103 }
6104 }
6105
6106 for (j = 0; j < entries; j++) {
6107 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6108 if (wipe_key_req.usage ==
6109 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6110 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6111 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6112 } else if (wipe_key_req.usage ==
6113 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6114 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6115 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6116 } else {
6117 clear_key_ireq.ce = ce_hw[j];
6118 clear_key_ireq.pipe = pipe;
6119 }
6120 clear_key_ireq.flags = flags;
6121 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6122 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6123 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6124 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6125
6126 /*
6127 * It will return false if it is GPCE based crypto instance or
6128 * ICE is setup properly
6129 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006130 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6131 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006132 goto free_buf;
6133
6134 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6135 &clear_key_ireq);
6136
6137 qseecom_disable_ice_setup(wipe_key_req.usage);
6138
6139 if (ret) {
6140 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6141 pipe, ce_hw[j], ret);
6142 ret = -EFAULT;
6143 goto free_buf;
6144 }
6145 }
6146
6147free_buf:
6148 kzfree(ce_hw);
6149 return ret;
6150}
6151
6152static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6153 void __user *argp)
6154{
6155 int ret = 0;
6156 uint32_t flags = 0;
6157 struct qseecom_update_key_userinfo_req update_key_req;
6158 struct qseecom_key_userinfo_update_ireq ireq;
6159
6160 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6161 if (ret) {
6162 pr_err("copy_from_user failed\n");
6163 return ret;
6164 }
6165
6166 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6167 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6168 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6169 return -EFAULT;
6170 }
6171
6172 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6173
6174 if (qseecom.fde_key_size)
6175 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6176 else
6177 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6178
6179 ireq.flags = flags;
6180 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6181 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6182 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6183 memcpy((void *)ireq.key_id,
6184 (void *)key_id_array[update_key_req.usage].desc,
6185 QSEECOM_KEY_ID_SIZE);
6186 memcpy((void *)ireq.current_hash32,
6187 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6188 memcpy((void *)ireq.new_hash32,
6189 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6190
6191 do {
6192 ret = __qseecom_update_current_key_user_info(data,
6193 update_key_req.usage,
6194 &ireq);
6195 /*
6196 * wait a little before calling scm again to let other
6197 * processes run
6198 */
6199 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6200 msleep(50);
6201
6202 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6203 if (ret) {
6204 pr_err("Failed to update key info: %d\n", ret);
6205 return ret;
6206 }
6207 return ret;
6208
6209}
6210static int qseecom_is_es_activated(void __user *argp)
6211{
Zhen Kong26e62742018-05-04 17:19:06 -07006212 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006213 struct qseecom_command_scm_resp resp;
6214 int ret;
6215
6216 if (qseecom.qsee_version < QSEE_VERSION_04) {
6217 pr_err("invalid qsee version\n");
6218 return -ENODEV;
6219 }
6220
6221 if (argp == NULL) {
6222 pr_err("arg is null\n");
6223 return -EINVAL;
6224 }
6225
6226 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6227 &req, sizeof(req), &resp, sizeof(resp));
6228 if (ret) {
6229 pr_err("scm_call failed\n");
6230 return ret;
6231 }
6232
6233 req.is_activated = resp.result;
6234 ret = copy_to_user(argp, &req, sizeof(req));
6235 if (ret) {
6236 pr_err("copy_to_user failed\n");
6237 return ret;
6238 }
6239
6240 return 0;
6241}
6242
6243static int qseecom_save_partition_hash(void __user *argp)
6244{
6245 struct qseecom_save_partition_hash_req req;
6246 struct qseecom_command_scm_resp resp;
6247 int ret;
6248
6249 memset(&resp, 0x00, sizeof(resp));
6250
6251 if (qseecom.qsee_version < QSEE_VERSION_04) {
6252 pr_err("invalid qsee version\n");
6253 return -ENODEV;
6254 }
6255
6256 if (argp == NULL) {
6257 pr_err("arg is null\n");
6258 return -EINVAL;
6259 }
6260
6261 ret = copy_from_user(&req, argp, sizeof(req));
6262 if (ret) {
6263 pr_err("copy_from_user failed\n");
6264 return ret;
6265 }
6266
6267 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6268 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6269 if (ret) {
6270 pr_err("qseecom_scm_call failed\n");
6271 return ret;
6272 }
6273
6274 return 0;
6275}
6276
/*
 * qseecom_mdtp_cipher_dip() - cipher an MDTP DIP buffer via TZ.
 *
 * Copies the user's input buffer into a kernel bounce buffer, invokes
 * TZ_MDTP_CIPHER_DIP_ID through scm_call2() with physical addresses of
 * the in/out buffers, then copies the result back to userspace.
 *
 * @argp: userspace pointer to a struct qseecom_mdtp_cipher_dip_req
 *
 * The do { ... } while (0) block is used as a structured way to break
 * out to the common cleanup at the bottom on any failure.
 *
 * Return: 0 on success, negative errno on failure.
 * NOTE(review): a failed copy_from_user()/copy_to_user() propagates the
 * positive bytes-not-copied count instead of -EFAULT — confirm callers
 * only test for non-zero.
 */
static int qseecom_mdtp_cipher_dip(void __user *argp)
{
	struct qseecom_mdtp_cipher_dip_req req;
	u32 tzbuflenin, tzbuflenout;
	char *tzbufin = NULL, *tzbufout = NULL;
	struct scm_desc desc = {0};
	int ret;

	do {
		/* Copy the parameters from userspace */
		if (argp == NULL) {
			pr_err("arg is null\n");
			ret = -EINVAL;
			break;
		}

		ret = copy_from_user(&req, argp, sizeof(req));
		if (ret) {
			pr_err("copy_from_user failed, ret= %d\n", ret);
			break;
		}

		/* Reject null buffers, zero/oversized lengths, bad direction */
		if (req.in_buf == NULL || req.out_buf == NULL ||
			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
			req.direction > 1) {
			pr_err("invalid parameters\n");
			ret = -EINVAL;
			break;
		}

		/* Copy the input buffer from userspace to kernel space */
		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
		if (!tzbufin) {
			pr_err("error allocating in buffer\n");
			ret = -ENOMEM;
			break;
		}

		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
		if (ret) {
			pr_err("copy_from_user failed, ret=%d\n", ret);
			break;
		}

		/* Push input to memory before TZ reads it by phys addr */
		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);

		/* Prepare the output buffer in kernel space */
		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
		if (!tzbufout) {
			pr_err("error allocating out buffer\n");
			ret = -ENOMEM;
			break;
		}

		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);

		/* Send the command to TZ */
		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
		desc.args[0] = virt_to_phys(tzbufin);
		desc.args[1] = req.in_buf_size;
		desc.args[2] = virt_to_phys(tzbufout);
		desc.args[3] = req.out_buf_size;
		desc.args[4] = req.direction;

		/* QSEE clock must be up for the duration of the SCM call */
		ret = __qseecom_enable_clk(CLK_QSEE);
		if (ret)
			break;

		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);

		__qseecom_disable_clk(CLK_QSEE);

		if (ret) {
			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
				ret);
			break;
		}

		/* Copy the output buffer from kernel space to userspace */
		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
		if (ret) {
			pr_err("copy_to_user failed, ret=%d\n", ret);
			break;
		}
	} while (0);

	/* kzfree() zeroizes before freeing; tolerates NULL */
	kzfree(tzbufin);
	kzfree(tzbufout);

	return ret;
}
6372
/*
 * __qseecom_qteec_validate_msg() - sanity-check a QTEEC request before
 * any of its pointers are dereferenced or translated.
 *
 * Verifies that @data belongs to an initialized client app, that the
 * request/response buffers are non-NULL and of non-zero length, that
 * their lengths do not overflow, and that both buffers lie entirely
 * within the client's shared buffer region. The checks are ordered so
 * that each overflow guard runs before the arithmetic it protects.
 *
 * Return: 0 when the message is valid, -EINVAL/-EFAULT/-ENOMEM otherwise.
 */
static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req)
{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	/* Only listener-less client apps may issue QTEEC commands */
	if (data->type != QSEECOM_CLIENT_APP)
		return -EFAULT;

	/* Guard the req_len + resp_len additions below */
	if (req->req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if (req->req_len + req->resp_len > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
		(req->req_len + req->resp_len), data->client.sb_length);
		return -ENOMEM;
	}

	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* Both buffers must start inside the client's shared buffer */
	if (((uintptr_t)req->req_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->req_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if (((uintptr_t)req->resp_ptr <
			data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if ((req->req_len == 0) || (req->resp_len == 0)) {
		pr_err("cmd buf lengtgh/response buf length not valid\n");
		return -EINVAL;
	}

	/* Guard the ptr + len additions in the final range check */
	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
		pr_err("Integer overflow in req_len & req_ptr\n");
		return -EINVAL;
	}

	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_ptr\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Both buffers must also END inside the shared buffer */
	if ((((uintptr_t)req->req_ptr + req->req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_ptr + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
6447
6448static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6449 uint32_t fd_idx, struct sg_table *sg_ptr)
6450{
6451 struct scatterlist *sg = sg_ptr->sgl;
6452 struct qseecom_sg_entry *sg_entry;
6453 void *buf;
6454 uint i;
6455 size_t size;
6456 dma_addr_t coh_pmem;
6457
6458 if (fd_idx >= MAX_ION_FD) {
6459 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6460 return -ENOMEM;
6461 }
6462 /*
6463 * Allocate a buffer, populate it with number of entry plus
6464 * each sg entry's phy addr and length; then return the
6465 * phy_addr of the buffer.
6466 */
6467 size = sizeof(uint32_t) +
6468 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6469 size = (size + PAGE_SIZE) & PAGE_MASK;
6470 buf = dma_alloc_coherent(qseecom.pdev,
6471 size, &coh_pmem, GFP_KERNEL);
6472 if (buf == NULL) {
6473 pr_err("failed to alloc memory for sg buf\n");
6474 return -ENOMEM;
6475 }
6476 *(uint32_t *)buf = sg_ptr->nents;
6477 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6478 for (i = 0; i < sg_ptr->nents; i++) {
6479 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6480 sg_entry->len = sg->length;
6481 sg_entry++;
6482 sg = sg_next(sg);
6483 }
6484 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6485 data->client.sec_buf_fd[fd_idx].vbase = buf;
6486 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6487 data->client.sec_buf_fd[fd_idx].size = size;
6488 return 0;
6489}
6490
/*
 * __qseecom_update_qteec_req_buf() - patch (or un-patch) ion-fd memrefs
 * inside a QTEEC modfd request buffer.
 *
 * For each populated ifd_data[] slot, imports the ion fd, locates the
 * 32-bit word at cmd_buf_offset inside the request buffer, and either:
 *   - cleanup == false: writes the buffer's physical address there
 *     (allocating a TZ-visible sg descriptor first when the word is 0,
 *     i.e. a pre-allocated secure-heap fd), and records sglist info;
 *   - cleanup == true:  frees any pre-allocated descriptor, zeroes the
 *     patched word, and invalidates caches.
 *
 * Return: 0 on success; -EINVAL on bad arguments; -ENOMEM on import,
 * sg-table, or cache-op failures (the err path collapses all of these).
 *
 * NOTE(review): the -EINVAL returns after a successful
 * ion_import_dma_buf_fd() appear to leak ihandle (they bypass the
 * ion_free() in the err path) — confirm against the ion API.
 */
static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
			struct qseecom_dev_handle *data, bool cleanup)
{
	struct ion_handle *ihandle;
	int ret = 0;
	int i = 0;
	uint32_t *update;
	struct sg_table *sg_ptr = NULL;
	struct scatterlist *sg;
	struct qseecom_param_memref *memref;

	if (req == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	for (i = 0; i < MAX_ION_FD; i++) {
		if (req->ifd_data[i].fd > 0) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			/* Offset must leave room for one uint32_t patch word */
			if ((req->req_len < sizeof(uint32_t)) ||
				(req->ifd_data[i].cmd_buf_offset >
				req->req_len - sizeof(uint32_t))) {
				pr_err("Invalid offset/req len 0x%x/0x%x\n",
					req->req_len,
					req->ifd_data[i].cmd_buf_offset);
				return -EINVAL;
			}
			update = (uint32_t *)((char *) req->req_ptr +
				req->ifd_data[i].cmd_buf_offset);
			if (!update) {
				pr_err("update pointer is NULL\n");
				return -EINVAL;
			}
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg == NULL) {
			pr_err("sg is NULL\n");
			goto err;
		}
		if ((sg_ptr->nents == 0) || (sg->length == 0)) {
			pr_err("Num of scat entr (%d)or length(%d) invalid\n",
					sg_ptr->nents, sg->length);
			goto err;
		}
		/* clean up buf for pre-allocated fd */
		if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
			(*update)) {
			if (data->client.sec_buf_fd[i].vbase)
				dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			memset((void *)update, 0,
				sizeof(struct qseecom_param_memref));
			memset(&(data->client.sec_buf_fd[i]), 0,
				sizeof(struct qseecom_sec_buf_fd_info));
			goto clean;
		}

		if (*update == 0) {
			/* update buf for pre-allocated fd from secure heap*/
			ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
				sg_ptr);
			if (ret) {
				pr_err("Failed to handle buf for fd[%d]\n", i);
				goto err;
			}
			memref = (struct qseecom_param_memref *)update;
			memref->buffer =
				(uint32_t)(data->client.sec_buf_fd[i].pbase);
			memref->size =
				(uint32_t)(data->client.sec_buf_fd[i].size);
		} else {
			/* update buf for fd from non-secure qseecom heap */
			if (sg_ptr->nents != 1) {
				pr_err("Num of scat entr (%d) invalid\n",
					sg_ptr->nents);
				goto err;
			}
			if (cleanup)
				*update = 0;
			else
				*update = (uint32_t)sg_dma_address(sg_ptr->sgl);
		}
clean:
		if (cleanup) {
			/* Invalidate so the CPU sees what TZ wrote */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
				ihandle, NULL, sg->length,
				ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Flush so TZ sees what the CPU wrote */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
				ihandle, NULL, sg->length,
				ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record sglist metadata for whitelist-aware TZ */
			data->sglistinfo_ptr[i].indexAndFlags =
				SGLISTINFO_SET_INDEX_FLAG(
				(sg_ptr->nents == 1), 0,
				req->ifd_data[i].cmd_buf_offset);
			data->sglistinfo_ptr[i].sizeOrCount =
				(sg_ptr->nents == 1) ?
				sg->length : sg_ptr->nents;
			data->sglist_cnt = i + 1;
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
6623
/*
 * __qseecom_qteec_issue_cmd() - common QTEEC command path: validate the
 * request, translate its userspace buffer pointers, build the 32- or
 * 64-bit TZ request, and dispatch it via SCM.
 *
 * @data:   client handle; must be a registered QSEECOM_CLIENT_APP
 * @req:    validated request; req_ptr/resp_ptr are rewritten in place to
 *          kernel virtual addresses (originals are kept in req_ptr/
 *          resp_ptr locals for the phys-addr translation)
 * @cmd_id: QSEOS_TEE_* command; OPEN_SESSION and REQUEST_CANCELLATION
 *          additionally get their ion-fd memrefs patched before the call
 *          and unpatched afterwards
 *
 * On any outcome after the SCM call, the shared buffer caches are
 * invalidated at the exit label so userspace sees TZ's writes.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req, uint32_t cmd_id)
{
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int ret2 = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = __qseecom_qteec_validate_msg(data, req);
	if (ret)
		return ret;

	/* Keep the original user-virtual pointers for phys translation */
	req_ptr = req->req_ptr;
	resp_ptr = req->resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* Rewrite req pointers to kernel-virtual for in-kernel patching */
	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->req_ptr);
	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->resp_ptr);

	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
		(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, false);
		if (ret)
			return ret;
	}

	/* Build the 32-bit or 64-bit ireq depending on QSEE version */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req->req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req->resp_len;
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req->req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req->resp_len;
		/* 32-bit TAs cannot address buffers at/above 4G */
		if ((data->client.app_arch == ELFCLASS32) &&
			((ireq_64bit.req_ptr >=
				PHY_ADDR_4G - ireq_64bit.req_len) ||
			(ireq_64bit.resp_ptr >=
				PHY_ADDR_4G - ireq_64bit.resp_len))){
			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
				data->client.app_name, data->client.app_id);
			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
				ireq_64bit.req_ptr, ireq_64bit.req_len,
				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
			return -EFAULT;
		}
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
	}
	if (qseecom.whitelist_support == true
		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
	else
		*(uint32_t *)cmd_buf = cmd_id;

	reqd_len_sb_in = req->req_len + req->resp_len;
	/* Flush the shared buffer so TZ sees the request contents */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
						resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/* Invalidate so the CPU/userspace see what TZ wrote back */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
			data->client.sb_virt, data->client.sb_length,
			ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret);
		return ret2;
	}

	/* Undo the memref patching done before the call */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
		(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret2 = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, true);
		if (ret2)
			return ret2;
	}
	return ret;
}
6786
6787static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6788 void __user *argp)
6789{
6790 struct qseecom_qteec_modfd_req req;
6791 int ret = 0;
6792
6793 ret = copy_from_user(&req, argp,
6794 sizeof(struct qseecom_qteec_modfd_req));
6795 if (ret) {
6796 pr_err("copy_from_user failed\n");
6797 return ret;
6798 }
6799 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6800 QSEOS_TEE_OPEN_SESSION);
6801
6802 return ret;
6803}
6804
6805static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6806 void __user *argp)
6807{
6808 struct qseecom_qteec_req req;
6809 int ret = 0;
6810
6811 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6812 if (ret) {
6813 pr_err("copy_from_user failed\n");
6814 return ret;
6815 }
6816 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6817 return ret;
6818}
6819
/*
 * qseecom_qteec_invoke_modfd_cmd() - handle the QTEEC invoke-command
 * ioctl with ion-fd memrefs: validate the request, patch its ion-fd
 * buffers, build the 32- or 64-bit TZ request, and dispatch
 * QSEOS_TEE_INVOKE_COMMAND via SCM.
 *
 * Largely parallels __qseecom_qteec_issue_cmd() but reads the request
 * from userspace itself and always performs the memref patch/unpatch.
 *
 * @data: client handle; must be a registered QSEECOM_CLIENT_APP
 * @argp: userspace pointer to a struct qseecom_qteec_modfd_req
 *
 * Return: 0 on success, negative errno on failure.
 * NOTE(review): a failed copy_from_user() propagates the positive
 * bytes-not-copied count instead of -EFAULT — confirm callers only test
 * for non-zero.
 */
static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	struct qseecom_qteec_modfd_req req;
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int i = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = copy_from_user(&req, argp,
			sizeof(struct qseecom_qteec_modfd_req));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}
	ret = __qseecom_qteec_validate_msg(data,
					(struct qseecom_qteec_req *)(&req));
	if (ret)
		return ret;
	/* Keep the original user-virtual pointers for phys translation */
	req_ptr = req.req_ptr;
	resp_ptr = req.resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (req.ifd_data[i].fd) {
			if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
				return -EINVAL;
		}
	}
	/* Rewrite req pointers to kernel-virtual for in-kernel patching */
	req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.req_ptr);
	req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.resp_ptr);
	ret = __qseecom_update_qteec_req_buf(&req, data, false);
	if (ret)
		return ret;

	/* Build the 32-bit or 64-bit ireq depending on QSEE version */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req.req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req.req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
	}
	reqd_len_sb_in = req.req_len + req.resp_len;
	if (qseecom.whitelist_support == true)
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
	else
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;

	/* Flush the shared buffer so TZ sees the request contents */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
						resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* Undo the memref patching done before the call */
	ret = __qseecom_update_qteec_req_buf(&req, data, true);
	if (ret)
		return ret;

	/* Invalidate so the CPU/userspace see what TZ wrote back */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}
	return 0;
}
6970
6971static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6972 void __user *argp)
6973{
6974 struct qseecom_qteec_modfd_req req;
6975 int ret = 0;
6976
6977 ret = copy_from_user(&req, argp,
6978 sizeof(struct qseecom_qteec_modfd_req));
6979 if (ret) {
6980 pr_err("copy_from_user failed\n");
6981 return ret;
6982 }
6983 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6984 QSEOS_TEE_REQUEST_CANCELLATION);
6985
6986 return ret;
6987}
6988
6989static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6990{
6991 if (data->sglist_cnt) {
6992 memset(data->sglistinfo_ptr, 0,
6993 SGLISTINFO_TABLE_SIZE);
6994 data->sglist_cnt = 0;
6995 }
6996}
6997
/*
 * qseecom_ioctl() - top-level ioctl dispatcher for /dev/qseecom.
 * @file: open file; file->private_data is the qseecom_dev_handle set up
 *        by qseecom_open()
 * @cmd:  QSEECOM_IOCTL_* / QSEECOM_QTEEC_IOCTL_* command code
 * @arg:  user pointer to the command-specific request structure
 *
 * Each case validates that the handle type (generic / listener / client
 * app / secure service) matches the command, then brackets the real work
 * with atomic_inc/atomic_dec of data->ioctl_count and a wake_up_all() on
 * data->abort_wq so that an abort path can wait for in-flight ioctls to
 * drain.  Commands that talk to TZ are serialized under app_access_lock.
 * Bus-bandwidth voting and crypto-clock voting are applied around the
 * send-command paths when the platform requires it.
 */
static inline long qseecom_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	struct qseecom_dev_handle *data = file->private_data;
	void __user *argp = (void __user *) arg;
	bool perf_enabled = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* Refuse new work once the driver is being torn down. */
	if (data->abort) {
		pr_err("Aborting qseecom driver\n");
		return -ENODEV;
	}

	switch (cmd) {
	case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
		/* Only a still-generic handle may become a listener. */
		if (data->type != QSEECOM_GENERIC) {
			pr_err("reg lstnr req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		pr_debug("ioctl register_listener_req()\n");
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		data->type = QSEECOM_LISTENER_SERVICE;
		ret = qseecom_register_listener(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed qseecom_register_listener: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SET_ICE_INFO: {
		struct qseecom_ice_data_t ice_data;

		ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
		if (ret) {
			pr_err("copy_from_user failed\n");
			return -EFAULT;
		}
		/* Pass the FDE flag straight through to the ICE driver. */
		qcom_ice_set_fde_flag(ice_data.flag);
		break;
	}

	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
						data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		pr_debug("ioctl unregister_listener_req()\n");
		/*
		 * Abort all listeners while unregistering, then clear the
		 * abort state once done.
		 */
		__qseecom_listener_abort_all(1);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unregister_listener(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		__qseecom_listener_abort_all(0);
		if (ret)
			pr_err("failed qseecom_unregister_listener: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_CMD_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		if (qseecom.support_bus_scaling) {
			/* register bus bw in case the client doesn't do it */
			if (!data->mode) {
				mutex_lock(&qsee_bw_mutex);
				__qseecom_register_bus_bandwidth_needs(
								data, HIGH);
				mutex_unlock(&qsee_bw_mutex);
			}
			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
			if (ret) {
				pr_err("Failed to set bw.\n");
				ret = -EINVAL;
				mutex_unlock(&app_access_lock);
				break;
			}
		}
		/*
		 * On targets where crypto clock is handled by HLOS,
		 * if clk_access_cnt is zero and perf_enabled is false,
		 * then the crypto clock was not enabled before sending cmd to
		 * tz, qseecom will enable the clock to avoid service failure.
		 */
		if (!qseecom.no_clock_support &&
			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
			pr_debug("ce clock is not enabled!\n");
			ret = qseecom_perf_enable(data);
			if (ret) {
				pr_err("Failed to vote for clock with err %d\n",
						ret);
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
			perf_enabled = true;
		}
		atomic_inc(&data->ioctl_count);
		ret = qseecom_send_cmd(data, argp);
		if (qseecom.support_bus_scaling)
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		/* Drop the temporary clock vote taken above, if any. */
		if (perf_enabled) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		}
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed qseecom_send_cmd: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
	case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		if (qseecom.support_bus_scaling) {
			if (!data->mode) {
				mutex_lock(&qsee_bw_mutex);
				__qseecom_register_bus_bandwidth_needs(
								data, HIGH);
				mutex_unlock(&qsee_bw_mutex);
			}
			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
			if (ret) {
				pr_err("Failed to set bw.\n");
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
		}
		/*
		 * On targets where crypto clock is handled by HLOS,
		 * if clk_access_cnt is zero and perf_enabled is false,
		 * then the crypto clock was not enabled before sending cmd to
		 * tz, qseecom will enable the clock to avoid service failure.
		 */
		if (!qseecom.no_clock_support &&
			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
			pr_debug("ce clock is not enabled!\n");
			ret = qseecom_perf_enable(data);
			if (ret) {
				pr_err("Failed to vote for clock with err %d\n",
						ret);
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
			perf_enabled = true;
		}
		atomic_inc(&data->ioctl_count);
		/* 32-bit vs 64-bit variants differ only in request layout. */
		if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
			ret = qseecom_send_modfd_cmd(data, argp);
		else
			ret = qseecom_send_modfd_cmd_64(data, argp);
		if (qseecom.support_bus_scaling)
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		if (perf_enabled) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		}
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed qseecom_send_cmd: %d\n", ret);
		/* Scrub the sglist table recorded for this modfd command. */
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_IOCTL_RECEIVE_REQ: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("receive req: invalid handle (%d), lid(%d)\n",
					data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		ret = qseecom_receive_req(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		/* -ERESTARTSYS just means the wait was interrupted. */
		if (ret && (ret != -ERESTARTSYS))
			pr_err("failed qseecom_receive_req: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_RESP_REQ: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("send resp req: invalid handle (%d), lid(%d)\n",
					data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		/* Reentrancy-aware response path when TZ supports it. */
		if (!qseecom.qsee_reentrancy_support)
			ret = qseecom_send_resp();
		else
			ret = qseecom_reentrancy_send_resp(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		if (ret)
			pr_err("failed qseecom_send_resp: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
		if ((data->type != QSEECOM_CLIENT_APP) &&
			(data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_SECURE_SERVICE)) {
			pr_err("set mem param req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_set_client_mem_param(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed Qqseecom_set_mem_param request: %d\n",
								ret);
		break;
	}
	case QSEECOM_IOCTL_LOAD_APP_REQ: {
		if ((data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("load app req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		/* Handle becomes a client-app handle once an app is loaded. */
		data->type = QSEECOM_CLIENT_APP;
		pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_load_app(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed load_app request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unload_app(data, false);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed unload_app request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_get_qseos_version(data, argp);
		if (ret)
			pr_err("qseecom_get_qseos_version: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
		if ((data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("perf enable req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if ((data->type == QSEECOM_CLIENT_APP) &&
			(data->client.app_id == 0)) {
			pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		/* Either register a bus-bw need or vote clocks directly. */
		if (qseecom.support_bus_scaling) {
			mutex_lock(&qsee_bw_mutex);
			__qseecom_register_bus_bandwidth_needs(data, HIGH);
			mutex_unlock(&qsee_bw_mutex);
		} else {
			ret = qseecom_perf_enable(data);
			if (ret)
				pr_err("Fail to vote for clocks %d\n", ret);
		}
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
		if ((data->type != QSEECOM_SECURE_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("perf disable req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if ((data->type == QSEECOM_CLIENT_APP) &&
			(data->client.app_id == 0)) {
			pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			mutex_lock(&qsee_bw_mutex);
			qseecom_unregister_bus_bandwidth_needs(data);
			mutex_unlock(&qsee_bw_mutex);
		}
		atomic_dec(&data->ioctl_count);
		break;
	}

	case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
		/* If crypto clock is not handled by HLOS, return directly. */
		if (qseecom.no_clock_support) {
			pr_debug("crypto clock is not handled by HLOS\n");
			break;
		}
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		ret = qseecom_scale_bus_bandwidth(data, argp);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("load ext elf req: invalid client handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_load_external_elf(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed load_external_elf request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
		if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
			pr_err("unload ext elf req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unload_external_elf(data);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed unload_app request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
		data->type = QSEECOM_CLIENT_APP;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
		ret = qseecom_query_app_loaded(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("send cmd svc req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->type = QSEECOM_SECURE_SERVICE;
		if (qseecom.qsee_version < QSEE_VERSION_03) {
			pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_send_service_cmd(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_CREATE_KEY_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("create key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Create Key feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_create_key(data, argp);
		if (ret)
			pr_err("failed to create encryption key: %d\n", ret);

		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_WIPE_KEY_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("wipe key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Wipe Key feature unsupported in qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_wipe_key(data, argp);
		if (ret)
			pr_err("failed to wipe encryption key: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("update key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Update Key feature unsupported in qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_update_key_user_info(data, argp);
		if (ret)
			pr_err("failed to update key user info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("save part hash req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_save_partition_hash(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("ES activated req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_is_es_activated(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_mdtp_cipher_dip(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SEND_MODFD_RESP:
	case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("receive req: invalid handle (%d), lid(%d)\n",
					data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
			ret = qseecom_send_modfd_resp(data, argp);
		else
			ret = qseecom_send_modfd_resp_64(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		if (ret)
			pr_err("failed qseecom_send_mod_resp: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Open session: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		/* GP (GlobalPlatform TEE) APIs require QSEE >= 4.0. */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_open_session(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed open_session_cmd: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Close session: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_close_session(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed close_session_cmd: %d\n", ret);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed Invoke cmd: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_request_cancellation(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed request_cancellation: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_get_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_free_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_query_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	default:
		pr_err("Invalid IOCTL: 0x%x\n", cmd);
		return -EINVAL;
	}
	return ret;
}
7701
7702static int qseecom_open(struct inode *inode, struct file *file)
7703{
7704 int ret = 0;
7705 struct qseecom_dev_handle *data;
7706
7707 data = kzalloc(sizeof(*data), GFP_KERNEL);
7708 if (!data)
7709 return -ENOMEM;
7710 file->private_data = data;
7711 data->abort = 0;
7712 data->type = QSEECOM_GENERIC;
7713 data->released = false;
7714 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7715 data->mode = INACTIVE;
7716 init_waitqueue_head(&data->abort_wq);
7717 atomic_set(&data->ioctl_count, 0);
7718 return ret;
7719}
7720
/*
 * qseecom_release() - tear down a client handle when the fd is closed.
 *
 * If the handle was not already released by an ioctl (data->released is
 * still false), perform type-specific cleanup: unregister a listener,
 * unload a client app, or unmap ion memory for generic/secure-service
 * handles.  Then drop any outstanding bus-bandwidth or clock votes held
 * by this handle and free it.
 */
static int qseecom_release(struct inode *inode, struct file *file)
{
	struct qseecom_dev_handle *data = file->private_data;
	int ret = 0;

	if (data->released == false) {
		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
			data->type, data->mode, data);
		switch (data->type) {
		case QSEECOM_LISTENER_SERVICE:
			pr_warn("release lsnr svc %d\n", data->listener.id);
			/* Abort all listeners around the unregister, then
			 * clear the abort state (mirrors the unregister
			 * ioctl path).
			 */
			__qseecom_listener_abort_all(1);
			mutex_lock(&app_access_lock);
			ret = qseecom_unregister_listener(data);
			mutex_unlock(&app_access_lock);
			__qseecom_listener_abort_all(0);
			break;
		case QSEECOM_CLIENT_APP:
			mutex_lock(&app_access_lock);
			/* true => app unload driven by file release */
			ret = qseecom_unload_app(data, true);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_SECURE_SERVICE:
		case QSEECOM_GENERIC:
			ret = qseecom_unmap_ion_allocated_memory(data);
			if (ret)
				pr_err("Ion Unmap failed\n");
			break;
		case QSEECOM_UNAVAILABLE_CLIENT_APP:
			break;
		default:
			pr_err("Unsupported clnt_handle_type %d",
				data->type);
			break;
		}
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		if (data->mode != INACTIVE) {
			qseecom_unregister_bus_bandwidth_needs(data);
			/* Scale the bus down only when no other client
			 * still holds a bandwidth request.
			 */
			if (qseecom.cumulative_mode == INACTIVE) {
				ret = __qseecom_set_msm_bus_request(INACTIVE);
				if (ret)
					pr_err("Fail to scale down bus\n");
			}
		}
		mutex_unlock(&qsee_bw_mutex);
	} else {
		if (data->fast_load_enabled == true)
			qsee_disable_clock_vote(data, CLK_SFPB);
		if (data->perf_enabled == true)
			qsee_disable_clock_vote(data, CLK_DFAB);
	}
	kfree(data);

	return ret;
}
7779
7780#ifdef CONFIG_COMPAT
7781#include "compat_qseecom.c"
7782#else
7783#define compat_qseecom_ioctl NULL
7784#endif
7785
/* Character-device operations for /dev/qseecom; compat_qseecom_ioctl is
 * NULL when CONFIG_COMPAT is disabled (see the #ifdef above).
 */
static const struct file_operations qseecom_fops = {
		.owner = THIS_MODULE,
		.unlocked_ioctl = qseecom_ioctl,
		.compat_ioctl = compat_qseecom_ioctl,
		.open = qseecom_open,
		.release = qseecom_release
};
7793
7794static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7795{
7796 int rc = 0;
7797 struct device *pdev;
7798 struct qseecom_clk *qclk;
7799 char *core_clk_src = NULL;
7800 char *core_clk = NULL;
7801 char *iface_clk = NULL;
7802 char *bus_clk = NULL;
7803
7804 switch (ce) {
7805 case CLK_QSEE: {
7806 core_clk_src = "core_clk_src";
7807 core_clk = "core_clk";
7808 iface_clk = "iface_clk";
7809 bus_clk = "bus_clk";
7810 qclk = &qseecom.qsee;
7811 qclk->instance = CLK_QSEE;
7812 break;
7813 };
7814 case CLK_CE_DRV: {
7815 core_clk_src = "ce_drv_core_clk_src";
7816 core_clk = "ce_drv_core_clk";
7817 iface_clk = "ce_drv_iface_clk";
7818 bus_clk = "ce_drv_bus_clk";
7819 qclk = &qseecom.ce_drv;
7820 qclk->instance = CLK_CE_DRV;
7821 break;
7822 };
7823 default:
7824 pr_err("Invalid ce hw instance: %d!\n", ce);
7825 return -EIO;
7826 }
7827
7828 if (qseecom.no_clock_support) {
7829 qclk->ce_core_clk = NULL;
7830 qclk->ce_clk = NULL;
7831 qclk->ce_bus_clk = NULL;
7832 qclk->ce_core_src_clk = NULL;
7833 return 0;
7834 }
7835
7836 pdev = qseecom.pdev;
7837
7838 /* Get CE3 src core clk. */
7839 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7840 if (!IS_ERR(qclk->ce_core_src_clk)) {
7841 rc = clk_set_rate(qclk->ce_core_src_clk,
7842 qseecom.ce_opp_freq_hz);
7843 if (rc) {
7844 clk_put(qclk->ce_core_src_clk);
7845 qclk->ce_core_src_clk = NULL;
7846 pr_err("Unable to set the core src clk @%uMhz.\n",
7847 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7848 return -EIO;
7849 }
7850 } else {
7851 pr_warn("Unable to get CE core src clk, set to NULL\n");
7852 qclk->ce_core_src_clk = NULL;
7853 }
7854
7855 /* Get CE core clk */
7856 qclk->ce_core_clk = clk_get(pdev, core_clk);
7857 if (IS_ERR(qclk->ce_core_clk)) {
7858 rc = PTR_ERR(qclk->ce_core_clk);
7859 pr_err("Unable to get CE core clk\n");
7860 if (qclk->ce_core_src_clk != NULL)
7861 clk_put(qclk->ce_core_src_clk);
7862 return -EIO;
7863 }
7864
7865 /* Get CE Interface clk */
7866 qclk->ce_clk = clk_get(pdev, iface_clk);
7867 if (IS_ERR(qclk->ce_clk)) {
7868 rc = PTR_ERR(qclk->ce_clk);
7869 pr_err("Unable to get CE interface clk\n");
7870 if (qclk->ce_core_src_clk != NULL)
7871 clk_put(qclk->ce_core_src_clk);
7872 clk_put(qclk->ce_core_clk);
7873 return -EIO;
7874 }
7875
7876 /* Get CE AXI clk */
7877 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7878 if (IS_ERR(qclk->ce_bus_clk)) {
7879 rc = PTR_ERR(qclk->ce_bus_clk);
7880 pr_err("Unable to get CE BUS interface clk\n");
7881 if (qclk->ce_core_src_clk != NULL)
7882 clk_put(qclk->ce_core_src_clk);
7883 clk_put(qclk->ce_core_clk);
7884 clk_put(qclk->ce_clk);
7885 return -EIO;
7886 }
7887
7888 return rc;
7889}
7890
7891static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7892{
7893 struct qseecom_clk *qclk;
7894
7895 if (ce == CLK_QSEE)
7896 qclk = &qseecom.qsee;
7897 else
7898 qclk = &qseecom.ce_drv;
7899
7900 if (qclk->ce_clk != NULL) {
7901 clk_put(qclk->ce_clk);
7902 qclk->ce_clk = NULL;
7903 }
7904 if (qclk->ce_core_clk != NULL) {
7905 clk_put(qclk->ce_core_clk);
7906 qclk->ce_core_clk = NULL;
7907 }
7908 if (qclk->ce_bus_clk != NULL) {
7909 clk_put(qclk->ce_bus_clk);
7910 qclk->ce_bus_clk = NULL;
7911 }
7912 if (qclk->ce_core_src_clk != NULL) {
7913 clk_put(qclk->ce_core_src_clk);
7914 qclk->ce_core_src_clk = NULL;
7915 }
7916 qclk->instance = CLK_INVALID;
7917}
7918
/*
 * Parse the crypto-engine (CE) configuration for FDE (full-disk
 * encryption) and PFE (per-file encryption) from the device tree.
 *
 * Two DT formats are supported:
 *  - new format: "qcom,full-disk-encrypt-info" / "qcom,per-file-encrypt-info"
 *    tables of struct qseecom_crypto_info entries, grouped into "units";
 *  - legacy format (old_db stays true): single "qcom,disk-encrypt-pipe-pair"
 *    / "qcom,file-encrypt-pipe-pair" values shared by all HLOS CE instances.
 *
 * On success, qseecom.ce_info.{fde,pfe} point at allocated
 * qseecom_ce_info_use arrays; on any failure everything allocated here
 * is freed at the "out" label and a negative errno is returned.
 */
static int qseecom_retrieve_ce_data(struct platform_device *pdev)
{
	int rc = 0;
	uint32_t hlos_num_ce_hw_instances;
	uint32_t disk_encrypt_pipe;
	uint32_t file_encrypt_pipe;
	uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
	int i;
	const int *tbl;
	int size;
	int entry;
	struct qseecom_crypto_info *pfde_tbl = NULL;
	struct qseecom_crypto_info *p;
	int tbl_size;
	int j;
	bool old_db = true;	/* cleared when a new-format table is found */
	struct qseecom_ce_info_use *pce_info_use;
	uint32_t *unit_tbl = NULL;	/* scratch list of distinct unit numbers */
	int total_units = 0;
	struct qseecom_ce_pipe_entry *pce_entry;

	qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
	qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;

	/* The CE instance used by QSEE itself is mandatory. */
	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,qsee-ce-hw-instance",
				&qseecom.ce_info.qsee_ce_hw_instance)) {
		pr_err("Fail to get qsee ce hw instance information.\n");
		rc = -EINVAL;
		goto out;
	} else {
		pr_debug("qsee-ce-hw-instance=0x%x\n",
			qseecom.ce_info.qsee_ce_hw_instance);
	}

	qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-fde");
	qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-pfe");

	/* Nothing more to parse if the target supports neither feature. */
	if (!qseecom.support_pfe && !qseecom.support_fde) {
		pr_warn("Device does not support PFE/FDE");
		goto out;
	}

	/* ---- FDE: try the new-format table first. ---- */
	if (qseecom.support_fde)
		tbl = of_get_property((&pdev->dev)->of_node,
				"qcom,full-disk-encrypt-info", &size);
	else
		tbl = NULL;
	if (tbl) {
		old_db = false;
		/* Table must be a whole number of crypto_info records. */
		if (size % sizeof(struct qseecom_crypto_info)) {
			pr_err("full-disk-encrypt-info tbl size(%d)\n",
				size);
			rc = -EINVAL;
			goto out;
		}
		tbl_size = size / sizeof
				(struct qseecom_crypto_info);

		pfde_tbl = kzalloc(size, GFP_KERNEL);
		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
		total_units = 0;

		if (!pfde_tbl || !unit_tbl) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,full-disk-encrypt-info",
			(u32 *)pfde_tbl, size/sizeof(u32))) {
			pr_err("failed to read full-disk-encrypt-info tbl\n");
			rc = -EINVAL;
			goto out;
		}

		/* Collect the distinct unit numbers used by the table. */
		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
			for (j = 0; j < total_units; j++) {
				if (p->unit_num == *(unit_tbl + j))
					break;
			}
			if (j == total_units) {
				*(unit_tbl + total_units) = p->unit_num;
				total_units++;
			}
		}

		qseecom.ce_info.num_fde = total_units;
		pce_info_use = qseecom.ce_info.fde = kcalloc(
			total_units, sizeof(struct qseecom_ce_info_use),
				GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}

		/* One ce_info_use per unit, holding that unit's pipe pairs. */
		for (j = 0; j < total_units; j++, pce_info_use++) {
			pce_info_use->unit_num = *(unit_tbl + j);
			pce_info_use->alloc = false;
			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
			pce_info_use->num_ce_pipe_entries = 0;
			pce_info_use->ce_pipe_entry = NULL;
			/* First pass: count entries belonging to this unit. */
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num)
					pce_info_use->num_ce_pipe_entries++;
			}

			entry = pce_info_use->num_ce_pipe_entries;
			pce_entry = pce_info_use->ce_pipe_entry =
				kcalloc(entry,
					sizeof(struct qseecom_ce_pipe_entry),
					GFP_KERNEL);
			if (pce_entry == NULL) {
				pr_err("failed to alloc memory\n");
				rc = -ENOMEM;
				goto out;
			}

			/* Second pass: copy this unit's CE/pipe pairs. */
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num) {
					pce_entry->ce_num = p->ce;
					pce_entry->ce_pipe_pair =
							p->pipe_pair;
					pce_entry->valid = true;
					pce_entry++;
				}
			}
		}
		/* Scratch buffers are reused for the PFE table below. */
		kfree(unit_tbl);
		unit_tbl = NULL;
		kfree(pfde_tbl);
		pfde_tbl = NULL;
	}

	/* ---- PFE: same new-format parsing as FDE above. ---- */
	if (qseecom.support_pfe)
		tbl = of_get_property((&pdev->dev)->of_node,
				"qcom,per-file-encrypt-info", &size);
	else
		tbl = NULL;
	if (tbl) {
		old_db = false;
		if (size % sizeof(struct qseecom_crypto_info)) {
			pr_err("per-file-encrypt-info tbl size(%d)\n",
				size);
			rc = -EINVAL;
			goto out;
		}
		tbl_size = size / sizeof
				(struct qseecom_crypto_info);

		pfde_tbl = kzalloc(size, GFP_KERNEL);
		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
		total_units = 0;
		if (!pfde_tbl || !unit_tbl) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,per-file-encrypt-info",
			(u32 *)pfde_tbl, size/sizeof(u32))) {
			pr_err("failed to read per-file-encrypt-info tbl\n");
			rc = -EINVAL;
			goto out;
		}

		/* Deduplicate unit numbers, as in the FDE path. */
		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
			for (j = 0; j < total_units; j++) {
				if (p->unit_num == *(unit_tbl + j))
					break;
			}
			if (j == total_units) {
				*(unit_tbl + total_units) = p->unit_num;
				total_units++;
			}
		}

		qseecom.ce_info.num_pfe = total_units;
		pce_info_use = qseecom.ce_info.pfe = kcalloc(
			total_units, sizeof(struct qseecom_ce_info_use),
			GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}

		for (j = 0; j < total_units; j++, pce_info_use++) {
			pce_info_use->unit_num = *(unit_tbl + j);
			pce_info_use->alloc = false;
			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
			pce_info_use->num_ce_pipe_entries = 0;
			pce_info_use->ce_pipe_entry = NULL;
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num)
					pce_info_use->num_ce_pipe_entries++;
			}

			entry = pce_info_use->num_ce_pipe_entries;
			pce_entry = pce_info_use->ce_pipe_entry =
				kcalloc(entry,
					sizeof(struct qseecom_ce_pipe_entry),
					GFP_KERNEL);
			if (pce_entry == NULL) {
				pr_err("failed to alloc memory\n");
				rc = -ENOMEM;
				goto out;
			}

			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num) {
					pce_entry->ce_num = p->ce;
					pce_entry->ce_pipe_pair =
							p->pipe_pair;
					pce_entry->valid = true;
					pce_entry++;
				}
			}
		}
		kfree(unit_tbl);
		unit_tbl = NULL;
		kfree(pfde_tbl);
		pfde_tbl = NULL;
	}

	/* New-format tables found for everything supported: done. */
	if (!old_db)
		goto out1;

	/* ---- Legacy DT format ("old db") fallback. ---- */
	if (of_property_read_bool((&pdev->dev)->of_node,
			"qcom,support-multiple-ce-hw-instance")) {
		if (of_property_read_u32((&pdev->dev)->of_node,
			"qcom,hlos-num-ce-hw-instances",
				&hlos_num_ce_hw_instances)) {
			pr_err("Fail: get hlos number of ce hw instance\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		hlos_num_ce_hw_instances = 1;
	}

	/* Bound check before filling the fixed-size local array. */
	if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
		pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
			MAX_CE_PIPE_PAIR_PER_UNIT);
		rc = -EINVAL;
		goto out;
	}

	if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
			hlos_num_ce_hw_instances)) {
		pr_err("Fail: get hlos ce hw instance info\n");
		rc = -EINVAL;
		goto out;
	}

	if (qseecom.support_fde) {
		/* Legacy FDE: one default unit, one pipe pair shared by
		 * every HLOS CE instance.
		 */
		pce_info_use = qseecom.ce_info.fde =
			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* by default for old db */
		qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
		pce_info_use->alloc = false;
		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
		pce_info_use->ce_pipe_entry = NULL;
		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,disk-encrypt-pipe-pair",
				&disk_encrypt_pipe)) {
			pr_err("Fail to get FDE pipe information.\n");
			rc = -EINVAL;
				goto out;
		} else {
			pr_debug("disk-encrypt-pipe-pair=0x%x",
				disk_encrypt_pipe);
		}
		entry = pce_info_use->num_ce_pipe_entries =
				hlos_num_ce_hw_instances;
		pce_entry = pce_info_use->ce_pipe_entry =
			kcalloc(entry,
				sizeof(struct qseecom_ce_pipe_entry),
				GFP_KERNEL);
		if (pce_entry == NULL) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0; i < entry; i++) {
			pce_entry->ce_num = hlos_ce_hw_instance[i];
			pce_entry->ce_pipe_pair = disk_encrypt_pipe;
			pce_entry->valid = 1;
			pce_entry++;
		}
	} else {
		pr_warn("Device does not support FDE");
		disk_encrypt_pipe = 0xff;
	}
	if (qseecom.support_pfe) {
		/* Legacy PFE: mirrors the legacy FDE setup above. */
		pce_info_use = qseecom.ce_info.pfe =
			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* by default for old db */
		qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
		pce_info_use->alloc = false;
		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
		pce_info_use->ce_pipe_entry = NULL;

		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,file-encrypt-pipe-pair",
				&file_encrypt_pipe)) {
			pr_err("Fail to get PFE pipe information.\n");
			rc = -EINVAL;
			goto out;
		} else {
			pr_debug("file-encrypt-pipe-pair=0x%x",
				file_encrypt_pipe);
		}
		entry = pce_info_use->num_ce_pipe_entries =
				hlos_num_ce_hw_instances;
		pce_entry = pce_info_use->ce_pipe_entry =
			kcalloc(entry,
				sizeof(struct qseecom_ce_pipe_entry),
				GFP_KERNEL);
		if (pce_entry == NULL) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0; i < entry; i++) {
			pce_entry->ce_num = hlos_ce_hw_instance[i];
			pce_entry->ce_pipe_pair = file_encrypt_pipe;
			pce_entry->valid = 1;
			pce_entry++;
		}
	} else {
		pr_warn("Device does not support PFE");
		file_encrypt_pipe = 0xff;
	}

out1:
	/* NOTE(review): on the new-format path hlos_ce_hw_instance[] is
	 * never populated, so ce_drv.instance becomes 0 here — confirm
	 * that is the intended default for new-format device trees.
	 */
	qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
	qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
out:
	if (rc) {
		/* Error path: free any per-unit pipe tables plus the unit
		 * arrays themselves (kcalloc zeroed them, so unallocated
		 * ce_pipe_entry pointers are NULL and safe to kfree).
		 */
		if (qseecom.ce_info.fde) {
			pce_info_use = qseecom.ce_info.fde;
			for (i = 0; i < qseecom.ce_info.num_fde; i++) {
				pce_entry = pce_info_use->ce_pipe_entry;
				kfree(pce_entry);
				pce_info_use++;
			}
		}
		kfree(qseecom.ce_info.fde);
		qseecom.ce_info.fde = NULL;
		if (qseecom.ce_info.pfe) {
			pce_info_use = qseecom.ce_info.pfe;
			for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
				pce_entry = pce_info_use->ce_pipe_entry;
				kfree(pce_entry);
				pce_info_use++;
			}
		}
		kfree(qseecom.ce_info.pfe);
		qseecom.ce_info.pfe = NULL;
	}
	/* Scratch buffers may still be live if we bailed mid-parse. */
	kfree(unit_tbl);
	kfree(pfde_tbl);
	return rc;
}
8300
8301static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8302 void __user *argp)
8303{
8304 struct qseecom_ce_info_req req;
8305 struct qseecom_ce_info_req *pinfo = &req;
8306 int ret = 0;
8307 int i;
8308 unsigned int entries;
8309 struct qseecom_ce_info_use *pce_info_use, *p;
8310 int total = 0;
8311 bool found = false;
8312 struct qseecom_ce_pipe_entry *pce_entry;
8313
8314 ret = copy_from_user(pinfo, argp,
8315 sizeof(struct qseecom_ce_info_req));
8316 if (ret) {
8317 pr_err("copy_from_user failed\n");
8318 return ret;
8319 }
8320
8321 switch (pinfo->usage) {
8322 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8323 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8324 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8325 if (qseecom.support_fde) {
8326 p = qseecom.ce_info.fde;
8327 total = qseecom.ce_info.num_fde;
8328 } else {
8329 pr_err("system does not support fde\n");
8330 return -EINVAL;
8331 }
8332 break;
8333 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8334 if (qseecom.support_pfe) {
8335 p = qseecom.ce_info.pfe;
8336 total = qseecom.ce_info.num_pfe;
8337 } else {
8338 pr_err("system does not support pfe\n");
8339 return -EINVAL;
8340 }
8341 break;
8342 default:
8343 pr_err("unsupported usage %d\n", pinfo->usage);
8344 return -EINVAL;
8345 }
8346
8347 pce_info_use = NULL;
8348 for (i = 0; i < total; i++) {
8349 if (!p->alloc)
8350 pce_info_use = p;
8351 else if (!memcmp(p->handle, pinfo->handle,
8352 MAX_CE_INFO_HANDLE_SIZE)) {
8353 pce_info_use = p;
8354 found = true;
8355 break;
8356 }
8357 p++;
8358 }
8359
8360 if (pce_info_use == NULL)
8361 return -EBUSY;
8362
8363 pinfo->unit_num = pce_info_use->unit_num;
8364 if (!pce_info_use->alloc) {
8365 pce_info_use->alloc = true;
8366 memcpy(pce_info_use->handle,
8367 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8368 }
8369 if (pce_info_use->num_ce_pipe_entries >
8370 MAX_CE_PIPE_PAIR_PER_UNIT)
8371 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8372 else
8373 entries = pce_info_use->num_ce_pipe_entries;
8374 pinfo->num_ce_pipe_entries = entries;
8375 pce_entry = pce_info_use->ce_pipe_entry;
8376 for (i = 0; i < entries; i++, pce_entry++)
8377 pinfo->ce_pipe_entry[i] = *pce_entry;
8378 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8379 pinfo->ce_pipe_entry[i].valid = 0;
8380
8381 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8382 pr_err("copy_to_user failed\n");
8383 ret = -EFAULT;
8384 }
8385 return ret;
8386}
8387
8388static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8389 void __user *argp)
8390{
8391 struct qseecom_ce_info_req req;
8392 struct qseecom_ce_info_req *pinfo = &req;
8393 int ret = 0;
8394 struct qseecom_ce_info_use *p;
8395 int total = 0;
8396 int i;
8397 bool found = false;
8398
8399 ret = copy_from_user(pinfo, argp,
8400 sizeof(struct qseecom_ce_info_req));
8401 if (ret)
8402 return ret;
8403
8404 switch (pinfo->usage) {
8405 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8406 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8407 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8408 if (qseecom.support_fde) {
8409 p = qseecom.ce_info.fde;
8410 total = qseecom.ce_info.num_fde;
8411 } else {
8412 pr_err("system does not support fde\n");
8413 return -EINVAL;
8414 }
8415 break;
8416 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8417 if (qseecom.support_pfe) {
8418 p = qseecom.ce_info.pfe;
8419 total = qseecom.ce_info.num_pfe;
8420 } else {
8421 pr_err("system does not support pfe\n");
8422 return -EINVAL;
8423 }
8424 break;
8425 default:
8426 pr_err("unsupported usage %d\n", pinfo->usage);
8427 return -EINVAL;
8428 }
8429
8430 for (i = 0; i < total; i++) {
8431 if (p->alloc &&
8432 !memcmp(p->handle, pinfo->handle,
8433 MAX_CE_INFO_HANDLE_SIZE)) {
8434 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8435 p->alloc = false;
8436 found = true;
8437 break;
8438 }
8439 p++;
8440 }
8441 return ret;
8442}
8443
8444static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8445 void __user *argp)
8446{
8447 struct qseecom_ce_info_req req;
8448 struct qseecom_ce_info_req *pinfo = &req;
8449 int ret = 0;
8450 int i;
8451 unsigned int entries;
8452 struct qseecom_ce_info_use *pce_info_use, *p;
8453 int total = 0;
8454 bool found = false;
8455 struct qseecom_ce_pipe_entry *pce_entry;
8456
8457 ret = copy_from_user(pinfo, argp,
8458 sizeof(struct qseecom_ce_info_req));
8459 if (ret)
8460 return ret;
8461
8462 switch (pinfo->usage) {
8463 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8464 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8465 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8466 if (qseecom.support_fde) {
8467 p = qseecom.ce_info.fde;
8468 total = qseecom.ce_info.num_fde;
8469 } else {
8470 pr_err("system does not support fde\n");
8471 return -EINVAL;
8472 }
8473 break;
8474 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8475 if (qseecom.support_pfe) {
8476 p = qseecom.ce_info.pfe;
8477 total = qseecom.ce_info.num_pfe;
8478 } else {
8479 pr_err("system does not support pfe\n");
8480 return -EINVAL;
8481 }
8482 break;
8483 default:
8484 pr_err("unsupported usage %d\n", pinfo->usage);
8485 return -EINVAL;
8486 }
8487
8488 pce_info_use = NULL;
8489 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8490 pinfo->num_ce_pipe_entries = 0;
8491 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8492 pinfo->ce_pipe_entry[i].valid = 0;
8493
8494 for (i = 0; i < total; i++) {
8495
8496 if (p->alloc && !memcmp(p->handle,
8497 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8498 pce_info_use = p;
8499 found = true;
8500 break;
8501 }
8502 p++;
8503 }
8504 if (!pce_info_use)
8505 goto out;
8506 pinfo->unit_num = pce_info_use->unit_num;
8507 if (pce_info_use->num_ce_pipe_entries >
8508 MAX_CE_PIPE_PAIR_PER_UNIT)
8509 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8510 else
8511 entries = pce_info_use->num_ce_pipe_entries;
8512 pinfo->num_ce_pipe_entries = entries;
8513 pce_entry = pce_info_use->ce_pipe_entry;
8514 for (i = 0; i < entries; i++, pce_entry++)
8515 pinfo->ce_pipe_entry[i] = *pce_entry;
8516 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8517 pinfo->ce_pipe_entry[i].valid = 0;
8518out:
8519 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8520 pr_err("copy_to_user failed\n");
8521 ret = -EFAULT;
8522 }
8523 return ret;
8524}
8525
8526/*
8527 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8528 * then whitelist feature is not supported.
8529 */
8530static int qseecom_check_whitelist_feature(void)
8531{
8532 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8533
8534 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8535}
8536
8537static int qseecom_probe(struct platform_device *pdev)
8538{
8539 int rc;
8540 int i;
8541 uint32_t feature = 10;
8542 struct device *class_dev;
8543 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8544 struct qseecom_command_scm_resp resp;
8545 struct qseecom_ce_info_use *pce_info_use = NULL;
8546
8547 qseecom.qsee_bw_count = 0;
8548 qseecom.qsee_perf_client = 0;
8549 qseecom.qsee_sfpb_bw_count = 0;
8550
8551 qseecom.qsee.ce_core_clk = NULL;
8552 qseecom.qsee.ce_clk = NULL;
8553 qseecom.qsee.ce_core_src_clk = NULL;
8554 qseecom.qsee.ce_bus_clk = NULL;
8555
8556 qseecom.cumulative_mode = 0;
8557 qseecom.current_mode = INACTIVE;
8558 qseecom.support_bus_scaling = false;
8559 qseecom.support_fde = false;
8560 qseecom.support_pfe = false;
8561
8562 qseecom.ce_drv.ce_core_clk = NULL;
8563 qseecom.ce_drv.ce_clk = NULL;
8564 qseecom.ce_drv.ce_core_src_clk = NULL;
8565 qseecom.ce_drv.ce_bus_clk = NULL;
8566 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8567
8568 qseecom.app_block_ref_cnt = 0;
8569 init_waitqueue_head(&qseecom.app_block_wq);
8570 qseecom.whitelist_support = true;
8571
8572 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8573 if (rc < 0) {
8574 pr_err("alloc_chrdev_region failed %d\n", rc);
8575 return rc;
8576 }
8577
8578 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8579 if (IS_ERR(driver_class)) {
8580 rc = -ENOMEM;
8581 pr_err("class_create failed %d\n", rc);
8582 goto exit_unreg_chrdev_region;
8583 }
8584
8585 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8586 QSEECOM_DEV);
8587 if (IS_ERR(class_dev)) {
8588 pr_err("class_device_create failed %d\n", rc);
8589 rc = -ENOMEM;
8590 goto exit_destroy_class;
8591 }
8592
8593 cdev_init(&qseecom.cdev, &qseecom_fops);
8594 qseecom.cdev.owner = THIS_MODULE;
8595
8596 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8597 if (rc < 0) {
8598 pr_err("cdev_add failed %d\n", rc);
8599 goto exit_destroy_device;
8600 }
8601
8602 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
8603 spin_lock_init(&qseecom.registered_listener_list_lock);
8604 INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8605 spin_lock_init(&qseecom.registered_app_list_lock);
8606 INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8607 spin_lock_init(&qseecom.registered_kclient_list_lock);
8608 init_waitqueue_head(&qseecom.send_resp_wq);
8609 qseecom.send_resp_flag = 0;
8610
8611 qseecom.qsee_version = QSEEE_VERSION_00;
8612 rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8613 &resp, sizeof(resp));
8614 pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8615 if (rc) {
8616 pr_err("Failed to get QSEE version info %d\n", rc);
8617 goto exit_del_cdev;
8618 }
8619 qseecom.qsee_version = resp.result;
8620 qseecom.qseos_version = QSEOS_VERSION_14;
8621 qseecom.commonlib_loaded = false;
8622 qseecom.commonlib64_loaded = false;
8623 qseecom.pdev = class_dev;
8624 /* Create ION msm client */
8625 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8626 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8627 pr_err("Ion client cannot be created\n");
8628 rc = -ENOMEM;
8629 goto exit_del_cdev;
8630 }
8631
8632 /* register client for bus scaling */
8633 if (pdev->dev.of_node) {
8634 qseecom.pdev->of_node = pdev->dev.of_node;
8635 qseecom.support_bus_scaling =
8636 of_property_read_bool((&pdev->dev)->of_node,
8637 "qcom,support-bus-scaling");
8638 rc = qseecom_retrieve_ce_data(pdev);
8639 if (rc)
8640 goto exit_destroy_ion_client;
8641 qseecom.appsbl_qseecom_support =
8642 of_property_read_bool((&pdev->dev)->of_node,
8643 "qcom,appsbl-qseecom-support");
8644 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
8645 qseecom.appsbl_qseecom_support);
8646
8647 qseecom.commonlib64_loaded =
8648 of_property_read_bool((&pdev->dev)->of_node,
8649 "qcom,commonlib64-loaded-by-uefi");
8650 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
8651 qseecom.commonlib64_loaded);
8652 qseecom.fde_key_size =
8653 of_property_read_bool((&pdev->dev)->of_node,
8654 "qcom,fde-key-size");
8655 qseecom.no_clock_support =
8656 of_property_read_bool((&pdev->dev)->of_node,
8657 "qcom,no-clock-support");
8658 if (!qseecom.no_clock_support) {
8659 pr_info("qseecom clocks handled by other subsystem\n");
8660 } else {
8661 pr_info("no-clock-support=0x%x",
8662 qseecom.no_clock_support);
8663 }
8664
8665 if (of_property_read_u32((&pdev->dev)->of_node,
8666 "qcom,qsee-reentrancy-support",
8667 &qseecom.qsee_reentrancy_support)) {
8668 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
8669 qseecom.qsee_reentrancy_support = 0;
8670 } else {
8671 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
8672 qseecom.qsee_reentrancy_support);
8673 }
8674
8675 /*
8676 * The qseecom bus scaling flag can not be enabled when
8677 * crypto clock is not handled by HLOS.
8678 */
8679 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
8680 pr_err("support_bus_scaling flag can not be enabled.\n");
8681 rc = -EINVAL;
8682 goto exit_destroy_ion_client;
8683 }
8684
8685 if (of_property_read_u32((&pdev->dev)->of_node,
8686 "qcom,ce-opp-freq",
8687 &qseecom.ce_opp_freq_hz)) {
8688 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
8689 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
8690 }
8691 rc = __qseecom_init_clk(CLK_QSEE);
8692 if (rc)
8693 goto exit_destroy_ion_client;
8694
8695 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8696 (qseecom.support_pfe || qseecom.support_fde)) {
8697 rc = __qseecom_init_clk(CLK_CE_DRV);
8698 if (rc) {
8699 __qseecom_deinit_clk(CLK_QSEE);
8700 goto exit_destroy_ion_client;
8701 }
8702 } else {
8703 struct qseecom_clk *qclk;
8704
8705 qclk = &qseecom.qsee;
8706 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
8707 qseecom.ce_drv.ce_clk = qclk->ce_clk;
8708 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
8709 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
8710 }
8711
8712 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8713 msm_bus_cl_get_pdata(pdev);
8714 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
8715 (!qseecom.is_apps_region_protected &&
8716 !qseecom.appsbl_qseecom_support)) {
8717 struct resource *resource = NULL;
8718 struct qsee_apps_region_info_ireq req;
8719 struct qsee_apps_region_info_64bit_ireq req_64bit;
8720 struct qseecom_command_scm_resp resp;
8721 void *cmd_buf = NULL;
8722 size_t cmd_len;
8723
8724 resource = platform_get_resource_byname(pdev,
8725 IORESOURCE_MEM, "secapp-region");
8726 if (resource) {
8727 if (qseecom.qsee_version < QSEE_VERSION_40) {
8728 req.qsee_cmd_id =
8729 QSEOS_APP_REGION_NOTIFICATION;
8730 req.addr = (uint32_t)resource->start;
8731 req.size = resource_size(resource);
8732 cmd_buf = (void *)&req;
8733 cmd_len = sizeof(struct
8734 qsee_apps_region_info_ireq);
8735 pr_warn("secure app region addr=0x%x size=0x%x",
8736 req.addr, req.size);
8737 } else {
8738 req_64bit.qsee_cmd_id =
8739 QSEOS_APP_REGION_NOTIFICATION;
8740 req_64bit.addr = resource->start;
8741 req_64bit.size = resource_size(
8742 resource);
8743 cmd_buf = (void *)&req_64bit;
8744 cmd_len = sizeof(struct
8745 qsee_apps_region_info_64bit_ireq);
8746 pr_warn("secure app region addr=0x%llx size=0x%x",
8747 req_64bit.addr, req_64bit.size);
8748 }
8749 } else {
8750 pr_err("Fail to get secure app region info\n");
8751 rc = -EINVAL;
8752 goto exit_deinit_clock;
8753 }
8754 rc = __qseecom_enable_clk(CLK_QSEE);
8755 if (rc) {
8756 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
8757 rc = -EIO;
8758 goto exit_deinit_clock;
8759 }
8760 rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
8761 cmd_buf, cmd_len,
8762 &resp, sizeof(resp));
8763 __qseecom_disable_clk(CLK_QSEE);
8764 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
8765 pr_err("send secapp reg fail %d resp.res %d\n",
8766 rc, resp.result);
8767 rc = -EINVAL;
8768 goto exit_deinit_clock;
8769 }
8770 }
8771 /*
8772 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
8773 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
8774 * Pls add "qseecom.commonlib64_loaded = true" here too.
8775 */
8776 if (qseecom.is_apps_region_protected ||
8777 qseecom.appsbl_qseecom_support)
8778 qseecom.commonlib_loaded = true;
8779 } else {
8780 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8781 pdev->dev.platform_data;
8782 }
8783 if (qseecom.support_bus_scaling) {
8784 init_timer(&(qseecom.bw_scale_down_timer));
8785 INIT_WORK(&qseecom.bw_inactive_req_ws,
8786 qseecom_bw_inactive_req_work);
8787 qseecom.bw_scale_down_timer.function =
8788 qseecom_scale_bus_bandwidth_timer_callback;
8789 }
8790 qseecom.timer_running = false;
8791 qseecom.qsee_perf_client = msm_bus_scale_register_client(
8792 qseecom_platform_support);
8793
8794 qseecom.whitelist_support = qseecom_check_whitelist_feature();
8795 pr_warn("qseecom.whitelist_support = %d\n",
8796 qseecom.whitelist_support);
8797
8798 if (!qseecom.qsee_perf_client)
8799 pr_err("Unable to register bus client\n");
8800
8801 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8802 return 0;
8803
8804exit_deinit_clock:
8805 __qseecom_deinit_clk(CLK_QSEE);
8806 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8807 (qseecom.support_pfe || qseecom.support_fde))
8808 __qseecom_deinit_clk(CLK_CE_DRV);
8809exit_destroy_ion_client:
8810 if (qseecom.ce_info.fde) {
8811 pce_info_use = qseecom.ce_info.fde;
8812 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8813 kzfree(pce_info_use->ce_pipe_entry);
8814 pce_info_use++;
8815 }
8816 kfree(qseecom.ce_info.fde);
8817 }
8818 if (qseecom.ce_info.pfe) {
8819 pce_info_use = qseecom.ce_info.pfe;
8820 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8821 kzfree(pce_info_use->ce_pipe_entry);
8822 pce_info_use++;
8823 }
8824 kfree(qseecom.ce_info.pfe);
8825 }
8826 ion_client_destroy(qseecom.ion_clnt);
8827exit_del_cdev:
8828 cdev_del(&qseecom.cdev);
8829exit_destroy_device:
8830 device_destroy(driver_class, qseecom_device_no);
8831exit_destroy_class:
8832 class_destroy(driver_class);
8833exit_unreg_chrdev_region:
8834 unregister_chrdev_region(qseecom_device_no, 1);
8835 return rc;
8836}
8837
/*
 * Driver removal: unload all kernel-client TAs, release the CE info
 * tables and clocks, and tear down the character device created by
 * qseecom_probe() in reverse order.
 * Returns the last qseecom_unload_app() status (0 if none failed).
 */
static int qseecom_remove(struct platform_device *pdev)
{
	struct qseecom_registered_kclient_list *kclient = NULL;
	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
	unsigned long flags = 0;
	int ret = 0;
	int i;
	struct qseecom_ce_pipe_entry *pce_entry;
	struct qseecom_ce_info_use *pce_info_use;

	/* Block new ioctl activity before tearing anything down. */
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);

	/*
	 * NOTE(review): mutex_lock() and qseecom_unload_app() below can
	 * sleep, but they run while this spinlock is held with IRQs
	 * disabled — looks like a sleeping-in-atomic violation; confirm
	 * against newer upstream fixes before relying on this path.
	 */
	list_for_each_entry_safe(kclient, kclient_tmp,
		&qseecom.registered_kclient_list_head, list) {

		/* Break the loop if client handle is NULL */
		if (!kclient->handle) {
			list_del(&kclient->list);
			kzfree(kclient);
			break;
		}

		list_del(&kclient->list);
		mutex_lock(&app_access_lock);
		ret = qseecom_unload_app(kclient->handle->dev, false);
		mutex_unlock(&app_access_lock);
		if (!ret) {
			/* Only free on successful unload; on failure the
			 * handle is leaked rather than freed while TZ may
			 * still reference it.
			 */
			kzfree(kclient->handle->dev);
			kzfree(kclient->handle);
			kzfree(kclient);
		}
	}

	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);

	if (qseecom.qseos_version > QSEEE_VERSION_00)
		qseecom_unload_commonlib_image();

	/* Drop any outstanding bus bandwidth vote, then the client. */
	if (qseecom.qsee_perf_client)
		msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
									0);
	if (pdev->dev.platform_data != NULL)
		msm_bus_scale_unregister_client(qseecom.qsee_perf_client);

	if (qseecom.support_bus_scaling) {
		cancel_work_sync(&qseecom.bw_inactive_req_ws);
		del_timer_sync(&qseecom.bw_scale_down_timer);
	}

	/* Free the per-unit CE pipe tables and the unit arrays. */
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.fde);
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.pfe);

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		__qseecom_deinit_clk(CLK_QSEE);
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde))
			__qseecom_deinit_clk(CLK_CE_DRV);
	}

	ion_client_destroy(qseecom.ion_clnt);

	cdev_del(&qseecom.cdev);

	device_destroy(driver_class, qseecom_device_no);

	class_destroy(driver_class);

	unregister_chrdev_region(qseecom_device_no, 1);

	return ret;
}
8927
8928static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
8929{
8930 int ret = 0;
8931 struct qseecom_clk *qclk;
8932
8933 qclk = &qseecom.qsee;
8934 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
8935 if (qseecom.no_clock_support)
8936 return 0;
8937
8938 mutex_lock(&qsee_bw_mutex);
8939 mutex_lock(&clk_access_lock);
8940
8941 if (qseecom.current_mode != INACTIVE) {
8942 ret = msm_bus_scale_client_update_request(
8943 qseecom.qsee_perf_client, INACTIVE);
8944 if (ret)
8945 pr_err("Fail to scale down bus\n");
8946 else
8947 qseecom.current_mode = INACTIVE;
8948 }
8949
8950 if (qclk->clk_access_cnt) {
8951 if (qclk->ce_clk != NULL)
8952 clk_disable_unprepare(qclk->ce_clk);
8953 if (qclk->ce_core_clk != NULL)
8954 clk_disable_unprepare(qclk->ce_core_clk);
8955 if (qclk->ce_bus_clk != NULL)
8956 clk_disable_unprepare(qclk->ce_bus_clk);
8957 }
8958
8959 del_timer_sync(&(qseecom.bw_scale_down_timer));
8960 qseecom.timer_running = false;
8961
8962 mutex_unlock(&clk_access_lock);
8963 mutex_unlock(&qsee_bw_mutex);
8964 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8965
8966 return 0;
8967}
8968
/*
 * Legacy PM resume hook: restore the pre-suspend bus bandwidth vote,
 * re-enable the QSEE CE clocks if users still hold them, and restart
 * the bandwidth scale-down timer. On clock failure the already-enabled
 * clocks are unwound via the goto chain and -EIO is returned; the
 * driver state is marked READY either way.
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	/* Clocks owned elsewhere: nothing to restore, just mark READY. */
	if (qseecom.no_clock_support)
		goto exit;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	/* Restore the cumulative vote, capped at HIGH. */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	if (qseecom.cumulative_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	/* Re-enable held clocks in core -> iface -> bus order; unwind in
	 * reverse order on failure.
	 */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}

	/* Re-arm the inactivity timer that scales the bus back down. */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;

ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
9048
9049static const struct of_device_id qseecom_match[] = {
9050 {
9051 .compatible = "qcom,qseecom",
9052 },
9053 {}
9054};
9055
/*
 * Platform driver registration.  Uses the legacy platform_driver
 * .suspend/.resume callbacks rather than dev_pm_ops; this matches the
 * hooks defined above and the era of this kernel tree.
 */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.owner = THIS_MODULE,
		.of_match_table = qseecom_match,
	},
};
9067
9068static int qseecom_init(void)
9069{
9070 return platform_driver_register(&qseecom_plat_driver);
9071}
9072
9073static void qseecom_exit(void)
9074{
9075 platform_driver_unregister(&qseecom_plat_driver);
9076}
9077
/* Module metadata and entry/exit registration. */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");

module_init(qseecom_init);
module_exit(qseecom_exit);