blob: 7077b300521755a1a1574ebfb721da8727de5e76 [file] [log] [blame]
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
4 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
54#define QSEECOM_DEV "qseecom"
55#define QSEOS_VERSION_14 0x14
56#define QSEEE_VERSION_00 0x400000
57#define QSEE_VERSION_01 0x401000
58#define QSEE_VERSION_02 0x402000
59#define QSEE_VERSION_03 0x403000
60#define QSEE_VERSION_04 0x404000
61#define QSEE_VERSION_05 0x405000
62#define QSEE_VERSION_20 0x800000
63#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
64
65#define QSEE_CE_CLK_100MHZ 100000000
66#define CE_CLK_DIV 1000000
67
68#define QSEECOM_MAX_SG_ENTRY 512
69#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
70 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
71
72#define QSEECOM_INVALID_KEY_ID 0xff
73
74/* Save partition image hash for authentication check */
75#define SCM_SAVE_PARTITION_HASH_ID 0x01
76
77/* Check if enterprise security is activate */
78#define SCM_IS_ACTIVATED_ID 0x02
79
80/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
81#define SCM_MDTP_CIPHER_DIP 0x01
82
83/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
84#define MAX_DIP 0x20000
85
86#define RPMB_SERVICE 0x2000
87#define SSD_SERVICE 0x3000
88
89#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
90#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
91#define TWO 2
92#define QSEECOM_UFS_ICE_CE_NUM 10
93#define QSEECOM_SDCC_ICE_CE_NUM 20
94#define QSEECOM_ICE_FDE_KEY_INDEX 0
95
96#define PHY_ADDR_4G (1ULL<<32)
97
98#define QSEECOM_STATE_NOT_READY 0
99#define QSEECOM_STATE_SUSPEND 1
100#define QSEECOM_STATE_READY 2
101#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
102
103/*
104 * default ce info unit to 0 for
105 * services which
106 * support only single instance.
107 * Most of services are in this category.
108 */
109#define DEFAULT_CE_INFO_UNIT 0
110#define DEFAULT_NUM_CE_INFO_UNIT 1
111
/*
 * Clock identifiers passed to the bandwidth-vote helpers (see
 * qsee_vote_for_clock()/qsee_disable_clock_vote() prototypes below).
 */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};
116
/*
 * ICE full-disk-encryption key-size encodings.
 * NOTE(review): QSEECOM_ICE_FDE_KEY_SIZE_MASK (value 2) is used here as a
 * bit-SHIFT amount, not a mask, despite its name — confirm against the TZ
 * key-size field definition before renaming.
 */
enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};
125
/*
 * Role of a /dev/qseecom file handle; stored in qseecom_dev_handle.type
 * and used to select the client vs. listener member of its union.
 */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,		/* handle owns a loaded TZ app */
	QSEECOM_LISTENER_SERVICE,	/* handle registered a listener */
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,		/* freshly opened, role not yet set */
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};
133
/*
 * Which crypto-engine clock bank an operation targets; indexes the
 * qseecom.qsee / qseecom.ce_drv clock bundles.
 */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,	/* CE instance used by QSEE itself */
	CLK_CE_DRV,	/* CE instance used by the driver */
	CLK_INVALID,
};
139
140static struct class *driver_class;
141static dev_t qseecom_device_no;
142
143static DEFINE_MUTEX(qsee_bw_mutex);
144static DEFINE_MUTEX(app_access_lock);
145static DEFINE_MUTEX(clk_access_lock);
146
/*
 * One scatter/gather descriptor shared with TZ; the field encoding is
 * described in the block comment below and built with
 * SGLISTINFO_SET_INDEX_FLAG().  Layout is an ABI with the secure world —
 * do not reorder or resize the fields.
 */
struct sglist_info {
	uint32_t indexAndFlags;	/* [31] single-addr flag, [30] 64-bit flag,
				 * [29:0] offset into the request buffer
				 */
	uint32_t sizeOrCount;	/* byte size (single addr) or entry count */
};
151
152/*
153 * The 31th bit indicates only one or multiple physical address inside
154 * the request buffer. If it is set, the index locates a single physical addr
155 * inside the request buffer, and `sizeOrCount` is the size of the memory being
156 * shared at that physical address.
157 * Otherwise, the index locates an array of {start, len} pairs (a
158 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
159 * that array.
160 *
161 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
162 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
163 *
164 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
165 */
166#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
167 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
168
169#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
170
171#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
172
173#define MAKE_WHITELIST_VERSION(major, minor, patch) \
174 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
175
/*
 * Per-listener bookkeeping, linked into
 * qseecom.registered_listener_list_head under
 * registered_listener_list_lock.
 */
struct qseecom_registered_listener_list {
	struct list_head list;
	struct qseecom_register_listener_req svc;	/* id + shared buffer info */
	void *user_virt_sb_base;	/* userspace view of the shared buffer */
	u8 *sb_virt;			/* kernel mapping of the shared buffer */
	phys_addr_t sb_phys;
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	wait_queue_head_t rcv_req_wq;	/* listener blocks here awaiting requests */
	int rcv_req_flag;
	int send_resp_flag;
	bool listener_in_use;	/* a TZ request is currently outstanding */
	/* wq for thread blocked on this listener*/
	wait_queue_head_t listener_block_app_wq;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;	/* number of valid sglistinfo_ptr entries */
};
193
/*
 * One loaded trusted application, linked into
 * qseecom.registered_app_list_head under registered_app_list_lock.
 * ref_cnt allows several clients to share a single loaded app.
 */
struct qseecom_registered_app_list {
	struct list_head list;
	u32 app_id;			/* id assigned by QSEE */
	u32 ref_cnt;			/* number of clients using this app */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;			/* ELF class of the app image */
	bool app_blocked;		/* app waiting on a busy listener */
	u32 blocked_on_listener_id;
};
203
/* In-kernel client handle, linked into registered_kclient_list_head. */
struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};
208
/* One crypto-engine usage unit (FDE or PFE) described by device tree. */
struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];	/* opaque lookup key */
	unsigned int unit_num;		/* instance number for this usage */
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;			/* entry currently claimed by a client */
	uint32_t type;			/* FDE vs PFE usage type */
};
217
/* Aggregate CE topology: FDE and PFE usage tables plus QSEE's CE instance. */
struct ce_hw_usage_info {
	uint32_t qsee_ce_hw_instance;
	uint32_t num_fde;			/* entries in fde[] */
	struct qseecom_ce_info_use *fde;	/* full-disk-encryption units */
	uint32_t num_pfe;			/* entries in pfe[] */
	struct qseecom_ce_info_use *pfe;	/* per-file-encryption units */
};
225
/*
 * Clock bundle for one CE instance; clk_access_cnt reference-counts
 * enable/disable (manipulated under clk_access_lock).
 */
struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;	/* enable refcount */
};
234
/*
 * Global driver state; a single instance (`qseecom`) is defined below.
 * The three list heads each have a dedicated spinlock; bandwidth voting
 * state is additionally guarded by qsee_bw_mutex.
 */
struct qseecom_control {
	struct ion_client *ion_clnt; /* Ion client */
	struct list_head registered_listener_list_head;
	spinlock_t registered_listener_list_lock;

	struct list_head registered_app_list_head;
	spinlock_t registered_app_list_lock;

	struct list_head registered_kclient_list_head;
	spinlock_t registered_kclient_list_lock;

	wait_queue_head_t send_resp_wq;
	int send_resp_flag;

	uint32_t qseos_version;
	uint32_t qsee_version;	/* selects 32- vs 64-bit request layouts */
	struct device *pdev;
	bool whitelist_support;
	bool commonlib_loaded;
	bool commonlib64_loaded;
	struct ion_handle *cmnlib_ion_handle;
	struct ce_hw_usage_info ce_info;

	/* bandwidth vote counters, guarded by qsee_bw_mutex */
	int qsee_bw_count;
	int qsee_sfpb_bw_count;

	uint32_t qsee_perf_client;
	struct qseecom_clk qsee;	/* clocks for QSEE's CE instance */
	struct qseecom_clk ce_drv;	/* clocks for the driver's CE instance */

	bool support_bus_scaling;
	bool support_fde;
	bool support_pfe;
	bool fde_key_size;
	uint32_t cumulative_mode;
	enum qseecom_bandwidth_request_mode current_mode;
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;

	/* reentrancy: apps blocked on busy listeners */
	uint32_t app_block_ref_cnt;
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;	/* QSEECOM_STATE_{NOT_READY,SUSPEND,READY} */
	int is_apps_region_protected;	/* set from androidboot.keymaster= */
	bool smcinvoke_support;	/* TZ accepted the SMCINVOKE listener ABI */
};
286
/* DMA buffer backing a secure-buffer ion fd passed in a modified command. */
struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;
	size_t size;
	void *vbase;		/* kernel virtual address */
	dma_addr_t pbase;	/* bus address handed to TZ */
};
293
/* 32-bit {buffer, size} memory reference as laid out in TZ messages. */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};
298
/* Per-client state when a dev handle is of type QSEECOM_CLIENT_APP. */
struct qseecom_client_handle {
	u32 app_id;			/* id of the loaded TZ app */
	u8 *sb_virt;			/* kernel mapping of shared buffer */
	phys_addr_t sb_phys;
	unsigned long user_virt_sb_base;	/* userspace shared-buffer base */
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
};
310
/* Per-listener state when a dev handle is of type QSEECOM_LISTENER_SERVICE. */
struct qseecom_listener_handle {
	u32 id;		/* registered listener service id */
};
314
315static struct qseecom_control qseecom;
316
/*
 * State attached to each open of /dev/qseecom.  `type` selects which
 * union member is live; abort/abort_wq coordinate teardown with
 * in-flight ioctls counted by ioctl_count.
 */
struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;
		struct qseecom_listener_handle listener;
	};
	bool released;
	int abort;			/* nonzero: unblock and fail waiters */
	wait_queue_head_t abort_wq;
	atomic_t ioctl_count;		/* in-flight ioctls on this handle */
	bool perf_enabled;
	bool fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;		/* valid entries in sglistinfo_ptr */
	bool use_legacy_cmd;
};
334
/* Human-readable label for one key-usage index (see key_id_array below). */
struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};
338
/* Crypto-engine selection: usage unit, CE instance, and pipe pair. */
struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};
344
/*
 * Descriptions indexed by key usage; presumably the positions mirror the
 * qseecom key-usage enum (undefined, FDE, PFE, UFS-ICE FDE, SDCC-ICE FDE)
 * — verify against the uapi header before reordering.
 */
static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},

	{
		.desc = "Full Disk Encryption",
	},

	{
		.desc = "Per File Encryption",
	},

	{
		.desc = "UFS ICE Full Disk Encryption",
	},

	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};
366
367/* Function proto types */
368static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
369static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
370static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
371static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
372static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
373static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
374 char *cmnlib_name);
375static int qseecom_enable_ice_setup(int usage);
376static int qseecom_disable_ice_setup(int usage);
377static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
378static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
379 void __user *argp);
380static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
381 void __user *argp);
382static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
383 void __user *argp);
384
/*
 * Boot-parameter hook for "androidboot.keymaster=": parses the integer
 * value into qseecom.is_apps_region_protected.  Returns 1 to mark the
 * option as handled, per __setup() convention.
 */
static int get_qseecom_keymaster_status(char *str)
{
	get_option(&str, &qseecom.is_apps_region_protected);
	return 1;
}
__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
391
/*
 * qseecom_scm_call2() - translate a legacy qseecom request into an ARMv8
 * scm_call2() SMC invocation.
 *
 * @svc_id:   legacy SCM service id (6, SCM_SVC_ES, or SCM_SVC_TZSCHEDULER)
 * @tz_cmd_id: legacy command id within @svc_id (used for svc 6 and ES)
 * @req_buf:  request structure; for SCM_SVC_TZSCHEDULER its first u32 is
 *            the QSEOS command id that selects the marshalling below
 * @resp_buf: receives the TZ result (result/resp_type/data from desc.ret)
 *
 * Returns 0 on a successful SMC (TZ-level status is in @resp_buf), or a
 * negative errno on marshalling/allocation failure.
 *
 * Notes:
 *  - Buffers whose physical address is handed to TZ are copied into a
 *    freshly kzalloc'd bounce buffer and cache-flushed first, then freed
 *    with kzfree() so key material is zeroized.
 *  - Commands that may be issued while an app is blocked first call
 *    __qseecom_reentrancy_check_if_no_app_blocked().
 *  - qseecom.qsee_version < QSEE_VERSION_40 selects the 32-bit request
 *    layouts; otherwise the 64-bit layouts are used.
 */
static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
			const void *req_buf, void *resp_buf)
{
	int ret = 0;
	uint32_t smc_id = 0;
	uint32_t qseos_cmd_id = 0;
	struct scm_desc desc = {0};
	struct qseecom_command_scm_resp *scm_resp = NULL;

	if (!req_buf || !resp_buf) {
		pr_err("Invalid buffer pointer\n");
		return -EINVAL;
	}
	qseos_cmd_id = *(uint32_t *)req_buf;
	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;

	switch (svc_id) {
	case 6: {
		/* Feature-version query is the only supported svc-6 command. */
		if (tz_cmd_id == 3) {
			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
			desc.args[0] = *(uint32_t *)req_buf;
		} else {
			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
				svc_id, tz_cmd_id);
			return -EINVAL;
		}
		ret = scm_call2(smc_id, &desc);
		break;
	}
	case SCM_SVC_ES: {
		switch (tz_cmd_id) {
		case SCM_SAVE_PARTITION_HASH_ID: {
			/* Hand TZ a flushed bounce copy of the digest. */
			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
			struct qseecom_save_partition_hash_req *p_hash_req =
				(struct qseecom_save_partition_hash_req *)
				req_buf;
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, p_hash_req->digest,
				SHA256_DIGEST_LENGTH);
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
			desc.args[0] = p_hash_req->partition_id;
			desc.args[1] = virt_to_phys(tzbuf);
			desc.args[2] = SHA256_DIGEST_LENGTH;
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		default: {
			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
				tz_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (tz_cmd_id) */
		break;
	} /* end of case SCM_SVC_ES */
	case SCM_SVC_TZSCHEDULER: {
		switch (qseos_cmd_id) {
		case QSEOS_APP_START_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_START_ID;
			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_app_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_SHUTDOWN_COMMAND: {
			struct qseecom_unload_app_ireq *req;

			req = (struct qseecom_unload_app_ireq *)req_buf;
			smc_id = TZ_OS_APP_SHUTDOWN_ID;
			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_LOOKUP_COMMAND: {
			/* App name travels to TZ via a flushed bounce buffer. */
			struct qseecom_check_app_ireq *req;
			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			req = (struct qseecom_check_app_ireq *)req_buf;
			pr_debug("Lookup app_name = %s\n", req->app_name);
			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_APP_LOOKUP_ID;
			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = strlen(req->app_name);
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_APP_REGION_NOTIFICATION: {
			struct qsee_apps_region_info_ireq *req;
			struct qsee_apps_region_info_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
			desc.arginfo =
				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qsee_apps_region_info_ireq *)
					req_buf;
				desc.args[0] = req->addr;
				desc.args[1] = req->size;
			} else {
				req_64bit =
				(struct qsee_apps_region_info_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->addr;
				desc.args[1] = req_64bit->size;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
			struct qseecom_load_lib_image_ireq *req;
			struct qseecom_load_lib_image_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_lib_image_ireq *)
					req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_lib_image_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_REGISTER_LISTENER: {
			/*
			 * Try the SMCINVOKE listener ABI first; on failure,
			 * clear the capability flag and retry with the
			 * legacy id.  The flag is later consulted by
			 * QSEOS_CONTINUE_BLOCKED_REQ_COMMAND.
			 */
			struct qseecom_register_listener_ireq *req;
			struct qseecom_register_listener_64bit_ireq *req_64bit;

			desc.arginfo =
				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_register_listener_ireq *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->sb_ptr;
				desc.args[2] = req->sb_len;
			} else {
				req_64bit =
				(struct qseecom_register_listener_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->listener_id;
				desc.args[1] = req_64bit->sb_ptr;
				desc.args[2] = req_64bit->sb_len;
			}
			qseecom.smcinvoke_support = true;
			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			if (ret) {
				qseecom.smcinvoke_support = false;
				smc_id = TZ_OS_REGISTER_LISTENER_ID;
				__qseecom_reentrancy_check_if_no_app_blocked(
					smc_id);
				ret = scm_call2(smc_id, &desc);
			}
			break;
		}
		case QSEOS_DEREGISTER_LISTENER: {
			struct qseecom_unregister_listener_ireq *req;

			req = (struct qseecom_unregister_listener_ireq *)
				req_buf;
			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
			struct qseecom_client_listener_data_irsp *req;

			req = (struct qseecom_client_listener_data_irsp *)
				req_buf;
			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
			desc.arginfo =
				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			desc.args[1] = req->status;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
			/* As above, plus the whitelist sglist descriptor. */
			struct qseecom_client_listener_data_irsp *req;
			struct qseecom_client_listener_data_64bit_irsp *req_64;

			smc_id =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req =
				(struct qseecom_client_listener_data_irsp *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->status;
				desc.args[2] = req->sglistinfo_ptr;
				desc.args[3] = req->sglistinfo_len;
			} else {
				req_64 =
			(struct qseecom_client_listener_data_64bit_irsp *)
					req_buf;
				desc.args[0] = req_64->listener_id;
				desc.args[1] = req_64->status;
				desc.args[2] = req_64->sglistinfo_ptr;
				desc.args[3] = req_64->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
			/* Reuses the services-image parameter layout. */
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_app_64bit_ireq *)req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}

		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
			struct qseecom_client_send_service_ireq *req;

			req = (struct qseecom_client_send_service_ireq *)
				req_buf;
			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
			desc.args[0] = req->key_type;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_ERASE_COMMAND: {
			smc_id = TZ_OS_RPMB_ERASE_ID;
			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		/*
		 * Key-service commands below all follow the same pattern:
		 * bounce-copy the request payload (minus the leading command
		 * id) into a flushed buffer, pass {phys, len} to TZ, then
		 * zeroize and free the buffer.
		 */
		case QSEOS_GENERATE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_GEN_KEY_ID;
			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_DELETE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_DEL_KEY_ID;
			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_SET_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_select_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_select_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_UPDATE_KEY_USERINFO: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		/* GlobalPlatform TEE session commands. */
		case QSEOS_TEE_OPEN_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_CLOSE_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_REQUEST_CANCELLATION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
			desc.arginfo =
				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
			/*
			 * The SMC id depends on whether listener
			 * registration negotiated the SMCINVOKE ABI above.
			 */
			struct qseecom_continue_blocked_request_ireq *req =
				(struct qseecom_continue_blocked_request_ireq *)
				req_buf;
			if (qseecom.smcinvoke_support)
				smc_id =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
			else
				smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
			desc.arginfo =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
			desc.args[0] = req->app_or_session_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		default: {
			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
						qseos_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /*end of switch (qsee_cmd_id)  */
	break;
	} /*end of case SCM_SVC_TZSCHEDULER*/
	default: {
		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
					svc_id);
		ret = -EINVAL;
		break;
	}
	} /*end of switch svc_id */
	scm_resp->result = desc.ret[0];
	scm_resp->resp_type = desc.ret[1];
	scm_resp->data = desc.ret[2];
	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
		scm_resp->result, scm_resp->resp_type, scm_resp->data);
	return ret;
}
1054
1055
1056static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1057 size_t cmd_len, void *resp_buf, size_t resp_len)
1058{
1059 if (!is_scm_armv8())
1060 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1061 resp_buf, resp_len);
1062 else
1063 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1064}
1065
1066static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1067 struct qseecom_register_listener_req *svc)
1068{
1069 struct qseecom_registered_listener_list *ptr;
1070 int unique = 1;
1071 unsigned long flags;
1072
1073 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1074 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1075 if (ptr->svc.listener_id == svc->listener_id) {
1076 pr_err("Service id: %u is already registered\n",
1077 ptr->svc.listener_id);
1078 unique = 0;
1079 break;
1080 }
1081 }
1082 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1083 return unique;
1084}
1085
1086static struct qseecom_registered_listener_list *__qseecom_find_svc(
1087 int32_t listener_id)
1088{
1089 struct qseecom_registered_listener_list *entry = NULL;
1090 unsigned long flags;
1091
1092 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1093 list_for_each_entry(entry,
1094 &qseecom.registered_listener_list_head, list) {
1095 if (entry->svc.listener_id == listener_id)
1096 break;
1097 }
1098 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1099
1100 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1101 pr_err("Service id: %u is not found\n", listener_id);
1102 return NULL;
1103 }
1104
1105 return entry;
1106}
1107
1108static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1109 struct qseecom_dev_handle *handle,
1110 struct qseecom_register_listener_req *listener)
1111{
1112 int ret = 0;
1113 struct qseecom_register_listener_ireq req;
1114 struct qseecom_register_listener_64bit_ireq req_64bit;
1115 struct qseecom_command_scm_resp resp;
1116 ion_phys_addr_t pa;
1117 void *cmd_buf = NULL;
1118 size_t cmd_len;
1119
1120 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001121 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001122 listener->ifd_data_fd);
1123 if (IS_ERR_OR_NULL(svc->ihandle)) {
1124 pr_err("Ion client could not retrieve the handle\n");
1125 return -ENOMEM;
1126 }
1127
1128 /* Get the physical address of the ION BUF */
1129 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1130 if (ret) {
1131 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1132 ret);
1133 return ret;
1134 }
1135 /* Populate the structure for sending scm call to load image */
1136 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1137 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1138 pr_err("ION memory mapping for listener shared buffer failed\n");
1139 return -ENOMEM;
1140 }
1141 svc->sb_phys = (phys_addr_t)pa;
1142
1143 if (qseecom.qsee_version < QSEE_VERSION_40) {
1144 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1145 req.listener_id = svc->svc.listener_id;
1146 req.sb_len = svc->sb_length;
1147 req.sb_ptr = (uint32_t)svc->sb_phys;
1148 cmd_buf = (void *)&req;
1149 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1150 } else {
1151 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1152 req_64bit.listener_id = svc->svc.listener_id;
1153 req_64bit.sb_len = svc->sb_length;
1154 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1155 cmd_buf = (void *)&req_64bit;
1156 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1157 }
1158
1159 resp.result = QSEOS_RESULT_INCOMPLETE;
1160
1161 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1162 &resp, sizeof(resp));
1163 if (ret) {
1164 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1165 return -EINVAL;
1166 }
1167
1168 if (resp.result != QSEOS_RESULT_SUCCESS) {
1169 pr_err("Error SB registration req: resp.result = %d\n",
1170 resp.result);
1171 return -EPERM;
1172 }
1173 return 0;
1174}
1175
1176static int qseecom_register_listener(struct qseecom_dev_handle *data,
1177 void __user *argp)
1178{
1179 int ret = 0;
1180 unsigned long flags;
1181 struct qseecom_register_listener_req rcvd_lstnr;
1182 struct qseecom_registered_listener_list *new_entry;
1183
1184 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1185 if (ret) {
1186 pr_err("copy_from_user failed\n");
1187 return ret;
1188 }
1189 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1190 rcvd_lstnr.sb_size))
1191 return -EFAULT;
1192
1193 data->listener.id = 0;
1194 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
1195 pr_err("Service is not unique and is already registered\n");
1196 data->released = true;
1197 return -EBUSY;
1198 }
1199
1200 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1201 if (!new_entry)
1202 return -ENOMEM;
1203 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1204 new_entry->rcv_req_flag = 0;
1205
1206 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1207 new_entry->sb_length = rcvd_lstnr.sb_size;
1208 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1209 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
1210 pr_err("qseecom_set_sb_memoryfailed\n");
1211 kzfree(new_entry);
1212 return -ENOMEM;
1213 }
1214
1215 data->listener.id = rcvd_lstnr.listener_id;
1216 init_waitqueue_head(&new_entry->rcv_req_wq);
1217 init_waitqueue_head(&new_entry->listener_block_app_wq);
1218 new_entry->send_resp_flag = 0;
1219 new_entry->listener_in_use = false;
1220 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1221 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1222 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1223
1224 return ret;
1225}
1226
/*
 * Ioctl handler: deregister this handle's listener service.
 * Order matters here:
 *   1. Tell QSEOS to deregister the listener (scm call must succeed first).
 *   2. Set data->abort and wake any thread sleeping in receive-request so
 *      it can bail out.
 *   3. Wait until this is the only outstanding ioctl on the handle.
 *   4. Unlink and free the listener entry under the list lock, then unmap
 *      and free its ION shared buffer outside the lock.
 */
static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	int ret = 0;
	unsigned long flags;
	uint32_t unmap_mem = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL;	/* Retrieve phy addr */

	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
					sizeof(req), &resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		return ret;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		return -EPERM;
	}

	/* Unblock any thread waiting for a listener request */
	data->abort = 1;
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
			list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			wake_up_all(&ptr_svc->rcv_req_wq);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Wait for all other in-flight ioctls on this handle to drain */
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			return ret;
		}
	}

	/* Remove and free the entry; remember the ION handle for cleanup */
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc,
			&qseecom.registered_listener_list_head, list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			if (ptr_svc->sb_virt) {
				unmap_mem = 1;
				ihandle = ptr_svc->ihandle;
			}
			list_del(&ptr_svc->list);
			kzfree(ptr_svc);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Unmap the memory */
	if (unmap_mem) {
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	data->released = true;
	return ret;
}
1300
/*
 * Apply a bus bandwidth mode: toggle the QSEE core clock vote to match the
 * requested mode, then update the msm-bus client request if the mode
 * actually changed. On a bus-request failure the clock vote is rolled back.
 *
 * NOTE(review): qseecom.current_mode is updated to the requested mode even
 * when msm_bus_scale_client_update_request() failed (the assignment is
 * outside the error branch) — confirm this is intentional.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* Roll the clock vote back to its previous state */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		qseecom.current_mode = mode;
	}
	return ret;
}
1337
/*
 * Deferred work scheduled by the bw scale-down timer: drop the bus
 * bandwidth request back to INACTIVE (only if the timer is still marked
 * running, i.e. no one re-armed bandwidth in the meantime) and clear the
 * timer_running flag. Takes app_access_lock before qsee_bw_mutex.
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
		qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
1350
/*
 * Timer callback for bw_scale_down_timer: defer the mutex-protected
 * bandwidth scale-down to process context via the workqueue.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1355
1356static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1357{
1358 struct qseecom_clk *qclk;
1359 int ret = 0;
1360
1361 mutex_lock(&clk_access_lock);
1362 if (ce == CLK_QSEE)
1363 qclk = &qseecom.qsee;
1364 else
1365 qclk = &qseecom.ce_drv;
1366
1367 if (qclk->clk_access_cnt > 2) {
1368 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1369 ret = -EINVAL;
1370 goto err_dec_ref_cnt;
1371 }
1372 if (qclk->clk_access_cnt == 2)
1373 qclk->clk_access_cnt--;
1374
1375err_dec_ref_cnt:
1376 mutex_unlock(&clk_access_lock);
1377 return ret;
1378}
1379
1380
/*
 * Re-apply a bandwidth vote and cancel any pending scale-down timer.
 * mode == 0 means "recompute from the aggregate of client votes, capped at
 * HIGH"; any other mode is applied directly. If the scale-down timer was
 * armed, its extra clock reference is dropped and the timer is cancelled.
 * All of it runs under qsee_bw_mutex.
 */
static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
{
	int32_t ret = 0;
	int32_t request_mode = INACTIVE;

	mutex_lock(&qsee_bw_mutex);
	if (mode == 0) {
		/* Derive the mode from the sum of all client votes */
		if (qseecom.cumulative_mode > MEDIUM)
			request_mode = HIGH;
		else
			request_mode = qseecom.cumulative_mode;
	} else {
		request_mode = mode;
	}

	ret = __qseecom_set_msm_bus_request(request_mode);
	if (ret) {
		pr_err("set msm bus request failed (%d),request_mode (%d)\n",
			ret, request_mode);
		goto err_scale_timer;
	}

	if (qseecom.timer_running) {
		/* Undo the clock ref the pending scale-down was holding */
		ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
		if (ret) {
			pr_err("Failed to decrease clk ref count.\n");
			goto err_scale_timer;
		}
		del_timer_sync(&(qseecom.bw_scale_down_timer));
		qseecom.timer_running = false;
	}
err_scale_timer:
	mutex_unlock(&qsee_bw_mutex);
	return ret;
}
1416
1417
1418static int qseecom_unregister_bus_bandwidth_needs(
1419 struct qseecom_dev_handle *data)
1420{
1421 int32_t ret = 0;
1422
1423 qseecom.cumulative_mode -= data->mode;
1424 data->mode = INACTIVE;
1425
1426 return ret;
1427}
1428
1429static int __qseecom_register_bus_bandwidth_needs(
1430 struct qseecom_dev_handle *data, uint32_t request_mode)
1431{
1432 int32_t ret = 0;
1433
1434 if (data->mode == INACTIVE) {
1435 qseecom.cumulative_mode += request_mode;
1436 data->mode = request_mode;
1437 } else {
1438 if (data->mode != request_mode) {
1439 qseecom.cumulative_mode -= data->mode;
1440 qseecom.cumulative_mode += request_mode;
1441 data->mode = request_mode;
1442 }
1443 }
1444 return ret;
1445}
1446
1447static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1448{
1449 int ret = 0;
1450
1451 ret = qsee_vote_for_clock(data, CLK_DFAB);
1452 if (ret) {
1453 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1454 goto perf_enable_exit;
1455 }
1456 ret = qsee_vote_for_clock(data, CLK_SFPB);
1457 if (ret) {
1458 qsee_disable_clock_vote(data, CLK_DFAB);
1459 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1460 goto perf_enable_exit;
1461 }
1462
1463perf_enable_exit:
1464 return ret;
1465}
1466
1467static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1468 void __user *argp)
1469{
1470 int32_t ret = 0;
1471 int32_t req_mode;
1472
1473 if (qseecom.no_clock_support)
1474 return 0;
1475
1476 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1477 if (ret) {
1478 pr_err("copy_from_user failed\n");
1479 return ret;
1480 }
1481 if (req_mode > HIGH) {
1482 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1483 return -EINVAL;
1484 }
1485
1486 /*
1487 * Register bus bandwidth needs if bus scaling feature is enabled;
1488 * otherwise, qseecom enable/disable clocks for the client directly.
1489 */
1490 if (qseecom.support_bus_scaling) {
1491 mutex_lock(&qsee_bw_mutex);
1492 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1493 mutex_unlock(&qsee_bw_mutex);
1494 } else {
1495 pr_debug("Bus scaling feature is NOT enabled\n");
1496 pr_debug("request bandwidth mode %d for the client\n",
1497 req_mode);
1498 if (req_mode != INACTIVE) {
1499 ret = qseecom_perf_enable(data);
1500 if (ret)
1501 pr_err("Failed to vote for clock with err %d\n",
1502 ret);
1503 } else {
1504 qsee_disable_clock_vote(data, CLK_DFAB);
1505 qsee_disable_clock_vote(data, CLK_SFPB);
1506 }
1507 }
1508 return ret;
1509}
1510
/*
 * Arm (or push back) the bandwidth scale-down timer by `duration`
 * milliseconds and flag it as running, under qsee_bw_mutex. When the timer
 * fires, the bandwidth vote is dropped via the inactive-request work item.
 * No-op when clock support is absent.
 */
static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
{
	if (qseecom.no_clock_support)
		return;

	mutex_lock(&qsee_bw_mutex);
	qseecom.bw_scale_down_timer.expires = jiffies +
		msecs_to_jiffies(duration);
	mod_timer(&(qseecom.bw_scale_down_timer),
		qseecom.bw_scale_down_timer.expires);
	qseecom.timer_running = true;
	mutex_unlock(&qsee_bw_mutex);
}
1524
1525static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1526{
1527 if (!qseecom.support_bus_scaling)
1528 qsee_disable_clock_vote(data, CLK_SFPB);
1529 else
1530 __qseecom_add_bw_scale_down_timer(
1531 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1532}
1533
1534static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1535{
1536 int ret = 0;
1537
1538 if (qseecom.support_bus_scaling) {
1539 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1540 if (ret)
1541 pr_err("Failed to set bw MEDIUM.\n");
1542 } else {
1543 ret = qsee_vote_for_clock(data, CLK_SFPB);
1544 if (ret)
1545 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1546 }
1547 return ret;
1548}
1549
1550static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1551 void __user *argp)
1552{
1553 ion_phys_addr_t pa;
1554 int32_t ret;
1555 struct qseecom_set_sb_mem_param_req req;
1556 size_t len;
1557
1558 /* Copy the relevant information needed for loading the image */
1559 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1560 return -EFAULT;
1561
1562 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1563 (req.sb_len == 0)) {
1564 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1565 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1566 return -EFAULT;
1567 }
1568 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1569 req.sb_len))
1570 return -EFAULT;
1571
1572 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001573 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001574 req.ifd_data_fd);
1575 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1576 pr_err("Ion client could not retrieve the handle\n");
1577 return -ENOMEM;
1578 }
1579 /* Get the physical address of the ION BUF */
1580 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1581 if (ret) {
1582
1583 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1584 ret);
1585 return ret;
1586 }
1587
1588 if (len < req.sb_len) {
1589 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1590 req.sb_len, len);
1591 return -EINVAL;
1592 }
1593 /* Populate the structure for sending scm call to load image */
1594 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1595 data->client.ihandle);
1596 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1597 pr_err("ION memory mapping for client shared buf failed\n");
1598 return -ENOMEM;
1599 }
1600 data->client.sb_phys = (phys_addr_t)pa;
1601 data->client.sb_length = req.sb_len;
1602 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1603 return 0;
1604}
1605
1606static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
1607{
1608 int ret;
1609
1610 ret = (qseecom.send_resp_flag != 0);
1611 return ret || data->abort;
1612}
1613
1614static int __qseecom_reentrancy_listener_has_sent_rsp(
1615 struct qseecom_dev_handle *data,
1616 struct qseecom_registered_listener_list *ptr_svc)
1617{
1618 int ret;
1619
1620 ret = (ptr_svc->send_resp_flag != 0);
1621 return ret || data->abort;
1622}
1623
1624static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
1625 struct qseecom_command_scm_resp *resp,
1626 struct qseecom_client_listener_data_irsp *send_data_rsp,
1627 struct qseecom_registered_listener_list *ptr_svc,
1628 uint32_t lstnr) {
1629 int ret = 0;
1630
1631 send_data_rsp->status = QSEOS_RESULT_FAILURE;
1632 qseecom.send_resp_flag = 0;
1633 send_data_rsp->qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
1634 send_data_rsp->listener_id = lstnr;
1635 if (ptr_svc)
1636 pr_warn("listener_id:%x, lstnr: %x\n",
1637 ptr_svc->svc.listener_id, lstnr);
1638 if (ptr_svc && ptr_svc->ihandle) {
1639 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
1640 ptr_svc->sb_virt, ptr_svc->sb_length,
1641 ION_IOC_CLEAN_INV_CACHES);
1642 if (ret) {
1643 pr_err("cache operation failed %d\n", ret);
1644 return ret;
1645 }
1646 }
1647
1648 if (lstnr == RPMB_SERVICE) {
1649 ret = __qseecom_enable_clk(CLK_QSEE);
1650 if (ret)
1651 return ret;
1652 }
1653 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, send_data_rsp,
1654 sizeof(send_data_rsp), resp, sizeof(*resp));
1655 if (ret) {
1656 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1657 ret, data->client.app_id);
1658 if (lstnr == RPMB_SERVICE)
1659 __qseecom_disable_clk(CLK_QSEE);
1660 return ret;
1661 }
1662 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1663 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1664 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1665 resp->result, data->client.app_id, lstnr);
1666 ret = -EINVAL;
1667 }
1668 if (lstnr == RPMB_SERVICE)
1669 __qseecom_disable_clk(CLK_QSEE);
1670 return ret;
1671}
1672
1673static void __qseecom_clean_listener_sglistinfo(
1674 struct qseecom_registered_listener_list *ptr_svc)
1675{
1676 if (ptr_svc->sglist_cnt) {
1677 memset(ptr_svc->sglistinfo_ptr, 0,
1678 SGLISTINFO_TABLE_SIZE);
1679 ptr_svc->sglist_cnt = 0;
1680 }
1681}
1682
/*
 * Service QSEOS_RESULT_INCOMPLETE responses: each iteration TZ names a
 * listener (resp->data) that must handle a request. The flow per listener:
 *   1. Mark the listener in-use and wake its receive-request waiter.
 *   2. With all signals blocked, sleep until the listener posts a response
 *      (global flag when reentrancy is off, per-listener flag otherwise).
 *   3. Build the listener response (32/64-bit layout by QSEE version),
 *      attach the flushed sglist table, pick the whitelist command id,
 *      clean/invalidate the listener's shared buffer, and send it to TZ.
 *   4. Repeat while TZ still reports INCOMPLETE.
 * An abort on the client converts the response status to FAILURE and makes
 * the function return -ENODEV after the final scm exchange.
 */
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp;
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/* No such listener: report the failure straight back to TZ */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_warn("Service requested does not exist\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -ERESTARTSYS;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);
		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		do {
			/*
			 * When reentrancy is not supported, check global
			 * send_resp_flag; otherwise, check this listener's
			 * send_resp_flag.
			 */
			if (!qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_listener_has_sent_rsp(data))) {
				break;
			}

			if (qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}

		qseecom.send_resp_flag = 0;
		ptr_svc->send_resp_flag = 0;
		table = ptr_svc->sglistinfo_ptr;
		/* Response layout depends on the QSEE version (32 vs 64 bit) */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			send_data_rsp.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
			send_data_rsp.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			send_data_rsp_64bit.sglistinfo_ptr =
				virt_to_phys(table);
			send_data_rsp_64bit.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* First field of both layouts is the command id */
		if (qseecom.whitelist_support == false)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}

		/* RPMB/SSD listeners need the QSEE clock held for the call */
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		ptr_svc->listener_in_use = false;
		__qseecom_clean_listener_sglistinfo(ptr_svc);
		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
				__qseecom_disable_clk(CLK_QSEE);
			return ret;
		}
		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
		}
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
			__qseecom_disable_clk(CLK_QSEE);

	}
	if (rc)
		return rc;

	return ret;
}
1851
Zhen Kong2f60f492017-06-29 15:22:14 -07001852static int __qseecom_process_blocked_on_listener_legacy(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001853 struct qseecom_command_scm_resp *resp,
1854 struct qseecom_registered_app_list *ptr_app,
1855 struct qseecom_dev_handle *data)
1856{
1857 struct qseecom_registered_listener_list *list_ptr;
1858 int ret = 0;
1859 struct qseecom_continue_blocked_request_ireq ireq;
1860 struct qseecom_command_scm_resp continue_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001861 bool found_app = false;
Zhen Kong2f60f492017-06-29 15:22:14 -07001862 unsigned long flags;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001863
1864 if (!resp || !data) {
1865 pr_err("invalid resp or data pointer\n");
1866 ret = -EINVAL;
1867 goto exit;
1868 }
1869
1870 /* find app_id & img_name from list */
1871 if (!ptr_app) {
1872 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
1873 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
1874 list) {
1875 if ((ptr_app->app_id == data->client.app_id) &&
1876 (!strcmp(ptr_app->app_name,
1877 data->client.app_name))) {
1878 found_app = true;
1879 break;
1880 }
1881 }
1882 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
1883 flags);
1884 if (!found_app) {
1885 pr_err("app_id %d (%s) is not found\n",
1886 data->client.app_id,
1887 (char *)data->client.app_name);
1888 ret = -ENOENT;
1889 goto exit;
1890 }
1891 }
1892
1893 list_ptr = __qseecom_find_svc(resp->data);
1894 if (!list_ptr) {
1895 pr_err("Invalid listener ID\n");
1896 ret = -ENODATA;
1897 goto exit;
1898 }
1899 pr_debug("lsntr %d in_use = %d\n",
1900 resp->data, list_ptr->listener_in_use);
1901 ptr_app->blocked_on_listener_id = resp->data;
Zhen Kong2f60f492017-06-29 15:22:14 -07001902
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001903 /* sleep until listener is available */
Zhen Kong2f60f492017-06-29 15:22:14 -07001904 qseecom.app_block_ref_cnt++;
1905 ptr_app->app_blocked = true;
1906 mutex_unlock(&app_access_lock);
1907 if (wait_event_freezable(
1908 list_ptr->listener_block_app_wq,
1909 !list_ptr->listener_in_use)) {
1910 pr_err("Interrupted: listener_id %d, app_id %d\n",
1911 resp->data, ptr_app->app_id);
1912 ret = -ERESTARTSYS;
1913 goto exit;
1914 }
1915 mutex_lock(&app_access_lock);
1916 ptr_app->app_blocked = false;
1917 qseecom.app_block_ref_cnt--;
1918
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001919 ptr_app->blocked_on_listener_id = 0;
1920 /* notify the blocked app that listener is available */
1921 pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
1922 resp->data, data->client.app_id,
1923 data->client.app_name);
1924 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
Zhen Kong2f60f492017-06-29 15:22:14 -07001925 ireq.app_or_session_id = data->client.app_id;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001926 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1927 &ireq, sizeof(ireq),
1928 &continue_resp, sizeof(continue_resp));
1929 if (ret) {
1930 pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
1931 data->client.app_id,
1932 data->client.app_name, ret);
1933 goto exit;
1934 }
1935 /*
1936 * After TZ app is unblocked, then continue to next case
1937 * for incomplete request processing
1938 */
1939 resp->result = QSEOS_RESULT_INCOMPLETE;
1940exit:
1941 return ret;
1942}
1943
Zhen Kong2f60f492017-06-29 15:22:14 -07001944static int __qseecom_process_blocked_on_listener_smcinvoke(
1945 struct qseecom_command_scm_resp *resp)
1946{
1947 struct qseecom_registered_listener_list *list_ptr;
1948 int ret = 0;
1949 struct qseecom_continue_blocked_request_ireq ireq;
1950 struct qseecom_command_scm_resp continue_resp;
1951 unsigned int session_id;
1952
1953 if (!resp) {
1954 pr_err("invalid resp pointer\n");
1955 ret = -EINVAL;
1956 goto exit;
1957 }
1958 session_id = resp->resp_type;
1959 list_ptr = __qseecom_find_svc(resp->data);
1960 if (!list_ptr) {
1961 pr_err("Invalid listener ID\n");
1962 ret = -ENODATA;
1963 goto exit;
1964 }
1965 pr_debug("lsntr %d in_use = %d\n",
1966 resp->data, list_ptr->listener_in_use);
1967 /* sleep until listener is available */
1968 qseecom.app_block_ref_cnt++;
1969 mutex_unlock(&app_access_lock);
1970 if (wait_event_freezable(
1971 list_ptr->listener_block_app_wq,
1972 !list_ptr->listener_in_use)) {
1973 pr_err("Interrupted: listener_id %d, session_id %d\n",
1974 resp->data, session_id);
1975 ret = -ERESTARTSYS;
1976 goto exit;
1977 }
1978 mutex_lock(&app_access_lock);
1979 qseecom.app_block_ref_cnt--;
1980
1981 /* notify TZ that listener is available */
1982 pr_warn("Lsntr %d is available, unblock session(%d) in TZ\n",
1983 resp->data, session_id);
1984 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
1985 ireq.app_or_session_id = session_id;
1986 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1987 &ireq, sizeof(ireq),
1988 &continue_resp, sizeof(continue_resp));
1989 if (ret) {
1990 pr_err("scm_call for continue blocked req for session %d failed, ret %d\n",
1991 session_id, ret);
1992 goto exit;
1993 }
1994 resp->result = QSEOS_RESULT_INCOMPLETE;
1995exit:
1996 return ret;
1997}
1998
1999static int __qseecom_process_reentrancy_blocked_on_listener(
2000 struct qseecom_command_scm_resp *resp,
2001 struct qseecom_registered_app_list *ptr_app,
2002 struct qseecom_dev_handle *data)
2003{
2004 if (!qseecom.smcinvoke_support)
2005 return __qseecom_process_blocked_on_listener_legacy(
2006 resp, ptr_app, data);
2007 else
2008 return __qseecom_process_blocked_on_listener_smcinvoke(
2009 resp);
2010}
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002011static int __qseecom_reentrancy_process_incomplete_cmd(
2012 struct qseecom_dev_handle *data,
2013 struct qseecom_command_scm_resp *resp)
2014{
2015 int ret = 0;
2016 int rc = 0;
2017 uint32_t lstnr;
2018 unsigned long flags;
2019 struct qseecom_client_listener_data_irsp send_data_rsp;
2020 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
2021 struct qseecom_registered_listener_list *ptr_svc = NULL;
2022 sigset_t new_sigset;
2023 sigset_t old_sigset;
2024 uint32_t status;
2025 void *cmd_buf = NULL;
2026 size_t cmd_len;
2027 struct sglist_info *table = NULL;
2028
2029 while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
2030 lstnr = resp->data;
2031 /*
2032 * Wake up blocking lsitener service with the lstnr id
2033 */
2034 spin_lock_irqsave(&qseecom.registered_listener_list_lock,
2035 flags);
2036 list_for_each_entry(ptr_svc,
2037 &qseecom.registered_listener_list_head, list) {
2038 if (ptr_svc->svc.listener_id == lstnr) {
2039 ptr_svc->listener_in_use = true;
2040 ptr_svc->rcv_req_flag = 1;
2041 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2042 break;
2043 }
2044 }
2045 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
2046 flags);
2047
2048 if (ptr_svc == NULL) {
2049 pr_err("Listener Svc %d does not exist\n", lstnr);
2050 return -EINVAL;
2051 }
2052
2053 if (!ptr_svc->ihandle) {
2054 pr_err("Client handle is not initialized\n");
2055 return -EINVAL;
2056 }
2057
2058 if (ptr_svc->svc.listener_id != lstnr) {
2059 pr_warn("Service requested does not exist\n");
2060 return -ERESTARTSYS;
2061 }
2062 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2063
2064 /* initialize the new signal mask with all signals*/
2065 sigfillset(&new_sigset);
2066
2067 /* block all signals */
2068 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2069
2070 /* unlock mutex btw waking listener and sleep-wait */
2071 mutex_unlock(&app_access_lock);
2072 do {
2073 if (!wait_event_freezable(qseecom.send_resp_wq,
2074 __qseecom_reentrancy_listener_has_sent_rsp(
2075 data, ptr_svc))) {
2076 break;
2077 }
2078 } while (1);
2079 /* lock mutex again after resp sent */
2080 mutex_lock(&app_access_lock);
2081 ptr_svc->send_resp_flag = 0;
2082 qseecom.send_resp_flag = 0;
2083
2084 /* restore signal mask */
2085 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2086 if (data->abort) {
2087 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
2088 data->client.app_id, lstnr, ret);
2089 rc = -ENODEV;
2090 status = QSEOS_RESULT_FAILURE;
2091 } else {
2092 status = QSEOS_RESULT_SUCCESS;
2093 }
2094 table = ptr_svc->sglistinfo_ptr;
2095 if (qseecom.qsee_version < QSEE_VERSION_40) {
2096 send_data_rsp.listener_id = lstnr;
2097 send_data_rsp.status = status;
2098 send_data_rsp.sglistinfo_ptr =
2099 (uint32_t)virt_to_phys(table);
2100 send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
2101 dmac_flush_range((void *)table,
2102 (void *)table + SGLISTINFO_TABLE_SIZE);
2103 cmd_buf = (void *)&send_data_rsp;
2104 cmd_len = sizeof(send_data_rsp);
2105 } else {
2106 send_data_rsp_64bit.listener_id = lstnr;
2107 send_data_rsp_64bit.status = status;
2108 send_data_rsp_64bit.sglistinfo_ptr =
2109 virt_to_phys(table);
2110 send_data_rsp_64bit.sglistinfo_len =
2111 SGLISTINFO_TABLE_SIZE;
2112 dmac_flush_range((void *)table,
2113 (void *)table + SGLISTINFO_TABLE_SIZE);
2114 cmd_buf = (void *)&send_data_rsp_64bit;
2115 cmd_len = sizeof(send_data_rsp_64bit);
2116 }
2117 if (qseecom.whitelist_support == false)
2118 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2119 else
2120 *(uint32_t *)cmd_buf =
2121 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
2122 if (ptr_svc) {
2123 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2124 ptr_svc->ihandle,
2125 ptr_svc->sb_virt, ptr_svc->sb_length,
2126 ION_IOC_CLEAN_INV_CACHES);
2127 if (ret) {
2128 pr_err("cache operation failed %d\n", ret);
2129 return ret;
2130 }
2131 }
2132 if (lstnr == RPMB_SERVICE) {
2133 ret = __qseecom_enable_clk(CLK_QSEE);
2134 if (ret)
2135 return ret;
2136 }
2137
2138 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2139 cmd_buf, cmd_len, resp, sizeof(*resp));
2140 ptr_svc->listener_in_use = false;
2141 __qseecom_clean_listener_sglistinfo(ptr_svc);
2142 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2143
2144 if (ret) {
2145 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2146 ret, data->client.app_id);
2147 goto exit;
2148 }
2149
2150 switch (resp->result) {
2151 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2152 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2153 lstnr, data->client.app_id, resp->data);
2154 if (lstnr == resp->data) {
2155 pr_err("lstnr %d should not be blocked!\n",
2156 lstnr);
2157 ret = -EINVAL;
2158 goto exit;
2159 }
2160 ret = __qseecom_process_reentrancy_blocked_on_listener(
2161 resp, NULL, data);
2162 if (ret) {
2163 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2164 data->client.app_id,
2165 data->client.app_name, resp->data);
2166 goto exit;
2167 }
2168 case QSEOS_RESULT_SUCCESS:
2169 case QSEOS_RESULT_INCOMPLETE:
2170 break;
2171 default:
2172 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2173 resp->result, data->client.app_id, lstnr);
2174 ret = -EINVAL;
2175 goto exit;
2176 }
2177exit:
2178 if (lstnr == RPMB_SERVICE)
2179 __qseecom_disable_clk(CLK_QSEE);
2180
2181 }
2182 if (rc)
2183 return rc;
2184
2185 return ret;
2186}
2187
2188/*
2189 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2190 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
 * So, we need to first check that no app is blocked before sending an OS
 * level scm call, and then wait until all apps are unblocked.
2193 */
2194static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2195{
2196 sigset_t new_sigset, old_sigset;
2197
2198 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2199 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2200 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2201 /* thread sleep until this app unblocked */
2202 while (qseecom.app_block_ref_cnt > 0) {
2203 sigfillset(&new_sigset);
2204 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2205 mutex_unlock(&app_access_lock);
2206 do {
2207 if (!wait_event_freezable(qseecom.app_block_wq,
2208 (qseecom.app_block_ref_cnt == 0)))
2209 break;
2210 } while (1);
2211 mutex_lock(&app_access_lock);
2212 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2213 }
2214 }
2215}
2216
2217/*
2218 * scm_call of send data will fail if this TA is blocked or there are more
 * than one TA requesting listener services; so, first check whether we need
 * to wait.
2221 */
2222static void __qseecom_reentrancy_check_if_this_app_blocked(
2223 struct qseecom_registered_app_list *ptr_app)
2224{
2225 sigset_t new_sigset, old_sigset;
2226
2227 if (qseecom.qsee_reentrancy_support) {
2228 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2229 /* thread sleep until this app unblocked */
2230 sigfillset(&new_sigset);
2231 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2232 mutex_unlock(&app_access_lock);
2233 do {
2234 if (!wait_event_freezable(qseecom.app_block_wq,
2235 (!ptr_app->app_blocked &&
2236 qseecom.app_block_ref_cnt <= 1)))
2237 break;
2238 } while (1);
2239 mutex_lock(&app_access_lock);
2240 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2241 }
2242 }
2243}
2244
2245static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2246 uint32_t *app_id)
2247{
2248 int32_t ret;
2249 struct qseecom_command_scm_resp resp;
2250 bool found_app = false;
2251 struct qseecom_registered_app_list *entry = NULL;
2252 unsigned long flags = 0;
2253
2254 if (!app_id) {
2255 pr_err("Null pointer to app_id\n");
2256 return -EINVAL;
2257 }
2258 *app_id = 0;
2259
2260 /* check if app exists and has been registered locally */
2261 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2262 list_for_each_entry(entry,
2263 &qseecom.registered_app_list_head, list) {
2264 if (!strcmp(entry->app_name, req.app_name)) {
2265 found_app = true;
2266 break;
2267 }
2268 }
2269 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2270 if (found_app) {
2271 pr_debug("Found app with id %d\n", entry->app_id);
2272 *app_id = entry->app_id;
2273 return 0;
2274 }
2275
2276 memset((void *)&resp, 0, sizeof(resp));
2277
2278 /* SCM_CALL to check if app_id for the mentioned app exists */
2279 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2280 sizeof(struct qseecom_check_app_ireq),
2281 &resp, sizeof(resp));
2282 if (ret) {
2283 pr_err("scm_call to check if app is already loaded failed\n");
2284 return -EINVAL;
2285 }
2286
2287 if (resp.result == QSEOS_RESULT_FAILURE)
2288 return 0;
2289
2290 switch (resp.resp_type) {
2291 /*qsee returned listener type response */
2292 case QSEOS_LISTENER_ID:
2293 pr_err("resp type is of listener type instead of app");
2294 return -EINVAL;
2295 case QSEOS_APP_ID:
2296 *app_id = resp.data;
2297 return 0;
2298 default:
2299 pr_err("invalid resp type (%d) from qsee",
2300 resp.resp_type);
2301 return -ENODEV;
2302 }
2303}
2304
2305static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2306{
2307 struct qseecom_registered_app_list *entry = NULL;
2308 unsigned long flags = 0;
2309 u32 app_id = 0;
2310 struct ion_handle *ihandle; /* Ion handle */
2311 struct qseecom_load_img_req load_img_req;
2312 int32_t ret = 0;
2313 ion_phys_addr_t pa = 0;
2314 size_t len;
2315 struct qseecom_command_scm_resp resp;
2316 struct qseecom_check_app_ireq req;
2317 struct qseecom_load_app_ireq load_req;
2318 struct qseecom_load_app_64bit_ireq load_req_64bit;
2319 void *cmd_buf = NULL;
2320 size_t cmd_len;
2321 bool first_time = false;
2322
2323 /* Copy the relevant information needed for loading the image */
2324 if (copy_from_user(&load_img_req,
2325 (void __user *)argp,
2326 sizeof(struct qseecom_load_img_req))) {
2327 pr_err("copy_from_user failed\n");
2328 return -EFAULT;
2329 }
2330
2331 /* Check and load cmnlib */
2332 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2333 if (!qseecom.commonlib_loaded &&
2334 load_img_req.app_arch == ELFCLASS32) {
2335 ret = qseecom_load_commonlib_image(data, "cmnlib");
2336 if (ret) {
2337 pr_err("failed to load cmnlib\n");
2338 return -EIO;
2339 }
2340 qseecom.commonlib_loaded = true;
2341 pr_debug("cmnlib is loaded\n");
2342 }
2343
2344 if (!qseecom.commonlib64_loaded &&
2345 load_img_req.app_arch == ELFCLASS64) {
2346 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2347 if (ret) {
2348 pr_err("failed to load cmnlib64\n");
2349 return -EIO;
2350 }
2351 qseecom.commonlib64_loaded = true;
2352 pr_debug("cmnlib64 is loaded\n");
2353 }
2354 }
2355
2356 if (qseecom.support_bus_scaling) {
2357 mutex_lock(&qsee_bw_mutex);
2358 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2359 mutex_unlock(&qsee_bw_mutex);
2360 if (ret)
2361 return ret;
2362 }
2363
2364 /* Vote for the SFPB clock */
2365 ret = __qseecom_enable_clk_scale_up(data);
2366 if (ret)
2367 goto enable_clk_err;
2368
2369 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2370 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2371 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2372
2373 ret = __qseecom_check_app_exists(req, &app_id);
2374 if (ret < 0)
2375 goto loadapp_err;
2376
2377 if (app_id) {
2378 pr_debug("App id %d (%s) already exists\n", app_id,
2379 (char *)(req.app_name));
2380 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2381 list_for_each_entry(entry,
2382 &qseecom.registered_app_list_head, list){
2383 if (entry->app_id == app_id) {
2384 entry->ref_cnt++;
2385 break;
2386 }
2387 }
2388 spin_unlock_irqrestore(
2389 &qseecom.registered_app_list_lock, flags);
2390 ret = 0;
2391 } else {
2392 first_time = true;
2393 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2394 (char *)(load_img_req.img_name));
2395 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002396 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002397 load_img_req.ifd_data_fd);
2398 if (IS_ERR_OR_NULL(ihandle)) {
2399 pr_err("Ion client could not retrieve the handle\n");
2400 ret = -ENOMEM;
2401 goto loadapp_err;
2402 }
2403
2404 /* Get the physical address of the ION BUF */
2405 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2406 if (ret) {
2407 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2408 ret);
2409 goto loadapp_err;
2410 }
2411 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2412 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2413 len, load_img_req.mdt_len,
2414 load_img_req.img_len);
2415 ret = -EINVAL;
2416 goto loadapp_err;
2417 }
2418 /* Populate the structure for sending scm call to load image */
2419 if (qseecom.qsee_version < QSEE_VERSION_40) {
2420 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2421 load_req.mdt_len = load_img_req.mdt_len;
2422 load_req.img_len = load_img_req.img_len;
2423 strlcpy(load_req.app_name, load_img_req.img_name,
2424 MAX_APP_NAME_SIZE);
2425 load_req.phy_addr = (uint32_t)pa;
2426 cmd_buf = (void *)&load_req;
2427 cmd_len = sizeof(struct qseecom_load_app_ireq);
2428 } else {
2429 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2430 load_req_64bit.mdt_len = load_img_req.mdt_len;
2431 load_req_64bit.img_len = load_img_req.img_len;
2432 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2433 MAX_APP_NAME_SIZE);
2434 load_req_64bit.phy_addr = (uint64_t)pa;
2435 cmd_buf = (void *)&load_req_64bit;
2436 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2437 }
2438
2439 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2440 ION_IOC_CLEAN_INV_CACHES);
2441 if (ret) {
2442 pr_err("cache operation failed %d\n", ret);
2443 goto loadapp_err;
2444 }
2445
2446 /* SCM_CALL to load the app and get the app_id back */
2447 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2448 cmd_len, &resp, sizeof(resp));
2449 if (ret) {
2450 pr_err("scm_call to load app failed\n");
2451 if (!IS_ERR_OR_NULL(ihandle))
2452 ion_free(qseecom.ion_clnt, ihandle);
2453 ret = -EINVAL;
2454 goto loadapp_err;
2455 }
2456
2457 if (resp.result == QSEOS_RESULT_FAILURE) {
2458 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2459 if (!IS_ERR_OR_NULL(ihandle))
2460 ion_free(qseecom.ion_clnt, ihandle);
2461 ret = -EFAULT;
2462 goto loadapp_err;
2463 }
2464
2465 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2466 ret = __qseecom_process_incomplete_cmd(data, &resp);
2467 if (ret) {
2468 pr_err("process_incomplete_cmd failed err: %d\n",
2469 ret);
2470 if (!IS_ERR_OR_NULL(ihandle))
2471 ion_free(qseecom.ion_clnt, ihandle);
2472 ret = -EFAULT;
2473 goto loadapp_err;
2474 }
2475 }
2476
2477 if (resp.result != QSEOS_RESULT_SUCCESS) {
2478 pr_err("scm_call failed resp.result unknown, %d\n",
2479 resp.result);
2480 if (!IS_ERR_OR_NULL(ihandle))
2481 ion_free(qseecom.ion_clnt, ihandle);
2482 ret = -EFAULT;
2483 goto loadapp_err;
2484 }
2485
2486 app_id = resp.data;
2487
2488 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2489 if (!entry) {
2490 ret = -ENOMEM;
2491 goto loadapp_err;
2492 }
2493 entry->app_id = app_id;
2494 entry->ref_cnt = 1;
2495 entry->app_arch = load_img_req.app_arch;
2496 /*
2497 * keymaster app may be first loaded as "keymaste" by qseecomd,
2498 * and then used as "keymaster" on some targets. To avoid app
2499 * name checking error, register "keymaster" into app_list and
2500 * thread private data.
2501 */
2502 if (!strcmp(load_img_req.img_name, "keymaste"))
2503 strlcpy(entry->app_name, "keymaster",
2504 MAX_APP_NAME_SIZE);
2505 else
2506 strlcpy(entry->app_name, load_img_req.img_name,
2507 MAX_APP_NAME_SIZE);
2508 entry->app_blocked = false;
2509 entry->blocked_on_listener_id = 0;
2510
2511 /* Deallocate the handle */
2512 if (!IS_ERR_OR_NULL(ihandle))
2513 ion_free(qseecom.ion_clnt, ihandle);
2514
2515 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2516 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2517 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2518 flags);
2519
2520 pr_warn("App with id %u (%s) now loaded\n", app_id,
2521 (char *)(load_img_req.img_name));
2522 }
2523 data->client.app_id = app_id;
2524 data->client.app_arch = load_img_req.app_arch;
2525 if (!strcmp(load_img_req.img_name, "keymaste"))
2526 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2527 else
2528 strlcpy(data->client.app_name, load_img_req.img_name,
2529 MAX_APP_NAME_SIZE);
2530 load_img_req.app_id = app_id;
2531 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2532 pr_err("copy_to_user failed\n");
2533 ret = -EFAULT;
2534 if (first_time == true) {
2535 spin_lock_irqsave(
2536 &qseecom.registered_app_list_lock, flags);
2537 list_del(&entry->list);
2538 spin_unlock_irqrestore(
2539 &qseecom.registered_app_list_lock, flags);
2540 kzfree(entry);
2541 }
2542 }
2543
2544loadapp_err:
2545 __qseecom_disable_clk_scale_down(data);
2546enable_clk_err:
2547 if (qseecom.support_bus_scaling) {
2548 mutex_lock(&qsee_bw_mutex);
2549 qseecom_unregister_bus_bandwidth_needs(data);
2550 mutex_unlock(&qsee_bw_mutex);
2551 }
2552 return ret;
2553}
2554
2555static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2556{
2557 int ret = 1; /* Set unload app */
2558
2559 wake_up_all(&qseecom.send_resp_wq);
2560 if (qseecom.qsee_reentrancy_support)
2561 mutex_unlock(&app_access_lock);
2562 while (atomic_read(&data->ioctl_count) > 1) {
2563 if (wait_event_freezable(data->abort_wq,
2564 atomic_read(&data->ioctl_count) <= 1)) {
2565 pr_err("Interrupted from abort\n");
2566 ret = -ERESTARTSYS;
2567 break;
2568 }
2569 }
2570 if (qseecom.qsee_reentrancy_support)
2571 mutex_lock(&app_access_lock);
2572 return ret;
2573}
2574
2575static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2576{
2577 int ret = 0;
2578
2579 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2580 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2581 ion_free(qseecom.ion_clnt, data->client.ihandle);
2582 data->client.ihandle = NULL;
2583 }
2584 return ret;
2585}
2586
/*
 * Unload the TA bound to @data from QSEE and drop (or decrement) its
 * entry in the local registered-app list.
 *
 * @data:      client handle; its shared buffer is always unmapped and
 *             the handle marked released on exit, even on error.
 * @app_crash: true when called from a crash/teardown path; forces the
 *             ref count to 0 so the app is unloaded regardless of other
 *             references.
 *
 * Returns 0 on success or a negative errno.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;
	bool found_app = false;
	bool found_dead_app = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* keymaster stays resident; prefix match covers "keymaste" too */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	/*
	 * NOTE(review): __qseecom_cleanup_app() may return -ERESTARTSYS;
	 * that result is ignored here — confirm this is intentional.
	 */
	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/* unload on crash or last reference */
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				/* same id, different name: stale entry */
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for the app-shutdown scm call */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
								req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
					data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
					data->client.app_id);
		/* INCOMPLETE: a listener must be serviced before TZ finishes */
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
									ret);
				goto unload_exit;
			}
		}
	}

	/* update or remove the local bookkeeping entry */
	if (found_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}
unload_exit:
	/* always release the shared buffer and mark the handle released */
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2707
2708static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2709 unsigned long virt)
2710{
2711 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2712}
2713
2714static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2715 unsigned long virt)
2716{
2717 return (uintptr_t)data->client.sb_virt +
2718 (virt - data->client.user_virt_sb_base);
2719}
2720
2721int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2722 struct qseecom_send_svc_cmd_req *req_ptr,
2723 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2724{
2725 int ret = 0;
2726 void *req_buf = NULL;
2727
2728 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2729 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2730 req_ptr, send_svc_ireq_ptr);
2731 return -EINVAL;
2732 }
2733
2734 /* Clients need to ensure req_buf is at base offset of shared buffer */
2735 if ((uintptr_t)req_ptr->cmd_req_buf !=
2736 data_ptr->client.user_virt_sb_base) {
2737 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2738 return -EINVAL;
2739 }
2740
2741 if (data_ptr->client.sb_length <
2742 sizeof(struct qseecom_rpmb_provision_key)) {
2743 pr_err("shared buffer is too small to hold key type\n");
2744 return -EINVAL;
2745 }
2746 req_buf = data_ptr->client.sb_virt;
2747
2748 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2749 send_svc_ireq_ptr->key_type =
2750 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2751 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2752 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2753 data_ptr, (uintptr_t)req_ptr->resp_buf));
2754 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2755
2756 return ret;
2757}
2758
2759int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2760 struct qseecom_send_svc_cmd_req *req_ptr,
2761 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2762{
2763 int ret = 0;
2764 uint32_t reqd_len_sb_in = 0;
2765
2766 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2767 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2768 req_ptr, send_svc_ireq_ptr);
2769 return -EINVAL;
2770 }
2771
2772 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2773 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2774 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2775 pr_err("Required: %u, Available: %zu\n",
2776 reqd_len_sb_in, data_ptr->client.sb_length);
2777 return -ENOMEM;
2778 }
2779
2780 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2781 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2782 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2783 data_ptr, (uintptr_t)req_ptr->resp_buf));
2784 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2785
2786 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2787 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2788
2789
2790 return ret;
2791}
2792
/*
 * Validate a send-service-cmd request against the client's registered
 * shared buffer.  Both cmd and resp buffers (user-space addresses) must
 * lie fully inside the shared buffer, with overflow-safe arithmetic.
 * The check order is deliberate: each bound check relies on the earlier
 * overflow checks having passed — do not reorder.
 *
 * Returns 0 when valid, -EINVAL for bad input, -ENOMEM when the two
 * regions cannot fit in the shared buffer together.
 */
static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_svc_cmd_req *req)
{
	if (!req || !req->resp_buf || !req->cmd_req_buf) {
		pr_err("req or cmd buffer or response buffer is null\n");
		return -EINVAL;
	}

	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->client.sb_virt == NULL) {
		pr_err("sb_virt null\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base == 0) {
		pr_err("user_virt_sb_base is null\n");
		return -EINVAL;
	}

	if (data->client.sb_length == 0) {
		pr_err("sb_length is 0\n");
		return -EINVAL;
	}

	/* both buffers must start inside the shared buffer */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* guard the addition below against uint32 wraparound */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* guard the end-address computations against pointer wraparound */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* finally: both regions must END inside the shared buffer too */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
2877
/*
 * QSEECOM_IOCTL_SEND_SVC_CMD_REQ handler: forward an RPMB or FSM key
 * service command from userspace to QSEE through the client's shared
 * buffer, voting for bus bandwidth/clocks around the scm call and
 * maintaining cache coherency of the shared buffer.
 *
 * Returns 0 on success or a negative errno.
 */
static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	int ret = 0;
	struct qseecom_client_send_service_ireq send_svc_ireq;
	struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
	struct qseecom_command_scm_resp resp;
	struct qseecom_send_svc_cmd_req req;
	void *send_req_ptr;
	size_t req_buf_size;

	/*struct qseecom_command_scm_resp resp;*/

	if (copy_from_user(&req,
				(void __user *)argp,
				sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}

	if (__validate_send_service_cmd_inputs(data, &req))
		return -EINVAL;

	data->type = QSEECOM_SECURE_SERVICE;

	/* pick the ireq layout matching the command family */
	switch (req.cmd_id) {
	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
	case QSEOS_RPMB_ERASE_COMMAND:
	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
		send_req_ptr = &send_svc_ireq;
		req_buf_size = sizeof(send_svc_ireq);
		if (__qseecom_process_rpmb_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	case QSEOS_FSM_LTEOTA_REQ_CMD:
	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
	case QSEOS_FSM_IKE_REQ_CMD:
	case QSEOS_FSM_IKE_REQ_RSP_CMD:
	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
	case QSEOS_FSM_OEM_FUSE_READ_ROW:
	case QSEOS_FSM_ENCFS_REQ_CMD:
	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
		send_req_ptr = &send_fsm_key_svc_ireq;
		req_buf_size = sizeof(send_fsm_key_svc_ireq);
		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	default:
		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
		return -EINVAL;
	}

	/* vote for bandwidth/clocks for the duration of the scm call */
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
		if (ret) {
			pr_err("Fail to set bw HIGH\n");
			return ret;
		}
	} else {
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clocks with err %d\n", ret);
			goto exit;
		}
	}

	/* flush shared buffer so TZ sees the client's command data */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				(const void *)send_req_ptr,
				req_buf_size, &resp, sizeof(resp));
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		/* drop the clock/bandwidth votes taken above */
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		}
		goto exit;
	}
	/* invalidate so the CPU sees TZ's response data */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		pr_debug("qseos_result_incomplete\n");
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd fail with result: %d\n",
				resp.result);
		}
		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
			/* provisioning status is reported via resp.result */
			pr_warn("RPMB key status is 0x%x\n", resp.result);
			if (put_user(resp.result,
				(uint32_t __user *)req.resp_buf)) {
				ret = -EINVAL;
				goto exit;
			}
			ret = 0;
		}
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with resp.result: %d\n", resp.result);
		ret = -EINVAL;
		break;
	default:
		pr_err("Response result %d not supported\n",
				resp.result);
		ret = -EINVAL;
		break;
	}
	/* release the clock/bandwidth votes */
	if (!qseecom.support_bus_scaling) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	} else {
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
	}

exit:
	return ret;
}
3015
/*
 * Validate a send-cmd request against the client's registered shared
 * buffer.  Unlike the service-cmd variant, a NULL resp_buf is allowed
 * when resp_len is 0.  The check order is deliberate: later bound
 * checks rely on the earlier overflow checks — do not reorder.
 *
 * Returns 0 when valid, -EINVAL for bad input, -ENOMEM when the two
 * regions cannot fit in the shared buffer together.
 */
static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)

{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}
	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
						(req->cmd_req_buf == NULL)) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* both buffers must start inside the shared buffer */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* guard the addition below against uint32 wraparound */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* guard the end-address computations against pointer wraparound */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* finally: both regions must END inside the shared buffer too */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
3085
/*
 * Post-process a TZ response for an app command when QSEE reentrancy
 * is supported.  Handles the blocked-on-listener and incomplete result
 * codes; any other non-success result is rejected.
 *
 * Returns 0 on success or a negative errno.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
			resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/*
		 * NOTE(review): no break here — control falls through to the
		 * INCOMPLETE handling below.  This looks deliberate (the
		 * unblocked request still needs to be completed), but confirm.
		 */

	case QSEOS_RESULT_INCOMPLETE:
		/* Mark the app blocked while the incomplete cmd is serviced,
		 * and wake any waiters once it is done.
		 */
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
				ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
				resp->result);
		return -EINVAL;
	}
}
3124
/*
 * Send a command to a loaded TZ application on behalf of @data.
 *
 * @req's buffers must already have been validated
 * (__validate_send_cmd_inputs) and translated to kernel virtual
 * addresses inside the client's shared buffer.  Builds the 32-bit or
 * 64-bit SCM request depending on the QSEE version, flushes caches
 * around the SCM call, and post-processes the TZ response (reentrancy /
 * incomplete-command handling).
 *
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy QSEE: 32-bit physical addresses in the ireq */
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* TZ reads the sglist table via its physical address, so
		 * flush it out of the CPU caches first.
		 */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* The command id is the first u32 of both ireq layouts */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* Push the request region of the shared buffer to memory */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* Invalidate so the CPU sees TZ's writes into the shared buffer */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret)
		pr_err("cache operation failed %d\n", ret);
	return ret;
}
3255
3256static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3257{
3258 int ret = 0;
3259 struct qseecom_send_cmd_req req;
3260
3261 ret = copy_from_user(&req, argp, sizeof(req));
3262 if (ret) {
3263 pr_err("copy_from_user failed\n");
3264 return ret;
3265 }
3266
3267 if (__validate_send_cmd_inputs(data, &req))
3268 return -EINVAL;
3269
3270 ret = __qseecom_send_cmd(data, &req);
3271
3272 if (ret)
3273 return ret;
3274
3275 return ret;
3276}
3277
3278int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3279 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3280 struct qseecom_dev_handle *data, int i) {
3281
3282 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3283 (req->ifd_data[i].fd > 0)) {
3284 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3285 (req->ifd_data[i].cmd_buf_offset >
3286 req->cmd_req_len - sizeof(uint32_t))) {
3287 pr_err("Invalid offset (req len) 0x%x\n",
3288 req->ifd_data[i].cmd_buf_offset);
3289 return -EINVAL;
3290 }
3291 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3292 (lstnr_resp->ifd_data[i].fd > 0)) {
3293 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3294 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3295 lstnr_resp->resp_len - sizeof(uint32_t))) {
3296 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3297 lstnr_resp->ifd_data[i].cmd_buf_offset);
3298 return -EINVAL;
3299 }
3300 }
3301 return 0;
3302}
3303
/*
 * Patch (or, on @cleanup, un-patch) ION buffer physical addresses into a
 * modfd command/response buffer — 32-bit address variant.
 *
 * For every ifd_data[] slot with a valid fd, import the dma-buf, look up
 * its sg table, and write either a single 32-bit phys addr (one sg
 * entry) or an array of qseecom_sg_entry records at cmd_buf_offset.
 * On cleanup the same fields are zeroed.  Cache maintenance is performed
 * on the ION buffer, and sglistinfo bookkeeping is recorded for
 * whitelist-capable TZ calls.
 *
 * @msg is a qseecom_send_modfd_cmd_req (client app) or a
 * qseecom_send_modfd_listener_resp (listener), selected by @data->type.
 *
 * Returns 0 on success; -ENOMEM on any failure after the first import
 * (note: even validation failures exit through the err path as -ENOMEM).
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Pick the fd and target patch location for this slot */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch one 32-bit phys addr */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: patch an sg-entry array */
			struct qseecom_sg_entry *update;
			int j = 0;

			/* The array must fit inside the request/response
			 * buffer after cmd_buf_offset.
			 */
			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
						(req->cmd_req_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			/* TZ wrote the buffer: invalidate CPU caches */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* CPU wrote the buffer: clean it out for TZ */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record sglist metadata for whitelist SCM calls */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3520
3521static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3522 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3523{
3524 struct scatterlist *sg = sg_ptr->sgl;
3525 struct qseecom_sg_entry_64bit *sg_entry;
3526 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3527 void *buf;
3528 uint i;
3529 size_t size;
3530 dma_addr_t coh_pmem;
3531
3532 if (fd_idx >= MAX_ION_FD) {
3533 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3534 return -ENOMEM;
3535 }
3536 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3537 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3538 /* Allocate a contiguous kernel buffer */
3539 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3540 size = (size + PAGE_SIZE) & PAGE_MASK;
3541 buf = dma_alloc_coherent(qseecom.pdev,
3542 size, &coh_pmem, GFP_KERNEL);
3543 if (buf == NULL) {
3544 pr_err("failed to alloc memory for sg buf\n");
3545 return -ENOMEM;
3546 }
3547 /* update qseecom_sg_list_buf_hdr_64bit */
3548 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3549 buf_hdr->new_buf_phys_addr = coh_pmem;
3550 buf_hdr->nents_total = sg_ptr->nents;
3551 /* save the left sg entries into new allocated buf */
3552 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3553 for (i = 0; i < sg_ptr->nents; i++) {
3554 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3555 sg_entry->len = sg->length;
3556 sg_entry++;
3557 sg = sg_next(sg);
3558 }
3559
3560 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3561 data->client.sec_buf_fd[fd_idx].vbase = buf;
3562 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3563 data->client.sec_buf_fd[fd_idx].size = size;
3564
3565 return 0;
3566}
3567
/*
 * Patch (or, on @cleanup, un-patch) ION buffer physical addresses into a
 * modfd command/response buffer — 64-bit address variant.
 *
 * Mirrors __qseecom_update_cmd_buf() but writes 64-bit addresses /
 * qseecom_sg_entry_64bit records, and additionally supports sg tables
 * with more than QSEECOM_MAX_SG_ENTRY entries by spilling them into a
 * DMA-coherent side buffer (__qseecom_allocate_sg_list_buffer).
 *
 * Returns 0 on success; -ENOMEM on any failure after the first import
 * (the err path also frees all tracked side buffers).
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Pick the fd and target patch location for this slot */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			/* Too many entries for the inline format: spill the
			 * list to a side buffer (alloc on setup, free on
			 * cleanup) and only patch a header at @field.
			 */
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
						data->client.sec_buf_fd[i].size,
						data->client.sec_buf_fd[i].vbase,
						data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch one 64-bit phys addr */
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
				(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: patch a 64-bit sg-entry array */
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			/* The array must fit inside the request/response
			 * buffer after cmd_buf_offset.
			 */
			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
						(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			/* TZ wrote the buffer: invalidate CPU caches */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* CPU wrote the buffer: clean it out for TZ */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record sglist metadata for whitelist SCM calls */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* Free every tracked side buffer before bailing out */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3766
3767static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3768 void __user *argp,
3769 bool is_64bit_addr)
3770{
3771 int ret = 0;
3772 int i;
3773 struct qseecom_send_modfd_cmd_req req;
3774 struct qseecom_send_cmd_req send_cmd_req;
3775
3776 ret = copy_from_user(&req, argp, sizeof(req));
3777 if (ret) {
3778 pr_err("copy_from_user failed\n");
3779 return ret;
3780 }
3781
3782 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3783 send_cmd_req.cmd_req_len = req.cmd_req_len;
3784 send_cmd_req.resp_buf = req.resp_buf;
3785 send_cmd_req.resp_len = req.resp_len;
3786
3787 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3788 return -EINVAL;
3789
3790 /* validate offsets */
3791 for (i = 0; i < MAX_ION_FD; i++) {
3792 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3793 pr_err("Invalid offset %d = 0x%x\n",
3794 i, req.ifd_data[i].cmd_buf_offset);
3795 return -EINVAL;
3796 }
3797 }
3798 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3799 (uintptr_t)req.cmd_req_buf);
3800 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3801 (uintptr_t)req.resp_buf);
3802
3803 if (!is_64bit_addr) {
3804 ret = __qseecom_update_cmd_buf(&req, false, data);
3805 if (ret)
3806 return ret;
3807 ret = __qseecom_send_cmd(data, &send_cmd_req);
3808 if (ret)
3809 return ret;
3810 ret = __qseecom_update_cmd_buf(&req, true, data);
3811 if (ret)
3812 return ret;
3813 } else {
3814 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3815 if (ret)
3816 return ret;
3817 ret = __qseecom_send_cmd(data, &send_cmd_req);
3818 if (ret)
3819 return ret;
3820 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3821 if (ret)
3822 return ret;
3823 }
3824
3825 return ret;
3826}
3827
/* ioctl entry: modfd send-cmd using 32-bit address patching. */
static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, false);
}
3833
/* ioctl entry: modfd send-cmd using 64-bit address patching. */
static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, true);
}
3839
3840
3841
3842static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3843 struct qseecom_registered_listener_list *svc)
3844{
3845 int ret;
3846
3847 ret = (svc->rcv_req_flag != 0);
3848 return ret || data->abort;
3849}
3850
3851static int qseecom_receive_req(struct qseecom_dev_handle *data)
3852{
3853 int ret = 0;
3854 struct qseecom_registered_listener_list *this_lstnr;
3855
3856 this_lstnr = __qseecom_find_svc(data->listener.id);
3857 if (!this_lstnr) {
3858 pr_err("Invalid listener ID\n");
3859 return -ENODATA;
3860 }
3861
3862 while (1) {
3863 if (wait_event_freezable(this_lstnr->rcv_req_wq,
3864 __qseecom_listener_has_rcvd_req(data,
3865 this_lstnr))) {
3866 pr_debug("Interrupted: exiting Listener Service = %d\n",
3867 (uint32_t)data->listener.id);
3868 /* woken up for different reason */
3869 return -ERESTARTSYS;
3870 }
3871
3872 if (data->abort) {
3873 pr_err("Aborting Listener Service = %d\n",
3874 (uint32_t)data->listener.id);
3875 return -ENODEV;
3876 }
3877 this_lstnr->rcv_req_flag = 0;
3878 break;
3879 }
3880 return ret;
3881}
3882
3883static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3884{
3885 unsigned char app_arch = 0;
3886 struct elf32_hdr *ehdr;
3887 struct elf64_hdr *ehdr64;
3888
3889 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3890
3891 switch (app_arch) {
3892 case ELFCLASS32: {
3893 ehdr = (struct elf32_hdr *)fw_entry->data;
3894 if (fw_entry->size < sizeof(*ehdr)) {
3895 pr_err("%s: Not big enough to be an elf32 header\n",
3896 qseecom.pdev->init_name);
3897 return false;
3898 }
3899 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3900 pr_err("%s: Not an elf32 header\n",
3901 qseecom.pdev->init_name);
3902 return false;
3903 }
3904 if (ehdr->e_phnum == 0) {
3905 pr_err("%s: No loadable segments\n",
3906 qseecom.pdev->init_name);
3907 return false;
3908 }
3909 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3910 sizeof(struct elf32_hdr) > fw_entry->size) {
3911 pr_err("%s: Program headers not within mdt\n",
3912 qseecom.pdev->init_name);
3913 return false;
3914 }
3915 break;
3916 }
3917 case ELFCLASS64: {
3918 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3919 if (fw_entry->size < sizeof(*ehdr64)) {
3920 pr_err("%s: Not big enough to be an elf64 header\n",
3921 qseecom.pdev->init_name);
3922 return false;
3923 }
3924 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3925 pr_err("%s: Not an elf64 header\n",
3926 qseecom.pdev->init_name);
3927 return false;
3928 }
3929 if (ehdr64->e_phnum == 0) {
3930 pr_err("%s: No loadable segments\n",
3931 qseecom.pdev->init_name);
3932 return false;
3933 }
3934 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3935 sizeof(struct elf64_hdr) > fw_entry->size) {
3936 pr_err("%s: Program headers not within mdt\n",
3937 qseecom.pdev->init_name);
3938 return false;
3939 }
3940 break;
3941 }
3942 default: {
3943 pr_err("QSEE app arch %u is not supported\n", app_arch);
3944 return false;
3945 }
3946 }
3947 return true;
3948}
3949
/*
 * Compute the total size of a split firmware image for @appname:
 * the "<app>.mdt" header plus every "<app>.bNN" segment blob named by
 * its program-header count.  Also reports the ELF class in @app_arch.
 *
 * On success returns 0 with *fw_size set; on failure returns a negative
 * errno (or the raw request_firmware() error) with *fw_size zeroed.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with request_firmware\n");
		ret = -EIO;
		goto err;
	}
	/* Validates ELF magic, phnum and header bounds */
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* One ".bNN" blob per program header; sum their sizes */
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}

	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
4012
/*
 * Concatenate the split firmware image for @appname ("<app>.mdt" then
 * each "<app>.bNN" blob) into @img_data, which the caller allocated
 * with capacity @fw_size (from __qseecom_get_fw_size()).  Fills
 * @load_req->img_len and ->mdt_len as it goes.
 *
 * Returns 0 on success or a negative errno; every partial copy path
 * re-validates against @fw_size so the buffer cannot be overrun even if
 * the firmware files changed since the size pass.
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
				uint32_t fw_size,
				struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}

	load_req->img_len = fw_entry->size;
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/

	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, app_arch);
		ret = -EIO;
		goto err;
	}
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* Append each segment blob, guarding against overflow/overrun */
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	release_firmware(fw_entry);
	return ret;
}
4084
4085static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4086 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4087{
4088 size_t len = 0;
4089 int ret = 0;
4090 ion_phys_addr_t pa;
4091 struct ion_handle *ihandle = NULL;
4092 u8 *img_data = NULL;
4093
4094 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
4095 SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4096
4097 if (IS_ERR_OR_NULL(ihandle)) {
4098 pr_err("ION alloc failed\n");
4099 return -ENOMEM;
4100 }
4101 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4102 ihandle);
4103
4104 if (IS_ERR_OR_NULL(img_data)) {
4105 pr_err("ION memory mapping for image loading failed\n");
4106 ret = -ENOMEM;
4107 goto exit_ion_free;
4108 }
4109 /* Get the physical address of the ION BUF */
4110 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4111 if (ret) {
4112 pr_err("physical memory retrieval failure\n");
4113 ret = -EIO;
4114 goto exit_ion_unmap_kernel;
4115 }
4116
4117 *pihandle = ihandle;
4118 *data = img_data;
4119 *paddr = pa;
4120 return ret;
4121
4122exit_ion_unmap_kernel:
4123 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4124exit_ion_free:
4125 ion_free(qseecom.ion_clnt, ihandle);
4126 ihandle = NULL;
4127 return ret;
4128}
4129
4130static void __qseecom_free_img_data(struct ion_handle **ihandle)
4131{
4132 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4133 ion_free(qseecom.ion_clnt, *ihandle);
4134 *ihandle = NULL;
4135}
4136
4137static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4138 uint32_t *app_id)
4139{
4140 int ret = -1;
4141 uint32_t fw_size = 0;
4142 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4143 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4144 struct qseecom_command_scm_resp resp;
4145 u8 *img_data = NULL;
4146 ion_phys_addr_t pa = 0;
4147 struct ion_handle *ihandle = NULL;
4148 void *cmd_buf = NULL;
4149 size_t cmd_len;
4150 uint32_t app_arch = 0;
4151
4152 if (!data || !appname || !app_id) {
4153 pr_err("Null pointer to data or appname or appid\n");
4154 return -EINVAL;
4155 }
4156 *app_id = 0;
4157 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4158 return -EIO;
4159 data->client.app_arch = app_arch;
4160
4161 /* Check and load cmnlib */
4162 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4163 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4164 ret = qseecom_load_commonlib_image(data, "cmnlib");
4165 if (ret) {
4166 pr_err("failed to load cmnlib\n");
4167 return -EIO;
4168 }
4169 qseecom.commonlib_loaded = true;
4170 pr_debug("cmnlib is loaded\n");
4171 }
4172
4173 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4174 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4175 if (ret) {
4176 pr_err("failed to load cmnlib64\n");
4177 return -EIO;
4178 }
4179 qseecom.commonlib64_loaded = true;
4180 pr_debug("cmnlib64 is loaded\n");
4181 }
4182 }
4183
4184 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4185 if (ret)
4186 return ret;
4187
4188 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4189 if (ret) {
4190 ret = -EIO;
4191 goto exit_free_img_data;
4192 }
4193
4194 /* Populate the load_req parameters */
4195 if (qseecom.qsee_version < QSEE_VERSION_40) {
4196 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4197 load_req.mdt_len = load_req.mdt_len;
4198 load_req.img_len = load_req.img_len;
4199 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4200 load_req.phy_addr = (uint32_t)pa;
4201 cmd_buf = (void *)&load_req;
4202 cmd_len = sizeof(struct qseecom_load_app_ireq);
4203 } else {
4204 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4205 load_req_64bit.mdt_len = load_req.mdt_len;
4206 load_req_64bit.img_len = load_req.img_len;
4207 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4208 load_req_64bit.phy_addr = (uint64_t)pa;
4209 cmd_buf = (void *)&load_req_64bit;
4210 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4211 }
4212
4213 if (qseecom.support_bus_scaling) {
4214 mutex_lock(&qsee_bw_mutex);
4215 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4216 mutex_unlock(&qsee_bw_mutex);
4217 if (ret) {
4218 ret = -EIO;
4219 goto exit_free_img_data;
4220 }
4221 }
4222
4223 ret = __qseecom_enable_clk_scale_up(data);
4224 if (ret) {
4225 ret = -EIO;
4226 goto exit_unregister_bus_bw_need;
4227 }
4228
4229 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4230 img_data, fw_size,
4231 ION_IOC_CLEAN_INV_CACHES);
4232 if (ret) {
4233 pr_err("cache operation failed %d\n", ret);
4234 goto exit_disable_clk_vote;
4235 }
4236
4237 /* SCM_CALL to load the image */
4238 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4239 &resp, sizeof(resp));
4240 if (ret) {
4241 pr_err("scm_call to load failed : ret %d\n", ret);
4242 ret = -EIO;
4243 goto exit_disable_clk_vote;
4244 }
4245
4246 switch (resp.result) {
4247 case QSEOS_RESULT_SUCCESS:
4248 *app_id = resp.data;
4249 break;
4250 case QSEOS_RESULT_INCOMPLETE:
4251 ret = __qseecom_process_incomplete_cmd(data, &resp);
4252 if (ret)
4253 pr_err("process_incomplete_cmd FAILED\n");
4254 else
4255 *app_id = resp.data;
4256 break;
4257 case QSEOS_RESULT_FAILURE:
4258 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4259 break;
4260 default:
4261 pr_err("scm call return unknown response %d\n", resp.result);
4262 ret = -EINVAL;
4263 break;
4264 }
4265
4266exit_disable_clk_vote:
4267 __qseecom_disable_clk_scale_down(data);
4268
4269exit_unregister_bus_bw_need:
4270 if (qseecom.support_bus_scaling) {
4271 mutex_lock(&qsee_bw_mutex);
4272 qseecom_unregister_bus_bandwidth_needs(data);
4273 mutex_unlock(&qsee_bw_mutex);
4274 }
4275
4276exit_free_img_data:
4277 __qseecom_free_img_data(&ihandle);
4278 return ret;
4279}
4280
/*
 * Load a QSEE common library image ("cmnlib" or "cmnlib64") into the
 * secure environment.
 *
 * The image is staged in the driver-global cmnlib ION buffer
 * (qseecom.cmnlib_ion_handle), cache-flushed, and handed to TZ via the
 * LOAD_SERV_IMAGE SCM command using the 32- or 64-bit request format
 * depending on the QSEE version.  Bus bandwidth and crypto clocks are
 * voted for around the SCM call and released afterwards; the staging
 * buffer is always freed before returning.  Returns 0 on success or a
 * negative errno on failure.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	/* Name must fit (with NUL) into the ireq's fixed-size field */
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_allocate_img_data(&qseecom.cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	/* Build the 32- or 64-bit load request based on the QSEE version */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* Flush the staged image so TZ reads coherent memory */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, qseecom.cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener service(s) before it can finish */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&qseecom.cmnlib_ion_handle);
	return ret;
}
4400
4401static int qseecom_unload_commonlib_image(void)
4402{
4403 int ret = -EINVAL;
4404 struct qseecom_unload_lib_image_ireq unload_req = {0};
4405 struct qseecom_command_scm_resp resp;
4406
4407 /* Populate the remaining parameters */
4408 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4409
4410 /* SCM_CALL to load the image */
4411 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4412 sizeof(struct qseecom_unload_lib_image_ireq),
4413 &resp, sizeof(resp));
4414 if (ret) {
4415 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4416 ret = -EIO;
4417 } else {
4418 switch (resp.result) {
4419 case QSEOS_RESULT_SUCCESS:
4420 break;
4421 case QSEOS_RESULT_FAILURE:
4422 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4423 break;
4424 default:
4425 pr_err("scm call return unknown response %d\n",
4426 resp.result);
4427 ret = -EINVAL;
4428 break;
4429 }
4430 }
4431
4432 return ret;
4433}
4434
4435int qseecom_start_app(struct qseecom_handle **handle,
4436 char *app_name, uint32_t size)
4437{
4438 int32_t ret = 0;
4439 unsigned long flags = 0;
4440 struct qseecom_dev_handle *data = NULL;
4441 struct qseecom_check_app_ireq app_ireq;
4442 struct qseecom_registered_app_list *entry = NULL;
4443 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4444 bool found_app = false;
4445 size_t len;
4446 ion_phys_addr_t pa;
4447 uint32_t fw_size, app_arch;
4448 uint32_t app_id = 0;
4449
4450 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4451 pr_err("Not allowed to be called in %d state\n",
4452 atomic_read(&qseecom.qseecom_state));
4453 return -EPERM;
4454 }
4455 if (!app_name) {
4456 pr_err("failed to get the app name\n");
4457 return -EINVAL;
4458 }
4459
Zhen Kong64a6d7282017-06-16 11:55:07 -07004460 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004461 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004462 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004463 return -EINVAL;
4464 }
4465
4466 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4467 if (!(*handle))
4468 return -ENOMEM;
4469
4470 data = kzalloc(sizeof(*data), GFP_KERNEL);
4471 if (!data) {
4472 if (ret == 0) {
4473 kfree(*handle);
4474 *handle = NULL;
4475 }
4476 return -ENOMEM;
4477 }
4478 data->abort = 0;
4479 data->type = QSEECOM_CLIENT_APP;
4480 data->released = false;
4481 data->client.sb_length = size;
4482 data->client.user_virt_sb_base = 0;
4483 data->client.ihandle = NULL;
4484
4485 init_waitqueue_head(&data->abort_wq);
4486
4487 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4488 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4489 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4490 pr_err("Ion client could not retrieve the handle\n");
4491 kfree(data);
4492 kfree(*handle);
4493 *handle = NULL;
4494 return -EINVAL;
4495 }
4496 mutex_lock(&app_access_lock);
4497
4498 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4499 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4500 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4501 if (ret)
4502 goto err;
4503
4504 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4505 if (app_id) {
4506 pr_warn("App id %d for [%s] app exists\n", app_id,
4507 (char *)app_ireq.app_name);
4508 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4509 list_for_each_entry(entry,
4510 &qseecom.registered_app_list_head, list){
4511 if (entry->app_id == app_id) {
4512 entry->ref_cnt++;
4513 found_app = true;
4514 break;
4515 }
4516 }
4517 spin_unlock_irqrestore(
4518 &qseecom.registered_app_list_lock, flags);
4519 if (!found_app)
4520 pr_warn("App_id %d [%s] was loaded but not registered\n",
4521 ret, (char *)app_ireq.app_name);
4522 } else {
4523 /* load the app and get the app_id */
4524 pr_debug("%s: Loading app for the first time'\n",
4525 qseecom.pdev->init_name);
4526 ret = __qseecom_load_fw(data, app_name, &app_id);
4527 if (ret < 0)
4528 goto err;
4529 }
4530 data->client.app_id = app_id;
4531 if (!found_app) {
4532 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4533 if (!entry) {
4534 pr_err("kmalloc for app entry failed\n");
4535 ret = -ENOMEM;
4536 goto err;
4537 }
4538 entry->app_id = app_id;
4539 entry->ref_cnt = 1;
4540 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4541 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4542 ret = -EIO;
4543 kfree(entry);
4544 goto err;
4545 }
4546 entry->app_arch = app_arch;
4547 entry->app_blocked = false;
4548 entry->blocked_on_listener_id = 0;
4549 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4550 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4551 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4552 flags);
4553 }
4554
4555 /* Get the physical address of the ION BUF */
4556 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4557 if (ret) {
4558 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4559 ret);
4560 goto err;
4561 }
4562
4563 /* Populate the structure for sending scm call to load image */
4564 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4565 data->client.ihandle);
4566 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4567 pr_err("ION memory mapping for client shared buf failed\n");
4568 ret = -ENOMEM;
4569 goto err;
4570 }
4571 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4572 data->client.sb_phys = (phys_addr_t)pa;
4573 (*handle)->dev = (void *)data;
4574 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4575 (*handle)->sbuf_len = data->client.sb_length;
4576
4577 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4578 if (!kclient_entry) {
4579 ret = -ENOMEM;
4580 goto err;
4581 }
4582 kclient_entry->handle = *handle;
4583
4584 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4585 list_add_tail(&kclient_entry->list,
4586 &qseecom.registered_kclient_list_head);
4587 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4588
4589 mutex_unlock(&app_access_lock);
4590 return 0;
4591
4592err:
4593 kfree(data);
4594 kfree(*handle);
4595 *handle = NULL;
4596 mutex_unlock(&app_access_lock);
4597 return ret;
4598}
4599EXPORT_SYMBOL(qseecom_start_app);
4600
4601int qseecom_shutdown_app(struct qseecom_handle **handle)
4602{
4603 int ret = -EINVAL;
4604 struct qseecom_dev_handle *data;
4605
4606 struct qseecom_registered_kclient_list *kclient = NULL;
4607 unsigned long flags = 0;
4608 bool found_handle = false;
4609
4610 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4611 pr_err("Not allowed to be called in %d state\n",
4612 atomic_read(&qseecom.qseecom_state));
4613 return -EPERM;
4614 }
4615
4616 if ((handle == NULL) || (*handle == NULL)) {
4617 pr_err("Handle is not initialized\n");
4618 return -EINVAL;
4619 }
4620 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4621 mutex_lock(&app_access_lock);
4622
4623 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4624 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4625 list) {
4626 if (kclient->handle == (*handle)) {
4627 list_del(&kclient->list);
4628 found_handle = true;
4629 break;
4630 }
4631 }
4632 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4633 if (!found_handle)
4634 pr_err("Unable to find the handle, exiting\n");
4635 else
4636 ret = qseecom_unload_app(data, false);
4637
4638 mutex_unlock(&app_access_lock);
4639 if (ret == 0) {
4640 kzfree(data);
4641 kzfree(*handle);
4642 kzfree(kclient);
4643 *handle = NULL;
4644 }
4645
4646 return ret;
4647}
4648EXPORT_SYMBOL(qseecom_shutdown_app);
4649
/*
 * Public kernel-client API: send a command to the trusted app behind
 * @handle.  @send_buf/@sbuf_len describe the request and
 * @resp_buf/@rbuf_len the response area; both are validated by
 * __validate_send_cmd_inputs() before use.  Bus bandwidth and crypto
 * clocks are voted for around the send when needed.  Returns 0 on
 * success or a negative errno.
 */
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
			atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
						ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		perf_enabled = true;
	}
	/* The "securemm" app still uses the legacy command format */
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req);
	data->use_legacy_cmd = false;
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	/* Drop the temporary clock vote taken above, if any */
	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
EXPORT_SYMBOL(qseecom_send_command);
4729
4730int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4731{
4732 int ret = 0;
4733
4734 if ((handle == NULL) || (handle->dev == NULL)) {
4735 pr_err("No valid kernel client\n");
4736 return -EINVAL;
4737 }
4738 if (high) {
4739 if (qseecom.support_bus_scaling) {
4740 mutex_lock(&qsee_bw_mutex);
4741 __qseecom_register_bus_bandwidth_needs(handle->dev,
4742 HIGH);
4743 mutex_unlock(&qsee_bw_mutex);
4744 } else {
4745 ret = qseecom_perf_enable(handle->dev);
4746 if (ret)
4747 pr_err("Failed to vote for clock with err %d\n",
4748 ret);
4749 }
4750 } else {
4751 if (!qseecom.support_bus_scaling) {
4752 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4753 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4754 } else {
4755 mutex_lock(&qsee_bw_mutex);
4756 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4757 mutex_unlock(&qsee_bw_mutex);
4758 }
4759 }
4760 return ret;
4761}
4762EXPORT_SYMBOL(qseecom_set_bandwidth);
4763
4764int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
4765{
4766 struct qseecom_registered_app_list dummy_app_entry = { {0} };
4767 struct qseecom_dev_handle dummy_private_data = {0};
4768 struct qseecom_command_scm_resp resp;
4769 int ret = 0;
4770
4771 if (!desc) {
4772 pr_err("desc is NULL\n");
4773 return -EINVAL;
4774 }
4775
4776 resp.result = desc->ret[0]; /*req_cmd*/
Zhen Kong2f60f492017-06-29 15:22:14 -07004777 resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004778 resp.data = desc->ret[2]; /*listener_id*/
4779
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004780 mutex_lock(&app_access_lock);
4781 ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
4782 &dummy_private_data);
4783 mutex_unlock(&app_access_lock);
4784 if (ret)
Zhen Kong2f60f492017-06-29 15:22:14 -07004785 pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004786 (int)desc->ret[0], (int)desc->ret[2],
4787 (int)desc->ret[1], ret);
4788 desc->ret[0] = resp.result;
4789 desc->ret[1] = resp.resp_type;
4790 desc->ret[2] = resp.data;
4791 return ret;
4792}
4793EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4794
/*
 * Listener signals its response is ready (legacy, non-reentrant path):
 * set the global flag and wake any waiter on the response queue.
 */
static int qseecom_send_resp(void)
{
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4801
4802static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4803{
4804 struct qseecom_registered_listener_list *this_lstnr = NULL;
4805
4806 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4807 this_lstnr = __qseecom_find_svc(data->listener.id);
4808 if (this_lstnr == NULL)
4809 return -EINVAL;
4810 qseecom.send_resp_flag = 1;
4811 this_lstnr->send_resp_flag = 1;
4812 wake_up_interruptible(&qseecom.send_resp_wq);
4813 return 0;
4814}
4815
4816static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
4817 struct qseecom_send_modfd_listener_resp *resp,
4818 struct qseecom_registered_listener_list *this_lstnr)
4819{
4820 int i;
4821
4822 if (!data || !resp || !this_lstnr) {
4823 pr_err("listener handle or resp msg is null\n");
4824 return -EINVAL;
4825 }
4826
4827 if (resp->resp_buf_ptr == NULL) {
4828 pr_err("resp buffer is null\n");
4829 return -EINVAL;
4830 }
4831 /* validate resp buf length */
4832 if ((resp->resp_len == 0) ||
4833 (resp->resp_len > this_lstnr->sb_length)) {
4834 pr_err("resp buf length %d not valid\n", resp->resp_len);
4835 return -EINVAL;
4836 }
4837
4838 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
4839 pr_err("Integer overflow in resp_len & resp_buf\n");
4840 return -EINVAL;
4841 }
4842 if ((uintptr_t)this_lstnr->user_virt_sb_base >
4843 (ULONG_MAX - this_lstnr->sb_length)) {
4844 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
4845 return -EINVAL;
4846 }
4847 /* validate resp buf */
4848 if (((uintptr_t)resp->resp_buf_ptr <
4849 (uintptr_t)this_lstnr->user_virt_sb_base) ||
4850 ((uintptr_t)resp->resp_buf_ptr >=
4851 ((uintptr_t)this_lstnr->user_virt_sb_base +
4852 this_lstnr->sb_length)) ||
4853 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
4854 ((uintptr_t)this_lstnr->user_virt_sb_base +
4855 this_lstnr->sb_length))) {
4856 pr_err("resp buf is out of shared buffer region\n");
4857 return -EINVAL;
4858 }
4859
4860 /* validate offsets */
4861 for (i = 0; i < MAX_ION_FD; i++) {
4862 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
4863 pr_err("Invalid offset %d = 0x%x\n",
4864 i, resp->ifd_data[i].cmd_buf_offset);
4865 return -EINVAL;
4866 }
4867 }
4868
4869 return 0;
4870}
4871
4872static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4873 void __user *argp, bool is_64bit_addr)
4874{
4875 struct qseecom_send_modfd_listener_resp resp;
4876 struct qseecom_registered_listener_list *this_lstnr = NULL;
4877
4878 if (copy_from_user(&resp, argp, sizeof(resp))) {
4879 pr_err("copy_from_user failed");
4880 return -EINVAL;
4881 }
4882
4883 this_lstnr = __qseecom_find_svc(data->listener.id);
4884 if (this_lstnr == NULL)
4885 return -EINVAL;
4886
4887 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
4888 return -EINVAL;
4889
4890 resp.resp_buf_ptr = this_lstnr->sb_virt +
4891 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
4892
4893 if (!is_64bit_addr)
4894 __qseecom_update_cmd_buf(&resp, false, data);
4895 else
4896 __qseecom_update_cmd_buf_64(&resp, false, data);
4897 qseecom.send_resp_flag = 1;
4898 this_lstnr->send_resp_flag = 1;
4899 wake_up_interruptible(&qseecom.send_resp_wq);
4900 return 0;
4901}
4902
/* ioctl handler: listener response with 32-bit ion fd addresses. */
static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, false);
}
4908
/* ioctl handler: listener response with 64-bit ion fd addresses. */
static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, true);
}
4914
4915static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
4916 void __user *argp)
4917{
4918 struct qseecom_qseos_version_req req;
4919
4920 if (copy_from_user(&req, argp, sizeof(req))) {
4921 pr_err("copy_from_user failed");
4922 return -EINVAL;
4923 }
4924 req.qseos_version = qseecom.qseos_version;
4925 if (copy_to_user(argp, &req, sizeof(req))) {
4926 pr_err("copy_to_user failed");
4927 return -EINVAL;
4928 }
4929 return 0;
4930}
4931
/*
 * Take a reference on the given CE clock set (CLK_QSEE or CLK_CE_DRV),
 * enabling the core, interface (CE) and bus (AXI) clocks on the
 * 0 -> 1 transition.  Returns 0 on success, -EINVAL for an unknown
 * clock instance, or -EIO if any clock fails to enable (partially
 * enabled clocks are unwound in reverse order).
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	/* Clocks already running: just take another reference */
	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

	/* Error unwind: disable in reverse order of enabling */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
4999
5000static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5001{
5002 struct qseecom_clk *qclk;
5003
5004 if (qseecom.no_clock_support)
5005 return;
5006
5007 if (ce == CLK_QSEE)
5008 qclk = &qseecom.qsee;
5009 else
5010 qclk = &qseecom.ce_drv;
5011
5012 mutex_lock(&clk_access_lock);
5013
5014 if (qclk->clk_access_cnt == 0) {
5015 mutex_unlock(&clk_access_lock);
5016 return;
5017 }
5018
5019 if (qclk->clk_access_cnt == 1) {
5020 if (qclk->ce_clk != NULL)
5021 clk_disable_unprepare(qclk->ce_clk);
5022 if (qclk->ce_core_clk != NULL)
5023 clk_disable_unprepare(qclk->ce_core_clk);
5024 if (qclk->ce_bus_clk != NULL)
5025 clk_disable_unprepare(qclk->ce_bus_clk);
5026 }
5027 qclk->clk_access_cnt--;
5028 mutex_unlock(&clk_access_lock);
5029}
5030
/*
 * Take a DFAB or SFPB bandwidth vote on behalf of @data.  The vote
 * counts are driver-global; the msm bus request level is chosen from
 * which vote classes are active (per this code: 1 = DFAB only,
 * 2 = SFPB only, 3 = both).  On the very first vote of either class
 * the CE source clock is enabled as well.  Returns 0 on success or a
 * negative errno from the bus/clock calls.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			/* First DFAB vote: step bus level up, and enable
			 * the CE source clock if no SFPB votes exist yet.
			 */
			if (qseecom.qsee_sfpb_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			/* First SFPB vote: same dance with level 2/3 */
			if (qseecom.qsee_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}

			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
5114
5115static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5116 int32_t clk_type)
5117{
5118 int32_t ret = 0;
5119 struct qseecom_clk *qclk;
5120
5121 qclk = &qseecom.qsee;
5122
5123 if (qseecom.no_clock_support)
5124 return;
5125 if (!qseecom.qsee_perf_client)
5126 return;
5127
5128 switch (clk_type) {
5129 case CLK_DFAB:
5130 mutex_lock(&qsee_bw_mutex);
5131 if (qseecom.qsee_bw_count == 0) {
5132 pr_err("Client error.Extra call to disable DFAB clk\n");
5133 mutex_unlock(&qsee_bw_mutex);
5134 return;
5135 }
5136
5137 if (qseecom.qsee_bw_count == 1) {
5138 if (qseecom.qsee_sfpb_bw_count > 0)
5139 ret = msm_bus_scale_client_update_request(
5140 qseecom.qsee_perf_client, 2);
5141 else {
5142 ret = msm_bus_scale_client_update_request(
5143 qseecom.qsee_perf_client, 0);
5144 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5145 __qseecom_disable_clk(CLK_QSEE);
5146 }
5147 if (ret)
5148 pr_err("SFPB Bandwidth req fail (%d)\n",
5149 ret);
5150 else {
5151 qseecom.qsee_bw_count--;
5152 data->perf_enabled = false;
5153 }
5154 } else {
5155 qseecom.qsee_bw_count--;
5156 data->perf_enabled = false;
5157 }
5158 mutex_unlock(&qsee_bw_mutex);
5159 break;
5160 case CLK_SFPB:
5161 mutex_lock(&qsee_bw_mutex);
5162 if (qseecom.qsee_sfpb_bw_count == 0) {
5163 pr_err("Client error.Extra call to disable SFPB clk\n");
5164 mutex_unlock(&qsee_bw_mutex);
5165 return;
5166 }
5167 if (qseecom.qsee_sfpb_bw_count == 1) {
5168 if (qseecom.qsee_bw_count > 0)
5169 ret = msm_bus_scale_client_update_request(
5170 qseecom.qsee_perf_client, 1);
5171 else {
5172 ret = msm_bus_scale_client_update_request(
5173 qseecom.qsee_perf_client, 0);
5174 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5175 __qseecom_disable_clk(CLK_QSEE);
5176 }
5177 if (ret)
5178 pr_err("SFPB Bandwidth req fail (%d)\n",
5179 ret);
5180 else {
5181 qseecom.qsee_sfpb_bw_count--;
5182 data->fast_load_enabled = false;
5183 }
5184 } else {
5185 qseecom.qsee_sfpb_bw_count--;
5186 data->fast_load_enabled = false;
5187 }
5188 mutex_unlock(&qsee_bw_mutex);
5189 break;
5190 default:
5191 pr_err("Clock type not defined\n");
5192 break;
5193 }
5194
5195}
5196
5197static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5198 void __user *argp)
5199{
5200 struct ion_handle *ihandle; /* Ion handle */
5201 struct qseecom_load_img_req load_img_req;
5202 int uret = 0;
5203 int ret;
5204 ion_phys_addr_t pa = 0;
5205 size_t len;
5206 struct qseecom_load_app_ireq load_req;
5207 struct qseecom_load_app_64bit_ireq load_req_64bit;
5208 struct qseecom_command_scm_resp resp;
5209 void *cmd_buf = NULL;
5210 size_t cmd_len;
5211 /* Copy the relevant information needed for loading the image */
5212 if (copy_from_user(&load_img_req,
5213 (void __user *)argp,
5214 sizeof(struct qseecom_load_img_req))) {
5215 pr_err("copy_from_user failed\n");
5216 return -EFAULT;
5217 }
5218
5219 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005220 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005221 load_img_req.ifd_data_fd);
5222 if (IS_ERR_OR_NULL(ihandle)) {
5223 pr_err("Ion client could not retrieve the handle\n");
5224 return -ENOMEM;
5225 }
5226
5227 /* Get the physical address of the ION BUF */
5228 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5229 if (ret) {
5230 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5231 ret);
5232 return ret;
5233 }
5234 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5235 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5236 len, load_img_req.mdt_len,
5237 load_img_req.img_len);
5238 return ret;
5239 }
5240 /* Populate the structure for sending scm call to load image */
5241 if (qseecom.qsee_version < QSEE_VERSION_40) {
5242 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5243 load_req.mdt_len = load_img_req.mdt_len;
5244 load_req.img_len = load_img_req.img_len;
5245 load_req.phy_addr = (uint32_t)pa;
5246 cmd_buf = (void *)&load_req;
5247 cmd_len = sizeof(struct qseecom_load_app_ireq);
5248 } else {
5249 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5250 load_req_64bit.mdt_len = load_img_req.mdt_len;
5251 load_req_64bit.img_len = load_img_req.img_len;
5252 load_req_64bit.phy_addr = (uint64_t)pa;
5253 cmd_buf = (void *)&load_req_64bit;
5254 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5255 }
5256
5257 if (qseecom.support_bus_scaling) {
5258 mutex_lock(&qsee_bw_mutex);
5259 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5260 mutex_unlock(&qsee_bw_mutex);
5261 if (ret) {
5262 ret = -EIO;
5263 goto exit_cpu_restore;
5264 }
5265 }
5266
5267 /* Vote for the SFPB clock */
5268 ret = __qseecom_enable_clk_scale_up(data);
5269 if (ret) {
5270 ret = -EIO;
5271 goto exit_register_bus_bandwidth_needs;
5272 }
5273 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5274 ION_IOC_CLEAN_INV_CACHES);
5275 if (ret) {
5276 pr_err("cache operation failed %d\n", ret);
5277 goto exit_disable_clock;
5278 }
5279 /* SCM_CALL to load the external elf */
5280 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5281 &resp, sizeof(resp));
5282 if (ret) {
5283 pr_err("scm_call to load failed : ret %d\n",
5284 ret);
5285 ret = -EFAULT;
5286 goto exit_disable_clock;
5287 }
5288
5289 switch (resp.result) {
5290 case QSEOS_RESULT_SUCCESS:
5291 break;
5292 case QSEOS_RESULT_INCOMPLETE:
5293 pr_err("%s: qseos result incomplete\n", __func__);
5294 ret = __qseecom_process_incomplete_cmd(data, &resp);
5295 if (ret)
5296 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5297 break;
5298 case QSEOS_RESULT_FAILURE:
5299 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5300 ret = -EFAULT;
5301 break;
5302 default:
5303 pr_err("scm_call response result %d not supported\n",
5304 resp.result);
5305 ret = -EFAULT;
5306 break;
5307 }
5308
5309exit_disable_clock:
5310 __qseecom_disable_clk_scale_down(data);
5311
5312exit_register_bus_bandwidth_needs:
5313 if (qseecom.support_bus_scaling) {
5314 mutex_lock(&qsee_bw_mutex);
5315 uret = qseecom_unregister_bus_bandwidth_needs(data);
5316 mutex_unlock(&qsee_bw_mutex);
5317 if (uret)
5318 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5319 uret, ret);
5320 }
5321
5322exit_cpu_restore:
5323 /* Deallocate the handle */
5324 if (!IS_ERR_OR_NULL(ihandle))
5325 ion_free(qseecom.ion_clnt, ihandle);
5326 return ret;
5327}
5328
5329static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5330{
5331 int ret = 0;
5332 struct qseecom_command_scm_resp resp;
5333 struct qseecom_unload_app_ireq req;
5334
5335 /* unavailable client app */
5336 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5337
5338 /* Populate the structure for sending scm call to unload image */
5339 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5340
5341 /* SCM_CALL to unload the external elf */
5342 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5343 sizeof(struct qseecom_unload_app_ireq),
5344 &resp, sizeof(resp));
5345 if (ret) {
5346 pr_err("scm_call to unload failed : ret %d\n",
5347 ret);
5348 ret = -EFAULT;
5349 goto qseecom_unload_external_elf_scm_err;
5350 }
5351 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5352 ret = __qseecom_process_incomplete_cmd(data, &resp);
5353 if (ret)
5354 pr_err("process_incomplete_cmd fail err: %d\n",
5355 ret);
5356 } else {
5357 if (resp.result != QSEOS_RESULT_SUCCESS) {
5358 pr_err("scm_call to unload image failed resp.result =%d\n",
5359 resp.result);
5360 ret = -EFAULT;
5361 }
5362 }
5363
5364qseecom_unload_external_elf_scm_err:
5365
5366 return ret;
5367}
5368
5369static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5370 void __user *argp)
5371{
5372
5373 int32_t ret;
5374 struct qseecom_qseos_app_load_query query_req;
5375 struct qseecom_check_app_ireq req;
5376 struct qseecom_registered_app_list *entry = NULL;
5377 unsigned long flags = 0;
5378 uint32_t app_arch = 0, app_id = 0;
5379 bool found_app = false;
5380
5381 /* Copy the relevant information needed for loading the image */
5382 if (copy_from_user(&query_req,
5383 (void __user *)argp,
5384 sizeof(struct qseecom_qseos_app_load_query))) {
5385 pr_err("copy_from_user failed\n");
5386 return -EFAULT;
5387 }
5388
5389 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5390 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5391 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5392
5393 ret = __qseecom_check_app_exists(req, &app_id);
5394 if (ret) {
5395 pr_err(" scm call to check if app is loaded failed");
5396 return ret; /* scm call failed */
5397 }
5398 if (app_id) {
5399 pr_debug("App id %d (%s) already exists\n", app_id,
5400 (char *)(req.app_name));
5401 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5402 list_for_each_entry(entry,
5403 &qseecom.registered_app_list_head, list){
5404 if (entry->app_id == app_id) {
5405 app_arch = entry->app_arch;
5406 entry->ref_cnt++;
5407 found_app = true;
5408 break;
5409 }
5410 }
5411 spin_unlock_irqrestore(
5412 &qseecom.registered_app_list_lock, flags);
5413 data->client.app_id = app_id;
5414 query_req.app_id = app_id;
5415 if (app_arch) {
5416 data->client.app_arch = app_arch;
5417 query_req.app_arch = app_arch;
5418 } else {
5419 data->client.app_arch = 0;
5420 query_req.app_arch = 0;
5421 }
5422 strlcpy(data->client.app_name, query_req.app_name,
5423 MAX_APP_NAME_SIZE);
5424 /*
5425 * If app was loaded by appsbl before and was not registered,
5426 * regiser this app now.
5427 */
5428 if (!found_app) {
5429 pr_debug("Register app %d [%s] which was loaded before\n",
5430 ret, (char *)query_req.app_name);
5431 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5432 if (!entry) {
5433 pr_err("kmalloc for app entry failed\n");
5434 return -ENOMEM;
5435 }
5436 entry->app_id = app_id;
5437 entry->ref_cnt = 1;
5438 entry->app_arch = data->client.app_arch;
5439 strlcpy(entry->app_name, data->client.app_name,
5440 MAX_APP_NAME_SIZE);
5441 entry->app_blocked = false;
5442 entry->blocked_on_listener_id = 0;
5443 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5444 flags);
5445 list_add_tail(&entry->list,
5446 &qseecom.registered_app_list_head);
5447 spin_unlock_irqrestore(
5448 &qseecom.registered_app_list_lock, flags);
5449 }
5450 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5451 pr_err("copy_to_user failed\n");
5452 return -EFAULT;
5453 }
5454 return -EEXIST; /* app already loaded */
5455 } else {
5456 return 0; /* app not loaded */
5457 }
5458}
5459
5460static int __qseecom_get_ce_pipe_info(
5461 enum qseecom_key_management_usage_type usage,
5462 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5463{
5464 int ret = -EINVAL;
5465 int i, j;
5466 struct qseecom_ce_info_use *p = NULL;
5467 int total = 0;
5468 struct qseecom_ce_pipe_entry *pcepipe;
5469
5470 switch (usage) {
5471 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5472 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5473 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5474 if (qseecom.support_fde) {
5475 p = qseecom.ce_info.fde;
5476 total = qseecom.ce_info.num_fde;
5477 } else {
5478 pr_err("system does not support fde\n");
5479 return -EINVAL;
5480 }
5481 break;
5482 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5483 if (qseecom.support_pfe) {
5484 p = qseecom.ce_info.pfe;
5485 total = qseecom.ce_info.num_pfe;
5486 } else {
5487 pr_err("system does not support pfe\n");
5488 return -EINVAL;
5489 }
5490 break;
5491 default:
5492 pr_err("unsupported usage %d\n", usage);
5493 return -EINVAL;
5494 }
5495
5496 for (j = 0; j < total; j++) {
5497 if (p->unit_num == unit) {
5498 pcepipe = p->ce_pipe_entry;
5499 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5500 (*ce_hw)[i] = pcepipe->ce_num;
5501 *pipe = pcepipe->ce_pipe_pair;
5502 pcepipe++;
5503 }
5504 ret = 0;
5505 break;
5506 }
5507 p++;
5508 }
5509 return ret;
5510}
5511
/*
 * Send a QSEOS key-generate command to TZ for the given usage.
 *
 * An already-existing key id is NOT an error: whether TZ reports it
 * via an SCM-level -EINVAL with QSEOS_RESULT_FAIL_KEY_ID_EXISTS, via
 * the response result, or after an INCOMPLETE continuation, ret is
 * forced to 0.  The CLK_QSEE vote taken at entry is always dropped
 * through the common exit.
 *
 * Returns 0 on success (or key-exists), -EFAULT on usage/SCM errors,
 * -EINVAL when TZ reports a hard failure.
 */
static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_generate_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_generate_ireq),
		&resp, sizeof(resp));
	if (ret) {
		/* -EINVAL + KEY_ID_EXISTS means the key is already there */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
			pr_debug("Key ID exists.\n");
			ret = 0;
		} else {
			pr_err("scm call to generate key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto generate_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
		/* duplicate key id treated as success (ret stays 0) */
		pr_debug("Key ID exists.\n");
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener services before it can finish */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
				pr_debug("Key ID exists.\n");
				ret = 0;
			} else {
				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			}
		}
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("gen key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
generate_key_exit:
	/* balance the CLK_QSEE vote taken at entry */
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5571
/*
 * Send a QSEOS key-delete command to TZ for the given usage.
 *
 * A QSEOS_RESULT_FAIL_MAX_ATTEMPT response (password retry limit hit)
 * is mapped to -ERANGE on every path it can appear: SCM-level -EINVAL,
 * the direct response result, or after an INCOMPLETE continuation.
 * The CLK_QSEE vote taken at entry is always dropped through the
 * common exit.
 *
 * Returns 0 on success, -ERANGE on max attempts, -EFAULT on usage/SCM
 * errors, -EINVAL when TZ reports a hard failure.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* -EINVAL + MAX_ATTEMPT means the retry limit was hit */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener services before it can finish */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	/* balance the CLK_QSEE vote taken at entry */
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5632
5633static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5634 enum qseecom_key_management_usage_type usage,
5635 struct qseecom_key_select_ireq *ireq)
5636{
5637 struct qseecom_command_scm_resp resp;
5638 int ret;
5639
5640 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5641 usage >= QSEOS_KM_USAGE_MAX) {
5642 pr_err("Error:: unsupported usage %d\n", usage);
5643 return -EFAULT;
5644 }
5645 ret = __qseecom_enable_clk(CLK_QSEE);
5646 if (ret)
5647 return ret;
5648
5649 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5650 ret = __qseecom_enable_clk(CLK_CE_DRV);
5651 if (ret)
5652 return ret;
5653 }
5654
5655 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5656 ireq, sizeof(struct qseecom_key_select_ireq),
5657 &resp, sizeof(struct qseecom_command_scm_resp));
5658 if (ret) {
5659 if (ret == -EINVAL &&
5660 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5661 pr_debug("Max attempts to input password reached.\n");
5662 ret = -ERANGE;
5663 } else if (ret == -EINVAL &&
5664 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5665 pr_debug("Set Key operation under processing...\n");
5666 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5667 } else {
5668 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5669 ret);
5670 ret = -EFAULT;
5671 }
5672 goto set_key_exit;
5673 }
5674
5675 switch (resp.result) {
5676 case QSEOS_RESULT_SUCCESS:
5677 break;
5678 case QSEOS_RESULT_INCOMPLETE:
5679 ret = __qseecom_process_incomplete_cmd(data, &resp);
5680 if (ret) {
5681 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5682 resp.result);
5683 if (resp.result ==
5684 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5685 pr_debug("Set Key operation under processing...\n");
5686 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5687 }
5688 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5689 pr_debug("Max attempts to input password reached.\n");
5690 ret = -ERANGE;
5691 }
5692 }
5693 break;
5694 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5695 pr_debug("Max attempts to input password reached.\n");
5696 ret = -ERANGE;
5697 break;
5698 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5699 pr_debug("Set Key operation under processing...\n");
5700 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5701 break;
5702 case QSEOS_RESULT_FAILURE:
5703 default:
5704 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5705 ret = -EINVAL;
5706 break;
5707 }
5708set_key_exit:
5709 __qseecom_disable_clk(CLK_QSEE);
5710 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5711 __qseecom_disable_clk(CLK_CE_DRV);
5712 return ret;
5713}
5714
/*
 * Send a QSEOS update-key-userinfo command (e.g. password change) to
 * TZ for the given usage.
 *
 * A pending previous key operation is reported as the positive value
 * QSEOS_RESULT_FAIL_PENDING_OPERATION, on which callers retry.
 *
 * Note the asymmetric error path: a non-pending SCM failure disables
 * CLK_QSEE and returns -EFAULT immediately, while all other paths
 * (including SCM-level pending) fall through to the result switch and
 * the common clock release at the bottom.
 */
static int __qseecom_update_current_key_user_info(
			struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* -EINVAL + PENDING means TZ is busy: caller will retry */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
					ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener services before it can finish */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	/* balance the CLK_QSEE vote taken at entry */
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5776
5777
5778static int qseecom_enable_ice_setup(int usage)
5779{
5780 int ret = 0;
5781
5782 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5783 ret = qcom_ice_setup_ice_hw("ufs", true);
5784 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5785 ret = qcom_ice_setup_ice_hw("sdcc", true);
5786
5787 return ret;
5788}
5789
5790static int qseecom_disable_ice_setup(int usage)
5791{
5792 int ret = 0;
5793
5794 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5795 ret = qcom_ice_setup_ice_hw("ufs", false);
5796 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5797 ret = qcom_ice_setup_ice_hw("sdcc", false);
5798
5799 return ret;
5800}
5801
5802static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5803{
5804 struct qseecom_ce_info_use *pce_info_use, *p;
5805 int total = 0;
5806 int i;
5807
5808 switch (usage) {
5809 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5810 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5811 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5812 p = qseecom.ce_info.fde;
5813 total = qseecom.ce_info.num_fde;
5814 break;
5815 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5816 p = qseecom.ce_info.pfe;
5817 total = qseecom.ce_info.num_pfe;
5818 break;
5819 default:
5820 pr_err("unsupported usage %d\n", usage);
5821 return -EINVAL;
5822 }
5823
5824 pce_info_use = NULL;
5825
5826 for (i = 0; i < total; i++) {
5827 if (p->unit_num == unit) {
5828 pce_info_use = p;
5829 break;
5830 }
5831 p++;
5832 }
5833 if (!pce_info_use) {
5834 pr_err("can not find %d\n", unit);
5835 return -EINVAL;
5836 }
5837 return pce_info_use->num_ce_pipe_entries;
5838}
5839
5840static int qseecom_create_key(struct qseecom_dev_handle *data,
5841 void __user *argp)
5842{
5843 int i;
5844 uint32_t *ce_hw = NULL;
5845 uint32_t pipe = 0;
5846 int ret = 0;
5847 uint32_t flags = 0;
5848 struct qseecom_create_key_req create_key_req;
5849 struct qseecom_key_generate_ireq generate_key_ireq;
5850 struct qseecom_key_select_ireq set_key_ireq;
5851 uint32_t entries = 0;
5852
5853 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5854 if (ret) {
5855 pr_err("copy_from_user failed\n");
5856 return ret;
5857 }
5858
5859 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5860 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5861 pr_err("unsupported usage %d\n", create_key_req.usage);
5862 ret = -EFAULT;
5863 return ret;
5864 }
5865 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5866 create_key_req.usage);
5867 if (entries <= 0) {
5868 pr_err("no ce instance for usage %d instance %d\n",
5869 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5870 ret = -EINVAL;
5871 return ret;
5872 }
5873
5874 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5875 if (!ce_hw) {
5876 ret = -ENOMEM;
5877 return ret;
5878 }
5879 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5880 DEFAULT_CE_INFO_UNIT);
5881 if (ret) {
5882 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5883 ret = -EINVAL;
5884 goto free_buf;
5885 }
5886
5887 if (qseecom.fde_key_size)
5888 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5889 else
5890 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5891
5892 generate_key_ireq.flags = flags;
5893 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5894 memset((void *)generate_key_ireq.key_id,
5895 0, QSEECOM_KEY_ID_SIZE);
5896 memset((void *)generate_key_ireq.hash32,
5897 0, QSEECOM_HASH_SIZE);
5898 memcpy((void *)generate_key_ireq.key_id,
5899 (void *)key_id_array[create_key_req.usage].desc,
5900 QSEECOM_KEY_ID_SIZE);
5901 memcpy((void *)generate_key_ireq.hash32,
5902 (void *)create_key_req.hash32,
5903 QSEECOM_HASH_SIZE);
5904
5905 ret = __qseecom_generate_and_save_key(data,
5906 create_key_req.usage, &generate_key_ireq);
5907 if (ret) {
5908 pr_err("Failed to generate key on storage: %d\n", ret);
5909 goto free_buf;
5910 }
5911
5912 for (i = 0; i < entries; i++) {
5913 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5914 if (create_key_req.usage ==
5915 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5916 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5917 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5918
5919 } else if (create_key_req.usage ==
5920 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5921 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5922 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5923
5924 } else {
5925 set_key_ireq.ce = ce_hw[i];
5926 set_key_ireq.pipe = pipe;
5927 }
5928 set_key_ireq.flags = flags;
5929
5930 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5931 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5932 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5933 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5934 memcpy((void *)set_key_ireq.key_id,
5935 (void *)key_id_array[create_key_req.usage].desc,
5936 QSEECOM_KEY_ID_SIZE);
5937 memcpy((void *)set_key_ireq.hash32,
5938 (void *)create_key_req.hash32,
5939 QSEECOM_HASH_SIZE);
5940 /*
5941 * It will return false if it is GPCE based crypto instance or
5942 * ICE is setup properly
5943 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07005944 ret = qseecom_enable_ice_setup(create_key_req.usage);
5945 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005946 goto free_buf;
5947
5948 do {
5949 ret = __qseecom_set_clear_ce_key(data,
5950 create_key_req.usage,
5951 &set_key_ireq);
5952 /*
5953 * wait a little before calling scm again to let other
5954 * processes run
5955 */
5956 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
5957 msleep(50);
5958
5959 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
5960
5961 qseecom_disable_ice_setup(create_key_req.usage);
5962
5963 if (ret) {
5964 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
5965 pipe, ce_hw[i], ret);
5966 goto free_buf;
5967 } else {
5968 pr_err("Set the key successfully\n");
5969 if ((create_key_req.usage ==
5970 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
5971 (create_key_req.usage ==
5972 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
5973 goto free_buf;
5974 }
5975 }
5976
5977free_buf:
5978 kzfree(ce_hw);
5979 return ret;
5980}
5981
5982static int qseecom_wipe_key(struct qseecom_dev_handle *data,
5983 void __user *argp)
5984{
5985 uint32_t *ce_hw = NULL;
5986 uint32_t pipe = 0;
5987 int ret = 0;
5988 uint32_t flags = 0;
5989 int i, j;
5990 struct qseecom_wipe_key_req wipe_key_req;
5991 struct qseecom_key_delete_ireq delete_key_ireq;
5992 struct qseecom_key_select_ireq clear_key_ireq;
5993 uint32_t entries = 0;
5994
5995 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
5996 if (ret) {
5997 pr_err("copy_from_user failed\n");
5998 return ret;
5999 }
6000
6001 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6002 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6003 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6004 ret = -EFAULT;
6005 return ret;
6006 }
6007
6008 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6009 wipe_key_req.usage);
6010 if (entries <= 0) {
6011 pr_err("no ce instance for usage %d instance %d\n",
6012 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6013 ret = -EINVAL;
6014 return ret;
6015 }
6016
6017 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6018 if (!ce_hw) {
6019 ret = -ENOMEM;
6020 return ret;
6021 }
6022
6023 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6024 DEFAULT_CE_INFO_UNIT);
6025 if (ret) {
6026 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6027 ret = -EINVAL;
6028 goto free_buf;
6029 }
6030
6031 if (wipe_key_req.wipe_key_flag) {
6032 delete_key_ireq.flags = flags;
6033 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6034 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6035 memcpy((void *)delete_key_ireq.key_id,
6036 (void *)key_id_array[wipe_key_req.usage].desc,
6037 QSEECOM_KEY_ID_SIZE);
6038 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6039
6040 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6041 &delete_key_ireq);
6042 if (ret) {
6043 pr_err("Failed to delete key from ssd storage: %d\n",
6044 ret);
6045 ret = -EFAULT;
6046 goto free_buf;
6047 }
6048 }
6049
6050 for (j = 0; j < entries; j++) {
6051 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6052 if (wipe_key_req.usage ==
6053 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6054 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6055 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6056 } else if (wipe_key_req.usage ==
6057 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6058 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6059 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6060 } else {
6061 clear_key_ireq.ce = ce_hw[j];
6062 clear_key_ireq.pipe = pipe;
6063 }
6064 clear_key_ireq.flags = flags;
6065 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6066 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6067 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6068 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6069
6070 /*
6071 * It will return false if it is GPCE based crypto instance or
6072 * ICE is setup properly
6073 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006074 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6075 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006076 goto free_buf;
6077
6078 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6079 &clear_key_ireq);
6080
6081 qseecom_disable_ice_setup(wipe_key_req.usage);
6082
6083 if (ret) {
6084 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6085 pipe, ce_hw[j], ret);
6086 ret = -EFAULT;
6087 goto free_buf;
6088 }
6089 }
6090
6091free_buf:
6092 kzfree(ce_hw);
6093 return ret;
6094}
6095
6096static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6097 void __user *argp)
6098{
6099 int ret = 0;
6100 uint32_t flags = 0;
6101 struct qseecom_update_key_userinfo_req update_key_req;
6102 struct qseecom_key_userinfo_update_ireq ireq;
6103
6104 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6105 if (ret) {
6106 pr_err("copy_from_user failed\n");
6107 return ret;
6108 }
6109
6110 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6111 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6112 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6113 return -EFAULT;
6114 }
6115
6116 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6117
6118 if (qseecom.fde_key_size)
6119 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6120 else
6121 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6122
6123 ireq.flags = flags;
6124 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6125 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6126 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6127 memcpy((void *)ireq.key_id,
6128 (void *)key_id_array[update_key_req.usage].desc,
6129 QSEECOM_KEY_ID_SIZE);
6130 memcpy((void *)ireq.current_hash32,
6131 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6132 memcpy((void *)ireq.new_hash32,
6133 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6134
6135 do {
6136 ret = __qseecom_update_current_key_user_info(data,
6137 update_key_req.usage,
6138 &ireq);
6139 /*
6140 * wait a little before calling scm again to let other
6141 * processes run
6142 */
6143 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6144 msleep(50);
6145
6146 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6147 if (ret) {
6148 pr_err("Failed to update key info: %d\n", ret);
6149 return ret;
6150 }
6151 return ret;
6152
6153}
6154static int qseecom_is_es_activated(void __user *argp)
6155{
6156 struct qseecom_is_es_activated_req req;
6157 struct qseecom_command_scm_resp resp;
6158 int ret;
6159
6160 if (qseecom.qsee_version < QSEE_VERSION_04) {
6161 pr_err("invalid qsee version\n");
6162 return -ENODEV;
6163 }
6164
6165 if (argp == NULL) {
6166 pr_err("arg is null\n");
6167 return -EINVAL;
6168 }
6169
6170 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6171 &req, sizeof(req), &resp, sizeof(resp));
6172 if (ret) {
6173 pr_err("scm_call failed\n");
6174 return ret;
6175 }
6176
6177 req.is_activated = resp.result;
6178 ret = copy_to_user(argp, &req, sizeof(req));
6179 if (ret) {
6180 pr_err("copy_to_user failed\n");
6181 return ret;
6182 }
6183
6184 return 0;
6185}
6186
6187static int qseecom_save_partition_hash(void __user *argp)
6188{
6189 struct qseecom_save_partition_hash_req req;
6190 struct qseecom_command_scm_resp resp;
6191 int ret;
6192
6193 memset(&resp, 0x00, sizeof(resp));
6194
6195 if (qseecom.qsee_version < QSEE_VERSION_04) {
6196 pr_err("invalid qsee version\n");
6197 return -ENODEV;
6198 }
6199
6200 if (argp == NULL) {
6201 pr_err("arg is null\n");
6202 return -EINVAL;
6203 }
6204
6205 ret = copy_from_user(&req, argp, sizeof(req));
6206 if (ret) {
6207 pr_err("copy_from_user failed\n");
6208 return ret;
6209 }
6210
6211 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6212 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6213 if (ret) {
6214 pr_err("qseecom_scm_call failed\n");
6215 return ret;
6216 }
6217
6218 return 0;
6219}
6220
/*
 * qseecom_mdtp_cipher_dip() - run the TZ MDTP DIP cipher operation.
 * @argp: userspace pointer to a struct qseecom_mdtp_cipher_dip_req.
 *
 * Bounces the caller's input buffer through a page-aligned kernel buffer,
 * asks the secure side (scm_call2 with TZ_MDTP_CIPHER_DIP_ID) to cipher it
 * into a second kernel buffer, then copies the result back to the caller's
 * output buffer.  req.direction (0/1) selects the cipher direction as
 * interpreted by TZ.
 *
 * Return: 0 on success, -EINVAL/-ENOMEM on bad input or allocation
 * failure, or the non-zero result of copy_*_user()/scm_call2().
 */
static int qseecom_mdtp_cipher_dip(void __user *argp)
{
	struct qseecom_mdtp_cipher_dip_req req;
	u32 tzbuflenin, tzbuflenout;
	char *tzbufin = NULL, *tzbufout = NULL;
	struct scm_desc desc = {0};
	int ret;

	/*
	 * Single-exit do/while(0): every failure breaks out so that both
	 * bounce buffers are freed by the kzfree() calls below.
	 */
	do {
		/* Copy the parameters from userspace */
		if (argp == NULL) {
			pr_err("arg is null\n");
			ret = -EINVAL;
			break;
		}

		ret = copy_from_user(&req, argp, sizeof(req));
		if (ret) {
			pr_err("copy_from_user failed, ret= %d\n", ret);
			break;
		}

		/* Both sizes must be non-zero and bounded by MAX_DIP. */
		if (req.in_buf == NULL || req.out_buf == NULL ||
			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
			req.direction > 1) {
			pr_err("invalid parameters\n");
			ret = -EINVAL;
			break;
		}

		/* Copy the input buffer from userspace to kernel space */
		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
		if (!tzbufin) {
			pr_err("error allocating in buffer\n");
			ret = -ENOMEM;
			break;
		}

		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
		if (ret) {
			pr_err("copy_from_user failed, ret=%d\n", ret);
			break;
		}

		/* TZ accesses the buffer by physical address: flush caches. */
		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);

		/* Prepare the output buffer in kernel space */
		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
		if (!tzbufout) {
			pr_err("error allocating out buffer\n");
			ret = -ENOMEM;
			break;
		}

		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);

		/* Send the command to TZ */
		/* kzalloc memory is physically contiguous, so virt_to_phys()
		 * yields an address the secure side can use directly.
		 */
		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
		desc.args[0] = virt_to_phys(tzbufin);
		desc.args[1] = req.in_buf_size;
		desc.args[2] = virt_to_phys(tzbufout);
		desc.args[3] = req.out_buf_size;
		desc.args[4] = req.direction;

		/* Crypto clock must be voted on for the duration of the call. */
		ret = __qseecom_enable_clk(CLK_QSEE);
		if (ret)
			break;

		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);

		__qseecom_disable_clk(CLK_QSEE);

		if (ret) {
			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
				ret);
			break;
		}

		/* Copy the output buffer from kernel space to userspace */
		/* Flush again so we read what TZ wrote, not stale lines. */
		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
		if (ret) {
			pr_err("copy_to_user failed, ret=%d\n", ret);
			break;
		}
	} while (0);

	/* kzfree() zeroes before freeing and tolerates NULL pointers. */
	kzfree(tzbufin);
	kzfree(tzbufout);

	return ret;
}
6316
/*
 * __qseecom_qteec_validate_msg() - validate a QTEEC request against the
 * client's registered shared buffer.
 * @data: per-client handle; must be a QSEECOM_CLIENT_APP with an ion
 *        shared buffer already set up.
 * @req:  request whose req/resp pointers and lengths are checked.
 *
 * Every pointer the caller supplied must lie entirely within the client's
 * shared buffer ([user_virt_sb_base, user_virt_sb_base + sb_length)), and
 * none of the pointer/length additions may wrap.  The check order below
 * is deliberate (overflow checks precede the additions they guard).
 *
 * Return: 0 if the request is well-formed, -EINVAL/-EFAULT/-ENOMEM
 * otherwise.
 */
static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req)
{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	/* QTEEC commands are only valid for client-app handles. */
	if (data->type != QSEECOM_CLIENT_APP)
		return -EFAULT;

	/* Guard the req_len + resp_len additions used below. */
	if (req->req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	/* Both buffers must fit together in the shared buffer. */
	if (req->req_len + req->resp_len > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
			(req->req_len + req->resp_len), data->client.sb_length);
		return -ENOMEM;
	}

	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* Each pointer must start inside the shared buffer window. */
	if (((uintptr_t)req->req_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->req_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if (((uintptr_t)req->resp_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if ((req->req_len == 0) || (req->resp_len == 0)) {
		pr_err("cmd buf lengtgh/response buf length not valid\n");
		return -EINVAL;
	}

	/* Guard the ptr + len additions in the final range check. */
	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
		pr_err("Integer overflow in req_len & req_ptr\n");
		return -EINVAL;
	}

	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_ptr\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base >
		(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Each buffer must also END inside the shared buffer window. */
	if ((((uintptr_t)req->req_ptr + req->req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_ptr + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
6391
6392static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6393 uint32_t fd_idx, struct sg_table *sg_ptr)
6394{
6395 struct scatterlist *sg = sg_ptr->sgl;
6396 struct qseecom_sg_entry *sg_entry;
6397 void *buf;
6398 uint i;
6399 size_t size;
6400 dma_addr_t coh_pmem;
6401
6402 if (fd_idx >= MAX_ION_FD) {
6403 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6404 return -ENOMEM;
6405 }
6406 /*
6407 * Allocate a buffer, populate it with number of entry plus
6408 * each sg entry's phy addr and length; then return the
6409 * phy_addr of the buffer.
6410 */
6411 size = sizeof(uint32_t) +
6412 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6413 size = (size + PAGE_SIZE) & PAGE_MASK;
6414 buf = dma_alloc_coherent(qseecom.pdev,
6415 size, &coh_pmem, GFP_KERNEL);
6416 if (buf == NULL) {
6417 pr_err("failed to alloc memory for sg buf\n");
6418 return -ENOMEM;
6419 }
6420 *(uint32_t *)buf = sg_ptr->nents;
6421 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6422 for (i = 0; i < sg_ptr->nents; i++) {
6423 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6424 sg_entry->len = sg->length;
6425 sg_entry++;
6426 sg = sg_next(sg);
6427 }
6428 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6429 data->client.sec_buf_fd[fd_idx].vbase = buf;
6430 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6431 data->client.sec_buf_fd[fd_idx].size = size;
6432 return 0;
6433}
6434
6435static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6436 struct qseecom_dev_handle *data, bool cleanup)
6437{
6438 struct ion_handle *ihandle;
6439 int ret = 0;
6440 int i = 0;
6441 uint32_t *update;
6442 struct sg_table *sg_ptr = NULL;
6443 struct scatterlist *sg;
6444 struct qseecom_param_memref *memref;
6445
6446 if (req == NULL) {
6447 pr_err("Invalid address\n");
6448 return -EINVAL;
6449 }
6450 for (i = 0; i < MAX_ION_FD; i++) {
6451 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006452 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006453 req->ifd_data[i].fd);
6454 if (IS_ERR_OR_NULL(ihandle)) {
6455 pr_err("Ion client can't retrieve the handle\n");
6456 return -ENOMEM;
6457 }
6458 if ((req->req_len < sizeof(uint32_t)) ||
6459 (req->ifd_data[i].cmd_buf_offset >
6460 req->req_len - sizeof(uint32_t))) {
6461 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6462 req->req_len,
6463 req->ifd_data[i].cmd_buf_offset);
6464 return -EINVAL;
6465 }
6466 update = (uint32_t *)((char *) req->req_ptr +
6467 req->ifd_data[i].cmd_buf_offset);
6468 if (!update) {
6469 pr_err("update pointer is NULL\n");
6470 return -EINVAL;
6471 }
6472 } else {
6473 continue;
6474 }
6475 /* Populate the cmd data structure with the phys_addr */
6476 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6477 if (IS_ERR_OR_NULL(sg_ptr)) {
6478 pr_err("IOn client could not retrieve sg table\n");
6479 goto err;
6480 }
6481 sg = sg_ptr->sgl;
6482 if (sg == NULL) {
6483 pr_err("sg is NULL\n");
6484 goto err;
6485 }
6486 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6487 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6488 sg_ptr->nents, sg->length);
6489 goto err;
6490 }
6491 /* clean up buf for pre-allocated fd */
6492 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6493 (*update)) {
6494 if (data->client.sec_buf_fd[i].vbase)
6495 dma_free_coherent(qseecom.pdev,
6496 data->client.sec_buf_fd[i].size,
6497 data->client.sec_buf_fd[i].vbase,
6498 data->client.sec_buf_fd[i].pbase);
6499 memset((void *)update, 0,
6500 sizeof(struct qseecom_param_memref));
6501 memset(&(data->client.sec_buf_fd[i]), 0,
6502 sizeof(struct qseecom_sec_buf_fd_info));
6503 goto clean;
6504 }
6505
6506 if (*update == 0) {
6507 /* update buf for pre-allocated fd from secure heap*/
6508 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6509 sg_ptr);
6510 if (ret) {
6511 pr_err("Failed to handle buf for fd[%d]\n", i);
6512 goto err;
6513 }
6514 memref = (struct qseecom_param_memref *)update;
6515 memref->buffer =
6516 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6517 memref->size =
6518 (uint32_t)(data->client.sec_buf_fd[i].size);
6519 } else {
6520 /* update buf for fd from non-secure qseecom heap */
6521 if (sg_ptr->nents != 1) {
6522 pr_err("Num of scat entr (%d) invalid\n",
6523 sg_ptr->nents);
6524 goto err;
6525 }
6526 if (cleanup)
6527 *update = 0;
6528 else
6529 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6530 }
6531clean:
6532 if (cleanup) {
6533 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6534 ihandle, NULL, sg->length,
6535 ION_IOC_INV_CACHES);
6536 if (ret) {
6537 pr_err("cache operation failed %d\n", ret);
6538 goto err;
6539 }
6540 } else {
6541 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6542 ihandle, NULL, sg->length,
6543 ION_IOC_CLEAN_INV_CACHES);
6544 if (ret) {
6545 pr_err("cache operation failed %d\n", ret);
6546 goto err;
6547 }
6548 data->sglistinfo_ptr[i].indexAndFlags =
6549 SGLISTINFO_SET_INDEX_FLAG(
6550 (sg_ptr->nents == 1), 0,
6551 req->ifd_data[i].cmd_buf_offset);
6552 data->sglistinfo_ptr[i].sizeOrCount =
6553 (sg_ptr->nents == 1) ?
6554 sg->length : sg_ptr->nents;
6555 data->sglist_cnt = i + 1;
6556 }
6557 /* Deallocate the handle */
6558 if (!IS_ERR_OR_NULL(ihandle))
6559 ion_free(qseecom.ion_clnt, ihandle);
6560 }
6561 return ret;
6562err:
6563 if (!IS_ERR_OR_NULL(ihandle))
6564 ion_free(qseecom.ion_clnt, ihandle);
6565 return -ENOMEM;
6566}
6567
/*
 * __qseecom_qteec_issue_cmd() - common path for issuing a QTEEC command
 * (open session / close session / request cancellation) to the TA.
 * @data:   per-client handle (shared buffer, app id/name, sglist table).
 * @req:    validated request; for OPEN_SESSION / REQUEST_CANCELLATION it
 *          is actually a struct qseecom_qteec_modfd_req and its fds are
 *          patched in/out via __qseecom_update_qteec_req_buf().
 * @cmd_id: one of the QSEOS_TEE_* command ids.
 *
 * Saves the caller's user-space req/resp pointers, rewrites req's
 * pointers to kernel virtual addresses, builds a 32- or 64-bit ireq
 * (depending on QSEE version) carrying the physical addresses, performs
 * the cache maintenance around the SCM call, and processes incomplete /
 * reentrancy results.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req, uint32_t cmd_id)
{
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = __qseecom_qteec_validate_msg(data, req);
	if (ret)
		return ret;

	/* Keep the original user-space pointers: the physical addresses
	 * for the ireq are derived from these, while req's own pointers
	 * are rewritten to kernel vaddrs below.
	 */
	req_ptr = req->req_ptr;
	resp_ptr = req->resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* Rewrite to kernel vaddrs so the fd-patching below can write
	 * directly into the shared buffer.
	 */
	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->req_ptr);
	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->resp_ptr);

	/* Only these two commands carry ion fds that need patching. */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, false);
		if (ret)
			return ret;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy 32-bit ireq layout. */
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req->req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req->resp_len;
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
	} else {
		/* 64-bit ireq layout for QSEE >= 4.0. */
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req->req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req->resp_len;
		/* A 32-bit TA cannot address buffers at/above 4 GB. */
		if ((data->client.app_arch == ELFCLASS32) &&
			((ireq_64bit.req_ptr >=
				PHY_ADDR_4G - ireq_64bit.req_len) ||
			(ireq_64bit.resp_ptr >=
				PHY_ADDR_4G - ireq_64bit.resp_len))){
			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
				data->client.app_name, data->client.app_id);
			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
				ireq_64bit.req_ptr, ireq_64bit.req_len,
				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
			return -EFAULT;
		}
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
	}
	/* The command id is the first word of either ireq layout. */
	if (qseecom.whitelist_support == true
		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
	else
		*(uint32_t *)cmd_buf = cmd_id;

	/* Flush the used portion of the shared buffer before the call. */
	reqd_len_sb_in = req->req_len + req->resp_len;
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}

	if (qseecom.qsee_reentrancy_support) {
		/* NOTE(review): a non-zero ret from
		 * __qseecom_process_reentrancy() is overwritten by the
		 * cache op below — confirm this is intentional.
		 */
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
						resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* Invalidate the whole shared buffer so the CPU sees TZ's reply. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	/* Undo the fd patching done before the call. */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, true);
		if (ret)
			return ret;
	}
	return 0;
}
6725
6726static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6727 void __user *argp)
6728{
6729 struct qseecom_qteec_modfd_req req;
6730 int ret = 0;
6731
6732 ret = copy_from_user(&req, argp,
6733 sizeof(struct qseecom_qteec_modfd_req));
6734 if (ret) {
6735 pr_err("copy_from_user failed\n");
6736 return ret;
6737 }
6738 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6739 QSEOS_TEE_OPEN_SESSION);
6740
6741 return ret;
6742}
6743
6744static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6745 void __user *argp)
6746{
6747 struct qseecom_qteec_req req;
6748 int ret = 0;
6749
6750 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6751 if (ret) {
6752 pr_err("copy_from_user failed\n");
6753 return ret;
6754 }
6755 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6756 return ret;
6757}
6758
/*
 * qseecom_qteec_invoke_modfd_cmd() - ioctl backend: invoke a QTEEC
 * command whose request may embed ion fds.
 * @data: per-client handle (shared buffer, app id/name, sglist table).
 * @argp: userspace pointer to a struct qseecom_qteec_modfd_req.
 *
 * Mirrors __qseecom_qteec_issue_cmd() but copies the request itself from
 * userspace, always patches fds, and always uses the INVOKE command id.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	struct qseecom_qteec_modfd_req req;
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int i = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = copy_from_user(&req, argp,
			sizeof(struct qseecom_qteec_modfd_req));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}
	ret = __qseecom_qteec_validate_msg(data,
					(struct qseecom_qteec_req *)(&req));
	if (ret)
		return ret;
	/* Keep the user-space pointers; the ireq physical addresses are
	 * derived from these after req's own pointers are rewritten.
	 */
	req_ptr = req.req_ptr;
	resp_ptr = req.resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (req.ifd_data[i].fd) {
			if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
				return -EINVAL;
		}
	}
	/* Rewrite to kernel vaddrs so fd patching writes into the
	 * shared buffer directly.
	 */
	req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.req_ptr);
	req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.resp_ptr);
	ret = __qseecom_update_qteec_req_buf(&req, data, false);
	if (ret)
		return ret;

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy 32-bit ireq layout. */
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req.req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
	} else {
		/* 64-bit ireq layout for QSEE >= 4.0. */
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req.req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
	}
	reqd_len_sb_in = req.req_len + req.resp_len;
	/* The command id is the first word of either ireq layout. */
	if (qseecom.whitelist_support == true)
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
	else
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;

	/* Flush the used portion of the shared buffer before the call. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}

	if (qseecom.qsee_reentrancy_support) {
		/* NOTE(review): a non-zero ret from
		 * __qseecom_process_reentrancy() is overwritten by the
		 * calls below — confirm this is intentional.
		 */
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
						resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* Undo the fd patching, then invalidate the shared buffer so the
	 * CPU sees TZ's reply.
	 */
	ret = __qseecom_update_qteec_req_buf(&req, data, true);
	if (ret)
		return ret;

	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}
	return 0;
}
6909
6910static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6911 void __user *argp)
6912{
6913 struct qseecom_qteec_modfd_req req;
6914 int ret = 0;
6915
6916 ret = copy_from_user(&req, argp,
6917 sizeof(struct qseecom_qteec_modfd_req));
6918 if (ret) {
6919 pr_err("copy_from_user failed\n");
6920 return ret;
6921 }
6922 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6923 QSEOS_TEE_REQUEST_CANCELLATION);
6924
6925 return ret;
6926}
6927
6928static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6929{
6930 if (data->sglist_cnt) {
6931 memset(data->sglistinfo_ptr, 0,
6932 SGLISTINFO_TABLE_SIZE);
6933 data->sglist_cnt = 0;
6934 }
6935}
6936
6937static inline long qseecom_ioctl(struct file *file,
6938 unsigned int cmd, unsigned long arg)
6939{
6940 int ret = 0;
6941 struct qseecom_dev_handle *data = file->private_data;
6942 void __user *argp = (void __user *) arg;
6943 bool perf_enabled = false;
6944
6945 if (!data) {
6946 pr_err("Invalid/uninitialized device handle\n");
6947 return -EINVAL;
6948 }
6949
6950 if (data->abort) {
6951 pr_err("Aborting qseecom driver\n");
6952 return -ENODEV;
6953 }
6954
6955 switch (cmd) {
6956 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
6957 if (data->type != QSEECOM_GENERIC) {
6958 pr_err("reg lstnr req: invalid handle (%d)\n",
6959 data->type);
6960 ret = -EINVAL;
6961 break;
6962 }
6963 pr_debug("ioctl register_listener_req()\n");
6964 mutex_lock(&app_access_lock);
6965 atomic_inc(&data->ioctl_count);
6966 data->type = QSEECOM_LISTENER_SERVICE;
6967 ret = qseecom_register_listener(data, argp);
6968 atomic_dec(&data->ioctl_count);
6969 wake_up_all(&data->abort_wq);
6970 mutex_unlock(&app_access_lock);
6971 if (ret)
6972 pr_err("failed qseecom_register_listener: %d\n", ret);
6973 break;
6974 }
6975 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
6976 if ((data->listener.id == 0) ||
6977 (data->type != QSEECOM_LISTENER_SERVICE)) {
6978 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
6979 data->type, data->listener.id);
6980 ret = -EINVAL;
6981 break;
6982 }
6983 pr_debug("ioctl unregister_listener_req()\n");
6984 mutex_lock(&app_access_lock);
6985 atomic_inc(&data->ioctl_count);
6986 ret = qseecom_unregister_listener(data);
6987 atomic_dec(&data->ioctl_count);
6988 wake_up_all(&data->abort_wq);
6989 mutex_unlock(&app_access_lock);
6990 if (ret)
6991 pr_err("failed qseecom_unregister_listener: %d\n", ret);
6992 break;
6993 }
6994 case QSEECOM_IOCTL_SEND_CMD_REQ: {
6995 if ((data->client.app_id == 0) ||
6996 (data->type != QSEECOM_CLIENT_APP)) {
6997 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
6998 data->type, data->client.app_id);
6999 ret = -EINVAL;
7000 break;
7001 }
7002 /* Only one client allowed here at a time */
7003 mutex_lock(&app_access_lock);
7004 if (qseecom.support_bus_scaling) {
7005 /* register bus bw in case the client doesn't do it */
7006 if (!data->mode) {
7007 mutex_lock(&qsee_bw_mutex);
7008 __qseecom_register_bus_bandwidth_needs(
7009 data, HIGH);
7010 mutex_unlock(&qsee_bw_mutex);
7011 }
7012 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7013 if (ret) {
7014 pr_err("Failed to set bw.\n");
7015 ret = -EINVAL;
7016 mutex_unlock(&app_access_lock);
7017 break;
7018 }
7019 }
7020 /*
7021 * On targets where crypto clock is handled by HLOS,
7022 * if clk_access_cnt is zero and perf_enabled is false,
7023 * then the crypto clock was not enabled before sending cmd to
7024 * tz, qseecom will enable the clock to avoid service failure.
7025 */
7026 if (!qseecom.no_clock_support &&
7027 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7028 pr_debug("ce clock is not enabled!\n");
7029 ret = qseecom_perf_enable(data);
7030 if (ret) {
7031 pr_err("Failed to vote for clock with err %d\n",
7032 ret);
7033 mutex_unlock(&app_access_lock);
7034 ret = -EINVAL;
7035 break;
7036 }
7037 perf_enabled = true;
7038 }
7039 atomic_inc(&data->ioctl_count);
7040 ret = qseecom_send_cmd(data, argp);
7041 if (qseecom.support_bus_scaling)
7042 __qseecom_add_bw_scale_down_timer(
7043 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7044 if (perf_enabled) {
7045 qsee_disable_clock_vote(data, CLK_DFAB);
7046 qsee_disable_clock_vote(data, CLK_SFPB);
7047 }
7048 atomic_dec(&data->ioctl_count);
7049 wake_up_all(&data->abort_wq);
7050 mutex_unlock(&app_access_lock);
7051 if (ret)
7052 pr_err("failed qseecom_send_cmd: %d\n", ret);
7053 break;
7054 }
7055 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7056 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7057 if ((data->client.app_id == 0) ||
7058 (data->type != QSEECOM_CLIENT_APP)) {
7059 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7060 data->type, data->client.app_id);
7061 ret = -EINVAL;
7062 break;
7063 }
7064 /* Only one client allowed here at a time */
7065 mutex_lock(&app_access_lock);
7066 if (qseecom.support_bus_scaling) {
7067 if (!data->mode) {
7068 mutex_lock(&qsee_bw_mutex);
7069 __qseecom_register_bus_bandwidth_needs(
7070 data, HIGH);
7071 mutex_unlock(&qsee_bw_mutex);
7072 }
7073 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7074 if (ret) {
7075 pr_err("Failed to set bw.\n");
7076 mutex_unlock(&app_access_lock);
7077 ret = -EINVAL;
7078 break;
7079 }
7080 }
7081 /*
7082 * On targets where crypto clock is handled by HLOS,
7083 * if clk_access_cnt is zero and perf_enabled is false,
7084 * then the crypto clock was not enabled before sending cmd to
7085 * tz, qseecom will enable the clock to avoid service failure.
7086 */
7087 if (!qseecom.no_clock_support &&
7088 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7089 pr_debug("ce clock is not enabled!\n");
7090 ret = qseecom_perf_enable(data);
7091 if (ret) {
7092 pr_err("Failed to vote for clock with err %d\n",
7093 ret);
7094 mutex_unlock(&app_access_lock);
7095 ret = -EINVAL;
7096 break;
7097 }
7098 perf_enabled = true;
7099 }
7100 atomic_inc(&data->ioctl_count);
7101 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7102 ret = qseecom_send_modfd_cmd(data, argp);
7103 else
7104 ret = qseecom_send_modfd_cmd_64(data, argp);
7105 if (qseecom.support_bus_scaling)
7106 __qseecom_add_bw_scale_down_timer(
7107 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7108 if (perf_enabled) {
7109 qsee_disable_clock_vote(data, CLK_DFAB);
7110 qsee_disable_clock_vote(data, CLK_SFPB);
7111 }
7112 atomic_dec(&data->ioctl_count);
7113 wake_up_all(&data->abort_wq);
7114 mutex_unlock(&app_access_lock);
7115 if (ret)
7116 pr_err("failed qseecom_send_cmd: %d\n", ret);
7117 __qseecom_clean_data_sglistinfo(data);
7118 break;
7119 }
7120 case QSEECOM_IOCTL_RECEIVE_REQ: {
7121 if ((data->listener.id == 0) ||
7122 (data->type != QSEECOM_LISTENER_SERVICE)) {
7123 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7124 data->type, data->listener.id);
7125 ret = -EINVAL;
7126 break;
7127 }
7128 atomic_inc(&data->ioctl_count);
7129 ret = qseecom_receive_req(data);
7130 atomic_dec(&data->ioctl_count);
7131 wake_up_all(&data->abort_wq);
7132 if (ret && (ret != -ERESTARTSYS))
7133 pr_err("failed qseecom_receive_req: %d\n", ret);
7134 break;
7135 }
7136 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7137 if ((data->listener.id == 0) ||
7138 (data->type != QSEECOM_LISTENER_SERVICE)) {
7139 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7140 data->type, data->listener.id);
7141 ret = -EINVAL;
7142 break;
7143 }
7144 atomic_inc(&data->ioctl_count);
7145 if (!qseecom.qsee_reentrancy_support)
7146 ret = qseecom_send_resp();
7147 else
7148 ret = qseecom_reentrancy_send_resp(data);
7149 atomic_dec(&data->ioctl_count);
7150 wake_up_all(&data->abort_wq);
7151 if (ret)
7152 pr_err("failed qseecom_send_resp: %d\n", ret);
7153 break;
7154 }
7155 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7156 if ((data->type != QSEECOM_CLIENT_APP) &&
7157 (data->type != QSEECOM_GENERIC) &&
7158 (data->type != QSEECOM_SECURE_SERVICE)) {
7159 pr_err("set mem param req: invalid handle (%d)\n",
7160 data->type);
7161 ret = -EINVAL;
7162 break;
7163 }
7164 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7165 mutex_lock(&app_access_lock);
7166 atomic_inc(&data->ioctl_count);
7167 ret = qseecom_set_client_mem_param(data, argp);
7168 atomic_dec(&data->ioctl_count);
7169 mutex_unlock(&app_access_lock);
7170 if (ret)
7171 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7172 ret);
7173 break;
7174 }
7175 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7176 if ((data->type != QSEECOM_GENERIC) &&
7177 (data->type != QSEECOM_CLIENT_APP)) {
7178 pr_err("load app req: invalid handle (%d)\n",
7179 data->type);
7180 ret = -EINVAL;
7181 break;
7182 }
7183 data->type = QSEECOM_CLIENT_APP;
7184 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7185 mutex_lock(&app_access_lock);
7186 atomic_inc(&data->ioctl_count);
7187 ret = qseecom_load_app(data, argp);
7188 atomic_dec(&data->ioctl_count);
7189 mutex_unlock(&app_access_lock);
7190 if (ret)
7191 pr_err("failed load_app request: %d\n", ret);
7192 break;
7193 }
7194 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7195 if ((data->client.app_id == 0) ||
7196 (data->type != QSEECOM_CLIENT_APP)) {
7197 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7198 data->type, data->client.app_id);
7199 ret = -EINVAL;
7200 break;
7201 }
7202 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7203 mutex_lock(&app_access_lock);
7204 atomic_inc(&data->ioctl_count);
7205 ret = qseecom_unload_app(data, false);
7206 atomic_dec(&data->ioctl_count);
7207 mutex_unlock(&app_access_lock);
7208 if (ret)
7209 pr_err("failed unload_app request: %d\n", ret);
7210 break;
7211 }
7212 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7213 atomic_inc(&data->ioctl_count);
7214 ret = qseecom_get_qseos_version(data, argp);
7215 if (ret)
7216 pr_err("qseecom_get_qseos_version: %d\n", ret);
7217 atomic_dec(&data->ioctl_count);
7218 break;
7219 }
7220 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7221 if ((data->type != QSEECOM_GENERIC) &&
7222 (data->type != QSEECOM_CLIENT_APP)) {
7223 pr_err("perf enable req: invalid handle (%d)\n",
7224 data->type);
7225 ret = -EINVAL;
7226 break;
7227 }
7228 if ((data->type == QSEECOM_CLIENT_APP) &&
7229 (data->client.app_id == 0)) {
7230 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7231 data->type, data->client.app_id);
7232 ret = -EINVAL;
7233 break;
7234 }
7235 atomic_inc(&data->ioctl_count);
7236 if (qseecom.support_bus_scaling) {
7237 mutex_lock(&qsee_bw_mutex);
7238 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7239 mutex_unlock(&qsee_bw_mutex);
7240 } else {
7241 ret = qseecom_perf_enable(data);
7242 if (ret)
7243 pr_err("Fail to vote for clocks %d\n", ret);
7244 }
7245 atomic_dec(&data->ioctl_count);
7246 break;
7247 }
7248 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7249 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7250 (data->type != QSEECOM_CLIENT_APP)) {
7251 pr_err("perf disable req: invalid handle (%d)\n",
7252 data->type);
7253 ret = -EINVAL;
7254 break;
7255 }
7256 if ((data->type == QSEECOM_CLIENT_APP) &&
7257 (data->client.app_id == 0)) {
7258 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7259 data->type, data->client.app_id);
7260 ret = -EINVAL;
7261 break;
7262 }
7263 atomic_inc(&data->ioctl_count);
7264 if (!qseecom.support_bus_scaling) {
7265 qsee_disable_clock_vote(data, CLK_DFAB);
7266 qsee_disable_clock_vote(data, CLK_SFPB);
7267 } else {
7268 mutex_lock(&qsee_bw_mutex);
7269 qseecom_unregister_bus_bandwidth_needs(data);
7270 mutex_unlock(&qsee_bw_mutex);
7271 }
7272 atomic_dec(&data->ioctl_count);
7273 break;
7274 }
7275
7276 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7277 /* If crypto clock is not handled by HLOS, return directly. */
7278 if (qseecom.no_clock_support) {
7279 pr_debug("crypto clock is not handled by HLOS\n");
7280 break;
7281 }
7282 if ((data->client.app_id == 0) ||
7283 (data->type != QSEECOM_CLIENT_APP)) {
7284 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7285 data->type, data->client.app_id);
7286 ret = -EINVAL;
7287 break;
7288 }
7289 atomic_inc(&data->ioctl_count);
7290 ret = qseecom_scale_bus_bandwidth(data, argp);
7291 atomic_dec(&data->ioctl_count);
7292 break;
7293 }
7294 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7295 if (data->type != QSEECOM_GENERIC) {
7296 pr_err("load ext elf req: invalid client handle (%d)\n",
7297 data->type);
7298 ret = -EINVAL;
7299 break;
7300 }
7301 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7302 data->released = true;
7303 mutex_lock(&app_access_lock);
7304 atomic_inc(&data->ioctl_count);
7305 ret = qseecom_load_external_elf(data, argp);
7306 atomic_dec(&data->ioctl_count);
7307 mutex_unlock(&app_access_lock);
7308 if (ret)
7309 pr_err("failed load_external_elf request: %d\n", ret);
7310 break;
7311 }
7312 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7313 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7314 pr_err("unload ext elf req: invalid handle (%d)\n",
7315 data->type);
7316 ret = -EINVAL;
7317 break;
7318 }
7319 data->released = true;
7320 mutex_lock(&app_access_lock);
7321 atomic_inc(&data->ioctl_count);
7322 ret = qseecom_unload_external_elf(data);
7323 atomic_dec(&data->ioctl_count);
7324 mutex_unlock(&app_access_lock);
7325 if (ret)
7326 pr_err("failed unload_app request: %d\n", ret);
7327 break;
7328 }
7329 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7330 data->type = QSEECOM_CLIENT_APP;
7331 mutex_lock(&app_access_lock);
7332 atomic_inc(&data->ioctl_count);
7333 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7334 ret = qseecom_query_app_loaded(data, argp);
7335 atomic_dec(&data->ioctl_count);
7336 mutex_unlock(&app_access_lock);
7337 break;
7338 }
7339 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7340 if (data->type != QSEECOM_GENERIC) {
7341 pr_err("send cmd svc req: invalid handle (%d)\n",
7342 data->type);
7343 ret = -EINVAL;
7344 break;
7345 }
7346 data->type = QSEECOM_SECURE_SERVICE;
7347 if (qseecom.qsee_version < QSEE_VERSION_03) {
7348 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7349 qseecom.qsee_version);
7350 return -EINVAL;
7351 }
7352 mutex_lock(&app_access_lock);
7353 atomic_inc(&data->ioctl_count);
7354 ret = qseecom_send_service_cmd(data, argp);
7355 atomic_dec(&data->ioctl_count);
7356 mutex_unlock(&app_access_lock);
7357 break;
7358 }
7359 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7360 if (!(qseecom.support_pfe || qseecom.support_fde))
7361 pr_err("Features requiring key init not supported\n");
7362 if (data->type != QSEECOM_GENERIC) {
7363 pr_err("create key req: invalid handle (%d)\n",
7364 data->type);
7365 ret = -EINVAL;
7366 break;
7367 }
7368 if (qseecom.qsee_version < QSEE_VERSION_05) {
7369 pr_err("Create Key feature unsupported: qsee ver %u\n",
7370 qseecom.qsee_version);
7371 return -EINVAL;
7372 }
7373 data->released = true;
7374 mutex_lock(&app_access_lock);
7375 atomic_inc(&data->ioctl_count);
7376 ret = qseecom_create_key(data, argp);
7377 if (ret)
7378 pr_err("failed to create encryption key: %d\n", ret);
7379
7380 atomic_dec(&data->ioctl_count);
7381 mutex_unlock(&app_access_lock);
7382 break;
7383 }
7384 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7385 if (!(qseecom.support_pfe || qseecom.support_fde))
7386 pr_err("Features requiring key init not supported\n");
7387 if (data->type != QSEECOM_GENERIC) {
7388 pr_err("wipe key req: invalid handle (%d)\n",
7389 data->type);
7390 ret = -EINVAL;
7391 break;
7392 }
7393 if (qseecom.qsee_version < QSEE_VERSION_05) {
7394 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7395 qseecom.qsee_version);
7396 return -EINVAL;
7397 }
7398 data->released = true;
7399 mutex_lock(&app_access_lock);
7400 atomic_inc(&data->ioctl_count);
7401 ret = qseecom_wipe_key(data, argp);
7402 if (ret)
7403 pr_err("failed to wipe encryption key: %d\n", ret);
7404 atomic_dec(&data->ioctl_count);
7405 mutex_unlock(&app_access_lock);
7406 break;
7407 }
7408 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7409 if (!(qseecom.support_pfe || qseecom.support_fde))
7410 pr_err("Features requiring key init not supported\n");
7411 if (data->type != QSEECOM_GENERIC) {
7412 pr_err("update key req: invalid handle (%d)\n",
7413 data->type);
7414 ret = -EINVAL;
7415 break;
7416 }
7417 if (qseecom.qsee_version < QSEE_VERSION_05) {
7418 pr_err("Update Key feature unsupported in qsee ver %u\n",
7419 qseecom.qsee_version);
7420 return -EINVAL;
7421 }
7422 data->released = true;
7423 mutex_lock(&app_access_lock);
7424 atomic_inc(&data->ioctl_count);
7425 ret = qseecom_update_key_user_info(data, argp);
7426 if (ret)
7427 pr_err("failed to update key user info: %d\n", ret);
7428 atomic_dec(&data->ioctl_count);
7429 mutex_unlock(&app_access_lock);
7430 break;
7431 }
7432 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7433 if (data->type != QSEECOM_GENERIC) {
7434 pr_err("save part hash req: invalid handle (%d)\n",
7435 data->type);
7436 ret = -EINVAL;
7437 break;
7438 }
7439 data->released = true;
7440 mutex_lock(&app_access_lock);
7441 atomic_inc(&data->ioctl_count);
7442 ret = qseecom_save_partition_hash(argp);
7443 atomic_dec(&data->ioctl_count);
7444 mutex_unlock(&app_access_lock);
7445 break;
7446 }
7447 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7448 if (data->type != QSEECOM_GENERIC) {
7449 pr_err("ES activated req: invalid handle (%d)\n",
7450 data->type);
7451 ret = -EINVAL;
7452 break;
7453 }
7454 data->released = true;
7455 mutex_lock(&app_access_lock);
7456 atomic_inc(&data->ioctl_count);
7457 ret = qseecom_is_es_activated(argp);
7458 atomic_dec(&data->ioctl_count);
7459 mutex_unlock(&app_access_lock);
7460 break;
7461 }
7462 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7463 if (data->type != QSEECOM_GENERIC) {
7464 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7465 data->type);
7466 ret = -EINVAL;
7467 break;
7468 }
7469 data->released = true;
7470 mutex_lock(&app_access_lock);
7471 atomic_inc(&data->ioctl_count);
7472 ret = qseecom_mdtp_cipher_dip(argp);
7473 atomic_dec(&data->ioctl_count);
7474 mutex_unlock(&app_access_lock);
7475 break;
7476 }
7477 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7478 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7479 if ((data->listener.id == 0) ||
7480 (data->type != QSEECOM_LISTENER_SERVICE)) {
7481 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7482 data->type, data->listener.id);
7483 ret = -EINVAL;
7484 break;
7485 }
7486 atomic_inc(&data->ioctl_count);
7487 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7488 ret = qseecom_send_modfd_resp(data, argp);
7489 else
7490 ret = qseecom_send_modfd_resp_64(data, argp);
7491 atomic_dec(&data->ioctl_count);
7492 wake_up_all(&data->abort_wq);
7493 if (ret)
7494 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7495 __qseecom_clean_data_sglistinfo(data);
7496 break;
7497 }
7498 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7499 if ((data->client.app_id == 0) ||
7500 (data->type != QSEECOM_CLIENT_APP)) {
7501 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7502 data->type, data->client.app_id);
7503 ret = -EINVAL;
7504 break;
7505 }
7506 if (qseecom.qsee_version < QSEE_VERSION_40) {
7507 pr_err("GP feature unsupported: qsee ver %u\n",
7508 qseecom.qsee_version);
7509 return -EINVAL;
7510 }
7511 /* Only one client allowed here at a time */
7512 mutex_lock(&app_access_lock);
7513 atomic_inc(&data->ioctl_count);
7514 ret = qseecom_qteec_open_session(data, argp);
7515 atomic_dec(&data->ioctl_count);
7516 wake_up_all(&data->abort_wq);
7517 mutex_unlock(&app_access_lock);
7518 if (ret)
7519 pr_err("failed open_session_cmd: %d\n", ret);
7520 __qseecom_clean_data_sglistinfo(data);
7521 break;
7522 }
7523 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7524 if ((data->client.app_id == 0) ||
7525 (data->type != QSEECOM_CLIENT_APP)) {
7526 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7527 data->type, data->client.app_id);
7528 ret = -EINVAL;
7529 break;
7530 }
7531 if (qseecom.qsee_version < QSEE_VERSION_40) {
7532 pr_err("GP feature unsupported: qsee ver %u\n",
7533 qseecom.qsee_version);
7534 return -EINVAL;
7535 }
7536 /* Only one client allowed here at a time */
7537 mutex_lock(&app_access_lock);
7538 atomic_inc(&data->ioctl_count);
7539 ret = qseecom_qteec_close_session(data, argp);
7540 atomic_dec(&data->ioctl_count);
7541 wake_up_all(&data->abort_wq);
7542 mutex_unlock(&app_access_lock);
7543 if (ret)
7544 pr_err("failed close_session_cmd: %d\n", ret);
7545 break;
7546 }
7547 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7548 if ((data->client.app_id == 0) ||
7549 (data->type != QSEECOM_CLIENT_APP)) {
7550 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7551 data->type, data->client.app_id);
7552 ret = -EINVAL;
7553 break;
7554 }
7555 if (qseecom.qsee_version < QSEE_VERSION_40) {
7556 pr_err("GP feature unsupported: qsee ver %u\n",
7557 qseecom.qsee_version);
7558 return -EINVAL;
7559 }
7560 /* Only one client allowed here at a time */
7561 mutex_lock(&app_access_lock);
7562 atomic_inc(&data->ioctl_count);
7563 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7564 atomic_dec(&data->ioctl_count);
7565 wake_up_all(&data->abort_wq);
7566 mutex_unlock(&app_access_lock);
7567 if (ret)
7568 pr_err("failed Invoke cmd: %d\n", ret);
7569 __qseecom_clean_data_sglistinfo(data);
7570 break;
7571 }
7572 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7573 if ((data->client.app_id == 0) ||
7574 (data->type != QSEECOM_CLIENT_APP)) {
7575 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7576 data->type, data->client.app_id);
7577 ret = -EINVAL;
7578 break;
7579 }
7580 if (qseecom.qsee_version < QSEE_VERSION_40) {
7581 pr_err("GP feature unsupported: qsee ver %u\n",
7582 qseecom.qsee_version);
7583 return -EINVAL;
7584 }
7585 /* Only one client allowed here at a time */
7586 mutex_lock(&app_access_lock);
7587 atomic_inc(&data->ioctl_count);
7588 ret = qseecom_qteec_request_cancellation(data, argp);
7589 atomic_dec(&data->ioctl_count);
7590 wake_up_all(&data->abort_wq);
7591 mutex_unlock(&app_access_lock);
7592 if (ret)
7593 pr_err("failed request_cancellation: %d\n", ret);
7594 break;
7595 }
7596 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7597 atomic_inc(&data->ioctl_count);
7598 ret = qseecom_get_ce_info(data, argp);
7599 if (ret)
7600 pr_err("failed get fde ce pipe info: %d\n", ret);
7601 atomic_dec(&data->ioctl_count);
7602 break;
7603 }
7604 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7605 atomic_inc(&data->ioctl_count);
7606 ret = qseecom_free_ce_info(data, argp);
7607 if (ret)
7608 pr_err("failed get fde ce pipe info: %d\n", ret);
7609 atomic_dec(&data->ioctl_count);
7610 break;
7611 }
7612 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7613 atomic_inc(&data->ioctl_count);
7614 ret = qseecom_query_ce_info(data, argp);
7615 if (ret)
7616 pr_err("failed get fde ce pipe info: %d\n", ret);
7617 atomic_dec(&data->ioctl_count);
7618 break;
7619 }
7620 default:
7621 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7622 return -EINVAL;
7623 }
7624 return ret;
7625}
7626
7627static int qseecom_open(struct inode *inode, struct file *file)
7628{
7629 int ret = 0;
7630 struct qseecom_dev_handle *data;
7631
7632 data = kzalloc(sizeof(*data), GFP_KERNEL);
7633 if (!data)
7634 return -ENOMEM;
7635 file->private_data = data;
7636 data->abort = 0;
7637 data->type = QSEECOM_GENERIC;
7638 data->released = false;
7639 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7640 data->mode = INACTIVE;
7641 init_waitqueue_head(&data->abort_wq);
7642 atomic_set(&data->ioctl_count, 0);
7643 return ret;
7644}
7645
/*
 * qseecom_release() - file_operations .release hook for /dev/qseecom.
 *
 * Tears down whatever state this handle accumulated, based on the type
 * the ioctl path assigned to it: unregister a listener, unload a client
 * TA, or unmap ion-allocated memory.  Handles whose ioctl already did
 * its own teardown set data->released, so that step is skipped here.
 * Afterwards any bus-bandwidth registration or clock votes held by the
 * handle are dropped, and the handle itself is freed.
 *
 * Returns the result of the type-specific teardown (or of the bus
 * scale-down, which may overwrite it), 0 if nothing failed.
 */
static int qseecom_release(struct inode *inode, struct file *file)
{
	struct qseecom_dev_handle *data = file->private_data;
	int ret = 0;

	if (data->released == false) {
		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
			data->type, data->mode, data);
		switch (data->type) {
		case QSEECOM_LISTENER_SERVICE:
			/* Teardown talks to TZ; serialize with other calls. */
			mutex_lock(&app_access_lock);
			ret = qseecom_unregister_listener(data);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_CLIENT_APP:
			mutex_lock(&app_access_lock);
			ret = qseecom_unload_app(data, true);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_SECURE_SERVICE:
		case QSEECOM_GENERIC:
			ret = qseecom_unmap_ion_allocated_memory(data);
			if (ret)
				pr_err("Ion Unmap failed\n");
			break;
		case QSEECOM_UNAVAILABLE_CLIENT_APP:
			/* External-ELF handles: nothing left to clean here. */
			break;
		default:
			pr_err("Unsupported clnt_handle_type %d",
				data->type);
			break;
		}
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		if (data->mode != INACTIVE) {
			/* Drop this handle's bandwidth request; scale the
			 * bus down only if no other handle still needs it.
			 */
			qseecom_unregister_bus_bandwidth_needs(data);
			if (qseecom.cumulative_mode == INACTIVE) {
				ret = __qseecom_set_msm_bus_request(INACTIVE);
				if (ret)
					pr_err("Fail to scale down bus\n");
			}
		}
		mutex_unlock(&qsee_bw_mutex);
	} else {
		/* No bus scaling: release any clock votes still held. */
		if (data->fast_load_enabled == true)
			qsee_disable_clock_vote(data, CLK_SFPB);
		if (data->perf_enabled == true)
			qsee_disable_clock_vote(data, CLK_DFAB);
	}
	kfree(data);

	return ret;
}
7701
7702#ifdef CONFIG_COMPAT
7703#include "compat_qseecom.c"
7704#else
7705#define compat_qseecom_ioctl NULL
7706#endif
7707
/* Character-device entry points for the qseecom device node. */
static const struct file_operations qseecom_fops = {
		.owner = THIS_MODULE,
		.unlocked_ioctl = qseecom_ioctl,
		.compat_ioctl = compat_qseecom_ioctl,
		.open = qseecom_open,
		.release = qseecom_release
};
7715
7716static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7717{
7718 int rc = 0;
7719 struct device *pdev;
7720 struct qseecom_clk *qclk;
7721 char *core_clk_src = NULL;
7722 char *core_clk = NULL;
7723 char *iface_clk = NULL;
7724 char *bus_clk = NULL;
7725
7726 switch (ce) {
7727 case CLK_QSEE: {
7728 core_clk_src = "core_clk_src";
7729 core_clk = "core_clk";
7730 iface_clk = "iface_clk";
7731 bus_clk = "bus_clk";
7732 qclk = &qseecom.qsee;
7733 qclk->instance = CLK_QSEE;
7734 break;
7735 };
7736 case CLK_CE_DRV: {
7737 core_clk_src = "ce_drv_core_clk_src";
7738 core_clk = "ce_drv_core_clk";
7739 iface_clk = "ce_drv_iface_clk";
7740 bus_clk = "ce_drv_bus_clk";
7741 qclk = &qseecom.ce_drv;
7742 qclk->instance = CLK_CE_DRV;
7743 break;
7744 };
7745 default:
7746 pr_err("Invalid ce hw instance: %d!\n", ce);
7747 return -EIO;
7748 }
7749
7750 if (qseecom.no_clock_support) {
7751 qclk->ce_core_clk = NULL;
7752 qclk->ce_clk = NULL;
7753 qclk->ce_bus_clk = NULL;
7754 qclk->ce_core_src_clk = NULL;
7755 return 0;
7756 }
7757
7758 pdev = qseecom.pdev;
7759
7760 /* Get CE3 src core clk. */
7761 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7762 if (!IS_ERR(qclk->ce_core_src_clk)) {
7763 rc = clk_set_rate(qclk->ce_core_src_clk,
7764 qseecom.ce_opp_freq_hz);
7765 if (rc) {
7766 clk_put(qclk->ce_core_src_clk);
7767 qclk->ce_core_src_clk = NULL;
7768 pr_err("Unable to set the core src clk @%uMhz.\n",
7769 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7770 return -EIO;
7771 }
7772 } else {
7773 pr_warn("Unable to get CE core src clk, set to NULL\n");
7774 qclk->ce_core_src_clk = NULL;
7775 }
7776
7777 /* Get CE core clk */
7778 qclk->ce_core_clk = clk_get(pdev, core_clk);
7779 if (IS_ERR(qclk->ce_core_clk)) {
7780 rc = PTR_ERR(qclk->ce_core_clk);
7781 pr_err("Unable to get CE core clk\n");
7782 if (qclk->ce_core_src_clk != NULL)
7783 clk_put(qclk->ce_core_src_clk);
7784 return -EIO;
7785 }
7786
7787 /* Get CE Interface clk */
7788 qclk->ce_clk = clk_get(pdev, iface_clk);
7789 if (IS_ERR(qclk->ce_clk)) {
7790 rc = PTR_ERR(qclk->ce_clk);
7791 pr_err("Unable to get CE interface clk\n");
7792 if (qclk->ce_core_src_clk != NULL)
7793 clk_put(qclk->ce_core_src_clk);
7794 clk_put(qclk->ce_core_clk);
7795 return -EIO;
7796 }
7797
7798 /* Get CE AXI clk */
7799 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7800 if (IS_ERR(qclk->ce_bus_clk)) {
7801 rc = PTR_ERR(qclk->ce_bus_clk);
7802 pr_err("Unable to get CE BUS interface clk\n");
7803 if (qclk->ce_core_src_clk != NULL)
7804 clk_put(qclk->ce_core_src_clk);
7805 clk_put(qclk->ce_core_clk);
7806 clk_put(qclk->ce_clk);
7807 return -EIO;
7808 }
7809
7810 return rc;
7811}
7812
7813static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7814{
7815 struct qseecom_clk *qclk;
7816
7817 if (ce == CLK_QSEE)
7818 qclk = &qseecom.qsee;
7819 else
7820 qclk = &qseecom.ce_drv;
7821
7822 if (qclk->ce_clk != NULL) {
7823 clk_put(qclk->ce_clk);
7824 qclk->ce_clk = NULL;
7825 }
7826 if (qclk->ce_core_clk != NULL) {
7827 clk_put(qclk->ce_core_clk);
7828 qclk->ce_core_clk = NULL;
7829 }
7830 if (qclk->ce_bus_clk != NULL) {
7831 clk_put(qclk->ce_bus_clk);
7832 qclk->ce_bus_clk = NULL;
7833 }
7834 if (qclk->ce_core_src_clk != NULL) {
7835 clk_put(qclk->ce_core_src_clk);
7836 qclk->ce_core_src_clk = NULL;
7837 }
7838 qclk->instance = CLK_INVALID;
7839}
7840
7841static int qseecom_retrieve_ce_data(struct platform_device *pdev)
7842{
7843 int rc = 0;
7844 uint32_t hlos_num_ce_hw_instances;
7845 uint32_t disk_encrypt_pipe;
7846 uint32_t file_encrypt_pipe;
7847 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT];
7848 int i;
7849 const int *tbl;
7850 int size;
7851 int entry;
7852 struct qseecom_crypto_info *pfde_tbl = NULL;
7853 struct qseecom_crypto_info *p;
7854 int tbl_size;
7855 int j;
7856 bool old_db = true;
7857 struct qseecom_ce_info_use *pce_info_use;
7858 uint32_t *unit_tbl = NULL;
7859 int total_units = 0;
7860 struct qseecom_ce_pipe_entry *pce_entry;
7861
7862 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
7863 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
7864
7865 if (of_property_read_u32((&pdev->dev)->of_node,
7866 "qcom,qsee-ce-hw-instance",
7867 &qseecom.ce_info.qsee_ce_hw_instance)) {
7868 pr_err("Fail to get qsee ce hw instance information.\n");
7869 rc = -EINVAL;
7870 goto out;
7871 } else {
7872 pr_debug("qsee-ce-hw-instance=0x%x\n",
7873 qseecom.ce_info.qsee_ce_hw_instance);
7874 }
7875
7876 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
7877 "qcom,support-fde");
7878 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
7879 "qcom,support-pfe");
7880
7881 if (!qseecom.support_pfe && !qseecom.support_fde) {
7882 pr_warn("Device does not support PFE/FDE");
7883 goto out;
7884 }
7885
7886 if (qseecom.support_fde)
7887 tbl = of_get_property((&pdev->dev)->of_node,
7888 "qcom,full-disk-encrypt-info", &size);
7889 else
7890 tbl = NULL;
7891 if (tbl) {
7892 old_db = false;
7893 if (size % sizeof(struct qseecom_crypto_info)) {
7894 pr_err("full-disk-encrypt-info tbl size(%d)\n",
7895 size);
7896 rc = -EINVAL;
7897 goto out;
7898 }
7899 tbl_size = size / sizeof
7900 (struct qseecom_crypto_info);
7901
7902 pfde_tbl = kzalloc(size, GFP_KERNEL);
7903 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
7904 total_units = 0;
7905
7906 if (!pfde_tbl || !unit_tbl) {
7907 pr_err("failed to alloc memory\n");
7908 rc = -ENOMEM;
7909 goto out;
7910 }
7911 if (of_property_read_u32_array((&pdev->dev)->of_node,
7912 "qcom,full-disk-encrypt-info",
7913 (u32 *)pfde_tbl, size/sizeof(u32))) {
7914 pr_err("failed to read full-disk-encrypt-info tbl\n");
7915 rc = -EINVAL;
7916 goto out;
7917 }
7918
7919 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7920 for (j = 0; j < total_units; j++) {
7921 if (p->unit_num == *(unit_tbl + j))
7922 break;
7923 }
7924 if (j == total_units) {
7925 *(unit_tbl + total_units) = p->unit_num;
7926 total_units++;
7927 }
7928 }
7929
7930 qseecom.ce_info.num_fde = total_units;
7931 pce_info_use = qseecom.ce_info.fde = kcalloc(
7932 total_units, sizeof(struct qseecom_ce_info_use),
7933 GFP_KERNEL);
7934 if (!pce_info_use) {
7935 pr_err("failed to alloc memory\n");
7936 rc = -ENOMEM;
7937 goto out;
7938 }
7939
7940 for (j = 0; j < total_units; j++, pce_info_use++) {
7941 pce_info_use->unit_num = *(unit_tbl + j);
7942 pce_info_use->alloc = false;
7943 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
7944 pce_info_use->num_ce_pipe_entries = 0;
7945 pce_info_use->ce_pipe_entry = NULL;
7946 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7947 if (p->unit_num == pce_info_use->unit_num)
7948 pce_info_use->num_ce_pipe_entries++;
7949 }
7950
7951 entry = pce_info_use->num_ce_pipe_entries;
7952 pce_entry = pce_info_use->ce_pipe_entry =
7953 kcalloc(entry,
7954 sizeof(struct qseecom_ce_pipe_entry),
7955 GFP_KERNEL);
7956 if (pce_entry == NULL) {
7957 pr_err("failed to alloc memory\n");
7958 rc = -ENOMEM;
7959 goto out;
7960 }
7961
7962 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7963 if (p->unit_num == pce_info_use->unit_num) {
7964 pce_entry->ce_num = p->ce;
7965 pce_entry->ce_pipe_pair =
7966 p->pipe_pair;
7967 pce_entry->valid = true;
7968 pce_entry++;
7969 }
7970 }
7971 }
7972 kfree(unit_tbl);
7973 unit_tbl = NULL;
7974 kfree(pfde_tbl);
7975 pfde_tbl = NULL;
7976 }
7977
7978 if (qseecom.support_pfe)
7979 tbl = of_get_property((&pdev->dev)->of_node,
7980 "qcom,per-file-encrypt-info", &size);
7981 else
7982 tbl = NULL;
7983 if (tbl) {
7984 old_db = false;
7985 if (size % sizeof(struct qseecom_crypto_info)) {
7986 pr_err("per-file-encrypt-info tbl size(%d)\n",
7987 size);
7988 rc = -EINVAL;
7989 goto out;
7990 }
7991 tbl_size = size / sizeof
7992 (struct qseecom_crypto_info);
7993
7994 pfde_tbl = kzalloc(size, GFP_KERNEL);
7995 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
7996 total_units = 0;
7997 if (!pfde_tbl || !unit_tbl) {
7998 pr_err("failed to alloc memory\n");
7999 rc = -ENOMEM;
8000 goto out;
8001 }
8002 if (of_property_read_u32_array((&pdev->dev)->of_node,
8003 "qcom,per-file-encrypt-info",
8004 (u32 *)pfde_tbl, size/sizeof(u32))) {
8005 pr_err("failed to read per-file-encrypt-info tbl\n");
8006 rc = -EINVAL;
8007 goto out;
8008 }
8009
8010 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8011 for (j = 0; j < total_units; j++) {
8012 if (p->unit_num == *(unit_tbl + j))
8013 break;
8014 }
8015 if (j == total_units) {
8016 *(unit_tbl + total_units) = p->unit_num;
8017 total_units++;
8018 }
8019 }
8020
8021 qseecom.ce_info.num_pfe = total_units;
8022 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8023 total_units, sizeof(struct qseecom_ce_info_use),
8024 GFP_KERNEL);
8025 if (!pce_info_use) {
8026 pr_err("failed to alloc memory\n");
8027 rc = -ENOMEM;
8028 goto out;
8029 }
8030
8031 for (j = 0; j < total_units; j++, pce_info_use++) {
8032 pce_info_use->unit_num = *(unit_tbl + j);
8033 pce_info_use->alloc = false;
8034 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8035 pce_info_use->num_ce_pipe_entries = 0;
8036 pce_info_use->ce_pipe_entry = NULL;
8037 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8038 if (p->unit_num == pce_info_use->unit_num)
8039 pce_info_use->num_ce_pipe_entries++;
8040 }
8041
8042 entry = pce_info_use->num_ce_pipe_entries;
8043 pce_entry = pce_info_use->ce_pipe_entry =
8044 kcalloc(entry,
8045 sizeof(struct qseecom_ce_pipe_entry),
8046 GFP_KERNEL);
8047 if (pce_entry == NULL) {
8048 pr_err("failed to alloc memory\n");
8049 rc = -ENOMEM;
8050 goto out;
8051 }
8052
8053 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8054 if (p->unit_num == pce_info_use->unit_num) {
8055 pce_entry->ce_num = p->ce;
8056 pce_entry->ce_pipe_pair =
8057 p->pipe_pair;
8058 pce_entry->valid = true;
8059 pce_entry++;
8060 }
8061 }
8062 }
8063 kfree(unit_tbl);
8064 unit_tbl = NULL;
8065 kfree(pfde_tbl);
8066 pfde_tbl = NULL;
8067 }
8068
8069 if (!old_db)
8070 goto out1;
8071
8072 if (of_property_read_bool((&pdev->dev)->of_node,
8073 "qcom,support-multiple-ce-hw-instance")) {
8074 if (of_property_read_u32((&pdev->dev)->of_node,
8075 "qcom,hlos-num-ce-hw-instances",
8076 &hlos_num_ce_hw_instances)) {
8077 pr_err("Fail: get hlos number of ce hw instance\n");
8078 rc = -EINVAL;
8079 goto out;
8080 }
8081 } else {
8082 hlos_num_ce_hw_instances = 1;
8083 }
8084
8085 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8086 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8087 MAX_CE_PIPE_PAIR_PER_UNIT);
8088 rc = -EINVAL;
8089 goto out;
8090 }
8091
8092 if (of_property_read_u32_array((&pdev->dev)->of_node,
8093 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8094 hlos_num_ce_hw_instances)) {
8095 pr_err("Fail: get hlos ce hw instance info\n");
8096 rc = -EINVAL;
8097 goto out;
8098 }
8099
8100 if (qseecom.support_fde) {
8101 pce_info_use = qseecom.ce_info.fde =
8102 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8103 if (!pce_info_use) {
8104 pr_err("failed to alloc memory\n");
8105 rc = -ENOMEM;
8106 goto out;
8107 }
8108 /* by default for old db */
8109 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8110 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8111 pce_info_use->alloc = false;
8112 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8113 pce_info_use->ce_pipe_entry = NULL;
8114 if (of_property_read_u32((&pdev->dev)->of_node,
8115 "qcom,disk-encrypt-pipe-pair",
8116 &disk_encrypt_pipe)) {
8117 pr_err("Fail to get FDE pipe information.\n");
8118 rc = -EINVAL;
8119 goto out;
8120 } else {
8121 pr_debug("disk-encrypt-pipe-pair=0x%x",
8122 disk_encrypt_pipe);
8123 }
8124 entry = pce_info_use->num_ce_pipe_entries =
8125 hlos_num_ce_hw_instances;
8126 pce_entry = pce_info_use->ce_pipe_entry =
8127 kcalloc(entry,
8128 sizeof(struct qseecom_ce_pipe_entry),
8129 GFP_KERNEL);
8130 if (pce_entry == NULL) {
8131 pr_err("failed to alloc memory\n");
8132 rc = -ENOMEM;
8133 goto out;
8134 }
8135 for (i = 0; i < entry; i++) {
8136 pce_entry->ce_num = hlos_ce_hw_instance[i];
8137 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8138 pce_entry->valid = 1;
8139 pce_entry++;
8140 }
8141 } else {
8142 pr_warn("Device does not support FDE");
8143 disk_encrypt_pipe = 0xff;
8144 }
8145 if (qseecom.support_pfe) {
8146 pce_info_use = qseecom.ce_info.pfe =
8147 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8148 if (!pce_info_use) {
8149 pr_err("failed to alloc memory\n");
8150 rc = -ENOMEM;
8151 goto out;
8152 }
8153 /* by default for old db */
8154 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8155 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8156 pce_info_use->alloc = false;
8157 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8158 pce_info_use->ce_pipe_entry = NULL;
8159
8160 if (of_property_read_u32((&pdev->dev)->of_node,
8161 "qcom,file-encrypt-pipe-pair",
8162 &file_encrypt_pipe)) {
8163 pr_err("Fail to get PFE pipe information.\n");
8164 rc = -EINVAL;
8165 goto out;
8166 } else {
8167 pr_debug("file-encrypt-pipe-pair=0x%x",
8168 file_encrypt_pipe);
8169 }
8170 entry = pce_info_use->num_ce_pipe_entries =
8171 hlos_num_ce_hw_instances;
8172 pce_entry = pce_info_use->ce_pipe_entry =
8173 kcalloc(entry,
8174 sizeof(struct qseecom_ce_pipe_entry),
8175 GFP_KERNEL);
8176 if (pce_entry == NULL) {
8177 pr_err("failed to alloc memory\n");
8178 rc = -ENOMEM;
8179 goto out;
8180 }
8181 for (i = 0; i < entry; i++) {
8182 pce_entry->ce_num = hlos_ce_hw_instance[i];
8183 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8184 pce_entry->valid = 1;
8185 pce_entry++;
8186 }
8187 } else {
8188 pr_warn("Device does not support PFE");
8189 file_encrypt_pipe = 0xff;
8190 }
8191
8192out1:
8193 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8194 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8195out:
8196 if (rc) {
8197 if (qseecom.ce_info.fde) {
8198 pce_info_use = qseecom.ce_info.fde;
8199 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8200 pce_entry = pce_info_use->ce_pipe_entry;
8201 kfree(pce_entry);
8202 pce_info_use++;
8203 }
8204 }
8205 kfree(qseecom.ce_info.fde);
8206 qseecom.ce_info.fde = NULL;
8207 if (qseecom.ce_info.pfe) {
8208 pce_info_use = qseecom.ce_info.pfe;
8209 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8210 pce_entry = pce_info_use->ce_pipe_entry;
8211 kfree(pce_entry);
8212 pce_info_use++;
8213 }
8214 }
8215 kfree(qseecom.ce_info.pfe);
8216 qseecom.ce_info.pfe = NULL;
8217 }
8218 kfree(unit_tbl);
8219 kfree(pfde_tbl);
8220 return rc;
8221}
8222
8223static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8224 void __user *argp)
8225{
8226 struct qseecom_ce_info_req req;
8227 struct qseecom_ce_info_req *pinfo = &req;
8228 int ret = 0;
8229 int i;
8230 unsigned int entries;
8231 struct qseecom_ce_info_use *pce_info_use, *p;
8232 int total = 0;
8233 bool found = false;
8234 struct qseecom_ce_pipe_entry *pce_entry;
8235
8236 ret = copy_from_user(pinfo, argp,
8237 sizeof(struct qseecom_ce_info_req));
8238 if (ret) {
8239 pr_err("copy_from_user failed\n");
8240 return ret;
8241 }
8242
8243 switch (pinfo->usage) {
8244 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8245 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8246 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8247 if (qseecom.support_fde) {
8248 p = qseecom.ce_info.fde;
8249 total = qseecom.ce_info.num_fde;
8250 } else {
8251 pr_err("system does not support fde\n");
8252 return -EINVAL;
8253 }
8254 break;
8255 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8256 if (qseecom.support_pfe) {
8257 p = qseecom.ce_info.pfe;
8258 total = qseecom.ce_info.num_pfe;
8259 } else {
8260 pr_err("system does not support pfe\n");
8261 return -EINVAL;
8262 }
8263 break;
8264 default:
8265 pr_err("unsupported usage %d\n", pinfo->usage);
8266 return -EINVAL;
8267 }
8268
8269 pce_info_use = NULL;
8270 for (i = 0; i < total; i++) {
8271 if (!p->alloc)
8272 pce_info_use = p;
8273 else if (!memcmp(p->handle, pinfo->handle,
8274 MAX_CE_INFO_HANDLE_SIZE)) {
8275 pce_info_use = p;
8276 found = true;
8277 break;
8278 }
8279 p++;
8280 }
8281
8282 if (pce_info_use == NULL)
8283 return -EBUSY;
8284
8285 pinfo->unit_num = pce_info_use->unit_num;
8286 if (!pce_info_use->alloc) {
8287 pce_info_use->alloc = true;
8288 memcpy(pce_info_use->handle,
8289 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8290 }
8291 if (pce_info_use->num_ce_pipe_entries >
8292 MAX_CE_PIPE_PAIR_PER_UNIT)
8293 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8294 else
8295 entries = pce_info_use->num_ce_pipe_entries;
8296 pinfo->num_ce_pipe_entries = entries;
8297 pce_entry = pce_info_use->ce_pipe_entry;
8298 for (i = 0; i < entries; i++, pce_entry++)
8299 pinfo->ce_pipe_entry[i] = *pce_entry;
8300 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8301 pinfo->ce_pipe_entry[i].valid = 0;
8302
8303 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8304 pr_err("copy_to_user failed\n");
8305 ret = -EFAULT;
8306 }
8307 return ret;
8308}
8309
8310static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8311 void __user *argp)
8312{
8313 struct qseecom_ce_info_req req;
8314 struct qseecom_ce_info_req *pinfo = &req;
8315 int ret = 0;
8316 struct qseecom_ce_info_use *p;
8317 int total = 0;
8318 int i;
8319 bool found = false;
8320
8321 ret = copy_from_user(pinfo, argp,
8322 sizeof(struct qseecom_ce_info_req));
8323 if (ret)
8324 return ret;
8325
8326 switch (pinfo->usage) {
8327 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8328 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8329 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8330 if (qseecom.support_fde) {
8331 p = qseecom.ce_info.fde;
8332 total = qseecom.ce_info.num_fde;
8333 } else {
8334 pr_err("system does not support fde\n");
8335 return -EINVAL;
8336 }
8337 break;
8338 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8339 if (qseecom.support_pfe) {
8340 p = qseecom.ce_info.pfe;
8341 total = qseecom.ce_info.num_pfe;
8342 } else {
8343 pr_err("system does not support pfe\n");
8344 return -EINVAL;
8345 }
8346 break;
8347 default:
8348 pr_err("unsupported usage %d\n", pinfo->usage);
8349 return -EINVAL;
8350 }
8351
8352 for (i = 0; i < total; i++) {
8353 if (p->alloc &&
8354 !memcmp(p->handle, pinfo->handle,
8355 MAX_CE_INFO_HANDLE_SIZE)) {
8356 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8357 p->alloc = false;
8358 found = true;
8359 break;
8360 }
8361 p++;
8362 }
8363 return ret;
8364}
8365
8366static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8367 void __user *argp)
8368{
8369 struct qseecom_ce_info_req req;
8370 struct qseecom_ce_info_req *pinfo = &req;
8371 int ret = 0;
8372 int i;
8373 unsigned int entries;
8374 struct qseecom_ce_info_use *pce_info_use, *p;
8375 int total = 0;
8376 bool found = false;
8377 struct qseecom_ce_pipe_entry *pce_entry;
8378
8379 ret = copy_from_user(pinfo, argp,
8380 sizeof(struct qseecom_ce_info_req));
8381 if (ret)
8382 return ret;
8383
8384 switch (pinfo->usage) {
8385 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8386 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8387 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8388 if (qseecom.support_fde) {
8389 p = qseecom.ce_info.fde;
8390 total = qseecom.ce_info.num_fde;
8391 } else {
8392 pr_err("system does not support fde\n");
8393 return -EINVAL;
8394 }
8395 break;
8396 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8397 if (qseecom.support_pfe) {
8398 p = qseecom.ce_info.pfe;
8399 total = qseecom.ce_info.num_pfe;
8400 } else {
8401 pr_err("system does not support pfe\n");
8402 return -EINVAL;
8403 }
8404 break;
8405 default:
8406 pr_err("unsupported usage %d\n", pinfo->usage);
8407 return -EINVAL;
8408 }
8409
8410 pce_info_use = NULL;
8411 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8412 pinfo->num_ce_pipe_entries = 0;
8413 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8414 pinfo->ce_pipe_entry[i].valid = 0;
8415
8416 for (i = 0; i < total; i++) {
8417
8418 if (p->alloc && !memcmp(p->handle,
8419 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8420 pce_info_use = p;
8421 found = true;
8422 break;
8423 }
8424 p++;
8425 }
8426 if (!pce_info_use)
8427 goto out;
8428 pinfo->unit_num = pce_info_use->unit_num;
8429 if (pce_info_use->num_ce_pipe_entries >
8430 MAX_CE_PIPE_PAIR_PER_UNIT)
8431 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8432 else
8433 entries = pce_info_use->num_ce_pipe_entries;
8434 pinfo->num_ce_pipe_entries = entries;
8435 pce_entry = pce_info_use->ce_pipe_entry;
8436 for (i = 0; i < entries; i++, pce_entry++)
8437 pinfo->ce_pipe_entry[i] = *pce_entry;
8438 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8439 pinfo->ce_pipe_entry[i].valid = 0;
8440out:
8441 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8442 pr_err("copy_to_user failed\n");
8443 ret = -EFAULT;
8444 }
8445 return ret;
8446}
8447
8448/*
8449 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8450 * then whitelist feature is not supported.
8451 */
8452static int qseecom_check_whitelist_feature(void)
8453{
8454 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8455
8456 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8457}
8458
8459static int qseecom_probe(struct platform_device *pdev)
8460{
8461 int rc;
8462 int i;
8463 uint32_t feature = 10;
8464 struct device *class_dev;
8465 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8466 struct qseecom_command_scm_resp resp;
8467 struct qseecom_ce_info_use *pce_info_use = NULL;
8468
8469 qseecom.qsee_bw_count = 0;
8470 qseecom.qsee_perf_client = 0;
8471 qseecom.qsee_sfpb_bw_count = 0;
8472
8473 qseecom.qsee.ce_core_clk = NULL;
8474 qseecom.qsee.ce_clk = NULL;
8475 qseecom.qsee.ce_core_src_clk = NULL;
8476 qseecom.qsee.ce_bus_clk = NULL;
8477
8478 qseecom.cumulative_mode = 0;
8479 qseecom.current_mode = INACTIVE;
8480 qseecom.support_bus_scaling = false;
8481 qseecom.support_fde = false;
8482 qseecom.support_pfe = false;
8483
8484 qseecom.ce_drv.ce_core_clk = NULL;
8485 qseecom.ce_drv.ce_clk = NULL;
8486 qseecom.ce_drv.ce_core_src_clk = NULL;
8487 qseecom.ce_drv.ce_bus_clk = NULL;
8488 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8489
8490 qseecom.app_block_ref_cnt = 0;
8491 init_waitqueue_head(&qseecom.app_block_wq);
8492 qseecom.whitelist_support = true;
8493
8494 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8495 if (rc < 0) {
8496 pr_err("alloc_chrdev_region failed %d\n", rc);
8497 return rc;
8498 }
8499
8500 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8501 if (IS_ERR(driver_class)) {
8502 rc = -ENOMEM;
8503 pr_err("class_create failed %d\n", rc);
8504 goto exit_unreg_chrdev_region;
8505 }
8506
8507 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8508 QSEECOM_DEV);
8509 if (IS_ERR(class_dev)) {
8510 pr_err("class_device_create failed %d\n", rc);
8511 rc = -ENOMEM;
8512 goto exit_destroy_class;
8513 }
8514
8515 cdev_init(&qseecom.cdev, &qseecom_fops);
8516 qseecom.cdev.owner = THIS_MODULE;
8517
8518 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8519 if (rc < 0) {
8520 pr_err("cdev_add failed %d\n", rc);
8521 goto exit_destroy_device;
8522 }
8523
8524 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
8525 spin_lock_init(&qseecom.registered_listener_list_lock);
8526 INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8527 spin_lock_init(&qseecom.registered_app_list_lock);
8528 INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8529 spin_lock_init(&qseecom.registered_kclient_list_lock);
8530 init_waitqueue_head(&qseecom.send_resp_wq);
8531 qseecom.send_resp_flag = 0;
8532
8533 qseecom.qsee_version = QSEEE_VERSION_00;
8534 rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8535 &resp, sizeof(resp));
8536 pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8537 if (rc) {
8538 pr_err("Failed to get QSEE version info %d\n", rc);
8539 goto exit_del_cdev;
8540 }
8541 qseecom.qsee_version = resp.result;
8542 qseecom.qseos_version = QSEOS_VERSION_14;
8543 qseecom.commonlib_loaded = false;
8544 qseecom.commonlib64_loaded = false;
8545 qseecom.pdev = class_dev;
8546 /* Create ION msm client */
8547 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8548 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8549 pr_err("Ion client cannot be created\n");
8550 rc = -ENOMEM;
8551 goto exit_del_cdev;
8552 }
8553
8554 /* register client for bus scaling */
8555 if (pdev->dev.of_node) {
8556 qseecom.pdev->of_node = pdev->dev.of_node;
8557 qseecom.support_bus_scaling =
8558 of_property_read_bool((&pdev->dev)->of_node,
8559 "qcom,support-bus-scaling");
8560 rc = qseecom_retrieve_ce_data(pdev);
8561 if (rc)
8562 goto exit_destroy_ion_client;
8563 qseecom.appsbl_qseecom_support =
8564 of_property_read_bool((&pdev->dev)->of_node,
8565 "qcom,appsbl-qseecom-support");
8566 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
8567 qseecom.appsbl_qseecom_support);
8568
8569 qseecom.commonlib64_loaded =
8570 of_property_read_bool((&pdev->dev)->of_node,
8571 "qcom,commonlib64-loaded-by-uefi");
8572 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
8573 qseecom.commonlib64_loaded);
8574 qseecom.fde_key_size =
8575 of_property_read_bool((&pdev->dev)->of_node,
8576 "qcom,fde-key-size");
8577 qseecom.no_clock_support =
8578 of_property_read_bool((&pdev->dev)->of_node,
8579 "qcom,no-clock-support");
8580 if (!qseecom.no_clock_support) {
8581 pr_info("qseecom clocks handled by other subsystem\n");
8582 } else {
8583 pr_info("no-clock-support=0x%x",
8584 qseecom.no_clock_support);
8585 }
8586
8587 if (of_property_read_u32((&pdev->dev)->of_node,
8588 "qcom,qsee-reentrancy-support",
8589 &qseecom.qsee_reentrancy_support)) {
8590 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
8591 qseecom.qsee_reentrancy_support = 0;
8592 } else {
8593 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
8594 qseecom.qsee_reentrancy_support);
8595 }
8596
8597 /*
8598 * The qseecom bus scaling flag can not be enabled when
8599 * crypto clock is not handled by HLOS.
8600 */
8601 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
8602 pr_err("support_bus_scaling flag can not be enabled.\n");
8603 rc = -EINVAL;
8604 goto exit_destroy_ion_client;
8605 }
8606
8607 if (of_property_read_u32((&pdev->dev)->of_node,
8608 "qcom,ce-opp-freq",
8609 &qseecom.ce_opp_freq_hz)) {
8610 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
8611 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
8612 }
8613 rc = __qseecom_init_clk(CLK_QSEE);
8614 if (rc)
8615 goto exit_destroy_ion_client;
8616
8617 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8618 (qseecom.support_pfe || qseecom.support_fde)) {
8619 rc = __qseecom_init_clk(CLK_CE_DRV);
8620 if (rc) {
8621 __qseecom_deinit_clk(CLK_QSEE);
8622 goto exit_destroy_ion_client;
8623 }
8624 } else {
8625 struct qseecom_clk *qclk;
8626
8627 qclk = &qseecom.qsee;
8628 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
8629 qseecom.ce_drv.ce_clk = qclk->ce_clk;
8630 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
8631 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
8632 }
8633
8634 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8635 msm_bus_cl_get_pdata(pdev);
8636 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
8637 (!qseecom.is_apps_region_protected &&
8638 !qseecom.appsbl_qseecom_support)) {
8639 struct resource *resource = NULL;
8640 struct qsee_apps_region_info_ireq req;
8641 struct qsee_apps_region_info_64bit_ireq req_64bit;
8642 struct qseecom_command_scm_resp resp;
8643 void *cmd_buf = NULL;
8644 size_t cmd_len;
8645
8646 resource = platform_get_resource_byname(pdev,
8647 IORESOURCE_MEM, "secapp-region");
8648 if (resource) {
8649 if (qseecom.qsee_version < QSEE_VERSION_40) {
8650 req.qsee_cmd_id =
8651 QSEOS_APP_REGION_NOTIFICATION;
8652 req.addr = (uint32_t)resource->start;
8653 req.size = resource_size(resource);
8654 cmd_buf = (void *)&req;
8655 cmd_len = sizeof(struct
8656 qsee_apps_region_info_ireq);
8657 pr_warn("secure app region addr=0x%x size=0x%x",
8658 req.addr, req.size);
8659 } else {
8660 req_64bit.qsee_cmd_id =
8661 QSEOS_APP_REGION_NOTIFICATION;
8662 req_64bit.addr = resource->start;
8663 req_64bit.size = resource_size(
8664 resource);
8665 cmd_buf = (void *)&req_64bit;
8666 cmd_len = sizeof(struct
8667 qsee_apps_region_info_64bit_ireq);
8668 pr_warn("secure app region addr=0x%llx size=0x%x",
8669 req_64bit.addr, req_64bit.size);
8670 }
8671 } else {
8672 pr_err("Fail to get secure app region info\n");
8673 rc = -EINVAL;
8674 goto exit_deinit_clock;
8675 }
8676 rc = __qseecom_enable_clk(CLK_QSEE);
8677 if (rc) {
8678 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
8679 rc = -EIO;
8680 goto exit_deinit_clock;
8681 }
8682 rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
8683 cmd_buf, cmd_len,
8684 &resp, sizeof(resp));
8685 __qseecom_disable_clk(CLK_QSEE);
8686 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
8687 pr_err("send secapp reg fail %d resp.res %d\n",
8688 rc, resp.result);
8689 rc = -EINVAL;
8690 goto exit_deinit_clock;
8691 }
8692 }
8693 /*
8694 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
8695 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
8696 * Pls add "qseecom.commonlib64_loaded = true" here too.
8697 */
8698 if (qseecom.is_apps_region_protected ||
8699 qseecom.appsbl_qseecom_support)
8700 qseecom.commonlib_loaded = true;
8701 } else {
8702 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8703 pdev->dev.platform_data;
8704 }
8705 if (qseecom.support_bus_scaling) {
8706 init_timer(&(qseecom.bw_scale_down_timer));
8707 INIT_WORK(&qseecom.bw_inactive_req_ws,
8708 qseecom_bw_inactive_req_work);
8709 qseecom.bw_scale_down_timer.function =
8710 qseecom_scale_bus_bandwidth_timer_callback;
8711 }
8712 qseecom.timer_running = false;
8713 qseecom.qsee_perf_client = msm_bus_scale_register_client(
8714 qseecom_platform_support);
8715
8716 qseecom.whitelist_support = qseecom_check_whitelist_feature();
8717 pr_warn("qseecom.whitelist_support = %d\n",
8718 qseecom.whitelist_support);
8719
8720 if (!qseecom.qsee_perf_client)
8721 pr_err("Unable to register bus client\n");
8722
8723 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8724 return 0;
8725
8726exit_deinit_clock:
8727 __qseecom_deinit_clk(CLK_QSEE);
8728 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8729 (qseecom.support_pfe || qseecom.support_fde))
8730 __qseecom_deinit_clk(CLK_CE_DRV);
8731exit_destroy_ion_client:
8732 if (qseecom.ce_info.fde) {
8733 pce_info_use = qseecom.ce_info.fde;
8734 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8735 kzfree(pce_info_use->ce_pipe_entry);
8736 pce_info_use++;
8737 }
8738 kfree(qseecom.ce_info.fde);
8739 }
8740 if (qseecom.ce_info.pfe) {
8741 pce_info_use = qseecom.ce_info.pfe;
8742 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8743 kzfree(pce_info_use->ce_pipe_entry);
8744 pce_info_use++;
8745 }
8746 kfree(qseecom.ce_info.pfe);
8747 }
8748 ion_client_destroy(qseecom.ion_clnt);
8749exit_del_cdev:
8750 cdev_del(&qseecom.cdev);
8751exit_destroy_device:
8752 device_destroy(driver_class, qseecom_device_no);
8753exit_destroy_class:
8754 class_destroy(driver_class);
8755exit_unreg_chrdev_region:
8756 unregister_chrdev_region(qseecom_device_no, 1);
8757 return rc;
8758}
8759
8760static int qseecom_remove(struct platform_device *pdev)
8761{
8762 struct qseecom_registered_kclient_list *kclient = NULL;
8763 unsigned long flags = 0;
8764 int ret = 0;
8765 int i;
8766 struct qseecom_ce_pipe_entry *pce_entry;
8767 struct qseecom_ce_info_use *pce_info_use;
8768
8769 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8770 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
8771
8772 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
8773 list) {
8774 if (!kclient)
8775 goto exit_irqrestore;
8776
8777 /* Break the loop if client handle is NULL */
8778 if (!kclient->handle)
8779 goto exit_free_kclient;
8780
8781 if (list_empty(&kclient->list))
8782 goto exit_free_kc_handle;
8783
8784 list_del(&kclient->list);
8785 mutex_lock(&app_access_lock);
8786 ret = qseecom_unload_app(kclient->handle->dev, false);
8787 mutex_unlock(&app_access_lock);
8788 if (!ret) {
8789 kzfree(kclient->handle->dev);
8790 kzfree(kclient->handle);
8791 kzfree(kclient);
8792 }
8793 }
8794
8795exit_free_kc_handle:
8796 kzfree(kclient->handle);
8797exit_free_kclient:
8798 kzfree(kclient);
8799exit_irqrestore:
8800 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
8801
8802 if (qseecom.qseos_version > QSEEE_VERSION_00)
8803 qseecom_unload_commonlib_image();
8804
8805 if (qseecom.qsee_perf_client)
8806 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
8807 0);
8808 if (pdev->dev.platform_data != NULL)
8809 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
8810
8811 if (qseecom.support_bus_scaling) {
8812 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8813 del_timer_sync(&qseecom.bw_scale_down_timer);
8814 }
8815
8816 if (qseecom.ce_info.fde) {
8817 pce_info_use = qseecom.ce_info.fde;
8818 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8819 pce_entry = pce_info_use->ce_pipe_entry;
8820 kfree(pce_entry);
8821 pce_info_use++;
8822 }
8823 }
8824 kfree(qseecom.ce_info.fde);
8825 if (qseecom.ce_info.pfe) {
8826 pce_info_use = qseecom.ce_info.pfe;
8827 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8828 pce_entry = pce_info_use->ce_pipe_entry;
8829 kfree(pce_entry);
8830 pce_info_use++;
8831 }
8832 }
8833 kfree(qseecom.ce_info.pfe);
8834
8835 /* register client for bus scaling */
8836 if (pdev->dev.of_node) {
8837 __qseecom_deinit_clk(CLK_QSEE);
8838 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8839 (qseecom.support_pfe || qseecom.support_fde))
8840 __qseecom_deinit_clk(CLK_CE_DRV);
8841 }
8842
8843 ion_client_destroy(qseecom.ion_clnt);
8844
8845 cdev_del(&qseecom.cdev);
8846
8847 device_destroy(driver_class, qseecom_device_no);
8848
8849 class_destroy(driver_class);
8850
8851 unregister_chrdev_region(qseecom_device_no, 1);
8852
8853 return ret;
8854}
8855
8856static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
8857{
8858 int ret = 0;
8859 struct qseecom_clk *qclk;
8860
8861 qclk = &qseecom.qsee;
8862 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
8863 if (qseecom.no_clock_support)
8864 return 0;
8865
8866 mutex_lock(&qsee_bw_mutex);
8867 mutex_lock(&clk_access_lock);
8868
8869 if (qseecom.current_mode != INACTIVE) {
8870 ret = msm_bus_scale_client_update_request(
8871 qseecom.qsee_perf_client, INACTIVE);
8872 if (ret)
8873 pr_err("Fail to scale down bus\n");
8874 else
8875 qseecom.current_mode = INACTIVE;
8876 }
8877
8878 if (qclk->clk_access_cnt) {
8879 if (qclk->ce_clk != NULL)
8880 clk_disable_unprepare(qclk->ce_clk);
8881 if (qclk->ce_core_clk != NULL)
8882 clk_disable_unprepare(qclk->ce_core_clk);
8883 if (qclk->ce_bus_clk != NULL)
8884 clk_disable_unprepare(qclk->ce_bus_clk);
8885 }
8886
8887 del_timer_sync(&(qseecom.bw_scale_down_timer));
8888 qseecom.timer_running = false;
8889
8890 mutex_unlock(&clk_access_lock);
8891 mutex_unlock(&qsee_bw_mutex);
8892 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8893
8894 return 0;
8895}
8896
/*
 * Platform resume callback: restore the accumulated bus bandwidth vote and
 * re-enable the CE clocks that were held at suspend time, then re-arm the
 * bandwidth scale-down timer and mark the driver READY again.
 *
 * Return: 0 on success, -EIO if any clock fails to re-enable (previously
 * enabled clocks are unwound via the goto chain).
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	/* Nothing to restore when clocks are owned by another subsystem */
	if (qseecom.no_clock_support)
		goto exit;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	/* Restore the cumulative vote, capped at HIGH */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	if (qseecom.cumulative_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	/*
	 * clk_access_cnt != 0 means clocks were held when suspend gated
	 * them; bring them back in core -> iface -> bus order, unwinding
	 * in reverse on failure.
	 */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}

	/* Re-arm the scale-down timer that suspend stopped */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;

ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	/* Driver is usable again even if clock restore failed */
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
8976
/* Device-tree match table: binds this driver to "qcom,qseecom" nodes. */
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}
};
8983
/* Platform driver descriptor wiring probe/remove and legacy PM callbacks. */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.owner = THIS_MODULE,
		.of_match_table = qseecom_match,
	},
};
8995
8996static int qseecom_init(void)
8997{
8998 return platform_driver_register(&qseecom_plat_driver);
8999}
9000
9001static void qseecom_exit(void)
9002{
9003 platform_driver_unregister(&qseecom_plat_driver);
9004}
9005
/* Module metadata and entry/exit hook registration */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");

module_init(qseecom_init);
module_exit(qseecom_exit);