blob: 39244560a788d5803efd927a5dfc70b735214342 [file] [log] [blame]
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
Zhen Kong3d1d92f2018-02-02 17:21:04 -08004 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
54#define QSEECOM_DEV "qseecom"
55#define QSEOS_VERSION_14 0x14
56#define QSEEE_VERSION_00 0x400000
57#define QSEE_VERSION_01 0x401000
58#define QSEE_VERSION_02 0x402000
59#define QSEE_VERSION_03 0x403000
60#define QSEE_VERSION_04 0x404000
61#define QSEE_VERSION_05 0x405000
62#define QSEE_VERSION_20 0x800000
63#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
64
65#define QSEE_CE_CLK_100MHZ 100000000
66#define CE_CLK_DIV 1000000
67
68#define QSEECOM_MAX_SG_ENTRY 512
69#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
70 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
71
72#define QSEECOM_INVALID_KEY_ID 0xff
73
74/* Save partition image hash for authentication check */
75#define SCM_SAVE_PARTITION_HASH_ID 0x01
76
77/* Check if enterprise security is activate */
78#define SCM_IS_ACTIVATED_ID 0x02
79
80/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
81#define SCM_MDTP_CIPHER_DIP 0x01
82
83/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
84#define MAX_DIP 0x20000
85
86#define RPMB_SERVICE 0x2000
87#define SSD_SERVICE 0x3000
88
89#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
90#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
91#define TWO 2
92#define QSEECOM_UFS_ICE_CE_NUM 10
93#define QSEECOM_SDCC_ICE_CE_NUM 20
94#define QSEECOM_ICE_FDE_KEY_INDEX 0
95
96#define PHY_ADDR_4G (1ULL<<32)
97
98#define QSEECOM_STATE_NOT_READY 0
99#define QSEECOM_STATE_SUSPEND 1
100#define QSEECOM_STATE_READY 2
101#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
102
103/*
104 * default ce info unit to 0 for
105 * services which
106 * support only single instance.
107 * Most of services are in this category.
108 */
109#define DEFAULT_CE_INFO_UNIT 0
110#define DEFAULT_NUM_CE_INFO_UNIT 1
111
112enum qseecom_clk_definitions {
113 CLK_DFAB = 0,
114 CLK_SFPB,
115};
116
117enum qseecom_ice_key_size_type {
118 QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
119 (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
120 QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
121 (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
122 QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
123 (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
124};
125
126enum qseecom_client_handle_type {
127 QSEECOM_CLIENT_APP = 1,
128 QSEECOM_LISTENER_SERVICE,
129 QSEECOM_SECURE_SERVICE,
130 QSEECOM_GENERIC,
131 QSEECOM_UNAVAILABLE_CLIENT_APP,
132};
133
134enum qseecom_ce_hw_instance {
135 CLK_QSEE = 0,
136 CLK_CE_DRV,
137 CLK_INVALID,
138};
139
140static struct class *driver_class;
141static dev_t qseecom_device_no;
142
143static DEFINE_MUTEX(qsee_bw_mutex);
144static DEFINE_MUTEX(app_access_lock);
145static DEFINE_MUTEX(clk_access_lock);
146
147struct sglist_info {
148 uint32_t indexAndFlags;
149 uint32_t sizeOrCount;
150};
151
152/*
153 * The 31th bit indicates only one or multiple physical address inside
154 * the request buffer. If it is set, the index locates a single physical addr
155 * inside the request buffer, and `sizeOrCount` is the size of the memory being
156 * shared at that physical address.
157 * Otherwise, the index locates an array of {start, len} pairs (a
158 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
159 * that array.
160 *
161 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
162 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
163 *
164 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
165 */
166#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
167 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
168
169#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
170
171#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
172
173#define MAKE_WHITELIST_VERSION(major, minor, patch) \
174 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
175
176struct qseecom_registered_listener_list {
177 struct list_head list;
178 struct qseecom_register_listener_req svc;
179 void *user_virt_sb_base;
180 u8 *sb_virt;
181 phys_addr_t sb_phys;
182 size_t sb_length;
183 struct ion_handle *ihandle; /* Retrieve phy addr */
184 wait_queue_head_t rcv_req_wq;
185 int rcv_req_flag;
186 int send_resp_flag;
187 bool listener_in_use;
188 /* wq for thread blocked on this listener*/
189 wait_queue_head_t listener_block_app_wq;
190 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
191 uint32_t sglist_cnt;
Zhen Kong26e62742018-05-04 17:19:06 -0700192 int abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700193};
194
195struct qseecom_registered_app_list {
196 struct list_head list;
197 u32 app_id;
198 u32 ref_cnt;
199 char app_name[MAX_APP_NAME_SIZE];
200 u32 app_arch;
201 bool app_blocked;
202 u32 blocked_on_listener_id;
203};
204
205struct qseecom_registered_kclient_list {
206 struct list_head list;
207 struct qseecom_handle *handle;
208};
209
210struct qseecom_ce_info_use {
211 unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
212 unsigned int unit_num;
213 unsigned int num_ce_pipe_entries;
214 struct qseecom_ce_pipe_entry *ce_pipe_entry;
215 bool alloc;
216 uint32_t type;
217};
218
219struct ce_hw_usage_info {
220 uint32_t qsee_ce_hw_instance;
221 uint32_t num_fde;
222 struct qseecom_ce_info_use *fde;
223 uint32_t num_pfe;
224 struct qseecom_ce_info_use *pfe;
225};
226
227struct qseecom_clk {
228 enum qseecom_ce_hw_instance instance;
229 struct clk *ce_core_clk;
230 struct clk *ce_clk;
231 struct clk *ce_core_src_clk;
232 struct clk *ce_bus_clk;
233 uint32_t clk_access_cnt;
234};
235
236struct qseecom_control {
237 struct ion_client *ion_clnt; /* Ion client */
238 struct list_head registered_listener_list_head;
239 spinlock_t registered_listener_list_lock;
240
241 struct list_head registered_app_list_head;
242 spinlock_t registered_app_list_lock;
243
244 struct list_head registered_kclient_list_head;
245 spinlock_t registered_kclient_list_lock;
246
247 wait_queue_head_t send_resp_wq;
248 int send_resp_flag;
249
250 uint32_t qseos_version;
251 uint32_t qsee_version;
252 struct device *pdev;
253 bool whitelist_support;
254 bool commonlib_loaded;
255 bool commonlib64_loaded;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700256 struct ce_hw_usage_info ce_info;
257
258 int qsee_bw_count;
259 int qsee_sfpb_bw_count;
260
261 uint32_t qsee_perf_client;
262 struct qseecom_clk qsee;
263 struct qseecom_clk ce_drv;
264
265 bool support_bus_scaling;
266 bool support_fde;
267 bool support_pfe;
268 bool fde_key_size;
269 uint32_t cumulative_mode;
270 enum qseecom_bandwidth_request_mode current_mode;
271 struct timer_list bw_scale_down_timer;
272 struct work_struct bw_inactive_req_ws;
273 struct cdev cdev;
274 bool timer_running;
275 bool no_clock_support;
276 unsigned int ce_opp_freq_hz;
277 bool appsbl_qseecom_support;
278 uint32_t qsee_reentrancy_support;
279
280 uint32_t app_block_ref_cnt;
281 wait_queue_head_t app_block_wq;
282 atomic_t qseecom_state;
283 int is_apps_region_protected;
Zhen Kong2f60f492017-06-29 15:22:14 -0700284 bool smcinvoke_support;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700285};
286
287struct qseecom_sec_buf_fd_info {
288 bool is_sec_buf_fd;
289 size_t size;
290 void *vbase;
291 dma_addr_t pbase;
292};
293
294struct qseecom_param_memref {
295 uint32_t buffer;
296 uint32_t size;
297};
298
299struct qseecom_client_handle {
300 u32 app_id;
301 u8 *sb_virt;
302 phys_addr_t sb_phys;
303 unsigned long user_virt_sb_base;
304 size_t sb_length;
305 struct ion_handle *ihandle; /* Retrieve phy addr */
306 char app_name[MAX_APP_NAME_SIZE];
307 u32 app_arch;
308 struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
309};
310
311struct qseecom_listener_handle {
312 u32 id;
313};
314
315static struct qseecom_control qseecom;
316
317struct qseecom_dev_handle {
318 enum qseecom_client_handle_type type;
319 union {
320 struct qseecom_client_handle client;
321 struct qseecom_listener_handle listener;
322 };
323 bool released;
324 int abort;
325 wait_queue_head_t abort_wq;
326 atomic_t ioctl_count;
327 bool perf_enabled;
328 bool fast_load_enabled;
329 enum qseecom_bandwidth_request_mode mode;
330 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
331 uint32_t sglist_cnt;
332 bool use_legacy_cmd;
333};
334
335struct qseecom_key_id_usage_desc {
336 uint8_t desc[QSEECOM_KEY_ID_SIZE];
337};
338
339struct qseecom_crypto_info {
340 unsigned int unit_num;
341 unsigned int ce;
342 unsigned int pipe_pair;
343};
344
345static struct qseecom_key_id_usage_desc key_id_array[] = {
346 {
347 .desc = "Undefined Usage Index",
348 },
349
350 {
351 .desc = "Full Disk Encryption",
352 },
353
354 {
355 .desc = "Per File Encryption",
356 },
357
358 {
359 .desc = "UFS ICE Full Disk Encryption",
360 },
361
362 {
363 .desc = "SDCC ICE Full Disk Encryption",
364 },
365};
366
367/* Function proto types */
368static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
369static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
370static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
371static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
372static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
373static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
374 char *cmnlib_name);
375static int qseecom_enable_ice_setup(int usage);
376static int qseecom_disable_ice_setup(int usage);
377static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
378static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
379 void __user *argp);
380static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
381 void __user *argp);
382static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
383 void __user *argp);
384
385static int get_qseecom_keymaster_status(char *str)
386{
387 get_option(&str, &qseecom.is_apps_region_protected);
388 return 1;
389}
390__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
391
392static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
393 const void *req_buf, void *resp_buf)
394{
395 int ret = 0;
396 uint32_t smc_id = 0;
397 uint32_t qseos_cmd_id = 0;
398 struct scm_desc desc = {0};
399 struct qseecom_command_scm_resp *scm_resp = NULL;
400
401 if (!req_buf || !resp_buf) {
402 pr_err("Invalid buffer pointer\n");
403 return -EINVAL;
404 }
405 qseos_cmd_id = *(uint32_t *)req_buf;
406 scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
407
408 switch (svc_id) {
409 case 6: {
410 if (tz_cmd_id == 3) {
411 smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
412 desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
413 desc.args[0] = *(uint32_t *)req_buf;
414 } else {
415 pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
416 svc_id, tz_cmd_id);
417 return -EINVAL;
418 }
419 ret = scm_call2(smc_id, &desc);
420 break;
421 }
422 case SCM_SVC_ES: {
423 switch (tz_cmd_id) {
424 case SCM_SAVE_PARTITION_HASH_ID: {
425 u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
426 struct qseecom_save_partition_hash_req *p_hash_req =
427 (struct qseecom_save_partition_hash_req *)
428 req_buf;
429 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
430
431 if (!tzbuf)
432 return -ENOMEM;
433 memset(tzbuf, 0, tzbuflen);
434 memcpy(tzbuf, p_hash_req->digest,
435 SHA256_DIGEST_LENGTH);
436 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
437 smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
438 desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
439 desc.args[0] = p_hash_req->partition_id;
440 desc.args[1] = virt_to_phys(tzbuf);
441 desc.args[2] = SHA256_DIGEST_LENGTH;
442 ret = scm_call2(smc_id, &desc);
443 kzfree(tzbuf);
444 break;
445 }
446 default: {
447 pr_err("tz_cmd_id %d is not supported by scm_call2\n",
448 tz_cmd_id);
449 ret = -EINVAL;
450 break;
451 }
452 } /* end of switch (tz_cmd_id) */
453 break;
454 } /* end of case SCM_SVC_ES */
455 case SCM_SVC_TZSCHEDULER: {
456 switch (qseos_cmd_id) {
457 case QSEOS_APP_START_COMMAND: {
458 struct qseecom_load_app_ireq *req;
459 struct qseecom_load_app_64bit_ireq *req_64bit;
460
461 smc_id = TZ_OS_APP_START_ID;
462 desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
463 if (qseecom.qsee_version < QSEE_VERSION_40) {
464 req = (struct qseecom_load_app_ireq *)req_buf;
465 desc.args[0] = req->mdt_len;
466 desc.args[1] = req->img_len;
467 desc.args[2] = req->phy_addr;
468 } else {
469 req_64bit =
470 (struct qseecom_load_app_64bit_ireq *)
471 req_buf;
472 desc.args[0] = req_64bit->mdt_len;
473 desc.args[1] = req_64bit->img_len;
474 desc.args[2] = req_64bit->phy_addr;
475 }
476 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
477 ret = scm_call2(smc_id, &desc);
478 break;
479 }
480 case QSEOS_APP_SHUTDOWN_COMMAND: {
481 struct qseecom_unload_app_ireq *req;
482
483 req = (struct qseecom_unload_app_ireq *)req_buf;
484 smc_id = TZ_OS_APP_SHUTDOWN_ID;
485 desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
486 desc.args[0] = req->app_id;
487 ret = scm_call2(smc_id, &desc);
488 break;
489 }
490 case QSEOS_APP_LOOKUP_COMMAND: {
491 struct qseecom_check_app_ireq *req;
492 u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
493 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
494
495 if (!tzbuf)
496 return -ENOMEM;
497 req = (struct qseecom_check_app_ireq *)req_buf;
498 pr_debug("Lookup app_name = %s\n", req->app_name);
499 strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
500 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
501 smc_id = TZ_OS_APP_LOOKUP_ID;
502 desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
503 desc.args[0] = virt_to_phys(tzbuf);
504 desc.args[1] = strlen(req->app_name);
505 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
506 ret = scm_call2(smc_id, &desc);
507 kzfree(tzbuf);
508 break;
509 }
510 case QSEOS_APP_REGION_NOTIFICATION: {
511 struct qsee_apps_region_info_ireq *req;
512 struct qsee_apps_region_info_64bit_ireq *req_64bit;
513
514 smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
515 desc.arginfo =
516 TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
517 if (qseecom.qsee_version < QSEE_VERSION_40) {
518 req = (struct qsee_apps_region_info_ireq *)
519 req_buf;
520 desc.args[0] = req->addr;
521 desc.args[1] = req->size;
522 } else {
523 req_64bit =
524 (struct qsee_apps_region_info_64bit_ireq *)
525 req_buf;
526 desc.args[0] = req_64bit->addr;
527 desc.args[1] = req_64bit->size;
528 }
529 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
530 ret = scm_call2(smc_id, &desc);
531 break;
532 }
533 case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
534 struct qseecom_load_lib_image_ireq *req;
535 struct qseecom_load_lib_image_64bit_ireq *req_64bit;
536
537 smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
538 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
539 if (qseecom.qsee_version < QSEE_VERSION_40) {
540 req = (struct qseecom_load_lib_image_ireq *)
541 req_buf;
542 desc.args[0] = req->mdt_len;
543 desc.args[1] = req->img_len;
544 desc.args[2] = req->phy_addr;
545 } else {
546 req_64bit =
547 (struct qseecom_load_lib_image_64bit_ireq *)
548 req_buf;
549 desc.args[0] = req_64bit->mdt_len;
550 desc.args[1] = req_64bit->img_len;
551 desc.args[2] = req_64bit->phy_addr;
552 }
553 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
554 ret = scm_call2(smc_id, &desc);
555 break;
556 }
557 case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
558 smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
559 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
560 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
561 ret = scm_call2(smc_id, &desc);
562 break;
563 }
564 case QSEOS_REGISTER_LISTENER: {
565 struct qseecom_register_listener_ireq *req;
566 struct qseecom_register_listener_64bit_ireq *req_64bit;
567
568 desc.arginfo =
569 TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
570 if (qseecom.qsee_version < QSEE_VERSION_40) {
571 req = (struct qseecom_register_listener_ireq *)
572 req_buf;
573 desc.args[0] = req->listener_id;
574 desc.args[1] = req->sb_ptr;
575 desc.args[2] = req->sb_len;
576 } else {
577 req_64bit =
578 (struct qseecom_register_listener_64bit_ireq *)
579 req_buf;
580 desc.args[0] = req_64bit->listener_id;
581 desc.args[1] = req_64bit->sb_ptr;
582 desc.args[2] = req_64bit->sb_len;
583 }
Zhen Kong2f60f492017-06-29 15:22:14 -0700584 qseecom.smcinvoke_support = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700585 smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
586 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
587 ret = scm_call2(smc_id, &desc);
588 if (ret) {
Zhen Kong2f60f492017-06-29 15:22:14 -0700589 qseecom.smcinvoke_support = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700590 smc_id = TZ_OS_REGISTER_LISTENER_ID;
591 __qseecom_reentrancy_check_if_no_app_blocked(
592 smc_id);
593 ret = scm_call2(smc_id, &desc);
594 }
595 break;
596 }
597 case QSEOS_DEREGISTER_LISTENER: {
598 struct qseecom_unregister_listener_ireq *req;
599
600 req = (struct qseecom_unregister_listener_ireq *)
601 req_buf;
602 smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
603 desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
604 desc.args[0] = req->listener_id;
605 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
606 ret = scm_call2(smc_id, &desc);
607 break;
608 }
609 case QSEOS_LISTENER_DATA_RSP_COMMAND: {
610 struct qseecom_client_listener_data_irsp *req;
611
612 req = (struct qseecom_client_listener_data_irsp *)
613 req_buf;
614 smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
615 desc.arginfo =
616 TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
617 desc.args[0] = req->listener_id;
618 desc.args[1] = req->status;
619 ret = scm_call2(smc_id, &desc);
620 break;
621 }
622 case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
623 struct qseecom_client_listener_data_irsp *req;
624 struct qseecom_client_listener_data_64bit_irsp *req_64;
625
626 smc_id =
627 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
628 desc.arginfo =
629 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
630 if (qseecom.qsee_version < QSEE_VERSION_40) {
631 req =
632 (struct qseecom_client_listener_data_irsp *)
633 req_buf;
634 desc.args[0] = req->listener_id;
635 desc.args[1] = req->status;
636 desc.args[2] = req->sglistinfo_ptr;
637 desc.args[3] = req->sglistinfo_len;
638 } else {
639 req_64 =
640 (struct qseecom_client_listener_data_64bit_irsp *)
641 req_buf;
642 desc.args[0] = req_64->listener_id;
643 desc.args[1] = req_64->status;
644 desc.args[2] = req_64->sglistinfo_ptr;
645 desc.args[3] = req_64->sglistinfo_len;
646 }
647 ret = scm_call2(smc_id, &desc);
648 break;
649 }
650 case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
651 struct qseecom_load_app_ireq *req;
652 struct qseecom_load_app_64bit_ireq *req_64bit;
653
654 smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
655 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
656 if (qseecom.qsee_version < QSEE_VERSION_40) {
657 req = (struct qseecom_load_app_ireq *)req_buf;
658 desc.args[0] = req->mdt_len;
659 desc.args[1] = req->img_len;
660 desc.args[2] = req->phy_addr;
661 } else {
662 req_64bit =
663 (struct qseecom_load_app_64bit_ireq *)req_buf;
664 desc.args[0] = req_64bit->mdt_len;
665 desc.args[1] = req_64bit->img_len;
666 desc.args[2] = req_64bit->phy_addr;
667 }
668 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
669 ret = scm_call2(smc_id, &desc);
670 break;
671 }
672 case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
673 smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
674 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
675 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
676 ret = scm_call2(smc_id, &desc);
677 break;
678 }
679
680 case QSEOS_CLIENT_SEND_DATA_COMMAND: {
681 struct qseecom_client_send_data_ireq *req;
682 struct qseecom_client_send_data_64bit_ireq *req_64bit;
683
684 smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
685 desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
686 if (qseecom.qsee_version < QSEE_VERSION_40) {
687 req = (struct qseecom_client_send_data_ireq *)
688 req_buf;
689 desc.args[0] = req->app_id;
690 desc.args[1] = req->req_ptr;
691 desc.args[2] = req->req_len;
692 desc.args[3] = req->rsp_ptr;
693 desc.args[4] = req->rsp_len;
694 } else {
695 req_64bit =
696 (struct qseecom_client_send_data_64bit_ireq *)
697 req_buf;
698 desc.args[0] = req_64bit->app_id;
699 desc.args[1] = req_64bit->req_ptr;
700 desc.args[2] = req_64bit->req_len;
701 desc.args[3] = req_64bit->rsp_ptr;
702 desc.args[4] = req_64bit->rsp_len;
703 }
704 ret = scm_call2(smc_id, &desc);
705 break;
706 }
707 case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
708 struct qseecom_client_send_data_ireq *req;
709 struct qseecom_client_send_data_64bit_ireq *req_64bit;
710
711 smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
712 desc.arginfo =
713 TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
714 if (qseecom.qsee_version < QSEE_VERSION_40) {
715 req = (struct qseecom_client_send_data_ireq *)
716 req_buf;
717 desc.args[0] = req->app_id;
718 desc.args[1] = req->req_ptr;
719 desc.args[2] = req->req_len;
720 desc.args[3] = req->rsp_ptr;
721 desc.args[4] = req->rsp_len;
722 desc.args[5] = req->sglistinfo_ptr;
723 desc.args[6] = req->sglistinfo_len;
724 } else {
725 req_64bit =
726 (struct qseecom_client_send_data_64bit_ireq *)
727 req_buf;
728 desc.args[0] = req_64bit->app_id;
729 desc.args[1] = req_64bit->req_ptr;
730 desc.args[2] = req_64bit->req_len;
731 desc.args[3] = req_64bit->rsp_ptr;
732 desc.args[4] = req_64bit->rsp_len;
733 desc.args[5] = req_64bit->sglistinfo_ptr;
734 desc.args[6] = req_64bit->sglistinfo_len;
735 }
736 ret = scm_call2(smc_id, &desc);
737 break;
738 }
739 case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
740 struct qseecom_client_send_service_ireq *req;
741
742 req = (struct qseecom_client_send_service_ireq *)
743 req_buf;
744 smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
745 desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
746 desc.args[0] = req->key_type;
747 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
748 ret = scm_call2(smc_id, &desc);
749 break;
750 }
751 case QSEOS_RPMB_ERASE_COMMAND: {
752 smc_id = TZ_OS_RPMB_ERASE_ID;
753 desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
754 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
755 ret = scm_call2(smc_id, &desc);
756 break;
757 }
758 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
759 smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
760 desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
761 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
762 ret = scm_call2(smc_id, &desc);
763 break;
764 }
765 case QSEOS_GENERATE_KEY: {
766 u32 tzbuflen = PAGE_ALIGN(sizeof
767 (struct qseecom_key_generate_ireq) -
768 sizeof(uint32_t));
769 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
770
771 if (!tzbuf)
772 return -ENOMEM;
773 memset(tzbuf, 0, tzbuflen);
774 memcpy(tzbuf, req_buf + sizeof(uint32_t),
775 (sizeof(struct qseecom_key_generate_ireq) -
776 sizeof(uint32_t)));
777 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
778 smc_id = TZ_OS_KS_GEN_KEY_ID;
779 desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
780 desc.args[0] = virt_to_phys(tzbuf);
781 desc.args[1] = tzbuflen;
782 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
783 ret = scm_call2(smc_id, &desc);
784 kzfree(tzbuf);
785 break;
786 }
787 case QSEOS_DELETE_KEY: {
788 u32 tzbuflen = PAGE_ALIGN(sizeof
789 (struct qseecom_key_delete_ireq) -
790 sizeof(uint32_t));
791 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
792
793 if (!tzbuf)
794 return -ENOMEM;
795 memset(tzbuf, 0, tzbuflen);
796 memcpy(tzbuf, req_buf + sizeof(uint32_t),
797 (sizeof(struct qseecom_key_delete_ireq) -
798 sizeof(uint32_t)));
799 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
800 smc_id = TZ_OS_KS_DEL_KEY_ID;
801 desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
802 desc.args[0] = virt_to_phys(tzbuf);
803 desc.args[1] = tzbuflen;
804 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
805 ret = scm_call2(smc_id, &desc);
806 kzfree(tzbuf);
807 break;
808 }
809 case QSEOS_SET_KEY: {
810 u32 tzbuflen = PAGE_ALIGN(sizeof
811 (struct qseecom_key_select_ireq) -
812 sizeof(uint32_t));
813 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
814
815 if (!tzbuf)
816 return -ENOMEM;
817 memset(tzbuf, 0, tzbuflen);
818 memcpy(tzbuf, req_buf + sizeof(uint32_t),
819 (sizeof(struct qseecom_key_select_ireq) -
820 sizeof(uint32_t)));
821 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
822 smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
823 desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
824 desc.args[0] = virt_to_phys(tzbuf);
825 desc.args[1] = tzbuflen;
826 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
827 ret = scm_call2(smc_id, &desc);
828 kzfree(tzbuf);
829 break;
830 }
831 case QSEOS_UPDATE_KEY_USERINFO: {
832 u32 tzbuflen = PAGE_ALIGN(sizeof
833 (struct qseecom_key_userinfo_update_ireq) -
834 sizeof(uint32_t));
835 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
836
837 if (!tzbuf)
838 return -ENOMEM;
839 memset(tzbuf, 0, tzbuflen);
840 memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
841 (struct qseecom_key_userinfo_update_ireq) -
842 sizeof(uint32_t)));
843 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
844 smc_id = TZ_OS_KS_UPDATE_KEY_ID;
845 desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
846 desc.args[0] = virt_to_phys(tzbuf);
847 desc.args[1] = tzbuflen;
848 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
849 ret = scm_call2(smc_id, &desc);
850 kzfree(tzbuf);
851 break;
852 }
853 case QSEOS_TEE_OPEN_SESSION: {
854 struct qseecom_qteec_ireq *req;
855 struct qseecom_qteec_64bit_ireq *req_64bit;
856
857 smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
858 desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
859 if (qseecom.qsee_version < QSEE_VERSION_40) {
860 req = (struct qseecom_qteec_ireq *)req_buf;
861 desc.args[0] = req->app_id;
862 desc.args[1] = req->req_ptr;
863 desc.args[2] = req->req_len;
864 desc.args[3] = req->resp_ptr;
865 desc.args[4] = req->resp_len;
866 } else {
867 req_64bit = (struct qseecom_qteec_64bit_ireq *)
868 req_buf;
869 desc.args[0] = req_64bit->app_id;
870 desc.args[1] = req_64bit->req_ptr;
871 desc.args[2] = req_64bit->req_len;
872 desc.args[3] = req_64bit->resp_ptr;
873 desc.args[4] = req_64bit->resp_len;
874 }
875 ret = scm_call2(smc_id, &desc);
876 break;
877 }
878 case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
879 struct qseecom_qteec_ireq *req;
880 struct qseecom_qteec_64bit_ireq *req_64bit;
881
882 smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
883 desc.arginfo =
884 TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
885 if (qseecom.qsee_version < QSEE_VERSION_40) {
886 req = (struct qseecom_qteec_ireq *)req_buf;
887 desc.args[0] = req->app_id;
888 desc.args[1] = req->req_ptr;
889 desc.args[2] = req->req_len;
890 desc.args[3] = req->resp_ptr;
891 desc.args[4] = req->resp_len;
892 desc.args[5] = req->sglistinfo_ptr;
893 desc.args[6] = req->sglistinfo_len;
894 } else {
895 req_64bit = (struct qseecom_qteec_64bit_ireq *)
896 req_buf;
897 desc.args[0] = req_64bit->app_id;
898 desc.args[1] = req_64bit->req_ptr;
899 desc.args[2] = req_64bit->req_len;
900 desc.args[3] = req_64bit->resp_ptr;
901 desc.args[4] = req_64bit->resp_len;
902 desc.args[5] = req_64bit->sglistinfo_ptr;
903 desc.args[6] = req_64bit->sglistinfo_len;
904 }
905 ret = scm_call2(smc_id, &desc);
906 break;
907 }
908 case QSEOS_TEE_INVOKE_COMMAND: {
909 struct qseecom_qteec_ireq *req;
910 struct qseecom_qteec_64bit_ireq *req_64bit;
911
912 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
913 desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
914 if (qseecom.qsee_version < QSEE_VERSION_40) {
915 req = (struct qseecom_qteec_ireq *)req_buf;
916 desc.args[0] = req->app_id;
917 desc.args[1] = req->req_ptr;
918 desc.args[2] = req->req_len;
919 desc.args[3] = req->resp_ptr;
920 desc.args[4] = req->resp_len;
921 } else {
922 req_64bit = (struct qseecom_qteec_64bit_ireq *)
923 req_buf;
924 desc.args[0] = req_64bit->app_id;
925 desc.args[1] = req_64bit->req_ptr;
926 desc.args[2] = req_64bit->req_len;
927 desc.args[3] = req_64bit->resp_ptr;
928 desc.args[4] = req_64bit->resp_len;
929 }
930 ret = scm_call2(smc_id, &desc);
931 break;
932 }
933 case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
934 struct qseecom_qteec_ireq *req;
935 struct qseecom_qteec_64bit_ireq *req_64bit;
936
937 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
938 desc.arginfo =
939 TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
940 if (qseecom.qsee_version < QSEE_VERSION_40) {
941 req = (struct qseecom_qteec_ireq *)req_buf;
942 desc.args[0] = req->app_id;
943 desc.args[1] = req->req_ptr;
944 desc.args[2] = req->req_len;
945 desc.args[3] = req->resp_ptr;
946 desc.args[4] = req->resp_len;
947 desc.args[5] = req->sglistinfo_ptr;
948 desc.args[6] = req->sglistinfo_len;
949 } else {
950 req_64bit = (struct qseecom_qteec_64bit_ireq *)
951 req_buf;
952 desc.args[0] = req_64bit->app_id;
953 desc.args[1] = req_64bit->req_ptr;
954 desc.args[2] = req_64bit->req_len;
955 desc.args[3] = req_64bit->resp_ptr;
956 desc.args[4] = req_64bit->resp_len;
957 desc.args[5] = req_64bit->sglistinfo_ptr;
958 desc.args[6] = req_64bit->sglistinfo_len;
959 }
960 ret = scm_call2(smc_id, &desc);
961 break;
962 }
963 case QSEOS_TEE_CLOSE_SESSION: {
964 struct qseecom_qteec_ireq *req;
965 struct qseecom_qteec_64bit_ireq *req_64bit;
966
967 smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
968 desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
969 if (qseecom.qsee_version < QSEE_VERSION_40) {
970 req = (struct qseecom_qteec_ireq *)req_buf;
971 desc.args[0] = req->app_id;
972 desc.args[1] = req->req_ptr;
973 desc.args[2] = req->req_len;
974 desc.args[3] = req->resp_ptr;
975 desc.args[4] = req->resp_len;
976 } else {
977 req_64bit = (struct qseecom_qteec_64bit_ireq *)
978 req_buf;
979 desc.args[0] = req_64bit->app_id;
980 desc.args[1] = req_64bit->req_ptr;
981 desc.args[2] = req_64bit->req_len;
982 desc.args[3] = req_64bit->resp_ptr;
983 desc.args[4] = req_64bit->resp_len;
984 }
985 ret = scm_call2(smc_id, &desc);
986 break;
987 }
988 case QSEOS_TEE_REQUEST_CANCELLATION: {
989 struct qseecom_qteec_ireq *req;
990 struct qseecom_qteec_64bit_ireq *req_64bit;
991
992 smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
993 desc.arginfo =
994 TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
995 if (qseecom.qsee_version < QSEE_VERSION_40) {
996 req = (struct qseecom_qteec_ireq *)req_buf;
997 desc.args[0] = req->app_id;
998 desc.args[1] = req->req_ptr;
999 desc.args[2] = req->req_len;
1000 desc.args[3] = req->resp_ptr;
1001 desc.args[4] = req->resp_len;
1002 } else {
1003 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1004 req_buf;
1005 desc.args[0] = req_64bit->app_id;
1006 desc.args[1] = req_64bit->req_ptr;
1007 desc.args[2] = req_64bit->req_len;
1008 desc.args[3] = req_64bit->resp_ptr;
1009 desc.args[4] = req_64bit->resp_len;
1010 }
1011 ret = scm_call2(smc_id, &desc);
1012 break;
1013 }
1014 case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
1015 struct qseecom_continue_blocked_request_ireq *req =
1016 (struct qseecom_continue_blocked_request_ireq *)
1017 req_buf;
Zhen Kong2f60f492017-06-29 15:22:14 -07001018 if (qseecom.smcinvoke_support)
1019 smc_id =
1020 TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
1021 else
1022 smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001023 desc.arginfo =
1024 TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
Zhen Kong2f60f492017-06-29 15:22:14 -07001025 desc.args[0] = req->app_or_session_id;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001026 ret = scm_call2(smc_id, &desc);
1027 break;
1028 }
1029 default: {
1030 pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
1031 qseos_cmd_id);
1032 ret = -EINVAL;
1033 break;
1034 }
1035 } /*end of switch (qsee_cmd_id) */
1036 break;
1037 } /*end of case SCM_SVC_TZSCHEDULER*/
1038 default: {
1039 pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
1040 svc_id);
1041 ret = -EINVAL;
1042 break;
1043 }
1044 } /*end of switch svc_id */
1045 scm_resp->result = desc.ret[0];
1046 scm_resp->resp_type = desc.ret[1];
1047 scm_resp->data = desc.ret[2];
1048 pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
1049 svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
1050 pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
1051 scm_resp->result, scm_resp->resp_type, scm_resp->data);
1052 return ret;
1053}
1054
1055
1056static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1057 size_t cmd_len, void *resp_buf, size_t resp_len)
1058{
1059 if (!is_scm_armv8())
1060 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1061 resp_buf, resp_len);
1062 else
1063 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1064}
1065
1066static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1067 struct qseecom_register_listener_req *svc)
1068{
1069 struct qseecom_registered_listener_list *ptr;
1070 int unique = 1;
1071 unsigned long flags;
1072
1073 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1074 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1075 if (ptr->svc.listener_id == svc->listener_id) {
1076 pr_err("Service id: %u is already registered\n",
1077 ptr->svc.listener_id);
1078 unique = 0;
1079 break;
1080 }
1081 }
1082 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1083 return unique;
1084}
1085
1086static struct qseecom_registered_listener_list *__qseecom_find_svc(
1087 int32_t listener_id)
1088{
1089 struct qseecom_registered_listener_list *entry = NULL;
1090 unsigned long flags;
1091
1092 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1093 list_for_each_entry(entry,
1094 &qseecom.registered_listener_list_head, list) {
1095 if (entry->svc.listener_id == listener_id)
1096 break;
1097 }
1098 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1099
1100 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1101 pr_err("Service id: %u is not found\n", listener_id);
1102 return NULL;
1103 }
1104
1105 return entry;
1106}
1107
1108static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1109 struct qseecom_dev_handle *handle,
1110 struct qseecom_register_listener_req *listener)
1111{
1112 int ret = 0;
1113 struct qseecom_register_listener_ireq req;
1114 struct qseecom_register_listener_64bit_ireq req_64bit;
1115 struct qseecom_command_scm_resp resp;
1116 ion_phys_addr_t pa;
1117 void *cmd_buf = NULL;
1118 size_t cmd_len;
1119
1120 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001121 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001122 listener->ifd_data_fd);
1123 if (IS_ERR_OR_NULL(svc->ihandle)) {
1124 pr_err("Ion client could not retrieve the handle\n");
1125 return -ENOMEM;
1126 }
1127
1128 /* Get the physical address of the ION BUF */
1129 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1130 if (ret) {
1131 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1132 ret);
1133 return ret;
1134 }
1135 /* Populate the structure for sending scm call to load image */
1136 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1137 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1138 pr_err("ION memory mapping for listener shared buffer failed\n");
1139 return -ENOMEM;
1140 }
1141 svc->sb_phys = (phys_addr_t)pa;
1142
1143 if (qseecom.qsee_version < QSEE_VERSION_40) {
1144 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1145 req.listener_id = svc->svc.listener_id;
1146 req.sb_len = svc->sb_length;
1147 req.sb_ptr = (uint32_t)svc->sb_phys;
1148 cmd_buf = (void *)&req;
1149 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1150 } else {
1151 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1152 req_64bit.listener_id = svc->svc.listener_id;
1153 req_64bit.sb_len = svc->sb_length;
1154 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1155 cmd_buf = (void *)&req_64bit;
1156 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1157 }
1158
1159 resp.result = QSEOS_RESULT_INCOMPLETE;
1160
1161 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1162 &resp, sizeof(resp));
1163 if (ret) {
1164 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1165 return -EINVAL;
1166 }
1167
1168 if (resp.result != QSEOS_RESULT_SUCCESS) {
1169 pr_err("Error SB registration req: resp.result = %d\n",
1170 resp.result);
1171 return -EPERM;
1172 }
1173 return 0;
1174}
1175
1176static int qseecom_register_listener(struct qseecom_dev_handle *data,
1177 void __user *argp)
1178{
1179 int ret = 0;
1180 unsigned long flags;
1181 struct qseecom_register_listener_req rcvd_lstnr;
1182 struct qseecom_registered_listener_list *new_entry;
1183
1184 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1185 if (ret) {
1186 pr_err("copy_from_user failed\n");
1187 return ret;
1188 }
1189 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1190 rcvd_lstnr.sb_size))
1191 return -EFAULT;
1192
1193 data->listener.id = 0;
1194 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
1195 pr_err("Service is not unique and is already registered\n");
1196 data->released = true;
1197 return -EBUSY;
1198 }
1199
1200 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1201 if (!new_entry)
1202 return -ENOMEM;
1203 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1204 new_entry->rcv_req_flag = 0;
1205
1206 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1207 new_entry->sb_length = rcvd_lstnr.sb_size;
1208 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1209 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
1210 pr_err("qseecom_set_sb_memoryfailed\n");
1211 kzfree(new_entry);
1212 return -ENOMEM;
1213 }
1214
1215 data->listener.id = rcvd_lstnr.listener_id;
1216 init_waitqueue_head(&new_entry->rcv_req_wq);
1217 init_waitqueue_head(&new_entry->listener_block_app_wq);
1218 new_entry->send_resp_flag = 0;
1219 new_entry->listener_in_use = false;
1220 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1221 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1222 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1223
1224 return ret;
1225}
1226
Zhen Kong26e62742018-05-04 17:19:06 -07001227static void __qseecom_listener_abort_all(int abort)
1228{
1229 struct qseecom_registered_listener_list *entry = NULL;
1230 unsigned long flags;
1231
1232 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1233 list_for_each_entry(entry,
1234 &qseecom.registered_listener_list_head, list) {
1235 pr_debug("set abort %d for listener %d\n",
1236 abort, entry->svc.listener_id);
1237 entry->abort = abort;
1238 }
1239 if (abort)
1240 wake_up_interruptible_all(&qseecom.send_resp_wq);
1241 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1242}
1243
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001244static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
1245{
1246 int ret = 0;
1247 unsigned long flags;
1248 uint32_t unmap_mem = 0;
1249 struct qseecom_register_listener_ireq req;
1250 struct qseecom_registered_listener_list *ptr_svc = NULL;
1251 struct qseecom_command_scm_resp resp;
1252 struct ion_handle *ihandle = NULL; /* Retrieve phy addr */
1253
1254 req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
1255 req.listener_id = data->listener.id;
1256 resp.result = QSEOS_RESULT_INCOMPLETE;
1257
1258 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
1259 sizeof(req), &resp, sizeof(resp));
1260 if (ret) {
1261 pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
1262 ret, data->listener.id);
1263 return ret;
1264 }
1265
1266 if (resp.result != QSEOS_RESULT_SUCCESS) {
1267 pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
1268 resp.result, data->listener.id);
1269 return -EPERM;
1270 }
1271
1272 data->abort = 1;
1273 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1274 list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
1275 list) {
1276 if (ptr_svc->svc.listener_id == data->listener.id) {
Zhen Kong26e62742018-05-04 17:19:06 -07001277 ptr_svc->abort = 1;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001278 wake_up_all(&ptr_svc->rcv_req_wq);
1279 break;
1280 }
1281 }
1282 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1283
1284 while (atomic_read(&data->ioctl_count) > 1) {
1285 if (wait_event_freezable(data->abort_wq,
1286 atomic_read(&data->ioctl_count) <= 1)) {
1287 pr_err("Interrupted from abort\n");
1288 ret = -ERESTARTSYS;
Zhen Kongd0954d72017-06-01 15:06:00 -07001289 return ret;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001290 }
1291 }
1292
1293 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1294 list_for_each_entry(ptr_svc,
1295 &qseecom.registered_listener_list_head, list) {
1296 if (ptr_svc->svc.listener_id == data->listener.id) {
1297 if (ptr_svc->sb_virt) {
1298 unmap_mem = 1;
1299 ihandle = ptr_svc->ihandle;
1300 }
1301 list_del(&ptr_svc->list);
1302 kzfree(ptr_svc);
1303 break;
1304 }
1305 }
1306 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1307
1308 /* Unmap the memory */
1309 if (unmap_mem) {
1310 if (!IS_ERR_OR_NULL(ihandle)) {
1311 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
1312 ion_free(qseecom.ion_clnt, ihandle);
1313 }
1314 }
1315 data->released = true;
1316 return ret;
1317}
1318
1319static int __qseecom_set_msm_bus_request(uint32_t mode)
1320{
1321 int ret = 0;
1322 struct qseecom_clk *qclk;
1323
1324 qclk = &qseecom.qsee;
1325 if (qclk->ce_core_src_clk != NULL) {
1326 if (mode == INACTIVE) {
1327 __qseecom_disable_clk(CLK_QSEE);
1328 } else {
1329 ret = __qseecom_enable_clk(CLK_QSEE);
1330 if (ret)
1331 pr_err("CLK enabling failed (%d) MODE (%d)\n",
1332 ret, mode);
1333 }
1334 }
1335
1336 if ((!ret) && (qseecom.current_mode != mode)) {
1337 ret = msm_bus_scale_client_update_request(
1338 qseecom.qsee_perf_client, mode);
1339 if (ret) {
1340 pr_err("Bandwidth req failed(%d) MODE (%d)\n",
1341 ret, mode);
1342 if (qclk->ce_core_src_clk != NULL) {
1343 if (mode == INACTIVE) {
1344 ret = __qseecom_enable_clk(CLK_QSEE);
1345 if (ret)
1346 pr_err("CLK enable failed\n");
1347 } else
1348 __qseecom_disable_clk(CLK_QSEE);
1349 }
1350 }
1351 qseecom.current_mode = mode;
1352 }
1353 return ret;
1354}
1355
1356static void qseecom_bw_inactive_req_work(struct work_struct *work)
1357{
1358 mutex_lock(&app_access_lock);
1359 mutex_lock(&qsee_bw_mutex);
1360 if (qseecom.timer_running)
1361 __qseecom_set_msm_bus_request(INACTIVE);
1362 pr_debug("current_mode = %d, cumulative_mode = %d\n",
1363 qseecom.current_mode, qseecom.cumulative_mode);
1364 qseecom.timer_running = false;
1365 mutex_unlock(&qsee_bw_mutex);
1366 mutex_unlock(&app_access_lock);
1367}
1368
1369static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
1370{
1371 schedule_work(&qseecom.bw_inactive_req_ws);
1372}
1373
1374static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1375{
1376 struct qseecom_clk *qclk;
1377 int ret = 0;
1378
1379 mutex_lock(&clk_access_lock);
1380 if (ce == CLK_QSEE)
1381 qclk = &qseecom.qsee;
1382 else
1383 qclk = &qseecom.ce_drv;
1384
1385 if (qclk->clk_access_cnt > 2) {
1386 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1387 ret = -EINVAL;
1388 goto err_dec_ref_cnt;
1389 }
1390 if (qclk->clk_access_cnt == 2)
1391 qclk->clk_access_cnt--;
1392
1393err_dec_ref_cnt:
1394 mutex_unlock(&clk_access_lock);
1395 return ret;
1396}
1397
1398
1399static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1400{
1401 int32_t ret = 0;
1402 int32_t request_mode = INACTIVE;
1403
1404 mutex_lock(&qsee_bw_mutex);
1405 if (mode == 0) {
1406 if (qseecom.cumulative_mode > MEDIUM)
1407 request_mode = HIGH;
1408 else
1409 request_mode = qseecom.cumulative_mode;
1410 } else {
1411 request_mode = mode;
1412 }
1413
1414 ret = __qseecom_set_msm_bus_request(request_mode);
1415 if (ret) {
1416 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1417 ret, request_mode);
1418 goto err_scale_timer;
1419 }
1420
1421 if (qseecom.timer_running) {
1422 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1423 if (ret) {
1424 pr_err("Failed to decrease clk ref count.\n");
1425 goto err_scale_timer;
1426 }
1427 del_timer_sync(&(qseecom.bw_scale_down_timer));
1428 qseecom.timer_running = false;
1429 }
1430err_scale_timer:
1431 mutex_unlock(&qsee_bw_mutex);
1432 return ret;
1433}
1434
1435
1436static int qseecom_unregister_bus_bandwidth_needs(
1437 struct qseecom_dev_handle *data)
1438{
1439 int32_t ret = 0;
1440
1441 qseecom.cumulative_mode -= data->mode;
1442 data->mode = INACTIVE;
1443
1444 return ret;
1445}
1446
1447static int __qseecom_register_bus_bandwidth_needs(
1448 struct qseecom_dev_handle *data, uint32_t request_mode)
1449{
1450 int32_t ret = 0;
1451
1452 if (data->mode == INACTIVE) {
1453 qseecom.cumulative_mode += request_mode;
1454 data->mode = request_mode;
1455 } else {
1456 if (data->mode != request_mode) {
1457 qseecom.cumulative_mode -= data->mode;
1458 qseecom.cumulative_mode += request_mode;
1459 data->mode = request_mode;
1460 }
1461 }
1462 return ret;
1463}
1464
1465static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1466{
1467 int ret = 0;
1468
1469 ret = qsee_vote_for_clock(data, CLK_DFAB);
1470 if (ret) {
1471 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1472 goto perf_enable_exit;
1473 }
1474 ret = qsee_vote_for_clock(data, CLK_SFPB);
1475 if (ret) {
1476 qsee_disable_clock_vote(data, CLK_DFAB);
1477 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1478 goto perf_enable_exit;
1479 }
1480
1481perf_enable_exit:
1482 return ret;
1483}
1484
1485static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1486 void __user *argp)
1487{
1488 int32_t ret = 0;
1489 int32_t req_mode;
1490
1491 if (qseecom.no_clock_support)
1492 return 0;
1493
1494 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1495 if (ret) {
1496 pr_err("copy_from_user failed\n");
1497 return ret;
1498 }
1499 if (req_mode > HIGH) {
1500 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1501 return -EINVAL;
1502 }
1503
1504 /*
1505 * Register bus bandwidth needs if bus scaling feature is enabled;
1506 * otherwise, qseecom enable/disable clocks for the client directly.
1507 */
1508 if (qseecom.support_bus_scaling) {
1509 mutex_lock(&qsee_bw_mutex);
1510 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1511 mutex_unlock(&qsee_bw_mutex);
1512 } else {
1513 pr_debug("Bus scaling feature is NOT enabled\n");
1514 pr_debug("request bandwidth mode %d for the client\n",
1515 req_mode);
1516 if (req_mode != INACTIVE) {
1517 ret = qseecom_perf_enable(data);
1518 if (ret)
1519 pr_err("Failed to vote for clock with err %d\n",
1520 ret);
1521 } else {
1522 qsee_disable_clock_vote(data, CLK_DFAB);
1523 qsee_disable_clock_vote(data, CLK_SFPB);
1524 }
1525 }
1526 return ret;
1527}
1528
1529static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1530{
1531 if (qseecom.no_clock_support)
1532 return;
1533
1534 mutex_lock(&qsee_bw_mutex);
1535 qseecom.bw_scale_down_timer.expires = jiffies +
1536 msecs_to_jiffies(duration);
1537 mod_timer(&(qseecom.bw_scale_down_timer),
1538 qseecom.bw_scale_down_timer.expires);
1539 qseecom.timer_running = true;
1540 mutex_unlock(&qsee_bw_mutex);
1541}
1542
1543static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1544{
1545 if (!qseecom.support_bus_scaling)
1546 qsee_disable_clock_vote(data, CLK_SFPB);
1547 else
1548 __qseecom_add_bw_scale_down_timer(
1549 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1550}
1551
1552static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1553{
1554 int ret = 0;
1555
1556 if (qseecom.support_bus_scaling) {
1557 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1558 if (ret)
1559 pr_err("Failed to set bw MEDIUM.\n");
1560 } else {
1561 ret = qsee_vote_for_clock(data, CLK_SFPB);
1562 if (ret)
1563 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1564 }
1565 return ret;
1566}
1567
1568static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1569 void __user *argp)
1570{
1571 ion_phys_addr_t pa;
1572 int32_t ret;
1573 struct qseecom_set_sb_mem_param_req req;
1574 size_t len;
1575
1576 /* Copy the relevant information needed for loading the image */
1577 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1578 return -EFAULT;
1579
1580 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1581 (req.sb_len == 0)) {
1582 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1583 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1584 return -EFAULT;
1585 }
1586 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1587 req.sb_len))
1588 return -EFAULT;
1589
1590 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001591 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001592 req.ifd_data_fd);
1593 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1594 pr_err("Ion client could not retrieve the handle\n");
1595 return -ENOMEM;
1596 }
1597 /* Get the physical address of the ION BUF */
1598 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1599 if (ret) {
1600
1601 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1602 ret);
1603 return ret;
1604 }
1605
1606 if (len < req.sb_len) {
1607 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1608 req.sb_len, len);
1609 return -EINVAL;
1610 }
1611 /* Populate the structure for sending scm call to load image */
1612 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1613 data->client.ihandle);
1614 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1615 pr_err("ION memory mapping for client shared buf failed\n");
1616 return -ENOMEM;
1617 }
1618 data->client.sb_phys = (phys_addr_t)pa;
1619 data->client.sb_length = req.sb_len;
1620 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1621 return 0;
1622}
1623
Zhen Kong26e62742018-05-04 17:19:06 -07001624static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1625 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001626{
1627 int ret;
1628
1629 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001630 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001631}
1632
1633static int __qseecom_reentrancy_listener_has_sent_rsp(
1634 struct qseecom_dev_handle *data,
1635 struct qseecom_registered_listener_list *ptr_svc)
1636{
1637 int ret;
1638
1639 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001640 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001641}
1642
1643static void __qseecom_clean_listener_sglistinfo(
1644 struct qseecom_registered_listener_list *ptr_svc)
1645{
1646 if (ptr_svc->sglist_cnt) {
1647 memset(ptr_svc->sglistinfo_ptr, 0,
1648 SGLISTINFO_TABLE_SIZE);
1649 ptr_svc->sglist_cnt = 0;
1650 }
1651}
1652
1653static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1654 struct qseecom_command_scm_resp *resp)
1655{
1656 int ret = 0;
1657 int rc = 0;
1658 uint32_t lstnr;
1659 unsigned long flags;
1660 struct qseecom_client_listener_data_irsp send_data_rsp;
1661 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
1662 struct qseecom_registered_listener_list *ptr_svc = NULL;
1663 sigset_t new_sigset;
1664 sigset_t old_sigset;
1665 uint32_t status;
1666 void *cmd_buf = NULL;
1667 size_t cmd_len;
1668 struct sglist_info *table = NULL;
1669
1670 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1671 lstnr = resp->data;
1672 /*
1673 * Wake up blocking lsitener service with the lstnr id
1674 */
1675 spin_lock_irqsave(&qseecom.registered_listener_list_lock,
1676 flags);
1677 list_for_each_entry(ptr_svc,
1678 &qseecom.registered_listener_list_head, list) {
1679 if (ptr_svc->svc.listener_id == lstnr) {
1680 ptr_svc->listener_in_use = true;
1681 ptr_svc->rcv_req_flag = 1;
1682 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1683 break;
1684 }
1685 }
1686 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
1687 flags);
1688
1689 if (ptr_svc == NULL) {
1690 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07001691 rc = -EINVAL;
1692 status = QSEOS_RESULT_FAILURE;
1693 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001694 }
1695
1696 if (!ptr_svc->ihandle) {
1697 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07001698 rc = -EINVAL;
1699 status = QSEOS_RESULT_FAILURE;
1700 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001701 }
1702
1703 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07001704 pr_err("Service %d does not exist\n",
1705 lstnr);
1706 rc = -ERESTARTSYS;
1707 ptr_svc = NULL;
1708 status = QSEOS_RESULT_FAILURE;
1709 goto err_resp;
1710 }
1711
1712 if (ptr_svc->abort == 1) {
1713 pr_err("Service %d abort %d\n",
1714 lstnr, ptr_svc->abort);
1715 rc = -ENODEV;
1716 status = QSEOS_RESULT_FAILURE;
1717 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001718 }
1719 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1720
1721 /* initialize the new signal mask with all signals*/
1722 sigfillset(&new_sigset);
1723 /* block all signals */
1724 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1725
1726 do {
1727 /*
1728 * When reentrancy is not supported, check global
1729 * send_resp_flag; otherwise, check this listener's
1730 * send_resp_flag.
1731 */
1732 if (!qseecom.qsee_reentrancy_support &&
1733 !wait_event_freezable(qseecom.send_resp_wq,
Zhen Kong26e62742018-05-04 17:19:06 -07001734 __qseecom_listener_has_sent_rsp(
1735 data, ptr_svc))) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001736 break;
1737 }
1738
1739 if (qseecom.qsee_reentrancy_support &&
1740 !wait_event_freezable(qseecom.send_resp_wq,
1741 __qseecom_reentrancy_listener_has_sent_rsp(
1742 data, ptr_svc))) {
1743 break;
1744 }
1745 } while (1);
1746
1747 /* restore signal mask */
1748 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07001749 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001750 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
1751 data->client.app_id, lstnr, ret);
1752 rc = -ENODEV;
1753 status = QSEOS_RESULT_FAILURE;
1754 } else {
1755 status = QSEOS_RESULT_SUCCESS;
1756 }
Zhen Kong26e62742018-05-04 17:19:06 -07001757err_resp:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001758 qseecom.send_resp_flag = 0;
1759 ptr_svc->send_resp_flag = 0;
1760 table = ptr_svc->sglistinfo_ptr;
1761 if (qseecom.qsee_version < QSEE_VERSION_40) {
1762 send_data_rsp.listener_id = lstnr;
1763 send_data_rsp.status = status;
1764 send_data_rsp.sglistinfo_ptr =
1765 (uint32_t)virt_to_phys(table);
1766 send_data_rsp.sglistinfo_len =
1767 SGLISTINFO_TABLE_SIZE;
1768 dmac_flush_range((void *)table,
1769 (void *)table + SGLISTINFO_TABLE_SIZE);
1770 cmd_buf = (void *)&send_data_rsp;
1771 cmd_len = sizeof(send_data_rsp);
1772 } else {
1773 send_data_rsp_64bit.listener_id = lstnr;
1774 send_data_rsp_64bit.status = status;
1775 send_data_rsp_64bit.sglistinfo_ptr =
1776 virt_to_phys(table);
1777 send_data_rsp_64bit.sglistinfo_len =
1778 SGLISTINFO_TABLE_SIZE;
1779 dmac_flush_range((void *)table,
1780 (void *)table + SGLISTINFO_TABLE_SIZE);
1781 cmd_buf = (void *)&send_data_rsp_64bit;
1782 cmd_len = sizeof(send_data_rsp_64bit);
1783 }
1784 if (qseecom.whitelist_support == false)
1785 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1786 else
1787 *(uint32_t *)cmd_buf =
1788 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
1789 if (ptr_svc) {
1790 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1791 ptr_svc->ihandle,
1792 ptr_svc->sb_virt, ptr_svc->sb_length,
1793 ION_IOC_CLEAN_INV_CACHES);
1794 if (ret) {
1795 pr_err("cache operation failed %d\n", ret);
1796 return ret;
1797 }
1798 }
1799
1800 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1801 ret = __qseecom_enable_clk(CLK_QSEE);
1802 if (ret)
1803 return ret;
1804 }
1805
1806 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1807 cmd_buf, cmd_len, resp, sizeof(*resp));
1808 ptr_svc->listener_in_use = false;
1809 __qseecom_clean_listener_sglistinfo(ptr_svc);
1810 if (ret) {
1811 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1812 ret, data->client.app_id);
1813 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1814 __qseecom_disable_clk(CLK_QSEE);
1815 return ret;
1816 }
Zhen Kong26e62742018-05-04 17:19:06 -07001817 pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
1818 status, resp->result, data->client.app_id, lstnr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001819 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1820 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1821 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1822 resp->result, data->client.app_id, lstnr);
1823 ret = -EINVAL;
1824 }
1825 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1826 __qseecom_disable_clk(CLK_QSEE);
1827
1828 }
1829 if (rc)
1830 return rc;
1831
1832 return ret;
1833}
1834
Zhen Konga91aaf02018-02-02 17:21:04 -08001835static int __qseecom_process_reentrancy_blocked_on_listener(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001836 struct qseecom_command_scm_resp *resp,
1837 struct qseecom_registered_app_list *ptr_app,
1838 struct qseecom_dev_handle *data)
1839{
1840 struct qseecom_registered_listener_list *list_ptr;
1841 int ret = 0;
1842 struct qseecom_continue_blocked_request_ireq ireq;
1843 struct qseecom_command_scm_resp continue_resp;
Zhen Konga91aaf02018-02-02 17:21:04 -08001844 unsigned int session_id;
Zhen Kong3d1d92f2018-02-02 17:21:04 -08001845 sigset_t new_sigset;
1846 sigset_t old_sigset;
Zhen Konga91aaf02018-02-02 17:21:04 -08001847 unsigned long flags;
1848 bool found_app = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001849
1850 if (!resp || !data) {
1851 pr_err("invalid resp or data pointer\n");
1852 ret = -EINVAL;
1853 goto exit;
1854 }
1855
1856 /* find app_id & img_name from list */
1857 if (!ptr_app) {
1858 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
1859 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
1860 list) {
1861 if ((ptr_app->app_id == data->client.app_id) &&
1862 (!strcmp(ptr_app->app_name,
1863 data->client.app_name))) {
1864 found_app = true;
1865 break;
1866 }
1867 }
1868 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
1869 flags);
1870 if (!found_app) {
1871 pr_err("app_id %d (%s) is not found\n",
1872 data->client.app_id,
1873 (char *)data->client.app_name);
1874 ret = -ENOENT;
1875 goto exit;
1876 }
1877 }
1878
Zhen Kongd8cc0052017-11-13 15:13:31 -08001879 do {
Zhen Konga91aaf02018-02-02 17:21:04 -08001880 session_id = resp->resp_type;
1881 list_ptr = __qseecom_find_svc(resp->data);
1882 if (!list_ptr) {
1883 pr_err("Invalid listener ID %d\n", resp->data);
1884 ret = -ENODATA;
Zhen Konge7f525f2017-12-01 18:26:25 -08001885 goto exit;
1886 }
Zhen Konga91aaf02018-02-02 17:21:04 -08001887 ptr_app->blocked_on_listener_id = resp->data;
1888
1889 pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
1890 resp->data, list_ptr->listener_in_use,
1891 session_id, data->client.app_id);
1892
1893 /* sleep until listener is available */
1894 sigfillset(&new_sigset);
1895 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1896
1897 do {
1898 qseecom.app_block_ref_cnt++;
1899 ptr_app->app_blocked = true;
1900 mutex_unlock(&app_access_lock);
1901 wait_event_freezable(
1902 list_ptr->listener_block_app_wq,
1903 !list_ptr->listener_in_use);
1904 mutex_lock(&app_access_lock);
1905 ptr_app->app_blocked = false;
1906 qseecom.app_block_ref_cnt--;
1907 } while (list_ptr->listener_in_use);
1908
1909 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
1910
1911 ptr_app->blocked_on_listener_id = 0;
1912 pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
1913 resp->data, session_id, data->client.app_id);
1914
1915 /* notify TZ that listener is available */
1916 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
1917
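		/*
		 * With smcinvoke support TZ resumes the specific blocked
		 * session; otherwise the whole app is resumed by its app_id.
		 */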
1918 if (qseecom.smcinvoke_support)
1919 ireq.app_or_session_id = session_id;
1920 else
1921 ireq.app_or_session_id = data->client.app_id;
1922
1923 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1924 &ireq, sizeof(ireq),
1925 &continue_resp, sizeof(continue_resp));
1926 if (ret && qseecom.smcinvoke_support) {
1927 /* retry with legacy cmd */
1928 qseecom.smcinvoke_support = false;
1929 ireq.app_or_session_id = data->client.app_id;
1930 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1931 &ireq, sizeof(ireq),
1932 &continue_resp, sizeof(continue_resp));
1933 qseecom.smcinvoke_support = true;
1934 if (ret) {
1935 pr_err("unblock app %d or session %d fail\n",
1936 data->client.app_id, session_id);
1937 goto exit;
1938 }
1939 }
1940 resp->result = continue_resp.result;
1941 resp->resp_type = continue_resp.resp_type;
1942 resp->data = continue_resp.data;
1943 pr_debug("unblock resp = %d\n", resp->result);
1944 } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
1945
1946 if (resp->result != QSEOS_RESULT_INCOMPLETE) {
1947 pr_err("Unexpected unblock resp %d\n", resp->result);
1948 ret = -EINVAL;
Zhen Kong2f60f492017-06-29 15:22:14 -07001949 }
Zhen Kong2f60f492017-06-29 15:22:14 -07001950exit:
1951 return ret;
1952}
1953
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001954static int __qseecom_reentrancy_process_incomplete_cmd(
1955 struct qseecom_dev_handle *data,
1956 struct qseecom_command_scm_resp *resp)
1957{
1958 int ret = 0;
1959 int rc = 0;
1960 uint32_t lstnr;
1961 unsigned long flags;
1962 struct qseecom_client_listener_data_irsp send_data_rsp;
1963 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
1964 struct qseecom_registered_listener_list *ptr_svc = NULL;
1965 sigset_t new_sigset;
1966 sigset_t old_sigset;
1967 uint32_t status;
1968 void *cmd_buf = NULL;
1969 size_t cmd_len;
1970 struct sglist_info *table = NULL;
1971
Zhen Kong26e62742018-05-04 17:19:06 -07001972 while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001973 lstnr = resp->data;
1974 /*
1975		 * Wake up the blocking listener service with the lstnr id
1976 */
1977 spin_lock_irqsave(&qseecom.registered_listener_list_lock,
1978 flags);
1979 list_for_each_entry(ptr_svc,
1980 &qseecom.registered_listener_list_head, list) {
1981 if (ptr_svc->svc.listener_id == lstnr) {
1982 ptr_svc->listener_in_use = true;
1983 ptr_svc->rcv_req_flag = 1;
1984 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1985 break;
1986 }
1987 }
1988 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
1989 flags);
1990
1991 if (ptr_svc == NULL) {
1992 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07001993 rc = -EINVAL;
1994 status = QSEOS_RESULT_FAILURE;
1995 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001996 }
1997
1998 if (!ptr_svc->ihandle) {
1999 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07002000 rc = -EINVAL;
2001 status = QSEOS_RESULT_FAILURE;
2002 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002003 }
2004
2005 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07002006 pr_err("Service %d does not exist\n",
2007 lstnr);
2008 rc = -ERESTARTSYS;
2009 ptr_svc = NULL;
2010 status = QSEOS_RESULT_FAILURE;
2011 goto err_resp;
2012 }
2013
2014 if (ptr_svc->abort == 1) {
2015 pr_err("Service %d abort %d\n",
2016 lstnr, ptr_svc->abort);
2017 rc = -ENODEV;
2018 status = QSEOS_RESULT_FAILURE;
2019 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002020 }
2021 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2022
2023		/* initialize the new signal mask with all signals */
2024 sigfillset(&new_sigset);
2025
2026 /* block all signals */
2027 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2028
2029 /* unlock mutex btw waking listener and sleep-wait */
2030 mutex_unlock(&app_access_lock);
2031 do {
2032 if (!wait_event_freezable(qseecom.send_resp_wq,
2033 __qseecom_reentrancy_listener_has_sent_rsp(
2034 data, ptr_svc))) {
2035 break;
2036 }
2037 } while (1);
2038 /* lock mutex again after resp sent */
2039 mutex_lock(&app_access_lock);
2040 ptr_svc->send_resp_flag = 0;
2041 qseecom.send_resp_flag = 0;
2042
2043 /* restore signal mask */
2044 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07002045 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002046 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
2047 data->client.app_id, lstnr, ret);
2048 rc = -ENODEV;
2049 status = QSEOS_RESULT_FAILURE;
2050 } else {
2051 status = QSEOS_RESULT_SUCCESS;
2052 }
Zhen Kong26e62742018-05-04 17:19:06 -07002053err_resp:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002054 table = ptr_svc->sglistinfo_ptr;
2055 if (qseecom.qsee_version < QSEE_VERSION_40) {
2056 send_data_rsp.listener_id = lstnr;
2057 send_data_rsp.status = status;
2058 send_data_rsp.sglistinfo_ptr =
2059 (uint32_t)virt_to_phys(table);
2060 send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
2061 dmac_flush_range((void *)table,
2062 (void *)table + SGLISTINFO_TABLE_SIZE);
2063 cmd_buf = (void *)&send_data_rsp;
2064 cmd_len = sizeof(send_data_rsp);
2065 } else {
2066 send_data_rsp_64bit.listener_id = lstnr;
2067 send_data_rsp_64bit.status = status;
2068 send_data_rsp_64bit.sglistinfo_ptr =
2069 virt_to_phys(table);
2070 send_data_rsp_64bit.sglistinfo_len =
2071 SGLISTINFO_TABLE_SIZE;
2072 dmac_flush_range((void *)table,
2073 (void *)table + SGLISTINFO_TABLE_SIZE);
2074 cmd_buf = (void *)&send_data_rsp_64bit;
2075 cmd_len = sizeof(send_data_rsp_64bit);
2076 }
2077 if (qseecom.whitelist_support == false)
2078 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2079 else
2080 *(uint32_t *)cmd_buf =
2081 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
2082 if (ptr_svc) {
2083 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2084 ptr_svc->ihandle,
2085 ptr_svc->sb_virt, ptr_svc->sb_length,
2086 ION_IOC_CLEAN_INV_CACHES);
2087 if (ret) {
2088 pr_err("cache operation failed %d\n", ret);
2089 return ret;
2090 }
2091 }
2092 if (lstnr == RPMB_SERVICE) {
2093 ret = __qseecom_enable_clk(CLK_QSEE);
2094 if (ret)
2095 return ret;
2096 }
2097
2098 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2099 cmd_buf, cmd_len, resp, sizeof(*resp));
2100 ptr_svc->listener_in_use = false;
2101 __qseecom_clean_listener_sglistinfo(ptr_svc);
2102 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2103
2104 if (ret) {
2105 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2106 ret, data->client.app_id);
2107 goto exit;
2108 }
2109
2110 switch (resp->result) {
2111 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2112 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2113 lstnr, data->client.app_id, resp->data);
2114 if (lstnr == resp->data) {
2115 pr_err("lstnr %d should not be blocked!\n",
2116 lstnr);
2117 ret = -EINVAL;
2118 goto exit;
2119 }
2120 ret = __qseecom_process_reentrancy_blocked_on_listener(
2121 resp, NULL, data);
2122 if (ret) {
2123 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2124 data->client.app_id,
2125 data->client.app_name, resp->data);
2126 goto exit;
2127 }
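			/*
			 * Fall through: on success resp->result is INCOMPLETE
			 * again, so the outer while loop services the next
			 * listener request.
			 */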
2128 case QSEOS_RESULT_SUCCESS:
2129 case QSEOS_RESULT_INCOMPLETE:
2130 break;
2131 default:
2132 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2133 resp->result, data->client.app_id, lstnr);
2134 ret = -EINVAL;
2135 goto exit;
2136 }
2137exit:
2138 if (lstnr == RPMB_SERVICE)
2139 __qseecom_disable_clk(CLK_QSEE);
2140
2141 }
2142 if (rc)
2143 return rc;
2144
2145 return ret;
2146}
2147
2148/*
2149 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2150 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2151 * So, before sending an OS level scm_call, first check whether any app is
2152 * blocked and, if so, wait until all apps are unblocked.
2153 */
2154static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2155{
2156 sigset_t new_sigset, old_sigset;
2157
2158 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2159 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2160 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2161 /* thread sleep until this app unblocked */
2162 while (qseecom.app_block_ref_cnt > 0) {
2163 sigfillset(&new_sigset);
2164 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2165 mutex_unlock(&app_access_lock);
2166 do {
2167 if (!wait_event_freezable(qseecom.app_block_wq,
2168 (qseecom.app_block_ref_cnt == 0)))
2169 break;
2170 } while (1);
2171 mutex_lock(&app_access_lock);
2172 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2173 }
2174 }
2175}
2176
2177/*
2178 * The send-data scm_call will fail if this TA is blocked or if more than
2179 * one TA is requesting listener services; so, first check whether this
2180 * call needs to wait.
2181 */
2182static void __qseecom_reentrancy_check_if_this_app_blocked(
2183 struct qseecom_registered_app_list *ptr_app)
2184{
2185 sigset_t new_sigset, old_sigset;
2186
2187 if (qseecom.qsee_reentrancy_support) {
2188 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2189 /* thread sleep until this app unblocked */
2190 sigfillset(&new_sigset);
2191 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2192 mutex_unlock(&app_access_lock);
2193 do {
2194 if (!wait_event_freezable(qseecom.app_block_wq,
2195 (!ptr_app->app_blocked &&
2196 qseecom.app_block_ref_cnt <= 1)))
2197 break;
2198 } while (1);
2199 mutex_lock(&app_access_lock);
2200 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2201 }
2202 }
2203}
2204
2205static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2206 uint32_t *app_id)
2207{
2208 int32_t ret;
2209 struct qseecom_command_scm_resp resp;
2210 bool found_app = false;
2211 struct qseecom_registered_app_list *entry = NULL;
2212 unsigned long flags = 0;
2213
2214 if (!app_id) {
2215 pr_err("Null pointer to app_id\n");
2216 return -EINVAL;
2217 }
2218 *app_id = 0;
2219
2220 /* check if app exists and has been registered locally */
2221 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2222 list_for_each_entry(entry,
2223 &qseecom.registered_app_list_head, list) {
2224 if (!strcmp(entry->app_name, req.app_name)) {
2225 found_app = true;
2226 break;
2227 }
2228 }
2229 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2230 if (found_app) {
2231 pr_debug("Found app with id %d\n", entry->app_id);
2232 *app_id = entry->app_id;
2233 return 0;
2234 }
2235
2236 memset((void *)&resp, 0, sizeof(resp));
2237
2238 /* SCM_CALL to check if app_id for the mentioned app exists */
2239 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2240 sizeof(struct qseecom_check_app_ireq),
2241 &resp, sizeof(resp));
2242 if (ret) {
2243 pr_err("scm_call to check if app is already loaded failed\n");
2244 return -EINVAL;
2245 }
2246
2247 if (resp.result == QSEOS_RESULT_FAILURE)
2248 return 0;
2249
2250 switch (resp.resp_type) {
2251 /*qsee returned listener type response */
2252 case QSEOS_LISTENER_ID:
2253 pr_err("resp type is of listener type instead of app");
2254 return -EINVAL;
2255 case QSEOS_APP_ID:
2256 *app_id = resp.data;
2257 return 0;
2258 default:
2259 pr_err("invalid resp type (%d) from qsee",
2260 resp.resp_type);
2261 return -ENODEV;
2262 }
2263}
2264
2265static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2266{
2267 struct qseecom_registered_app_list *entry = NULL;
2268 unsigned long flags = 0;
2269 u32 app_id = 0;
2270 struct ion_handle *ihandle; /* Ion handle */
2271 struct qseecom_load_img_req load_img_req;
2272 int32_t ret = 0;
2273 ion_phys_addr_t pa = 0;
2274 size_t len;
2275 struct qseecom_command_scm_resp resp;
2276 struct qseecom_check_app_ireq req;
2277 struct qseecom_load_app_ireq load_req;
2278 struct qseecom_load_app_64bit_ireq load_req_64bit;
2279 void *cmd_buf = NULL;
2280 size_t cmd_len;
2281 bool first_time = false;
2282
2283 /* Copy the relevant information needed for loading the image */
2284 if (copy_from_user(&load_img_req,
2285 (void __user *)argp,
2286 sizeof(struct qseecom_load_img_req))) {
2287 pr_err("copy_from_user failed\n");
2288 return -EFAULT;
2289 }
2290
2291 /* Check and load cmnlib */
2292 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2293 if (!qseecom.commonlib_loaded &&
2294 load_img_req.app_arch == ELFCLASS32) {
2295 ret = qseecom_load_commonlib_image(data, "cmnlib");
2296 if (ret) {
2297 pr_err("failed to load cmnlib\n");
2298 return -EIO;
2299 }
2300 qseecom.commonlib_loaded = true;
2301 pr_debug("cmnlib is loaded\n");
2302 }
2303
2304 if (!qseecom.commonlib64_loaded &&
2305 load_img_req.app_arch == ELFCLASS64) {
2306 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2307 if (ret) {
2308 pr_err("failed to load cmnlib64\n");
2309 return -EIO;
2310 }
2311 qseecom.commonlib64_loaded = true;
2312 pr_debug("cmnlib64 is loaded\n");
2313 }
2314 }
2315
2316 if (qseecom.support_bus_scaling) {
2317 mutex_lock(&qsee_bw_mutex);
2318 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2319 mutex_unlock(&qsee_bw_mutex);
2320 if (ret)
2321 return ret;
2322 }
2323
2324 /* Vote for the SFPB clock */
2325 ret = __qseecom_enable_clk_scale_up(data);
2326 if (ret)
2327 goto enable_clk_err;
2328
2329 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2330 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2331 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2332
2333 ret = __qseecom_check_app_exists(req, &app_id);
2334 if (ret < 0)
2335 goto loadapp_err;
2336
2337 if (app_id) {
2338 pr_debug("App id %d (%s) already exists\n", app_id,
2339 (char *)(req.app_name));
2340 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2341 list_for_each_entry(entry,
2342 &qseecom.registered_app_list_head, list){
2343 if (entry->app_id == app_id) {
2344 entry->ref_cnt++;
2345 break;
2346 }
2347 }
2348 spin_unlock_irqrestore(
2349 &qseecom.registered_app_list_lock, flags);
2350 ret = 0;
2351 } else {
2352 first_time = true;
2353		pr_warn("App (%s) doesn't exist, loading app for the first time\n",
2354 (char *)(load_img_req.img_name));
2355 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002356 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002357 load_img_req.ifd_data_fd);
2358 if (IS_ERR_OR_NULL(ihandle)) {
2359 pr_err("Ion client could not retrieve the handle\n");
2360 ret = -ENOMEM;
2361 goto loadapp_err;
2362 }
2363
2364 /* Get the physical address of the ION BUF */
2365 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2366 if (ret) {
2367 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2368 ret);
2369 goto loadapp_err;
2370 }
2371 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2372 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2373 len, load_img_req.mdt_len,
2374 load_img_req.img_len);
2375 ret = -EINVAL;
2376 goto loadapp_err;
2377 }
2378 /* Populate the structure for sending scm call to load image */
2379 if (qseecom.qsee_version < QSEE_VERSION_40) {
2380 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2381 load_req.mdt_len = load_img_req.mdt_len;
2382 load_req.img_len = load_img_req.img_len;
2383 strlcpy(load_req.app_name, load_img_req.img_name,
2384 MAX_APP_NAME_SIZE);
2385 load_req.phy_addr = (uint32_t)pa;
2386 cmd_buf = (void *)&load_req;
2387 cmd_len = sizeof(struct qseecom_load_app_ireq);
2388 } else {
2389 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2390 load_req_64bit.mdt_len = load_img_req.mdt_len;
2391 load_req_64bit.img_len = load_img_req.img_len;
2392 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2393 MAX_APP_NAME_SIZE);
2394 load_req_64bit.phy_addr = (uint64_t)pa;
2395 cmd_buf = (void *)&load_req_64bit;
2396 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2397 }
2398
2399 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2400 ION_IOC_CLEAN_INV_CACHES);
2401 if (ret) {
2402 pr_err("cache operation failed %d\n", ret);
2403 goto loadapp_err;
2404 }
2405
2406 /* SCM_CALL to load the app and get the app_id back */
2407 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2408 cmd_len, &resp, sizeof(resp));
2409 if (ret) {
2410 pr_err("scm_call to load app failed\n");
2411 if (!IS_ERR_OR_NULL(ihandle))
2412 ion_free(qseecom.ion_clnt, ihandle);
2413 ret = -EINVAL;
2414 goto loadapp_err;
2415 }
2416
2417 if (resp.result == QSEOS_RESULT_FAILURE) {
2418 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2419 if (!IS_ERR_OR_NULL(ihandle))
2420 ion_free(qseecom.ion_clnt, ihandle);
2421 ret = -EFAULT;
2422 goto loadapp_err;
2423 }
2424
2425 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2426 ret = __qseecom_process_incomplete_cmd(data, &resp);
2427 if (ret) {
2428 pr_err("process_incomplete_cmd failed err: %d\n",
2429 ret);
2430 if (!IS_ERR_OR_NULL(ihandle))
2431 ion_free(qseecom.ion_clnt, ihandle);
2432 ret = -EFAULT;
2433 goto loadapp_err;
2434 }
2435 }
2436
2437 if (resp.result != QSEOS_RESULT_SUCCESS) {
2438 pr_err("scm_call failed resp.result unknown, %d\n",
2439 resp.result);
2440 if (!IS_ERR_OR_NULL(ihandle))
2441 ion_free(qseecom.ion_clnt, ihandle);
2442 ret = -EFAULT;
2443 goto loadapp_err;
2444 }
2445
2446 app_id = resp.data;
2447
2448 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2449 if (!entry) {
2450 ret = -ENOMEM;
2451 goto loadapp_err;
2452 }
2453 entry->app_id = app_id;
2454 entry->ref_cnt = 1;
2455 entry->app_arch = load_img_req.app_arch;
2456 /*
2457 * keymaster app may be first loaded as "keymaste" by qseecomd,
2458		 * and then used as "keymaster" on some targets. To avoid an
2459		 * app name check failure, register "keymaster" into app_list and
2460 * thread private data.
2461 */
2462 if (!strcmp(load_img_req.img_name, "keymaste"))
2463 strlcpy(entry->app_name, "keymaster",
2464 MAX_APP_NAME_SIZE);
2465 else
2466 strlcpy(entry->app_name, load_img_req.img_name,
2467 MAX_APP_NAME_SIZE);
2468 entry->app_blocked = false;
2469 entry->blocked_on_listener_id = 0;
2470
2471 /* Deallocate the handle */
2472 if (!IS_ERR_OR_NULL(ihandle))
2473 ion_free(qseecom.ion_clnt, ihandle);
2474
2475 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2476 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2477 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2478 flags);
2479
2480 pr_warn("App with id %u (%s) now loaded\n", app_id,
2481 (char *)(load_img_req.img_name));
2482 }
2483 data->client.app_id = app_id;
2484 data->client.app_arch = load_img_req.app_arch;
2485 if (!strcmp(load_img_req.img_name, "keymaste"))
2486 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2487 else
2488 strlcpy(data->client.app_name, load_img_req.img_name,
2489 MAX_APP_NAME_SIZE);
2490 load_img_req.app_id = app_id;
2491 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2492 pr_err("copy_to_user failed\n");
2493 ret = -EFAULT;
2494 if (first_time == true) {
2495 spin_lock_irqsave(
2496 &qseecom.registered_app_list_lock, flags);
2497 list_del(&entry->list);
2498 spin_unlock_irqrestore(
2499 &qseecom.registered_app_list_lock, flags);
2500 kzfree(entry);
2501 }
2502 }
2503
2504loadapp_err:
2505 __qseecom_disable_clk_scale_down(data);
2506enable_clk_err:
2507 if (qseecom.support_bus_scaling) {
2508 mutex_lock(&qsee_bw_mutex);
2509 qseecom_unregister_bus_bandwidth_needs(data);
2510 mutex_unlock(&qsee_bw_mutex);
2511 }
2512 return ret;
2513}
2514
2515static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2516{
2517 int ret = 1; /* Set unload app */
2518
2519 wake_up_all(&qseecom.send_resp_wq);
2520 if (qseecom.qsee_reentrancy_support)
2521 mutex_unlock(&app_access_lock);
2522 while (atomic_read(&data->ioctl_count) > 1) {
2523 if (wait_event_freezable(data->abort_wq,
2524 atomic_read(&data->ioctl_count) <= 1)) {
2525 pr_err("Interrupted from abort\n");
2526 ret = -ERESTARTSYS;
2527 break;
2528 }
2529 }
2530 if (qseecom.qsee_reentrancy_support)
2531 mutex_lock(&app_access_lock);
2532 return ret;
2533}
2534
2535static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2536{
2537 int ret = 0;
2538
2539 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2540 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2541 ion_free(qseecom.ion_clnt, data->client.ihandle);
2542 data->client.ihandle = NULL;
2543 }
2544 return ret;
2545}
2546
2547static int qseecom_unload_app(struct qseecom_dev_handle *data,
2548 bool app_crash)
2549{
2550 unsigned long flags;
2551 unsigned long flags1;
2552 int ret = 0;
2553 struct qseecom_command_scm_resp resp;
2554 struct qseecom_registered_app_list *ptr_app = NULL;
2555 bool unload = false;
2556 bool found_app = false;
2557 bool found_dead_app = false;
2558
2559 if (!data) {
2560 pr_err("Invalid/uninitialized device handle\n");
2561 return -EINVAL;
2562 }
2563
2564 if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
2565 pr_debug("Do not unload keymaster app from tz\n");
2566 goto unload_exit;
2567 }
2568
2569 __qseecom_cleanup_app(data);
2570 __qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
2571
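	/*
	 * Look up the app entry; the app is unloaded from TZ only when it
	 * crashed (and is not blocked) or when this is the last reference.
	 */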
2572 if (data->client.app_id > 0) {
2573 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2574 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
2575 list) {
2576 if (ptr_app->app_id == data->client.app_id) {
2577 if (!strcmp((void *)ptr_app->app_name,
2578 (void *)data->client.app_name)) {
2579 found_app = true;
Zhen Kongaf93d7a2017-10-13 14:01:48 -07002580 if (ptr_app->app_blocked)
2581 app_crash = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002582 if (app_crash || ptr_app->ref_cnt == 1)
2583 unload = true;
2584 break;
2585 }
2586 found_dead_app = true;
2587 break;
2588 }
2589 }
2590 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2591 flags);
2592 if (found_app == false && found_dead_app == false) {
2593 pr_err("Cannot find app with id = %d (%s)\n",
2594 data->client.app_id,
2595 (char *)data->client.app_name);
2596 ret = -EINVAL;
2597 goto unload_exit;
2598 }
2599 }
2600
2601 if (found_dead_app)
2602 pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
2603 (char *)data->client.app_name);
2604
2605 if (unload) {
2606 struct qseecom_unload_app_ireq req;
2607 /* Populate the structure for sending scm call to load image */
2608 req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
2609 req.app_id = data->client.app_id;
2610
2611 /* SCM_CALL to unload the app */
2612 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2613 sizeof(struct qseecom_unload_app_ireq),
2614 &resp, sizeof(resp));
2615 if (ret) {
2616 pr_err("scm_call to unload app (id = %d) failed\n",
2617 req.app_id);
2618 ret = -EFAULT;
2619 goto unload_exit;
2620 } else {
2621 pr_warn("App id %d now unloaded\n", req.app_id);
2622 }
2623 if (resp.result == QSEOS_RESULT_FAILURE) {
2624 pr_err("app (%d) unload_failed!!\n",
2625 data->client.app_id);
2626 ret = -EFAULT;
2627 goto unload_exit;
2628 }
2629 if (resp.result == QSEOS_RESULT_SUCCESS)
2630 pr_debug("App (%d) is unloaded!!\n",
2631 data->client.app_id);
2632 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2633 ret = __qseecom_process_incomplete_cmd(data, &resp);
2634 if (ret) {
2635 pr_err("process_incomplete_cmd fail err: %d\n",
2636 ret);
2637 goto unload_exit;
2638 }
2639 }
2640 }
2641
2642 if (found_app) {
2643 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2644 if (app_crash) {
2645 ptr_app->ref_cnt = 0;
2646 pr_debug("app_crash: ref_count = 0\n");
2647 } else {
2648 if (ptr_app->ref_cnt == 1) {
2649 ptr_app->ref_cnt = 0;
2650 pr_debug("ref_count set to 0\n");
2651 } else {
2652 ptr_app->ref_cnt--;
2653 pr_debug("Can't unload app(%d) inuse\n",
2654 ptr_app->app_id);
2655 }
2656 }
2657 if (unload) {
2658 list_del(&ptr_app->list);
2659 kzfree(ptr_app);
2660 }
2661 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2662 flags1);
2663 }
2664unload_exit:
2665 qseecom_unmap_ion_allocated_memory(data);
2666 data->released = true;
2667 return ret;
2668}
2669
2670static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2671 unsigned long virt)
2672{
2673 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2674}
2675
2676static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2677 unsigned long virt)
2678{
2679 return (uintptr_t)data->client.sb_virt +
2680 (virt - data->client.user_virt_sb_base);
2681}
2682
2683int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2684 struct qseecom_send_svc_cmd_req *req_ptr,
2685 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2686{
2687 int ret = 0;
2688 void *req_buf = NULL;
2689
2690 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2691 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2692 req_ptr, send_svc_ireq_ptr);
2693 return -EINVAL;
2694 }
2695
2696 /* Clients need to ensure req_buf is at base offset of shared buffer */
2697 if ((uintptr_t)req_ptr->cmd_req_buf !=
2698 data_ptr->client.user_virt_sb_base) {
2699 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2700 return -EINVAL;
2701 }
2702
2703 if (data_ptr->client.sb_length <
2704 sizeof(struct qseecom_rpmb_provision_key)) {
2705 pr_err("shared buffer is too small to hold key type\n");
2706 return -EINVAL;
2707 }
2708 req_buf = data_ptr->client.sb_virt;
2709
2710 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2711 send_svc_ireq_ptr->key_type =
2712 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2713 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2714 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2715 data_ptr, (uintptr_t)req_ptr->resp_buf));
2716 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2717
2718 return ret;
2719}
2720
2721int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2722 struct qseecom_send_svc_cmd_req *req_ptr,
2723 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2724{
2725 int ret = 0;
2726 uint32_t reqd_len_sb_in = 0;
2727
2728 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2729 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2730 req_ptr, send_svc_ireq_ptr);
2731 return -EINVAL;
2732 }
2733
2734 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2735 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2736 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2737 pr_err("Required: %u, Available: %zu\n",
2738 reqd_len_sb_in, data_ptr->client.sb_length);
2739 return -ENOMEM;
2740 }
2741
2742 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2743 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2744 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2745 data_ptr, (uintptr_t)req_ptr->resp_buf));
2746 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2747
2748 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2749 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2750
2751
2752 return ret;
2753}
2754
2755static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
2756 struct qseecom_send_svc_cmd_req *req)
2757{
2758 if (!req || !req->resp_buf || !req->cmd_req_buf) {
2759 pr_err("req or cmd buffer or response buffer is null\n");
2760 return -EINVAL;
2761 }
2762
2763 if (!data || !data->client.ihandle) {
2764 pr_err("Client or client handle is not initialized\n");
2765 return -EINVAL;
2766 }
2767
2768 if (data->client.sb_virt == NULL) {
2769 pr_err("sb_virt null\n");
2770 return -EINVAL;
2771 }
2772
2773 if (data->client.user_virt_sb_base == 0) {
2774 pr_err("user_virt_sb_base is null\n");
2775 return -EINVAL;
2776 }
2777
2778 if (data->client.sb_length == 0) {
2779 pr_err("sb_length is 0\n");
2780 return -EINVAL;
2781 }
2782
2783 if (((uintptr_t)req->cmd_req_buf <
2784 data->client.user_virt_sb_base) ||
2785 ((uintptr_t)req->cmd_req_buf >=
2786 (data->client.user_virt_sb_base + data->client.sb_length))) {
2787 pr_err("cmd buffer address not within shared bufffer\n");
2788 return -EINVAL;
2789 }
2790 if (((uintptr_t)req->resp_buf <
2791 data->client.user_virt_sb_base) ||
2792 ((uintptr_t)req->resp_buf >=
2793 (data->client.user_virt_sb_base + data->client.sb_length))) {
2794 pr_err("response buffer address not within shared bufffer\n");
2795 return -EINVAL;
2796 }
2797 if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
2798 (req->cmd_req_len > data->client.sb_length) ||
2799 (req->resp_len > data->client.sb_length)) {
2800 pr_err("cmd buf length or response buf length not valid\n");
2801 return -EINVAL;
2802 }
2803 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
2804 pr_err("Integer overflow detected in req_len & rsp_len\n");
2805 return -EINVAL;
2806 }
2807
2808 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
2809		pr_debug("Not enough memory to fit cmd_buf and resp_buf.\n");
2810		pr_debug("Required: %u, Available: %zu\n",
2811 (req->cmd_req_len + req->resp_len),
2812 data->client.sb_length);
2813 return -ENOMEM;
2814 }
2815 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
2816 pr_err("Integer overflow in req_len & cmd_req_buf\n");
2817 return -EINVAL;
2818 }
2819 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
2820 pr_err("Integer overflow in resp_len & resp_buf\n");
2821 return -EINVAL;
2822 }
2823 if (data->client.user_virt_sb_base >
2824 (ULONG_MAX - data->client.sb_length)) {
2825 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
2826 return -EINVAL;
2827 }
2828 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
2829 ((uintptr_t)data->client.user_virt_sb_base +
2830 data->client.sb_length)) ||
2831 (((uintptr_t)req->resp_buf + req->resp_len) >
2832 ((uintptr_t)data->client.user_virt_sb_base +
2833 data->client.sb_length))) {
2834 pr_err("cmd buf or resp buf is out of shared buffer region\n");
2835 return -EINVAL;
2836 }
2837 return 0;
2838}
2839
2840static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
2841 void __user *argp)
2842{
2843 int ret = 0;
2844 struct qseecom_client_send_service_ireq send_svc_ireq;
2845 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
2846 struct qseecom_command_scm_resp resp;
2847 struct qseecom_send_svc_cmd_req req;
2848 void *send_req_ptr;
2849 size_t req_buf_size;
2850
2851 /*struct qseecom_command_scm_resp resp;*/
2852
2853 if (copy_from_user(&req,
2854 (void __user *)argp,
2855 sizeof(req))) {
2856 pr_err("copy_from_user failed\n");
2857 return -EFAULT;
2858 }
2859
2860 if (__validate_send_service_cmd_inputs(data, &req))
2861 return -EINVAL;
2862
2863 data->type = QSEECOM_SECURE_SERVICE;
2864
2865 switch (req.cmd_id) {
2866 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
2867 case QSEOS_RPMB_ERASE_COMMAND:
2868 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
2869 send_req_ptr = &send_svc_ireq;
2870 req_buf_size = sizeof(send_svc_ireq);
2871 if (__qseecom_process_rpmb_svc_cmd(data, &req,
2872 send_req_ptr))
2873 return -EINVAL;
2874 break;
2875 case QSEOS_FSM_LTEOTA_REQ_CMD:
2876 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
2877 case QSEOS_FSM_IKE_REQ_CMD:
2878 case QSEOS_FSM_IKE_REQ_RSP_CMD:
2879 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
2880 case QSEOS_FSM_OEM_FUSE_READ_ROW:
2881 case QSEOS_FSM_ENCFS_REQ_CMD:
2882 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
2883 send_req_ptr = &send_fsm_key_svc_ireq;
2884 req_buf_size = sizeof(send_fsm_key_svc_ireq);
2885 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
2886 send_req_ptr))
2887 return -EINVAL;
2888 break;
2889 default:
2890 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
2891 return -EINVAL;
2892 }
2893
2894 if (qseecom.support_bus_scaling) {
2895 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
2896 if (ret) {
2897 pr_err("Fail to set bw HIGH\n");
2898 return ret;
2899 }
2900 } else {
2901 ret = qseecom_perf_enable(data);
2902 if (ret) {
2903 pr_err("Failed to vote for clocks with err %d\n", ret);
2904 goto exit;
2905 }
2906 }
2907
2908 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
2909 data->client.sb_virt, data->client.sb_length,
2910 ION_IOC_CLEAN_INV_CACHES);
2911 if (ret) {
2912 pr_err("cache operation failed %d\n", ret);
2913 goto exit;
2914 }
2915 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2916 (const void *)send_req_ptr,
2917 req_buf_size, &resp, sizeof(resp));
2918 if (ret) {
2919 pr_err("qseecom_scm_call failed with err: %d\n", ret);
2920 if (!qseecom.support_bus_scaling) {
2921 qsee_disable_clock_vote(data, CLK_DFAB);
2922 qsee_disable_clock_vote(data, CLK_SFPB);
2923 } else {
2924 __qseecom_add_bw_scale_down_timer(
2925 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
2926 }
2927 goto exit;
2928 }
2929 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
2930 data->client.sb_virt, data->client.sb_length,
2931 ION_IOC_INV_CACHES);
2932 if (ret) {
2933 pr_err("cache operation failed %d\n", ret);
2934 goto exit;
2935 }
2936 switch (resp.result) {
2937 case QSEOS_RESULT_SUCCESS:
2938 break;
2939 case QSEOS_RESULT_INCOMPLETE:
2940 pr_debug("qseos_result_incomplete\n");
2941 ret = __qseecom_process_incomplete_cmd(data, &resp);
2942 if (ret) {
2943 pr_err("process_incomplete_cmd fail with result: %d\n",
2944 resp.result);
2945 }
2946 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
2947 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05302948 if (put_user(resp.result,
2949 (uint32_t __user *)req.resp_buf)) {
2950 ret = -EINVAL;
2951 goto exit;
2952 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002953 ret = 0;
2954 }
2955 break;
2956 case QSEOS_RESULT_FAILURE:
2957 pr_err("scm call failed with resp.result: %d\n", resp.result);
2958 ret = -EINVAL;
2959 break;
2960 default:
2961 pr_err("Response result %d not supported\n",
2962 resp.result);
2963 ret = -EINVAL;
2964 break;
2965 }
2966 if (!qseecom.support_bus_scaling) {
2967 qsee_disable_clock_vote(data, CLK_DFAB);
2968 qsee_disable_clock_vote(data, CLK_SFPB);
2969 } else {
2970 __qseecom_add_bw_scale_down_timer(
2971 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
2972 }
2973
2974exit:
2975 return ret;
2976}
2977
2978static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
2979 struct qseecom_send_cmd_req *req)
2980
2981{
2982 if (!data || !data->client.ihandle) {
2983 pr_err("Client or client handle is not initialized\n");
2984 return -EINVAL;
2985 }
2986 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
2987 (req->cmd_req_buf == NULL)) {
2988 pr_err("cmd buffer or response buffer is null\n");
2989 return -EINVAL;
2990 }
2991 if (((uintptr_t)req->cmd_req_buf <
2992 data->client.user_virt_sb_base) ||
2993 ((uintptr_t)req->cmd_req_buf >=
2994 (data->client.user_virt_sb_base + data->client.sb_length))) {
2995 pr_err("cmd buffer address not within shared bufffer\n");
2996 return -EINVAL;
2997 }
2998 if (((uintptr_t)req->resp_buf <
2999 data->client.user_virt_sb_base) ||
3000 ((uintptr_t)req->resp_buf >=
3001 (data->client.user_virt_sb_base + data->client.sb_length))) {
3002 pr_err("response buffer address not within shared bufffer\n");
3003 return -EINVAL;
3004 }
3005 if ((req->cmd_req_len == 0) ||
3006 (req->cmd_req_len > data->client.sb_length) ||
3007 (req->resp_len > data->client.sb_length)) {
3008 pr_err("cmd buf length or response buf length not valid\n");
3009 return -EINVAL;
3010 }
3011 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3012 pr_err("Integer overflow detected in req_len & rsp_len\n");
3013 return -EINVAL;
3014 }
3015
3016 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3017		pr_debug("Not enough memory to fit cmd_buf and resp_buf.\n");
3018		pr_debug("Required: %u, Available: %zu\n",
3019 (req->cmd_req_len + req->resp_len),
3020 data->client.sb_length);
3021 return -ENOMEM;
3022 }
3023 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3024 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3025 return -EINVAL;
3026 }
3027 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3028 pr_err("Integer overflow in resp_len & resp_buf\n");
3029 return -EINVAL;
3030 }
3031 if (data->client.user_virt_sb_base >
3032 (ULONG_MAX - data->client.sb_length)) {
3033 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3034 return -EINVAL;
3035 }
3036 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3037 ((uintptr_t)data->client.user_virt_sb_base +
3038 data->client.sb_length)) ||
3039 (((uintptr_t)req->resp_buf + req->resp_len) >
3040 ((uintptr_t)data->client.user_virt_sb_base +
3041 data->client.sb_length))) {
3042 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3043 return -EINVAL;
3044 }
3045 return 0;
3046}
3047
3048int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
3049 struct qseecom_registered_app_list *ptr_app,
3050 struct qseecom_dev_handle *data)
3051{
3052 int ret = 0;
3053
3054 switch (resp->result) {
3055 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
3056 pr_warn("App(%d) %s is blocked on listener %d\n",
3057 data->client.app_id, data->client.app_name,
3058 resp->data);
3059 ret = __qseecom_process_reentrancy_blocked_on_listener(
3060 resp, ptr_app, data);
3061 if (ret) {
3062 pr_err("failed to process App(%d) %s is blocked on listener %d\n",
3063 data->client.app_id, data->client.app_name, resp->data);
3064 return ret;
3065 }
3066
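		/*
		 * Fall through: after unblocking, resp->result is INCOMPLETE
		 * and is processed by the case below.
		 */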
3067 case QSEOS_RESULT_INCOMPLETE:
3068 qseecom.app_block_ref_cnt++;
3069 ptr_app->app_blocked = true;
3070 ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
3071 ptr_app->app_blocked = false;
3072 qseecom.app_block_ref_cnt--;
3073 wake_up_interruptible(&qseecom.app_block_wq);
3074 if (ret)
3075 pr_err("process_incomplete_cmd failed err: %d\n",
3076 ret);
3077 return ret;
3078 case QSEOS_RESULT_SUCCESS:
3079 return ret;
3080 default:
3081 pr_err("Response result %d not supported\n",
3082 resp->result);
3083 return -EINVAL;
3084 }
3085}
3086
3087static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
3088 struct qseecom_send_cmd_req *req)
3089{
3090 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07003091 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003092 u32 reqd_len_sb_in = 0;
3093 struct qseecom_client_send_data_ireq send_data_req = {0};
3094 struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
3095 struct qseecom_command_scm_resp resp;
3096 unsigned long flags;
3097 struct qseecom_registered_app_list *ptr_app;
3098 bool found_app = false;
3099 void *cmd_buf = NULL;
3100 size_t cmd_len;
3101 struct sglist_info *table = data->sglistinfo_ptr;
3102
3103 reqd_len_sb_in = req->cmd_req_len + req->resp_len;
3104 /* find app_id & img_name from list */
3105 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
3106 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
3107 list) {
3108 if ((ptr_app->app_id == data->client.app_id) &&
3109 (!strcmp(ptr_app->app_name, data->client.app_name))) {
3110 found_app = true;
3111 break;
3112 }
3113 }
3114 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
3115
3116 if (!found_app) {
3117 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
3118 (char *)data->client.app_name);
3119 return -ENOENT;
3120 }
3121
3122 if (qseecom.qsee_version < QSEE_VERSION_40) {
3123 send_data_req.app_id = data->client.app_id;
3124 send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3125 data, (uintptr_t)req->cmd_req_buf));
3126 send_data_req.req_len = req->cmd_req_len;
3127 send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3128 data, (uintptr_t)req->resp_buf));
3129 send_data_req.rsp_len = req->resp_len;
3130 send_data_req.sglistinfo_ptr =
3131 (uint32_t)virt_to_phys(table);
3132 send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3133 dmac_flush_range((void *)table,
3134 (void *)table + SGLISTINFO_TABLE_SIZE);
3135 cmd_buf = (void *)&send_data_req;
3136 cmd_len = sizeof(struct qseecom_client_send_data_ireq);
3137 } else {
3138 send_data_req_64bit.app_id = data->client.app_id;
3139 send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
3140 (uintptr_t)req->cmd_req_buf);
3141 send_data_req_64bit.req_len = req->cmd_req_len;
3142 send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
3143 (uintptr_t)req->resp_buf);
3144 send_data_req_64bit.rsp_len = req->resp_len;
3145 /* check if 32bit app's phys_addr region is under 4GB.*/
3146 if ((data->client.app_arch == ELFCLASS32) &&
3147 ((send_data_req_64bit.req_ptr >=
3148 PHY_ADDR_4G - send_data_req_64bit.req_len) ||
3149 (send_data_req_64bit.rsp_ptr >=
3150 PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
3151 pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
3152 data->client.app_name,
3153 send_data_req_64bit.req_ptr,
3154 send_data_req_64bit.req_len,
3155 send_data_req_64bit.rsp_ptr,
3156 send_data_req_64bit.rsp_len);
3157 return -EFAULT;
3158 }
3159 send_data_req_64bit.sglistinfo_ptr =
3160 (uint64_t)virt_to_phys(table);
3161 send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3162 dmac_flush_range((void *)table,
3163 (void *)table + SGLISTINFO_TABLE_SIZE);
3164 cmd_buf = (void *)&send_data_req_64bit;
3165 cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
3166 }
3167
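	/*
	 * Use the whitelist variant of the send-data command when TZ supports
	 * it, unless the caller explicitly requested the legacy command.
	 */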
3168 if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
3169 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
3170 else
3171 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
3172
3173 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3174 data->client.sb_virt,
3175 reqd_len_sb_in,
3176 ION_IOC_CLEAN_INV_CACHES);
3177 if (ret) {
3178 pr_err("cache operation failed %d\n", ret);
3179 return ret;
3180 }
3181
3182 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
3183
3184 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3185 cmd_buf, cmd_len,
3186 &resp, sizeof(resp));
3187 if (ret) {
3188 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
3189 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07003190 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003191 }
3192
3193 if (qseecom.qsee_reentrancy_support) {
3194 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07003195 if (ret)
3196 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003197 } else {
3198 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
3199 ret = __qseecom_process_incomplete_cmd(data, &resp);
3200 if (ret) {
3201 pr_err("process_incomplete_cmd failed err: %d\n",
3202 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07003203 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003204 }
3205 } else {
3206 if (resp.result != QSEOS_RESULT_SUCCESS) {
3207 pr_err("Response result %d not supported\n",
3208 resp.result);
3209 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07003210 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003211 }
3212 }
3213 }
Zhen Kong4af480e2017-09-19 14:34:16 -07003214exit:
3215 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003216 data->client.sb_virt, data->client.sb_length,
3217 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07003218 if (ret2) {
3219 pr_err("cache operation failed %d\n", ret2);
3220 return ret2;
3221 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003222 return ret;
3223}
3224
3225static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3226{
3227 int ret = 0;
3228 struct qseecom_send_cmd_req req;
3229
3230 ret = copy_from_user(&req, argp, sizeof(req));
3231 if (ret) {
3232 pr_err("copy_from_user failed\n");
3233 return ret;
3234 }
3235
3236 if (__validate_send_cmd_inputs(data, &req))
3237 return -EINVAL;
3238
3239 ret = __qseecom_send_cmd(data, &req);
3240
3241 if (ret)
3242 return ret;
3243
3244 return ret;
3245}
3246
3247int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3248 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3249 struct qseecom_dev_handle *data, int i) {
3250
3251 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3252 (req->ifd_data[i].fd > 0)) {
3253 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3254 (req->ifd_data[i].cmd_buf_offset >
3255 req->cmd_req_len - sizeof(uint32_t))) {
3256 pr_err("Invalid offset (req len) 0x%x\n",
3257 req->ifd_data[i].cmd_buf_offset);
3258 return -EINVAL;
3259 }
3260 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3261 (lstnr_resp->ifd_data[i].fd > 0)) {
3262 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3263 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3264 lstnr_resp->resp_len - sizeof(uint32_t))) {
3265 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3266 lstnr_resp->ifd_data[i].cmd_buf_offset);
3267 return -EINVAL;
3268 }
3269 }
3270 return 0;
3271}
3272
3273static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
3274 struct qseecom_dev_handle *data)
3275{
3276 struct ion_handle *ihandle;
3277 char *field;
3278 int ret = 0;
3279 int i = 0;
3280 uint32_t len = 0;
3281 struct scatterlist *sg;
3282 struct qseecom_send_modfd_cmd_req *req = NULL;
3283 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3284 struct qseecom_registered_listener_list *this_lstnr = NULL;
3285 uint32_t offset;
3286 struct sg_table *sg_ptr;
3287
3288 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3289 (data->type != QSEECOM_CLIENT_APP))
3290 return -EFAULT;
3291
3292 if (msg == NULL) {
3293 pr_err("Invalid address\n");
3294 return -EINVAL;
3295 }
3296 if (data->type == QSEECOM_LISTENER_SERVICE) {
3297 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3298 this_lstnr = __qseecom_find_svc(data->listener.id);
3299 if (IS_ERR_OR_NULL(this_lstnr)) {
3300 pr_err("Invalid listener ID\n");
3301 return -ENOMEM;
3302 }
3303 } else {
3304 req = (struct qseecom_send_modfd_cmd_req *)msg;
3305 }
3306
3307 for (i = 0; i < MAX_ION_FD; i++) {
3308 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3309 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003310 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003311 req->ifd_data[i].fd);
3312 if (IS_ERR_OR_NULL(ihandle)) {
3313 pr_err("Ion client can't retrieve the handle\n");
3314 return -ENOMEM;
3315 }
3316 field = (char *) req->cmd_req_buf +
3317 req->ifd_data[i].cmd_buf_offset;
3318 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3319 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003320 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003321 lstnr_resp->ifd_data[i].fd);
3322 if (IS_ERR_OR_NULL(ihandle)) {
3323 pr_err("Ion client can't retrieve the handle\n");
3324 return -ENOMEM;
3325 }
3326 field = lstnr_resp->resp_buf_ptr +
3327 lstnr_resp->ifd_data[i].cmd_buf_offset;
3328 } else {
3329 continue;
3330 }
3331 /* Populate the cmd data structure with the phys_addr */
3332 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3333 if (IS_ERR_OR_NULL(sg_ptr)) {
3334 pr_err("IOn client could not retrieve sg table\n");
3335 goto err;
3336 }
3337 if (sg_ptr->nents == 0) {
3338 pr_err("Num of scattered entries is 0\n");
3339 goto err;
3340 }
3341 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3342 pr_err("Num of scattered entries");
3343 pr_err(" (%d) is greater than max supported %d\n",
3344 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3345 goto err;
3346 }
3347 sg = sg_ptr->sgl;
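		/*
		 * A single sg entry: patch the 32-bit physical address directly
		 * into the command buffer at cmd_buf_offset. Multiple entries:
		 * write an array of qseecom_sg_entry {phys_addr, len} records
		 * instead.
		 */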
3348 if (sg_ptr->nents == 1) {
3349 uint32_t *update;
3350
3351 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3352 goto err;
3353 if ((data->type == QSEECOM_CLIENT_APP &&
3354 (data->client.app_arch == ELFCLASS32 ||
3355 data->client.app_arch == ELFCLASS64)) ||
3356 (data->type == QSEECOM_LISTENER_SERVICE)) {
3357 /*
3358 * Check if sg list phy add region is under 4GB
3359 */
3360 if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
3361 (!cleanup) &&
3362 ((uint64_t)sg_dma_address(sg_ptr->sgl)
3363 >= PHY_ADDR_4G - sg->length)) {
3364 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3365 data->client.app_name,
3366 &(sg_dma_address(sg_ptr->sgl)),
3367 sg->length);
3368 goto err;
3369 }
3370 update = (uint32_t *) field;
3371 *update = cleanup ? 0 :
3372 (uint32_t)sg_dma_address(sg_ptr->sgl);
3373 } else {
3374 pr_err("QSEE app arch %u is not supported\n",
3375 data->client.app_arch);
3376 goto err;
3377 }
3378 len += (uint32_t)sg->length;
3379 } else {
3380 struct qseecom_sg_entry *update;
3381 int j = 0;
3382
3383 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3384 (req->ifd_data[i].fd > 0)) {
3385
3386 if ((req->cmd_req_len <
3387 SG_ENTRY_SZ * sg_ptr->nents) ||
3388 (req->ifd_data[i].cmd_buf_offset >
3389 (req->cmd_req_len -
3390 SG_ENTRY_SZ * sg_ptr->nents))) {
3391 pr_err("Invalid offset = 0x%x\n",
3392 req->ifd_data[i].cmd_buf_offset);
3393 goto err;
3394 }
3395
3396 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3397 (lstnr_resp->ifd_data[i].fd > 0)) {
3398
3399 if ((lstnr_resp->resp_len <
3400 SG_ENTRY_SZ * sg_ptr->nents) ||
3401 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3402 (lstnr_resp->resp_len -
3403 SG_ENTRY_SZ * sg_ptr->nents))) {
3404 goto err;
3405 }
3406 }
3407 if ((data->type == QSEECOM_CLIENT_APP &&
3408 (data->client.app_arch == ELFCLASS32 ||
3409 data->client.app_arch == ELFCLASS64)) ||
3410 (data->type == QSEECOM_LISTENER_SERVICE)) {
3411 update = (struct qseecom_sg_entry *)field;
3412 for (j = 0; j < sg_ptr->nents; j++) {
3413 /*
3414 * Check if sg list PA is under 4GB
3415 */
3416 if ((qseecom.qsee_version >=
3417 QSEE_VERSION_40) &&
3418 (!cleanup) &&
3419 ((uint64_t)(sg_dma_address(sg))
3420 >= PHY_ADDR_4G - sg->length)) {
3421 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3422 data->client.app_name,
3423 &(sg_dma_address(sg)),
3424 sg->length);
3425 goto err;
3426 }
3427 update->phys_addr = cleanup ? 0 :
3428 (uint32_t)sg_dma_address(sg);
3429 update->len = cleanup ? 0 : sg->length;
3430 update++;
3431 len += sg->length;
3432 sg = sg_next(sg);
3433 }
3434 } else {
3435 pr_err("QSEE app arch %u is not supported\n",
3436 data->client.app_arch);
3437 goto err;
3438 }
3439 }
3440
3441 if (cleanup) {
3442 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3443 ihandle, NULL, len,
3444 ION_IOC_INV_CACHES);
3445 if (ret) {
3446 pr_err("cache operation failed %d\n", ret);
3447 goto err;
3448 }
3449 } else {
3450 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3451 ihandle, NULL, len,
3452 ION_IOC_CLEAN_INV_CACHES);
3453 if (ret) {
3454 pr_err("cache operation failed %d\n", ret);
3455 goto err;
3456 }
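			/*
			 * Record this buffer in the sglistinfo table so it can
			 * be reported to TZ along with the command.
			 */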
3457 if (data->type == QSEECOM_CLIENT_APP) {
3458 offset = req->ifd_data[i].cmd_buf_offset;
3459 data->sglistinfo_ptr[i].indexAndFlags =
3460 SGLISTINFO_SET_INDEX_FLAG(
3461 (sg_ptr->nents == 1), 0, offset);
3462 data->sglistinfo_ptr[i].sizeOrCount =
3463 (sg_ptr->nents == 1) ?
3464 sg->length : sg_ptr->nents;
3465 data->sglist_cnt = i + 1;
3466 } else {
3467 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3468 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3469 (uintptr_t)this_lstnr->sb_virt);
3470 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3471 SGLISTINFO_SET_INDEX_FLAG(
3472 (sg_ptr->nents == 1), 0, offset);
3473 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3474 (sg_ptr->nents == 1) ?
3475 sg->length : sg_ptr->nents;
3476 this_lstnr->sglist_cnt = i + 1;
3477 }
3478 }
3479 /* Deallocate the handle */
3480 if (!IS_ERR_OR_NULL(ihandle))
3481 ion_free(qseecom.ion_clnt, ihandle);
3482 }
3483 return ret;
3484err:
3485 if (!IS_ERR_OR_NULL(ihandle))
3486 ion_free(qseecom.ion_clnt, ihandle);
3487 return -ENOMEM;
3488}
3489
3490static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3491 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3492{
3493 struct scatterlist *sg = sg_ptr->sgl;
3494 struct qseecom_sg_entry_64bit *sg_entry;
3495 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3496 void *buf;
3497 uint i;
3498 size_t size;
3499 dma_addr_t coh_pmem;
3500
3501 if (fd_idx >= MAX_ION_FD) {
3502 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3503 return -ENOMEM;
3504 }
3505 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3506 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3507 /* Allocate a contiguous kernel buffer */
3508 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
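	/*
	 * Round the allocation up to whole pages for dma_alloc_coherent();
	 * note this adds a full extra page when size is already page-aligned.
	 */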
3509 size = (size + PAGE_SIZE) & PAGE_MASK;
3510 buf = dma_alloc_coherent(qseecom.pdev,
3511 size, &coh_pmem, GFP_KERNEL);
3512 if (buf == NULL) {
3513 pr_err("failed to alloc memory for sg buf\n");
3514 return -ENOMEM;
3515 }
3516 /* update qseecom_sg_list_buf_hdr_64bit */
3517 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3518 buf_hdr->new_buf_phys_addr = coh_pmem;
3519 buf_hdr->nents_total = sg_ptr->nents;
3520 /* save the left sg entries into new allocated buf */
3521 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3522 for (i = 0; i < sg_ptr->nents; i++) {
3523 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3524 sg_entry->len = sg->length;
3525 sg_entry++;
3526 sg = sg_next(sg);
3527 }
3528
3529 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3530 data->client.sec_buf_fd[fd_idx].vbase = buf;
3531 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3532 data->client.sec_buf_fd[fd_idx].size = size;
3533
3534 return 0;
3535}
3536
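/*
 * 64-bit variant of __qseecom_update_cmd_buf(): for every valid ion fd in a
 * modfd request (client app) or modfd listener response, resolve the fd to a
 * scatter-gather table and patch the buffer at cmd_buf_offset with either a
 * single 64-bit physical address (one entry) or an array of
 * qseecom_sg_entry_64bit entries, then do the required cache maintenance and
 * record the sglistinfo bookkeeping. With cleanup == true the patched fields
 * are zeroed and the caches are invalidated instead of cleaned.
 */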
3537static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
3538 struct qseecom_dev_handle *data)
3539{
3540 struct ion_handle *ihandle;
3541 char *field;
3542 int ret = 0;
3543 int i = 0;
3544 uint32_t len = 0;
3545 struct scatterlist *sg;
3546 struct qseecom_send_modfd_cmd_req *req = NULL;
3547 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3548 struct qseecom_registered_listener_list *this_lstnr = NULL;
3549 uint32_t offset;
3550 struct sg_table *sg_ptr;
3551
3552 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3553 (data->type != QSEECOM_CLIENT_APP))
3554 return -EFAULT;
3555
3556 if (msg == NULL) {
3557 pr_err("Invalid address\n");
3558 return -EINVAL;
3559 }
3560 if (data->type == QSEECOM_LISTENER_SERVICE) {
3561 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3562 this_lstnr = __qseecom_find_svc(data->listener.id);
3563 if (IS_ERR_OR_NULL(this_lstnr)) {
3564 pr_err("Invalid listener ID\n");
3565 return -ENOMEM;
3566 }
3567 } else {
3568 req = (struct qseecom_send_modfd_cmd_req *)msg;
3569 }
3570
3571 for (i = 0; i < MAX_ION_FD; i++) {
3572 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3573 (req->ifd_data[i].fd > 0)) {
3574 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
3575 req->ifd_data[i].fd);
3576 if (IS_ERR_OR_NULL(ihandle)) {
3577 pr_err("Ion client can't retrieve the handle\n");
3578 return -ENOMEM;
3579 }
3580 field = (char *) req->cmd_req_buf +
3581 req->ifd_data[i].cmd_buf_offset;
3582 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3583 (lstnr_resp->ifd_data[i].fd > 0)) {
3584 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
3585 lstnr_resp->ifd_data[i].fd);
3586 if (IS_ERR_OR_NULL(ihandle)) {
3587 pr_err("Ion client can't retrieve the handle\n");
3588 return -ENOMEM;
3589 }
3590 field = lstnr_resp->resp_buf_ptr +
3591 lstnr_resp->ifd_data[i].cmd_buf_offset;
3592 } else {
3593 continue;
3594 }
3595 /* Populate the cmd data structure with the phys_addr */
3596 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3597 if (IS_ERR_OR_NULL(sg_ptr)) {
3598 pr_err("IOn client could not retrieve sg table\n");
3599 goto err;
3600 }
3601 if (sg_ptr->nents == 0) {
3602 pr_err("Num of scattered entries is 0\n");
3603 goto err;
3604 }
3605 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3606 pr_warn("Num of scattered entries");
3607 pr_warn(" (%d) is greater than %d\n",
3608 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3609 if (cleanup) {
3610 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3611 data->client.sec_buf_fd[i].vbase)
3612 dma_free_coherent(qseecom.pdev,
3613 data->client.sec_buf_fd[i].size,
3614 data->client.sec_buf_fd[i].vbase,
3615 data->client.sec_buf_fd[i].pbase);
3616 } else {
3617 ret = __qseecom_allocate_sg_list_buffer(data,
3618 field, i, sg_ptr);
3619 if (ret) {
3620 pr_err("Failed to allocate sg list buffer\n");
3621 goto err;
3622 }
3623 }
3624 len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
3625 sg = sg_ptr->sgl;
3626 goto cleanup;
3627 }
3628 sg = sg_ptr->sgl;
3629 if (sg_ptr->nents == 1) {
3630 uint64_t *update_64bit;
3631
3632 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3633 goto err;
3634 /* 64bit app uses 64bit address */
3635 update_64bit = (uint64_t *) field;
3636 *update_64bit = cleanup ? 0 :
3637 (uint64_t)sg_dma_address(sg_ptr->sgl);
3638 len += (uint32_t)sg->length;
3639 } else {
3640 struct qseecom_sg_entry_64bit *update_64bit;
3641 int j = 0;
3642
3643 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3644 (req->ifd_data[i].fd > 0)) {
3645
3646 if ((req->cmd_req_len <
3647 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3648 (req->ifd_data[i].cmd_buf_offset >
3649 (req->cmd_req_len -
3650 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3651 pr_err("Invalid offset = 0x%x\n",
3652 req->ifd_data[i].cmd_buf_offset);
3653 goto err;
3654 }
3655
3656 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3657 (lstnr_resp->ifd_data[i].fd > 0)) {
3658
3659 if ((lstnr_resp->resp_len <
3660 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3661 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3662 (lstnr_resp->resp_len -
3663 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3664 goto err;
3665 }
3666 }
3667 /* 64bit app uses 64bit address */
3668 update_64bit = (struct qseecom_sg_entry_64bit *)field;
3669 for (j = 0; j < sg_ptr->nents; j++) {
3670 update_64bit->phys_addr = cleanup ? 0 :
3671 (uint64_t)sg_dma_address(sg);
3672 update_64bit->len = cleanup ? 0 :
3673 (uint32_t)sg->length;
3674 update_64bit++;
3675 len += sg->length;
3676 sg = sg_next(sg);
3677 }
3678 }
3679cleanup:
3680 if (cleanup) {
3681 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3682 ihandle, NULL, len,
3683 ION_IOC_INV_CACHES);
3684 if (ret) {
3685 pr_err("cache operation failed %d\n", ret);
3686 goto err;
3687 }
3688 } else {
3689 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3690 ihandle, NULL, len,
3691 ION_IOC_CLEAN_INV_CACHES);
3692 if (ret) {
3693 pr_err("cache operation failed %d\n", ret);
3694 goto err;
3695 }
3696 if (data->type == QSEECOM_CLIENT_APP) {
3697 offset = req->ifd_data[i].cmd_buf_offset;
3698 data->sglistinfo_ptr[i].indexAndFlags =
3699 SGLISTINFO_SET_INDEX_FLAG(
3700 (sg_ptr->nents == 1), 1, offset);
3701 data->sglistinfo_ptr[i].sizeOrCount =
3702 (sg_ptr->nents == 1) ?
3703 sg->length : sg_ptr->nents;
3704 data->sglist_cnt = i + 1;
3705 } else {
3706 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3707 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3708 (uintptr_t)this_lstnr->sb_virt);
3709 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3710 SGLISTINFO_SET_INDEX_FLAG(
3711 (sg_ptr->nents == 1), 1, offset);
3712 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3713 (sg_ptr->nents == 1) ?
3714 sg->length : sg_ptr->nents;
3715 this_lstnr->sglist_cnt = i + 1;
3716 }
3717 }
3718 /* Deallocate the handle */
3719 if (!IS_ERR_OR_NULL(ihandle))
3720 ion_free(qseecom.ion_clnt, ihandle);
3721 }
3722 return ret;
3723err:
3724 for (i = 0; i < MAX_ION_FD; i++)
3725 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3726 data->client.sec_buf_fd[i].vbase)
3727 dma_free_coherent(qseecom.pdev,
3728 data->client.sec_buf_fd[i].size,
3729 data->client.sec_buf_fd[i].vbase,
3730 data->client.sec_buf_fd[i].pbase);
3731 if (!IS_ERR_OR_NULL(ihandle))
3732 ion_free(qseecom.ion_clnt, ihandle);
3733 return -ENOMEM;
3734}
3735
3736static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3737 void __user *argp,
3738 bool is_64bit_addr)
3739{
3740 int ret = 0;
3741 int i;
3742 struct qseecom_send_modfd_cmd_req req;
3743 struct qseecom_send_cmd_req send_cmd_req;
3744
3745 ret = copy_from_user(&req, argp, sizeof(req));
3746 if (ret) {
3747 pr_err("copy_from_user failed\n");
3748 return ret;
3749 }
3750
3751 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3752 send_cmd_req.cmd_req_len = req.cmd_req_len;
3753 send_cmd_req.resp_buf = req.resp_buf;
3754 send_cmd_req.resp_len = req.resp_len;
3755
3756 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3757 return -EINVAL;
3758
3759 /* validate offsets */
3760 for (i = 0; i < MAX_ION_FD; i++) {
3761 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3762 pr_err("Invalid offset %d = 0x%x\n",
3763 i, req.ifd_data[i].cmd_buf_offset);
3764 return -EINVAL;
3765 }
3766 }
3767 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3768 (uintptr_t)req.cmd_req_buf);
3769 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3770 (uintptr_t)req.resp_buf);
3771
3772 if (!is_64bit_addr) {
3773 ret = __qseecom_update_cmd_buf(&req, false, data);
3774 if (ret)
3775 return ret;
3776 ret = __qseecom_send_cmd(data, &send_cmd_req);
3777 if (ret)
3778 return ret;
3779 ret = __qseecom_update_cmd_buf(&req, true, data);
3780 if (ret)
3781 return ret;
3782 } else {
3783 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3784 if (ret)
3785 return ret;
3786 ret = __qseecom_send_cmd(data, &send_cmd_req);
3787 if (ret)
3788 return ret;
3789 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3790 if (ret)
3791 return ret;
3792 }
3793
3794 return ret;
3795}
3796
3797static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3798 void __user *argp)
3799{
3800 return __qseecom_send_modfd_cmd(data, argp, false);
3801}
3802
3803static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
3804 void __user *argp)
3805{
3806 return __qseecom_send_modfd_cmd(data, argp, true);
3807}
3808
3809
3810
3811static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3812 struct qseecom_registered_listener_list *svc)
3813{
3814 int ret;
3815
3816 ret = (svc->rcv_req_flag != 0);
3817 return ret || data->abort || svc->abort;
3818}
3819
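/*
 * Block a registered listener until TZ has a request for it (rcv_req_flag is
 * set by the send-command path) or the listener/client is being aborted. The
 * wait is freezable so a parked listener thread does not block suspend;
 * -ERESTARTSYS is returned if the wait is interrupted for any other reason.
 */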
3820static int qseecom_receive_req(struct qseecom_dev_handle *data)
3821{
3822 int ret = 0;
3823 struct qseecom_registered_listener_list *this_lstnr;
3824
3825 this_lstnr = __qseecom_find_svc(data->listener.id);
3826 if (!this_lstnr) {
3827 pr_err("Invalid listener ID\n");
3828 return -ENODATA;
3829 }
3830
3831 while (1) {
3832 if (wait_event_freezable(this_lstnr->rcv_req_wq,
3833 __qseecom_listener_has_rcvd_req(data,
3834 this_lstnr))) {
3835 pr_debug("Interrupted: exiting Listener Service = %d\n",
3836 (uint32_t)data->listener.id);
3837 /* woken up for different reason */
3838 return -ERESTARTSYS;
3839 }
3840
3841 if (data->abort || this_lstnr->abort) {
3842 pr_err("Aborting Listener Service = %d\n",
3843 (uint32_t)data->listener.id);
3844 return -ENODEV;
3845 }
3846 this_lstnr->rcv_req_flag = 0;
3847 break;
3848 }
3849 return ret;
3850}
3851
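/*
 * Sanity-check the firmware header before trusting any of its fields: verify
 * the ELF magic, a non-zero program header count, and that all program
 * headers fit inside the .mdt for both ELFCLASS32 and ELFCLASS64 images.
 */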
3852static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3853{
3854 unsigned char app_arch = 0;
3855 struct elf32_hdr *ehdr;
3856 struct elf64_hdr *ehdr64;
3857
3858 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3859
3860 switch (app_arch) {
3861 case ELFCLASS32: {
3862 ehdr = (struct elf32_hdr *)fw_entry->data;
3863 if (fw_entry->size < sizeof(*ehdr)) {
3864 pr_err("%s: Not big enough to be an elf32 header\n",
3865 qseecom.pdev->init_name);
3866 return false;
3867 }
3868 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3869 pr_err("%s: Not an elf32 header\n",
3870 qseecom.pdev->init_name);
3871 return false;
3872 }
3873 if (ehdr->e_phnum == 0) {
3874 pr_err("%s: No loadable segments\n",
3875 qseecom.pdev->init_name);
3876 return false;
3877 }
3878 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3879 sizeof(struct elf32_hdr) > fw_entry->size) {
3880 pr_err("%s: Program headers not within mdt\n",
3881 qseecom.pdev->init_name);
3882 return false;
3883 }
3884 break;
3885 }
3886 case ELFCLASS64: {
3887 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3888 if (fw_entry->size < sizeof(*ehdr64)) {
3889 pr_err("%s: Not big enough to be an elf64 header\n",
3890 qseecom.pdev->init_name);
3891 return false;
3892 }
3893 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3894 pr_err("%s: Not an elf64 header\n",
3895 qseecom.pdev->init_name);
3896 return false;
3897 }
3898 if (ehdr64->e_phnum == 0) {
3899 pr_err("%s: No loadable segments\n",
3900 qseecom.pdev->init_name);
3901 return false;
3902 }
3903 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3904 sizeof(struct elf64_hdr) > fw_entry->size) {
3905 pr_err("%s: Program headers not within mdt\n",
3906 qseecom.pdev->init_name);
3907 return false;
3908 }
3909 break;
3910 }
3911 default: {
3912 pr_err("QSEE app arch %u is not supported\n", app_arch);
3913 return false;
3914 }
3915 }
3916 return true;
3917}
3918
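/*
 * A trusted app image is split across the firmware search path as
 * "<appname>.mdt" (ELF plus program headers) and "<appname>.b00",
 * "<appname>.b01", ... (one blob per program header). This helper requests
 * each piece only to add up the total size and report the ELF class; the
 * data itself is re-read into the staging buffer later by
 * __qseecom_get_fw_data().
 */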
3919static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
3920 uint32_t *app_arch)
3921{
3922 int ret = -1;
3923 int i = 0, rc = 0;
3924 const struct firmware *fw_entry = NULL;
3925 char fw_name[MAX_APP_NAME_SIZE];
3926 struct elf32_hdr *ehdr;
3927 struct elf64_hdr *ehdr64;
3928 int num_images = 0;
3929
3930 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
3931 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
3932 if (rc) {
3933 pr_err("error with request_firmware\n");
3934 ret = -EIO;
3935 goto err;
3936 }
3937 if (!__qseecom_is_fw_image_valid(fw_entry)) {
3938 ret = -EIO;
3939 goto err;
3940 }
3941 *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3942 *fw_size = fw_entry->size;
3943 if (*app_arch == ELFCLASS32) {
3944 ehdr = (struct elf32_hdr *)fw_entry->data;
3945 num_images = ehdr->e_phnum;
3946 } else if (*app_arch == ELFCLASS64) {
3947 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3948 num_images = ehdr64->e_phnum;
3949 } else {
3950 pr_err("QSEE %s app, arch %u is not supported\n",
3951 appname, *app_arch);
3952 ret = -EIO;
3953 goto err;
3954 }
3955 pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
3956 release_firmware(fw_entry);
3957 fw_entry = NULL;
3958 for (i = 0; i < num_images; i++) {
3959 memset(fw_name, 0, sizeof(fw_name));
3960 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
3961 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
3962 if (ret)
3963 goto err;
3964 if (*fw_size > U32_MAX - fw_entry->size) {
3965 pr_err("QSEE %s app file size overflow\n", appname);
3966 ret = -EINVAL;
3967 goto err;
3968 }
3969 *fw_size += fw_entry->size;
3970 release_firmware(fw_entry);
3971 fw_entry = NULL;
3972 }
3973
3974 return ret;
3975err:
3976 if (fw_entry)
3977 release_firmware(fw_entry);
3978 *fw_size = 0;
3979 return ret;
3980}
3981
3982static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
3983 uint32_t fw_size,
3984 struct qseecom_load_app_ireq *load_req)
3985{
3986 int ret = -1;
3987 int i = 0, rc = 0;
3988 const struct firmware *fw_entry = NULL;
3989 char fw_name[MAX_APP_NAME_SIZE];
3990 u8 *img_data_ptr = img_data;
3991 struct elf32_hdr *ehdr;
3992 struct elf64_hdr *ehdr64;
3993 int num_images = 0;
3994 unsigned char app_arch = 0;
3995
3996 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
3997 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
3998 if (rc) {
3999 ret = -EIO;
4000 goto err;
4001 }
4002
4003 load_req->img_len = fw_entry->size;
4004 if (load_req->img_len > fw_size) {
4005 pr_err("app %s size %zu is larger than buf size %u\n",
4006 appname, fw_entry->size, fw_size);
4007 ret = -EINVAL;
4008 goto err;
4009 }
4010 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4011 img_data_ptr = img_data_ptr + fw_entry->size;
4012 load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
4013
4014 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4015 if (app_arch == ELFCLASS32) {
4016 ehdr = (struct elf32_hdr *)fw_entry->data;
4017 num_images = ehdr->e_phnum;
4018 } else if (app_arch == ELFCLASS64) {
4019 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4020 num_images = ehdr64->e_phnum;
4021 } else {
4022 pr_err("QSEE %s app, arch %u is not supported\n",
4023 appname, app_arch);
4024 ret = -EIO;
4025 goto err;
4026 }
4027 release_firmware(fw_entry);
4028 fw_entry = NULL;
4029 for (i = 0; i < num_images; i++) {
4030 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4031 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4032 if (ret) {
4033 pr_err("Failed to locate blob %s\n", fw_name);
4034 goto err;
4035 }
4036 if ((fw_entry->size > U32_MAX - load_req->img_len) ||
4037 (fw_entry->size + load_req->img_len > fw_size)) {
4038 pr_err("Invalid file size for %s\n", fw_name);
4039 ret = -EINVAL;
4040 goto err;
4041 }
4042 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4043 img_data_ptr = img_data_ptr + fw_entry->size;
4044 load_req->img_len += fw_entry->size;
4045 release_firmware(fw_entry);
4046 fw_entry = NULL;
4047 }
4048 return ret;
4049err:
4050 release_firmware(fw_entry);
4051 return ret;
4052}
4053
4054static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4055 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4056{
4057 size_t len = 0;
4058 int ret = 0;
4059 ion_phys_addr_t pa;
4060 struct ion_handle *ihandle = NULL;
4061 u8 *img_data = NULL;
4062 int retry = 0;
4063
4064 do {
4065 if (retry++) {
4066 mutex_unlock(&app_access_lock);
4067 msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
4068 mutex_lock(&app_access_lock);
4069 }
4070 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
4071 SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), 0);
4072 } while (IS_ERR_OR_NULL(ihandle) &&
4073 (retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));
4074
4075 if (IS_ERR_OR_NULL(ihandle)) {
4076 pr_err("ION alloc failed\n");
4077 return -ENOMEM;
4078 }
4079 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4080 ihandle);
4081
4082 if (IS_ERR_OR_NULL(img_data)) {
4083 pr_err("ION memory mapping for image loading failed\n");
4084 ret = -ENOMEM;
4085 goto exit_ion_free;
4086 }
4087 /* Get the physical address of the ION BUF */
4088 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4089 if (ret) {
4090 pr_err("physical memory retrieval failure\n");
4091 ret = -EIO;
4092 goto exit_ion_unmap_kernel;
4093 }
4094
4095 *pihandle = ihandle;
4096 *data = img_data;
4097 *paddr = pa;
4098 return ret;
4099
4100exit_ion_unmap_kernel:
4101 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4102exit_ion_free:
4103 ion_free(qseecom.ion_clnt, ihandle);
4104 ihandle = NULL;
4105 return ret;
4106}
4107
4108static void __qseecom_free_img_data(struct ion_handle **ihandle)
4109{
4110 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4111 ion_free(qseecom.ion_clnt, *ihandle);
4112 *ihandle = NULL;
4113}
4114
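/*
 * Load a trusted app into QSEE on behalf of an in-kernel client: size and
 * read the split firmware image, load cmnlib/cmnlib64 first if the app's ELF
 * class needs it and it is not yet resident, stage the image in an ION
 * buffer, vote for bus bandwidth and crypto clocks, clean the caches, and
 * issue QSEOS_APP_START_COMMAND. On success (including a completed
 * INCOMPLETE sequence) the app id assigned by QSEE is returned via @app_id;
 * QSEOS_RESULT_FAIL_APP_ALREADY_LOADED is mapped to -EEXIST so the caller
 * can re-query the app.
 */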
4115static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4116 uint32_t *app_id)
4117{
4118 int ret = -1;
4119 uint32_t fw_size = 0;
4120 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4121 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4122 struct qseecom_command_scm_resp resp;
4123 u8 *img_data = NULL;
4124 ion_phys_addr_t pa = 0;
4125 struct ion_handle *ihandle = NULL;
4126 void *cmd_buf = NULL;
4127 size_t cmd_len;
4128 uint32_t app_arch = 0;
4129
4130 if (!data || !appname || !app_id) {
4131 pr_err("Null pointer to data or appname or appid\n");
4132 return -EINVAL;
4133 }
4134 *app_id = 0;
4135 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4136 return -EIO;
4137 data->client.app_arch = app_arch;
4138
4139 /* Check and load cmnlib */
4140 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4141 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4142 ret = qseecom_load_commonlib_image(data, "cmnlib");
4143 if (ret) {
4144 pr_err("failed to load cmnlib\n");
4145 return -EIO;
4146 }
4147 qseecom.commonlib_loaded = true;
4148 pr_debug("cmnlib is loaded\n");
4149 }
4150
4151 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4152 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4153 if (ret) {
4154 pr_err("failed to load cmnlib64\n");
4155 return -EIO;
4156 }
4157 qseecom.commonlib64_loaded = true;
4158 pr_debug("cmnlib64 is loaded\n");
4159 }
4160 }
4161
4162 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4163 if (ret)
4164 return ret;
4165
4166 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4167 if (ret) {
4168 ret = -EIO;
4169 goto exit_free_img_data;
4170 }
4171
4172 /* Populate the load_req parameters */
4173 if (qseecom.qsee_version < QSEE_VERSION_40) {
4174 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4175 load_req.mdt_len = load_req.mdt_len;
4176 load_req.img_len = load_req.img_len;
4177 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4178 load_req.phy_addr = (uint32_t)pa;
4179 cmd_buf = (void *)&load_req;
4180 cmd_len = sizeof(struct qseecom_load_app_ireq);
4181 } else {
4182 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4183 load_req_64bit.mdt_len = load_req.mdt_len;
4184 load_req_64bit.img_len = load_req.img_len;
4185 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4186 load_req_64bit.phy_addr = (uint64_t)pa;
4187 cmd_buf = (void *)&load_req_64bit;
4188 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4189 }
4190
4191 if (qseecom.support_bus_scaling) {
4192 mutex_lock(&qsee_bw_mutex);
4193 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4194 mutex_unlock(&qsee_bw_mutex);
4195 if (ret) {
4196 ret = -EIO;
4197 goto exit_free_img_data;
4198 }
4199 }
4200
4201 ret = __qseecom_enable_clk_scale_up(data);
4202 if (ret) {
4203 ret = -EIO;
4204 goto exit_unregister_bus_bw_need;
4205 }
4206
4207 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4208 img_data, fw_size,
4209 ION_IOC_CLEAN_INV_CACHES);
4210 if (ret) {
4211 pr_err("cache operation failed %d\n", ret);
4212 goto exit_disable_clk_vote;
4213 }
4214
4215 /* SCM_CALL to load the image */
4216 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4217 &resp, sizeof(resp));
4218 if (ret) {
4219 pr_err("scm_call to load failed : ret %d, result %x\n",
4220 ret, resp.result);
4221 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4222 ret = -EEXIST;
4223 else
4224 ret = -EIO;
4225 goto exit_disable_clk_vote;
4226 }
4227
4228 switch (resp.result) {
4229 case QSEOS_RESULT_SUCCESS:
4230 *app_id = resp.data;
4231 break;
4232 case QSEOS_RESULT_INCOMPLETE:
4233 ret = __qseecom_process_incomplete_cmd(data, &resp);
4234 if (ret)
4235 pr_err("process_incomplete_cmd FAILED\n");
4236 else
4237 *app_id = resp.data;
4238 break;
4239 case QSEOS_RESULT_FAILURE:
4240 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4241 break;
4242 default:
4243 pr_err("scm call return unknown response %d\n", resp.result);
4244 ret = -EINVAL;
4245 break;
4246 }
4247
4248exit_disable_clk_vote:
4249 __qseecom_disable_clk_scale_down(data);
4250
4251exit_unregister_bus_bw_need:
4252 if (qseecom.support_bus_scaling) {
4253 mutex_lock(&qsee_bw_mutex);
4254 qseecom_unregister_bus_bandwidth_needs(data);
4255 mutex_unlock(&qsee_bw_mutex);
4256 }
4257
4258exit_free_img_data:
4259 __qseecom_free_img_data(&ihandle);
4260 return ret;
4261}
4262
4263static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
4264 char *cmnlib_name)
4265{
4266 int ret = 0;
4267 uint32_t fw_size = 0;
4268 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4269 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4270 struct qseecom_command_scm_resp resp;
4271 u8 *img_data = NULL;
4272 ion_phys_addr_t pa = 0;
4273 void *cmd_buf = NULL;
4274 size_t cmd_len;
4275 uint32_t app_arch = 0;
4276 struct ion_handle *cmnlib_ion_handle = NULL;
4277
4278 if (!cmnlib_name) {
4279 pr_err("cmnlib_name is NULL\n");
4280 return -EINVAL;
4281 }
4282 if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
4283 pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
4284 cmnlib_name, strlen(cmnlib_name));
4285 return -EINVAL;
4286 }
4287
4288 if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
4289 return -EIO;
4290
4291 ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
4292 &img_data, fw_size, &pa);
4293 if (ret)
4294 return -EIO;
4295
4296 ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
4297 if (ret) {
4298 ret = -EIO;
4299 goto exit_free_img_data;
4300 }
4301 if (qseecom.qsee_version < QSEE_VERSION_40) {
4302 load_req.phy_addr = (uint32_t)pa;
4303 load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4304 cmd_buf = (void *)&load_req;
4305 cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
4306 } else {
4307 load_req_64bit.phy_addr = (uint64_t)pa;
4308 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4309 load_req_64bit.img_len = load_req.img_len;
4310 load_req_64bit.mdt_len = load_req.mdt_len;
4311 cmd_buf = (void *)&load_req_64bit;
4312 cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
4313 }
4314
4315 if (qseecom.support_bus_scaling) {
4316 mutex_lock(&qsee_bw_mutex);
4317 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4318 mutex_unlock(&qsee_bw_mutex);
4319 if (ret) {
4320 ret = -EIO;
4321 goto exit_free_img_data;
4322 }
4323 }
4324
4325 /* Vote for the SFPB clock */
4326 ret = __qseecom_enable_clk_scale_up(data);
4327 if (ret) {
4328 ret = -EIO;
4329 goto exit_unregister_bus_bw_need;
4330 }
4331
4332 ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
4333 img_data, fw_size,
4334 ION_IOC_CLEAN_INV_CACHES);
4335 if (ret) {
4336 pr_err("cache operation failed %d\n", ret);
4337 goto exit_disable_clk_vote;
4338 }
4339
4340 /* SCM_CALL to load the image */
4341 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4342 &resp, sizeof(resp));
4343 if (ret) {
4344 pr_err("scm_call to load failed : ret %d\n", ret);
4345 ret = -EIO;
4346 goto exit_disable_clk_vote;
4347 }
4348
4349 switch (resp.result) {
4350 case QSEOS_RESULT_SUCCESS:
4351 break;
4352 case QSEOS_RESULT_FAILURE:
4353 pr_err("scm call failed w/response result%d\n", resp.result);
4354 ret = -EINVAL;
4355 goto exit_disable_clk_vote;
4356 case QSEOS_RESULT_INCOMPLETE:
4357 ret = __qseecom_process_incomplete_cmd(data, &resp);
4358 if (ret) {
4359 pr_err("process_incomplete_cmd failed err: %d\n", ret);
4360 goto exit_disable_clk_vote;
4361 }
4362 break;
4363 default:
4364 pr_err("scm call return unknown response %d\n", resp.result);
4365 ret = -EINVAL;
4366 goto exit_disable_clk_vote;
4367 }
4368
4369exit_disable_clk_vote:
4370 __qseecom_disable_clk_scale_down(data);
4371
4372exit_unregister_bus_bw_need:
4373 if (qseecom.support_bus_scaling) {
4374 mutex_lock(&qsee_bw_mutex);
4375 qseecom_unregister_bus_bandwidth_needs(data);
4376 mutex_unlock(&qsee_bw_mutex);
4377 }
4378
4379exit_free_img_data:
4380 __qseecom_free_img_data(&cmnlib_ion_handle);
4381 return ret;
4382}
4383
4384static int qseecom_unload_commonlib_image(void)
4385{
4386 int ret = -EINVAL;
4387 struct qseecom_unload_lib_image_ireq unload_req = {0};
4388 struct qseecom_command_scm_resp resp;
4389
4390 /* Populate the remaining parameters */
4391 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4392
4393 /* SCM_CALL to load the image */
4394 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4395 sizeof(struct qseecom_unload_lib_image_ireq),
4396 &resp, sizeof(resp));
4397 if (ret) {
4398 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4399 ret = -EIO;
4400 } else {
4401 switch (resp.result) {
4402 case QSEOS_RESULT_SUCCESS:
4403 break;
4404 case QSEOS_RESULT_FAILURE:
4405 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4406 break;
4407 default:
4408 pr_err("scm call return unknown response %d\n",
4409 resp.result);
4410 ret = -EINVAL;
4411 break;
4412 }
4413 }
4414
4415 return ret;
4416}
4417
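/*
 * qseecom_start_app() - in-kernel API for other drivers to open a session
 * with a trusted app. It allocates a qseecom handle plus an ION-backed
 * shared buffer of @size bytes (exposed as (*handle)->sbuf), reuses the app
 * if QSEE already has it loaded, and otherwise loads it through
 * __qseecom_load_fw().
 *
 * Minimal illustrative usage from a hypothetical kernel client ("sampleapp"
 * and the 256-byte request/response split are made up for this sketch; real
 * clients define their own command layout inside the shared buffer and keep
 * all offsets within sbuf_len):
 *
 *	struct qseecom_handle *handle = NULL;
 *	int ret = qseecom_start_app(&handle, "sampleapp", 4096);
 *	if (ret)
 *		return ret;
 *	ret = qseecom_send_command(handle, handle->sbuf, 256,
 *				   handle->sbuf + 256, 256);
 *	qseecom_shutdown_app(&handle);
 */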
4418int qseecom_start_app(struct qseecom_handle **handle,
4419 char *app_name, uint32_t size)
4420{
4421 int32_t ret = 0;
4422 unsigned long flags = 0;
4423 struct qseecom_dev_handle *data = NULL;
4424 struct qseecom_check_app_ireq app_ireq;
4425 struct qseecom_registered_app_list *entry = NULL;
4426 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4427 bool found_app = false;
4428 size_t len;
4429 ion_phys_addr_t pa;
4430 uint32_t fw_size, app_arch;
4431 uint32_t app_id = 0;
4432
4433 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4434 pr_err("Not allowed to be called in %d state\n",
4435 atomic_read(&qseecom.qseecom_state));
4436 return -EPERM;
4437 }
4438 if (!app_name) {
4439 pr_err("failed to get the app name\n");
4440 return -EINVAL;
4441 }
4442
4443 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
4444 pr_err("The app_name (%s) with length %zu is not valid\n",
4445 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
4446 return -EINVAL;
4447 }
4448
4449 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4450 if (!(*handle))
4451 return -ENOMEM;
4452
4453 data = kzalloc(sizeof(*data), GFP_KERNEL);
4454 if (!data) {
4455 ret = -ENOMEM;
4456 goto exit_handle_free;
4457 }
4458 data->abort = 0;
4459 data->type = QSEECOM_CLIENT_APP;
4460 data->released = false;
4461 data->client.sb_length = size;
4462 data->client.user_virt_sb_base = 0;
4463 data->client.ihandle = NULL;
4464
4465 init_waitqueue_head(&data->abort_wq);
4466
4467 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4468 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4469 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4470 pr_err("Ion client could not retrieve the handle\n");
4471 ret = -ENOMEM;
4472 goto exit_data_free;
4473 }
4474 mutex_lock(&app_access_lock);
4475
4476recheck:
4477 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4478 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4479 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4480 if (ret)
4481 goto exit_ion_free;
4482
4483 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4484 if (app_id) {
4485 pr_warn("App id %d for [%s] app exists\n", app_id,
4486 (char *)app_ireq.app_name);
4487 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4488 list_for_each_entry(entry,
4489 &qseecom.registered_app_list_head, list){
4490 if (entry->app_id == app_id) {
4491 entry->ref_cnt++;
4492 found_app = true;
4493 break;
4494 }
4495 }
4496 spin_unlock_irqrestore(
4497 &qseecom.registered_app_list_lock, flags);
4498 if (!found_app)
4499 pr_warn("App_id %d [%s] was loaded but not registered\n",
4500 app_id, (char *)app_ireq.app_name);
4501 } else {
4502 /* load the app and get the app_id */
4503 pr_debug("%s: Loading app for the first time'\n",
4504 qseecom.pdev->init_name);
4505 ret = __qseecom_load_fw(data, app_name, &app_id);
4506 if (ret == -EEXIST) {
4507 pr_err("recheck if TA %s is loaded\n", app_name);
4508 goto recheck;
4509 } else if (ret < 0)
4510 goto exit_ion_free;
4511 }
4512 data->client.app_id = app_id;
4513 if (!found_app) {
4514 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4515 if (!entry) {
4516 pr_err("kmalloc for app entry failed\n");
4517 ret = -ENOMEM;
4518 goto exit_ion_free;
4519 }
4520 entry->app_id = app_id;
4521 entry->ref_cnt = 1;
4522 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4523 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4524 ret = -EIO;
4525 kfree(entry);
4526 goto exit_ion_free; /* entry already freed here */
4527 }
4528 entry->app_arch = app_arch;
4529 entry->app_blocked = false;
4530 entry->blocked_on_listener_id = 0;
4531 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4532 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4533 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4534 flags);
4535 }
4536
4537 /* Get the physical address of the ION BUF */
4538 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4539 if (ret) {
4540 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4541 ret);
4542 goto exit_entry_free;
4543 }
4544
4545 /* Populate the structure for sending scm call to load image */
4546 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4547 data->client.ihandle);
4548 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4549 pr_err("ION memory mapping for client shared buf failed\n");
4550 ret = -ENOMEM;
4551 goto exit_entry_free;
4552 }
4553 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4554 data->client.sb_phys = (phys_addr_t)pa;
4555 (*handle)->dev = (void *)data;
4556 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4557 (*handle)->sbuf_len = data->client.sb_length;
4558
4559 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4560 if (!kclient_entry) {
4561 ret = -ENOMEM;
4562 goto exit_ion_unmap_kernel;
4563 }
4564 kclient_entry->handle = *handle;
4565
4566 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4567 list_add_tail(&kclient_entry->list,
4568 &qseecom.registered_kclient_list_head);
4569 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4570
4571 mutex_unlock(&app_access_lock);
4572 return 0;
4573
4574exit_ion_unmap_kernel:
4575 if (!IS_ERR_OR_NULL(data->client.ihandle))
4576 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4577exit_entry_free:
4578 kfree(entry);
4579exit_ion_free:
4580 mutex_unlock(&app_access_lock);
4581 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4582 ion_free(qseecom.ion_clnt, data->client.ihandle);
4583 data->client.ihandle = NULL;
4584 }
4585exit_data_free:
4586 kfree(data);
4587exit_handle_free:
4588 if (*handle) {
4589 kfree(*handle);
4590 *handle = NULL;
4591 }
4592 return ret;
4593}
4594EXPORT_SYMBOL(qseecom_start_app);
4595
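/*
 * Reverse of qseecom_start_app(): drop the kernel client from the registered
 * kclient list, unload the app through qseecom_unload_app(), and on success
 * free both the private data and the caller's handle, leaving *handle NULL.
 */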
4596int qseecom_shutdown_app(struct qseecom_handle **handle)
4597{
4598 int ret = -EINVAL;
4599 struct qseecom_dev_handle *data;
4600
4601 struct qseecom_registered_kclient_list *kclient = NULL;
4602 unsigned long flags = 0;
4603 bool found_handle = false;
4604
4605 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4606 pr_err("Not allowed to be called in %d state\n",
4607 atomic_read(&qseecom.qseecom_state));
4608 return -EPERM;
4609 }
4610
4611 if ((handle == NULL) || (*handle == NULL)) {
4612 pr_err("Handle is not initialized\n");
4613 return -EINVAL;
4614 }
4615 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4616 mutex_lock(&app_access_lock);
4617
4618 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4619 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4620 list) {
4621 if (kclient->handle == (*handle)) {
4622 list_del(&kclient->list);
4623 found_handle = true;
4624 break;
4625 }
4626 }
4627 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4628 if (!found_handle)
4629 pr_err("Unable to find the handle, exiting\n");
4630 else
4631 ret = qseecom_unload_app(data, false);
4632
4633 mutex_unlock(&app_access_lock);
4634 if (ret == 0) {
4635 kzfree(data);
4636 kzfree(*handle);
4637 kzfree(kclient);
4638 *handle = NULL;
4639 }
4640
4641 return ret;
4642}
4643EXPORT_SYMBOL(qseecom_shutdown_app);
4644
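/*
 * Send one request/response pair to the trusted app behind @handle. The
 * buffers are validated, app_access_lock is taken, and the crypto clocks are
 * voted for if needed (on targets where HLOS owns them and no vote is
 * currently active) before the command is issued via __qseecom_send_cmd().
 */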
4645int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
4646 uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
4647{
4648 int ret = 0;
4649 struct qseecom_send_cmd_req req = {0, 0, 0, 0};
4650 struct qseecom_dev_handle *data;
4651 bool perf_enabled = false;
4652
4653 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4654 pr_err("Not allowed to be called in %d state\n",
4655 atomic_read(&qseecom.qseecom_state));
4656 return -EPERM;
4657 }
4658
4659 if (handle == NULL) {
4660 pr_err("Handle is not initialized\n");
4661 return -EINVAL;
4662 }
4663 data = handle->dev;
4664
4665 req.cmd_req_len = sbuf_len;
4666 req.resp_len = rbuf_len;
4667 req.cmd_req_buf = send_buf;
4668 req.resp_buf = resp_buf;
4669
4670 if (__validate_send_cmd_inputs(data, &req))
4671 return -EINVAL;
4672
4673 mutex_lock(&app_access_lock);
4674 if (qseecom.support_bus_scaling) {
4675 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
4676 if (ret) {
4677 pr_err("Failed to set bw.\n");
4678 mutex_unlock(&app_access_lock);
4679 return ret;
4680 }
4681 }
4682 /*
4683 * On targets where crypto clock is handled by HLOS,
4684 * if clk_access_cnt is zero and perf_enabled is false,
4685 * then the crypto clock was not enabled before sending cmd
4686 * to tz, qseecom will enable the clock to avoid service failure.
4687 */
4688 if (!qseecom.no_clock_support &&
4689 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
4690 pr_debug("ce clock is not enabled!\n");
4691 ret = qseecom_perf_enable(data);
4692 if (ret) {
4693 pr_err("Failed to vote for clock with err %d\n",
4694 ret);
4695 mutex_unlock(&app_access_lock);
4696 return -EINVAL;
4697 }
4698 perf_enabled = true;
4699 }
4700 if (!strcmp(data->client.app_name, "securemm"))
4701 data->use_legacy_cmd = true;
4702
4703 ret = __qseecom_send_cmd(data, &req);
4704 data->use_legacy_cmd = false;
4705 if (qseecom.support_bus_scaling)
4706 __qseecom_add_bw_scale_down_timer(
4707 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
4708
4709 if (perf_enabled) {
4710 qsee_disable_clock_vote(data, CLK_DFAB);
4711 qsee_disable_clock_vote(data, CLK_SFPB);
4712 }
4713
4714 mutex_unlock(&app_access_lock);
4715
4716 if (ret)
4717 return ret;
4718
4719 pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
4720 req.resp_len, req.resp_buf);
4721 return ret;
4722}
4723EXPORT_SYMBOL(qseecom_send_command);
4724
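/*
 * Let a kernel client pre-vote (high == true) or drop (high == false) the
 * crypto clock / bus bandwidth votes around a burst of commands, either
 * through the bus-scaling framework or by toggling the clock votes directly
 * when bus scaling is not supported.
 */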
4725int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4726{
4727 int ret = 0;
4728
4729 if ((handle == NULL) || (handle->dev == NULL)) {
4730 pr_err("No valid kernel client\n");
4731 return -EINVAL;
4732 }
4733 if (high) {
4734 if (qseecom.support_bus_scaling) {
4735 mutex_lock(&qsee_bw_mutex);
4736 __qseecom_register_bus_bandwidth_needs(handle->dev,
4737 HIGH);
4738 mutex_unlock(&qsee_bw_mutex);
4739 } else {
4740 ret = qseecom_perf_enable(handle->dev);
4741 if (ret)
4742 pr_err("Failed to vote for clock with err %d\n",
4743 ret);
4744 }
4745 } else {
4746 if (!qseecom.support_bus_scaling) {
4747 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4748 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4749 } else {
4750 mutex_lock(&qsee_bw_mutex);
4751 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4752 mutex_unlock(&qsee_bw_mutex);
4753 }
4754 }
4755 return ret;
4756}
4757EXPORT_SYMBOL(qseecom_set_bandwidth);
4758
4759int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
4760{
4761 struct qseecom_registered_app_list dummy_app_entry = { {0} };
4762 struct qseecom_dev_handle dummy_private_data = {0};
4763 struct qseecom_command_scm_resp resp;
4764 int ret = 0;
4765
4766 if (!desc) {
4767 pr_err("desc is NULL\n");
4768 return -EINVAL;
4769 }
4770
4771 resp.result = desc->ret[0]; /*req_cmd*/
4772 resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
4773 resp.data = desc->ret[2]; /*listener_id*/
4774
4775 dummy_private_data.client.app_id = desc->ret[1];
4776 dummy_app_entry.app_id = desc->ret[1];
4777
4778 mutex_lock(&app_access_lock);
4779 if (qseecom.qsee_reentrancy_support)
4780 ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
4781 &dummy_private_data);
4782 else
4783 ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
4784 &resp);
4785 mutex_unlock(&app_access_lock);
4786 if (ret)
4787 pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
4788 (int)desc->ret[0], (int)desc->ret[2],
4789 (int)desc->ret[1], ret);
4790 desc->ret[0] = resp.result;
4791 desc->ret[1] = resp.resp_type;
4792 desc->ret[2] = resp.data;
4793 return ret;
4794}
4795EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4796
4797static int qseecom_send_resp(void)
4798{
4799 qseecom.send_resp_flag = 1;
4800 wake_up_interruptible(&qseecom.send_resp_wq);
4801 return 0;
4802}
4803
4804static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4805{
4806 struct qseecom_registered_listener_list *this_lstnr = NULL;
4807
4808 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4809 this_lstnr = __qseecom_find_svc(data->listener.id);
4810 if (this_lstnr == NULL)
4811 return -EINVAL;
4812 qseecom.send_resp_flag = 1;
4813 this_lstnr->send_resp_flag = 1;
4814 wake_up_interruptible(&qseecom.send_resp_wq);
4815 return 0;
4816}
4817
4818static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
4819 struct qseecom_send_modfd_listener_resp *resp,
4820 struct qseecom_registered_listener_list *this_lstnr)
4821{
4822 int i;
4823
4824 if (!data || !resp || !this_lstnr) {
4825 pr_err("listener handle or resp msg is null\n");
4826 return -EINVAL;
4827 }
4828
4829 if (resp->resp_buf_ptr == NULL) {
4830 pr_err("resp buffer is null\n");
4831 return -EINVAL;
4832 }
4833 /* validate resp buf length */
4834 if ((resp->resp_len == 0) ||
4835 (resp->resp_len > this_lstnr->sb_length)) {
4836 pr_err("resp buf length %d not valid\n", resp->resp_len);
4837 return -EINVAL;
4838 }
4839
4840 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
4841 pr_err("Integer overflow in resp_len & resp_buf\n");
4842 return -EINVAL;
4843 }
4844 if ((uintptr_t)this_lstnr->user_virt_sb_base >
4845 (ULONG_MAX - this_lstnr->sb_length)) {
4846 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
4847 return -EINVAL;
4848 }
4849 /* validate resp buf */
4850 if (((uintptr_t)resp->resp_buf_ptr <
4851 (uintptr_t)this_lstnr->user_virt_sb_base) ||
4852 ((uintptr_t)resp->resp_buf_ptr >=
4853 ((uintptr_t)this_lstnr->user_virt_sb_base +
4854 this_lstnr->sb_length)) ||
4855 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
4856 ((uintptr_t)this_lstnr->user_virt_sb_base +
4857 this_lstnr->sb_length))) {
4858 pr_err("resp buf is out of shared buffer region\n");
4859 return -EINVAL;
4860 }
4861
4862 /* validate offsets */
4863 for (i = 0; i < MAX_ION_FD; i++) {
4864 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
4865 pr_err("Invalid offset %d = 0x%x\n",
4866 i, resp->ifd_data[i].cmd_buf_offset);
4867 return -EINVAL;
4868 }
4869 }
4870
4871 return 0;
4872}
4873
4874static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4875 void __user *argp, bool is_64bit_addr)
4876{
4877 struct qseecom_send_modfd_listener_resp resp;
4878 struct qseecom_registered_listener_list *this_lstnr = NULL;
4879
4880 if (copy_from_user(&resp, argp, sizeof(resp))) {
4881 pr_err("copy_from_user failed");
4882 return -EINVAL;
4883 }
4884
4885 this_lstnr = __qseecom_find_svc(data->listener.id);
4886 if (this_lstnr == NULL)
4887 return -EINVAL;
4888
4889 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
4890 return -EINVAL;
4891
4892 resp.resp_buf_ptr = this_lstnr->sb_virt +
4893 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
4894
4895 if (!is_64bit_addr)
4896 __qseecom_update_cmd_buf(&resp, false, data);
4897 else
4898 __qseecom_update_cmd_buf_64(&resp, false, data);
4899 qseecom.send_resp_flag = 1;
4900 this_lstnr->send_resp_flag = 1;
4901 wake_up_interruptible(&qseecom.send_resp_wq);
4902 return 0;
4903}
4904
4905static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4906 void __user *argp)
4907{
4908 return __qseecom_send_modfd_resp(data, argp, false);
4909}
4910
4911static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
4912 void __user *argp)
4913{
4914 return __qseecom_send_modfd_resp(data, argp, true);
4915}
4916
4917static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
4918 void __user *argp)
4919{
4920 struct qseecom_qseos_version_req req;
4921
4922 if (copy_from_user(&req, argp, sizeof(req))) {
4923 pr_err("copy_from_user failed");
4924 return -EINVAL;
4925 }
4926 req.qseos_version = qseecom.qseos_version;
4927 if (copy_to_user(argp, &req, sizeof(req))) {
4928 pr_err("copy_to_user failed");
4929 return -EINVAL;
4930 }
4931 return 0;
4932}
4933
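/*
 * Reference-counted enable of the CE core, interface and bus clocks for the
 * selected hardware instance (CLK_QSEE or CLK_CE_DRV): the clocks are only
 * prepared/enabled on the 0 -> 1 transition of clk_access_cnt and disabled
 * again on the 1 -> 0 transition in __qseecom_disable_clk(), with
 * clk_access_lock serializing the counter updates.
 */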
4934static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
4935{
4936 int rc = 0;
4937 struct qseecom_clk *qclk = NULL;
4938
4939 if (qseecom.no_clock_support)
4940 return 0;
4941
4942 if (ce == CLK_QSEE)
4943 qclk = &qseecom.qsee;
4944 if (ce == CLK_CE_DRV)
4945 qclk = &qseecom.ce_drv;
4946
4947 if (qclk == NULL) {
4948 pr_err("CLK type not supported\n");
4949 return -EINVAL;
4950 }
4951 mutex_lock(&clk_access_lock);
4952
4953 if (qclk->clk_access_cnt == ULONG_MAX) {
4954 pr_err("clk_access_cnt beyond limitation\n");
4955 goto err;
4956 }
4957 if (qclk->clk_access_cnt > 0) {
4958 qclk->clk_access_cnt++;
4959 mutex_unlock(&clk_access_lock);
4960 return rc;
4961 }
4962
4963 /* Enable CE core clk */
4964 if (qclk->ce_core_clk != NULL) {
4965 rc = clk_prepare_enable(qclk->ce_core_clk);
4966 if (rc) {
4967 pr_err("Unable to enable/prepare CE core clk\n");
4968 goto err;
4969 }
4970 }
4971 /* Enable CE clk */
4972 if (qclk->ce_clk != NULL) {
4973 rc = clk_prepare_enable(qclk->ce_clk);
4974 if (rc) {
4975 pr_err("Unable to enable/prepare CE iface clk\n");
4976 goto ce_clk_err;
4977 }
4978 }
4979 /* Enable AXI clk */
4980 if (qclk->ce_bus_clk != NULL) {
4981 rc = clk_prepare_enable(qclk->ce_bus_clk);
4982 if (rc) {
4983 pr_err("Unable to enable/prepare CE bus clk\n");
4984 goto ce_bus_clk_err;
4985 }
4986 }
4987 qclk->clk_access_cnt++;
4988 mutex_unlock(&clk_access_lock);
4989 return 0;
4990
4991ce_bus_clk_err:
4992 if (qclk->ce_clk != NULL)
4993 clk_disable_unprepare(qclk->ce_clk);
4994ce_clk_err:
4995 if (qclk->ce_core_clk != NULL)
4996 clk_disable_unprepare(qclk->ce_core_clk);
4997err:
4998 mutex_unlock(&clk_access_lock);
4999 return -EIO;
5000}
5001
5002static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5003{
5004 struct qseecom_clk *qclk;
5005
5006 if (qseecom.no_clock_support)
5007 return;
5008
5009 if (ce == CLK_QSEE)
5010 qclk = &qseecom.qsee;
5011 else
5012 qclk = &qseecom.ce_drv;
5013
5014 mutex_lock(&clk_access_lock);
5015
5016 if (qclk->clk_access_cnt == 0) {
5017 mutex_unlock(&clk_access_lock);
5018 return;
5019 }
5020
5021 if (qclk->clk_access_cnt == 1) {
5022 if (qclk->ce_clk != NULL)
5023 clk_disable_unprepare(qclk->ce_clk);
5024 if (qclk->ce_core_clk != NULL)
5025 clk_disable_unprepare(qclk->ce_core_clk);
5026 if (qclk->ce_bus_clk != NULL)
5027 clk_disable_unprepare(qclk->ce_bus_clk);
5028 }
5029 qclk->clk_access_cnt--;
5030 mutex_unlock(&clk_access_lock);
5031}
5032
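/*
 * Map the DFAB/SFPB vote counters onto the bus-scale client levels used
 * below: 0 when nothing is voted, 1 for DFAB only, 2 for SFPB only and 3
 * when both are active. The first voter also enables the QSEE core clock
 * when a dedicated core source clock exists.
 */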
5033static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
5034 int32_t clk_type)
5035{
5036 int ret = 0;
5037 struct qseecom_clk *qclk;
5038
5039 if (qseecom.no_clock_support)
5040 return 0;
5041
5042 qclk = &qseecom.qsee;
5043 if (!qseecom.qsee_perf_client)
5044 return ret;
5045
5046 switch (clk_type) {
5047 case CLK_DFAB:
5048 mutex_lock(&qsee_bw_mutex);
5049 if (!qseecom.qsee_bw_count) {
5050 if (qseecom.qsee_sfpb_bw_count > 0)
5051 ret = msm_bus_scale_client_update_request(
5052 qseecom.qsee_perf_client, 3);
5053 else {
5054 if (qclk->ce_core_src_clk != NULL)
5055 ret = __qseecom_enable_clk(CLK_QSEE);
5056 if (!ret) {
5057 ret =
5058 msm_bus_scale_client_update_request(
5059 qseecom.qsee_perf_client, 1);
5060 if ((ret) &&
5061 (qclk->ce_core_src_clk != NULL))
5062 __qseecom_disable_clk(CLK_QSEE);
5063 }
5064 }
5065 if (ret)
5066 pr_err("DFAB Bandwidth req failed (%d)\n",
5067 ret);
5068 else {
5069 qseecom.qsee_bw_count++;
5070 data->perf_enabled = true;
5071 }
5072 } else {
5073 qseecom.qsee_bw_count++;
5074 data->perf_enabled = true;
5075 }
5076 mutex_unlock(&qsee_bw_mutex);
5077 break;
5078 case CLK_SFPB:
5079 mutex_lock(&qsee_bw_mutex);
5080 if (!qseecom.qsee_sfpb_bw_count) {
5081 if (qseecom.qsee_bw_count > 0)
5082 ret = msm_bus_scale_client_update_request(
5083 qseecom.qsee_perf_client, 3);
5084 else {
5085 if (qclk->ce_core_src_clk != NULL)
5086 ret = __qseecom_enable_clk(CLK_QSEE);
5087 if (!ret) {
5088 ret =
5089 msm_bus_scale_client_update_request(
5090 qseecom.qsee_perf_client, 2);
5091 if ((ret) &&
5092 (qclk->ce_core_src_clk != NULL))
5093 __qseecom_disable_clk(CLK_QSEE);
5094 }
5095 }
5096
5097 if (ret)
5098 pr_err("SFPB Bandwidth req failed (%d)\n",
5099 ret);
5100 else {
5101 qseecom.qsee_sfpb_bw_count++;
5102 data->fast_load_enabled = true;
5103 }
5104 } else {
5105 qseecom.qsee_sfpb_bw_count++;
5106 data->fast_load_enabled = true;
5107 }
5108 mutex_unlock(&qsee_bw_mutex);
5109 break;
5110 default:
5111 pr_err("Clock type not defined\n");
5112 break;
5113 }
5114 return ret;
5115}
5116
5117static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5118 int32_t clk_type)
5119{
5120 int32_t ret = 0;
5121 struct qseecom_clk *qclk;
5122
5123 qclk = &qseecom.qsee;
5124
5125 if (qseecom.no_clock_support)
5126 return;
5127 if (!qseecom.qsee_perf_client)
5128 return;
5129
5130 switch (clk_type) {
5131 case CLK_DFAB:
5132 mutex_lock(&qsee_bw_mutex);
5133 if (qseecom.qsee_bw_count == 0) {
5134 pr_err("Client error.Extra call to disable DFAB clk\n");
5135 mutex_unlock(&qsee_bw_mutex);
5136 return;
5137 }
5138
5139 if (qseecom.qsee_bw_count == 1) {
5140 if (qseecom.qsee_sfpb_bw_count > 0)
5141 ret = msm_bus_scale_client_update_request(
5142 qseecom.qsee_perf_client, 2);
5143 else {
5144 ret = msm_bus_scale_client_update_request(
5145 qseecom.qsee_perf_client, 0);
5146 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5147 __qseecom_disable_clk(CLK_QSEE);
5148 }
5149 if (ret)
5150 pr_err("SFPB Bandwidth req fail (%d)\n",
5151 ret);
5152 else {
5153 qseecom.qsee_bw_count--;
5154 data->perf_enabled = false;
5155 }
5156 } else {
5157 qseecom.qsee_bw_count--;
5158 data->perf_enabled = false;
5159 }
5160 mutex_unlock(&qsee_bw_mutex);
5161 break;
5162 case CLK_SFPB:
5163 mutex_lock(&qsee_bw_mutex);
5164 if (qseecom.qsee_sfpb_bw_count == 0) {
5165 pr_err("Client error.Extra call to disable SFPB clk\n");
5166 mutex_unlock(&qsee_bw_mutex);
5167 return;
5168 }
5169 if (qseecom.qsee_sfpb_bw_count == 1) {
5170 if (qseecom.qsee_bw_count > 0)
5171 ret = msm_bus_scale_client_update_request(
5172 qseecom.qsee_perf_client, 1);
5173 else {
5174 ret = msm_bus_scale_client_update_request(
5175 qseecom.qsee_perf_client, 0);
5176 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5177 __qseecom_disable_clk(CLK_QSEE);
5178 }
5179 if (ret)
5180 pr_err("SFPB Bandwidth req fail (%d)\n",
5181 ret);
5182 else {
5183 qseecom.qsee_sfpb_bw_count--;
5184 data->fast_load_enabled = false;
5185 }
5186 } else {
5187 qseecom.qsee_sfpb_bw_count--;
5188 data->fast_load_enabled = false;
5189 }
5190 mutex_unlock(&qsee_bw_mutex);
5191 break;
5192 default:
5193 pr_err("Clock type not defined\n");
5194 break;
5195 }
5196
5197}
5198
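/*
 * Load a non-TA ELF image supplied by userspace through an ion fd: the
 * buffer's physical address and the mdt/img lengths from the request are
 * packed into a QSEOS_LOAD_EXTERNAL_ELF_COMMAND (32- or 64-bit variant
 * depending on the QSEE version) and sent to TZ after the usual bandwidth,
 * clock and cache-maintenance steps.
 */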
5199static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5200 void __user *argp)
5201{
5202 struct ion_handle *ihandle; /* Ion handle */
5203 struct qseecom_load_img_req load_img_req;
5204 int uret = 0;
5205 int ret;
5206 ion_phys_addr_t pa = 0;
5207 size_t len;
5208 struct qseecom_load_app_ireq load_req;
5209 struct qseecom_load_app_64bit_ireq load_req_64bit;
5210 struct qseecom_command_scm_resp resp;
5211 void *cmd_buf = NULL;
5212 size_t cmd_len;
5213 /* Copy the relevant information needed for loading the image */
5214 if (copy_from_user(&load_img_req,
5215 (void __user *)argp,
5216 sizeof(struct qseecom_load_img_req))) {
5217 pr_err("copy_from_user failed\n");
5218 return -EFAULT;
5219 }
5220
5221 /* Get the handle of the shared fd */
5222 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
5223 load_img_req.ifd_data_fd);
5224 if (IS_ERR_OR_NULL(ihandle)) {
5225 pr_err("Ion client could not retrieve the handle\n");
5226 return -ENOMEM;
5227 }
5228
5229 /* Get the physical address of the ION BUF */
5230 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5231 if (ret) {
5232 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5233 ret);
5234 return ret;
5235 }
5236 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5237 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5238 len, load_img_req.mdt_len,
5239 load_img_req.img_len);
5240 return -EINVAL;
5241 }
5242 /* Populate the structure for sending scm call to load image */
5243 if (qseecom.qsee_version < QSEE_VERSION_40) {
5244 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5245 load_req.mdt_len = load_img_req.mdt_len;
5246 load_req.img_len = load_img_req.img_len;
5247 load_req.phy_addr = (uint32_t)pa;
5248 cmd_buf = (void *)&load_req;
5249 cmd_len = sizeof(struct qseecom_load_app_ireq);
5250 } else {
5251 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5252 load_req_64bit.mdt_len = load_img_req.mdt_len;
5253 load_req_64bit.img_len = load_img_req.img_len;
5254 load_req_64bit.phy_addr = (uint64_t)pa;
5255 cmd_buf = (void *)&load_req_64bit;
5256 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5257 }
5258
5259 if (qseecom.support_bus_scaling) {
5260 mutex_lock(&qsee_bw_mutex);
5261 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5262 mutex_unlock(&qsee_bw_mutex);
5263 if (ret) {
5264 ret = -EIO;
5265 goto exit_cpu_restore;
5266 }
5267 }
5268
5269 /* Vote for the SFPB clock */
5270 ret = __qseecom_enable_clk_scale_up(data);
5271 if (ret) {
5272 ret = -EIO;
5273 goto exit_register_bus_bandwidth_needs;
5274 }
5275 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5276 ION_IOC_CLEAN_INV_CACHES);
5277 if (ret) {
5278 pr_err("cache operation failed %d\n", ret);
5279 goto exit_disable_clock;
5280 }
5281 /* SCM_CALL to load the external elf */
5282 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5283 &resp, sizeof(resp));
5284 if (ret) {
5285 pr_err("scm_call to load failed : ret %d\n",
5286 ret);
5287 ret = -EFAULT;
5288 goto exit_disable_clock;
5289 }
5290
5291 switch (resp.result) {
5292 case QSEOS_RESULT_SUCCESS:
5293 break;
5294 case QSEOS_RESULT_INCOMPLETE:
5295 pr_err("%s: qseos result incomplete\n", __func__);
5296 ret = __qseecom_process_incomplete_cmd(data, &resp);
5297 if (ret)
5298 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5299 break;
5300 case QSEOS_RESULT_FAILURE:
5301 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5302 ret = -EFAULT;
5303 break;
5304 default:
5305 pr_err("scm_call response result %d not supported\n",
5306 resp.result);
5307 ret = -EFAULT;
5308 break;
5309 }
5310
5311exit_disable_clock:
5312 __qseecom_disable_clk_scale_down(data);
5313
5314exit_register_bus_bandwidth_needs:
5315 if (qseecom.support_bus_scaling) {
5316 mutex_lock(&qsee_bw_mutex);
5317 uret = qseecom_unregister_bus_bandwidth_needs(data);
5318 mutex_unlock(&qsee_bw_mutex);
5319 if (uret)
5320 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5321 uret, ret);
5322 }
5323
5324exit_cpu_restore:
5325 /* Deallocate the handle */
5326 if (!IS_ERR_OR_NULL(ihandle))
5327 ion_free(qseecom.ion_clnt, ihandle);
5328 return ret;
5329}
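/*
 * Summary of the external-ELF load path above: the shared fd is imported
 * through ION, its physical address and length are validated against
 * mdt_len/img_len, and a QSEOS_LOAD_EXTERNAL_ELF_COMMAND request (32-bit or
 * 64-bit layout depending on qsee_version) is sent via SCM after voting for
 * bus bandwidth and the crypto clock and cleaning the ION buffer cache.
 * QSEOS_RESULT_INCOMPLETE responses are completed through
 * __qseecom_process_incomplete_cmd() before the votes are dropped and the
 * ION handle is freed.
 */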
5330
5331static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5332{
5333 int ret = 0;
5334 struct qseecom_command_scm_resp resp;
5335 struct qseecom_unload_app_ireq req;
5336
5337 /* unavailable client app */
5338 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5339
5340 /* Populate the structure for sending scm call to unload image */
5341 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5342
5343 /* SCM_CALL to unload the external elf */
5344 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5345 sizeof(struct qseecom_unload_app_ireq),
5346 &resp, sizeof(resp));
5347 if (ret) {
5348 pr_err("scm_call to unload failed : ret %d\n",
5349 ret);
5350 ret = -EFAULT;
5351 goto qseecom_unload_external_elf_scm_err;
5352 }
5353 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5354 ret = __qseecom_process_incomplete_cmd(data, &resp);
5355 if (ret)
5356 pr_err("process_incomplete_cmd fail err: %d\n",
5357 ret);
5358 } else {
5359 if (resp.result != QSEOS_RESULT_SUCCESS) {
5360 pr_err("scm_call to unload image failed resp.result =%d\n",
5361 resp.result);
5362 ret = -EFAULT;
5363 }
5364 }
5365
5366qseecom_unload_external_elf_scm_err:
5367
5368 return ret;
5369}
5370
5371static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5372 void __user *argp)
5373{
5374
5375 int32_t ret;
5376 struct qseecom_qseos_app_load_query query_req;
5377 struct qseecom_check_app_ireq req;
5378 struct qseecom_registered_app_list *entry = NULL;
5379 unsigned long flags = 0;
5380 uint32_t app_arch = 0, app_id = 0;
5381 bool found_app = false;
5382
5383 /* Copy the relevant information needed for loading the image */
5384 if (copy_from_user(&query_req,
5385 (void __user *)argp,
5386 sizeof(struct qseecom_qseos_app_load_query))) {
5387 pr_err("copy_from_user failed\n");
5388 return -EFAULT;
5389 }
5390
5391 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5392 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5393 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5394
5395 ret = __qseecom_check_app_exists(req, &app_id);
5396 if (ret) {
5397 pr_err("scm call to check if app is loaded failed\n");
5398 return ret; /* scm call failed */
5399 }
5400 if (app_id) {
5401 pr_debug("App id %d (%s) already exists\n", app_id,
5402 (char *)(req.app_name));
5403 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5404 list_for_each_entry(entry,
5405 &qseecom.registered_app_list_head, list) {
5406 if (entry->app_id == app_id) {
5407 app_arch = entry->app_arch;
5408 entry->ref_cnt++;
5409 found_app = true;
5410 break;
5411 }
5412 }
5413 spin_unlock_irqrestore(
5414 &qseecom.registered_app_list_lock, flags);
5415 data->client.app_id = app_id;
5416 query_req.app_id = app_id;
5417 if (app_arch) {
5418 data->client.app_arch = app_arch;
5419 query_req.app_arch = app_arch;
5420 } else {
5421 data->client.app_arch = 0;
5422 query_req.app_arch = 0;
5423 }
5424 strlcpy(data->client.app_name, query_req.app_name,
5425 MAX_APP_NAME_SIZE);
5426 /*
5427 * If app was loaded by appsbl before and was not registered,
5428 * register this app now.
5429 */
5430 if (!found_app) {
5431 pr_debug("Register app %d [%s] which was loaded before\n",
5432 app_id, (char *)query_req.app_name);
5433 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5434 if (!entry) {
5435 pr_err("kmalloc for app entry failed\n");
5436 return -ENOMEM;
5437 }
5438 entry->app_id = app_id;
5439 entry->ref_cnt = 1;
5440 entry->app_arch = data->client.app_arch;
5441 strlcpy(entry->app_name, data->client.app_name,
5442 MAX_APP_NAME_SIZE);
5443 entry->app_blocked = false;
5444 entry->blocked_on_listener_id = 0;
5445 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5446 flags);
5447 list_add_tail(&entry->list,
5448 &qseecom.registered_app_list_head);
5449 spin_unlock_irqrestore(
5450 &qseecom.registered_app_list_lock, flags);
5451 }
5452 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5453 pr_err("copy_to_user failed\n");
5454 return -EFAULT;
5455 }
5456 return -EEXIST; /* app already loaded */
5457 } else {
5458 return 0; /* app not loaded */
5459 }
5460}
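/*
 * Illustrative userspace sketch (not part of this driver) of how the query
 * handler above is typically consumed. The ioctl name is assumed here; the
 * return convention (-EEXIST when the app is already loaded, 0 when it is
 * not) comes from the handler itself:
 *
 *	struct qseecom_qseos_app_load_query query = {0};
 *
 *	strlcpy(query.app_name, "sampleapp", MAX_APP_NAME_SIZE);
 *	rc = ioctl(fd, QSEECOM_IOCTL_APP_LOADED_QUERY_REQ, &query);
 *	if (rc < 0 && errno == EEXIST)
 *		// already loaded: query.app_id and query.app_arch are valid
 *	else if (rc == 0)
 *		// not loaded yet, caller should load it
 */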
5461
5462static int __qseecom_get_ce_pipe_info(
5463 enum qseecom_key_management_usage_type usage,
5464 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5465{
5466 int ret = -EINVAL;
5467 int i, j;
5468 struct qseecom_ce_info_use *p = NULL;
5469 int total = 0;
5470 struct qseecom_ce_pipe_entry *pcepipe;
5471
5472 switch (usage) {
5473 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5474 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5475 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5476 if (qseecom.support_fde) {
5477 p = qseecom.ce_info.fde;
5478 total = qseecom.ce_info.num_fde;
5479 } else {
5480 pr_err("system does not support fde\n");
5481 return -EINVAL;
5482 }
5483 break;
5484 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5485 if (qseecom.support_pfe) {
5486 p = qseecom.ce_info.pfe;
5487 total = qseecom.ce_info.num_pfe;
5488 } else {
5489 pr_err("system does not support pfe\n");
5490 return -EINVAL;
5491 }
5492 break;
5493 default:
5494 pr_err("unsupported usage %d\n", usage);
5495 return -EINVAL;
5496 }
5497
5498 for (j = 0; j < total; j++) {
5499 if (p->unit_num == unit) {
5500 pcepipe = p->ce_pipe_entry;
5501 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5502 (*ce_hw)[i] = pcepipe->ce_num;
5503 *pipe = pcepipe->ce_pipe_pair;
5504 pcepipe++;
5505 }
5506 ret = 0;
5507 break;
5508 }
5509 p++;
5510 }
5511 return ret;
5512}
5513
5514static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
5515 enum qseecom_key_management_usage_type usage,
5516 struct qseecom_key_generate_ireq *ireq)
5517{
5518 struct qseecom_command_scm_resp resp;
5519 int ret;
5520
5521 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5522 usage >= QSEOS_KM_USAGE_MAX) {
5523 pr_err("Error: unsupported usage %d\n", usage);
5524 return -EFAULT;
5525 }
5526 ret = __qseecom_enable_clk(CLK_QSEE);
5527 if (ret)
5528 return ret;
5529
5530 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5531 ireq, sizeof(struct qseecom_key_generate_ireq),
5532 &resp, sizeof(resp));
5533 if (ret) {
5534 if (ret == -EINVAL &&
5535 resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5536 pr_debug("Key ID exists.\n");
5537 ret = 0;
5538 } else {
5539 pr_err("scm call to generate key failed : %d\n", ret);
5540 ret = -EFAULT;
5541 }
5542 goto generate_key_exit;
5543 }
5544
5545 switch (resp.result) {
5546 case QSEOS_RESULT_SUCCESS:
5547 break;
5548 case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
5549 pr_debug("Key ID exists.\n");
5550 break;
5551 case QSEOS_RESULT_INCOMPLETE:
5552 ret = __qseecom_process_incomplete_cmd(data, &resp);
5553 if (ret) {
5554 if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5555 pr_debug("Key ID exists.\n");
5556 ret = 0;
5557 } else {
5558 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5559 resp.result);
5560 }
5561 }
5562 break;
5563 case QSEOS_RESULT_FAILURE:
5564 default:
5565 pr_err("gen key scm call failed resp.result %d\n", resp.result);
5566 ret = -EINVAL;
5567 break;
5568 }
5569generate_key_exit:
5570 __qseecom_disable_clk(CLK_QSEE);
5571 return ret;
5572}
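/*
 * The key-management helpers (__qseecom_generate_and_save_key,
 * __qseecom_delete_saved_key, __qseecom_set_clear_ce_key and
 * __qseecom_update_current_key_user_info below) follow the same pattern:
 * vote for the QSEE clock, issue the SCM call, complete
 * QSEOS_RESULT_INCOMPLETE responses via __qseecom_process_incomplete_cmd(),
 * and map the special results: FAIL_KEY_ID_EXISTS is treated as success,
 * FAIL_MAX_ATTEMPT becomes -ERANGE, and FAIL_PENDING_OPERATION is returned
 * as-is so that callers can retry after a short sleep.
 */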
5573
5574static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
5575 enum qseecom_key_management_usage_type usage,
5576 struct qseecom_key_delete_ireq *ireq)
5577{
5578 struct qseecom_command_scm_resp resp;
5579 int ret;
5580
5581 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5582 usage >= QSEOS_KM_USAGE_MAX) {
5583 pr_err("Error: unsupported usage %d\n", usage);
5584 return -EFAULT;
5585 }
5586 ret = __qseecom_enable_clk(CLK_QSEE);
5587 if (ret)
5588 return ret;
5589
5590 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5591 ireq, sizeof(struct qseecom_key_delete_ireq),
5592 &resp, sizeof(struct qseecom_command_scm_resp));
5593 if (ret) {
5594 if (ret == -EINVAL &&
5595 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5596 pr_debug("Max attempts to input password reached.\n");
5597 ret = -ERANGE;
5598 } else {
5599 pr_err("scm call to delete key failed : %d\n", ret);
5600 ret = -EFAULT;
5601 }
5602 goto del_key_exit;
5603 }
5604
5605 switch (resp.result) {
5606 case QSEOS_RESULT_SUCCESS:
5607 break;
5608 case QSEOS_RESULT_INCOMPLETE:
5609 ret = __qseecom_process_incomplete_cmd(data, &resp);
5610 if (ret) {
5611 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5612 resp.result);
5613 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5614 pr_debug("Max attempts to input password reached.\n");
5615 ret = -ERANGE;
5616 }
5617 }
5618 break;
5619 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5620 pr_debug("Max attempts to input password reached.\n");
5621 ret = -ERANGE;
5622 break;
5623 case QSEOS_RESULT_FAILURE:
5624 default:
5625 pr_err("Delete key scm call failed resp.result %d\n",
5626 resp.result);
5627 ret = -EINVAL;
5628 break;
5629 }
5630del_key_exit:
5631 __qseecom_disable_clk(CLK_QSEE);
5632 return ret;
5633}
5634
5635static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5636 enum qseecom_key_management_usage_type usage,
5637 struct qseecom_key_select_ireq *ireq)
5638{
5639 struct qseecom_command_scm_resp resp;
5640 int ret;
5641
5642 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5643 usage >= QSEOS_KM_USAGE_MAX) {
5644 pr_err("Error: unsupported usage %d\n", usage);
5645 return -EFAULT;
5646 }
5647 ret = __qseecom_enable_clk(CLK_QSEE);
5648 if (ret)
5649 return ret;
5650
5651 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5652 ret = __qseecom_enable_clk(CLK_CE_DRV);
5653 if (ret)
5654 return ret;
5655 }
5656
5657 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5658 ireq, sizeof(struct qseecom_key_select_ireq),
5659 &resp, sizeof(struct qseecom_command_scm_resp));
5660 if (ret) {
5661 if (ret == -EINVAL &&
5662 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5663 pr_debug("Max attempts to input password reached.\n");
5664 ret = -ERANGE;
5665 } else if (ret == -EINVAL &&
5666 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5667 pr_debug("Set Key operation under processing...\n");
5668 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5669 } else {
5670 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5671 ret);
5672 ret = -EFAULT;
5673 }
5674 goto set_key_exit;
5675 }
5676
5677 switch (resp.result) {
5678 case QSEOS_RESULT_SUCCESS:
5679 break;
5680 case QSEOS_RESULT_INCOMPLETE:
5681 ret = __qseecom_process_incomplete_cmd(data, &resp);
5682 if (ret) {
5683 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5684 resp.result);
5685 if (resp.result ==
5686 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5687 pr_debug("Set Key operation under processing...\n");
5688 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5689 }
5690 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5691 pr_debug("Max attempts to input password reached.\n");
5692 ret = -ERANGE;
5693 }
5694 }
5695 break;
5696 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5697 pr_debug("Max attempts to input password reached.\n");
5698 ret = -ERANGE;
5699 break;
5700 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5701 pr_debug("Set Key operation under processing...\n");
5702 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5703 break;
5704 case QSEOS_RESULT_FAILURE:
5705 default:
5706 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5707 ret = -EINVAL;
5708 break;
5709 }
5710set_key_exit:
5711 __qseecom_disable_clk(CLK_QSEE);
5712 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5713 __qseecom_disable_clk(CLK_CE_DRV);
5714 return ret;
5715}
5716
5717static int __qseecom_update_current_key_user_info(
5718 struct qseecom_dev_handle *data,
5719 enum qseecom_key_management_usage_type usage,
5720 struct qseecom_key_userinfo_update_ireq *ireq)
5721{
5722 struct qseecom_command_scm_resp resp;
5723 int ret;
5724
5725 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5726 usage >= QSEOS_KM_USAGE_MAX) {
5727 pr_err("Error: unsupported usage %d\n", usage);
5728 return -EFAULT;
5729 }
5730 ret = __qseecom_enable_clk(CLK_QSEE);
5731 if (ret)
5732 return ret;
5733
5734 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5735 ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
5736 &resp, sizeof(struct qseecom_command_scm_resp));
5737 if (ret) {
5738 if (ret == -EINVAL &&
5739 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5740 pr_debug("Set Key operation under processing...\n");
5741 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5742 } else {
5743 pr_err("scm call to update key userinfo failed: %d\n",
5744 ret);
5745 __qseecom_disable_clk(CLK_QSEE);
5746 return -EFAULT;
5747 }
5748 }
5749
5750 switch (resp.result) {
5751 case QSEOS_RESULT_SUCCESS:
5752 break;
5753 case QSEOS_RESULT_INCOMPLETE:
5754 ret = __qseecom_process_incomplete_cmd(data, &resp);
5755 if (resp.result ==
5756 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5757 pr_debug("Set Key operation under processing...\n");
5758 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5759 }
5760 if (ret)
5761 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5762 resp.result);
5763 break;
5764 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5765 pr_debug("Update Key operation under processing...\n");
5766 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5767 break;
5768 case QSEOS_RESULT_FAILURE:
5769 default:
5770 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5771 ret = -EINVAL;
5772 break;
5773 }
5774
5775 __qseecom_disable_clk(CLK_QSEE);
5776 return ret;
5777}
5778
5779
5780static int qseecom_enable_ice_setup(int usage)
5781{
5782 int ret = 0;
5783
5784 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5785 ret = qcom_ice_setup_ice_hw("ufs", true);
5786 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5787 ret = qcom_ice_setup_ice_hw("sdcc", true);
5788
5789 return ret;
5790}
5791
5792static int qseecom_disable_ice_setup(int usage)
5793{
5794 int ret = 0;
5795
5796 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5797 ret = qcom_ice_setup_ice_hw("ufs", false);
5798 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5799 ret = qcom_ice_setup_ice_hw("sdcc", false);
5800
5801 return ret;
5802}
5803
5804static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5805{
5806 struct qseecom_ce_info_use *pce_info_use, *p;
5807 int total = 0;
5808 int i;
5809
5810 switch (usage) {
5811 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5812 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5813 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5814 p = qseecom.ce_info.fde;
5815 total = qseecom.ce_info.num_fde;
5816 break;
5817 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5818 p = qseecom.ce_info.pfe;
5819 total = qseecom.ce_info.num_pfe;
5820 break;
5821 default:
5822 pr_err("unsupported usage %d\n", usage);
5823 return -EINVAL;
5824 }
5825
5826 pce_info_use = NULL;
5827
5828 for (i = 0; i < total; i++) {
5829 if (p->unit_num == unit) {
5830 pce_info_use = p;
5831 break;
5832 }
5833 p++;
5834 }
5835 if (!pce_info_use) {
5836 pr_err("can not find %d\n", unit);
5837 return -EINVAL;
5838 }
5839 return pce_info_use->num_ce_pipe_entries;
5840}
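/*
 * qseecom_get_ce_hw_instance() returns the number of CE pipe entries
 * configured for the given unit/usage (or a negative errno when the usage
 * is unsupported or the unit is not found); callers use that count to size
 * the ce_hw array that __qseecom_get_ce_pipe_info() then fills.
 */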
5841
5842static int qseecom_create_key(struct qseecom_dev_handle *data,
5843 void __user *argp)
5844{
5845 int i;
5846 uint32_t *ce_hw = NULL;
5847 uint32_t pipe = 0;
5848 int ret = 0;
5849 uint32_t flags = 0;
5850 struct qseecom_create_key_req create_key_req;
5851 struct qseecom_key_generate_ireq generate_key_ireq;
5852 struct qseecom_key_select_ireq set_key_ireq;
5853 int entries = 0;
5854
5855 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5856 if (ret) {
5857 pr_err("copy_from_user failed\n");
5858 return ret;
5859 }
5860
5861 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5862 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5863 pr_err("unsupported usage %d\n", create_key_req.usage);
5864 ret = -EFAULT;
5865 return ret;
5866 }
5867 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5868 create_key_req.usage);
5869 if (entries <= 0) {
5870 pr_err("no ce instance for usage %d instance %d\n",
5871 create_key_req.usage, DEFAULT_CE_INFO_UNIT);
5872 ret = -EINVAL;
5873 return ret;
5874 }
5875
5876 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5877 if (!ce_hw) {
5878 ret = -ENOMEM;
5879 return ret;
5880 }
5881 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5882 DEFAULT_CE_INFO_UNIT);
5883 if (ret) {
5884 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5885 ret = -EINVAL;
5886 goto free_buf;
5887 }
5888
5889 if (qseecom.fde_key_size)
5890 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5891 else
5892 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5893
5894 generate_key_ireq.flags = flags;
5895 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5896 memset((void *)generate_key_ireq.key_id,
5897 0, QSEECOM_KEY_ID_SIZE);
5898 memset((void *)generate_key_ireq.hash32,
5899 0, QSEECOM_HASH_SIZE);
5900 memcpy((void *)generate_key_ireq.key_id,
5901 (void *)key_id_array[create_key_req.usage].desc,
5902 QSEECOM_KEY_ID_SIZE);
5903 memcpy((void *)generate_key_ireq.hash32,
5904 (void *)create_key_req.hash32,
5905 QSEECOM_HASH_SIZE);
5906
5907 ret = __qseecom_generate_and_save_key(data,
5908 create_key_req.usage, &generate_key_ireq);
5909 if (ret) {
5910 pr_err("Failed to generate key on storage: %d\n", ret);
5911 goto free_buf;
5912 }
5913
5914 for (i = 0; i < entries; i++) {
5915 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5916 if (create_key_req.usage ==
5917 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5918 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5919 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5920
5921 } else if (create_key_req.usage ==
5922 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5923 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5924 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5925
5926 } else {
5927 set_key_ireq.ce = ce_hw[i];
5928 set_key_ireq.pipe = pipe;
5929 }
5930 set_key_ireq.flags = flags;
5931
5932 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5933 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5934 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5935 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5936 memcpy((void *)set_key_ireq.key_id,
5937 (void *)key_id_array[create_key_req.usage].desc,
5938 QSEECOM_KEY_ID_SIZE);
5939 memcpy((void *)set_key_ireq.hash32,
5940 (void *)create_key_req.hash32,
5941 QSEECOM_HASH_SIZE);
5942 /*
5943 * qseecom_enable_ice_setup() returns 0 if this is a GPCE based
5944 * crypto instance or if ICE is set up properly
5945 */
5946 ret = qseecom_enable_ice_setup(create_key_req.usage);
5947 if (ret)
5948 goto free_buf;
5949
5950 do {
5951 ret = __qseecom_set_clear_ce_key(data,
5952 create_key_req.usage,
5953 &set_key_ireq);
5954 /*
5955 * wait a little before calling scm again to let other
5956 * processes run
5957 */
5958 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
5959 msleep(50);
5960
5961 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
5962
5963 qseecom_disable_ice_setup(create_key_req.usage);
5964
5965 if (ret) {
5966 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
5967 pipe, ce_hw[i], ret);
5968 goto free_buf;
5969 } else {
5970 pr_err("Set the key successfully\n");
5971 if ((create_key_req.usage ==
5972 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
5973 (create_key_req.usage ==
5974 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
5975 goto free_buf;
5976 }
5977 }
5978
5979free_buf:
5980 kzfree(ce_hw);
5981 return ret;
5982}
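/*
 * qseecom_create_key() flow: look up the CE pipes for DEFAULT_CE_INFO_UNIT,
 * generate and persist the key (QSEOS_GENERATE_KEY), then program it into
 * each CE pipe with QSEOS_SET_KEY for both QSEOS_PIPE_ENC and
 * QSEOS_PIPE_ENC_XTS, retrying while TZ reports
 * QSEOS_RESULT_FAIL_PENDING_OPERATION. UFS/SDCC ICE usages are special-cased
 * to the fixed QSEECOM_*_ICE_CE_NUM / QSEECOM_ICE_FDE_KEY_INDEX pair and
 * finish after the first successful set.
 */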
5983
5984static int qseecom_wipe_key(struct qseecom_dev_handle *data,
5985 void __user *argp)
5986{
5987 uint32_t *ce_hw = NULL;
5988 uint32_t pipe = 0;
5989 int ret = 0;
5990 uint32_t flags = 0;
5991 int i, j;
5992 struct qseecom_wipe_key_req wipe_key_req;
5993 struct qseecom_key_delete_ireq delete_key_ireq;
5994 struct qseecom_key_select_ireq clear_key_ireq;
5995 int entries = 0;
5996
5997 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
5998 if (ret) {
5999 pr_err("copy_from_user failed\n");
6000 return ret;
6001 }
6002
6003 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6004 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6005 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6006 ret = -EFAULT;
6007 return ret;
6008 }
6009
6010 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6011 wipe_key_req.usage);
6012 if (entries <= 0) {
6013 pr_err("no ce instance for usage %d instance %d\n",
6014 wipe_key_req.usage, DEFAULT_CE_INFO_UNIT);
6015 ret = -EINVAL;
6016 return ret;
6017 }
6018
6019 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6020 if (!ce_hw) {
6021 ret = -ENOMEM;
6022 return ret;
6023 }
6024
6025 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6026 DEFAULT_CE_INFO_UNIT);
6027 if (ret) {
6028 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6029 ret = -EINVAL;
6030 goto free_buf;
6031 }
6032
6033 if (wipe_key_req.wipe_key_flag) {
6034 delete_key_ireq.flags = flags;
6035 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6036 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6037 memcpy((void *)delete_key_ireq.key_id,
6038 (void *)key_id_array[wipe_key_req.usage].desc,
6039 QSEECOM_KEY_ID_SIZE);
6040 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6041
6042 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6043 &delete_key_ireq);
6044 if (ret) {
6045 pr_err("Failed to delete key from ssd storage: %d\n",
6046 ret);
6047 ret = -EFAULT;
6048 goto free_buf;
6049 }
6050 }
6051
6052 for (j = 0; j < entries; j++) {
6053 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6054 if (wipe_key_req.usage ==
6055 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6056 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6057 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6058 } else if (wipe_key_req.usage ==
6059 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6060 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6061 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6062 } else {
6063 clear_key_ireq.ce = ce_hw[j];
6064 clear_key_ireq.pipe = pipe;
6065 }
6066 clear_key_ireq.flags = flags;
6067 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6068 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6069 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6070 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6071
6072 /*
6073 * qseecom_enable_ice_setup() returns 0 if this is a GPCE based
6074 * crypto instance or if ICE is set up properly
6075 */
6076 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6077 if (ret)
6078 goto free_buf;
6079
6080 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6081 &clear_key_ireq);
6082
6083 qseecom_disable_ice_setup(wipe_key_req.usage);
6084
6085 if (ret) {
6086 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6087 pipe, ce_hw[j], ret);
6088 ret = -EFAULT;
6089 goto free_buf;
6090 }
6091 }
6092
6093free_buf:
6094 kzfree(ce_hw);
6095 return ret;
6096}
6097
6098static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6099 void __user *argp)
6100{
6101 int ret = 0;
6102 uint32_t flags = 0;
6103 struct qseecom_update_key_userinfo_req update_key_req;
6104 struct qseecom_key_userinfo_update_ireq ireq;
6105
6106 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6107 if (ret) {
6108 pr_err("copy_from_user failed\n");
6109 return ret;
6110 }
6111
6112 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6113 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6114 pr_err("Error: unsupported usage %d\n", update_key_req.usage);
6115 return -EFAULT;
6116 }
6117
6118 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6119
6120 if (qseecom.fde_key_size)
6121 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6122 else
6123 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6124
6125 ireq.flags = flags;
6126 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6127 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6128 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6129 memcpy((void *)ireq.key_id,
6130 (void *)key_id_array[update_key_req.usage].desc,
6131 QSEECOM_KEY_ID_SIZE);
6132 memcpy((void *)ireq.current_hash32,
6133 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6134 memcpy((void *)ireq.new_hash32,
6135 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6136
6137 do {
6138 ret = __qseecom_update_current_key_user_info(data,
6139 update_key_req.usage,
6140 &ireq);
6141 /*
6142 * wait a little before calling scm again to let other
6143 * processes run
6144 */
6145 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6146 msleep(50);
6147
6148 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6149 if (ret) {
6150 pr_err("Failed to update key info: %d\n", ret);
6151 return ret;
6152 }
6153 return ret;
6154
6155}
6156static int qseecom_is_es_activated(void __user *argp)
6157{
6158 struct qseecom_is_es_activated_req req = {0};
6159 struct qseecom_command_scm_resp resp;
6160 int ret;
6161
6162 if (qseecom.qsee_version < QSEE_VERSION_04) {
6163 pr_err("invalid qsee version\n");
6164 return -ENODEV;
6165 }
6166
6167 if (argp == NULL) {
6168 pr_err("arg is null\n");
6169 return -EINVAL;
6170 }
6171
6172 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6173 &req, sizeof(req), &resp, sizeof(resp));
6174 if (ret) {
6175 pr_err("scm_call failed\n");
6176 return ret;
6177 }
6178
6179 req.is_activated = resp.result;
6180 ret = copy_to_user(argp, &req, sizeof(req));
6181 if (ret) {
6182 pr_err("copy_to_user failed\n");
6183 return ret;
6184 }
6185
6186 return 0;
6187}
6188
6189static int qseecom_save_partition_hash(void __user *argp)
6190{
6191 struct qseecom_save_partition_hash_req req;
6192 struct qseecom_command_scm_resp resp;
6193 int ret;
6194
6195 memset(&resp, 0x00, sizeof(resp));
6196
6197 if (qseecom.qsee_version < QSEE_VERSION_04) {
6198 pr_err("invalid qsee version\n");
6199 return -ENODEV;
6200 }
6201
6202 if (argp == NULL) {
6203 pr_err("arg is null\n");
6204 return -EINVAL;
6205 }
6206
6207 ret = copy_from_user(&req, argp, sizeof(req));
6208 if (ret) {
6209 pr_err("copy_from_user failed\n");
6210 return ret;
6211 }
6212
6213 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6214 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6215 if (ret) {
6216 pr_err("qseecom_scm_call failed\n");
6217 return ret;
6218 }
6219
6220 return 0;
6221}
6222
6223static int qseecom_mdtp_cipher_dip(void __user *argp)
6224{
6225 struct qseecom_mdtp_cipher_dip_req req;
6226 u32 tzbuflenin, tzbuflenout;
6227 char *tzbufin = NULL, *tzbufout = NULL;
6228 struct scm_desc desc = {0};
6229 int ret;
6230
6231 do {
6232 /* Copy the parameters from userspace */
6233 if (argp == NULL) {
6234 pr_err("arg is null\n");
6235 ret = -EINVAL;
6236 break;
6237 }
6238
6239 ret = copy_from_user(&req, argp, sizeof(req));
6240 if (ret) {
6241 pr_err("copy_from_user failed, ret= %d\n", ret);
6242 break;
6243 }
6244
6245 if (req.in_buf == NULL || req.out_buf == NULL ||
6246 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6247 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6248 req.direction > 1) {
6249 pr_err("invalid parameters\n");
6250 ret = -EINVAL;
6251 break;
6252 }
6253
6254 /* Copy the input buffer from userspace to kernel space */
6255 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6256 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6257 if (!tzbufin) {
6258 pr_err("error allocating in buffer\n");
6259 ret = -ENOMEM;
6260 break;
6261 }
6262
6263 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6264 if (ret) {
6265 pr_err("copy_from_user failed, ret=%d\n", ret);
6266 break;
6267 }
6268
6269 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6270
6271 /* Prepare the output buffer in kernel space */
6272 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6273 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6274 if (!tzbufout) {
6275 pr_err("error allocating out buffer\n");
6276 ret = -ENOMEM;
6277 break;
6278 }
6279
6280 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6281
6282 /* Send the command to TZ */
6283 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6284 desc.args[0] = virt_to_phys(tzbufin);
6285 desc.args[1] = req.in_buf_size;
6286 desc.args[2] = virt_to_phys(tzbufout);
6287 desc.args[3] = req.out_buf_size;
6288 desc.args[4] = req.direction;
6289
6290 ret = __qseecom_enable_clk(CLK_QSEE);
6291 if (ret)
6292 break;
6293
6294 ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);
6295
6296 __qseecom_disable_clk(CLK_QSEE);
6297
6298 if (ret) {
6299 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6300 ret);
6301 break;
6302 }
6303
6304 /* Copy the output buffer from kernel space to userspace */
6305 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6306 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6307 if (ret) {
6308 pr_err("copy_to_user failed, ret=%d\n", ret);
6309 break;
6310 }
6311 } while (0);
6312
6313 kzfree(tzbufin);
6314 kzfree(tzbufout);
6315
6316 return ret;
6317}
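/*
 * Note on the MDTP DIP path above: user buffers are never handed to TZ
 * directly. They are staged through page-aligned kzalloc'd bounce buffers,
 * flushed with dmac_flush_range() before and after the call, and passed to
 * TZ by physical address through scm_call2() with TZ_MDTP_CIPHER_DIP_ID,
 * with the QSEE clock voted around the call.
 */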
6318
6319static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6320 struct qseecom_qteec_req *req)
6321{
6322 if (!data || !data->client.ihandle) {
6323 pr_err("Client or client handle is not initialized\n");
6324 return -EINVAL;
6325 }
6326
6327 if (data->type != QSEECOM_CLIENT_APP)
6328 return -EFAULT;
6329
6330 if (req->req_len > UINT_MAX - req->resp_len) {
6331 pr_err("Integer overflow detected in req_len & rsp_len\n");
6332 return -EINVAL;
6333 }
6334
6335 if (req->req_len + req->resp_len > data->client.sb_length) {
6336 pr_debug("Not enough memory to fit cmd_buf.\n");
6337 pr_debug("resp_buf. Required: %u, Available: %zu\n",
6338 (req->req_len + req->resp_len), data->client.sb_length);
6339 return -ENOMEM;
6340 }
6341
6342 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6343 pr_err("cmd buffer or response buffer is null\n");
6344 return -EINVAL;
6345 }
6346 if (((uintptr_t)req->req_ptr <
6347 data->client.user_virt_sb_base) ||
6348 ((uintptr_t)req->req_ptr >=
6349 (data->client.user_virt_sb_base + data->client.sb_length))) {
6350 pr_err("cmd buffer address not within shared buffer\n");
6351 return -EINVAL;
6352 }
6353
6354 if (((uintptr_t)req->resp_ptr <
6355 data->client.user_virt_sb_base) ||
6356 ((uintptr_t)req->resp_ptr >=
6357 (data->client.user_virt_sb_base + data->client.sb_length))) {
6358 pr_err("response buffer address not within shared buffer\n");
6359 return -EINVAL;
6360 }
6361
6362 if ((req->req_len == 0) || (req->resp_len == 0)) {
6363 pr_err("cmd buf length/response buf length not valid\n");
6364 return -EINVAL;
6365 }
6366
6367 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6368 pr_err("Integer overflow in req_len & req_ptr\n");
6369 return -EINVAL;
6370 }
6371
6372 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6373 pr_err("Integer overflow in resp_len & resp_ptr\n");
6374 return -EINVAL;
6375 }
6376
6377 if (data->client.user_virt_sb_base >
6378 (ULONG_MAX - data->client.sb_length)) {
6379 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6380 return -EINVAL;
6381 }
6382 if ((((uintptr_t)req->req_ptr + req->req_len) >
6383 ((uintptr_t)data->client.user_virt_sb_base +
6384 data->client.sb_length)) ||
6385 (((uintptr_t)req->resp_ptr + req->resp_len) >
6386 ((uintptr_t)data->client.user_virt_sb_base +
6387 data->client.sb_length))) {
6388 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6389 return -EINVAL;
6390 }
6391 return 0;
6392}
6393
6394static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6395 uint32_t fd_idx, struct sg_table *sg_ptr)
6396{
6397 struct scatterlist *sg = sg_ptr->sgl;
6398 struct qseecom_sg_entry *sg_entry;
6399 void *buf;
6400 uint i;
6401 size_t size;
6402 dma_addr_t coh_pmem;
6403
6404 if (fd_idx >= MAX_ION_FD) {
6405 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6406 return -ENOMEM;
6407 }
6408 /*
6409 * Allocate a buffer, populate it with number of entry plus
6410 * each sg entry's phy addr and length; then return the
6411 * phy_addr of the buffer.
6412 */
6413 size = sizeof(uint32_t) +
6414 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6415 size = (size + PAGE_SIZE) & PAGE_MASK;
6416 buf = dma_alloc_coherent(qseecom.pdev,
6417 size, &coh_pmem, GFP_KERNEL);
6418 if (buf == NULL) {
6419 pr_err("failed to alloc memory for sg buf\n");
6420 return -ENOMEM;
6421 }
6422 *(uint32_t *)buf = sg_ptr->nents;
6423 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6424 for (i = 0; i < sg_ptr->nents; i++) {
6425 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6426 sg_entry->len = sg->length;
6427 sg_entry++;
6428 sg = sg_next(sg);
6429 }
6430 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6431 data->client.sec_buf_fd[fd_idx].vbase = buf;
6432 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6433 data->client.sec_buf_fd[fd_idx].size = size;
6434 return 0;
6435}
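/*
 * Layout of the coherent buffer built by __qseecom_qteec_handle_pre_alc_fd()
 * above (page aligned, handed to TZ by its physical address):
 *
 *	offset 0: uint32_t                  number of sg entries (nents)
 *	offset 4: struct qseecom_sg_entry   { phys_addr, len } for entry 0
 *	          struct qseecom_sg_entry   { phys_addr, len } for entry 1
 *	          ...
 *
 * __qseecom_update_qteec_req_buf() stores the pbase/size of this buffer
 * into the request's qseecom_param_memref when the referenced value is 0.
 */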
6436
6437static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6438 struct qseecom_dev_handle *data, bool cleanup)
6439{
6440 struct ion_handle *ihandle;
6441 int ret = 0;
6442 int i = 0;
6443 uint32_t *update;
6444 struct sg_table *sg_ptr = NULL;
6445 struct scatterlist *sg;
6446 struct qseecom_param_memref *memref;
6447
6448 if (req == NULL) {
6449 pr_err("Invalid address\n");
6450 return -EINVAL;
6451 }
6452 for (i = 0; i < MAX_ION_FD; i++) {
6453 if (req->ifd_data[i].fd > 0) {
6454 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
6455 req->ifd_data[i].fd);
6456 if (IS_ERR_OR_NULL(ihandle)) {
6457 pr_err("Ion client can't retrieve the handle\n");
6458 return -ENOMEM;
6459 }
6460 if ((req->req_len < sizeof(uint32_t)) ||
6461 (req->ifd_data[i].cmd_buf_offset >
6462 req->req_len - sizeof(uint32_t))) {
6463 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6464 req->req_len,
6465 req->ifd_data[i].cmd_buf_offset);
6466 return -EINVAL;
6467 }
6468 update = (uint32_t *)((char *) req->req_ptr +
6469 req->ifd_data[i].cmd_buf_offset);
6470 if (!update) {
6471 pr_err("update pointer is NULL\n");
6472 return -EINVAL;
6473 }
6474 } else {
6475 continue;
6476 }
6477 /* Populate the cmd data structure with the phys_addr */
6478 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6479 if (IS_ERR_OR_NULL(sg_ptr)) {
6480 pr_err("Ion client could not retrieve sg table\n");
6481 goto err;
6482 }
6483 sg = sg_ptr->sgl;
6484 if (sg == NULL) {
6485 pr_err("sg is NULL\n");
6486 goto err;
6487 }
6488 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6489 pr_err("Num of scat entr (%d) or length (%d) invalid\n",
6490 sg_ptr->nents, sg->length);
6491 goto err;
6492 }
6493 /* clean up buf for pre-allocated fd */
6494 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6495 (*update)) {
6496 if (data->client.sec_buf_fd[i].vbase)
6497 dma_free_coherent(qseecom.pdev,
6498 data->client.sec_buf_fd[i].size,
6499 data->client.sec_buf_fd[i].vbase,
6500 data->client.sec_buf_fd[i].pbase);
6501 memset((void *)update, 0,
6502 sizeof(struct qseecom_param_memref));
6503 memset(&(data->client.sec_buf_fd[i]), 0,
6504 sizeof(struct qseecom_sec_buf_fd_info));
6505 goto clean;
6506 }
6507
6508 if (*update == 0) {
6509 /* update buf for pre-allocated fd from secure heap*/
6510 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6511 sg_ptr);
6512 if (ret) {
6513 pr_err("Failed to handle buf for fd[%d]\n", i);
6514 goto err;
6515 }
6516 memref = (struct qseecom_param_memref *)update;
6517 memref->buffer =
6518 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6519 memref->size =
6520 (uint32_t)(data->client.sec_buf_fd[i].size);
6521 } else {
6522 /* update buf for fd from non-secure qseecom heap */
6523 if (sg_ptr->nents != 1) {
6524 pr_err("Num of scat entr (%d) invalid\n",
6525 sg_ptr->nents);
6526 goto err;
6527 }
6528 if (cleanup)
6529 *update = 0;
6530 else
6531 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6532 }
6533clean:
6534 if (cleanup) {
6535 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6536 ihandle, NULL, sg->length,
6537 ION_IOC_INV_CACHES);
6538 if (ret) {
6539 pr_err("cache operation failed %d\n", ret);
6540 goto err;
6541 }
6542 } else {
6543 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6544 ihandle, NULL, sg->length,
6545 ION_IOC_CLEAN_INV_CACHES);
6546 if (ret) {
6547 pr_err("cache operation failed %d\n", ret);
6548 goto err;
6549 }
6550 data->sglistinfo_ptr[i].indexAndFlags =
6551 SGLISTINFO_SET_INDEX_FLAG(
6552 (sg_ptr->nents == 1), 0,
6553 req->ifd_data[i].cmd_buf_offset);
6554 data->sglistinfo_ptr[i].sizeOrCount =
6555 (sg_ptr->nents == 1) ?
6556 sg->length : sg_ptr->nents;
6557 data->sglist_cnt = i + 1;
6558 }
6559 /* Deallocate the handle */
6560 if (!IS_ERR_OR_NULL(ihandle))
6561 ion_free(qseecom.ion_clnt, ihandle);
6562 }
6563 return ret;
6564err:
6565 if (!IS_ERR_OR_NULL(ihandle))
6566 ion_free(qseecom.ion_clnt, ihandle);
6567 return -ENOMEM;
6568}
6569
6570static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
6571 struct qseecom_qteec_req *req, uint32_t cmd_id)
6572{
6573 struct qseecom_command_scm_resp resp;
6574 struct qseecom_qteec_ireq ireq;
6575 struct qseecom_qteec_64bit_ireq ireq_64bit;
6576 struct qseecom_registered_app_list *ptr_app;
6577 bool found_app = false;
6578 unsigned long flags;
6579 int ret = 0;
6580 int ret2 = 0;
6581 uint32_t reqd_len_sb_in = 0;
6582 void *cmd_buf = NULL;
6583 size_t cmd_len;
6584 struct sglist_info *table = data->sglistinfo_ptr;
6585 void *req_ptr = NULL;
6586 void *resp_ptr = NULL;
6587
6588 ret = __qseecom_qteec_validate_msg(data, req);
6589 if (ret)
6590 return ret;
6591
6592 req_ptr = req->req_ptr;
6593 resp_ptr = req->resp_ptr;
6594
6595 /* find app_id & img_name from list */
6596 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6597 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6598 list) {
6599 if ((ptr_app->app_id == data->client.app_id) &&
6600 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6601 found_app = true;
6602 break;
6603 }
6604 }
6605 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6606 if (!found_app) {
6607 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6608 (char *)data->client.app_name);
6609 return -ENOENT;
6610 }
6611
6612 req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6613 (uintptr_t)req->req_ptr);
6614 req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6615 (uintptr_t)req->resp_ptr);
6616
6617 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6618 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
6619 ret = __qseecom_update_qteec_req_buf(
6620 (struct qseecom_qteec_modfd_req *)req, data, false);
6621 if (ret)
6622 return ret;
6623 }
6624
6625 if (qseecom.qsee_version < QSEE_VERSION_40) {
6626 ireq.app_id = data->client.app_id;
6627 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6628 (uintptr_t)req_ptr);
6629 ireq.req_len = req->req_len;
6630 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6631 (uintptr_t)resp_ptr);
6632 ireq.resp_len = req->resp_len;
6633 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6634 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6635 dmac_flush_range((void *)table,
6636 (void *)table + SGLISTINFO_TABLE_SIZE);
6637 cmd_buf = (void *)&ireq;
6638 cmd_len = sizeof(struct qseecom_qteec_ireq);
6639 } else {
6640 ireq_64bit.app_id = data->client.app_id;
6641 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6642 (uintptr_t)req_ptr);
6643 ireq_64bit.req_len = req->req_len;
6644 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6645 (uintptr_t)resp_ptr);
6646 ireq_64bit.resp_len = req->resp_len;
6647 if ((data->client.app_arch == ELFCLASS32) &&
6648 ((ireq_64bit.req_ptr >=
6649 PHY_ADDR_4G - ireq_64bit.req_len) ||
6650 (ireq_64bit.resp_ptr >=
6651 PHY_ADDR_4G - ireq_64bit.resp_len))){
6652 pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
6653 data->client.app_name, data->client.app_id);
6654 pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
6655 ireq_64bit.req_ptr, ireq_64bit.req_len,
6656 ireq_64bit.resp_ptr, ireq_64bit.resp_len);
6657 return -EFAULT;
6658 }
6659 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6660 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6661 dmac_flush_range((void *)table,
6662 (void *)table + SGLISTINFO_TABLE_SIZE);
6663 cmd_buf = (void *)&ireq_64bit;
6664 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6665 }
6666 if (qseecom.whitelist_support == true
6667 && cmd_id == QSEOS_TEE_OPEN_SESSION)
6668 *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
6669 else
6670 *(uint32_t *)cmd_buf = cmd_id;
6671
6672 reqd_len_sb_in = req->req_len + req->resp_len;
6673 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6674 data->client.sb_virt,
6675 reqd_len_sb_in,
6676 ION_IOC_CLEAN_INV_CACHES);
6677 if (ret) {
6678 pr_err("cache operation failed %d\n", ret);
6679 return ret;
6680 }
6681
6682 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6683
6684 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6685 cmd_buf, cmd_len,
6686 &resp, sizeof(resp));
6687 if (ret) {
6688 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6689 ret, data->client.app_id);
6690 goto exit;
6691 }
6692
6693 if (qseecom.qsee_reentrancy_support) {
6694 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
6695 if (ret)
6696 goto exit;
6697 } else {
6698 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6699 ret = __qseecom_process_incomplete_cmd(data, &resp);
6700 if (ret) {
6701 pr_err("process_incomplete_cmd failed err: %d\n",
6702 ret);
6703 goto exit;
6704 }
6705 } else {
6706 if (resp.result != QSEOS_RESULT_SUCCESS) {
6707 pr_err("Response result %d not supported\n",
6708 resp.result);
6709 ret = -EINVAL;
6710 goto exit;
6711 }
6712 }
6713 }
6714exit:
6715 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6716 data->client.sb_virt, data->client.sb_length,
6717 ION_IOC_INV_CACHES);
6718 if (ret2) {
6719 pr_err("cache operation failed %d\n", ret2);
6720 return ret2;
6721 }
6722
6723 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6724 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
6725 ret2 = __qseecom_update_qteec_req_buf(
6726 (struct qseecom_qteec_modfd_req *)req, data, true);
6727 if (ret2)
6728 return ret2;
6729 }
6730 return ret;
6731}
6732
6733static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6734 void __user *argp)
6735{
6736 struct qseecom_qteec_modfd_req req;
6737 int ret = 0;
6738
6739 ret = copy_from_user(&req, argp,
6740 sizeof(struct qseecom_qteec_modfd_req));
6741 if (ret) {
6742 pr_err("copy_from_user failed\n");
6743 return ret;
6744 }
6745 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6746 QSEOS_TEE_OPEN_SESSION);
6747
6748 return ret;
6749}
6750
6751static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6752 void __user *argp)
6753{
6754 struct qseecom_qteec_req req;
6755 int ret = 0;
6756
6757 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6758 if (ret) {
6759 pr_err("copy_from_user failed\n");
6760 return ret;
6761 }
6762 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6763 return ret;
6764}
6765
6766static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6767 void __user *argp)
6768{
6769 struct qseecom_qteec_modfd_req req;
6770 struct qseecom_command_scm_resp resp;
6771 struct qseecom_qteec_ireq ireq;
6772 struct qseecom_qteec_64bit_ireq ireq_64bit;
6773 struct qseecom_registered_app_list *ptr_app;
6774 bool found_app = false;
6775 unsigned long flags;
6776 int ret = 0;
6777 int i = 0;
6778 uint32_t reqd_len_sb_in = 0;
6779 void *cmd_buf = NULL;
6780 size_t cmd_len;
6781 struct sglist_info *table = data->sglistinfo_ptr;
6782 void *req_ptr = NULL;
6783 void *resp_ptr = NULL;
6784
6785 ret = copy_from_user(&req, argp,
6786 sizeof(struct qseecom_qteec_modfd_req));
6787 if (ret) {
6788 pr_err("copy_from_user failed\n");
6789 return ret;
6790 }
6791 ret = __qseecom_qteec_validate_msg(data,
6792 (struct qseecom_qteec_req *)(&req));
6793 if (ret)
6794 return ret;
6795 req_ptr = req.req_ptr;
6796 resp_ptr = req.resp_ptr;
6797
6798 /* find app_id & img_name from list */
6799 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6800 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6801 list) {
6802 if ((ptr_app->app_id == data->client.app_id) &&
6803 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6804 found_app = true;
6805 break;
6806 }
6807 }
6808 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6809 if (!found_app) {
6810 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6811 (char *)data->client.app_name);
6812 return -ENOENT;
6813 }
6814
6815 /* validate offsets */
6816 for (i = 0; i < MAX_ION_FD; i++) {
6817 if (req.ifd_data[i].fd) {
6818 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
6819 return -EINVAL;
6820 }
6821 }
6822 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6823 (uintptr_t)req.req_ptr);
6824 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6825 (uintptr_t)req.resp_ptr);
6826 ret = __qseecom_update_qteec_req_buf(&req, data, false);
6827 if (ret)
6828 return ret;
6829
6830 if (qseecom.qsee_version < QSEE_VERSION_40) {
6831 ireq.app_id = data->client.app_id;
6832 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6833 (uintptr_t)req_ptr);
6834 ireq.req_len = req.req_len;
6835 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6836 (uintptr_t)resp_ptr);
6837 ireq.resp_len = req.resp_len;
6838 cmd_buf = (void *)&ireq;
6839 cmd_len = sizeof(struct qseecom_qteec_ireq);
6840 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6841 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6842 dmac_flush_range((void *)table,
6843 (void *)table + SGLISTINFO_TABLE_SIZE);
6844 } else {
6845 ireq_64bit.app_id = data->client.app_id;
6846 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6847 (uintptr_t)req_ptr);
6848 ireq_64bit.req_len = req.req_len;
6849 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6850 (uintptr_t)resp_ptr);
6851 ireq_64bit.resp_len = req.resp_len;
6852 cmd_buf = (void *)&ireq_64bit;
6853 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6854 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6855 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6856 dmac_flush_range((void *)table,
6857 (void *)table + SGLISTINFO_TABLE_SIZE);
6858 }
6859 reqd_len_sb_in = req.req_len + req.resp_len;
6860 if (qseecom.whitelist_support == true)
6861 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
6862 else
6863 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
6864
6865 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6866 data->client.sb_virt,
6867 reqd_len_sb_in,
6868 ION_IOC_CLEAN_INV_CACHES);
6869 if (ret) {
6870 pr_err("cache operation failed %d\n", ret);
6871 return ret;
6872 }
6873
6874 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6875
6876 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6877 cmd_buf, cmd_len,
6878 &resp, sizeof(resp));
6879 if (ret) {
6880 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6881 ret, data->client.app_id);
6882 return ret;
6883 }
6884
6885 if (qseecom.qsee_reentrancy_support) {
6886 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
6887 } else {
6888 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6889 ret = __qseecom_process_incomplete_cmd(data, &resp);
6890 if (ret) {
6891 pr_err("process_incomplete_cmd failed err: %d\n",
6892 ret);
6893 return ret;
6894 }
6895 } else {
6896 if (resp.result != QSEOS_RESULT_SUCCESS) {
6897 pr_err("Response result %d not supported\n",
6898 resp.result);
6899 ret = -EINVAL;
6900 }
6901 }
6902 }
6903 ret = __qseecom_update_qteec_req_buf(&req, data, true);
6904 if (ret)
6905 return ret;
6906
6907 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6908 data->client.sb_virt, data->client.sb_length,
6909 ION_IOC_INV_CACHES);
6910 if (ret) {
6911 pr_err("cache operation failed %d\n", ret);
6912 return ret;
6913 }
6914 return 0;
6915}
6916
6917static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6918 void __user *argp)
6919{
6920 struct qseecom_qteec_modfd_req req;
6921 int ret = 0;
6922
6923 ret = copy_from_user(&req, argp,
6924 sizeof(struct qseecom_qteec_modfd_req));
6925 if (ret) {
6926 pr_err("copy_from_user failed\n");
6927 return ret;
6928 }
6929 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6930 QSEOS_TEE_REQUEST_CANCELLATION);
6931
6932 return ret;
6933}
6934
6935static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6936{
6937 if (data->sglist_cnt) {
6938 memset(data->sglistinfo_ptr, 0,
6939 SGLISTINFO_TABLE_SIZE);
6940 data->sglist_cnt = 0;
6941 }
6942}
6943
6944static inline long qseecom_ioctl(struct file *file,
6945 unsigned int cmd, unsigned long arg)
6946{
6947 int ret = 0;
6948 struct qseecom_dev_handle *data = file->private_data;
6949 void __user *argp = (void __user *) arg;
6950 bool perf_enabled = false;
6951
6952 if (!data) {
6953 pr_err("Invalid/uninitialized device handle\n");
6954 return -EINVAL;
6955 }
6956
6957 if (data->abort) {
6958 pr_err("Aborting qseecom driver\n");
6959 return -ENODEV;
6960 }
6961
6962 switch (cmd) {
6963 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
6964 if (data->type != QSEECOM_GENERIC) {
6965 pr_err("reg lstnr req: invalid handle (%d)\n",
6966 data->type);
6967 ret = -EINVAL;
6968 break;
6969 }
6970 pr_debug("ioctl register_listener_req()\n");
6971 mutex_lock(&app_access_lock);
6972 atomic_inc(&data->ioctl_count);
6973 data->type = QSEECOM_LISTENER_SERVICE;
6974 ret = qseecom_register_listener(data, argp);
6975 atomic_dec(&data->ioctl_count);
6976 wake_up_all(&data->abort_wq);
6977 mutex_unlock(&app_access_lock);
6978 if (ret)
6979 pr_err("failed qseecom_register_listener: %d\n", ret);
6980 break;
6981 }
6982 case QSEECOM_IOCTL_SET_ICE_INFO: {
6983 struct qseecom_ice_data_t ice_data;
6984
6985 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
6986 if (ret) {
6987 pr_err("copy_from_user failed\n");
6988 return -EFAULT;
6989 }
6990 qcom_ice_set_fde_flag(ice_data.flag);
6991 break;
6992 }
6993
6994 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
6995 if ((data->listener.id == 0) ||
6996 (data->type != QSEECOM_LISTENER_SERVICE)) {
6997 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
6998 data->type, data->listener.id);
6999 ret = -EINVAL;
7000 break;
7001 }
7002 pr_debug("ioctl unregister_listener_req()\n");
7003 __qseecom_listener_abort_all(1);
7004 mutex_lock(&app_access_lock);
7005 atomic_inc(&data->ioctl_count);
7006 ret = qseecom_unregister_listener(data);
7007 atomic_dec(&data->ioctl_count);
7008 wake_up_all(&data->abort_wq);
7009 mutex_unlock(&app_access_lock);
7010 __qseecom_listener_abort_all(0);
7011 if (ret)
7012 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7013 break;
7014 }
7015 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7016 if ((data->client.app_id == 0) ||
7017 (data->type != QSEECOM_CLIENT_APP)) {
7018 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7019 data->type, data->client.app_id);
7020 ret = -EINVAL;
7021 break;
7022 }
7023 /* Only one client allowed here at a time */
7024 mutex_lock(&app_access_lock);
7025 if (qseecom.support_bus_scaling) {
7026 /* register bus bw in case the client doesn't do it */
7027 if (!data->mode) {
7028 mutex_lock(&qsee_bw_mutex);
7029 __qseecom_register_bus_bandwidth_needs(
7030 data, HIGH);
7031 mutex_unlock(&qsee_bw_mutex);
7032 }
7033 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7034 if (ret) {
7035 pr_err("Failed to set bw.\n");
7036 ret = -EINVAL;
7037 mutex_unlock(&app_access_lock);
7038 break;
7039 }
7040 }
7041 /*
7042 * On targets where crypto clock is handled by HLOS,
7043 * if clk_access_cnt is zero and perf_enabled is false,
7044 * then the crypto clock was not enabled before sending cmd to
7045 * tz, qseecom will enable the clock to avoid service failure.
7046 */
7047 if (!qseecom.no_clock_support &&
7048 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7049 pr_debug("ce clock is not enabled!\n");
7050 ret = qseecom_perf_enable(data);
7051 if (ret) {
7052 pr_err("Failed to vote for clock with err %d\n",
7053 ret);
7054 mutex_unlock(&app_access_lock);
7055 ret = -EINVAL;
7056 break;
7057 }
7058 perf_enabled = true;
7059 }
7060 atomic_inc(&data->ioctl_count);
7061 ret = qseecom_send_cmd(data, argp);
7062 if (qseecom.support_bus_scaling)
7063 __qseecom_add_bw_scale_down_timer(
7064 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7065 if (perf_enabled) {
7066 qsee_disable_clock_vote(data, CLK_DFAB);
7067 qsee_disable_clock_vote(data, CLK_SFPB);
7068 }
7069 atomic_dec(&data->ioctl_count);
7070 wake_up_all(&data->abort_wq);
7071 mutex_unlock(&app_access_lock);
7072 if (ret)
7073 pr_err("failed qseecom_send_cmd: %d\n", ret);
7074 break;
7075 }
7076 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7077 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7078 if ((data->client.app_id == 0) ||
7079 (data->type != QSEECOM_CLIENT_APP)) {
7080 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7081 data->type, data->client.app_id);
7082 ret = -EINVAL;
7083 break;
7084 }
7085 /* Only one client allowed here at a time */
7086 mutex_lock(&app_access_lock);
7087 if (qseecom.support_bus_scaling) {
7088 if (!data->mode) {
7089 mutex_lock(&qsee_bw_mutex);
7090 __qseecom_register_bus_bandwidth_needs(
7091 data, HIGH);
7092 mutex_unlock(&qsee_bw_mutex);
7093 }
7094 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7095 if (ret) {
7096 pr_err("Failed to set bw.\n");
7097 mutex_unlock(&app_access_lock);
7098 ret = -EINVAL;
7099 break;
7100 }
7101 }
7102 /*
7103 * On targets where crypto clock is handled by HLOS,
7104 * if clk_access_cnt is zero and perf_enabled is false,
7105 * then the crypto clock was not enabled before sending cmd to
7106 * tz, qseecom will enable the clock to avoid service failure.
7107 */
7108 if (!qseecom.no_clock_support &&
7109 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7110 pr_debug("ce clock is not enabled!\n");
7111 ret = qseecom_perf_enable(data);
7112 if (ret) {
7113 pr_err("Failed to vote for clock with err %d\n",
7114 ret);
7115 mutex_unlock(&app_access_lock);
7116 ret = -EINVAL;
7117 break;
7118 }
7119 perf_enabled = true;
7120 }
7121 atomic_inc(&data->ioctl_count);
7122 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7123 ret = qseecom_send_modfd_cmd(data, argp);
7124 else
7125 ret = qseecom_send_modfd_cmd_64(data, argp);
7126 if (qseecom.support_bus_scaling)
7127 __qseecom_add_bw_scale_down_timer(
7128 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7129 if (perf_enabled) {
7130 qsee_disable_clock_vote(data, CLK_DFAB);
7131 qsee_disable_clock_vote(data, CLK_SFPB);
7132 }
7133 atomic_dec(&data->ioctl_count);
7134 wake_up_all(&data->abort_wq);
7135 mutex_unlock(&app_access_lock);
7136 if (ret)
7137 pr_err("failed qseecom_send_cmd: %d\n", ret);
7138 __qseecom_clean_data_sglistinfo(data);
7139 break;
7140 }
7141 case QSEECOM_IOCTL_RECEIVE_REQ: {
7142 if ((data->listener.id == 0) ||
7143 (data->type != QSEECOM_LISTENER_SERVICE)) {
7144 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7145 data->type, data->listener.id);
7146 ret = -EINVAL;
7147 break;
7148 }
7149 atomic_inc(&data->ioctl_count);
7150 ret = qseecom_receive_req(data);
7151 atomic_dec(&data->ioctl_count);
7152 wake_up_all(&data->abort_wq);
7153 if (ret && (ret != -ERESTARTSYS))
7154 pr_err("failed qseecom_receive_req: %d\n", ret);
7155 break;
7156 }
7157 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7158 if ((data->listener.id == 0) ||
7159 (data->type != QSEECOM_LISTENER_SERVICE)) {
7160 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7161 data->type, data->listener.id);
7162 ret = -EINVAL;
7163 break;
7164 }
7165 atomic_inc(&data->ioctl_count);
7166 if (!qseecom.qsee_reentrancy_support)
7167 ret = qseecom_send_resp();
7168 else
7169 ret = qseecom_reentrancy_send_resp(data);
7170 atomic_dec(&data->ioctl_count);
7171 wake_up_all(&data->abort_wq);
7172 if (ret)
7173 pr_err("failed qseecom_send_resp: %d\n", ret);
7174 break;
7175 }
7176 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7177 if ((data->type != QSEECOM_CLIENT_APP) &&
7178 (data->type != QSEECOM_GENERIC) &&
7179 (data->type != QSEECOM_SECURE_SERVICE)) {
7180 pr_err("set mem param req: invalid handle (%d)\n",
7181 data->type);
7182 ret = -EINVAL;
7183 break;
7184 }
7185 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7186 mutex_lock(&app_access_lock);
7187 atomic_inc(&data->ioctl_count);
7188 ret = qseecom_set_client_mem_param(data, argp);
7189 atomic_dec(&data->ioctl_count);
7190 mutex_unlock(&app_access_lock);
7191 if (ret)
7192			pr_err("failed qseecom_set_mem_param request: %d\n",
7193 ret);
7194 break;
7195 }
7196 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7197 if ((data->type != QSEECOM_GENERIC) &&
7198 (data->type != QSEECOM_CLIENT_APP)) {
7199 pr_err("load app req: invalid handle (%d)\n",
7200 data->type);
7201 ret = -EINVAL;
7202 break;
7203 }
7204 data->type = QSEECOM_CLIENT_APP;
7205 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7206 mutex_lock(&app_access_lock);
7207 atomic_inc(&data->ioctl_count);
7208 ret = qseecom_load_app(data, argp);
7209 atomic_dec(&data->ioctl_count);
7210 mutex_unlock(&app_access_lock);
7211 if (ret)
7212 pr_err("failed load_app request: %d\n", ret);
7213 break;
7214 }
7215 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7216 if ((data->client.app_id == 0) ||
7217 (data->type != QSEECOM_CLIENT_APP)) {
7218 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7219 data->type, data->client.app_id);
7220 ret = -EINVAL;
7221 break;
7222 }
7223 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7224 mutex_lock(&app_access_lock);
7225 atomic_inc(&data->ioctl_count);
7226 ret = qseecom_unload_app(data, false);
7227 atomic_dec(&data->ioctl_count);
7228 mutex_unlock(&app_access_lock);
7229 if (ret)
7230 pr_err("failed unload_app request: %d\n", ret);
7231 break;
7232 }
7233 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7234 atomic_inc(&data->ioctl_count);
7235 ret = qseecom_get_qseos_version(data, argp);
7236 if (ret)
7237 pr_err("qseecom_get_qseos_version: %d\n", ret);
7238 atomic_dec(&data->ioctl_count);
7239 break;
7240 }
7241 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7242 if ((data->type != QSEECOM_GENERIC) &&
7243 (data->type != QSEECOM_CLIENT_APP)) {
7244 pr_err("perf enable req: invalid handle (%d)\n",
7245 data->type);
7246 ret = -EINVAL;
7247 break;
7248 }
7249 if ((data->type == QSEECOM_CLIENT_APP) &&
7250 (data->client.app_id == 0)) {
7251 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7252 data->type, data->client.app_id);
7253 ret = -EINVAL;
7254 break;
7255 }
7256 atomic_inc(&data->ioctl_count);
7257 if (qseecom.support_bus_scaling) {
7258 mutex_lock(&qsee_bw_mutex);
7259 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7260 mutex_unlock(&qsee_bw_mutex);
7261 } else {
7262 ret = qseecom_perf_enable(data);
7263 if (ret)
7264 pr_err("Fail to vote for clocks %d\n", ret);
7265 }
7266 atomic_dec(&data->ioctl_count);
7267 break;
7268 }
7269 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7270 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7271 (data->type != QSEECOM_CLIENT_APP)) {
7272 pr_err("perf disable req: invalid handle (%d)\n",
7273 data->type);
7274 ret = -EINVAL;
7275 break;
7276 }
7277 if ((data->type == QSEECOM_CLIENT_APP) &&
7278 (data->client.app_id == 0)) {
7279 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7280 data->type, data->client.app_id);
7281 ret = -EINVAL;
7282 break;
7283 }
7284 atomic_inc(&data->ioctl_count);
7285 if (!qseecom.support_bus_scaling) {
7286 qsee_disable_clock_vote(data, CLK_DFAB);
7287 qsee_disable_clock_vote(data, CLK_SFPB);
7288 } else {
7289 mutex_lock(&qsee_bw_mutex);
7290 qseecom_unregister_bus_bandwidth_needs(data);
7291 mutex_unlock(&qsee_bw_mutex);
7292 }
7293 atomic_dec(&data->ioctl_count);
7294 break;
7295 }
7296
7297 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7298 /* If crypto clock is not handled by HLOS, return directly. */
7299 if (qseecom.no_clock_support) {
7300 pr_debug("crypto clock is not handled by HLOS\n");
7301 break;
7302 }
7303 if ((data->client.app_id == 0) ||
7304 (data->type != QSEECOM_CLIENT_APP)) {
7305 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7306 data->type, data->client.app_id);
7307 ret = -EINVAL;
7308 break;
7309 }
7310 atomic_inc(&data->ioctl_count);
7311 ret = qseecom_scale_bus_bandwidth(data, argp);
7312 atomic_dec(&data->ioctl_count);
7313 break;
7314 }
7315 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7316 if (data->type != QSEECOM_GENERIC) {
7317 pr_err("load ext elf req: invalid client handle (%d)\n",
7318 data->type);
7319 ret = -EINVAL;
7320 break;
7321 }
7322 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7323 data->released = true;
7324 mutex_lock(&app_access_lock);
7325 atomic_inc(&data->ioctl_count);
7326 ret = qseecom_load_external_elf(data, argp);
7327 atomic_dec(&data->ioctl_count);
7328 mutex_unlock(&app_access_lock);
7329 if (ret)
7330 pr_err("failed load_external_elf request: %d\n", ret);
7331 break;
7332 }
7333 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7334 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7335 pr_err("unload ext elf req: invalid handle (%d)\n",
7336 data->type);
7337 ret = -EINVAL;
7338 break;
7339 }
7340 data->released = true;
7341 mutex_lock(&app_access_lock);
7342 atomic_inc(&data->ioctl_count);
7343 ret = qseecom_unload_external_elf(data);
7344 atomic_dec(&data->ioctl_count);
7345 mutex_unlock(&app_access_lock);
7346 if (ret)
7347 pr_err("failed unload_app request: %d\n", ret);
7348 break;
7349 }
7350 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7351 data->type = QSEECOM_CLIENT_APP;
7352 mutex_lock(&app_access_lock);
7353 atomic_inc(&data->ioctl_count);
7354 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7355 ret = qseecom_query_app_loaded(data, argp);
7356 atomic_dec(&data->ioctl_count);
7357 mutex_unlock(&app_access_lock);
7358 break;
7359 }
7360 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7361 if (data->type != QSEECOM_GENERIC) {
7362 pr_err("send cmd svc req: invalid handle (%d)\n",
7363 data->type);
7364 ret = -EINVAL;
7365 break;
7366 }
7367 data->type = QSEECOM_SECURE_SERVICE;
7368 if (qseecom.qsee_version < QSEE_VERSION_03) {
7369 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7370 qseecom.qsee_version);
7371 return -EINVAL;
7372 }
7373 mutex_lock(&app_access_lock);
7374 atomic_inc(&data->ioctl_count);
7375 ret = qseecom_send_service_cmd(data, argp);
7376 atomic_dec(&data->ioctl_count);
7377 mutex_unlock(&app_access_lock);
7378 break;
7379 }
7380 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7381 if (!(qseecom.support_pfe || qseecom.support_fde))
7382 pr_err("Features requiring key init not supported\n");
7383 if (data->type != QSEECOM_GENERIC) {
7384 pr_err("create key req: invalid handle (%d)\n",
7385 data->type);
7386 ret = -EINVAL;
7387 break;
7388 }
7389 if (qseecom.qsee_version < QSEE_VERSION_05) {
7390 pr_err("Create Key feature unsupported: qsee ver %u\n",
7391 qseecom.qsee_version);
7392 return -EINVAL;
7393 }
7394 data->released = true;
7395 mutex_lock(&app_access_lock);
7396 atomic_inc(&data->ioctl_count);
7397 ret = qseecom_create_key(data, argp);
7398 if (ret)
7399 pr_err("failed to create encryption key: %d\n", ret);
7400
7401 atomic_dec(&data->ioctl_count);
7402 mutex_unlock(&app_access_lock);
7403 break;
7404 }
7405 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7406 if (!(qseecom.support_pfe || qseecom.support_fde))
7407 pr_err("Features requiring key init not supported\n");
7408 if (data->type != QSEECOM_GENERIC) {
7409 pr_err("wipe key req: invalid handle (%d)\n",
7410 data->type);
7411 ret = -EINVAL;
7412 break;
7413 }
7414 if (qseecom.qsee_version < QSEE_VERSION_05) {
7415 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7416 qseecom.qsee_version);
7417 return -EINVAL;
7418 }
7419 data->released = true;
7420 mutex_lock(&app_access_lock);
7421 atomic_inc(&data->ioctl_count);
7422 ret = qseecom_wipe_key(data, argp);
7423 if (ret)
7424 pr_err("failed to wipe encryption key: %d\n", ret);
7425 atomic_dec(&data->ioctl_count);
7426 mutex_unlock(&app_access_lock);
7427 break;
7428 }
7429 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7430 if (!(qseecom.support_pfe || qseecom.support_fde))
7431 pr_err("Features requiring key init not supported\n");
7432 if (data->type != QSEECOM_GENERIC) {
7433 pr_err("update key req: invalid handle (%d)\n",
7434 data->type);
7435 ret = -EINVAL;
7436 break;
7437 }
7438 if (qseecom.qsee_version < QSEE_VERSION_05) {
7439 pr_err("Update Key feature unsupported in qsee ver %u\n",
7440 qseecom.qsee_version);
7441 return -EINVAL;
7442 }
7443 data->released = true;
7444 mutex_lock(&app_access_lock);
7445 atomic_inc(&data->ioctl_count);
7446 ret = qseecom_update_key_user_info(data, argp);
7447 if (ret)
7448 pr_err("failed to update key user info: %d\n", ret);
7449 atomic_dec(&data->ioctl_count);
7450 mutex_unlock(&app_access_lock);
7451 break;
7452 }
7453 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7454 if (data->type != QSEECOM_GENERIC) {
7455 pr_err("save part hash req: invalid handle (%d)\n",
7456 data->type);
7457 ret = -EINVAL;
7458 break;
7459 }
7460 data->released = true;
7461 mutex_lock(&app_access_lock);
7462 atomic_inc(&data->ioctl_count);
7463 ret = qseecom_save_partition_hash(argp);
7464 atomic_dec(&data->ioctl_count);
7465 mutex_unlock(&app_access_lock);
7466 break;
7467 }
7468 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7469 if (data->type != QSEECOM_GENERIC) {
7470 pr_err("ES activated req: invalid handle (%d)\n",
7471 data->type);
7472 ret = -EINVAL;
7473 break;
7474 }
7475 data->released = true;
7476 mutex_lock(&app_access_lock);
7477 atomic_inc(&data->ioctl_count);
7478 ret = qseecom_is_es_activated(argp);
7479 atomic_dec(&data->ioctl_count);
7480 mutex_unlock(&app_access_lock);
7481 break;
7482 }
7483 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7484 if (data->type != QSEECOM_GENERIC) {
7485 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7486 data->type);
7487 ret = -EINVAL;
7488 break;
7489 }
7490 data->released = true;
7491 mutex_lock(&app_access_lock);
7492 atomic_inc(&data->ioctl_count);
7493 ret = qseecom_mdtp_cipher_dip(argp);
7494 atomic_dec(&data->ioctl_count);
7495 mutex_unlock(&app_access_lock);
7496 break;
7497 }
7498 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7499 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7500 if ((data->listener.id == 0) ||
7501 (data->type != QSEECOM_LISTENER_SERVICE)) {
7502			pr_err("send modfd resp: invalid handle (%d), lid(%d)\n",
7503 data->type, data->listener.id);
7504 ret = -EINVAL;
7505 break;
7506 }
7507 atomic_inc(&data->ioctl_count);
7508 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7509 ret = qseecom_send_modfd_resp(data, argp);
7510 else
7511 ret = qseecom_send_modfd_resp_64(data, argp);
7512 atomic_dec(&data->ioctl_count);
7513 wake_up_all(&data->abort_wq);
7514 if (ret)
7515			pr_err("failed qseecom_send_modfd_resp: %d\n", ret);
7516 __qseecom_clean_data_sglistinfo(data);
7517 break;
7518 }
7519 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7520 if ((data->client.app_id == 0) ||
7521 (data->type != QSEECOM_CLIENT_APP)) {
7522 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7523 data->type, data->client.app_id);
7524 ret = -EINVAL;
7525 break;
7526 }
7527 if (qseecom.qsee_version < QSEE_VERSION_40) {
7528 pr_err("GP feature unsupported: qsee ver %u\n",
7529 qseecom.qsee_version);
7530 return -EINVAL;
7531 }
7532 /* Only one client allowed here at a time */
7533 mutex_lock(&app_access_lock);
7534 atomic_inc(&data->ioctl_count);
7535 ret = qseecom_qteec_open_session(data, argp);
7536 atomic_dec(&data->ioctl_count);
7537 wake_up_all(&data->abort_wq);
7538 mutex_unlock(&app_access_lock);
7539 if (ret)
7540 pr_err("failed open_session_cmd: %d\n", ret);
7541 __qseecom_clean_data_sglistinfo(data);
7542 break;
7543 }
7544 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7545 if ((data->client.app_id == 0) ||
7546 (data->type != QSEECOM_CLIENT_APP)) {
7547 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7548 data->type, data->client.app_id);
7549 ret = -EINVAL;
7550 break;
7551 }
7552 if (qseecom.qsee_version < QSEE_VERSION_40) {
7553 pr_err("GP feature unsupported: qsee ver %u\n",
7554 qseecom.qsee_version);
7555 return -EINVAL;
7556 }
7557 /* Only one client allowed here at a time */
7558 mutex_lock(&app_access_lock);
7559 atomic_inc(&data->ioctl_count);
7560 ret = qseecom_qteec_close_session(data, argp);
7561 atomic_dec(&data->ioctl_count);
7562 wake_up_all(&data->abort_wq);
7563 mutex_unlock(&app_access_lock);
7564 if (ret)
7565 pr_err("failed close_session_cmd: %d\n", ret);
7566 break;
7567 }
7568 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7569 if ((data->client.app_id == 0) ||
7570 (data->type != QSEECOM_CLIENT_APP)) {
7571 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7572 data->type, data->client.app_id);
7573 ret = -EINVAL;
7574 break;
7575 }
7576 if (qseecom.qsee_version < QSEE_VERSION_40) {
7577 pr_err("GP feature unsupported: qsee ver %u\n",
7578 qseecom.qsee_version);
7579 return -EINVAL;
7580 }
7581 /* Only one client allowed here at a time */
7582 mutex_lock(&app_access_lock);
7583 atomic_inc(&data->ioctl_count);
7584 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7585 atomic_dec(&data->ioctl_count);
7586 wake_up_all(&data->abort_wq);
7587 mutex_unlock(&app_access_lock);
7588 if (ret)
7589 pr_err("failed Invoke cmd: %d\n", ret);
7590 __qseecom_clean_data_sglistinfo(data);
7591 break;
7592 }
7593 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7594 if ((data->client.app_id == 0) ||
7595 (data->type != QSEECOM_CLIENT_APP)) {
7596 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7597 data->type, data->client.app_id);
7598 ret = -EINVAL;
7599 break;
7600 }
7601 if (qseecom.qsee_version < QSEE_VERSION_40) {
7602 pr_err("GP feature unsupported: qsee ver %u\n",
7603 qseecom.qsee_version);
7604 return -EINVAL;
7605 }
7606 /* Only one client allowed here at a time */
7607 mutex_lock(&app_access_lock);
7608 atomic_inc(&data->ioctl_count);
7609 ret = qseecom_qteec_request_cancellation(data, argp);
7610 atomic_dec(&data->ioctl_count);
7611 wake_up_all(&data->abort_wq);
7612 mutex_unlock(&app_access_lock);
7613 if (ret)
7614 pr_err("failed request_cancellation: %d\n", ret);
7615 break;
7616 }
7617 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7618 atomic_inc(&data->ioctl_count);
7619 ret = qseecom_get_ce_info(data, argp);
7620 if (ret)
7621 pr_err("failed get fde ce pipe info: %d\n", ret);
7622 atomic_dec(&data->ioctl_count);
7623 break;
7624 }
7625 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7626 atomic_inc(&data->ioctl_count);
7627 ret = qseecom_free_ce_info(data, argp);
7628 if (ret)
7629			pr_err("failed to free ce pipe info: %d\n", ret);
7630 atomic_dec(&data->ioctl_count);
7631 break;
7632 }
7633 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7634 atomic_inc(&data->ioctl_count);
7635 ret = qseecom_query_ce_info(data, argp);
7636 if (ret)
7637			pr_err("failed to query ce pipe info: %d\n", ret);
7638 atomic_dec(&data->ioctl_count);
7639 break;
7640 }
7641 default:
7642 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7643 return -EINVAL;
7644 }
7645 return ret;
7646}
7647
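/*
 * qseecom_open() - allocate and initialize a per-fd qseecom_dev_handle.
 * A new handle starts out as QSEECOM_GENERIC; later ioctls (load app,
 * send service cmd, etc.) specialize its type.
 */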
7648static int qseecom_open(struct inode *inode, struct file *file)
7649{
7650 int ret = 0;
7651 struct qseecom_dev_handle *data;
7652
7653 data = kzalloc(sizeof(*data), GFP_KERNEL);
7654 if (!data)
7655 return -ENOMEM;
7656 file->private_data = data;
7657 data->abort = 0;
7658 data->type = QSEECOM_GENERIC;
7659 data->released = false;
7660 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7661 data->mode = INACTIVE;
7662 init_waitqueue_head(&data->abort_wq);
7663 atomic_set(&data->ioctl_count, 0);
7664 return ret;
7665}
7666
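/*
 * qseecom_release() - clean up a client handle on close().  Any listener,
 * loaded app or mapped ION memory still owned by the handle is torn down
 * here, and outstanding bus-bandwidth/clock votes taken through this
 * handle are dropped.
 */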
7667static int qseecom_release(struct inode *inode, struct file *file)
7668{
7669 struct qseecom_dev_handle *data = file->private_data;
7670 int ret = 0;
7671
7672 if (data->released == false) {
7673 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
7674 data->type, data->mode, data);
7675 switch (data->type) {
7676 case QSEECOM_LISTENER_SERVICE:
7677			__qseecom_listener_abort_all(1);
7678			mutex_lock(&app_access_lock);
7679 ret = qseecom_unregister_listener(data);
7680 mutex_unlock(&app_access_lock);
7681			__qseecom_listener_abort_all(0);
7682			break;
7683 case QSEECOM_CLIENT_APP:
7684 mutex_lock(&app_access_lock);
7685 ret = qseecom_unload_app(data, true);
7686 mutex_unlock(&app_access_lock);
7687 break;
7688 case QSEECOM_SECURE_SERVICE:
7689 case QSEECOM_GENERIC:
7690 ret = qseecom_unmap_ion_allocated_memory(data);
7691 if (ret)
7692 pr_err("Ion Unmap failed\n");
7693 break;
7694 case QSEECOM_UNAVAILABLE_CLIENT_APP:
7695 break;
7696 default:
7697 pr_err("Unsupported clnt_handle_type %d",
7698 data->type);
7699 break;
7700 }
7701 }
7702
7703 if (qseecom.support_bus_scaling) {
7704 mutex_lock(&qsee_bw_mutex);
7705 if (data->mode != INACTIVE) {
7706 qseecom_unregister_bus_bandwidth_needs(data);
7707 if (qseecom.cumulative_mode == INACTIVE) {
7708 ret = __qseecom_set_msm_bus_request(INACTIVE);
7709 if (ret)
7710 pr_err("Fail to scale down bus\n");
7711 }
7712 }
7713 mutex_unlock(&qsee_bw_mutex);
7714 } else {
7715 if (data->fast_load_enabled == true)
7716 qsee_disable_clock_vote(data, CLK_SFPB);
7717 if (data->perf_enabled == true)
7718 qsee_disable_clock_vote(data, CLK_DFAB);
7719 }
7720 kfree(data);
7721
7722 return ret;
7723}
7724
7725#ifdef CONFIG_COMPAT
7726#include "compat_qseecom.c"
7727#else
7728#define compat_qseecom_ioctl NULL
7729#endif
7730
7731static const struct file_operations qseecom_fops = {
7732 .owner = THIS_MODULE,
7733 .unlocked_ioctl = qseecom_ioctl,
7734 .compat_ioctl = compat_qseecom_ioctl,
7735 .open = qseecom_open,
7736 .release = qseecom_release
7737};
7738
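/*
 * __qseecom_init_clk() - acquire the core/iface/bus clocks for the given CE
 * hardware instance (CLK_QSEE or CLK_CE_DRV) and set the core source clock
 * to qseecom.ce_opp_freq_hz.  When "qcom,no-clock-support" is set, all clock
 * handles are left NULL and the function succeeds without doing anything.
 */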
7739static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7740{
7741 int rc = 0;
7742 struct device *pdev;
7743 struct qseecom_clk *qclk;
7744 char *core_clk_src = NULL;
7745 char *core_clk = NULL;
7746 char *iface_clk = NULL;
7747 char *bus_clk = NULL;
7748
7749 switch (ce) {
7750 case CLK_QSEE: {
7751 core_clk_src = "core_clk_src";
7752 core_clk = "core_clk";
7753 iface_clk = "iface_clk";
7754 bus_clk = "bus_clk";
7755 qclk = &qseecom.qsee;
7756 qclk->instance = CLK_QSEE;
7757 break;
7758	}
7759 case CLK_CE_DRV: {
7760 core_clk_src = "ce_drv_core_clk_src";
7761 core_clk = "ce_drv_core_clk";
7762 iface_clk = "ce_drv_iface_clk";
7763 bus_clk = "ce_drv_bus_clk";
7764 qclk = &qseecom.ce_drv;
7765 qclk->instance = CLK_CE_DRV;
7766 break;
7767	}
7768 default:
7769 pr_err("Invalid ce hw instance: %d!\n", ce);
7770 return -EIO;
7771 }
7772
7773 if (qseecom.no_clock_support) {
7774 qclk->ce_core_clk = NULL;
7775 qclk->ce_clk = NULL;
7776 qclk->ce_bus_clk = NULL;
7777 qclk->ce_core_src_clk = NULL;
7778 return 0;
7779 }
7780
7781 pdev = qseecom.pdev;
7782
7783 /* Get CE3 src core clk. */
7784 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7785 if (!IS_ERR(qclk->ce_core_src_clk)) {
7786 rc = clk_set_rate(qclk->ce_core_src_clk,
7787 qseecom.ce_opp_freq_hz);
7788 if (rc) {
7789 clk_put(qclk->ce_core_src_clk);
7790 qclk->ce_core_src_clk = NULL;
7791 pr_err("Unable to set the core src clk @%uMhz.\n",
7792 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7793 return -EIO;
7794 }
7795 } else {
7796 pr_warn("Unable to get CE core src clk, set to NULL\n");
7797 qclk->ce_core_src_clk = NULL;
7798 }
7799
7800 /* Get CE core clk */
7801 qclk->ce_core_clk = clk_get(pdev, core_clk);
7802 if (IS_ERR(qclk->ce_core_clk)) {
7803 rc = PTR_ERR(qclk->ce_core_clk);
7804 pr_err("Unable to get CE core clk\n");
7805 if (qclk->ce_core_src_clk != NULL)
7806 clk_put(qclk->ce_core_src_clk);
7807 return -EIO;
7808 }
7809
7810 /* Get CE Interface clk */
7811 qclk->ce_clk = clk_get(pdev, iface_clk);
7812 if (IS_ERR(qclk->ce_clk)) {
7813 rc = PTR_ERR(qclk->ce_clk);
7814 pr_err("Unable to get CE interface clk\n");
7815 if (qclk->ce_core_src_clk != NULL)
7816 clk_put(qclk->ce_core_src_clk);
7817 clk_put(qclk->ce_core_clk);
7818 return -EIO;
7819 }
7820
7821 /* Get CE AXI clk */
7822 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7823 if (IS_ERR(qclk->ce_bus_clk)) {
7824 rc = PTR_ERR(qclk->ce_bus_clk);
7825 pr_err("Unable to get CE BUS interface clk\n");
7826 if (qclk->ce_core_src_clk != NULL)
7827 clk_put(qclk->ce_core_src_clk);
7828 clk_put(qclk->ce_core_clk);
7829 clk_put(qclk->ce_clk);
7830 return -EIO;
7831 }
7832
7833 return rc;
7834}
7835
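/* Release the clock handles acquired by __qseecom_init_clk(). */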
7836static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7837{
7838 struct qseecom_clk *qclk;
7839
7840 if (ce == CLK_QSEE)
7841 qclk = &qseecom.qsee;
7842 else
7843 qclk = &qseecom.ce_drv;
7844
7845 if (qclk->ce_clk != NULL) {
7846 clk_put(qclk->ce_clk);
7847 qclk->ce_clk = NULL;
7848 }
7849 if (qclk->ce_core_clk != NULL) {
7850 clk_put(qclk->ce_core_clk);
7851 qclk->ce_core_clk = NULL;
7852 }
7853 if (qclk->ce_bus_clk != NULL) {
7854 clk_put(qclk->ce_bus_clk);
7855 qclk->ce_bus_clk = NULL;
7856 }
7857 if (qclk->ce_core_src_clk != NULL) {
7858 clk_put(qclk->ce_core_src_clk);
7859 qclk->ce_core_src_clk = NULL;
7860 }
7861 qclk->instance = CLK_INVALID;
7862}
7863
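/*
 * qseecom_retrieve_ce_data() - parse the crypto-engine (CE) configuration
 * from the device tree.  New-style device trees describe FDE/PFE pipe usage
 * with the "qcom,full-disk-encrypt-info" / "qcom,per-file-encrypt-info"
 * tables; old-style ones fall back to "qcom,disk-encrypt-pipe-pair",
 * "qcom,file-encrypt-pipe-pair" and "qcom,hlos-ce-hw-instance".
 *
 * Illustrative new-style fragment (the values are hypothetical and assume
 * struct qseecom_crypto_info is laid out as <unit_num ce pipe_pair>):
 *
 *	qcom,support-fde;
 *	qcom,full-disk-encrypt-info = <0 10 1>;
 */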
7864static int qseecom_retrieve_ce_data(struct platform_device *pdev)
7865{
7866 int rc = 0;
7867 uint32_t hlos_num_ce_hw_instances;
7868 uint32_t disk_encrypt_pipe;
7869 uint32_t file_encrypt_pipe;
7870	uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
7871	int i;
7872 const int *tbl;
7873 int size;
7874 int entry;
7875 struct qseecom_crypto_info *pfde_tbl = NULL;
7876 struct qseecom_crypto_info *p;
7877 int tbl_size;
7878 int j;
7879 bool old_db = true;
7880 struct qseecom_ce_info_use *pce_info_use;
7881 uint32_t *unit_tbl = NULL;
7882 int total_units = 0;
7883 struct qseecom_ce_pipe_entry *pce_entry;
7884
7885 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
7886 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
7887
7888 if (of_property_read_u32((&pdev->dev)->of_node,
7889 "qcom,qsee-ce-hw-instance",
7890 &qseecom.ce_info.qsee_ce_hw_instance)) {
7891 pr_err("Fail to get qsee ce hw instance information.\n");
7892 rc = -EINVAL;
7893 goto out;
7894 } else {
7895 pr_debug("qsee-ce-hw-instance=0x%x\n",
7896 qseecom.ce_info.qsee_ce_hw_instance);
7897 }
7898
7899 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
7900 "qcom,support-fde");
7901 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
7902 "qcom,support-pfe");
7903
7904 if (!qseecom.support_pfe && !qseecom.support_fde) {
7905 pr_warn("Device does not support PFE/FDE");
7906 goto out;
7907 }
7908
7909 if (qseecom.support_fde)
7910 tbl = of_get_property((&pdev->dev)->of_node,
7911 "qcom,full-disk-encrypt-info", &size);
7912 else
7913 tbl = NULL;
7914 if (tbl) {
7915 old_db = false;
7916 if (size % sizeof(struct qseecom_crypto_info)) {
7917 pr_err("full-disk-encrypt-info tbl size(%d)\n",
7918 size);
7919 rc = -EINVAL;
7920 goto out;
7921 }
7922 tbl_size = size / sizeof
7923 (struct qseecom_crypto_info);
7924
7925 pfde_tbl = kzalloc(size, GFP_KERNEL);
7926 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
7927 total_units = 0;
7928
7929 if (!pfde_tbl || !unit_tbl) {
7930 pr_err("failed to alloc memory\n");
7931 rc = -ENOMEM;
7932 goto out;
7933 }
7934 if (of_property_read_u32_array((&pdev->dev)->of_node,
7935 "qcom,full-disk-encrypt-info",
7936 (u32 *)pfde_tbl, size/sizeof(u32))) {
7937 pr_err("failed to read full-disk-encrypt-info tbl\n");
7938 rc = -EINVAL;
7939 goto out;
7940 }
7941
7942 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7943 for (j = 0; j < total_units; j++) {
7944 if (p->unit_num == *(unit_tbl + j))
7945 break;
7946 }
7947 if (j == total_units) {
7948 *(unit_tbl + total_units) = p->unit_num;
7949 total_units++;
7950 }
7951 }
7952
7953 qseecom.ce_info.num_fde = total_units;
7954 pce_info_use = qseecom.ce_info.fde = kcalloc(
7955 total_units, sizeof(struct qseecom_ce_info_use),
7956 GFP_KERNEL);
7957 if (!pce_info_use) {
7958 pr_err("failed to alloc memory\n");
7959 rc = -ENOMEM;
7960 goto out;
7961 }
7962
7963 for (j = 0; j < total_units; j++, pce_info_use++) {
7964 pce_info_use->unit_num = *(unit_tbl + j);
7965 pce_info_use->alloc = false;
7966 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
7967 pce_info_use->num_ce_pipe_entries = 0;
7968 pce_info_use->ce_pipe_entry = NULL;
7969 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7970 if (p->unit_num == pce_info_use->unit_num)
7971 pce_info_use->num_ce_pipe_entries++;
7972 }
7973
7974 entry = pce_info_use->num_ce_pipe_entries;
7975 pce_entry = pce_info_use->ce_pipe_entry =
7976 kcalloc(entry,
7977 sizeof(struct qseecom_ce_pipe_entry),
7978 GFP_KERNEL);
7979 if (pce_entry == NULL) {
7980 pr_err("failed to alloc memory\n");
7981 rc = -ENOMEM;
7982 goto out;
7983 }
7984
7985 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7986 if (p->unit_num == pce_info_use->unit_num) {
7987 pce_entry->ce_num = p->ce;
7988 pce_entry->ce_pipe_pair =
7989 p->pipe_pair;
7990 pce_entry->valid = true;
7991 pce_entry++;
7992 }
7993 }
7994 }
7995 kfree(unit_tbl);
7996 unit_tbl = NULL;
7997 kfree(pfde_tbl);
7998 pfde_tbl = NULL;
7999 }
8000
8001 if (qseecom.support_pfe)
8002 tbl = of_get_property((&pdev->dev)->of_node,
8003 "qcom,per-file-encrypt-info", &size);
8004 else
8005 tbl = NULL;
8006 if (tbl) {
8007 old_db = false;
8008 if (size % sizeof(struct qseecom_crypto_info)) {
8009 pr_err("per-file-encrypt-info tbl size(%d)\n",
8010 size);
8011 rc = -EINVAL;
8012 goto out;
8013 }
8014 tbl_size = size / sizeof
8015 (struct qseecom_crypto_info);
8016
8017 pfde_tbl = kzalloc(size, GFP_KERNEL);
8018 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8019 total_units = 0;
8020 if (!pfde_tbl || !unit_tbl) {
8021 pr_err("failed to alloc memory\n");
8022 rc = -ENOMEM;
8023 goto out;
8024 }
8025 if (of_property_read_u32_array((&pdev->dev)->of_node,
8026 "qcom,per-file-encrypt-info",
8027 (u32 *)pfde_tbl, size/sizeof(u32))) {
8028 pr_err("failed to read per-file-encrypt-info tbl\n");
8029 rc = -EINVAL;
8030 goto out;
8031 }
8032
8033 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8034 for (j = 0; j < total_units; j++) {
8035 if (p->unit_num == *(unit_tbl + j))
8036 break;
8037 }
8038 if (j == total_units) {
8039 *(unit_tbl + total_units) = p->unit_num;
8040 total_units++;
8041 }
8042 }
8043
8044 qseecom.ce_info.num_pfe = total_units;
8045 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8046 total_units, sizeof(struct qseecom_ce_info_use),
8047 GFP_KERNEL);
8048 if (!pce_info_use) {
8049 pr_err("failed to alloc memory\n");
8050 rc = -ENOMEM;
8051 goto out;
8052 }
8053
8054 for (j = 0; j < total_units; j++, pce_info_use++) {
8055 pce_info_use->unit_num = *(unit_tbl + j);
8056 pce_info_use->alloc = false;
8057 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8058 pce_info_use->num_ce_pipe_entries = 0;
8059 pce_info_use->ce_pipe_entry = NULL;
8060 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8061 if (p->unit_num == pce_info_use->unit_num)
8062 pce_info_use->num_ce_pipe_entries++;
8063 }
8064
8065 entry = pce_info_use->num_ce_pipe_entries;
8066 pce_entry = pce_info_use->ce_pipe_entry =
8067 kcalloc(entry,
8068 sizeof(struct qseecom_ce_pipe_entry),
8069 GFP_KERNEL);
8070 if (pce_entry == NULL) {
8071 pr_err("failed to alloc memory\n");
8072 rc = -ENOMEM;
8073 goto out;
8074 }
8075
8076 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8077 if (p->unit_num == pce_info_use->unit_num) {
8078 pce_entry->ce_num = p->ce;
8079 pce_entry->ce_pipe_pair =
8080 p->pipe_pair;
8081 pce_entry->valid = true;
8082 pce_entry++;
8083 }
8084 }
8085 }
8086 kfree(unit_tbl);
8087 unit_tbl = NULL;
8088 kfree(pfde_tbl);
8089 pfde_tbl = NULL;
8090 }
8091
8092 if (!old_db)
8093 goto out1;
8094
8095 if (of_property_read_bool((&pdev->dev)->of_node,
8096 "qcom,support-multiple-ce-hw-instance")) {
8097 if (of_property_read_u32((&pdev->dev)->of_node,
8098 "qcom,hlos-num-ce-hw-instances",
8099 &hlos_num_ce_hw_instances)) {
8100 pr_err("Fail: get hlos number of ce hw instance\n");
8101 rc = -EINVAL;
8102 goto out;
8103 }
8104 } else {
8105 hlos_num_ce_hw_instances = 1;
8106 }
8107
8108 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8109 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8110 MAX_CE_PIPE_PAIR_PER_UNIT);
8111 rc = -EINVAL;
8112 goto out;
8113 }
8114
8115 if (of_property_read_u32_array((&pdev->dev)->of_node,
8116 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8117 hlos_num_ce_hw_instances)) {
8118 pr_err("Fail: get hlos ce hw instance info\n");
8119 rc = -EINVAL;
8120 goto out;
8121 }
8122
8123 if (qseecom.support_fde) {
8124 pce_info_use = qseecom.ce_info.fde =
8125 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8126 if (!pce_info_use) {
8127 pr_err("failed to alloc memory\n");
8128 rc = -ENOMEM;
8129 goto out;
8130 }
8131 /* by default for old db */
8132 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8133 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8134 pce_info_use->alloc = false;
8135 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8136 pce_info_use->ce_pipe_entry = NULL;
8137 if (of_property_read_u32((&pdev->dev)->of_node,
8138 "qcom,disk-encrypt-pipe-pair",
8139 &disk_encrypt_pipe)) {
8140 pr_err("Fail to get FDE pipe information.\n");
8141 rc = -EINVAL;
8142 goto out;
8143 } else {
8144 pr_debug("disk-encrypt-pipe-pair=0x%x",
8145 disk_encrypt_pipe);
8146 }
8147 entry = pce_info_use->num_ce_pipe_entries =
8148 hlos_num_ce_hw_instances;
8149 pce_entry = pce_info_use->ce_pipe_entry =
8150 kcalloc(entry,
8151 sizeof(struct qseecom_ce_pipe_entry),
8152 GFP_KERNEL);
8153 if (pce_entry == NULL) {
8154 pr_err("failed to alloc memory\n");
8155 rc = -ENOMEM;
8156 goto out;
8157 }
8158 for (i = 0; i < entry; i++) {
8159 pce_entry->ce_num = hlos_ce_hw_instance[i];
8160 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8161 pce_entry->valid = 1;
8162 pce_entry++;
8163 }
8164 } else {
8165 pr_warn("Device does not support FDE");
8166 disk_encrypt_pipe = 0xff;
8167 }
8168 if (qseecom.support_pfe) {
8169 pce_info_use = qseecom.ce_info.pfe =
8170 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8171 if (!pce_info_use) {
8172 pr_err("failed to alloc memory\n");
8173 rc = -ENOMEM;
8174 goto out;
8175 }
8176 /* by default for old db */
8177 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8178 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8179 pce_info_use->alloc = false;
8180 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8181 pce_info_use->ce_pipe_entry = NULL;
8182
8183 if (of_property_read_u32((&pdev->dev)->of_node,
8184 "qcom,file-encrypt-pipe-pair",
8185 &file_encrypt_pipe)) {
8186 pr_err("Fail to get PFE pipe information.\n");
8187 rc = -EINVAL;
8188 goto out;
8189 } else {
8190 pr_debug("file-encrypt-pipe-pair=0x%x",
8191 file_encrypt_pipe);
8192 }
8193 entry = pce_info_use->num_ce_pipe_entries =
8194 hlos_num_ce_hw_instances;
8195 pce_entry = pce_info_use->ce_pipe_entry =
8196 kcalloc(entry,
8197 sizeof(struct qseecom_ce_pipe_entry),
8198 GFP_KERNEL);
8199 if (pce_entry == NULL) {
8200 pr_err("failed to alloc memory\n");
8201 rc = -ENOMEM;
8202 goto out;
8203 }
8204 for (i = 0; i < entry; i++) {
8205 pce_entry->ce_num = hlos_ce_hw_instance[i];
8206 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8207 pce_entry->valid = 1;
8208 pce_entry++;
8209 }
8210 } else {
8211 pr_warn("Device does not support PFE");
8212 file_encrypt_pipe = 0xff;
8213 }
8214
8215out1:
8216 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8217 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8218out:
8219 if (rc) {
8220 if (qseecom.ce_info.fde) {
8221 pce_info_use = qseecom.ce_info.fde;
8222 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8223 pce_entry = pce_info_use->ce_pipe_entry;
8224 kfree(pce_entry);
8225 pce_info_use++;
8226 }
8227 }
8228 kfree(qseecom.ce_info.fde);
8229 qseecom.ce_info.fde = NULL;
8230 if (qseecom.ce_info.pfe) {
8231 pce_info_use = qseecom.ce_info.pfe;
8232 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8233 pce_entry = pce_info_use->ce_pipe_entry;
8234 kfree(pce_entry);
8235 pce_info_use++;
8236 }
8237 }
8238 kfree(qseecom.ce_info.pfe);
8239 qseecom.ce_info.pfe = NULL;
8240 }
8241 kfree(unit_tbl);
8242 kfree(pfde_tbl);
8243 return rc;
8244}
8245
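/*
 * qseecom_get_ce_info() - bind a CE info unit to the caller for the requested
 * usage (FDE or PFE) and copy its pipe entries back to user space.  A unit
 * already bound to the caller's handle is returned again; otherwise the first
 * free unit is claimed.  Returns -EBUSY when no unit is available.
 */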
8246static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8247 void __user *argp)
8248{
8249 struct qseecom_ce_info_req req;
8250 struct qseecom_ce_info_req *pinfo = &req;
8251 int ret = 0;
8252 int i;
8253 unsigned int entries;
8254 struct qseecom_ce_info_use *pce_info_use, *p;
8255 int total = 0;
8256 bool found = false;
8257 struct qseecom_ce_pipe_entry *pce_entry;
8258
8259 ret = copy_from_user(pinfo, argp,
8260 sizeof(struct qseecom_ce_info_req));
8261 if (ret) {
8262 pr_err("copy_from_user failed\n");
8263 return ret;
8264 }
8265
8266 switch (pinfo->usage) {
8267 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8268 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8269 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8270 if (qseecom.support_fde) {
8271 p = qseecom.ce_info.fde;
8272 total = qseecom.ce_info.num_fde;
8273 } else {
8274 pr_err("system does not support fde\n");
8275 return -EINVAL;
8276 }
8277 break;
8278 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8279 if (qseecom.support_pfe) {
8280 p = qseecom.ce_info.pfe;
8281 total = qseecom.ce_info.num_pfe;
8282 } else {
8283 pr_err("system does not support pfe\n");
8284 return -EINVAL;
8285 }
8286 break;
8287 default:
8288 pr_err("unsupported usage %d\n", pinfo->usage);
8289 return -EINVAL;
8290 }
8291
8292 pce_info_use = NULL;
8293 for (i = 0; i < total; i++) {
8294 if (!p->alloc)
8295 pce_info_use = p;
8296 else if (!memcmp(p->handle, pinfo->handle,
8297 MAX_CE_INFO_HANDLE_SIZE)) {
8298 pce_info_use = p;
8299 found = true;
8300 break;
8301 }
8302 p++;
8303 }
8304
8305 if (pce_info_use == NULL)
8306 return -EBUSY;
8307
8308 pinfo->unit_num = pce_info_use->unit_num;
8309 if (!pce_info_use->alloc) {
8310 pce_info_use->alloc = true;
8311 memcpy(pce_info_use->handle,
8312 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8313 }
8314 if (pce_info_use->num_ce_pipe_entries >
8315 MAX_CE_PIPE_PAIR_PER_UNIT)
8316 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8317 else
8318 entries = pce_info_use->num_ce_pipe_entries;
8319 pinfo->num_ce_pipe_entries = entries;
8320 pce_entry = pce_info_use->ce_pipe_entry;
8321 for (i = 0; i < entries; i++, pce_entry++)
8322 pinfo->ce_pipe_entry[i] = *pce_entry;
8323 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8324 pinfo->ce_pipe_entry[i].valid = 0;
8325
8326 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8327 pr_err("copy_to_user failed\n");
8328 ret = -EFAULT;
8329 }
8330 return ret;
8331}
8332
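/*
 * qseecom_free_ce_info() - release the CE info unit previously bound to the
 * caller's handle so that it can be handed out again.
 */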
8333static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8334 void __user *argp)
8335{
8336 struct qseecom_ce_info_req req;
8337 struct qseecom_ce_info_req *pinfo = &req;
8338 int ret = 0;
8339 struct qseecom_ce_info_use *p;
8340 int total = 0;
8341 int i;
8342 bool found = false;
8343
8344 ret = copy_from_user(pinfo, argp,
8345 sizeof(struct qseecom_ce_info_req));
8346 if (ret)
8347 return ret;
8348
8349 switch (pinfo->usage) {
8350 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8351 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8352 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8353 if (qseecom.support_fde) {
8354 p = qseecom.ce_info.fde;
8355 total = qseecom.ce_info.num_fde;
8356 } else {
8357 pr_err("system does not support fde\n");
8358 return -EINVAL;
8359 }
8360 break;
8361 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8362 if (qseecom.support_pfe) {
8363 p = qseecom.ce_info.pfe;
8364 total = qseecom.ce_info.num_pfe;
8365 } else {
8366 pr_err("system does not support pfe\n");
8367 return -EINVAL;
8368 }
8369 break;
8370 default:
8371 pr_err("unsupported usage %d\n", pinfo->usage);
8372 return -EINVAL;
8373 }
8374
8375 for (i = 0; i < total; i++) {
8376 if (p->alloc &&
8377 !memcmp(p->handle, pinfo->handle,
8378 MAX_CE_INFO_HANDLE_SIZE)) {
8379 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8380 p->alloc = false;
8381 found = true;
8382 break;
8383 }
8384 p++;
8385 }
8386 return ret;
8387}
8388
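/*
 * qseecom_query_ce_info() - report the unit number and pipe entries of the CE
 * info unit bound to the caller's handle without changing its allocation
 * state.
 */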
8389static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8390 void __user *argp)
8391{
8392 struct qseecom_ce_info_req req;
8393 struct qseecom_ce_info_req *pinfo = &req;
8394 int ret = 0;
8395 int i;
8396 unsigned int entries;
8397 struct qseecom_ce_info_use *pce_info_use, *p;
8398 int total = 0;
8399 bool found = false;
8400 struct qseecom_ce_pipe_entry *pce_entry;
8401
8402 ret = copy_from_user(pinfo, argp,
8403 sizeof(struct qseecom_ce_info_req));
8404 if (ret)
8405 return ret;
8406
8407 switch (pinfo->usage) {
8408 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8409 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8410 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8411 if (qseecom.support_fde) {
8412 p = qseecom.ce_info.fde;
8413 total = qseecom.ce_info.num_fde;
8414 } else {
8415 pr_err("system does not support fde\n");
8416 return -EINVAL;
8417 }
8418 break;
8419 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8420 if (qseecom.support_pfe) {
8421 p = qseecom.ce_info.pfe;
8422 total = qseecom.ce_info.num_pfe;
8423 } else {
8424 pr_err("system does not support pfe\n");
8425 return -EINVAL;
8426 }
8427 break;
8428 default:
8429 pr_err("unsupported usage %d\n", pinfo->usage);
8430 return -EINVAL;
8431 }
8432
8433 pce_info_use = NULL;
8434 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8435 pinfo->num_ce_pipe_entries = 0;
8436 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8437 pinfo->ce_pipe_entry[i].valid = 0;
8438
8439 for (i = 0; i < total; i++) {
8440
8441 if (p->alloc && !memcmp(p->handle,
8442 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8443 pce_info_use = p;
8444 found = true;
8445 break;
8446 }
8447 p++;
8448 }
8449 if (!pce_info_use)
8450 goto out;
8451 pinfo->unit_num = pce_info_use->unit_num;
8452 if (pce_info_use->num_ce_pipe_entries >
8453 MAX_CE_PIPE_PAIR_PER_UNIT)
8454 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8455 else
8456 entries = pce_info_use->num_ce_pipe_entries;
8457 pinfo->num_ce_pipe_entries = entries;
8458 pce_entry = pce_info_use->ce_pipe_entry;
8459 for (i = 0; i < entries; i++, pce_entry++)
8460 pinfo->ce_pipe_entry[i] = *pce_entry;
8461 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8462 pinfo->ce_pipe_entry[i].valid = 0;
8463out:
8464 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8465 pr_err("copy_to_user failed\n");
8466 ret = -EFAULT;
8467 }
8468 return ret;
8469}
8470
8471/*
8472 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8473 * then whitelist feature is not supported.
8474 */
8475static int qseecom_check_whitelist_feature(void)
8476{
8477 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8478
8479 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8480}
8481
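
/*
 * qseecom_probe() - driver initialization: create the qseecom character
 * device, query the QSEE version, parse CE and bus-scaling properties from
 * the device tree, initialize the crypto clocks, and, when required, notify
 * TZ of the secure app region.  The driver is moved to QSEECOM_STATE_READY
 * only after all of this succeeds.
 */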
8482static int qseecom_probe(struct platform_device *pdev)
8483{
8484 int rc;
8485 int i;
8486 uint32_t feature = 10;
8487 struct device *class_dev;
8488 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8489 struct qseecom_command_scm_resp resp;
8490 struct qseecom_ce_info_use *pce_info_use = NULL;
8491
8492 qseecom.qsee_bw_count = 0;
8493 qseecom.qsee_perf_client = 0;
8494 qseecom.qsee_sfpb_bw_count = 0;
8495
8496 qseecom.qsee.ce_core_clk = NULL;
8497 qseecom.qsee.ce_clk = NULL;
8498 qseecom.qsee.ce_core_src_clk = NULL;
8499 qseecom.qsee.ce_bus_clk = NULL;
8500
8501 qseecom.cumulative_mode = 0;
8502 qseecom.current_mode = INACTIVE;
8503 qseecom.support_bus_scaling = false;
8504 qseecom.support_fde = false;
8505 qseecom.support_pfe = false;
8506
8507 qseecom.ce_drv.ce_core_clk = NULL;
8508 qseecom.ce_drv.ce_clk = NULL;
8509 qseecom.ce_drv.ce_core_src_clk = NULL;
8510 qseecom.ce_drv.ce_bus_clk = NULL;
8511 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8512
8513 qseecom.app_block_ref_cnt = 0;
8514 init_waitqueue_head(&qseecom.app_block_wq);
8515 qseecom.whitelist_support = true;
8516
8517 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8518 if (rc < 0) {
8519 pr_err("alloc_chrdev_region failed %d\n", rc);
8520 return rc;
8521 }
8522
8523 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8524 if (IS_ERR(driver_class)) {
8525 rc = -ENOMEM;
8526 pr_err("class_create failed %d\n", rc);
8527 goto exit_unreg_chrdev_region;
8528 }
8529
8530 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8531 QSEECOM_DEV);
8532 if (IS_ERR(class_dev)) {
8533		rc = -ENOMEM;
8534		pr_err("class_device_create failed %d\n", rc);
8535 goto exit_destroy_class;
8536 }
8537
8538 cdev_init(&qseecom.cdev, &qseecom_fops);
8539 qseecom.cdev.owner = THIS_MODULE;
8540
8541 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8542 if (rc < 0) {
8543 pr_err("cdev_add failed %d\n", rc);
8544 goto exit_destroy_device;
8545 }
8546
8547 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
8548 spin_lock_init(&qseecom.registered_listener_list_lock);
8549 INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8550 spin_lock_init(&qseecom.registered_app_list_lock);
8551 INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8552 spin_lock_init(&qseecom.registered_kclient_list_lock);
8553 init_waitqueue_head(&qseecom.send_resp_wq);
8554 qseecom.send_resp_flag = 0;
8555
8556 qseecom.qsee_version = QSEEE_VERSION_00;
8557 rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8558 &resp, sizeof(resp));
8559	if (rc) {
8560		pr_err("Failed to get QSEE version info %d\n", rc);
8561		goto exit_del_cdev;
8562	}
8563	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8564	qseecom.qsee_version = resp.result;
8565 qseecom.qseos_version = QSEOS_VERSION_14;
8566 qseecom.commonlib_loaded = false;
8567 qseecom.commonlib64_loaded = false;
8568 qseecom.pdev = class_dev;
8569 /* Create ION msm client */
8570 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8571 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8572 pr_err("Ion client cannot be created\n");
8573 rc = -ENOMEM;
8574 goto exit_del_cdev;
8575 }
8576
8577 /* register client for bus scaling */
8578 if (pdev->dev.of_node) {
8579 qseecom.pdev->of_node = pdev->dev.of_node;
8580 qseecom.support_bus_scaling =
8581 of_property_read_bool((&pdev->dev)->of_node,
8582 "qcom,support-bus-scaling");
8583 rc = qseecom_retrieve_ce_data(pdev);
8584 if (rc)
8585 goto exit_destroy_ion_client;
8586 qseecom.appsbl_qseecom_support =
8587 of_property_read_bool((&pdev->dev)->of_node,
8588 "qcom,appsbl-qseecom-support");
8589 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
8590 qseecom.appsbl_qseecom_support);
8591
8592 qseecom.commonlib64_loaded =
8593 of_property_read_bool((&pdev->dev)->of_node,
8594 "qcom,commonlib64-loaded-by-uefi");
8595 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
8596 qseecom.commonlib64_loaded);
8597 qseecom.fde_key_size =
8598 of_property_read_bool((&pdev->dev)->of_node,
8599 "qcom,fde-key-size");
8600 qseecom.no_clock_support =
8601 of_property_read_bool((&pdev->dev)->of_node,
8602 "qcom,no-clock-support");
8603		if (qseecom.no_clock_support) {
8604 pr_info("qseecom clocks handled by other subsystem\n");
8605 } else {
8606 pr_info("no-clock-support=0x%x",
8607 qseecom.no_clock_support);
8608 }
8609
8610 if (of_property_read_u32((&pdev->dev)->of_node,
8611 "qcom,qsee-reentrancy-support",
8612 &qseecom.qsee_reentrancy_support)) {
8613 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
8614 qseecom.qsee_reentrancy_support = 0;
8615 } else {
8616 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
8617 qseecom.qsee_reentrancy_support);
8618 }
8619
8620 /*
8621 * The qseecom bus scaling flag can not be enabled when
8622 * crypto clock is not handled by HLOS.
8623 */
8624 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
8625 pr_err("support_bus_scaling flag can not be enabled.\n");
8626 rc = -EINVAL;
8627 goto exit_destroy_ion_client;
8628 }
8629
8630 if (of_property_read_u32((&pdev->dev)->of_node,
8631 "qcom,ce-opp-freq",
8632 &qseecom.ce_opp_freq_hz)) {
8633 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
8634 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
8635 }
8636 rc = __qseecom_init_clk(CLK_QSEE);
8637 if (rc)
8638 goto exit_destroy_ion_client;
8639
8640 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8641 (qseecom.support_pfe || qseecom.support_fde)) {
8642 rc = __qseecom_init_clk(CLK_CE_DRV);
8643 if (rc) {
8644 __qseecom_deinit_clk(CLK_QSEE);
8645 goto exit_destroy_ion_client;
8646 }
8647 } else {
8648 struct qseecom_clk *qclk;
8649
8650 qclk = &qseecom.qsee;
8651 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
8652 qseecom.ce_drv.ce_clk = qclk->ce_clk;
8653 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
8654 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
8655 }
8656
8657 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8658 msm_bus_cl_get_pdata(pdev);
8659 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
8660 (!qseecom.is_apps_region_protected &&
8661 !qseecom.appsbl_qseecom_support)) {
8662 struct resource *resource = NULL;
8663 struct qsee_apps_region_info_ireq req;
8664 struct qsee_apps_region_info_64bit_ireq req_64bit;
8665 struct qseecom_command_scm_resp resp;
8666 void *cmd_buf = NULL;
8667 size_t cmd_len;
8668
8669 resource = platform_get_resource_byname(pdev,
8670 IORESOURCE_MEM, "secapp-region");
8671 if (resource) {
8672 if (qseecom.qsee_version < QSEE_VERSION_40) {
8673 req.qsee_cmd_id =
8674 QSEOS_APP_REGION_NOTIFICATION;
8675 req.addr = (uint32_t)resource->start;
8676 req.size = resource_size(resource);
8677 cmd_buf = (void *)&req;
8678 cmd_len = sizeof(struct
8679 qsee_apps_region_info_ireq);
8680 pr_warn("secure app region addr=0x%x size=0x%x",
8681 req.addr, req.size);
8682 } else {
8683 req_64bit.qsee_cmd_id =
8684 QSEOS_APP_REGION_NOTIFICATION;
8685 req_64bit.addr = resource->start;
8686 req_64bit.size = resource_size(
8687 resource);
8688 cmd_buf = (void *)&req_64bit;
8689 cmd_len = sizeof(struct
8690 qsee_apps_region_info_64bit_ireq);
8691 pr_warn("secure app region addr=0x%llx size=0x%x",
8692 req_64bit.addr, req_64bit.size);
8693 }
8694 } else {
8695 pr_err("Fail to get secure app region info\n");
8696 rc = -EINVAL;
8697 goto exit_deinit_clock;
8698 }
8699 rc = __qseecom_enable_clk(CLK_QSEE);
8700 if (rc) {
8701 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
8702 rc = -EIO;
8703 goto exit_deinit_clock;
8704 }
8705 rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
8706 cmd_buf, cmd_len,
8707 &resp, sizeof(resp));
8708 __qseecom_disable_clk(CLK_QSEE);
8709 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
8710 pr_err("send secapp reg fail %d resp.res %d\n",
8711 rc, resp.result);
8712 rc = -EINVAL;
8713 goto exit_deinit_clock;
8714 }
8715 }
8716 /*
8717		 * By default, appsbl only loads cmnlib. If the OEM changes appsbl to
8718		 * also load cmnlib64 while the cmnlib64 image is not present in
8719		 * non_hlos.bin, please also set "qseecom.commonlib64_loaded = true" here.
8720 */
8721 if (qseecom.is_apps_region_protected ||
8722 qseecom.appsbl_qseecom_support)
8723 qseecom.commonlib_loaded = true;
8724 } else {
8725 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8726 pdev->dev.platform_data;
8727 }
8728 if (qseecom.support_bus_scaling) {
8729 init_timer(&(qseecom.bw_scale_down_timer));
8730 INIT_WORK(&qseecom.bw_inactive_req_ws,
8731 qseecom_bw_inactive_req_work);
8732 qseecom.bw_scale_down_timer.function =
8733 qseecom_scale_bus_bandwidth_timer_callback;
8734 }
8735 qseecom.timer_running = false;
8736 qseecom.qsee_perf_client = msm_bus_scale_register_client(
8737 qseecom_platform_support);
8738
8739 qseecom.whitelist_support = qseecom_check_whitelist_feature();
8740 pr_warn("qseecom.whitelist_support = %d\n",
8741 qseecom.whitelist_support);
8742
8743 if (!qseecom.qsee_perf_client)
8744 pr_err("Unable to register bus client\n");
8745
8746 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8747 return 0;
8748
8749exit_deinit_clock:
8750 __qseecom_deinit_clk(CLK_QSEE);
8751 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8752 (qseecom.support_pfe || qseecom.support_fde))
8753 __qseecom_deinit_clk(CLK_CE_DRV);
8754exit_destroy_ion_client:
8755 if (qseecom.ce_info.fde) {
8756 pce_info_use = qseecom.ce_info.fde;
8757 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8758 kzfree(pce_info_use->ce_pipe_entry);
8759 pce_info_use++;
8760 }
8761 kfree(qseecom.ce_info.fde);
8762 }
8763 if (qseecom.ce_info.pfe) {
8764 pce_info_use = qseecom.ce_info.pfe;
8765 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8766 kzfree(pce_info_use->ce_pipe_entry);
8767 pce_info_use++;
8768 }
8769 kfree(qseecom.ce_info.pfe);
8770 }
8771 ion_client_destroy(qseecom.ion_clnt);
8772exit_del_cdev:
8773 cdev_del(&qseecom.cdev);
8774exit_destroy_device:
8775 device_destroy(driver_class, qseecom_device_no);
8776exit_destroy_class:
8777 class_destroy(driver_class);
8778exit_unreg_chrdev_region:
8779 unregister_chrdev_region(qseecom_device_no, 1);
8780 return rc;
8781}
8782
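/*
 * qseecom_remove() - undo qseecom_probe(): unload apps registered by kernel
 * clients, drop bus and clock votes, free the CE info tables and tear down
 * the character device.
 */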
8783static int qseecom_remove(struct platform_device *pdev)
8784{
8785 struct qseecom_registered_kclient_list *kclient = NULL;
8786	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
8787	unsigned long flags = 0;
8788 int ret = 0;
8789 int i;
8790 struct qseecom_ce_pipe_entry *pce_entry;
8791 struct qseecom_ce_info_use *pce_info_use;
8792
8793 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8794 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
8795
8796	list_for_each_entry_safe(kclient, kclient_tmp,
8797 &qseecom.registered_kclient_list_head, list) {
8798
8799 /* Break the loop if client handle is NULL */
8800 if (!kclient->handle)
8801 goto exit_free_kclient;
8802
8803 if (list_empty(&kclient->list))
8804 goto exit_free_kc_handle;
8805
8806 list_del(&kclient->list);
8807 mutex_lock(&app_access_lock);
8808 ret = qseecom_unload_app(kclient->handle->dev, false);
8809 mutex_unlock(&app_access_lock);
8810 if (!ret) {
8811 kzfree(kclient->handle->dev);
8812 kzfree(kclient->handle);
8813 kzfree(kclient);
8814 }
8815 }
8816
8817exit_free_kc_handle:
8818 kzfree(kclient->handle);
8819exit_free_kclient:
8820 kzfree(kclient);
8821
8822	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
8823
8824 if (qseecom.qseos_version > QSEEE_VERSION_00)
8825 qseecom_unload_commonlib_image();
8826
8827 if (qseecom.qsee_perf_client)
8828 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
8829 0);
8830 if (pdev->dev.platform_data != NULL)
8831 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
8832
8833 if (qseecom.support_bus_scaling) {
8834 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8835 del_timer_sync(&qseecom.bw_scale_down_timer);
8836 }
8837
8838 if (qseecom.ce_info.fde) {
8839 pce_info_use = qseecom.ce_info.fde;
8840 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8841 pce_entry = pce_info_use->ce_pipe_entry;
8842 kfree(pce_entry);
8843 pce_info_use++;
8844 }
8845 }
8846 kfree(qseecom.ce_info.fde);
8847 if (qseecom.ce_info.pfe) {
8848 pce_info_use = qseecom.ce_info.pfe;
8849 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8850 pce_entry = pce_info_use->ce_pipe_entry;
8851 kfree(pce_entry);
8852 pce_info_use++;
8853 }
8854 }
8855 kfree(qseecom.ce_info.pfe);
8856
8857	/* de-initialize crypto clocks */
8858 if (pdev->dev.of_node) {
8859 __qseecom_deinit_clk(CLK_QSEE);
8860 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8861 (qseecom.support_pfe || qseecom.support_fde))
8862 __qseecom_deinit_clk(CLK_CE_DRV);
8863 }
8864
8865 ion_client_destroy(qseecom.ion_clnt);
8866
8867 cdev_del(&qseecom.cdev);
8868
8869 device_destroy(driver_class, qseecom_device_no);
8870
8871 class_destroy(driver_class);
8872
8873 unregister_chrdev_region(qseecom_device_no, 1);
8874
8875 return ret;
8876}
8877
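/*
 * qseecom_suspend() - vote the bus down to INACTIVE and gate the CE clocks
 * (when HLOS owns them) before the system suspends.
 */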
8878static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
8879{
8880 int ret = 0;
8881 struct qseecom_clk *qclk;
8882
8883 qclk = &qseecom.qsee;
8884 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
8885 if (qseecom.no_clock_support)
8886 return 0;
8887
8888 mutex_lock(&qsee_bw_mutex);
8889 mutex_lock(&clk_access_lock);
8890
8891 if (qseecom.current_mode != INACTIVE) {
8892 ret = msm_bus_scale_client_update_request(
8893 qseecom.qsee_perf_client, INACTIVE);
8894 if (ret)
8895 pr_err("Fail to scale down bus\n");
8896 else
8897 qseecom.current_mode = INACTIVE;
8898 }
8899
8900 if (qclk->clk_access_cnt) {
8901 if (qclk->ce_clk != NULL)
8902 clk_disable_unprepare(qclk->ce_clk);
8903 if (qclk->ce_core_clk != NULL)
8904 clk_disable_unprepare(qclk->ce_core_clk);
8905 if (qclk->ce_bus_clk != NULL)
8906 clk_disable_unprepare(qclk->ce_bus_clk);
8907 }
8908
8909 del_timer_sync(&(qseecom.bw_scale_down_timer));
8910 qseecom.timer_running = false;
8911
8912 mutex_unlock(&clk_access_lock);
8913 mutex_unlock(&qsee_bw_mutex);
8914 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8915
8916 return 0;
8917}
8918
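/*
 * qseecom_resume() - restore the pre-suspend bus vote (capped at HIGH),
 * re-enable any CE clocks that were held across suspend, and re-arm the
 * bandwidth scale-down timer if needed.
 */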
8919static int qseecom_resume(struct platform_device *pdev)
8920{
8921 int mode = 0;
8922 int ret = 0;
8923 struct qseecom_clk *qclk;
8924
8925 qclk = &qseecom.qsee;
8926 if (qseecom.no_clock_support)
8927 goto exit;
8928
8929 mutex_lock(&qsee_bw_mutex);
8930 mutex_lock(&clk_access_lock);
8931 if (qseecom.cumulative_mode >= HIGH)
8932 mode = HIGH;
8933 else
8934 mode = qseecom.cumulative_mode;
8935
8936 if (qseecom.cumulative_mode != INACTIVE) {
8937 ret = msm_bus_scale_client_update_request(
8938 qseecom.qsee_perf_client, mode);
8939 if (ret)
8940 pr_err("Fail to scale up bus to %d\n", mode);
8941 else
8942 qseecom.current_mode = mode;
8943 }
8944
8945 if (qclk->clk_access_cnt) {
8946 if (qclk->ce_core_clk != NULL) {
8947 ret = clk_prepare_enable(qclk->ce_core_clk);
8948 if (ret) {
8949 pr_err("Unable to enable/prep CE core clk\n");
8950 qclk->clk_access_cnt = 0;
8951 goto err;
8952 }
8953 }
8954 if (qclk->ce_clk != NULL) {
8955 ret = clk_prepare_enable(qclk->ce_clk);
8956 if (ret) {
8957 pr_err("Unable to enable/prep CE iface clk\n");
8958 qclk->clk_access_cnt = 0;
8959 goto ce_clk_err;
8960 }
8961 }
8962 if (qclk->ce_bus_clk != NULL) {
8963 ret = clk_prepare_enable(qclk->ce_bus_clk);
8964 if (ret) {
8965 pr_err("Unable to enable/prep CE bus clk\n");
8966 qclk->clk_access_cnt = 0;
8967 goto ce_bus_clk_err;
8968 }
8969 }
8970 }
8971
8972 if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
8973 qseecom.bw_scale_down_timer.expires = jiffies +
8974 msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
8975 mod_timer(&(qseecom.bw_scale_down_timer),
8976 qseecom.bw_scale_down_timer.expires);
8977 qseecom.timer_running = true;
8978 }
8979
8980 mutex_unlock(&clk_access_lock);
8981 mutex_unlock(&qsee_bw_mutex);
8982 goto exit;
8983
8984ce_bus_clk_err:
8985 if (qclk->ce_clk)
8986 clk_disable_unprepare(qclk->ce_clk);
8987ce_clk_err:
8988 if (qclk->ce_core_clk)
8989 clk_disable_unprepare(qclk->ce_core_clk);
8990err:
8991 mutex_unlock(&clk_access_lock);
8992 mutex_unlock(&qsee_bw_mutex);
8993 ret = -EIO;
8994exit:
8995 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8996 return ret;
8997}
8998
8999static const struct of_device_id qseecom_match[] = {
9000 {
9001 .compatible = "qcom,qseecom",
9002 },
9003 {}
9004};
9005
9006static struct platform_driver qseecom_plat_driver = {
9007 .probe = qseecom_probe,
9008 .remove = qseecom_remove,
9009 .suspend = qseecom_suspend,
9010 .resume = qseecom_resume,
9011 .driver = {
9012 .name = "qseecom",
9013 .owner = THIS_MODULE,
9014 .of_match_table = qseecom_match,
9015 },
9016};
9017
9018static int qseecom_init(void)
9019{
9020 return platform_driver_register(&qseecom_plat_driver);
9021}
9022
9023static void qseecom_exit(void)
9024{
9025 platform_driver_unregister(&qseecom_plat_driver);
9026}
9027
9028MODULE_LICENSE("GPL v2");
9029MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9030
9031module_init(qseecom_init);
9032module_exit(qseecom_exit);