blob: f68d880745d359b8f5f1cd3326e028bb866317d9 [file] [log] [blame]
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
4 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
54#define QSEECOM_DEV "qseecom"
55#define QSEOS_VERSION_14 0x14
56#define QSEEE_VERSION_00 0x400000
57#define QSEE_VERSION_01 0x401000
58#define QSEE_VERSION_02 0x402000
59#define QSEE_VERSION_03 0x403000
60#define QSEE_VERSION_04 0x404000
61#define QSEE_VERSION_05 0x405000
62#define QSEE_VERSION_20 0x800000
63#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
64
65#define QSEE_CE_CLK_100MHZ 100000000
66#define CE_CLK_DIV 1000000
67
68#define QSEECOM_MAX_SG_ENTRY 512
69#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
70 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
71
72#define QSEECOM_INVALID_KEY_ID 0xff
73
74/* Save partition image hash for authentication check */
75#define SCM_SAVE_PARTITION_HASH_ID 0x01
76
77/* Check if enterprise security is activate */
78#define SCM_IS_ACTIVATED_ID 0x02
79
80/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
81#define SCM_MDTP_CIPHER_DIP 0x01
82
83/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
84#define MAX_DIP 0x20000
85
86#define RPMB_SERVICE 0x2000
87#define SSD_SERVICE 0x3000
88
89#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
90#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
91#define TWO 2
92#define QSEECOM_UFS_ICE_CE_NUM 10
93#define QSEECOM_SDCC_ICE_CE_NUM 20
94#define QSEECOM_ICE_FDE_KEY_INDEX 0
95
96#define PHY_ADDR_4G (1ULL<<32)
97
98#define QSEECOM_STATE_NOT_READY 0
99#define QSEECOM_STATE_SUSPEND 1
100#define QSEECOM_STATE_READY 2
101#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
102
103/*
104 * default ce info unit to 0 for
105 * services which
106 * support only single instance.
107 * Most of services are in this category.
108 */
109#define DEFAULT_CE_INFO_UNIT 0
110#define DEFAULT_NUM_CE_INFO_UNIT 1
111
/* Indices for the two ancillary bus clocks voted on by this driver. */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};

/*
 * FDE key-size selector values.
 * NOTE(review): QSEECOM_ICE_FDE_KEY_SIZE_MASK (2) is used here as a bit-shift
 * position, not as an AND-mask, despite its name.
 */
enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};

/* What kind of client owns a struct qseecom_dev_handle. */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,
	QSEECOM_LISTENER_SERVICE,
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};

/* Which crypto-engine clock bank a qseecom_clk instance describes. */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,
	CLK_CE_DRV,
	CLK_INVALID,
};
139
140static struct class *driver_class;
141static dev_t qseecom_device_no;
142
143static DEFINE_MUTEX(qsee_bw_mutex);
144static DEFINE_MUTEX(app_access_lock);
145static DEFINE_MUTEX(clk_access_lock);
146
/* One scatter/gather descriptor entry; layout documented in detail below. */
struct sglist_info {
	uint32_t indexAndFlags;	/* packed via SGLISTINFO_SET_INDEX_FLAG() */
	uint32_t sizeOrCount;	/* byte size, or number of {start,len} pairs */
};
151
152/*
 153 * The 31st bit indicates whether one or multiple physical addresses are inside
154 * the request buffer. If it is set, the index locates a single physical addr
155 * inside the request buffer, and `sizeOrCount` is the size of the memory being
156 * shared at that physical address.
157 * Otherwise, the index locates an array of {start, len} pairs (a
158 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
159 * that array.
160 *
161 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
162 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
163 *
164 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
165 */
166#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
167 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
168
169#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
170
171#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
172
173#define MAKE_WHITELIST_VERSION(major, minor, patch) \
174 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
175
/*
 * Per-listener bookkeeping; nodes live on
 * qseecom.registered_listener_list_head under registered_listener_list_lock.
 */
struct qseecom_registered_listener_list {
	struct list_head list;
	struct qseecom_register_listener_req svc;	/* holds listener_id */
	void *user_virt_sb_base;	/* user-space shared-buffer base (per name) */
	u8 *sb_virt;			/* kernel va of shared buffer */
	phys_addr_t sb_phys;		/* physical addr of shared buffer */
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	wait_queue_head_t rcv_req_wq;
	int rcv_req_flag;
	int send_resp_flag;
	bool listener_in_use;
	/* wq for thread blocked on this listener*/
	wait_queue_head_t listener_block_app_wq;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;	/* valid entries in sglistinfo_ptr */
};
193
/*
 * One loaded TZ application; nodes live on
 * qseecom.registered_app_list_head under registered_app_list_lock.
 */
struct qseecom_registered_app_list {
	struct list_head list;
	u32 app_id;
	u32 ref_cnt;			/* number of clients using this app */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	bool app_blocked;		/* blocked waiting on a busy listener */
	u32 blocked_on_listener_id;
};
203
/* Kernel-client registration node (see registered_kclient_list_head). */
struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};
208
/* One crypto-engine usage unit (FDE or PFE) and its pipe entries. */
struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
	unsigned int unit_num;
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;		/* ce_pipe_entry currently allocated/owned */
	uint32_t type;
};

/* Aggregate CE hardware description: FDE and PFE unit tables. */
struct ce_hw_usage_info {
	uint32_t qsee_ce_hw_instance;
	uint32_t num_fde;
	struct qseecom_ce_info_use *fde;
	uint32_t num_pfe;
	struct qseecom_ce_info_use *pfe;
};
225
/* Clock handles for one CE hardware instance (see enum qseecom_ce_hw_instance). */
struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;	/* enable refcount, guarded by clk_access_lock */
};
234
/*
 * Global driver state; a single instance ('qseecom') is defined below.
 * Each list has its own spinlock; bandwidth counters are guarded by
 * qsee_bw_mutex elsewhere in the driver.
 */
struct qseecom_control {
	struct ion_client *ion_clnt; /* Ion client */
	struct list_head registered_listener_list_head;
	spinlock_t registered_listener_list_lock;

	struct list_head registered_app_list_head;
	spinlock_t registered_app_list_lock;

	struct list_head registered_kclient_list_head;
	spinlock_t registered_kclient_list_lock;

	wait_queue_head_t send_resp_wq;
	int send_resp_flag;

	uint32_t qseos_version;
	uint32_t qsee_version;	/* selects 32/64-bit request layouts in scm calls */
	struct device *pdev;
	bool whitelist_support;
	bool commonlib_loaded;
	bool commonlib64_loaded;
	struct ion_handle *cmnlib_ion_handle;
	struct ce_hw_usage_info ce_info;

	int qsee_bw_count;
	int qsee_sfpb_bw_count;

	uint32_t qsee_perf_client;
	struct qseecom_clk qsee;
	struct qseecom_clk ce_drv;

	bool support_bus_scaling;
	bool support_fde;
	bool support_pfe;
	bool fde_key_size;
	uint32_t cumulative_mode;
	enum qseecom_bandwidth_request_mode current_mode;
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;

	uint32_t app_block_ref_cnt;	/* apps currently blocked on listeners */
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;		/* QSEECOM_STATE_* */
	int is_apps_region_protected;	/* set from androidboot.keymaster= */
};
285
/* Tracks a DMA buffer allocated for a secure-buffer ion fd. */
struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;
	size_t size;
	void *vbase;
	dma_addr_t pbase;
};

/* 32-bit {buffer, size} memory reference passed to TZ. */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};

/* Per-client (TZ app) state held inside a qseecom_dev_handle. */
struct qseecom_client_handle {
	u32 app_id;
	u8 *sb_virt;			/* kernel va of shared buffer */
	phys_addr_t sb_phys;
	unsigned long user_virt_sb_base; /* user-space shared-buffer base (per name) */
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
};

/* Per-listener state held inside a qseecom_dev_handle. */
struct qseecom_listener_handle {
	u32 id;
};

/* The single global driver-state instance. */
static struct qseecom_control qseecom;
315
/* Per-open-file handle; 'type' selects which union member is valid. */
struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;
		struct qseecom_listener_handle listener;
	};
	bool released;
	int abort;
	wait_queue_head_t abort_wq;
	atomic_t ioctl_count;		/* in-flight ioctls for this handle */
	bool perf_enabled;
	bool fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	bool use_legacy_cmd;
};

/* Human-readable description for one key-usage index (see key_id_array). */
struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};

struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};
343
/*
 * Descriptions indexed by key-usage index; array order is significant
 * (index 0 = undefined, then FDE, PFE, UFS-ICE FDE, SDCC-ICE FDE).
 */
static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},

	{
		.desc = "Full Disk Encryption",
	},

	{
		.desc = "Per File Encryption",
	},

	{
		.desc = "UFS ICE Full Disk Encryption",
	},

	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};
365
366/* Function proto types */
367static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
368static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
369static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
370static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
371static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
372static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
373 char *cmnlib_name);
374static int qseecom_enable_ice_setup(int usage);
375static int qseecom_disable_ice_setup(int usage);
376static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
377static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
378 void __user *argp);
379static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
380 void __user *argp);
381static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
382 void __user *argp);
383
384static int get_qseecom_keymaster_status(char *str)
385{
386 get_option(&str, &qseecom.is_apps_region_protected);
387 return 1;
388}
389__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
390
/*
 * qseecom_scm_call2() - route a qseecom request through the ARMv8 SCM
 * calling convention.
 * @svc_id:    legacy SCM service id (6, SCM_SVC_ES, or SCM_SVC_TZSCHEDULER)
 * @tz_cmd_id: legacy command id within @svc_id (consulted for svc 6 and
 *             SCM_SVC_ES only)
 * @req_buf:   request payload; for SCM_SVC_TZSCHEDULER its first u32 must be
 *             the QSEOS command id
 * @resp_buf:  treated as a struct qseecom_command_scm_resp and filled from
 *             desc.ret[0..2] after the call
 *
 * Maps each (service, command) pair onto its SMC id, marshals the request
 * fields into scm_desc argument slots — choosing the 32-bit or 64-bit
 * request layout based on qseecom.qsee_version < QSEE_VERSION_40 — and
 * issues scm_call2().  Payloads that TZ reads by physical address (hashes,
 * app names, key requests) are first copied into a kzalloc'd bounce buffer
 * and cache-flushed with dmac_flush_range().
 *
 * Return: 0 on success, -EINVAL for an unsupported service/command,
 * -ENOMEM if a bounce buffer cannot be allocated, else the scm_call2()
 * result.
 */
static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
			const void *req_buf, void *resp_buf)
{
	int ret = 0;
	uint32_t smc_id = 0;
	uint32_t qseos_cmd_id = 0;
	struct scm_desc desc = {0};
	struct qseecom_command_scm_resp *scm_resp = NULL;

	if (!req_buf || !resp_buf) {
		pr_err("Invalid buffer pointer\n");
		return -EINVAL;
	}
	/* First word of every QSEOS request is its command id. */
	qseos_cmd_id = *(uint32_t *)req_buf;
	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;

	switch (svc_id) {
	case 6: {
		/* Feature-version query is the only command on service 6. */
		if (tz_cmd_id == 3) {
			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
			desc.args[0] = *(uint32_t *)req_buf;
		} else {
			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
				svc_id, tz_cmd_id);
			return -EINVAL;
		}
		ret = scm_call2(smc_id, &desc);
		break;
	}
	case SCM_SVC_ES: {
		switch (tz_cmd_id) {
		case SCM_SAVE_PARTITION_HASH_ID: {
			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
			struct qseecom_save_partition_hash_req *p_hash_req =
				(struct qseecom_save_partition_hash_req *)
				req_buf;
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			/* NOTE(review): kzalloc already zeroes; memset is redundant. */
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, p_hash_req->digest,
				SHA256_DIGEST_LENGTH);
			/* TZ reads the digest by physical address. */
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
			desc.args[0] = p_hash_req->partition_id;
			desc.args[1] = virt_to_phys(tzbuf);
			desc.args[2] = SHA256_DIGEST_LENGTH;
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		default: {
			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
				tz_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (tz_cmd_id) */
		break;
	} /* end of case SCM_SVC_ES */
	case SCM_SVC_TZSCHEDULER: {
		switch (qseos_cmd_id) {
		case QSEOS_APP_START_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_START_ID;
			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_app_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_SHUTDOWN_COMMAND: {
			struct qseecom_unload_app_ireq *req;

			req = (struct qseecom_unload_app_ireq *)req_buf;
			smc_id = TZ_OS_APP_SHUTDOWN_ID;
			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_LOOKUP_COMMAND: {
			struct qseecom_check_app_ireq *req;
			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			req = (struct qseecom_check_app_ireq *)req_buf;
			pr_debug("Lookup app_name = %s\n", req->app_name);
			/* Name is passed to TZ via a flushed bounce buffer. */
			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_APP_LOOKUP_ID;
			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = strlen(req->app_name);
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_APP_REGION_NOTIFICATION: {
			struct qsee_apps_region_info_ireq *req;
			struct qsee_apps_region_info_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
			desc.arginfo =
				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qsee_apps_region_info_ireq *)
					req_buf;
				desc.args[0] = req->addr;
				desc.args[1] = req->size;
			} else {
				req_64bit =
				(struct qsee_apps_region_info_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->addr;
				desc.args[1] = req_64bit->size;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
			struct qseecom_load_lib_image_ireq *req;
			struct qseecom_load_lib_image_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_lib_image_ireq *)
					req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_lib_image_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_REGISTER_LISTENER: {
			struct qseecom_register_listener_ireq *req;
			struct qseecom_register_listener_64bit_ireq *req_64bit;

			desc.arginfo =
				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_register_listener_ireq *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->sb_ptr;
				desc.args[2] = req->sb_len;
			} else {
				req_64bit =
				(struct qseecom_register_listener_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->listener_id;
				desc.args[1] = req_64bit->sb_ptr;
				desc.args[2] = req_64bit->sb_len;
			}
			/*
			 * Try the smcinvoke-capable registration first; fall
			 * back to the legacy SMC id if TZ rejects it.
			 */
			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			if (ret) {
				smc_id = TZ_OS_REGISTER_LISTENER_ID;
				__qseecom_reentrancy_check_if_no_app_blocked(
					smc_id);
				ret = scm_call2(smc_id, &desc);
			}
			break;
		}
		case QSEOS_DEREGISTER_LISTENER: {
			struct qseecom_unregister_listener_ireq *req;

			req = (struct qseecom_unregister_listener_ireq *)
				req_buf;
			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
			struct qseecom_client_listener_data_irsp *req;

			req = (struct qseecom_client_listener_data_irsp *)
				req_buf;
			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
			desc.arginfo =
				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			desc.args[1] = req->status;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
			struct qseecom_client_listener_data_irsp *req;
			struct qseecom_client_listener_data_64bit_irsp *req_64;

			smc_id =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req =
				(struct qseecom_client_listener_data_irsp *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->status;
				desc.args[2] = req->sglistinfo_ptr;
				desc.args[3] = req->sglistinfo_len;
			} else {
				req_64 =
			(struct qseecom_client_listener_data_64bit_irsp *)
					req_buf;
				desc.args[0] = req_64->listener_id;
				desc.args[1] = req_64->status;
				desc.args[2] = req_64->sglistinfo_ptr;
				desc.args[3] = req_64->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_app_64bit_ireq *)req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}

		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
			struct qseecom_client_send_service_ireq *req;

			req = (struct qseecom_client_send_service_ireq *)
				req_buf;
			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
			desc.args[0] = req->key_type;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_ERASE_COMMAND: {
			smc_id = TZ_OS_RPMB_ERASE_ID;
			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		/*
		 * Key-service commands below all follow the same pattern:
		 * copy the request body (minus the leading command-id word)
		 * into a flushed bounce buffer and pass it by phys addr.
		 */
		case QSEOS_GENERATE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_GEN_KEY_ID;
			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_DELETE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_DEL_KEY_ID;
			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_SET_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_select_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_select_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_UPDATE_KEY_USERINFO: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_CLOSE_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_REQUEST_CANCELLATION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
			desc.arginfo =
				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
			struct qseecom_continue_blocked_request_ireq *req =
				(struct qseecom_continue_blocked_request_ireq *)
				req_buf;
			smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
			desc.arginfo =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		default: {
			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
						qseos_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /*end of switch (qsee_cmd_id)  */
	break;
	} /*end of case SCM_SVC_TZSCHEDULER*/
	default: {
		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
					svc_id);
		ret = -EINVAL;
		break;
	}
	} /*end of switch svc_id */
	/* Mirror the SMC return registers into the caller's response struct. */
	scm_resp->result = desc.ret[0];
	scm_resp->resp_type = desc.ret[1];
	scm_resp->data = desc.ret[2];
	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
		scm_resp->result, scm_resp->resp_type, scm_resp->data);
	return ret;
}
1047
1048
1049static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1050 size_t cmd_len, void *resp_buf, size_t resp_len)
1051{
1052 if (!is_scm_armv8())
1053 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1054 resp_buf, resp_len);
1055 else
1056 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1057}
1058
1059static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1060 struct qseecom_register_listener_req *svc)
1061{
1062 struct qseecom_registered_listener_list *ptr;
1063 int unique = 1;
1064 unsigned long flags;
1065
1066 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1067 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1068 if (ptr->svc.listener_id == svc->listener_id) {
1069 pr_err("Service id: %u is already registered\n",
1070 ptr->svc.listener_id);
1071 unique = 0;
1072 break;
1073 }
1074 }
1075 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1076 return unique;
1077}
1078
1079static struct qseecom_registered_listener_list *__qseecom_find_svc(
1080 int32_t listener_id)
1081{
1082 struct qseecom_registered_listener_list *entry = NULL;
1083 unsigned long flags;
1084
1085 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1086 list_for_each_entry(entry,
1087 &qseecom.registered_listener_list_head, list) {
1088 if (entry->svc.listener_id == listener_id)
1089 break;
1090 }
1091 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1092
1093 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1094 pr_err("Service id: %u is not found\n", listener_id);
1095 return NULL;
1096 }
1097
1098 return entry;
1099}
1100
1101static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1102 struct qseecom_dev_handle *handle,
1103 struct qseecom_register_listener_req *listener)
1104{
1105 int ret = 0;
1106 struct qseecom_register_listener_ireq req;
1107 struct qseecom_register_listener_64bit_ireq req_64bit;
1108 struct qseecom_command_scm_resp resp;
1109 ion_phys_addr_t pa;
1110 void *cmd_buf = NULL;
1111 size_t cmd_len;
1112
1113 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001114 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001115 listener->ifd_data_fd);
1116 if (IS_ERR_OR_NULL(svc->ihandle)) {
1117 pr_err("Ion client could not retrieve the handle\n");
1118 return -ENOMEM;
1119 }
1120
1121 /* Get the physical address of the ION BUF */
1122 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1123 if (ret) {
1124 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1125 ret);
1126 return ret;
1127 }
1128 /* Populate the structure for sending scm call to load image */
1129 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1130 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1131 pr_err("ION memory mapping for listener shared buffer failed\n");
1132 return -ENOMEM;
1133 }
1134 svc->sb_phys = (phys_addr_t)pa;
1135
1136 if (qseecom.qsee_version < QSEE_VERSION_40) {
1137 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1138 req.listener_id = svc->svc.listener_id;
1139 req.sb_len = svc->sb_length;
1140 req.sb_ptr = (uint32_t)svc->sb_phys;
1141 cmd_buf = (void *)&req;
1142 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1143 } else {
1144 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1145 req_64bit.listener_id = svc->svc.listener_id;
1146 req_64bit.sb_len = svc->sb_length;
1147 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1148 cmd_buf = (void *)&req_64bit;
1149 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1150 }
1151
1152 resp.result = QSEOS_RESULT_INCOMPLETE;
1153
1154 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1155 &resp, sizeof(resp));
1156 if (ret) {
1157 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1158 return -EINVAL;
1159 }
1160
1161 if (resp.result != QSEOS_RESULT_SUCCESS) {
1162 pr_err("Error SB registration req: resp.result = %d\n",
1163 resp.result);
1164 return -EPERM;
1165 }
1166 return 0;
1167}
1168
1169static int qseecom_register_listener(struct qseecom_dev_handle *data,
1170 void __user *argp)
1171{
1172 int ret = 0;
1173 unsigned long flags;
1174 struct qseecom_register_listener_req rcvd_lstnr;
1175 struct qseecom_registered_listener_list *new_entry;
1176
1177 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1178 if (ret) {
1179 pr_err("copy_from_user failed\n");
1180 return ret;
1181 }
1182 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1183 rcvd_lstnr.sb_size))
1184 return -EFAULT;
1185
1186 data->listener.id = 0;
1187 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
1188 pr_err("Service is not unique and is already registered\n");
1189 data->released = true;
1190 return -EBUSY;
1191 }
1192
1193 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1194 if (!new_entry)
1195 return -ENOMEM;
1196 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1197 new_entry->rcv_req_flag = 0;
1198
1199 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1200 new_entry->sb_length = rcvd_lstnr.sb_size;
1201 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1202 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
1203 pr_err("qseecom_set_sb_memoryfailed\n");
1204 kzfree(new_entry);
1205 return -ENOMEM;
1206 }
1207
1208 data->listener.id = rcvd_lstnr.listener_id;
1209 init_waitqueue_head(&new_entry->rcv_req_wq);
1210 init_waitqueue_head(&new_entry->listener_block_app_wq);
1211 new_entry->send_resp_flag = 0;
1212 new_entry->listener_in_use = false;
1213 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1214 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1215 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1216
1217 return ret;
1218}
1219
/*
 * Handle the UNREGISTER_LISTENER ioctl: tell QSEE to deregister the
 * listener, abort and drain any in-flight waiters on this handle, then
 * unlink the listener entry and release its shared-buffer resources.
 *
 * Returns 0 on success, a negative errno (or scm-call error) otherwise.
 */
static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	int ret = 0;
	unsigned long flags;
	uint32_t unmap_mem = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL; /* Retrieve phy addr */

	/* Deregister with QSEE first; keep local state until TZ agrees. */
	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
					sizeof(req), &resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		return ret;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		return -EPERM;
	}

	/* Wake any thread blocked in the receive path so it can bail out. */
	data->abort = 1;
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
			list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			wake_up_all(&ptr_svc->rcv_req_wq);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/*
	 * Wait until this ioctl is the only one active on the handle
	 * (count drops to 1) before tearing the entry down.
	 */
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			return ret;
		}
	}

	/* Unlink and free the listener entry; remember the ion handle. */
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc,
			&qseecom.registered_listener_list_head, list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			if (ptr_svc->sb_virt) {
				unmap_mem = 1;
				ihandle = ptr_svc->ihandle;
			}
			list_del(&ptr_svc->list);
			kzfree(ptr_svc);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Unmap the memory */
	if (unmap_mem) {
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	data->released = true;
	return ret;
}
1293
/*
 * Switch the QSEE bus-bandwidth vote to @mode, toggling the CE core
 * source clock first when one exists (off for INACTIVE, on otherwise).
 * Caller is expected to hold qsee_bw_mutex.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	/* Only talk to the bus driver when the mode actually changes. */
	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* Roll the clock state back to match the old mode. */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		/*
		 * NOTE(review): current_mode is recorded even when the bus
		 * request above failed, so the cached mode can diverge from
		 * the actual bus vote — confirm this is intentional.
		 */
		qseecom.current_mode = mode;
	}
	return ret;
}
1330
/*
 * Deferred work scheduled by the bw-scale-down timer: drop the bus vote
 * to INACTIVE if the timer is still marked running.  Takes
 * app_access_lock before qsee_bw_mutex — same order as the ioctl paths,
 * so keep that order to avoid deadlock.
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	/* Skip the downgrade if someone already cancelled the timer. */
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
1343
/*
 * Timer callback (softirq context): defer the actual bus downgrade to
 * process context, since it needs to sleep on mutexes.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1348
1349static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1350{
1351 struct qseecom_clk *qclk;
1352 int ret = 0;
1353
1354 mutex_lock(&clk_access_lock);
1355 if (ce == CLK_QSEE)
1356 qclk = &qseecom.qsee;
1357 else
1358 qclk = &qseecom.ce_drv;
1359
1360 if (qclk->clk_access_cnt > 2) {
1361 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1362 ret = -EINVAL;
1363 goto err_dec_ref_cnt;
1364 }
1365 if (qclk->clk_access_cnt == 2)
1366 qclk->clk_access_cnt--;
1367
1368err_dec_ref_cnt:
1369 mutex_unlock(&clk_access_lock);
1370 return ret;
1371}
1372
1373
1374static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1375{
1376 int32_t ret = 0;
1377 int32_t request_mode = INACTIVE;
1378
1379 mutex_lock(&qsee_bw_mutex);
1380 if (mode == 0) {
1381 if (qseecom.cumulative_mode > MEDIUM)
1382 request_mode = HIGH;
1383 else
1384 request_mode = qseecom.cumulative_mode;
1385 } else {
1386 request_mode = mode;
1387 }
1388
1389 ret = __qseecom_set_msm_bus_request(request_mode);
1390 if (ret) {
1391 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1392 ret, request_mode);
1393 goto err_scale_timer;
1394 }
1395
1396 if (qseecom.timer_running) {
1397 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1398 if (ret) {
1399 pr_err("Failed to decrease clk ref count.\n");
1400 goto err_scale_timer;
1401 }
1402 del_timer_sync(&(qseecom.bw_scale_down_timer));
1403 qseecom.timer_running = false;
1404 }
1405err_scale_timer:
1406 mutex_unlock(&qsee_bw_mutex);
1407 return ret;
1408}
1409
1410
1411static int qseecom_unregister_bus_bandwidth_needs(
1412 struct qseecom_dev_handle *data)
1413{
1414 int32_t ret = 0;
1415
1416 qseecom.cumulative_mode -= data->mode;
1417 data->mode = INACTIVE;
1418
1419 return ret;
1420}
1421
1422static int __qseecom_register_bus_bandwidth_needs(
1423 struct qseecom_dev_handle *data, uint32_t request_mode)
1424{
1425 int32_t ret = 0;
1426
1427 if (data->mode == INACTIVE) {
1428 qseecom.cumulative_mode += request_mode;
1429 data->mode = request_mode;
1430 } else {
1431 if (data->mode != request_mode) {
1432 qseecom.cumulative_mode -= data->mode;
1433 qseecom.cumulative_mode += request_mode;
1434 data->mode = request_mode;
1435 }
1436 }
1437 return ret;
1438}
1439
1440static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1441{
1442 int ret = 0;
1443
1444 ret = qsee_vote_for_clock(data, CLK_DFAB);
1445 if (ret) {
1446 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1447 goto perf_enable_exit;
1448 }
1449 ret = qsee_vote_for_clock(data, CLK_SFPB);
1450 if (ret) {
1451 qsee_disable_clock_vote(data, CLK_DFAB);
1452 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1453 goto perf_enable_exit;
1454 }
1455
1456perf_enable_exit:
1457 return ret;
1458}
1459
1460static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1461 void __user *argp)
1462{
1463 int32_t ret = 0;
1464 int32_t req_mode;
1465
1466 if (qseecom.no_clock_support)
1467 return 0;
1468
1469 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1470 if (ret) {
1471 pr_err("copy_from_user failed\n");
1472 return ret;
1473 }
1474 if (req_mode > HIGH) {
1475 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1476 return -EINVAL;
1477 }
1478
1479 /*
1480 * Register bus bandwidth needs if bus scaling feature is enabled;
1481 * otherwise, qseecom enable/disable clocks for the client directly.
1482 */
1483 if (qseecom.support_bus_scaling) {
1484 mutex_lock(&qsee_bw_mutex);
1485 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1486 mutex_unlock(&qsee_bw_mutex);
1487 } else {
1488 pr_debug("Bus scaling feature is NOT enabled\n");
1489 pr_debug("request bandwidth mode %d for the client\n",
1490 req_mode);
1491 if (req_mode != INACTIVE) {
1492 ret = qseecom_perf_enable(data);
1493 if (ret)
1494 pr_err("Failed to vote for clock with err %d\n",
1495 ret);
1496 } else {
1497 qsee_disable_clock_vote(data, CLK_DFAB);
1498 qsee_disable_clock_vote(data, CLK_SFPB);
1499 }
1500 }
1501 return ret;
1502}
1503
1504static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1505{
1506 if (qseecom.no_clock_support)
1507 return;
1508
1509 mutex_lock(&qsee_bw_mutex);
1510 qseecom.bw_scale_down_timer.expires = jiffies +
1511 msecs_to_jiffies(duration);
1512 mod_timer(&(qseecom.bw_scale_down_timer),
1513 qseecom.bw_scale_down_timer.expires);
1514 qseecom.timer_running = true;
1515 mutex_unlock(&qsee_bw_mutex);
1516}
1517
1518static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1519{
1520 if (!qseecom.support_bus_scaling)
1521 qsee_disable_clock_vote(data, CLK_SFPB);
1522 else
1523 __qseecom_add_bw_scale_down_timer(
1524 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1525}
1526
1527static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1528{
1529 int ret = 0;
1530
1531 if (qseecom.support_bus_scaling) {
1532 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1533 if (ret)
1534 pr_err("Failed to set bw MEDIUM.\n");
1535 } else {
1536 ret = qsee_vote_for_clock(data, CLK_SFPB);
1537 if (ret)
1538 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1539 }
1540 return ret;
1541}
1542
1543static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1544 void __user *argp)
1545{
1546 ion_phys_addr_t pa;
1547 int32_t ret;
1548 struct qseecom_set_sb_mem_param_req req;
1549 size_t len;
1550
1551 /* Copy the relevant information needed for loading the image */
1552 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1553 return -EFAULT;
1554
1555 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1556 (req.sb_len == 0)) {
1557 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1558 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1559 return -EFAULT;
1560 }
1561 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1562 req.sb_len))
1563 return -EFAULT;
1564
1565 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001566 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001567 req.ifd_data_fd);
1568 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1569 pr_err("Ion client could not retrieve the handle\n");
1570 return -ENOMEM;
1571 }
1572 /* Get the physical address of the ION BUF */
1573 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1574 if (ret) {
1575
1576 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1577 ret);
1578 return ret;
1579 }
1580
1581 if (len < req.sb_len) {
1582 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1583 req.sb_len, len);
1584 return -EINVAL;
1585 }
1586 /* Populate the structure for sending scm call to load image */
1587 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1588 data->client.ihandle);
1589 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1590 pr_err("ION memory mapping for client shared buf failed\n");
1591 return -ENOMEM;
1592 }
1593 data->client.sb_phys = (phys_addr_t)pa;
1594 data->client.sb_length = req.sb_len;
1595 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1596 return 0;
1597}
1598
1599static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
1600{
1601 int ret;
1602
1603 ret = (qseecom.send_resp_flag != 0);
1604 return ret || data->abort;
1605}
1606
1607static int __qseecom_reentrancy_listener_has_sent_rsp(
1608 struct qseecom_dev_handle *data,
1609 struct qseecom_registered_listener_list *ptr_svc)
1610{
1611 int ret;
1612
1613 ret = (ptr_svc->send_resp_flag != 0);
1614 return ret || data->abort;
1615}
1616
1617static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
1618 struct qseecom_command_scm_resp *resp,
1619 struct qseecom_client_listener_data_irsp *send_data_rsp,
1620 struct qseecom_registered_listener_list *ptr_svc,
1621 uint32_t lstnr) {
1622 int ret = 0;
1623
1624 send_data_rsp->status = QSEOS_RESULT_FAILURE;
1625 qseecom.send_resp_flag = 0;
1626 send_data_rsp->qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
1627 send_data_rsp->listener_id = lstnr;
1628 if (ptr_svc)
1629 pr_warn("listener_id:%x, lstnr: %x\n",
1630 ptr_svc->svc.listener_id, lstnr);
1631 if (ptr_svc && ptr_svc->ihandle) {
1632 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
1633 ptr_svc->sb_virt, ptr_svc->sb_length,
1634 ION_IOC_CLEAN_INV_CACHES);
1635 if (ret) {
1636 pr_err("cache operation failed %d\n", ret);
1637 return ret;
1638 }
1639 }
1640
1641 if (lstnr == RPMB_SERVICE) {
1642 ret = __qseecom_enable_clk(CLK_QSEE);
1643 if (ret)
1644 return ret;
1645 }
1646 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, send_data_rsp,
1647 sizeof(send_data_rsp), resp, sizeof(*resp));
1648 if (ret) {
1649 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1650 ret, data->client.app_id);
1651 if (lstnr == RPMB_SERVICE)
1652 __qseecom_disable_clk(CLK_QSEE);
1653 return ret;
1654 }
1655 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1656 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1657 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1658 resp->result, data->client.app_id, lstnr);
1659 ret = -EINVAL;
1660 }
1661 if (lstnr == RPMB_SERVICE)
1662 __qseecom_disable_clk(CLK_QSEE);
1663 return ret;
1664}
1665
1666static void __qseecom_clean_listener_sglistinfo(
1667 struct qseecom_registered_listener_list *ptr_svc)
1668{
1669 if (ptr_svc->sglist_cnt) {
1670 memset(ptr_svc->sglistinfo_ptr, 0,
1671 SGLISTINFO_TABLE_SIZE);
1672 ptr_svc->sglist_cnt = 0;
1673 }
1674}
1675
/*
 * Drive the QSEOS_RESULT_INCOMPLETE loop for the non-reentrant path:
 * each INCOMPLETE response names a listener (resp->data) that TZ wants
 * serviced.  Wake that listener, sleep (signals blocked) until it posts
 * a response or the client aborts, then forward the listener's status
 * back to TZ and repeat until TZ stops returning INCOMPLETE.
 *
 * Returns 0 on success, -ENODEV if the client aborted mid-exchange, or
 * a negative errno on scm/validation failure.
 */
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp;
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up the blocking listener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/*
		 * NOTE(review): with list_for_each_entry() ptr_svc is never
		 * NULL after the loop; the "not found" case is actually
		 * caught by the listener_id mismatch check below — confirm.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_warn("Service requested does not exist\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -ERESTARTSYS;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);
		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		do {
			/*
			 * When reentrancy is not supported, check global
			 * send_resp_flag; otherwise, check this listener's
			 * send_resp_flag.
			 */
			if (!qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_listener_has_sent_rsp(data))) {
				break;
			}

			if (qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		/* An abort turns this round into a FAILURE response to TZ. */
		if (data->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}

		qseecom.send_resp_flag = 0;
		ptr_svc->send_resp_flag = 0;
		table = ptr_svc->sglistinfo_ptr;
		/* Build the 32-bit or 64-bit response layout per QSEE version. */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id  = lstnr;
			send_data_rsp.status = status;
			send_data_rsp.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
			send_data_rsp.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			/* TZ reads the table by phys addr; flush it out. */
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id  = lstnr;
			send_data_rsp_64bit.status = status;
			send_data_rsp_64bit.sglistinfo_ptr =
				virt_to_phys(table);
			send_data_rsp_64bit.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* Command id is the first field of both response layouts. */
		if (qseecom.whitelist_support == false)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}

		/* RPMB/SSD listener traffic needs the QSEE clock held. */
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		ptr_svc->listener_in_use = false;
		__qseecom_clean_listener_sglistinfo(ptr_svc);
		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
				__qseecom_disable_clk(CLK_QSEE);
			return ret;
		}
		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
		}
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
			__qseecom_disable_clk(CLK_QSEE);

	}
	/* Abort (rc) takes precedence over the last scm result. */
	if (rc)
		return rc;

	return ret;
}
1844
/*
 * Handle a QSEOS_RESULT_BLOCKED_ON_LISTENER response: the TZ app named
 * by @data (or looked up by app_id when @ptr_app is NULL) is blocked in
 * TZ on the listener identified by resp->data.  Sleep — dropping
 * app_access_lock so the listener thread can run — until that listener
 * is free, then tell TZ to resume the blocked app and rewrite the
 * result to INCOMPLETE so the caller continues normal processing.
 *
 * Returns 0 on success, negative errno on lookup or scm failure.
 */
int __qseecom_process_reentrancy_blocked_on_listener(
				struct qseecom_command_scm_resp *resp,
				struct qseecom_registered_app_list *ptr_app,
				struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	sigset_t new_sigset, old_sigset;
	unsigned long flags;
	bool found_app = false;

	if (!resp || !data) {
		pr_err("invalid resp or data pointer\n");
		ret = -EINVAL;
		goto exit;
	}

	/* find app_id & img_name from list */
	if (!ptr_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
			if ((ptr_app->app_id == data->client.app_id) &&
				(!strcmp(ptr_app->app_name,
						data->client.app_name))) {
				found_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
					flags);
		if (!found_app) {
			pr_err("app_id %d (%s) is not found\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -ENOENT;
			goto exit;
		}
	}

	list_ptr = __qseecom_find_svc(resp->data);
	if (!list_ptr) {
		pr_err("Invalid listener ID\n");
		ret = -ENODATA;
		goto exit;
	}
	pr_debug("lsntr %d in_use = %d\n",
			resp->data, list_ptr->listener_in_use);
	ptr_app->blocked_on_listener_id = resp->data;
	/* sleep until listener is available */
	do {
		/* Count this app as blocked while we wait. */
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		/* Block all signals so the freezable wait isn't interrupted. */
		sigfillset(&new_sigset);
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
		/* Drop the big lock so the listener thread can make progress. */
		mutex_unlock(&app_access_lock);
		do {
			if (!wait_event_freezable(
				list_ptr->listener_block_app_wq,
				!list_ptr->listener_in_use)) {
				break;
			}
		} while (1);
		mutex_lock(&app_access_lock);
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
	} while (list_ptr->listener_in_use == true);
	ptr_app->blocked_on_listener_id = 0;
	/* notify the blocked app that listener is available */
	pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
		resp->data, data->client.app_id,
		data->client.app_name);
	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
	ireq.app_id = data->client.app_id;
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
	if (ret) {
		pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
			data->client.app_id,
			data->client.app_name, ret);
		goto exit;
	}
	/*
	 * After TZ app is unblocked, then continue to next case
	 * for incomplete request processing
	 */
	resp->result = QSEOS_RESULT_INCOMPLETE;
exit:
	return ret;
}
1939
/*
 * Reentrancy-aware variant of the QSEOS_RESULT_INCOMPLETE loop: wakes
 * the listener named in resp->data, drops app_access_lock while waiting
 * (signals blocked) for that listener's response, then forwards the
 * status to TZ.  A BLOCKED_ON_LISTENER result from TZ is resolved via
 * __qseecom_process_reentrancy_blocked_on_listener() before looping.
 *
 * Returns 0 on success, -ENODEV if the client aborted, or a negative
 * errno on scm/validation failure.
 */
static int __qseecom_reentrancy_process_incomplete_cmd(
					struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp;
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up the blocking listener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/*
		 * NOTE(review): ptr_svc is never NULL after
		 * list_for_each_entry(); the miss case is effectively caught
		 * by the listener_id mismatch check below — confirm.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			return -EINVAL;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			return -EINVAL;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_warn("Service requested does not exist\n");
			return -ERESTARTSYS;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);

		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		/* unlock mutex btw waking listener and sleep-wait */
		mutex_unlock(&app_access_lock);
		do {
			if (!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);
		/* lock mutex again after resp sent */
		mutex_lock(&app_access_lock);
		ptr_svc->send_resp_flag = 0;
		qseecom.send_resp_flag = 0;

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		/* An abort turns this round into a FAILURE response to TZ. */
		if (data->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status  = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
		table = ptr_svc->sglistinfo_ptr;
		/* Build the 32-bit or 64-bit response layout per QSEE version. */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id  = lstnr;
			send_data_rsp.status = status;
			send_data_rsp.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
			send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
			/* TZ reads the table by phys addr; flush it out. */
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id  = lstnr;
			send_data_rsp_64bit.status = status;
			send_data_rsp_64bit.sglistinfo_ptr =
				virt_to_phys(table);
			send_data_rsp_64bit.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* Command id is the first field of both response layouts. */
		if (qseecom.whitelist_support == false)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}
		/* RPMB listener traffic needs the QSEE clock held. */
		if (lstnr == RPMB_SERVICE) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		ptr_svc->listener_in_use = false;
		__qseecom_clean_listener_sglistinfo(ptr_svc);
		wake_up_interruptible(&ptr_svc->listener_block_app_wq);

		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			goto exit;
		}

		switch (resp->result) {
		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
				lstnr, data->client.app_id, resp->data);
			if (lstnr == resp->data) {
				pr_err("lstnr %d should not be blocked!\n",
					lstnr);
				ret = -EINVAL;
				goto exit;
			}
			ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, NULL, data);
			if (ret) {
				pr_err("failed to process App(%d) %s blocked on listener %d\n",
					data->client.app_id,
					data->client.app_name, resp->data);
				goto exit;
			}
			/* fall through: unblocked result is INCOMPLETE */
		case QSEOS_RESULT_SUCCESS:
		case QSEOS_RESULT_INCOMPLETE:
			break;
		default:
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
			goto exit;
		}
exit:
		if (lstnr == RPMB_SERVICE)
			__qseecom_disable_clk(CLK_QSEE);

	}
	/* Abort (rc) takes precedence over the last scm result. */
	if (rc)
		return rc;

	return ret;
}
2116
2117/*
2118 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2119 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2120 * So, needs to first check if no app blocked before sending OS level scm call,
2121 * then wait until all apps are unblocked.
2122 */
2123static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2124{
2125 sigset_t new_sigset, old_sigset;
2126
2127 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2128 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2129 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2130 /* thread sleep until this app unblocked */
2131 while (qseecom.app_block_ref_cnt > 0) {
2132 sigfillset(&new_sigset);
2133 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2134 mutex_unlock(&app_access_lock);
2135 do {
2136 if (!wait_event_freezable(qseecom.app_block_wq,
2137 (qseecom.app_block_ref_cnt == 0)))
2138 break;
2139 } while (1);
2140 mutex_lock(&app_access_lock);
2141 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2142 }
2143 }
2144}
2145
2146/*
2147 * scm_call of send data will fail if this TA is blocked or there are more
2148 * than one TA requesting listener services; So, first check to see if need
2149 * to wait.
2150 */
2151static void __qseecom_reentrancy_check_if_this_app_blocked(
2152 struct qseecom_registered_app_list *ptr_app)
2153{
2154 sigset_t new_sigset, old_sigset;
2155
2156 if (qseecom.qsee_reentrancy_support) {
2157 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2158 /* thread sleep until this app unblocked */
2159 sigfillset(&new_sigset);
2160 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2161 mutex_unlock(&app_access_lock);
2162 do {
2163 if (!wait_event_freezable(qseecom.app_block_wq,
2164 (!ptr_app->app_blocked &&
2165 qseecom.app_block_ref_cnt <= 1)))
2166 break;
2167 } while (1);
2168 mutex_lock(&app_access_lock);
2169 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2170 }
2171 }
2172}
2173
2174static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2175 uint32_t *app_id)
2176{
2177 int32_t ret;
2178 struct qseecom_command_scm_resp resp;
2179 bool found_app = false;
2180 struct qseecom_registered_app_list *entry = NULL;
2181 unsigned long flags = 0;
2182
2183 if (!app_id) {
2184 pr_err("Null pointer to app_id\n");
2185 return -EINVAL;
2186 }
2187 *app_id = 0;
2188
2189 /* check if app exists and has been registered locally */
2190 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2191 list_for_each_entry(entry,
2192 &qseecom.registered_app_list_head, list) {
2193 if (!strcmp(entry->app_name, req.app_name)) {
2194 found_app = true;
2195 break;
2196 }
2197 }
2198 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2199 if (found_app) {
2200 pr_debug("Found app with id %d\n", entry->app_id);
2201 *app_id = entry->app_id;
2202 return 0;
2203 }
2204
2205 memset((void *)&resp, 0, sizeof(resp));
2206
2207 /* SCM_CALL to check if app_id for the mentioned app exists */
2208 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2209 sizeof(struct qseecom_check_app_ireq),
2210 &resp, sizeof(resp));
2211 if (ret) {
2212 pr_err("scm_call to check if app is already loaded failed\n");
2213 return -EINVAL;
2214 }
2215
2216 if (resp.result == QSEOS_RESULT_FAILURE)
2217 return 0;
2218
2219 switch (resp.resp_type) {
2220 /*qsee returned listener type response */
2221 case QSEOS_LISTENER_ID:
2222 pr_err("resp type is of listener type instead of app");
2223 return -EINVAL;
2224 case QSEOS_APP_ID:
2225 *app_id = resp.data;
2226 return 0;
2227 default:
2228 pr_err("invalid resp type (%d) from qsee",
2229 resp.resp_type);
2230 return -ENODEV;
2231 }
2232}
2233
2234static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2235{
2236 struct qseecom_registered_app_list *entry = NULL;
2237 unsigned long flags = 0;
2238 u32 app_id = 0;
2239 struct ion_handle *ihandle; /* Ion handle */
2240 struct qseecom_load_img_req load_img_req;
2241 int32_t ret = 0;
2242 ion_phys_addr_t pa = 0;
2243 size_t len;
2244 struct qseecom_command_scm_resp resp;
2245 struct qseecom_check_app_ireq req;
2246 struct qseecom_load_app_ireq load_req;
2247 struct qseecom_load_app_64bit_ireq load_req_64bit;
2248 void *cmd_buf = NULL;
2249 size_t cmd_len;
2250 bool first_time = false;
2251
2252 /* Copy the relevant information needed for loading the image */
2253 if (copy_from_user(&load_img_req,
2254 (void __user *)argp,
2255 sizeof(struct qseecom_load_img_req))) {
2256 pr_err("copy_from_user failed\n");
2257 return -EFAULT;
2258 }
2259
2260 /* Check and load cmnlib */
2261 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2262 if (!qseecom.commonlib_loaded &&
2263 load_img_req.app_arch == ELFCLASS32) {
2264 ret = qseecom_load_commonlib_image(data, "cmnlib");
2265 if (ret) {
2266 pr_err("failed to load cmnlib\n");
2267 return -EIO;
2268 }
2269 qseecom.commonlib_loaded = true;
2270 pr_debug("cmnlib is loaded\n");
2271 }
2272
2273 if (!qseecom.commonlib64_loaded &&
2274 load_img_req.app_arch == ELFCLASS64) {
2275 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2276 if (ret) {
2277 pr_err("failed to load cmnlib64\n");
2278 return -EIO;
2279 }
2280 qseecom.commonlib64_loaded = true;
2281 pr_debug("cmnlib64 is loaded\n");
2282 }
2283 }
2284
2285 if (qseecom.support_bus_scaling) {
2286 mutex_lock(&qsee_bw_mutex);
2287 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2288 mutex_unlock(&qsee_bw_mutex);
2289 if (ret)
2290 return ret;
2291 }
2292
2293 /* Vote for the SFPB clock */
2294 ret = __qseecom_enable_clk_scale_up(data);
2295 if (ret)
2296 goto enable_clk_err;
2297
2298 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2299 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2300 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2301
2302 ret = __qseecom_check_app_exists(req, &app_id);
2303 if (ret < 0)
2304 goto loadapp_err;
2305
2306 if (app_id) {
2307 pr_debug("App id %d (%s) already exists\n", app_id,
2308 (char *)(req.app_name));
2309 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2310 list_for_each_entry(entry,
2311 &qseecom.registered_app_list_head, list){
2312 if (entry->app_id == app_id) {
2313 entry->ref_cnt++;
2314 break;
2315 }
2316 }
2317 spin_unlock_irqrestore(
2318 &qseecom.registered_app_list_lock, flags);
2319 ret = 0;
2320 } else {
2321 first_time = true;
2322 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2323 (char *)(load_img_req.img_name));
2324 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002325 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002326 load_img_req.ifd_data_fd);
2327 if (IS_ERR_OR_NULL(ihandle)) {
2328 pr_err("Ion client could not retrieve the handle\n");
2329 ret = -ENOMEM;
2330 goto loadapp_err;
2331 }
2332
2333 /* Get the physical address of the ION BUF */
2334 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2335 if (ret) {
2336 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2337 ret);
2338 goto loadapp_err;
2339 }
2340 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2341 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2342 len, load_img_req.mdt_len,
2343 load_img_req.img_len);
2344 ret = -EINVAL;
2345 goto loadapp_err;
2346 }
2347 /* Populate the structure for sending scm call to load image */
2348 if (qseecom.qsee_version < QSEE_VERSION_40) {
2349 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2350 load_req.mdt_len = load_img_req.mdt_len;
2351 load_req.img_len = load_img_req.img_len;
2352 strlcpy(load_req.app_name, load_img_req.img_name,
2353 MAX_APP_NAME_SIZE);
2354 load_req.phy_addr = (uint32_t)pa;
2355 cmd_buf = (void *)&load_req;
2356 cmd_len = sizeof(struct qseecom_load_app_ireq);
2357 } else {
2358 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2359 load_req_64bit.mdt_len = load_img_req.mdt_len;
2360 load_req_64bit.img_len = load_img_req.img_len;
2361 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2362 MAX_APP_NAME_SIZE);
2363 load_req_64bit.phy_addr = (uint64_t)pa;
2364 cmd_buf = (void *)&load_req_64bit;
2365 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2366 }
2367
2368 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2369 ION_IOC_CLEAN_INV_CACHES);
2370 if (ret) {
2371 pr_err("cache operation failed %d\n", ret);
2372 goto loadapp_err;
2373 }
2374
2375 /* SCM_CALL to load the app and get the app_id back */
2376 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2377 cmd_len, &resp, sizeof(resp));
2378 if (ret) {
2379 pr_err("scm_call to load app failed\n");
2380 if (!IS_ERR_OR_NULL(ihandle))
2381 ion_free(qseecom.ion_clnt, ihandle);
2382 ret = -EINVAL;
2383 goto loadapp_err;
2384 }
2385
2386 if (resp.result == QSEOS_RESULT_FAILURE) {
2387 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2388 if (!IS_ERR_OR_NULL(ihandle))
2389 ion_free(qseecom.ion_clnt, ihandle);
2390 ret = -EFAULT;
2391 goto loadapp_err;
2392 }
2393
2394 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2395 ret = __qseecom_process_incomplete_cmd(data, &resp);
2396 if (ret) {
2397 pr_err("process_incomplete_cmd failed err: %d\n",
2398 ret);
2399 if (!IS_ERR_OR_NULL(ihandle))
2400 ion_free(qseecom.ion_clnt, ihandle);
2401 ret = -EFAULT;
2402 goto loadapp_err;
2403 }
2404 }
2405
2406 if (resp.result != QSEOS_RESULT_SUCCESS) {
2407 pr_err("scm_call failed resp.result unknown, %d\n",
2408 resp.result);
2409 if (!IS_ERR_OR_NULL(ihandle))
2410 ion_free(qseecom.ion_clnt, ihandle);
2411 ret = -EFAULT;
2412 goto loadapp_err;
2413 }
2414
2415 app_id = resp.data;
2416
2417 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2418 if (!entry) {
2419 ret = -ENOMEM;
2420 goto loadapp_err;
2421 }
2422 entry->app_id = app_id;
2423 entry->ref_cnt = 1;
2424 entry->app_arch = load_img_req.app_arch;
2425 /*
2426 * keymaster app may be first loaded as "keymaste" by qseecomd,
2427 * and then used as "keymaster" on some targets. To avoid app
2428 * name checking error, register "keymaster" into app_list and
2429 * thread private data.
2430 */
2431 if (!strcmp(load_img_req.img_name, "keymaste"))
2432 strlcpy(entry->app_name, "keymaster",
2433 MAX_APP_NAME_SIZE);
2434 else
2435 strlcpy(entry->app_name, load_img_req.img_name,
2436 MAX_APP_NAME_SIZE);
2437 entry->app_blocked = false;
2438 entry->blocked_on_listener_id = 0;
2439
2440 /* Deallocate the handle */
2441 if (!IS_ERR_OR_NULL(ihandle))
2442 ion_free(qseecom.ion_clnt, ihandle);
2443
2444 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2445 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2446 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2447 flags);
2448
2449 pr_warn("App with id %u (%s) now loaded\n", app_id,
2450 (char *)(load_img_req.img_name));
2451 }
2452 data->client.app_id = app_id;
2453 data->client.app_arch = load_img_req.app_arch;
2454 if (!strcmp(load_img_req.img_name, "keymaste"))
2455 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2456 else
2457 strlcpy(data->client.app_name, load_img_req.img_name,
2458 MAX_APP_NAME_SIZE);
2459 load_img_req.app_id = app_id;
2460 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2461 pr_err("copy_to_user failed\n");
2462 ret = -EFAULT;
2463 if (first_time == true) {
2464 spin_lock_irqsave(
2465 &qseecom.registered_app_list_lock, flags);
2466 list_del(&entry->list);
2467 spin_unlock_irqrestore(
2468 &qseecom.registered_app_list_lock, flags);
2469 kzfree(entry);
2470 }
2471 }
2472
2473loadapp_err:
2474 __qseecom_disable_clk_scale_down(data);
2475enable_clk_err:
2476 if (qseecom.support_bus_scaling) {
2477 mutex_lock(&qsee_bw_mutex);
2478 qseecom_unregister_bus_bandwidth_needs(data);
2479 mutex_unlock(&qsee_bw_mutex);
2480 }
2481 return ret;
2482}
2483
2484static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2485{
2486 int ret = 1; /* Set unload app */
2487
2488 wake_up_all(&qseecom.send_resp_wq);
2489 if (qseecom.qsee_reentrancy_support)
2490 mutex_unlock(&app_access_lock);
2491 while (atomic_read(&data->ioctl_count) > 1) {
2492 if (wait_event_freezable(data->abort_wq,
2493 atomic_read(&data->ioctl_count) <= 1)) {
2494 pr_err("Interrupted from abort\n");
2495 ret = -ERESTARTSYS;
2496 break;
2497 }
2498 }
2499 if (qseecom.qsee_reentrancy_support)
2500 mutex_lock(&app_access_lock);
2501 return ret;
2502}
2503
2504static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2505{
2506 int ret = 0;
2507
2508 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2509 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2510 ion_free(qseecom.ion_clnt, data->client.ihandle);
2511 data->client.ihandle = NULL;
2512 }
2513 return ret;
2514}
2515
/*
 * Unload the trusted app bound to @data, or just drop a reference if
 * other clients still use it. @app_crash forces the ref count to zero.
 * The app is actually shut down in TZ (APP_SHUTDOWN SCM call) only when
 * unloading is warranted; keymaster is never unloaded from TZ. In all
 * cases the client's ION shared buffer is unmapped and the handle is
 * marked released.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;
	bool found_app = false;
	bool found_dead_app = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* keymaster stays resident in TZ; only local cleanup is done. */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	/* Drain in-flight ioctls, then wait until no TZ app is blocked. */
	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/* Last reference (or crash): shut down in TZ. */
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				/* Same app_id registered under another name. */
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for sending scm call to load image */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
								req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
					data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
					data->client.app_id);
		/* TZ may need further round-trips to finish the shutdown. */
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
									ret);
				goto unload_exit;
			}
		}
	}

	/* Update (and possibly remove) the local registry entry. */
	if (found_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}
unload_exit:
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2636
2637static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2638 unsigned long virt)
2639{
2640 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2641}
2642
2643static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2644 unsigned long virt)
2645{
2646 return (uintptr_t)data->client.sb_virt +
2647 (virt - data->client.user_virt_sb_base);
2648}
2649
2650int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2651 struct qseecom_send_svc_cmd_req *req_ptr,
2652 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2653{
2654 int ret = 0;
2655 void *req_buf = NULL;
2656
2657 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2658 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2659 req_ptr, send_svc_ireq_ptr);
2660 return -EINVAL;
2661 }
2662
2663 /* Clients need to ensure req_buf is at base offset of shared buffer */
2664 if ((uintptr_t)req_ptr->cmd_req_buf !=
2665 data_ptr->client.user_virt_sb_base) {
2666 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2667 return -EINVAL;
2668 }
2669
2670 if (data_ptr->client.sb_length <
2671 sizeof(struct qseecom_rpmb_provision_key)) {
2672 pr_err("shared buffer is too small to hold key type\n");
2673 return -EINVAL;
2674 }
2675 req_buf = data_ptr->client.sb_virt;
2676
2677 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2678 send_svc_ireq_ptr->key_type =
2679 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2680 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2681 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2682 data_ptr, (uintptr_t)req_ptr->resp_buf));
2683 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2684
2685 return ret;
2686}
2687
2688int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2689 struct qseecom_send_svc_cmd_req *req_ptr,
2690 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2691{
2692 int ret = 0;
2693 uint32_t reqd_len_sb_in = 0;
2694
2695 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2696 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2697 req_ptr, send_svc_ireq_ptr);
2698 return -EINVAL;
2699 }
2700
2701 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2702 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2703 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2704 pr_err("Required: %u, Available: %zu\n",
2705 reqd_len_sb_in, data_ptr->client.sb_length);
2706 return -ENOMEM;
2707 }
2708
2709 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2710 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2711 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2712 data_ptr, (uintptr_t)req_ptr->resp_buf));
2713 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2714
2715 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2716 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2717
2718
2719 return ret;
2720}
2721
/*
 * Validate a userspace service-command request against the client's
 * registered shared buffer: non-null buffers, both buffers inside the
 * shared-buffer range, non-zero lengths that fit, and no integer
 * overflow in any pointer+length arithmetic. Returns 0 when valid,
 * -EINVAL/-ENOMEM otherwise. Check order is significant only for which
 * diagnostic is logged first.
 */
static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_svc_cmd_req *req)
{
	if (!req || !req->resp_buf || !req->cmd_req_buf) {
		pr_err("req or cmd buffer or response buffer is null\n");
		return -EINVAL;
	}

	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->client.sb_virt == NULL) {
		pr_err("sb_virt null\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base == 0) {
		pr_err("user_virt_sb_base is null\n");
		return -EINVAL;
	}

	if (data->client.sb_length == 0) {
		pr_err("sb_length is 0\n");
		return -EINVAL;
	}

	/* Both buffers must start inside [sb_base, sb_base + sb_length). */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* Guard the cmd_req_len + resp_len sum below against wrap-around. */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* Guard the pointer + length end-of-range checks against wrap. */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Both buffers must also END inside the shared buffer. */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
2806
2807static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
2808 void __user *argp)
2809{
2810 int ret = 0;
2811 struct qseecom_client_send_service_ireq send_svc_ireq;
2812 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
2813 struct qseecom_command_scm_resp resp;
2814 struct qseecom_send_svc_cmd_req req;
2815 void *send_req_ptr;
2816 size_t req_buf_size;
2817
2818 /*struct qseecom_command_scm_resp resp;*/
2819
2820 if (copy_from_user(&req,
2821 (void __user *)argp,
2822 sizeof(req))) {
2823 pr_err("copy_from_user failed\n");
2824 return -EFAULT;
2825 }
2826
2827 if (__validate_send_service_cmd_inputs(data, &req))
2828 return -EINVAL;
2829
2830 data->type = QSEECOM_SECURE_SERVICE;
2831
2832 switch (req.cmd_id) {
2833 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
2834 case QSEOS_RPMB_ERASE_COMMAND:
2835 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
2836 send_req_ptr = &send_svc_ireq;
2837 req_buf_size = sizeof(send_svc_ireq);
2838 if (__qseecom_process_rpmb_svc_cmd(data, &req,
2839 send_req_ptr))
2840 return -EINVAL;
2841 break;
2842 case QSEOS_FSM_LTEOTA_REQ_CMD:
2843 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
2844 case QSEOS_FSM_IKE_REQ_CMD:
2845 case QSEOS_FSM_IKE_REQ_RSP_CMD:
2846 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
2847 case QSEOS_FSM_OEM_FUSE_READ_ROW:
2848 case QSEOS_FSM_ENCFS_REQ_CMD:
2849 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
2850 send_req_ptr = &send_fsm_key_svc_ireq;
2851 req_buf_size = sizeof(send_fsm_key_svc_ireq);
2852 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
2853 send_req_ptr))
2854 return -EINVAL;
2855 break;
2856 default:
2857 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
2858 return -EINVAL;
2859 }
2860
2861 if (qseecom.support_bus_scaling) {
2862 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
2863 if (ret) {
2864 pr_err("Fail to set bw HIGH\n");
2865 return ret;
2866 }
2867 } else {
2868 ret = qseecom_perf_enable(data);
2869 if (ret) {
2870 pr_err("Failed to vote for clocks with err %d\n", ret);
2871 goto exit;
2872 }
2873 }
2874
2875 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
2876 data->client.sb_virt, data->client.sb_length,
2877 ION_IOC_CLEAN_INV_CACHES);
2878 if (ret) {
2879 pr_err("cache operation failed %d\n", ret);
2880 goto exit;
2881 }
2882 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2883 (const void *)send_req_ptr,
2884 req_buf_size, &resp, sizeof(resp));
2885 if (ret) {
2886 pr_err("qseecom_scm_call failed with err: %d\n", ret);
2887 if (!qseecom.support_bus_scaling) {
2888 qsee_disable_clock_vote(data, CLK_DFAB);
2889 qsee_disable_clock_vote(data, CLK_SFPB);
2890 } else {
2891 __qseecom_add_bw_scale_down_timer(
2892 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
2893 }
2894 goto exit;
2895 }
2896 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
2897 data->client.sb_virt, data->client.sb_length,
2898 ION_IOC_INV_CACHES);
2899 if (ret) {
2900 pr_err("cache operation failed %d\n", ret);
2901 goto exit;
2902 }
2903 switch (resp.result) {
2904 case QSEOS_RESULT_SUCCESS:
2905 break;
2906 case QSEOS_RESULT_INCOMPLETE:
2907 pr_debug("qseos_result_incomplete\n");
2908 ret = __qseecom_process_incomplete_cmd(data, &resp);
2909 if (ret) {
2910 pr_err("process_incomplete_cmd fail with result: %d\n",
2911 resp.result);
2912 }
2913 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
2914 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05302915 if (put_user(resp.result,
2916 (uint32_t __user *)req.resp_buf)) {
2917 ret = -EINVAL;
2918 goto exit;
2919 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002920 ret = 0;
2921 }
2922 break;
2923 case QSEOS_RESULT_FAILURE:
2924 pr_err("scm call failed with resp.result: %d\n", resp.result);
2925 ret = -EINVAL;
2926 break;
2927 default:
2928 pr_err("Response result %d not supported\n",
2929 resp.result);
2930 ret = -EINVAL;
2931 break;
2932 }
2933 if (!qseecom.support_bus_scaling) {
2934 qsee_disable_clock_vote(data, CLK_DFAB);
2935 qsee_disable_clock_vote(data, CLK_SFPB);
2936 } else {
2937 __qseecom_add_bw_scale_down_timer(
2938 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
2939 }
2940
2941exit:
2942 return ret;
2943}
2944
/*
 * Validate a userspace send-cmd request against the client's shared
 * buffer: buffers inside the shared-buffer range, lengths that fit,
 * and no integer overflow in pointer+length arithmetic. Unlike the
 * service-cmd validator, a zero resp_len is allowed when resp_buf is
 * non-NULL. Returns 0 when valid, -EINVAL/-ENOMEM otherwise.
 */
static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)

{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}
	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
						(req->cmd_req_buf == NULL)) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* Both buffers must start inside [sb_base, sb_base + sb_length). */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* Guard the cmd_req_len + resp_len sum below against wrap-around. */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* Guard the pointer + length end-of-range checks against wrap. */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Both buffers must also END inside the shared buffer. */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
3014
/*
 * Handle the TZ response to a reentrant send-cmd for @ptr_app:
 * - BLOCKED_ON_LISTENER: wait until the listener unblocks the app,
 *   then continue with the INCOMPLETE handling below (intentional
 *   switch fallthrough).
 * - INCOMPLETE: mark the app blocked while the incomplete-cmd
 *   round-trips with TZ complete, then wake waiters.
 * - SUCCESS: nothing to do.
 * Returns 0 on success or a negative errno.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/* fall through: once unblocked, finish the INCOMPLETE flow */
	case QSEOS_RESULT_INCOMPLETE:
		/* Count this app as blocked while TZ finishes the command. */
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
						resp->result);
		return -EINVAL;
	}
}
3053
/*
 * Send a client command to a loaded trusted application via an SCM call.
 *
 * Looks up the registered app matching @data, builds either the 32-bit
 * or 64-bit "client send data" request depending on the QSEE version,
 * performs the required cache maintenance on the shared buffer, issues
 * the SCM call, and post-processes INCOMPLETE/blocked results.
 *
 * @data: per-client handle; req/resp buffers live inside its ion-backed
 *        shared buffer and are assumed already validated by
 *        __validate_send_cmd_inputs().
 * @req:  command descriptor (kernel-virtual buffer pointers).
 *
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy QSEE: physical addresses are passed as 32 bits */
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* flush the sglist table so TZ reads coherent data */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* first field of both ireq structs is the command id */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* clean+invalidate shared buffer before handing it to TZ */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	/* block here if a reentrant call already has this app busy */
	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* invalidate so the CPU sees the response TZ wrote */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret)
		pr_err("cache operation failed %d\n", ret);
	return ret;
}
3184
3185static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3186{
3187 int ret = 0;
3188 struct qseecom_send_cmd_req req;
3189
3190 ret = copy_from_user(&req, argp, sizeof(req));
3191 if (ret) {
3192 pr_err("copy_from_user failed\n");
3193 return ret;
3194 }
3195
3196 if (__validate_send_cmd_inputs(data, &req))
3197 return -EINVAL;
3198
3199 ret = __qseecom_send_cmd(data, &req);
3200
3201 if (ret)
3202 return ret;
3203
3204 return ret;
3205}
3206
3207int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3208 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3209 struct qseecom_dev_handle *data, int i) {
3210
3211 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3212 (req->ifd_data[i].fd > 0)) {
3213 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3214 (req->ifd_data[i].cmd_buf_offset >
3215 req->cmd_req_len - sizeof(uint32_t))) {
3216 pr_err("Invalid offset (req len) 0x%x\n",
3217 req->ifd_data[i].cmd_buf_offset);
3218 return -EINVAL;
3219 }
3220 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3221 (lstnr_resp->ifd_data[i].fd > 0)) {
3222 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3223 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3224 lstnr_resp->resp_len - sizeof(uint32_t))) {
3225 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3226 lstnr_resp->ifd_data[i].cmd_buf_offset);
3227 return -EINVAL;
3228 }
3229 }
3230 return 0;
3231}
3232
/*
 * Walk the ion fds attached to a modfd command (client app) or listener
 * response and patch the physical address of each ion buffer into the
 * command/response buffer at its cmd_buf_offset (32-bit sg-entry layout).
 *
 * @msg:     struct qseecom_send_modfd_cmd_req * (client app) or
 *           struct qseecom_send_modfd_listener_resp * (listener service).
 * @cleanup: false before the SCM call — write phys addrs, clean caches,
 *           record sglist info; true afterwards — zero the patched
 *           fields and invalidate caches.
 * @data:    per-client handle; data->type selects how @msg is interpreted.
 *
 * Returns 0 on success, -EINVAL/-EFAULT for bad arguments, and -ENOMEM
 * for any ion/sg failure (including boundary-check failures, which are
 * folded into the common err path).
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* resolve the ion handle and the patch location ("field") */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* single sg entry: patch a bare 32-bit phys addr */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* multiple entries: patch an sg-entry array; the
			 * offset must leave room for nents entries.
			 */
			struct qseecom_sg_entry *update;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
						(req->cmd_req_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			/* post-SCM pass: discard stale cache lines */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* pre-SCM pass: flush so the secure side sees data,
			 * then record the sglist info for the whitelist cmd.
			 */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				/* listener: offset is relative to its shared buffer */
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3449
3450static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3451 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3452{
3453 struct scatterlist *sg = sg_ptr->sgl;
3454 struct qseecom_sg_entry_64bit *sg_entry;
3455 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3456 void *buf;
3457 uint i;
3458 size_t size;
3459 dma_addr_t coh_pmem;
3460
3461 if (fd_idx >= MAX_ION_FD) {
3462 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3463 return -ENOMEM;
3464 }
3465 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3466 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3467 /* Allocate a contiguous kernel buffer */
3468 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3469 size = (size + PAGE_SIZE) & PAGE_MASK;
3470 buf = dma_alloc_coherent(qseecom.pdev,
3471 size, &coh_pmem, GFP_KERNEL);
3472 if (buf == NULL) {
3473 pr_err("failed to alloc memory for sg buf\n");
3474 return -ENOMEM;
3475 }
3476 /* update qseecom_sg_list_buf_hdr_64bit */
3477 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3478 buf_hdr->new_buf_phys_addr = coh_pmem;
3479 buf_hdr->nents_total = sg_ptr->nents;
3480 /* save the left sg entries into new allocated buf */
3481 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3482 for (i = 0; i < sg_ptr->nents; i++) {
3483 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3484 sg_entry->len = sg->length;
3485 sg_entry++;
3486 sg = sg_next(sg);
3487 }
3488
3489 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3490 data->client.sec_buf_fd[fd_idx].vbase = buf;
3491 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3492 data->client.sec_buf_fd[fd_idx].size = size;
3493
3494 return 0;
3495}
3496
/*
 * 64-bit-layout counterpart of __qseecom_update_cmd_buf(): walk the ion
 * fds attached to a modfd command/listener response and patch each
 * buffer's physical address into the command/response buffer using
 * 64-bit sg entries.
 *
 * Unlike the 32-bit path, an sg list larger than QSEECOM_MAX_SG_ENTRY
 * is not rejected: it is flattened into a side buffer via
 * __qseecom_allocate_sg_list_buffer() (freed again on the cleanup pass).
 *
 * @msg:     struct qseecom_send_modfd_cmd_req * (client app) or
 *           struct qseecom_send_modfd_listener_resp * (listener).
 * @cleanup: false before the SCM call (populate, clean caches, record
 *           sglist info); true afterwards (zero fields, invalidate).
 * @data:    per-client handle; data->type selects how @msg is read.
 *
 * Returns 0 on success or a negative errno; the err path also frees any
 * side buffers recorded in data->client.sec_buf_fd[].
 * NOTE(review): the err path does not clear is_sec_buf_fd/vbase after
 * freeing — confirm callers cannot trigger a second free.
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* resolve the ion handle and the patch location ("field") */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			/* oversized list: use (or free) the side buffer */
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* single entry: patch a bare 64-bit phys addr */
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			/* multiple entries: patch a 64-bit sg-entry array */
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
						(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			/* post-SCM pass: discard stale cache lines */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* pre-SCM pass: flush, then record sglist info */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				/* listener: offset relative to its shared buffer */
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* free any side buffers allocated for oversized sg lists */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3695
3696static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3697 void __user *argp,
3698 bool is_64bit_addr)
3699{
3700 int ret = 0;
3701 int i;
3702 struct qseecom_send_modfd_cmd_req req;
3703 struct qseecom_send_cmd_req send_cmd_req;
3704
3705 ret = copy_from_user(&req, argp, sizeof(req));
3706 if (ret) {
3707 pr_err("copy_from_user failed\n");
3708 return ret;
3709 }
3710
3711 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3712 send_cmd_req.cmd_req_len = req.cmd_req_len;
3713 send_cmd_req.resp_buf = req.resp_buf;
3714 send_cmd_req.resp_len = req.resp_len;
3715
3716 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3717 return -EINVAL;
3718
3719 /* validate offsets */
3720 for (i = 0; i < MAX_ION_FD; i++) {
3721 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3722 pr_err("Invalid offset %d = 0x%x\n",
3723 i, req.ifd_data[i].cmd_buf_offset);
3724 return -EINVAL;
3725 }
3726 }
3727 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3728 (uintptr_t)req.cmd_req_buf);
3729 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3730 (uintptr_t)req.resp_buf);
3731
3732 if (!is_64bit_addr) {
3733 ret = __qseecom_update_cmd_buf(&req, false, data);
3734 if (ret)
3735 return ret;
3736 ret = __qseecom_send_cmd(data, &send_cmd_req);
3737 if (ret)
3738 return ret;
3739 ret = __qseecom_update_cmd_buf(&req, true, data);
3740 if (ret)
3741 return ret;
3742 } else {
3743 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3744 if (ret)
3745 return ret;
3746 ret = __qseecom_send_cmd(data, &send_cmd_req);
3747 if (ret)
3748 return ret;
3749 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3750 if (ret)
3751 return ret;
3752 }
3753
3754 return ret;
3755}
3756
/* ioctl entry: send a modfd command using the legacy 32-bit sg layout */
static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, false);
}
3762
/* ioctl entry: send a modfd command using the 64-bit sg-entry layout */
static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, true);
}
3768
3769
3770
3771static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3772 struct qseecom_registered_listener_list *svc)
3773{
3774 int ret;
3775
3776 ret = (svc->rcv_req_flag != 0);
3777 return ret || data->abort;
3778}
3779
/*
 * Block a listener until a request arrives from the secure side.
 *
 * Sleeps freezably on the listener's wait queue until rcv_req_flag is
 * set (request pending) or the handle is aborted.
 *
 * Returns 0 when a request is ready (rcv_req_flag consumed),
 * -ENODATA for an unknown listener id, -ENODEV on abort, or
 * -ERESTARTSYS if the wait was interrupted (e.g. by a signal/freeze).
 */
static int qseecom_receive_req(struct qseecom_dev_handle *data)
{
	int ret = 0;
	struct qseecom_registered_listener_list *this_lstnr;

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (!this_lstnr) {
		pr_err("Invalid listener ID\n");
		return -ENODATA;
	}

	while (1) {
		if (wait_event_freezable(this_lstnr->rcv_req_wq,
				__qseecom_listener_has_rcvd_req(data,
				this_lstnr))) {
			pr_debug("Interrupted: exiting Listener Service = %d\n",
					(uint32_t)data->listener.id);
			/* woken up for different reason */
			return -ERESTARTSYS;
		}

		/* abort may have been the wake reason — check it first */
		if (data->abort) {
			pr_err("Aborting Listener Service = %d\n",
					(uint32_t)data->listener.id);
			return -ENODEV;
		}
		this_lstnr->rcv_req_flag = 0;
		break;
	}
	return ret;
}
3811
3812static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3813{
3814 unsigned char app_arch = 0;
3815 struct elf32_hdr *ehdr;
3816 struct elf64_hdr *ehdr64;
3817
3818 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3819
3820 switch (app_arch) {
3821 case ELFCLASS32: {
3822 ehdr = (struct elf32_hdr *)fw_entry->data;
3823 if (fw_entry->size < sizeof(*ehdr)) {
3824 pr_err("%s: Not big enough to be an elf32 header\n",
3825 qseecom.pdev->init_name);
3826 return false;
3827 }
3828 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3829 pr_err("%s: Not an elf32 header\n",
3830 qseecom.pdev->init_name);
3831 return false;
3832 }
3833 if (ehdr->e_phnum == 0) {
3834 pr_err("%s: No loadable segments\n",
3835 qseecom.pdev->init_name);
3836 return false;
3837 }
3838 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3839 sizeof(struct elf32_hdr) > fw_entry->size) {
3840 pr_err("%s: Program headers not within mdt\n",
3841 qseecom.pdev->init_name);
3842 return false;
3843 }
3844 break;
3845 }
3846 case ELFCLASS64: {
3847 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3848 if (fw_entry->size < sizeof(*ehdr64)) {
3849 pr_err("%s: Not big enough to be an elf64 header\n",
3850 qseecom.pdev->init_name);
3851 return false;
3852 }
3853 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3854 pr_err("%s: Not an elf64 header\n",
3855 qseecom.pdev->init_name);
3856 return false;
3857 }
3858 if (ehdr64->e_phnum == 0) {
3859 pr_err("%s: No loadable segments\n",
3860 qseecom.pdev->init_name);
3861 return false;
3862 }
3863 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3864 sizeof(struct elf64_hdr) > fw_entry->size) {
3865 pr_err("%s: Program headers not within mdt\n",
3866 qseecom.pdev->init_name);
3867 return false;
3868 }
3869 break;
3870 }
3871 default: {
3872 pr_err("QSEE app arch %u is not supported\n", app_arch);
3873 return false;
3874 }
3875 }
3876 return true;
3877}
3878
/*
 * Compute the total size of a trusted app's split firmware image
 * (<appname>.mdt + <appname>.b00, .b01, ...) and report its ELF class.
 *
 * @appname:  base name of the firmware files.
 * @fw_size:  out — total bytes across the .mdt and all .bNN blobs
 *            (set to 0 on failure).
 * @app_arch: out — ELFCLASS32 or ELFCLASS64 read from the .mdt.
 *
 * Returns 0 on success, -EIO/-EINVAL on failure.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with request_firmware\n");
		ret = -EIO;
		goto err;
	}
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	/* the number of .bNN blobs equals the number of program headers */
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		/* guard against u32 wrap while summing blob sizes */
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}

	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
3941
/*
 * Concatenate a trusted app's split firmware image (.mdt followed by
 * every .bNN blob) into @img_data and fill in the load request lengths.
 *
 * @appname:  base name of the firmware files.
 * @img_data: destination buffer of at least @fw_size bytes (as computed
 *            by __qseecom_get_fw_size()).
 * @fw_size:  capacity of @img_data; every copy is bounds-checked
 *            against it in case the files changed on disk in between.
 * @load_req: out — mdt_len and cumulative img_len are filled in.
 *
 * Returns 0 on success, -EIO/-EINVAL on failure.
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
				uint32_t fw_size,
				struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}

	load_req->img_len = fw_entry->size;
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/

	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	/* one .bNN blob per ELF program header */
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, app_arch);
		ret = -EIO;
		goto err;
	}
	release_firmware(fw_entry);
	fw_entry = NULL;
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		/* overflow + capacity check before each copy */
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	release_firmware(fw_entry);
	return ret;
}
4013
/*
 * Allocate an ion buffer for a firmware image, map it into the kernel,
 * and resolve its physical address.
 *
 * @pihandle: out — ion handle for the allocation.
 * @data:     out — kernel-virtual mapping of the buffer.
 * @fw_size:  number of bytes to allocate (4K-aligned by ion).
 * @paddr:    out — physical address of the buffer.
 *
 * On failure everything acquired so far is released (unmap/free) via
 * the goto-cleanup chain. Returns 0 on success, -ENOMEM/-EIO otherwise.
 */
static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
{
	size_t len = 0;
	int ret = 0;
	ion_phys_addr_t pa;
	struct ion_handle *ihandle = NULL;
	u8 *img_data = NULL;

	ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
			SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);

	if (IS_ERR_OR_NULL(ihandle)) {
		pr_err("ION alloc failed\n");
		return -ENOMEM;
	}
	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
					ihandle);

	if (IS_ERR_OR_NULL(img_data)) {
		pr_err("ION memory mapping for image loading failed\n");
		ret = -ENOMEM;
		goto exit_ion_free;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
	if (ret) {
		pr_err("physical memory retrieval failure\n");
		ret = -EIO;
		goto exit_ion_unmap_kernel;
	}

	*pihandle = ihandle;
	*data = img_data;
	*paddr = pa;
	return ret;

exit_ion_unmap_kernel:
	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
exit_ion_free:
	ion_free(qseecom.ion_clnt, ihandle);
	ihandle = NULL;
	return ret;
}
4058
4059static void __qseecom_free_img_data(struct ion_handle **ihandle)
4060{
4061 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4062 ion_free(qseecom.ion_clnt, *ihandle);
4063 *ihandle = NULL;
4064}
4065
4066static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4067 uint32_t *app_id)
4068{
4069 int ret = -1;
4070 uint32_t fw_size = 0;
4071 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4072 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4073 struct qseecom_command_scm_resp resp;
4074 u8 *img_data = NULL;
4075 ion_phys_addr_t pa = 0;
4076 struct ion_handle *ihandle = NULL;
4077 void *cmd_buf = NULL;
4078 size_t cmd_len;
4079 uint32_t app_arch = 0;
4080
4081 if (!data || !appname || !app_id) {
4082 pr_err("Null pointer to data or appname or appid\n");
4083 return -EINVAL;
4084 }
4085 *app_id = 0;
4086 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4087 return -EIO;
4088 data->client.app_arch = app_arch;
4089
4090 /* Check and load cmnlib */
4091 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4092 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4093 ret = qseecom_load_commonlib_image(data, "cmnlib");
4094 if (ret) {
4095 pr_err("failed to load cmnlib\n");
4096 return -EIO;
4097 }
4098 qseecom.commonlib_loaded = true;
4099 pr_debug("cmnlib is loaded\n");
4100 }
4101
4102 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4103 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4104 if (ret) {
4105 pr_err("failed to load cmnlib64\n");
4106 return -EIO;
4107 }
4108 qseecom.commonlib64_loaded = true;
4109 pr_debug("cmnlib64 is loaded\n");
4110 }
4111 }
4112
4113 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4114 if (ret)
4115 return ret;
4116
4117 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4118 if (ret) {
4119 ret = -EIO;
4120 goto exit_free_img_data;
4121 }
4122
4123 /* Populate the load_req parameters */
4124 if (qseecom.qsee_version < QSEE_VERSION_40) {
4125 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4126 load_req.mdt_len = load_req.mdt_len;
4127 load_req.img_len = load_req.img_len;
4128 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4129 load_req.phy_addr = (uint32_t)pa;
4130 cmd_buf = (void *)&load_req;
4131 cmd_len = sizeof(struct qseecom_load_app_ireq);
4132 } else {
4133 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4134 load_req_64bit.mdt_len = load_req.mdt_len;
4135 load_req_64bit.img_len = load_req.img_len;
4136 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4137 load_req_64bit.phy_addr = (uint64_t)pa;
4138 cmd_buf = (void *)&load_req_64bit;
4139 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4140 }
4141
4142 if (qseecom.support_bus_scaling) {
4143 mutex_lock(&qsee_bw_mutex);
4144 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4145 mutex_unlock(&qsee_bw_mutex);
4146 if (ret) {
4147 ret = -EIO;
4148 goto exit_free_img_data;
4149 }
4150 }
4151
4152 ret = __qseecom_enable_clk_scale_up(data);
4153 if (ret) {
4154 ret = -EIO;
4155 goto exit_unregister_bus_bw_need;
4156 }
4157
4158 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4159 img_data, fw_size,
4160 ION_IOC_CLEAN_INV_CACHES);
4161 if (ret) {
4162 pr_err("cache operation failed %d\n", ret);
4163 goto exit_disable_clk_vote;
4164 }
4165
4166 /* SCM_CALL to load the image */
4167 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4168 &resp, sizeof(resp));
4169 if (ret) {
4170 pr_err("scm_call to load failed : ret %d\n", ret);
4171 ret = -EIO;
4172 goto exit_disable_clk_vote;
4173 }
4174
4175 switch (resp.result) {
4176 case QSEOS_RESULT_SUCCESS:
4177 *app_id = resp.data;
4178 break;
4179 case QSEOS_RESULT_INCOMPLETE:
4180 ret = __qseecom_process_incomplete_cmd(data, &resp);
4181 if (ret)
4182 pr_err("process_incomplete_cmd FAILED\n");
4183 else
4184 *app_id = resp.data;
4185 break;
4186 case QSEOS_RESULT_FAILURE:
4187 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4188 break;
4189 default:
4190 pr_err("scm call return unknown response %d\n", resp.result);
4191 ret = -EINVAL;
4192 break;
4193 }
4194
4195exit_disable_clk_vote:
4196 __qseecom_disable_clk_scale_down(data);
4197
4198exit_unregister_bus_bw_need:
4199 if (qseecom.support_bus_scaling) {
4200 mutex_lock(&qsee_bw_mutex);
4201 qseecom_unregister_bus_bandwidth_needs(data);
4202 mutex_unlock(&qsee_bw_mutex);
4203 }
4204
4205exit_free_img_data:
4206 __qseecom_free_img_data(&ihandle);
4207 return ret;
4208}
4209
/*
 * Load a QSEE common library image ("cmnlib" or "cmnlib64") into the
 * secure environment.
 *
 * Stages the firmware in qseecom.cmnlib_ion_handle, votes for bus
 * bandwidth and the SFPB clock, cleans caches, then issues
 * QSEOS_LOAD_SERV_IMAGE_COMMAND.  The staging ION buffer is always
 * released before returning, whatever the outcome.
 *
 * @data:        client handle used for bandwidth/clock voting.
 * @cmnlib_name: library firmware name, must be < MAX_APP_NAME_SIZE.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	/* Stage the image in a dedicated, driver-global ION buffer */
	ret = __qseecom_allocate_img_data(&qseecom.cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	/* Copies the image and fills load_req.mdt_len/img_len */
	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	/* Pick 32- or 64-bit request layout based on the QSEE version */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* Flush the staged image to memory so TZ sees consistent data */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, qseecom.cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener service(s) before the load completes */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&qseecom.cmnlib_ion_handle);
	return ret;
}
4329
4330static int qseecom_unload_commonlib_image(void)
4331{
4332 int ret = -EINVAL;
4333 struct qseecom_unload_lib_image_ireq unload_req = {0};
4334 struct qseecom_command_scm_resp resp;
4335
4336 /* Populate the remaining parameters */
4337 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4338
4339 /* SCM_CALL to load the image */
4340 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4341 sizeof(struct qseecom_unload_lib_image_ireq),
4342 &resp, sizeof(resp));
4343 if (ret) {
4344 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4345 ret = -EIO;
4346 } else {
4347 switch (resp.result) {
4348 case QSEOS_RESULT_SUCCESS:
4349 break;
4350 case QSEOS_RESULT_FAILURE:
4351 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4352 break;
4353 default:
4354 pr_err("scm call return unknown response %d\n",
4355 resp.result);
4356 ret = -EINVAL;
4357 break;
4358 }
4359 }
4360
4361 return ret;
4362}
4363
4364int qseecom_start_app(struct qseecom_handle **handle,
4365 char *app_name, uint32_t size)
4366{
4367 int32_t ret = 0;
4368 unsigned long flags = 0;
4369 struct qseecom_dev_handle *data = NULL;
4370 struct qseecom_check_app_ireq app_ireq;
4371 struct qseecom_registered_app_list *entry = NULL;
4372 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4373 bool found_app = false;
4374 size_t len;
4375 ion_phys_addr_t pa;
4376 uint32_t fw_size, app_arch;
4377 uint32_t app_id = 0;
4378
4379 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4380 pr_err("Not allowed to be called in %d state\n",
4381 atomic_read(&qseecom.qseecom_state));
4382 return -EPERM;
4383 }
4384 if (!app_name) {
4385 pr_err("failed to get the app name\n");
4386 return -EINVAL;
4387 }
4388
4389 if (strlen(app_name) >= MAX_APP_NAME_SIZE) {
4390 pr_err("The app_name (%s) with length %zu is not valid\n",
4391 app_name, strlen(app_name));
4392 return -EINVAL;
4393 }
4394
4395 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4396 if (!(*handle))
4397 return -ENOMEM;
4398
4399 data = kzalloc(sizeof(*data), GFP_KERNEL);
4400 if (!data) {
4401 if (ret == 0) {
4402 kfree(*handle);
4403 *handle = NULL;
4404 }
4405 return -ENOMEM;
4406 }
4407 data->abort = 0;
4408 data->type = QSEECOM_CLIENT_APP;
4409 data->released = false;
4410 data->client.sb_length = size;
4411 data->client.user_virt_sb_base = 0;
4412 data->client.ihandle = NULL;
4413
4414 init_waitqueue_head(&data->abort_wq);
4415
4416 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4417 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4418 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4419 pr_err("Ion client could not retrieve the handle\n");
4420 kfree(data);
4421 kfree(*handle);
4422 *handle = NULL;
4423 return -EINVAL;
4424 }
4425 mutex_lock(&app_access_lock);
4426
4427 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4428 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4429 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4430 if (ret)
4431 goto err;
4432
4433 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4434 if (app_id) {
4435 pr_warn("App id %d for [%s] app exists\n", app_id,
4436 (char *)app_ireq.app_name);
4437 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4438 list_for_each_entry(entry,
4439 &qseecom.registered_app_list_head, list){
4440 if (entry->app_id == app_id) {
4441 entry->ref_cnt++;
4442 found_app = true;
4443 break;
4444 }
4445 }
4446 spin_unlock_irqrestore(
4447 &qseecom.registered_app_list_lock, flags);
4448 if (!found_app)
4449 pr_warn("App_id %d [%s] was loaded but not registered\n",
4450 ret, (char *)app_ireq.app_name);
4451 } else {
4452 /* load the app and get the app_id */
4453 pr_debug("%s: Loading app for the first time'\n",
4454 qseecom.pdev->init_name);
4455 ret = __qseecom_load_fw(data, app_name, &app_id);
4456 if (ret < 0)
4457 goto err;
4458 }
4459 data->client.app_id = app_id;
4460 if (!found_app) {
4461 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4462 if (!entry) {
4463 pr_err("kmalloc for app entry failed\n");
4464 ret = -ENOMEM;
4465 goto err;
4466 }
4467 entry->app_id = app_id;
4468 entry->ref_cnt = 1;
4469 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4470 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4471 ret = -EIO;
4472 kfree(entry);
4473 goto err;
4474 }
4475 entry->app_arch = app_arch;
4476 entry->app_blocked = false;
4477 entry->blocked_on_listener_id = 0;
4478 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4479 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4480 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4481 flags);
4482 }
4483
4484 /* Get the physical address of the ION BUF */
4485 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4486 if (ret) {
4487 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4488 ret);
4489 goto err;
4490 }
4491
4492 /* Populate the structure for sending scm call to load image */
4493 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4494 data->client.ihandle);
4495 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4496 pr_err("ION memory mapping for client shared buf failed\n");
4497 ret = -ENOMEM;
4498 goto err;
4499 }
4500 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4501 data->client.sb_phys = (phys_addr_t)pa;
4502 (*handle)->dev = (void *)data;
4503 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4504 (*handle)->sbuf_len = data->client.sb_length;
4505
4506 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4507 if (!kclient_entry) {
4508 ret = -ENOMEM;
4509 goto err;
4510 }
4511 kclient_entry->handle = *handle;
4512
4513 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4514 list_add_tail(&kclient_entry->list,
4515 &qseecom.registered_kclient_list_head);
4516 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4517
4518 mutex_unlock(&app_access_lock);
4519 return 0;
4520
4521err:
4522 kfree(data);
4523 kfree(*handle);
4524 *handle = NULL;
4525 mutex_unlock(&app_access_lock);
4526 return ret;
4527}
4528EXPORT_SYMBOL(qseecom_start_app);
4529
/*
 * Kernel-client API: tear down a TA session created by
 * qseecom_start_app().
 *
 * Removes the client from the registered kclient list, then unloads the
 * app (or drops its reference) via qseecom_unload_app().  Only on full
 * success are the dev handle, caller handle and list entry freed and
 * *handle cleared; on failure the caller's handle is left intact.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int qseecom_shutdown_app(struct qseecom_handle **handle)
{
	int ret = -EINVAL;
	struct qseecom_dev_handle *data;

	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	bool found_handle = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
				atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if ((handle == NULL) || (*handle == NULL)) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = (struct qseecom_dev_handle *) ((*handle)->dev);
	mutex_lock(&app_access_lock);

	/* Unlink this client from the registered kernel-client list */
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
				list) {
		if (kclient->handle == (*handle)) {
			list_del(&kclient->list);
			found_handle = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
	if (!found_handle)
		pr_err("Unable to find the handle, exiting\n");
	else
		ret = qseecom_unload_app(data, false);

	mutex_unlock(&app_access_lock);
	if (ret == 0) {
		/* kzfree zeroes the memory before freeing it */
		kzfree(data);
		kzfree(*handle);
		kzfree(kclient);
		*handle = NULL;
	}

	return ret;
}
4577EXPORT_SYMBOL(qseecom_shutdown_app);
4578
/*
 * Kernel-client API: send a command to a loaded trusted application.
 *
 * @handle:   session from qseecom_start_app(); must not be NULL.
 * @send_buf: request buffer (validated against the shared buffer).
 * @sbuf_len: request length.
 * @resp_buf: response buffer.
 * @rbuf_len: response length.
 *
 * Votes for bus bandwidth and — if HLOS owns the crypto clocks and no
 * vote is active — the CE clocks around the underlying SCM call, and
 * drops any vote it took before returning.
 *
 * Returns 0 on success, negative errno on failure.
 */
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
				atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
						ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		perf_enabled = true;
	}
	/* the "securemm" app is sent commands in the legacy format */
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req);
	data->use_legacy_cmd = false;
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	/* drop the clock vote if we took one above */
	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	/* only logged on success — ret is 0 here */
	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
4657EXPORT_SYMBOL(qseecom_send_command);
4658
4659int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4660{
4661 int ret = 0;
4662
4663 if ((handle == NULL) || (handle->dev == NULL)) {
4664 pr_err("No valid kernel client\n");
4665 return -EINVAL;
4666 }
4667 if (high) {
4668 if (qseecom.support_bus_scaling) {
4669 mutex_lock(&qsee_bw_mutex);
4670 __qseecom_register_bus_bandwidth_needs(handle->dev,
4671 HIGH);
4672 mutex_unlock(&qsee_bw_mutex);
4673 } else {
4674 ret = qseecom_perf_enable(handle->dev);
4675 if (ret)
4676 pr_err("Failed to vote for clock with err %d\n",
4677 ret);
4678 }
4679 } else {
4680 if (!qseecom.support_bus_scaling) {
4681 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4682 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4683 } else {
4684 mutex_lock(&qsee_bw_mutex);
4685 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4686 mutex_unlock(&qsee_bw_mutex);
4687 }
4688 }
4689 return ret;
4690}
4691EXPORT_SYMBOL(qseecom_set_bandwidth);
4692
/*
 * Entry point for the smcinvoke driver to hand a listener request that
 * arrived on its SMC path into qseecom's reentrancy machinery.
 *
 * On entry desc->ret[0..2] carry the TZ response triple
 * (req_cmd/result, app_id/resp_type, listener_id/data); they are
 * rewritten with the outcome of __qseecom_process_reentrancy() before
 * returning so the caller can relay it back to TZ.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
{
	struct qseecom_registered_app_list dummy_app_entry = { {0} };
	struct qseecom_dev_handle dummy_private_data = {0};
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	if (!desc) {
		pr_err("desc is NULL\n");
		return -EINVAL;
	}

	resp.result = desc->ret[0];	/*req_cmd*/
	resp.resp_type = desc->ret[1]; /*app_id*/
	resp.data = desc->ret[2];	/*listener_id*/

	/* Stand-in app entry/private data carrying only the app id */
	dummy_private_data.client.app_id = desc->ret[1];
	dummy_app_entry.app_id = desc->ret[1];

	mutex_lock(&app_access_lock);
	ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
					&dummy_private_data);
	mutex_unlock(&app_access_lock);
	if (ret)
		pr_err("Failed to req cmd %d lsnr %d on app %d, ret = %d\n",
			(int)desc->ret[0], (int)desc->ret[2],
			(int)desc->ret[1], ret);
	/* Propagate the (possibly updated) response back to the caller */
	desc->ret[0] = resp.result;
	desc->ret[1] = resp.resp_type;
	desc->ret[2] = resp.data;
	return ret;
}
4725EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4726
/*
 * Flag that a listener response is pending and wake any thread waiting
 * on send_resp_wq.  Always returns 0.
 */
static int qseecom_send_resp(void)
{
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4733
4734static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4735{
4736 struct qseecom_registered_listener_list *this_lstnr = NULL;
4737
4738 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4739 this_lstnr = __qseecom_find_svc(data->listener.id);
4740 if (this_lstnr == NULL)
4741 return -EINVAL;
4742 qseecom.send_resp_flag = 1;
4743 this_lstnr->send_resp_flag = 1;
4744 wake_up_interruptible(&qseecom.send_resp_wq);
4745 return 0;
4746}
4747
/*
 * Sanity-check a modified-fd listener response before it is consumed.
 *
 * The response buffer must be non-NULL, its length non-zero and no
 * larger than the listener's shared buffer, all pointer arithmetic must
 * be overflow-free, the buffer must lie entirely inside the listener's
 * shared buffer region, and every ion-fd cmd_buf_offset must fall
 * inside the response.
 *
 * Returns 0 if the response is valid, -EINVAL otherwise.
 */
static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
			struct qseecom_send_modfd_listener_resp *resp,
			struct qseecom_registered_listener_list *this_lstnr)
{
	int i;

	if (!data || !resp || !this_lstnr) {
		pr_err("listener handle or resp msg is null\n");
		return -EINVAL;
	}

	if (resp->resp_buf_ptr == NULL) {
		pr_err("resp buffer is null\n");
		return -EINVAL;
	}
	/* validate resp buf length */
	if ((resp->resp_len == 0) ||
			(resp->resp_len > this_lstnr->sb_length)) {
		pr_err("resp buf length %d not valid\n", resp->resp_len);
		return -EINVAL;
	}

	/* guard the range checks below against wrap-around */
	if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)this_lstnr->user_virt_sb_base >
					(ULONG_MAX - this_lstnr->sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* validate resp buf */
	if (((uintptr_t)resp->resp_buf_ptr <
		(uintptr_t)this_lstnr->user_virt_sb_base) ||
		((uintptr_t)resp->resp_buf_ptr >=
		((uintptr_t)this_lstnr->user_virt_sb_base +
				this_lstnr->sb_length)) ||
		(((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
		((uintptr_t)this_lstnr->user_virt_sb_base +
				this_lstnr->sb_length))) {
		pr_err("resp buf is out of shared buffer region\n");
		return -EINVAL;
	}

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
			pr_err("Invalid offset %d = 0x%x\n",
				i, resp->ifd_data[i].cmd_buf_offset);
			return -EINVAL;
		}
	}

	return 0;
}
4803
/*
 * Common worker for the SEND_MODFD_RESP(_64) ioctls.
 *
 * Copies the listener's response descriptor from @argp, validates it
 * against the listener's shared buffer, translates the user-space
 * response pointer into the listener's kernel mapping, patches in the
 * ion-fd buffer addresses (32- or 64-bit layout per @is_64bit_addr),
 * and wakes the thread waiting for the listener response.
 *
 * Returns 0 on success, -EINVAL on any validation/copy failure.
 */
static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp, bool is_64bit_addr)
{
	struct qseecom_send_modfd_listener_resp resp;
	struct qseecom_registered_listener_list *this_lstnr = NULL;

	if (copy_from_user(&resp, argp, sizeof(resp))) {
		pr_err("copy_from_user failed");
		return -EINVAL;
	}

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (this_lstnr == NULL)
		return -EINVAL;

	if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
		return -EINVAL;

	/* translate the user virtual address into the kernel mapping */
	resp.resp_buf_ptr = this_lstnr->sb_virt +
		(uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);

	if (!is_64bit_addr)
		__qseecom_update_cmd_buf(&resp, false, data);
	else
		__qseecom_update_cmd_buf_64(&resp, false, data);
	qseecom.send_resp_flag = 1;
	this_lstnr->send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4834
/* 32-bit address variant of the modified-fd listener response ioctl. */
static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, false);
}
4840
/* 64-bit address variant of the modified-fd listener response ioctl. */
static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, true);
}
4846
/*
 * Report the QSEOS version to userspace: copy in the request from
 * @argp, fill in qseecom.qseos_version, and copy the result back.
 *
 * Returns 0 on success, -EINVAL if either user copy fails.
 */
static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
						void __user *argp)
{
	struct qseecom_qseos_version_req req;

	if (copy_from_user(&req, argp, sizeof(req))) {
		pr_err("copy_from_user failed");
		return -EINVAL;
	}
	req.qseos_version = qseecom.qseos_version;
	if (copy_to_user(argp, &req, sizeof(req))) {
		pr_err("copy_to_user failed");
		return -EINVAL;
	}
	return 0;
}
4863
/*
 * Take a reference on the crypto engine clocks for @ce (CLK_QSEE or
 * CLK_CE_DRV).  The first reference prepares/enables the core, iface
 * and bus clocks; subsequent calls only bump clk_access_cnt.
 * Serialized by clk_access_lock.
 *
 * Returns 0 on success, -EINVAL for an unsupported @ce, -EIO if the
 * count is saturated or any clock fails to enable (clocks enabled so
 * far are rolled back via the goto chain).
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	/* already enabled: just take another reference */
	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

/* unwind in reverse order of enabling */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
4931
4932static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
4933{
4934 struct qseecom_clk *qclk;
4935
4936 if (qseecom.no_clock_support)
4937 return;
4938
4939 if (ce == CLK_QSEE)
4940 qclk = &qseecom.qsee;
4941 else
4942 qclk = &qseecom.ce_drv;
4943
4944 mutex_lock(&clk_access_lock);
4945
4946 if (qclk->clk_access_cnt == 0) {
4947 mutex_unlock(&clk_access_lock);
4948 return;
4949 }
4950
4951 if (qclk->clk_access_cnt == 1) {
4952 if (qclk->ce_clk != NULL)
4953 clk_disable_unprepare(qclk->ce_clk);
4954 if (qclk->ce_core_clk != NULL)
4955 clk_disable_unprepare(qclk->ce_core_clk);
4956 if (qclk->ce_bus_clk != NULL)
4957 clk_disable_unprepare(qclk->ce_bus_clk);
4958 }
4959 qclk->clk_access_cnt--;
4960 mutex_unlock(&clk_access_lock);
4961}
4962
/*
 * Vote for crypto bandwidth of @clk_type (CLK_DFAB or CLK_SFPB) on
 * behalf of @data.
 *
 * The two vote kinds are refcounted separately (qsee_bw_count /
 * qsee_sfpb_bw_count) under qsee_bw_mutex.  The first vote of a kind
 * picks the msm_bus request index matching the active set — per this
 * code: 1 = DFAB only, 2 = SFPB only, 3 = both — and, when a CE core
 * source clock exists, enables the QSEE clocks first.  The client's
 * perf_enabled / fast_load_enabled flag is set so its vote can be
 * dropped on release.
 *
 * Returns 0 on success or the msm_bus/clock error code.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			/* SFPB already voted: move to the "both" level */
			if (qseecom.qsee_sfpb_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			/* DFAB already voted: move to the "both" level */
			if (qseecom.qsee_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}

			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
5046
5047static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5048 int32_t clk_type)
5049{
5050 int32_t ret = 0;
5051 struct qseecom_clk *qclk;
5052
5053 qclk = &qseecom.qsee;
5054
5055 if (qseecom.no_clock_support)
5056 return;
5057 if (!qseecom.qsee_perf_client)
5058 return;
5059
5060 switch (clk_type) {
5061 case CLK_DFAB:
5062 mutex_lock(&qsee_bw_mutex);
5063 if (qseecom.qsee_bw_count == 0) {
5064 pr_err("Client error.Extra call to disable DFAB clk\n");
5065 mutex_unlock(&qsee_bw_mutex);
5066 return;
5067 }
5068
5069 if (qseecom.qsee_bw_count == 1) {
5070 if (qseecom.qsee_sfpb_bw_count > 0)
5071 ret = msm_bus_scale_client_update_request(
5072 qseecom.qsee_perf_client, 2);
5073 else {
5074 ret = msm_bus_scale_client_update_request(
5075 qseecom.qsee_perf_client, 0);
5076 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5077 __qseecom_disable_clk(CLK_QSEE);
5078 }
5079 if (ret)
5080 pr_err("SFPB Bandwidth req fail (%d)\n",
5081 ret);
5082 else {
5083 qseecom.qsee_bw_count--;
5084 data->perf_enabled = false;
5085 }
5086 } else {
5087 qseecom.qsee_bw_count--;
5088 data->perf_enabled = false;
5089 }
5090 mutex_unlock(&qsee_bw_mutex);
5091 break;
5092 case CLK_SFPB:
5093 mutex_lock(&qsee_bw_mutex);
5094 if (qseecom.qsee_sfpb_bw_count == 0) {
5095 pr_err("Client error.Extra call to disable SFPB clk\n");
5096 mutex_unlock(&qsee_bw_mutex);
5097 return;
5098 }
5099 if (qseecom.qsee_sfpb_bw_count == 1) {
5100 if (qseecom.qsee_bw_count > 0)
5101 ret = msm_bus_scale_client_update_request(
5102 qseecom.qsee_perf_client, 1);
5103 else {
5104 ret = msm_bus_scale_client_update_request(
5105 qseecom.qsee_perf_client, 0);
5106 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5107 __qseecom_disable_clk(CLK_QSEE);
5108 }
5109 if (ret)
5110 pr_err("SFPB Bandwidth req fail (%d)\n",
5111 ret);
5112 else {
5113 qseecom.qsee_sfpb_bw_count--;
5114 data->fast_load_enabled = false;
5115 }
5116 } else {
5117 qseecom.qsee_sfpb_bw_count--;
5118 data->fast_load_enabled = false;
5119 }
5120 mutex_unlock(&qsee_bw_mutex);
5121 break;
5122 default:
5123 pr_err("Clock type not defined\n");
5124 break;
5125 }
5126
5127}
5128
5129static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5130 void __user *argp)
5131{
5132 struct ion_handle *ihandle; /* Ion handle */
5133 struct qseecom_load_img_req load_img_req;
5134 int uret = 0;
5135 int ret;
5136 ion_phys_addr_t pa = 0;
5137 size_t len;
5138 struct qseecom_load_app_ireq load_req;
5139 struct qseecom_load_app_64bit_ireq load_req_64bit;
5140 struct qseecom_command_scm_resp resp;
5141 void *cmd_buf = NULL;
5142 size_t cmd_len;
5143 /* Copy the relevant information needed for loading the image */
5144 if (copy_from_user(&load_img_req,
5145 (void __user *)argp,
5146 sizeof(struct qseecom_load_img_req))) {
5147 pr_err("copy_from_user failed\n");
5148 return -EFAULT;
5149 }
5150
5151 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005152 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005153 load_img_req.ifd_data_fd);
5154 if (IS_ERR_OR_NULL(ihandle)) {
5155 pr_err("Ion client could not retrieve the handle\n");
5156 return -ENOMEM;
5157 }
5158
5159 /* Get the physical address of the ION BUF */
5160 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5161 if (ret) {
5162 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5163 ret);
5164 return ret;
5165 }
5166 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5167 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5168 len, load_img_req.mdt_len,
5169 load_img_req.img_len);
5170 return ret;
5171 }
5172 /* Populate the structure for sending scm call to load image */
5173 if (qseecom.qsee_version < QSEE_VERSION_40) {
5174 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5175 load_req.mdt_len = load_img_req.mdt_len;
5176 load_req.img_len = load_img_req.img_len;
5177 load_req.phy_addr = (uint32_t)pa;
5178 cmd_buf = (void *)&load_req;
5179 cmd_len = sizeof(struct qseecom_load_app_ireq);
5180 } else {
5181 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5182 load_req_64bit.mdt_len = load_img_req.mdt_len;
5183 load_req_64bit.img_len = load_img_req.img_len;
5184 load_req_64bit.phy_addr = (uint64_t)pa;
5185 cmd_buf = (void *)&load_req_64bit;
5186 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5187 }
5188
5189 if (qseecom.support_bus_scaling) {
5190 mutex_lock(&qsee_bw_mutex);
5191 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5192 mutex_unlock(&qsee_bw_mutex);
5193 if (ret) {
5194 ret = -EIO;
5195 goto exit_cpu_restore;
5196 }
5197 }
5198
5199 /* Vote for the SFPB clock */
5200 ret = __qseecom_enable_clk_scale_up(data);
5201 if (ret) {
5202 ret = -EIO;
5203 goto exit_register_bus_bandwidth_needs;
5204 }
5205 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5206 ION_IOC_CLEAN_INV_CACHES);
5207 if (ret) {
5208 pr_err("cache operation failed %d\n", ret);
5209 goto exit_disable_clock;
5210 }
5211 /* SCM_CALL to load the external elf */
5212 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5213 &resp, sizeof(resp));
5214 if (ret) {
5215 pr_err("scm_call to load failed : ret %d\n",
5216 ret);
5217 ret = -EFAULT;
5218 goto exit_disable_clock;
5219 }
5220
5221 switch (resp.result) {
5222 case QSEOS_RESULT_SUCCESS:
5223 break;
5224 case QSEOS_RESULT_INCOMPLETE:
5225 pr_err("%s: qseos result incomplete\n", __func__);
5226 ret = __qseecom_process_incomplete_cmd(data, &resp);
5227 if (ret)
5228 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5229 break;
5230 case QSEOS_RESULT_FAILURE:
5231 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5232 ret = -EFAULT;
5233 break;
5234 default:
5235 pr_err("scm_call response result %d not supported\n",
5236 resp.result);
5237 ret = -EFAULT;
5238 break;
5239 }
5240
5241exit_disable_clock:
5242 __qseecom_disable_clk_scale_down(data);
5243
5244exit_register_bus_bandwidth_needs:
5245 if (qseecom.support_bus_scaling) {
5246 mutex_lock(&qsee_bw_mutex);
5247 uret = qseecom_unregister_bus_bandwidth_needs(data);
5248 mutex_unlock(&qsee_bw_mutex);
5249 if (uret)
5250 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5251 uret, ret);
5252 }
5253
5254exit_cpu_restore:
5255 /* Deallocate the handle */
5256 if (!IS_ERR_OR_NULL(ihandle))
5257 ion_free(qseecom.ion_clnt, ihandle);
5258 return ret;
5259}
5260
5261static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5262{
5263 int ret = 0;
5264 struct qseecom_command_scm_resp resp;
5265 struct qseecom_unload_app_ireq req;
5266
5267 /* unavailable client app */
5268 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5269
5270 /* Populate the structure for sending scm call to unload image */
5271 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5272
5273 /* SCM_CALL to unload the external elf */
5274 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5275 sizeof(struct qseecom_unload_app_ireq),
5276 &resp, sizeof(resp));
5277 if (ret) {
5278 pr_err("scm_call to unload failed : ret %d\n",
5279 ret);
5280 ret = -EFAULT;
5281 goto qseecom_unload_external_elf_scm_err;
5282 }
5283 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5284 ret = __qseecom_process_incomplete_cmd(data, &resp);
5285 if (ret)
5286 pr_err("process_incomplete_cmd fail err: %d\n",
5287 ret);
5288 } else {
5289 if (resp.result != QSEOS_RESULT_SUCCESS) {
5290 pr_err("scm_call to unload image failed resp.result =%d\n",
5291 resp.result);
5292 ret = -EFAULT;
5293 }
5294 }
5295
5296qseecom_unload_external_elf_scm_err:
5297
5298 return ret;
5299}
5300
5301static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5302 void __user *argp)
5303{
5304
5305 int32_t ret;
5306 struct qseecom_qseos_app_load_query query_req;
5307 struct qseecom_check_app_ireq req;
5308 struct qseecom_registered_app_list *entry = NULL;
5309 unsigned long flags = 0;
5310 uint32_t app_arch = 0, app_id = 0;
5311 bool found_app = false;
5312
5313 /* Copy the relevant information needed for loading the image */
5314 if (copy_from_user(&query_req,
5315 (void __user *)argp,
5316 sizeof(struct qseecom_qseos_app_load_query))) {
5317 pr_err("copy_from_user failed\n");
5318 return -EFAULT;
5319 }
5320
5321 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5322 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5323 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5324
5325 ret = __qseecom_check_app_exists(req, &app_id);
5326 if (ret) {
5327 pr_err(" scm call to check if app is loaded failed");
5328 return ret; /* scm call failed */
5329 }
5330 if (app_id) {
5331 pr_debug("App id %d (%s) already exists\n", app_id,
5332 (char *)(req.app_name));
5333 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5334 list_for_each_entry(entry,
5335 &qseecom.registered_app_list_head, list){
5336 if (entry->app_id == app_id) {
5337 app_arch = entry->app_arch;
5338 entry->ref_cnt++;
5339 found_app = true;
5340 break;
5341 }
5342 }
5343 spin_unlock_irqrestore(
5344 &qseecom.registered_app_list_lock, flags);
5345 data->client.app_id = app_id;
5346 query_req.app_id = app_id;
5347 if (app_arch) {
5348 data->client.app_arch = app_arch;
5349 query_req.app_arch = app_arch;
5350 } else {
5351 data->client.app_arch = 0;
5352 query_req.app_arch = 0;
5353 }
5354 strlcpy(data->client.app_name, query_req.app_name,
5355 MAX_APP_NAME_SIZE);
5356 /*
5357 * If app was loaded by appsbl before and was not registered,
5358 * regiser this app now.
5359 */
5360 if (!found_app) {
5361 pr_debug("Register app %d [%s] which was loaded before\n",
5362 ret, (char *)query_req.app_name);
5363 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5364 if (!entry) {
5365 pr_err("kmalloc for app entry failed\n");
5366 return -ENOMEM;
5367 }
5368 entry->app_id = app_id;
5369 entry->ref_cnt = 1;
5370 entry->app_arch = data->client.app_arch;
5371 strlcpy(entry->app_name, data->client.app_name,
5372 MAX_APP_NAME_SIZE);
5373 entry->app_blocked = false;
5374 entry->blocked_on_listener_id = 0;
5375 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5376 flags);
5377 list_add_tail(&entry->list,
5378 &qseecom.registered_app_list_head);
5379 spin_unlock_irqrestore(
5380 &qseecom.registered_app_list_lock, flags);
5381 }
5382 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5383 pr_err("copy_to_user failed\n");
5384 return -EFAULT;
5385 }
5386 return -EEXIST; /* app already loaded */
5387 } else {
5388 return 0; /* app not loaded */
5389 }
5390}
5391
5392static int __qseecom_get_ce_pipe_info(
5393 enum qseecom_key_management_usage_type usage,
5394 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5395{
5396 int ret = -EINVAL;
5397 int i, j;
5398 struct qseecom_ce_info_use *p = NULL;
5399 int total = 0;
5400 struct qseecom_ce_pipe_entry *pcepipe;
5401
5402 switch (usage) {
5403 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5404 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5405 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5406 if (qseecom.support_fde) {
5407 p = qseecom.ce_info.fde;
5408 total = qseecom.ce_info.num_fde;
5409 } else {
5410 pr_err("system does not support fde\n");
5411 return -EINVAL;
5412 }
5413 break;
5414 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5415 if (qseecom.support_pfe) {
5416 p = qseecom.ce_info.pfe;
5417 total = qseecom.ce_info.num_pfe;
5418 } else {
5419 pr_err("system does not support pfe\n");
5420 return -EINVAL;
5421 }
5422 break;
5423 default:
5424 pr_err("unsupported usage %d\n", usage);
5425 return -EINVAL;
5426 }
5427
5428 for (j = 0; j < total; j++) {
5429 if (p->unit_num == unit) {
5430 pcepipe = p->ce_pipe_entry;
5431 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5432 (*ce_hw)[i] = pcepipe->ce_num;
5433 *pipe = pcepipe->ce_pipe_pair;
5434 pcepipe++;
5435 }
5436 ret = 0;
5437 break;
5438 }
5439 p++;
5440 }
5441 return ret;
5442}
5443
/*
 * Ask QSEE to generate and persist an encryption key for @usage.
 * An already-existing key id (QSEOS_RESULT_FAIL_KEY_ID_EXISTS) is
 * treated as success. The QSEE clock is voted on for the duration
 * of the scm call and released on every exit path.
 * Returns 0 on success (or key-exists), -EFAULT on bad usage or scm
 * failure, -EINVAL when TZ reports a hard failure.
 */
static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_generate_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	/* Only disk/file-encryption usages are valid key owners. */
	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_generate_ireq),
		&resp, sizeof(resp));
	if (ret) {
		/*
		 * -EINVAL with KEY_ID_EXISTS means the key is already
		 * provisioned — not an error for this caller.
		 */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
			pr_debug("Key ID exists.\n");
			ret = 0;
		} else {
			pr_err("scm call to generate key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto generate_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
		/* Key already provisioned; keep ret == 0. */
		pr_debug("Key ID exists.\n");
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener servicing; finish the command. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
				pr_debug("Key ID exists.\n");
				ret = 0;
			} else {
				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			}
		}
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("gen key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
generate_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5503
/*
 * Ask QSEE to delete a previously saved key for @usage.
 * The QSEE clock is voted on around the scm call and released on all
 * exit paths. MAX_ATTEMPT from TZ (password retry limit reached) maps
 * to -ERANGE so callers can distinguish it from hard failures.
 * Returns 0 on success, -EFAULT on bad usage/scm failure, -ERANGE on
 * max attempts, -EINVAL on other TZ failures.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* -EINVAL + MAX_ATTEMPT means the retry limit was hit. */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener servicing; finish the command. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5564
5565static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5566 enum qseecom_key_management_usage_type usage,
5567 struct qseecom_key_select_ireq *ireq)
5568{
5569 struct qseecom_command_scm_resp resp;
5570 int ret;
5571
5572 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5573 usage >= QSEOS_KM_USAGE_MAX) {
5574 pr_err("Error:: unsupported usage %d\n", usage);
5575 return -EFAULT;
5576 }
5577 ret = __qseecom_enable_clk(CLK_QSEE);
5578 if (ret)
5579 return ret;
5580
5581 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5582 ret = __qseecom_enable_clk(CLK_CE_DRV);
5583 if (ret)
5584 return ret;
5585 }
5586
5587 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5588 ireq, sizeof(struct qseecom_key_select_ireq),
5589 &resp, sizeof(struct qseecom_command_scm_resp));
5590 if (ret) {
5591 if (ret == -EINVAL &&
5592 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5593 pr_debug("Max attempts to input password reached.\n");
5594 ret = -ERANGE;
5595 } else if (ret == -EINVAL &&
5596 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5597 pr_debug("Set Key operation under processing...\n");
5598 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5599 } else {
5600 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5601 ret);
5602 ret = -EFAULT;
5603 }
5604 goto set_key_exit;
5605 }
5606
5607 switch (resp.result) {
5608 case QSEOS_RESULT_SUCCESS:
5609 break;
5610 case QSEOS_RESULT_INCOMPLETE:
5611 ret = __qseecom_process_incomplete_cmd(data, &resp);
5612 if (ret) {
5613 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5614 resp.result);
5615 if (resp.result ==
5616 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5617 pr_debug("Set Key operation under processing...\n");
5618 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5619 }
5620 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5621 pr_debug("Max attempts to input password reached.\n");
5622 ret = -ERANGE;
5623 }
5624 }
5625 break;
5626 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5627 pr_debug("Max attempts to input password reached.\n");
5628 ret = -ERANGE;
5629 break;
5630 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5631 pr_debug("Set Key operation under processing...\n");
5632 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5633 break;
5634 case QSEOS_RESULT_FAILURE:
5635 default:
5636 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5637 ret = -EINVAL;
5638 break;
5639 }
5640set_key_exit:
5641 __qseecom_disable_clk(CLK_QSEE);
5642 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5643 __qseecom_disable_clk(CLK_CE_DRV);
5644 return ret;
5645}
5646
/*
 * Update the user info (password hash) bound to the current key for
 * @usage via QSEE. Votes on CLK_QSEE around the scm call; released on
 * all exit paths. A PENDING_OPERATION result is returned as the
 * positive QSEOS sentinel so the caller can retry.
 *
 * NOTE(review): on scm-call failure with a PENDING result, execution
 * deliberately falls through to the resp.result switch below rather
 * than returning immediately — preserved as-is.
 */
static int __qseecom_update_current_key_user_info(
		struct qseecom_dev_handle *data,
		enum qseecom_key_management_usage_type usage,
		struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* -EINVAL + PENDING means TZ is busy — retryable. */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
					ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener servicing; finish the command. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5708
5709
5710static int qseecom_enable_ice_setup(int usage)
5711{
5712 int ret = 0;
5713
5714 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5715 ret = qcom_ice_setup_ice_hw("ufs", true);
5716 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5717 ret = qcom_ice_setup_ice_hw("sdcc", true);
5718
5719 return ret;
5720}
5721
5722static int qseecom_disable_ice_setup(int usage)
5723{
5724 int ret = 0;
5725
5726 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5727 ret = qcom_ice_setup_ice_hw("ufs", false);
5728 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5729 ret = qcom_ice_setup_ice_hw("sdcc", false);
5730
5731 return ret;
5732}
5733
5734static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5735{
5736 struct qseecom_ce_info_use *pce_info_use, *p;
5737 int total = 0;
5738 int i;
5739
5740 switch (usage) {
5741 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5742 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5743 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5744 p = qseecom.ce_info.fde;
5745 total = qseecom.ce_info.num_fde;
5746 break;
5747 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5748 p = qseecom.ce_info.pfe;
5749 total = qseecom.ce_info.num_pfe;
5750 break;
5751 default:
5752 pr_err("unsupported usage %d\n", usage);
5753 return -EINVAL;
5754 }
5755
5756 pce_info_use = NULL;
5757
5758 for (i = 0; i < total; i++) {
5759 if (p->unit_num == unit) {
5760 pce_info_use = p;
5761 break;
5762 }
5763 p++;
5764 }
5765 if (!pce_info_use) {
5766 pr_err("can not find %d\n", unit);
5767 return -EINVAL;
5768 }
5769 return pce_info_use->num_ce_pipe_entries;
5770}
5771
5772static int qseecom_create_key(struct qseecom_dev_handle *data,
5773 void __user *argp)
5774{
5775 int i;
5776 uint32_t *ce_hw = NULL;
5777 uint32_t pipe = 0;
5778 int ret = 0;
5779 uint32_t flags = 0;
5780 struct qseecom_create_key_req create_key_req;
5781 struct qseecom_key_generate_ireq generate_key_ireq;
5782 struct qseecom_key_select_ireq set_key_ireq;
5783 uint32_t entries = 0;
5784
5785 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5786 if (ret) {
5787 pr_err("copy_from_user failed\n");
5788 return ret;
5789 }
5790
5791 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5792 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5793 pr_err("unsupported usage %d\n", create_key_req.usage);
5794 ret = -EFAULT;
5795 return ret;
5796 }
5797 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5798 create_key_req.usage);
5799 if (entries <= 0) {
5800 pr_err("no ce instance for usage %d instance %d\n",
5801 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5802 ret = -EINVAL;
5803 return ret;
5804 }
5805
5806 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5807 if (!ce_hw) {
5808 ret = -ENOMEM;
5809 return ret;
5810 }
5811 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5812 DEFAULT_CE_INFO_UNIT);
5813 if (ret) {
5814 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5815 ret = -EINVAL;
5816 goto free_buf;
5817 }
5818
5819 if (qseecom.fde_key_size)
5820 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5821 else
5822 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5823
5824 generate_key_ireq.flags = flags;
5825 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5826 memset((void *)generate_key_ireq.key_id,
5827 0, QSEECOM_KEY_ID_SIZE);
5828 memset((void *)generate_key_ireq.hash32,
5829 0, QSEECOM_HASH_SIZE);
5830 memcpy((void *)generate_key_ireq.key_id,
5831 (void *)key_id_array[create_key_req.usage].desc,
5832 QSEECOM_KEY_ID_SIZE);
5833 memcpy((void *)generate_key_ireq.hash32,
5834 (void *)create_key_req.hash32,
5835 QSEECOM_HASH_SIZE);
5836
5837 ret = __qseecom_generate_and_save_key(data,
5838 create_key_req.usage, &generate_key_ireq);
5839 if (ret) {
5840 pr_err("Failed to generate key on storage: %d\n", ret);
5841 goto free_buf;
5842 }
5843
5844 for (i = 0; i < entries; i++) {
5845 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5846 if (create_key_req.usage ==
5847 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5848 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5849 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5850
5851 } else if (create_key_req.usage ==
5852 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5853 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5854 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5855
5856 } else {
5857 set_key_ireq.ce = ce_hw[i];
5858 set_key_ireq.pipe = pipe;
5859 }
5860 set_key_ireq.flags = flags;
5861
5862 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5863 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5864 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5865 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5866 memcpy((void *)set_key_ireq.key_id,
5867 (void *)key_id_array[create_key_req.usage].desc,
5868 QSEECOM_KEY_ID_SIZE);
5869 memcpy((void *)set_key_ireq.hash32,
5870 (void *)create_key_req.hash32,
5871 QSEECOM_HASH_SIZE);
5872 /*
5873 * It will return false if it is GPCE based crypto instance or
5874 * ICE is setup properly
5875 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07005876 ret = qseecom_enable_ice_setup(create_key_req.usage);
5877 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005878 goto free_buf;
5879
5880 do {
5881 ret = __qseecom_set_clear_ce_key(data,
5882 create_key_req.usage,
5883 &set_key_ireq);
5884 /*
5885 * wait a little before calling scm again to let other
5886 * processes run
5887 */
5888 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
5889 msleep(50);
5890
5891 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
5892
5893 qseecom_disable_ice_setup(create_key_req.usage);
5894
5895 if (ret) {
5896 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
5897 pipe, ce_hw[i], ret);
5898 goto free_buf;
5899 } else {
5900 pr_err("Set the key successfully\n");
5901 if ((create_key_req.usage ==
5902 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
5903 (create_key_req.usage ==
5904 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
5905 goto free_buf;
5906 }
5907 }
5908
5909free_buf:
5910 kzfree(ce_hw);
5911 return ret;
5912}
5913
5914static int qseecom_wipe_key(struct qseecom_dev_handle *data,
5915 void __user *argp)
5916{
5917 uint32_t *ce_hw = NULL;
5918 uint32_t pipe = 0;
5919 int ret = 0;
5920 uint32_t flags = 0;
5921 int i, j;
5922 struct qseecom_wipe_key_req wipe_key_req;
5923 struct qseecom_key_delete_ireq delete_key_ireq;
5924 struct qseecom_key_select_ireq clear_key_ireq;
5925 uint32_t entries = 0;
5926
5927 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
5928 if (ret) {
5929 pr_err("copy_from_user failed\n");
5930 return ret;
5931 }
5932
5933 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5934 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5935 pr_err("unsupported usage %d\n", wipe_key_req.usage);
5936 ret = -EFAULT;
5937 return ret;
5938 }
5939
5940 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5941 wipe_key_req.usage);
5942 if (entries <= 0) {
5943 pr_err("no ce instance for usage %d instance %d\n",
5944 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
5945 ret = -EINVAL;
5946 return ret;
5947 }
5948
5949 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5950 if (!ce_hw) {
5951 ret = -ENOMEM;
5952 return ret;
5953 }
5954
5955 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
5956 DEFAULT_CE_INFO_UNIT);
5957 if (ret) {
5958 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5959 ret = -EINVAL;
5960 goto free_buf;
5961 }
5962
5963 if (wipe_key_req.wipe_key_flag) {
5964 delete_key_ireq.flags = flags;
5965 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
5966 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5967 memcpy((void *)delete_key_ireq.key_id,
5968 (void *)key_id_array[wipe_key_req.usage].desc,
5969 QSEECOM_KEY_ID_SIZE);
5970 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5971
5972 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
5973 &delete_key_ireq);
5974 if (ret) {
5975 pr_err("Failed to delete key from ssd storage: %d\n",
5976 ret);
5977 ret = -EFAULT;
5978 goto free_buf;
5979 }
5980 }
5981
5982 for (j = 0; j < entries; j++) {
5983 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5984 if (wipe_key_req.usage ==
5985 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5986 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5987 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5988 } else if (wipe_key_req.usage ==
5989 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5990 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5991 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5992 } else {
5993 clear_key_ireq.ce = ce_hw[j];
5994 clear_key_ireq.pipe = pipe;
5995 }
5996 clear_key_ireq.flags = flags;
5997 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5998 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
5999 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6000 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6001
6002 /*
6003 * It will return false if it is GPCE based crypto instance or
6004 * ICE is setup properly
6005 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006006 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6007 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006008 goto free_buf;
6009
6010 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6011 &clear_key_ireq);
6012
6013 qseecom_disable_ice_setup(wipe_key_req.usage);
6014
6015 if (ret) {
6016 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6017 pipe, ce_hw[j], ret);
6018 ret = -EFAULT;
6019 goto free_buf;
6020 }
6021 }
6022
6023free_buf:
6024 kzfree(ce_hw);
6025 return ret;
6026}
6027
6028static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6029 void __user *argp)
6030{
6031 int ret = 0;
6032 uint32_t flags = 0;
6033 struct qseecom_update_key_userinfo_req update_key_req;
6034 struct qseecom_key_userinfo_update_ireq ireq;
6035
6036 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6037 if (ret) {
6038 pr_err("copy_from_user failed\n");
6039 return ret;
6040 }
6041
6042 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6043 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6044 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6045 return -EFAULT;
6046 }
6047
6048 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6049
6050 if (qseecom.fde_key_size)
6051 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6052 else
6053 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6054
6055 ireq.flags = flags;
6056 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6057 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6058 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6059 memcpy((void *)ireq.key_id,
6060 (void *)key_id_array[update_key_req.usage].desc,
6061 QSEECOM_KEY_ID_SIZE);
6062 memcpy((void *)ireq.current_hash32,
6063 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6064 memcpy((void *)ireq.new_hash32,
6065 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6066
6067 do {
6068 ret = __qseecom_update_current_key_user_info(data,
6069 update_key_req.usage,
6070 &ireq);
6071 /*
6072 * wait a little before calling scm again to let other
6073 * processes run
6074 */
6075 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6076 msleep(50);
6077
6078 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6079 if (ret) {
6080 pr_err("Failed to update key info: %d\n", ret);
6081 return ret;
6082 }
6083 return ret;
6084
6085}
6086static int qseecom_is_es_activated(void __user *argp)
6087{
6088 struct qseecom_is_es_activated_req req;
6089 struct qseecom_command_scm_resp resp;
6090 int ret;
6091
6092 if (qseecom.qsee_version < QSEE_VERSION_04) {
6093 pr_err("invalid qsee version\n");
6094 return -ENODEV;
6095 }
6096
6097 if (argp == NULL) {
6098 pr_err("arg is null\n");
6099 return -EINVAL;
6100 }
6101
6102 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6103 &req, sizeof(req), &resp, sizeof(resp));
6104 if (ret) {
6105 pr_err("scm_call failed\n");
6106 return ret;
6107 }
6108
6109 req.is_activated = resp.result;
6110 ret = copy_to_user(argp, &req, sizeof(req));
6111 if (ret) {
6112 pr_err("copy_to_user failed\n");
6113 return ret;
6114 }
6115
6116 return 0;
6117}
6118
6119static int qseecom_save_partition_hash(void __user *argp)
6120{
6121 struct qseecom_save_partition_hash_req req;
6122 struct qseecom_command_scm_resp resp;
6123 int ret;
6124
6125 memset(&resp, 0x00, sizeof(resp));
6126
6127 if (qseecom.qsee_version < QSEE_VERSION_04) {
6128 pr_err("invalid qsee version\n");
6129 return -ENODEV;
6130 }
6131
6132 if (argp == NULL) {
6133 pr_err("arg is null\n");
6134 return -EINVAL;
6135 }
6136
6137 ret = copy_from_user(&req, argp, sizeof(req));
6138 if (ret) {
6139 pr_err("copy_from_user failed\n");
6140 return ret;
6141 }
6142
6143 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6144 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6145 if (ret) {
6146 pr_err("qseecom_scm_call failed\n");
6147 return ret;
6148 }
6149
6150 return 0;
6151}
6152
/*
 * Cipher a Mobile Device Theft Protection DIP buffer in TZ.
 * Copies the user input into a page-aligned kernel bounce buffer,
 * flushes caches so TZ sees coherent data, issues the
 * TZ_MDTP_CIPHER_DIP scm call, then flushes and copies the output
 * buffer back to userspace. The do { } while (0) wrapper gives a
 * single cleanup point for both bounce buffers.
 */
static int qseecom_mdtp_cipher_dip(void __user *argp)
{
	struct qseecom_mdtp_cipher_dip_req req;
	u32 tzbuflenin, tzbuflenout;
	char *tzbufin = NULL, *tzbufout = NULL;
	struct scm_desc desc = {0};
	int ret;

	do {
		/* Copy the parameters from userspace */
		if (argp == NULL) {
			pr_err("arg is null\n");
			ret = -EINVAL;
			break;
		}

		ret = copy_from_user(&req, argp, sizeof(req));
		if (ret) {
			pr_err("copy_from_user failed, ret= %d\n", ret);
			break;
		}

		/* Validate user pointers, sizes and direction flag. */
		if (req.in_buf == NULL || req.out_buf == NULL ||
			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
				req.direction > 1) {
			pr_err("invalid parameters\n");
			ret = -EINVAL;
			break;
		}

		/* Copy the input buffer from userspace to kernel space */
		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
		if (!tzbufin) {
			pr_err("error allocating in buffer\n");
			ret = -ENOMEM;
			break;
		}

		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
		if (ret) {
			pr_err("copy_from_user failed, ret=%d\n", ret);
			break;
		}

		/* make the input visible to the secure world */
		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);

		/* Prepare the output buffer in kernel space */
		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
		if (!tzbufout) {
			pr_err("error allocating out buffer\n");
			ret = -ENOMEM;
			break;
		}

		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);

		/* Send the command to TZ */
		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
		desc.args[0] = virt_to_phys(tzbufin);
		desc.args[1] = req.in_buf_size;
		desc.args[2] = virt_to_phys(tzbufout);
		desc.args[3] = req.out_buf_size;
		desc.args[4] = req.direction;

		/* QSEE clock must be voted on across the scm call. */
		ret = __qseecom_enable_clk(CLK_QSEE);
		if (ret)
			break;

		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);

		__qseecom_disable_clk(CLK_QSEE);

		if (ret) {
			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
				ret);
			break;
		}

		/* Copy the output buffer from kernel space to userspace */
		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
		if (ret) {
			pr_err("copy_to_user failed, ret=%d\n", ret);
			break;
		}
	} while (0);

	/* kzfree zeroizes before freeing; kzfree(NULL) is a no-op. */
	kzfree(tzbufin);
	kzfree(tzbufout);

	return ret;
}
6248
/*
 * Validate a QTEEC request against the client's shared buffer:
 * both the command and response regions must be non-empty, lie
 * entirely inside [user_virt_sb_base, user_virt_sb_base + sb_length),
 * and all length arithmetic must be overflow-free. The order of the
 * checks matters: each overflow guard precedes the sum it protects,
 * so do not reorder them.
 * Returns 0 if valid, -EINVAL/-EFAULT/-ENOMEM otherwise.
 */
static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req)
{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	/* Only regular client apps may issue QTEEC requests. */
	if (data->type != QSEECOM_CLIENT_APP)
		return -EFAULT;

	/* Guard the req_len + resp_len additions used below. */
	if (req->req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if (req->req_len + req->resp_len > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
		(req->req_len + req->resp_len), data->client.sb_length);
		return -ENOMEM;
	}

	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* Both buffer starts must fall within the shared buffer. */
	if (((uintptr_t)req->req_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->req_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if (((uintptr_t)req->resp_ptr <
			data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if ((req->req_len == 0) || (req->resp_len == 0)) {
		pr_err("cmd buf lengtgh/response buf length not valid\n");
		return -EINVAL;
	}

	/* Guard ptr + len additions in the final range check. */
	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
		pr_err("Integer overflow in req_len & req_ptr\n");
		return -EINVAL;
	}

	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_ptr\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Both buffer ends must also stay inside the shared buffer. */
	if ((((uintptr_t)req->req_ptr + req->req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_ptr + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
6323
/*
 * __qseecom_qteec_handle_pre_alc_fd() - build a physical scatter-gather
 * descriptor for a pre-allocated (secure-heap) ion fd.
 *
 * Allocates one DMA-coherent buffer laid out as:
 *   [uint32_t nents][struct qseecom_sg_entry entries[nents]]
 * fills it from @sg_ptr, and records virtual address, physical address
 * and size in data->client.sec_buf_fd[fd_idx] so the caller can hand the
 * physical address to TZ and free the buffer during cleanup.
 *
 * @data:   per-fd device handle owning sec_buf_fd[] bookkeeping.
 * @fd_idx: slot in sec_buf_fd[], must be < MAX_ION_FD.
 * @sg_ptr: ion sg table describing the fd's memory.
 *
 * Return: 0 on success, -ENOMEM on bad index or allocation failure.
 */
static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
			uint32_t fd_idx, struct sg_table *sg_ptr)
{
	struct scatterlist *sg = sg_ptr->sgl;
	struct qseecom_sg_entry *sg_entry;
	void *buf;
	uint i;
	size_t size;
	dma_addr_t coh_pmem;

	if (fd_idx >= MAX_ION_FD) {
		pr_err("fd_idx [%d] is invalid\n", fd_idx);
		return -ENOMEM;
	}
	/*
	 * Allocate a buffer, populate it with number of entry plus
	 * each sg entry's phy addr and length; then return the
	 * phy_addr of the buffer.
	 */
	size = sizeof(uint32_t) +
		sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
	/*
	 * NOTE(review): this rounding always advances to the next page
	 * boundary, so an already page-aligned size gets a whole extra page
	 * (PAGE_ALIGN() would not); also sg_ptr->nents is not bounded here --
	 * presumed validated by the caller. TODO confirm both.
	 */
	size = (size + PAGE_SIZE) & PAGE_MASK;
	buf = dma_alloc_coherent(qseecom.pdev,
			size, &coh_pmem, GFP_KERNEL);
	if (buf == NULL) {
		pr_err("failed to alloc memory for sg buf\n");
		return -ENOMEM;
	}
	/* Header word: number of entries that follow */
	*(uint32_t *)buf = sg_ptr->nents;
	sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
	for (i = 0; i < sg_ptr->nents; i++) {
		sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
		sg_entry->len = sg->length;
		sg_entry++;
		sg = sg_next(sg);
	}
	/* Remember the buffer so cleanup can dma_free_coherent() it */
	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
	data->client.sec_buf_fd[fd_idx].vbase = buf;
	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
	data->client.sec_buf_fd[fd_idx].size = size;
	return 0;
}
6366
6367static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6368 struct qseecom_dev_handle *data, bool cleanup)
6369{
6370 struct ion_handle *ihandle;
6371 int ret = 0;
6372 int i = 0;
6373 uint32_t *update;
6374 struct sg_table *sg_ptr = NULL;
6375 struct scatterlist *sg;
6376 struct qseecom_param_memref *memref;
6377
6378 if (req == NULL) {
6379 pr_err("Invalid address\n");
6380 return -EINVAL;
6381 }
6382 for (i = 0; i < MAX_ION_FD; i++) {
6383 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006384 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006385 req->ifd_data[i].fd);
6386 if (IS_ERR_OR_NULL(ihandle)) {
6387 pr_err("Ion client can't retrieve the handle\n");
6388 return -ENOMEM;
6389 }
6390 if ((req->req_len < sizeof(uint32_t)) ||
6391 (req->ifd_data[i].cmd_buf_offset >
6392 req->req_len - sizeof(uint32_t))) {
6393 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6394 req->req_len,
6395 req->ifd_data[i].cmd_buf_offset);
6396 return -EINVAL;
6397 }
6398 update = (uint32_t *)((char *) req->req_ptr +
6399 req->ifd_data[i].cmd_buf_offset);
6400 if (!update) {
6401 pr_err("update pointer is NULL\n");
6402 return -EINVAL;
6403 }
6404 } else {
6405 continue;
6406 }
6407 /* Populate the cmd data structure with the phys_addr */
6408 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6409 if (IS_ERR_OR_NULL(sg_ptr)) {
6410 pr_err("IOn client could not retrieve sg table\n");
6411 goto err;
6412 }
6413 sg = sg_ptr->sgl;
6414 if (sg == NULL) {
6415 pr_err("sg is NULL\n");
6416 goto err;
6417 }
6418 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6419 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6420 sg_ptr->nents, sg->length);
6421 goto err;
6422 }
6423 /* clean up buf for pre-allocated fd */
6424 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6425 (*update)) {
6426 if (data->client.sec_buf_fd[i].vbase)
6427 dma_free_coherent(qseecom.pdev,
6428 data->client.sec_buf_fd[i].size,
6429 data->client.sec_buf_fd[i].vbase,
6430 data->client.sec_buf_fd[i].pbase);
6431 memset((void *)update, 0,
6432 sizeof(struct qseecom_param_memref));
6433 memset(&(data->client.sec_buf_fd[i]), 0,
6434 sizeof(struct qseecom_sec_buf_fd_info));
6435 goto clean;
6436 }
6437
6438 if (*update == 0) {
6439 /* update buf for pre-allocated fd from secure heap*/
6440 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6441 sg_ptr);
6442 if (ret) {
6443 pr_err("Failed to handle buf for fd[%d]\n", i);
6444 goto err;
6445 }
6446 memref = (struct qseecom_param_memref *)update;
6447 memref->buffer =
6448 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6449 memref->size =
6450 (uint32_t)(data->client.sec_buf_fd[i].size);
6451 } else {
6452 /* update buf for fd from non-secure qseecom heap */
6453 if (sg_ptr->nents != 1) {
6454 pr_err("Num of scat entr (%d) invalid\n",
6455 sg_ptr->nents);
6456 goto err;
6457 }
6458 if (cleanup)
6459 *update = 0;
6460 else
6461 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6462 }
6463clean:
6464 if (cleanup) {
6465 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6466 ihandle, NULL, sg->length,
6467 ION_IOC_INV_CACHES);
6468 if (ret) {
6469 pr_err("cache operation failed %d\n", ret);
6470 goto err;
6471 }
6472 } else {
6473 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6474 ihandle, NULL, sg->length,
6475 ION_IOC_CLEAN_INV_CACHES);
6476 if (ret) {
6477 pr_err("cache operation failed %d\n", ret);
6478 goto err;
6479 }
6480 data->sglistinfo_ptr[i].indexAndFlags =
6481 SGLISTINFO_SET_INDEX_FLAG(
6482 (sg_ptr->nents == 1), 0,
6483 req->ifd_data[i].cmd_buf_offset);
6484 data->sglistinfo_ptr[i].sizeOrCount =
6485 (sg_ptr->nents == 1) ?
6486 sg->length : sg_ptr->nents;
6487 data->sglist_cnt = i + 1;
6488 }
6489 /* Deallocate the handle */
6490 if (!IS_ERR_OR_NULL(ihandle))
6491 ion_free(qseecom.ion_clnt, ihandle);
6492 }
6493 return ret;
6494err:
6495 if (!IS_ERR_OR_NULL(ihandle))
6496 ion_free(qseecom.ion_clnt, ihandle);
6497 return -ENOMEM;
6498}
6499
/*
 * __qseecom_qteec_issue_cmd() - common path for QTEEC open-session,
 * close-session and request-cancellation commands.
 *
 * Validates the request, resolves the owning TA, converts the user
 * pointers, builds the 32- or 64-bit ireq (depending on QSEE version),
 * performs cache maintenance on the shared buffer, issues the SCM call
 * and post-processes the TZ response (reentrancy / incomplete handling).
 *
 * @data:   per-fd device handle for a loaded client app.
 * @req:    request whose pointers are user virtual addresses on entry;
 *          rewritten in place to kernel virtual addresses below.
 * @cmd_id: QSEOS_TEE_* command to issue.
 *
 * Return: 0 on success, negative errno on any failure.
 */
static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req, uint32_t cmd_id)
{
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = __qseecom_qteec_validate_msg(data, req);
	if (ret)
		return ret;

	/*
	 * Save the original user virtual addresses: the ireq built below
	 * must translate the *user* addresses to physical, while req's
	 * fields are rewritten to kernel virtual addresses for local use.
	 */
	req_ptr = req->req_ptr;
	resp_ptr = req->resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* From here on req's pointers are kernel virtual addresses */
	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->req_ptr);
	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->resp_ptr);

	/* Open/cancel may carry ion fds that need patching into the buf */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, false);
		if (ret)
			return ret;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy 32-bit ireq layout */
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req->req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req->resp_len;
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* TZ reads the table by physical address: flush it out */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req->req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req->resp_len;
		/* A 32-bit TA cannot address buffers at or above 4 GB */
		if ((data->client.app_arch == ELFCLASS32) &&
			((ireq_64bit.req_ptr >=
				PHY_ADDR_4G - ireq_64bit.req_len) ||
			(ireq_64bit.resp_ptr >=
				PHY_ADDR_4G - ireq_64bit.resp_len))){
			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
				data->client.app_name, data->client.app_id);
			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
				ireq_64bit.req_ptr, ireq_64bit.req_len,
				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
			return -EFAULT;
		}
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
	}
	/* First word of either ireq layout is the command id */
	if (qseecom.whitelist_support == true
		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
	else
		*(uint32_t *)cmd_buf = cmd_id;

	/* Make the request portion of the shared buffer visible to TZ */
	reqd_len_sb_in = req->req_len + req->resp_len;
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}

	/* Resolve listener callbacks / incomplete responses from TZ */
	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* TZ wrote the response: invalidate so the CPU sees fresh data */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	/* Undo the fd patching done before the call */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, true);
		if (ret)
			return ret;
	}
	return 0;
}
6657
6658static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6659 void __user *argp)
6660{
6661 struct qseecom_qteec_modfd_req req;
6662 int ret = 0;
6663
6664 ret = copy_from_user(&req, argp,
6665 sizeof(struct qseecom_qteec_modfd_req));
6666 if (ret) {
6667 pr_err("copy_from_user failed\n");
6668 return ret;
6669 }
6670 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6671 QSEOS_TEE_OPEN_SESSION);
6672
6673 return ret;
6674}
6675
6676static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6677 void __user *argp)
6678{
6679 struct qseecom_qteec_req req;
6680 int ret = 0;
6681
6682 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6683 if (ret) {
6684 pr_err("copy_from_user failed\n");
6685 return ret;
6686 }
6687 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6688 return ret;
6689}
6690
/*
 * qseecom_qteec_invoke_modfd_cmd() - ioctl handler: invoke a QTEEC
 * command whose buffer may reference ion fds.
 *
 * Mirrors __qseecom_qteec_issue_cmd() but always patches fd references
 * into the command buffer before the SCM call and un-patches them after.
 * Pointers in the local copy of the request are rewritten from user to
 * kernel virtual addresses; the original user addresses are kept in
 * req_ptr/resp_ptr for the user-virt-to-phys translation in the ireq.
 *
 * Return: 0 on success, negative errno on failure.
 * NOTE(review): copy_from_user() failure returns the positive
 * bytes-not-copied count rather than -EFAULT -- matches the sibling
 * handlers' legacy behavior; callers only test for non-zero.
 */
static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	struct qseecom_qteec_modfd_req req;
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int i = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = copy_from_user(&req, argp,
			sizeof(struct qseecom_qteec_modfd_req));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}
	ret = __qseecom_qteec_validate_msg(data,
					(struct qseecom_qteec_req *)(&req));
	if (ret)
		return ret;
	/* Keep the user virtual addresses for the phys translation below */
	req_ptr = req.req_ptr;
	resp_ptr = req.resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (req.ifd_data[i].fd) {
			if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
				return -EINVAL;
		}
	}
	/* Local copy now uses kernel virtual addresses */
	req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.req_ptr);
	req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.resp_ptr);
	/* Patch ion-fd physical addresses into the command buffer */
	ret = __qseecom_update_qteec_req_buf(&req, data, false);
	if (ret)
		return ret;

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy 32-bit ireq layout */
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req.req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* TZ reads the table by physical address: flush it out */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req.req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
	}
	reqd_len_sb_in = req.req_len + req.resp_len;
	/* First word of either ireq layout is the command id */
	if (qseecom.whitelist_support == true)
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
	else
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;

	/* Make the request portion of the shared buffer visible to TZ */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}

	/* Resolve listener callbacks / incomplete responses from TZ */
	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* Un-patch the fd references written before the call */
	ret = __qseecom_update_qteec_req_buf(&req, data, true);
	if (ret)
		return ret;

	/* TZ wrote the response: invalidate so the CPU sees fresh data */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}
	return 0;
}
6841
6842static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6843 void __user *argp)
6844{
6845 struct qseecom_qteec_modfd_req req;
6846 int ret = 0;
6847
6848 ret = copy_from_user(&req, argp,
6849 sizeof(struct qseecom_qteec_modfd_req));
6850 if (ret) {
6851 pr_err("copy_from_user failed\n");
6852 return ret;
6853 }
6854 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6855 QSEOS_TEE_REQUEST_CANCELLATION);
6856
6857 return ret;
6858}
6859
6860static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6861{
6862 if (data->sglist_cnt) {
6863 memset(data->sglistinfo_ptr, 0,
6864 SGLISTINFO_TABLE_SIZE);
6865 data->sglist_cnt = 0;
6866 }
6867}
6868
6869static inline long qseecom_ioctl(struct file *file,
6870 unsigned int cmd, unsigned long arg)
6871{
6872 int ret = 0;
6873 struct qseecom_dev_handle *data = file->private_data;
6874 void __user *argp = (void __user *) arg;
6875 bool perf_enabled = false;
6876
6877 if (!data) {
6878 pr_err("Invalid/uninitialized device handle\n");
6879 return -EINVAL;
6880 }
6881
6882 if (data->abort) {
6883 pr_err("Aborting qseecom driver\n");
6884 return -ENODEV;
6885 }
6886
6887 switch (cmd) {
6888 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
6889 if (data->type != QSEECOM_GENERIC) {
6890 pr_err("reg lstnr req: invalid handle (%d)\n",
6891 data->type);
6892 ret = -EINVAL;
6893 break;
6894 }
6895 pr_debug("ioctl register_listener_req()\n");
6896 mutex_lock(&app_access_lock);
6897 atomic_inc(&data->ioctl_count);
6898 data->type = QSEECOM_LISTENER_SERVICE;
6899 ret = qseecom_register_listener(data, argp);
6900 atomic_dec(&data->ioctl_count);
6901 wake_up_all(&data->abort_wq);
6902 mutex_unlock(&app_access_lock);
6903 if (ret)
6904 pr_err("failed qseecom_register_listener: %d\n", ret);
6905 break;
6906 }
6907 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
6908 if ((data->listener.id == 0) ||
6909 (data->type != QSEECOM_LISTENER_SERVICE)) {
6910 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
6911 data->type, data->listener.id);
6912 ret = -EINVAL;
6913 break;
6914 }
6915 pr_debug("ioctl unregister_listener_req()\n");
6916 mutex_lock(&app_access_lock);
6917 atomic_inc(&data->ioctl_count);
6918 ret = qseecom_unregister_listener(data);
6919 atomic_dec(&data->ioctl_count);
6920 wake_up_all(&data->abort_wq);
6921 mutex_unlock(&app_access_lock);
6922 if (ret)
6923 pr_err("failed qseecom_unregister_listener: %d\n", ret);
6924 break;
6925 }
6926 case QSEECOM_IOCTL_SEND_CMD_REQ: {
6927 if ((data->client.app_id == 0) ||
6928 (data->type != QSEECOM_CLIENT_APP)) {
6929 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
6930 data->type, data->client.app_id);
6931 ret = -EINVAL;
6932 break;
6933 }
6934 /* Only one client allowed here at a time */
6935 mutex_lock(&app_access_lock);
6936 if (qseecom.support_bus_scaling) {
6937 /* register bus bw in case the client doesn't do it */
6938 if (!data->mode) {
6939 mutex_lock(&qsee_bw_mutex);
6940 __qseecom_register_bus_bandwidth_needs(
6941 data, HIGH);
6942 mutex_unlock(&qsee_bw_mutex);
6943 }
6944 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
6945 if (ret) {
6946 pr_err("Failed to set bw.\n");
6947 ret = -EINVAL;
6948 mutex_unlock(&app_access_lock);
6949 break;
6950 }
6951 }
6952 /*
6953 * On targets where crypto clock is handled by HLOS,
6954 * if clk_access_cnt is zero and perf_enabled is false,
6955 * then the crypto clock was not enabled before sending cmd to
6956 * tz, qseecom will enable the clock to avoid service failure.
6957 */
6958 if (!qseecom.no_clock_support &&
6959 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
6960 pr_debug("ce clock is not enabled!\n");
6961 ret = qseecom_perf_enable(data);
6962 if (ret) {
6963 pr_err("Failed to vote for clock with err %d\n",
6964 ret);
6965 mutex_unlock(&app_access_lock);
6966 ret = -EINVAL;
6967 break;
6968 }
6969 perf_enabled = true;
6970 }
6971 atomic_inc(&data->ioctl_count);
6972 ret = qseecom_send_cmd(data, argp);
6973 if (qseecom.support_bus_scaling)
6974 __qseecom_add_bw_scale_down_timer(
6975 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
6976 if (perf_enabled) {
6977 qsee_disable_clock_vote(data, CLK_DFAB);
6978 qsee_disable_clock_vote(data, CLK_SFPB);
6979 }
6980 atomic_dec(&data->ioctl_count);
6981 wake_up_all(&data->abort_wq);
6982 mutex_unlock(&app_access_lock);
6983 if (ret)
6984 pr_err("failed qseecom_send_cmd: %d\n", ret);
6985 break;
6986 }
6987 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
6988 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
6989 if ((data->client.app_id == 0) ||
6990 (data->type != QSEECOM_CLIENT_APP)) {
6991 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
6992 data->type, data->client.app_id);
6993 ret = -EINVAL;
6994 break;
6995 }
6996 /* Only one client allowed here at a time */
6997 mutex_lock(&app_access_lock);
6998 if (qseecom.support_bus_scaling) {
6999 if (!data->mode) {
7000 mutex_lock(&qsee_bw_mutex);
7001 __qseecom_register_bus_bandwidth_needs(
7002 data, HIGH);
7003 mutex_unlock(&qsee_bw_mutex);
7004 }
7005 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7006 if (ret) {
7007 pr_err("Failed to set bw.\n");
7008 mutex_unlock(&app_access_lock);
7009 ret = -EINVAL;
7010 break;
7011 }
7012 }
7013 /*
7014 * On targets where crypto clock is handled by HLOS,
7015 * if clk_access_cnt is zero and perf_enabled is false,
7016 * then the crypto clock was not enabled before sending cmd to
7017 * tz, qseecom will enable the clock to avoid service failure.
7018 */
7019 if (!qseecom.no_clock_support &&
7020 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7021 pr_debug("ce clock is not enabled!\n");
7022 ret = qseecom_perf_enable(data);
7023 if (ret) {
7024 pr_err("Failed to vote for clock with err %d\n",
7025 ret);
7026 mutex_unlock(&app_access_lock);
7027 ret = -EINVAL;
7028 break;
7029 }
7030 perf_enabled = true;
7031 }
7032 atomic_inc(&data->ioctl_count);
7033 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7034 ret = qseecom_send_modfd_cmd(data, argp);
7035 else
7036 ret = qseecom_send_modfd_cmd_64(data, argp);
7037 if (qseecom.support_bus_scaling)
7038 __qseecom_add_bw_scale_down_timer(
7039 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7040 if (perf_enabled) {
7041 qsee_disable_clock_vote(data, CLK_DFAB);
7042 qsee_disable_clock_vote(data, CLK_SFPB);
7043 }
7044 atomic_dec(&data->ioctl_count);
7045 wake_up_all(&data->abort_wq);
7046 mutex_unlock(&app_access_lock);
7047 if (ret)
7048 pr_err("failed qseecom_send_cmd: %d\n", ret);
7049 __qseecom_clean_data_sglistinfo(data);
7050 break;
7051 }
7052 case QSEECOM_IOCTL_RECEIVE_REQ: {
7053 if ((data->listener.id == 0) ||
7054 (data->type != QSEECOM_LISTENER_SERVICE)) {
7055 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7056 data->type, data->listener.id);
7057 ret = -EINVAL;
7058 break;
7059 }
7060 atomic_inc(&data->ioctl_count);
7061 ret = qseecom_receive_req(data);
7062 atomic_dec(&data->ioctl_count);
7063 wake_up_all(&data->abort_wq);
7064 if (ret && (ret != -ERESTARTSYS))
7065 pr_err("failed qseecom_receive_req: %d\n", ret);
7066 break;
7067 }
7068 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7069 if ((data->listener.id == 0) ||
7070 (data->type != QSEECOM_LISTENER_SERVICE)) {
7071 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7072 data->type, data->listener.id);
7073 ret = -EINVAL;
7074 break;
7075 }
7076 atomic_inc(&data->ioctl_count);
7077 if (!qseecom.qsee_reentrancy_support)
7078 ret = qseecom_send_resp();
7079 else
7080 ret = qseecom_reentrancy_send_resp(data);
7081 atomic_dec(&data->ioctl_count);
7082 wake_up_all(&data->abort_wq);
7083 if (ret)
7084 pr_err("failed qseecom_send_resp: %d\n", ret);
7085 break;
7086 }
7087 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7088 if ((data->type != QSEECOM_CLIENT_APP) &&
7089 (data->type != QSEECOM_GENERIC) &&
7090 (data->type != QSEECOM_SECURE_SERVICE)) {
7091 pr_err("set mem param req: invalid handle (%d)\n",
7092 data->type);
7093 ret = -EINVAL;
7094 break;
7095 }
7096 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7097 mutex_lock(&app_access_lock);
7098 atomic_inc(&data->ioctl_count);
7099 ret = qseecom_set_client_mem_param(data, argp);
7100 atomic_dec(&data->ioctl_count);
7101 mutex_unlock(&app_access_lock);
7102 if (ret)
7103 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7104 ret);
7105 break;
7106 }
7107 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7108 if ((data->type != QSEECOM_GENERIC) &&
7109 (data->type != QSEECOM_CLIENT_APP)) {
7110 pr_err("load app req: invalid handle (%d)\n",
7111 data->type);
7112 ret = -EINVAL;
7113 break;
7114 }
7115 data->type = QSEECOM_CLIENT_APP;
7116 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7117 mutex_lock(&app_access_lock);
7118 atomic_inc(&data->ioctl_count);
7119 ret = qseecom_load_app(data, argp);
7120 atomic_dec(&data->ioctl_count);
7121 mutex_unlock(&app_access_lock);
7122 if (ret)
7123 pr_err("failed load_app request: %d\n", ret);
7124 break;
7125 }
7126 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7127 if ((data->client.app_id == 0) ||
7128 (data->type != QSEECOM_CLIENT_APP)) {
7129 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7130 data->type, data->client.app_id);
7131 ret = -EINVAL;
7132 break;
7133 }
7134 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7135 mutex_lock(&app_access_lock);
7136 atomic_inc(&data->ioctl_count);
7137 ret = qseecom_unload_app(data, false);
7138 atomic_dec(&data->ioctl_count);
7139 mutex_unlock(&app_access_lock);
7140 if (ret)
7141 pr_err("failed unload_app request: %d\n", ret);
7142 break;
7143 }
7144 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7145 atomic_inc(&data->ioctl_count);
7146 ret = qseecom_get_qseos_version(data, argp);
7147 if (ret)
7148 pr_err("qseecom_get_qseos_version: %d\n", ret);
7149 atomic_dec(&data->ioctl_count);
7150 break;
7151 }
7152 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7153 if ((data->type != QSEECOM_GENERIC) &&
7154 (data->type != QSEECOM_CLIENT_APP)) {
7155 pr_err("perf enable req: invalid handle (%d)\n",
7156 data->type);
7157 ret = -EINVAL;
7158 break;
7159 }
7160 if ((data->type == QSEECOM_CLIENT_APP) &&
7161 (data->client.app_id == 0)) {
7162 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7163 data->type, data->client.app_id);
7164 ret = -EINVAL;
7165 break;
7166 }
7167 atomic_inc(&data->ioctl_count);
7168 if (qseecom.support_bus_scaling) {
7169 mutex_lock(&qsee_bw_mutex);
7170 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7171 mutex_unlock(&qsee_bw_mutex);
7172 } else {
7173 ret = qseecom_perf_enable(data);
7174 if (ret)
7175 pr_err("Fail to vote for clocks %d\n", ret);
7176 }
7177 atomic_dec(&data->ioctl_count);
7178 break;
7179 }
7180 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7181 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7182 (data->type != QSEECOM_CLIENT_APP)) {
7183 pr_err("perf disable req: invalid handle (%d)\n",
7184 data->type);
7185 ret = -EINVAL;
7186 break;
7187 }
7188 if ((data->type == QSEECOM_CLIENT_APP) &&
7189 (data->client.app_id == 0)) {
7190 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7191 data->type, data->client.app_id);
7192 ret = -EINVAL;
7193 break;
7194 }
7195 atomic_inc(&data->ioctl_count);
7196 if (!qseecom.support_bus_scaling) {
7197 qsee_disable_clock_vote(data, CLK_DFAB);
7198 qsee_disable_clock_vote(data, CLK_SFPB);
7199 } else {
7200 mutex_lock(&qsee_bw_mutex);
7201 qseecom_unregister_bus_bandwidth_needs(data);
7202 mutex_unlock(&qsee_bw_mutex);
7203 }
7204 atomic_dec(&data->ioctl_count);
7205 break;
7206 }
7207
7208 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7209 /* If crypto clock is not handled by HLOS, return directly. */
7210 if (qseecom.no_clock_support) {
7211 pr_debug("crypto clock is not handled by HLOS\n");
7212 break;
7213 }
7214 if ((data->client.app_id == 0) ||
7215 (data->type != QSEECOM_CLIENT_APP)) {
7216 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7217 data->type, data->client.app_id);
7218 ret = -EINVAL;
7219 break;
7220 }
7221 atomic_inc(&data->ioctl_count);
7222 ret = qseecom_scale_bus_bandwidth(data, argp);
7223 atomic_dec(&data->ioctl_count);
7224 break;
7225 }
7226 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7227 if (data->type != QSEECOM_GENERIC) {
7228 pr_err("load ext elf req: invalid client handle (%d)\n",
7229 data->type);
7230 ret = -EINVAL;
7231 break;
7232 }
7233 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7234 data->released = true;
7235 mutex_lock(&app_access_lock);
7236 atomic_inc(&data->ioctl_count);
7237 ret = qseecom_load_external_elf(data, argp);
7238 atomic_dec(&data->ioctl_count);
7239 mutex_unlock(&app_access_lock);
7240 if (ret)
7241 pr_err("failed load_external_elf request: %d\n", ret);
7242 break;
7243 }
7244 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7245 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7246 pr_err("unload ext elf req: invalid handle (%d)\n",
7247 data->type);
7248 ret = -EINVAL;
7249 break;
7250 }
7251 data->released = true;
7252 mutex_lock(&app_access_lock);
7253 atomic_inc(&data->ioctl_count);
7254 ret = qseecom_unload_external_elf(data);
7255 atomic_dec(&data->ioctl_count);
7256 mutex_unlock(&app_access_lock);
7257 if (ret)
7258 pr_err("failed unload_app request: %d\n", ret);
7259 break;
7260 }
7261 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7262 data->type = QSEECOM_CLIENT_APP;
7263 mutex_lock(&app_access_lock);
7264 atomic_inc(&data->ioctl_count);
7265 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7266 ret = qseecom_query_app_loaded(data, argp);
7267 atomic_dec(&data->ioctl_count);
7268 mutex_unlock(&app_access_lock);
7269 break;
7270 }
7271 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7272 if (data->type != QSEECOM_GENERIC) {
7273 pr_err("send cmd svc req: invalid handle (%d)\n",
7274 data->type);
7275 ret = -EINVAL;
7276 break;
7277 }
7278 data->type = QSEECOM_SECURE_SERVICE;
7279 if (qseecom.qsee_version < QSEE_VERSION_03) {
7280 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7281 qseecom.qsee_version);
7282 return -EINVAL;
7283 }
7284 mutex_lock(&app_access_lock);
7285 atomic_inc(&data->ioctl_count);
7286 ret = qseecom_send_service_cmd(data, argp);
7287 atomic_dec(&data->ioctl_count);
7288 mutex_unlock(&app_access_lock);
7289 break;
7290 }
7291 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7292 if (!(qseecom.support_pfe || qseecom.support_fde))
7293 pr_err("Features requiring key init not supported\n");
7294 if (data->type != QSEECOM_GENERIC) {
7295 pr_err("create key req: invalid handle (%d)\n",
7296 data->type);
7297 ret = -EINVAL;
7298 break;
7299 }
7300 if (qseecom.qsee_version < QSEE_VERSION_05) {
7301 pr_err("Create Key feature unsupported: qsee ver %u\n",
7302 qseecom.qsee_version);
7303 return -EINVAL;
7304 }
7305 data->released = true;
7306 mutex_lock(&app_access_lock);
7307 atomic_inc(&data->ioctl_count);
7308 ret = qseecom_create_key(data, argp);
7309 if (ret)
7310 pr_err("failed to create encryption key: %d\n", ret);
7311
7312 atomic_dec(&data->ioctl_count);
7313 mutex_unlock(&app_access_lock);
7314 break;
7315 }
7316 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7317 if (!(qseecom.support_pfe || qseecom.support_fde))
7318 pr_err("Features requiring key init not supported\n");
7319 if (data->type != QSEECOM_GENERIC) {
7320 pr_err("wipe key req: invalid handle (%d)\n",
7321 data->type);
7322 ret = -EINVAL;
7323 break;
7324 }
7325 if (qseecom.qsee_version < QSEE_VERSION_05) {
7326 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7327 qseecom.qsee_version);
7328 return -EINVAL;
7329 }
7330 data->released = true;
7331 mutex_lock(&app_access_lock);
7332 atomic_inc(&data->ioctl_count);
7333 ret = qseecom_wipe_key(data, argp);
7334 if (ret)
7335 pr_err("failed to wipe encryption key: %d\n", ret);
7336 atomic_dec(&data->ioctl_count);
7337 mutex_unlock(&app_access_lock);
7338 break;
7339 }
7340 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7341 if (!(qseecom.support_pfe || qseecom.support_fde))
7342 pr_err("Features requiring key init not supported\n");
7343 if (data->type != QSEECOM_GENERIC) {
7344 pr_err("update key req: invalid handle (%d)\n",
7345 data->type);
7346 ret = -EINVAL;
7347 break;
7348 }
7349 if (qseecom.qsee_version < QSEE_VERSION_05) {
7350 pr_err("Update Key feature unsupported in qsee ver %u\n",
7351 qseecom.qsee_version);
7352 return -EINVAL;
7353 }
7354 data->released = true;
7355 mutex_lock(&app_access_lock);
7356 atomic_inc(&data->ioctl_count);
7357 ret = qseecom_update_key_user_info(data, argp);
7358 if (ret)
7359 pr_err("failed to update key user info: %d\n", ret);
7360 atomic_dec(&data->ioctl_count);
7361 mutex_unlock(&app_access_lock);
7362 break;
7363 }
7364 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7365 if (data->type != QSEECOM_GENERIC) {
7366 pr_err("save part hash req: invalid handle (%d)\n",
7367 data->type);
7368 ret = -EINVAL;
7369 break;
7370 }
7371 data->released = true;
7372 mutex_lock(&app_access_lock);
7373 atomic_inc(&data->ioctl_count);
7374 ret = qseecom_save_partition_hash(argp);
7375 atomic_dec(&data->ioctl_count);
7376 mutex_unlock(&app_access_lock);
7377 break;
7378 }
7379 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7380 if (data->type != QSEECOM_GENERIC) {
7381 pr_err("ES activated req: invalid handle (%d)\n",
7382 data->type);
7383 ret = -EINVAL;
7384 break;
7385 }
7386 data->released = true;
7387 mutex_lock(&app_access_lock);
7388 atomic_inc(&data->ioctl_count);
7389 ret = qseecom_is_es_activated(argp);
7390 atomic_dec(&data->ioctl_count);
7391 mutex_unlock(&app_access_lock);
7392 break;
7393 }
7394 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7395 if (data->type != QSEECOM_GENERIC) {
7396 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7397 data->type);
7398 ret = -EINVAL;
7399 break;
7400 }
7401 data->released = true;
7402 mutex_lock(&app_access_lock);
7403 atomic_inc(&data->ioctl_count);
7404 ret = qseecom_mdtp_cipher_dip(argp);
7405 atomic_dec(&data->ioctl_count);
7406 mutex_unlock(&app_access_lock);
7407 break;
7408 }
7409 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7410 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7411 if ((data->listener.id == 0) ||
7412 (data->type != QSEECOM_LISTENER_SERVICE)) {
7413 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7414 data->type, data->listener.id);
7415 ret = -EINVAL;
7416 break;
7417 }
7418 atomic_inc(&data->ioctl_count);
7419 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7420 ret = qseecom_send_modfd_resp(data, argp);
7421 else
7422 ret = qseecom_send_modfd_resp_64(data, argp);
7423 atomic_dec(&data->ioctl_count);
7424 wake_up_all(&data->abort_wq);
7425 if (ret)
7426 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7427 __qseecom_clean_data_sglistinfo(data);
7428 break;
7429 }
7430 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7431 if ((data->client.app_id == 0) ||
7432 (data->type != QSEECOM_CLIENT_APP)) {
7433 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7434 data->type, data->client.app_id);
7435 ret = -EINVAL;
7436 break;
7437 }
7438 if (qseecom.qsee_version < QSEE_VERSION_40) {
7439 pr_err("GP feature unsupported: qsee ver %u\n",
7440 qseecom.qsee_version);
7441 return -EINVAL;
7442 }
7443 /* Only one client allowed here at a time */
7444 mutex_lock(&app_access_lock);
7445 atomic_inc(&data->ioctl_count);
7446 ret = qseecom_qteec_open_session(data, argp);
7447 atomic_dec(&data->ioctl_count);
7448 wake_up_all(&data->abort_wq);
7449 mutex_unlock(&app_access_lock);
7450 if (ret)
7451 pr_err("failed open_session_cmd: %d\n", ret);
7452 __qseecom_clean_data_sglistinfo(data);
7453 break;
7454 }
7455 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7456 if ((data->client.app_id == 0) ||
7457 (data->type != QSEECOM_CLIENT_APP)) {
7458 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7459 data->type, data->client.app_id);
7460 ret = -EINVAL;
7461 break;
7462 }
7463 if (qseecom.qsee_version < QSEE_VERSION_40) {
7464 pr_err("GP feature unsupported: qsee ver %u\n",
7465 qseecom.qsee_version);
7466 return -EINVAL;
7467 }
7468 /* Only one client allowed here at a time */
7469 mutex_lock(&app_access_lock);
7470 atomic_inc(&data->ioctl_count);
7471 ret = qseecom_qteec_close_session(data, argp);
7472 atomic_dec(&data->ioctl_count);
7473 wake_up_all(&data->abort_wq);
7474 mutex_unlock(&app_access_lock);
7475 if (ret)
7476 pr_err("failed close_session_cmd: %d\n", ret);
7477 break;
7478 }
7479 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7480 if ((data->client.app_id == 0) ||
7481 (data->type != QSEECOM_CLIENT_APP)) {
7482 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7483 data->type, data->client.app_id);
7484 ret = -EINVAL;
7485 break;
7486 }
7487 if (qseecom.qsee_version < QSEE_VERSION_40) {
7488 pr_err("GP feature unsupported: qsee ver %u\n",
7489 qseecom.qsee_version);
7490 return -EINVAL;
7491 }
7492 /* Only one client allowed here at a time */
7493 mutex_lock(&app_access_lock);
7494 atomic_inc(&data->ioctl_count);
7495 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7496 atomic_dec(&data->ioctl_count);
7497 wake_up_all(&data->abort_wq);
7498 mutex_unlock(&app_access_lock);
7499 if (ret)
7500 pr_err("failed Invoke cmd: %d\n", ret);
7501 __qseecom_clean_data_sglistinfo(data);
7502 break;
7503 }
7504 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7505 if ((data->client.app_id == 0) ||
7506 (data->type != QSEECOM_CLIENT_APP)) {
7507 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7508 data->type, data->client.app_id);
7509 ret = -EINVAL;
7510 break;
7511 }
7512 if (qseecom.qsee_version < QSEE_VERSION_40) {
7513 pr_err("GP feature unsupported: qsee ver %u\n",
7514 qseecom.qsee_version);
7515 return -EINVAL;
7516 }
7517 /* Only one client allowed here at a time */
7518 mutex_lock(&app_access_lock);
7519 atomic_inc(&data->ioctl_count);
7520 ret = qseecom_qteec_request_cancellation(data, argp);
7521 atomic_dec(&data->ioctl_count);
7522 wake_up_all(&data->abort_wq);
7523 mutex_unlock(&app_access_lock);
7524 if (ret)
7525 pr_err("failed request_cancellation: %d\n", ret);
7526 break;
7527 }
7528 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7529 atomic_inc(&data->ioctl_count);
7530 ret = qseecom_get_ce_info(data, argp);
7531 if (ret)
7532 pr_err("failed get fde ce pipe info: %d\n", ret);
7533 atomic_dec(&data->ioctl_count);
7534 break;
7535 }
7536 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7537 atomic_inc(&data->ioctl_count);
7538 ret = qseecom_free_ce_info(data, argp);
7539 if (ret)
7540 pr_err("failed get fde ce pipe info: %d\n", ret);
7541 atomic_dec(&data->ioctl_count);
7542 break;
7543 }
7544 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7545 atomic_inc(&data->ioctl_count);
7546 ret = qseecom_query_ce_info(data, argp);
7547 if (ret)
7548 pr_err("failed get fde ce pipe info: %d\n", ret);
7549 atomic_dec(&data->ioctl_count);
7550 break;
7551 }
7552 default:
7553 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7554 return -EINVAL;
7555 }
7556 return ret;
7557}
7558
7559static int qseecom_open(struct inode *inode, struct file *file)
7560{
7561 int ret = 0;
7562 struct qseecom_dev_handle *data;
7563
7564 data = kzalloc(sizeof(*data), GFP_KERNEL);
7565 if (!data)
7566 return -ENOMEM;
7567 file->private_data = data;
7568 data->abort = 0;
7569 data->type = QSEECOM_GENERIC;
7570 data->released = false;
7571 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7572 data->mode = INACTIVE;
7573 init_waitqueue_head(&data->abort_wq);
7574 atomic_set(&data->ioctl_count, 0);
7575 return ret;
7576}
7577
7578static int qseecom_release(struct inode *inode, struct file *file)
7579{
7580 struct qseecom_dev_handle *data = file->private_data;
7581 int ret = 0;
7582
7583 if (data->released == false) {
7584 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
7585 data->type, data->mode, data);
7586 switch (data->type) {
7587 case QSEECOM_LISTENER_SERVICE:
7588 mutex_lock(&app_access_lock);
7589 ret = qseecom_unregister_listener(data);
7590 mutex_unlock(&app_access_lock);
7591 break;
7592 case QSEECOM_CLIENT_APP:
7593 mutex_lock(&app_access_lock);
7594 ret = qseecom_unload_app(data, true);
7595 mutex_unlock(&app_access_lock);
7596 break;
7597 case QSEECOM_SECURE_SERVICE:
7598 case QSEECOM_GENERIC:
7599 ret = qseecom_unmap_ion_allocated_memory(data);
7600 if (ret)
7601 pr_err("Ion Unmap failed\n");
7602 break;
7603 case QSEECOM_UNAVAILABLE_CLIENT_APP:
7604 break;
7605 default:
7606 pr_err("Unsupported clnt_handle_type %d",
7607 data->type);
7608 break;
7609 }
7610 }
7611
7612 if (qseecom.support_bus_scaling) {
7613 mutex_lock(&qsee_bw_mutex);
7614 if (data->mode != INACTIVE) {
7615 qseecom_unregister_bus_bandwidth_needs(data);
7616 if (qseecom.cumulative_mode == INACTIVE) {
7617 ret = __qseecom_set_msm_bus_request(INACTIVE);
7618 if (ret)
7619 pr_err("Fail to scale down bus\n");
7620 }
7621 }
7622 mutex_unlock(&qsee_bw_mutex);
7623 } else {
7624 if (data->fast_load_enabled == true)
7625 qsee_disable_clock_vote(data, CLK_SFPB);
7626 if (data->perf_enabled == true)
7627 qsee_disable_clock_vote(data, CLK_DFAB);
7628 }
7629 kfree(data);
7630
7631 return ret;
7632}
7633
7634#ifdef CONFIG_COMPAT
7635#include "compat_qseecom.c"
7636#else
7637#define compat_qseecom_ioctl NULL
7638#endif
7639
/* File operations for the qseecom character device. */
static const struct file_operations qseecom_fops = {
		.owner = THIS_MODULE,
		.unlocked_ioctl = qseecom_ioctl,
		.compat_ioctl = compat_qseecom_ioctl,
		.open = qseecom_open,
		.release = qseecom_release
};
7647
7648static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7649{
7650 int rc = 0;
7651 struct device *pdev;
7652 struct qseecom_clk *qclk;
7653 char *core_clk_src = NULL;
7654 char *core_clk = NULL;
7655 char *iface_clk = NULL;
7656 char *bus_clk = NULL;
7657
7658 switch (ce) {
7659 case CLK_QSEE: {
7660 core_clk_src = "core_clk_src";
7661 core_clk = "core_clk";
7662 iface_clk = "iface_clk";
7663 bus_clk = "bus_clk";
7664 qclk = &qseecom.qsee;
7665 qclk->instance = CLK_QSEE;
7666 break;
7667 };
7668 case CLK_CE_DRV: {
7669 core_clk_src = "ce_drv_core_clk_src";
7670 core_clk = "ce_drv_core_clk";
7671 iface_clk = "ce_drv_iface_clk";
7672 bus_clk = "ce_drv_bus_clk";
7673 qclk = &qseecom.ce_drv;
7674 qclk->instance = CLK_CE_DRV;
7675 break;
7676 };
7677 default:
7678 pr_err("Invalid ce hw instance: %d!\n", ce);
7679 return -EIO;
7680 }
7681
7682 if (qseecom.no_clock_support) {
7683 qclk->ce_core_clk = NULL;
7684 qclk->ce_clk = NULL;
7685 qclk->ce_bus_clk = NULL;
7686 qclk->ce_core_src_clk = NULL;
7687 return 0;
7688 }
7689
7690 pdev = qseecom.pdev;
7691
7692 /* Get CE3 src core clk. */
7693 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7694 if (!IS_ERR(qclk->ce_core_src_clk)) {
7695 rc = clk_set_rate(qclk->ce_core_src_clk,
7696 qseecom.ce_opp_freq_hz);
7697 if (rc) {
7698 clk_put(qclk->ce_core_src_clk);
7699 qclk->ce_core_src_clk = NULL;
7700 pr_err("Unable to set the core src clk @%uMhz.\n",
7701 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7702 return -EIO;
7703 }
7704 } else {
7705 pr_warn("Unable to get CE core src clk, set to NULL\n");
7706 qclk->ce_core_src_clk = NULL;
7707 }
7708
7709 /* Get CE core clk */
7710 qclk->ce_core_clk = clk_get(pdev, core_clk);
7711 if (IS_ERR(qclk->ce_core_clk)) {
7712 rc = PTR_ERR(qclk->ce_core_clk);
7713 pr_err("Unable to get CE core clk\n");
7714 if (qclk->ce_core_src_clk != NULL)
7715 clk_put(qclk->ce_core_src_clk);
7716 return -EIO;
7717 }
7718
7719 /* Get CE Interface clk */
7720 qclk->ce_clk = clk_get(pdev, iface_clk);
7721 if (IS_ERR(qclk->ce_clk)) {
7722 rc = PTR_ERR(qclk->ce_clk);
7723 pr_err("Unable to get CE interface clk\n");
7724 if (qclk->ce_core_src_clk != NULL)
7725 clk_put(qclk->ce_core_src_clk);
7726 clk_put(qclk->ce_core_clk);
7727 return -EIO;
7728 }
7729
7730 /* Get CE AXI clk */
7731 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7732 if (IS_ERR(qclk->ce_bus_clk)) {
7733 rc = PTR_ERR(qclk->ce_bus_clk);
7734 pr_err("Unable to get CE BUS interface clk\n");
7735 if (qclk->ce_core_src_clk != NULL)
7736 clk_put(qclk->ce_core_src_clk);
7737 clk_put(qclk->ce_core_clk);
7738 clk_put(qclk->ce_clk);
7739 return -EIO;
7740 }
7741
7742 return rc;
7743}
7744
7745static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7746{
7747 struct qseecom_clk *qclk;
7748
7749 if (ce == CLK_QSEE)
7750 qclk = &qseecom.qsee;
7751 else
7752 qclk = &qseecom.ce_drv;
7753
7754 if (qclk->ce_clk != NULL) {
7755 clk_put(qclk->ce_clk);
7756 qclk->ce_clk = NULL;
7757 }
7758 if (qclk->ce_core_clk != NULL) {
7759 clk_put(qclk->ce_core_clk);
7760 qclk->ce_core_clk = NULL;
7761 }
7762 if (qclk->ce_bus_clk != NULL) {
7763 clk_put(qclk->ce_bus_clk);
7764 qclk->ce_bus_clk = NULL;
7765 }
7766 if (qclk->ce_core_src_clk != NULL) {
7767 clk_put(qclk->ce_core_src_clk);
7768 qclk->ce_core_src_clk = NULL;
7769 }
7770 qclk->instance = CLK_INVALID;
7771}
7772
/*
 * qseecom_retrieve_ce_data() - parse crypto-engine (CE) pipe-pair info for
 * FDE (full-disk encryption) and PFE (per-file encryption) from the device
 * tree into qseecom.ce_info.
 *
 * Two DT schemas are supported:
 *  - New DB: "qcom,full-disk-encrypt-info" / "qcom,per-file-encrypt-info"
 *    tables of qseecom_crypto_info entries, grouped by unit_num.
 *  - Old DB (fallback when neither table exists): single pipe-pair
 *    properties plus optional multiple-HW-instance properties.
 *
 * Returns 0 on success or a negative errno; on error all partially built
 * ce_info allocations are freed at the `out` label.
 */
static int qseecom_retrieve_ce_data(struct platform_device *pdev)
{
	int rc = 0;
	uint32_t hlos_num_ce_hw_instances;
	uint32_t disk_encrypt_pipe;
	uint32_t file_encrypt_pipe;
	uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT];
	int i;
	const int *tbl;
	int size;
	int entry;
	struct qseecom_crypto_info *pfde_tbl = NULL;
	struct qseecom_crypto_info *p;
	int tbl_size;
	int j;
	bool old_db = true;
	struct qseecom_ce_info_use *pce_info_use;
	uint32_t *unit_tbl = NULL;
	int total_units = 0;
	struct qseecom_ce_pipe_entry *pce_entry;

	qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
	qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;

	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,qsee-ce-hw-instance",
				&qseecom.ce_info.qsee_ce_hw_instance)) {
		pr_err("Fail to get qsee ce hw instance information.\n");
		rc = -EINVAL;
		goto out;
	} else {
		pr_debug("qsee-ce-hw-instance=0x%x\n",
			qseecom.ce_info.qsee_ce_hw_instance);
	}

	qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-fde");
	qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-pfe");

	/* Neither feature: nothing more to parse, success. */
	if (!qseecom.support_pfe && !qseecom.support_fde) {
		pr_warn("Device does not support PFE/FDE");
		goto out;
	}

	/* --- New-DB FDE table, if present --- */
	if (qseecom.support_fde)
		tbl = of_get_property((&pdev->dev)->of_node,
				"qcom,full-disk-encrypt-info", &size);
	else
		tbl = NULL;
	if (tbl) {
		old_db = false;
		if (size % sizeof(struct qseecom_crypto_info)) {
			pr_err("full-disk-encrypt-info tbl size(%d)\n",
				size);
			rc = -EINVAL;
			goto out;
		}
		tbl_size = size / sizeof
				(struct qseecom_crypto_info);

		pfde_tbl = kzalloc(size, GFP_KERNEL);
		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
		total_units = 0;

		if (!pfde_tbl || !unit_tbl) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,full-disk-encrypt-info",
			(u32 *)pfde_tbl, size/sizeof(u32))) {
			pr_err("failed to read full-disk-encrypt-info tbl\n");
			rc = -EINVAL;
			goto out;
		}

		/* Count distinct unit numbers appearing in the table. */
		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
			for (j = 0; j < total_units; j++) {
				if (p->unit_num == *(unit_tbl + j))
					break;
			}
			if (j == total_units) {
				*(unit_tbl + total_units) = p->unit_num;
				total_units++;
			}
		}

		qseecom.ce_info.num_fde = total_units;
		pce_info_use = qseecom.ce_info.fde = kcalloc(
			total_units, sizeof(struct qseecom_ce_info_use),
			GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}

		/* Build one ce_info_use per unit with its pipe entries. */
		for (j = 0; j < total_units; j++, pce_info_use++) {
			pce_info_use->unit_num = *(unit_tbl + j);
			pce_info_use->alloc = false;
			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
			pce_info_use->num_ce_pipe_entries = 0;
			pce_info_use->ce_pipe_entry = NULL;
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num)
					pce_info_use->num_ce_pipe_entries++;
			}

			entry = pce_info_use->num_ce_pipe_entries;
			pce_entry = pce_info_use->ce_pipe_entry =
				kcalloc(entry,
					sizeof(struct qseecom_ce_pipe_entry),
					GFP_KERNEL);
			if (pce_entry == NULL) {
				pr_err("failed to alloc memory\n");
				rc = -ENOMEM;
				goto out;
			}

			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num) {
					pce_entry->ce_num = p->ce;
					pce_entry->ce_pipe_pair =
							p->pipe_pair;
					pce_entry->valid = true;
					pce_entry++;
				}
			}
		}
		kfree(unit_tbl);
		unit_tbl = NULL;
		kfree(pfde_tbl);
		pfde_tbl = NULL;
	}

	/* --- New-DB PFE table, if present (same shape as FDE above) --- */
	if (qseecom.support_pfe)
		tbl = of_get_property((&pdev->dev)->of_node,
				"qcom,per-file-encrypt-info", &size);
	else
		tbl = NULL;
	if (tbl) {
		old_db = false;
		if (size % sizeof(struct qseecom_crypto_info)) {
			pr_err("per-file-encrypt-info tbl size(%d)\n",
				size);
			rc = -EINVAL;
			goto out;
		}
		tbl_size = size / sizeof
				(struct qseecom_crypto_info);

		pfde_tbl = kzalloc(size, GFP_KERNEL);
		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
		total_units = 0;
		if (!pfde_tbl || !unit_tbl) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,per-file-encrypt-info",
			(u32 *)pfde_tbl, size/sizeof(u32))) {
			pr_err("failed to read per-file-encrypt-info tbl\n");
			rc = -EINVAL;
			goto out;
		}

		/* Count distinct unit numbers appearing in the table. */
		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
			for (j = 0; j < total_units; j++) {
				if (p->unit_num == *(unit_tbl + j))
					break;
			}
			if (j == total_units) {
				*(unit_tbl + total_units) = p->unit_num;
				total_units++;
			}
		}

		qseecom.ce_info.num_pfe = total_units;
		pce_info_use = qseecom.ce_info.pfe = kcalloc(
			total_units, sizeof(struct qseecom_ce_info_use),
			GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}

		/* Build one ce_info_use per unit with its pipe entries. */
		for (j = 0; j < total_units; j++, pce_info_use++) {
			pce_info_use->unit_num = *(unit_tbl + j);
			pce_info_use->alloc = false;
			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
			pce_info_use->num_ce_pipe_entries = 0;
			pce_info_use->ce_pipe_entry = NULL;
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num)
					pce_info_use->num_ce_pipe_entries++;
			}

			entry = pce_info_use->num_ce_pipe_entries;
			pce_entry = pce_info_use->ce_pipe_entry =
				kcalloc(entry,
					sizeof(struct qseecom_ce_pipe_entry),
					GFP_KERNEL);
			if (pce_entry == NULL) {
				pr_err("failed to alloc memory\n");
				rc = -ENOMEM;
				goto out;
			}

			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num) {
					pce_entry->ce_num = p->ce;
					pce_entry->ce_pipe_pair =
							p->pipe_pair;
					pce_entry->valid = true;
					pce_entry++;
				}
			}
		}
		kfree(unit_tbl);
		unit_tbl = NULL;
		kfree(pfde_tbl);
		pfde_tbl = NULL;
	}

	/*
	 * NOTE(review): when either new-DB table was found, we skip straight
	 * to out1, where hlos_ce_hw_instance[0] is read without ever having
	 * been populated on this path — looks like an uninitialized read;
	 * confirm against the DT bindings and callers of ce_drv.instance.
	 */
	if (!old_db)
		goto out1;

	/* --- Old-DB fallback path --- */
	if (of_property_read_bool((&pdev->dev)->of_node,
			"qcom,support-multiple-ce-hw-instance")) {
		if (of_property_read_u32((&pdev->dev)->of_node,
			"qcom,hlos-num-ce-hw-instances",
				&hlos_num_ce_hw_instances)) {
			pr_err("Fail: get hlos number of ce hw instance\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		hlos_num_ce_hw_instances = 1;
	}

	if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
		pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
			MAX_CE_PIPE_PAIR_PER_UNIT);
		rc = -EINVAL;
		goto out;
	}

	if (of_property_read_u32_array((&pdev->dev)->of_node,
		"qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
		hlos_num_ce_hw_instances)) {
		pr_err("Fail: get hlos ce hw instance info\n");
		rc = -EINVAL;
		goto out;
	}

	if (qseecom.support_fde) {
		pce_info_use = qseecom.ce_info.fde =
			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* by default for old db */
		qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
		pce_info_use->alloc = false;
		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
		pce_info_use->ce_pipe_entry = NULL;
		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,disk-encrypt-pipe-pair",
				&disk_encrypt_pipe)) {
			pr_err("Fail to get FDE pipe information.\n");
			rc = -EINVAL;
				goto out;
		} else {
			pr_debug("disk-encrypt-pipe-pair=0x%x",
				disk_encrypt_pipe);
		}
		entry = pce_info_use->num_ce_pipe_entries =
				hlos_num_ce_hw_instances;
		pce_entry = pce_info_use->ce_pipe_entry =
			kcalloc(entry,
				sizeof(struct qseecom_ce_pipe_entry),
				GFP_KERNEL);
		if (pce_entry == NULL) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* Same pipe pair replicated across all HLOS CE instances. */
		for (i = 0; i < entry; i++) {
			pce_entry->ce_num = hlos_ce_hw_instance[i];
			pce_entry->ce_pipe_pair = disk_encrypt_pipe;
			pce_entry->valid = 1;
			pce_entry++;
		}
	} else {
		pr_warn("Device does not support FDE");
		disk_encrypt_pipe = 0xff;
	}
	if (qseecom.support_pfe) {
		pce_info_use = qseecom.ce_info.pfe =
			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* by default for old db */
		qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
		pce_info_use->alloc = false;
		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
		pce_info_use->ce_pipe_entry = NULL;

		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,file-encrypt-pipe-pair",
				&file_encrypt_pipe)) {
			pr_err("Fail to get PFE pipe information.\n");
			rc = -EINVAL;
			goto out;
		} else {
			pr_debug("file-encrypt-pipe-pair=0x%x",
				file_encrypt_pipe);
		}
		entry = pce_info_use->num_ce_pipe_entries =
				hlos_num_ce_hw_instances;
		pce_entry = pce_info_use->ce_pipe_entry =
			kcalloc(entry,
				sizeof(struct qseecom_ce_pipe_entry),
				GFP_KERNEL);
		if (pce_entry == NULL) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* Same pipe pair replicated across all HLOS CE instances. */
		for (i = 0; i < entry; i++) {
			pce_entry->ce_num = hlos_ce_hw_instance[i];
			pce_entry->ce_pipe_pair = file_encrypt_pipe;
			pce_entry->valid = 1;
			pce_entry++;
		}
	} else {
		pr_warn("Device does not support PFE");
		file_encrypt_pipe = 0xff;
	}

out1:
	qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
	qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
out:
	/* On error, free any ce_info arrays built so far. */
	if (rc) {
		if (qseecom.ce_info.fde) {
			pce_info_use = qseecom.ce_info.fde;
			for (i = 0; i < qseecom.ce_info.num_fde; i++) {
				pce_entry = pce_info_use->ce_pipe_entry;
				kfree(pce_entry);
				pce_info_use++;
			}
		}
		kfree(qseecom.ce_info.fde);
		qseecom.ce_info.fde = NULL;
		if (qseecom.ce_info.pfe) {
			pce_info_use = qseecom.ce_info.pfe;
			for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
				pce_entry = pce_info_use->ce_pipe_entry;
				kfree(pce_entry);
				pce_info_use++;
			}
		}
		kfree(qseecom.ce_info.pfe);
		qseecom.ce_info.pfe = NULL;
	}
	kfree(unit_tbl);
	kfree(pfde_tbl);
	return rc;
}
8154
8155static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8156 void __user *argp)
8157{
8158 struct qseecom_ce_info_req req;
8159 struct qseecom_ce_info_req *pinfo = &req;
8160 int ret = 0;
8161 int i;
8162 unsigned int entries;
8163 struct qseecom_ce_info_use *pce_info_use, *p;
8164 int total = 0;
8165 bool found = false;
8166 struct qseecom_ce_pipe_entry *pce_entry;
8167
8168 ret = copy_from_user(pinfo, argp,
8169 sizeof(struct qseecom_ce_info_req));
8170 if (ret) {
8171 pr_err("copy_from_user failed\n");
8172 return ret;
8173 }
8174
8175 switch (pinfo->usage) {
8176 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8177 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8178 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8179 if (qseecom.support_fde) {
8180 p = qseecom.ce_info.fde;
8181 total = qseecom.ce_info.num_fde;
8182 } else {
8183 pr_err("system does not support fde\n");
8184 return -EINVAL;
8185 }
8186 break;
8187 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8188 if (qseecom.support_pfe) {
8189 p = qseecom.ce_info.pfe;
8190 total = qseecom.ce_info.num_pfe;
8191 } else {
8192 pr_err("system does not support pfe\n");
8193 return -EINVAL;
8194 }
8195 break;
8196 default:
8197 pr_err("unsupported usage %d\n", pinfo->usage);
8198 return -EINVAL;
8199 }
8200
8201 pce_info_use = NULL;
8202 for (i = 0; i < total; i++) {
8203 if (!p->alloc)
8204 pce_info_use = p;
8205 else if (!memcmp(p->handle, pinfo->handle,
8206 MAX_CE_INFO_HANDLE_SIZE)) {
8207 pce_info_use = p;
8208 found = true;
8209 break;
8210 }
8211 p++;
8212 }
8213
8214 if (pce_info_use == NULL)
8215 return -EBUSY;
8216
8217 pinfo->unit_num = pce_info_use->unit_num;
8218 if (!pce_info_use->alloc) {
8219 pce_info_use->alloc = true;
8220 memcpy(pce_info_use->handle,
8221 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8222 }
8223 if (pce_info_use->num_ce_pipe_entries >
8224 MAX_CE_PIPE_PAIR_PER_UNIT)
8225 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8226 else
8227 entries = pce_info_use->num_ce_pipe_entries;
8228 pinfo->num_ce_pipe_entries = entries;
8229 pce_entry = pce_info_use->ce_pipe_entry;
8230 for (i = 0; i < entries; i++, pce_entry++)
8231 pinfo->ce_pipe_entry[i] = *pce_entry;
8232 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8233 pinfo->ce_pipe_entry[i].valid = 0;
8234
8235 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8236 pr_err("copy_to_user failed\n");
8237 ret = -EFAULT;
8238 }
8239 return ret;
8240}
8241
8242static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8243 void __user *argp)
8244{
8245 struct qseecom_ce_info_req req;
8246 struct qseecom_ce_info_req *pinfo = &req;
8247 int ret = 0;
8248 struct qseecom_ce_info_use *p;
8249 int total = 0;
8250 int i;
8251 bool found = false;
8252
8253 ret = copy_from_user(pinfo, argp,
8254 sizeof(struct qseecom_ce_info_req));
8255 if (ret)
8256 return ret;
8257
8258 switch (pinfo->usage) {
8259 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8260 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8261 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8262 if (qseecom.support_fde) {
8263 p = qseecom.ce_info.fde;
8264 total = qseecom.ce_info.num_fde;
8265 } else {
8266 pr_err("system does not support fde\n");
8267 return -EINVAL;
8268 }
8269 break;
8270 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8271 if (qseecom.support_pfe) {
8272 p = qseecom.ce_info.pfe;
8273 total = qseecom.ce_info.num_pfe;
8274 } else {
8275 pr_err("system does not support pfe\n");
8276 return -EINVAL;
8277 }
8278 break;
8279 default:
8280 pr_err("unsupported usage %d\n", pinfo->usage);
8281 return -EINVAL;
8282 }
8283
8284 for (i = 0; i < total; i++) {
8285 if (p->alloc &&
8286 !memcmp(p->handle, pinfo->handle,
8287 MAX_CE_INFO_HANDLE_SIZE)) {
8288 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8289 p->alloc = false;
8290 found = true;
8291 break;
8292 }
8293 p++;
8294 }
8295 return ret;
8296}
8297
8298static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8299 void __user *argp)
8300{
8301 struct qseecom_ce_info_req req;
8302 struct qseecom_ce_info_req *pinfo = &req;
8303 int ret = 0;
8304 int i;
8305 unsigned int entries;
8306 struct qseecom_ce_info_use *pce_info_use, *p;
8307 int total = 0;
8308 bool found = false;
8309 struct qseecom_ce_pipe_entry *pce_entry;
8310
8311 ret = copy_from_user(pinfo, argp,
8312 sizeof(struct qseecom_ce_info_req));
8313 if (ret)
8314 return ret;
8315
8316 switch (pinfo->usage) {
8317 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8318 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8319 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8320 if (qseecom.support_fde) {
8321 p = qseecom.ce_info.fde;
8322 total = qseecom.ce_info.num_fde;
8323 } else {
8324 pr_err("system does not support fde\n");
8325 return -EINVAL;
8326 }
8327 break;
8328 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8329 if (qseecom.support_pfe) {
8330 p = qseecom.ce_info.pfe;
8331 total = qseecom.ce_info.num_pfe;
8332 } else {
8333 pr_err("system does not support pfe\n");
8334 return -EINVAL;
8335 }
8336 break;
8337 default:
8338 pr_err("unsupported usage %d\n", pinfo->usage);
8339 return -EINVAL;
8340 }
8341
8342 pce_info_use = NULL;
8343 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8344 pinfo->num_ce_pipe_entries = 0;
8345 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8346 pinfo->ce_pipe_entry[i].valid = 0;
8347
8348 for (i = 0; i < total; i++) {
8349
8350 if (p->alloc && !memcmp(p->handle,
8351 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8352 pce_info_use = p;
8353 found = true;
8354 break;
8355 }
8356 p++;
8357 }
8358 if (!pce_info_use)
8359 goto out;
8360 pinfo->unit_num = pce_info_use->unit_num;
8361 if (pce_info_use->num_ce_pipe_entries >
8362 MAX_CE_PIPE_PAIR_PER_UNIT)
8363 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8364 else
8365 entries = pce_info_use->num_ce_pipe_entries;
8366 pinfo->num_ce_pipe_entries = entries;
8367 pce_entry = pce_info_use->ce_pipe_entry;
8368 for (i = 0; i < entries; i++, pce_entry++)
8369 pinfo->ce_pipe_entry[i] = *pce_entry;
8370 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8371 pinfo->ce_pipe_entry[i].valid = 0;
8372out:
8373 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8374 pr_err("copy_to_user failed\n");
8375 ret = -EFAULT;
8376 }
8377 return ret;
8378}
8379
8380/*
8381 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8382 * then whitelist feature is not supported.
8383 */
8384static int qseecom_check_whitelist_feature(void)
8385{
8386 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8387
8388 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8389}
8390
/*
 * qseecom_probe() - platform driver initialization.
 *
 * Creates the /dev/qseecom char device and class, queries the QSEE
 * firmware version over SCM, creates the ION client, reads the DT
 * configuration (bus scaling, CE clocks, reentrancy, FDE/PFE data),
 * optionally notifies TZ of the secure-app memory region, and finally
 * marks the driver READY.
 *
 * Return: 0 on success, negative errno on failure. Error paths unwind
 * everything acquired so far via the goto ladder at the bottom.
 */
static int qseecom_probe(struct platform_device *pdev)
{
	int rc;
	int i;
	uint32_t feature = 10;	/* payload sent with the QSEE version query */
	struct device *class_dev;
	struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
	struct qseecom_command_scm_resp resp;
	struct qseecom_ce_info_use *pce_info_use = NULL;

	/* Reset the global driver state to a known baseline. */
	qseecom.qsee_bw_count = 0;
	qseecom.qsee_perf_client = 0;
	qseecom.qsee_sfpb_bw_count = 0;

	qseecom.qsee.ce_core_clk = NULL;
	qseecom.qsee.ce_clk = NULL;
	qseecom.qsee.ce_core_src_clk = NULL;
	qseecom.qsee.ce_bus_clk = NULL;

	qseecom.cumulative_mode = 0;
	qseecom.current_mode = INACTIVE;
	qseecom.support_bus_scaling = false;
	qseecom.support_fde = false;
	qseecom.support_pfe = false;

	qseecom.ce_drv.ce_core_clk = NULL;
	qseecom.ce_drv.ce_clk = NULL;
	qseecom.ce_drv.ce_core_src_clk = NULL;
	qseecom.ce_drv.ce_bus_clk = NULL;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);

	qseecom.app_block_ref_cnt = 0;
	init_waitqueue_head(&qseecom.app_block_wq);
	qseecom.whitelist_support = true;

	/* Char device + class so userspace can open /dev/qseecom. */
	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
	if (IS_ERR(driver_class)) {
		/* NOTE(review): PTR_ERR(driver_class) would be more precise */
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
			QSEECOM_DEV);
	if (IS_ERR(class_dev)) {
		/* NOTE(review): rc printed here is stale (still 0 from above) */
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&qseecom.cdev, &qseecom_fops);
	qseecom.cdev.owner = THIS_MODULE;

	rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}

	/* Listener/app/kclient bookkeeping lists and their locks. */
	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
	spin_lock_init(&qseecom.registered_listener_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
	spin_lock_init(&qseecom.registered_app_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
	spin_lock_init(&qseecom.registered_kclient_list_lock);
	init_waitqueue_head(&qseecom.send_resp_wq);
	qseecom.send_resp_flag = 0;

	/* Query the QSEE firmware version (SCM svc 6, cmd 3). */
	qseecom.qsee_version = QSEEE_VERSION_00;
	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
		&resp, sizeof(resp));
	/* NOTE(review): resp may be uninitialised here if the SCM call failed;
	 * the print below happens before rc is checked -- confirm intent.
	 */
	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
	if (rc) {
		pr_err("Failed to get QSEE version info %d\n", rc);
		goto exit_del_cdev;
	}
	qseecom.qsee_version = resp.result;
	qseecom.qseos_version = QSEOS_VERSION_14;
	qseecom.commonlib_loaded = false;
	qseecom.commonlib64_loaded = false;
	qseecom.pdev = class_dev;
	/* Create ION msm client */
	qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
	if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
		pr_err("Ion client cannot be created\n");
		rc = -ENOMEM;
		goto exit_del_cdev;
	}

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		/* Device-tree driven configuration. */
		qseecom.pdev->of_node = pdev->dev.of_node;
		qseecom.support_bus_scaling =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,support-bus-scaling");
		rc = qseecom_retrieve_ce_data(pdev);
		if (rc)
			goto exit_destroy_ion_client;
		qseecom.appsbl_qseecom_support =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,appsbl-qseecom-support");
		pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
				qseecom.appsbl_qseecom_support);

		qseecom.commonlib64_loaded =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,commonlib64-loaded-by-uefi");
		pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
				qseecom.commonlib64_loaded);
		/* NOTE(review): fde-key-size read as bool -- verify DT schema */
		qseecom.fde_key_size =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,fde-key-size");
		qseecom.no_clock_support =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,no-clock-support");
		/*
		 * NOTE(review): these two log strings look swapped --
		 * "handled by other subsystem" prints when no_clock_support
		 * is false, i.e. when HLOS *does* manage the clocks.
		 * Logging only; behavior kept as-is, confirm intent.
		 */
		if (!qseecom.no_clock_support) {
			pr_info("qseecom clocks handled by other subsystem\n");
		} else {
			pr_info("no-clock-support=0x%x",
			qseecom.no_clock_support);
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,qsee-reentrancy-support",
					&qseecom.qsee_reentrancy_support)) {
			pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
			qseecom.qsee_reentrancy_support = 0;
		} else {
			pr_warn("qseecom.qsee_reentrancy_support = %d\n",
				qseecom.qsee_reentrancy_support);
		}

		/*
		 * The qseecom bus scaling flag can not be enabled when
		 * crypto clock is not handled by HLOS.
		 */
		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
			pr_err("support_bus_scaling flag can not be enabled.\n");
			rc = -EINVAL;
			goto exit_destroy_ion_client;
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,ce-opp-freq",
					&qseecom.ce_opp_freq_hz)) {
			pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
			qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
		}
		rc = __qseecom_init_clk(CLK_QSEE);
		if (rc)
			goto exit_destroy_ion_client;

		/*
		 * A separate CE clock instance is only needed when FDE/PFE
		 * is supported on a different CE than QSEE's; otherwise the
		 * ce_drv handles alias the QSEE clocks.
		 */
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde)) {
			rc = __qseecom_init_clk(CLK_CE_DRV);
			if (rc) {
				__qseecom_deinit_clk(CLK_QSEE);
				goto exit_destroy_ion_client;
			}
		} else {
			struct qseecom_clk *qclk;

			qclk = &qseecom.qsee;
			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
			qseecom.ce_drv.ce_clk = qclk->ce_clk;
			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
		}

		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						msm_bus_cl_get_pdata(pdev);
		/*
		 * Notify TZ of the secure-app region, unless the bootloader
		 * already protected/registered it (apps-region-protected or
		 * appsbl qseecom support).
		 */
		if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
			(!qseecom.is_apps_region_protected &&
			!qseecom.appsbl_qseecom_support)) {
			struct resource *resource = NULL;
			struct qsee_apps_region_info_ireq req;
			struct qsee_apps_region_info_64bit_ireq req_64bit;
			struct qseecom_command_scm_resp resp;
			void *cmd_buf = NULL;
			size_t cmd_len;

			resource = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "secapp-region");
			if (resource) {
				/* Request layout depends on the QSEE version:
				 * 32-bit addresses before QSEE_VERSION_40,
				 * 64-bit from then on.
				 */
				if (qseecom.qsee_version < QSEE_VERSION_40) {
					req.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req.addr = (uint32_t)resource->start;
					req.size = resource_size(resource);
					cmd_buf = (void *)&req;
					cmd_len = sizeof(struct
						qsee_apps_region_info_ireq);
					pr_warn("secure app region addr=0x%x size=0x%x",
						req.addr, req.size);
				} else {
					req_64bit.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req_64bit.addr = resource->start;
					req_64bit.size = resource_size(
							resource);
					cmd_buf = (void *)&req_64bit;
					cmd_len = sizeof(struct
						qsee_apps_region_info_64bit_ireq);
					pr_warn("secure app region addr=0x%llx size=0x%x",
						req_64bit.addr, req_64bit.size);
				}
			} else {
				pr_err("Fail to get secure app region info\n");
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
			/* CE clock must be on while issuing the SCM call. */
			rc = __qseecom_enable_clk(CLK_QSEE);
			if (rc) {
				pr_err("CLK_QSEE enabling failed (%d)\n", rc);
				rc = -EIO;
				goto exit_deinit_clock;
			}
			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len,
					&resp, sizeof(resp));
			__qseecom_disable_clk(CLK_QSEE);
			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
				pr_err("send secapp reg fail %d resp.res %d\n",
					rc, resp.result);
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
		}
		/*
		 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
		 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
		 * Pls add "qseecom.commonlib64_loaded = true" here too.
		 */
		if (qseecom.is_apps_region_protected ||
					qseecom.appsbl_qseecom_support)
			qseecom.commonlib_loaded = true;
	} else {
		/* Non-DT platform: bus scaling data comes via platform_data. */
		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						pdev->dev.platform_data;
	}
	/* Timer + work item for the deferred bandwidth scale-down. */
	if (qseecom.support_bus_scaling) {
		init_timer(&(qseecom.bw_scale_down_timer));
		INIT_WORK(&qseecom.bw_inactive_req_ws,
				qseecom_bw_inactive_req_work);
		qseecom.bw_scale_down_timer.function =
				qseecom_scale_bus_bandwidth_timer_callback;
	}
	qseecom.timer_running = false;
	qseecom.qsee_perf_client = msm_bus_scale_register_client(
					qseecom_platform_support);

	qseecom.whitelist_support = qseecom_check_whitelist_feature();
	pr_warn("qseecom.whitelist_support = %d\n",
				qseecom.whitelist_support);

	/* Bus client registration failure is logged but not fatal. */
	if (!qseecom.qsee_perf_client)
		pr_err("Unable to register bus client\n");

	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return 0;

exit_deinit_clock:
	__qseecom_deinit_clk(CLK_QSEE);
	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
		(qseecom.support_pfe || qseecom.support_fde))
		__qseecom_deinit_clk(CLK_CE_DRV);
exit_destroy_ion_client:
	/* Free the CE pipe tables built by qseecom_retrieve_ce_data(). */
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.fde);
	}
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.pfe);
	}
	ion_client_destroy(qseecom.ion_clnt);
exit_del_cdev:
	cdev_del(&qseecom.cdev);
exit_destroy_device:
	device_destroy(driver_class, qseecom_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qseecom_device_no, 1);
	return rc;
}
8691
8692static int qseecom_remove(struct platform_device *pdev)
8693{
8694 struct qseecom_registered_kclient_list *kclient = NULL;
8695 unsigned long flags = 0;
8696 int ret = 0;
8697 int i;
8698 struct qseecom_ce_pipe_entry *pce_entry;
8699 struct qseecom_ce_info_use *pce_info_use;
8700
8701 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8702 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
8703
8704 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
8705 list) {
8706 if (!kclient)
8707 goto exit_irqrestore;
8708
8709 /* Break the loop if client handle is NULL */
8710 if (!kclient->handle)
8711 goto exit_free_kclient;
8712
8713 if (list_empty(&kclient->list))
8714 goto exit_free_kc_handle;
8715
8716 list_del(&kclient->list);
8717 mutex_lock(&app_access_lock);
8718 ret = qseecom_unload_app(kclient->handle->dev, false);
8719 mutex_unlock(&app_access_lock);
8720 if (!ret) {
8721 kzfree(kclient->handle->dev);
8722 kzfree(kclient->handle);
8723 kzfree(kclient);
8724 }
8725 }
8726
8727exit_free_kc_handle:
8728 kzfree(kclient->handle);
8729exit_free_kclient:
8730 kzfree(kclient);
8731exit_irqrestore:
8732 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
8733
8734 if (qseecom.qseos_version > QSEEE_VERSION_00)
8735 qseecom_unload_commonlib_image();
8736
8737 if (qseecom.qsee_perf_client)
8738 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
8739 0);
8740 if (pdev->dev.platform_data != NULL)
8741 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
8742
8743 if (qseecom.support_bus_scaling) {
8744 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8745 del_timer_sync(&qseecom.bw_scale_down_timer);
8746 }
8747
8748 if (qseecom.ce_info.fde) {
8749 pce_info_use = qseecom.ce_info.fde;
8750 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8751 pce_entry = pce_info_use->ce_pipe_entry;
8752 kfree(pce_entry);
8753 pce_info_use++;
8754 }
8755 }
8756 kfree(qseecom.ce_info.fde);
8757 if (qseecom.ce_info.pfe) {
8758 pce_info_use = qseecom.ce_info.pfe;
8759 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8760 pce_entry = pce_info_use->ce_pipe_entry;
8761 kfree(pce_entry);
8762 pce_info_use++;
8763 }
8764 }
8765 kfree(qseecom.ce_info.pfe);
8766
8767 /* register client for bus scaling */
8768 if (pdev->dev.of_node) {
8769 __qseecom_deinit_clk(CLK_QSEE);
8770 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8771 (qseecom.support_pfe || qseecom.support_fde))
8772 __qseecom_deinit_clk(CLK_CE_DRV);
8773 }
8774
8775 ion_client_destroy(qseecom.ion_clnt);
8776
8777 cdev_del(&qseecom.cdev);
8778
8779 device_destroy(driver_class, qseecom_device_no);
8780
8781 class_destroy(driver_class);
8782
8783 unregister_chrdev_region(qseecom_device_no, 1);
8784
8785 return ret;
8786}
8787
8788static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
8789{
8790 int ret = 0;
8791 struct qseecom_clk *qclk;
8792
8793 qclk = &qseecom.qsee;
8794 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
8795 if (qseecom.no_clock_support)
8796 return 0;
8797
8798 mutex_lock(&qsee_bw_mutex);
8799 mutex_lock(&clk_access_lock);
8800
8801 if (qseecom.current_mode != INACTIVE) {
8802 ret = msm_bus_scale_client_update_request(
8803 qseecom.qsee_perf_client, INACTIVE);
8804 if (ret)
8805 pr_err("Fail to scale down bus\n");
8806 else
8807 qseecom.current_mode = INACTIVE;
8808 }
8809
8810 if (qclk->clk_access_cnt) {
8811 if (qclk->ce_clk != NULL)
8812 clk_disable_unprepare(qclk->ce_clk);
8813 if (qclk->ce_core_clk != NULL)
8814 clk_disable_unprepare(qclk->ce_core_clk);
8815 if (qclk->ce_bus_clk != NULL)
8816 clk_disable_unprepare(qclk->ce_bus_clk);
8817 }
8818
8819 del_timer_sync(&(qseecom.bw_scale_down_timer));
8820 qseecom.timer_running = false;
8821
8822 mutex_unlock(&clk_access_lock);
8823 mutex_unlock(&qsee_bw_mutex);
8824 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8825
8826 return 0;
8827}
8828
8829static int qseecom_resume(struct platform_device *pdev)
8830{
8831 int mode = 0;
8832 int ret = 0;
8833 struct qseecom_clk *qclk;
8834
8835 qclk = &qseecom.qsee;
8836 if (qseecom.no_clock_support)
8837 goto exit;
8838
8839 mutex_lock(&qsee_bw_mutex);
8840 mutex_lock(&clk_access_lock);
8841 if (qseecom.cumulative_mode >= HIGH)
8842 mode = HIGH;
8843 else
8844 mode = qseecom.cumulative_mode;
8845
8846 if (qseecom.cumulative_mode != INACTIVE) {
8847 ret = msm_bus_scale_client_update_request(
8848 qseecom.qsee_perf_client, mode);
8849 if (ret)
8850 pr_err("Fail to scale up bus to %d\n", mode);
8851 else
8852 qseecom.current_mode = mode;
8853 }
8854
8855 if (qclk->clk_access_cnt) {
8856 if (qclk->ce_core_clk != NULL) {
8857 ret = clk_prepare_enable(qclk->ce_core_clk);
8858 if (ret) {
8859 pr_err("Unable to enable/prep CE core clk\n");
8860 qclk->clk_access_cnt = 0;
8861 goto err;
8862 }
8863 }
8864 if (qclk->ce_clk != NULL) {
8865 ret = clk_prepare_enable(qclk->ce_clk);
8866 if (ret) {
8867 pr_err("Unable to enable/prep CE iface clk\n");
8868 qclk->clk_access_cnt = 0;
8869 goto ce_clk_err;
8870 }
8871 }
8872 if (qclk->ce_bus_clk != NULL) {
8873 ret = clk_prepare_enable(qclk->ce_bus_clk);
8874 if (ret) {
8875 pr_err("Unable to enable/prep CE bus clk\n");
8876 qclk->clk_access_cnt = 0;
8877 goto ce_bus_clk_err;
8878 }
8879 }
8880 }
8881
8882 if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
8883 qseecom.bw_scale_down_timer.expires = jiffies +
8884 msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
8885 mod_timer(&(qseecom.bw_scale_down_timer),
8886 qseecom.bw_scale_down_timer.expires);
8887 qseecom.timer_running = true;
8888 }
8889
8890 mutex_unlock(&clk_access_lock);
8891 mutex_unlock(&qsee_bw_mutex);
8892 goto exit;
8893
8894ce_bus_clk_err:
8895 if (qclk->ce_clk)
8896 clk_disable_unprepare(qclk->ce_clk);
8897ce_clk_err:
8898 if (qclk->ce_core_clk)
8899 clk_disable_unprepare(qclk->ce_core_clk);
8900err:
8901 mutex_unlock(&clk_access_lock);
8902 mutex_unlock(&qsee_bw_mutex);
8903 ret = -EIO;
8904exit:
8905 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8906 return ret;
8907}
8908
/* Device-tree match table: binds this driver to "qcom,qseecom" nodes. */
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}
};
8915
/*
 * Platform driver descriptor wiring probe/remove and the legacy
 * suspend/resume callbacks to the driver core.
 */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.owner = THIS_MODULE,
		.of_match_table = qseecom_match,
	},
};
8927
8928static int qseecom_init(void)
8929{
8930 return platform_driver_register(&qseecom_plat_driver);
8931}
8932
8933static void qseecom_exit(void)
8934{
8935 platform_driver_unregister(&qseecom_plat_driver);
8936}
8937
8938MODULE_LICENSE("GPL v2");
8939MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
8940
8941module_init(qseecom_init);
8942module_exit(qseecom_exit);