/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
4 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
54#define QSEECOM_DEV "qseecom"
55#define QSEOS_VERSION_14 0x14
56#define QSEEE_VERSION_00 0x400000
57#define QSEE_VERSION_01 0x401000
58#define QSEE_VERSION_02 0x402000
59#define QSEE_VERSION_03 0x403000
60#define QSEE_VERSION_04 0x404000
61#define QSEE_VERSION_05 0x405000
62#define QSEE_VERSION_20 0x800000
63#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
64
65#define QSEE_CE_CLK_100MHZ 100000000
66#define CE_CLK_DIV 1000000
67
68#define QSEECOM_MAX_SG_ENTRY 512
69#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
70 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
71
72#define QSEECOM_INVALID_KEY_ID 0xff
73
74/* Save partition image hash for authentication check */
75#define SCM_SAVE_PARTITION_HASH_ID 0x01
76
77/* Check if enterprise security is activate */
78#define SCM_IS_ACTIVATED_ID 0x02
79
80/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
81#define SCM_MDTP_CIPHER_DIP 0x01
82
83/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
84#define MAX_DIP 0x20000
85
86#define RPMB_SERVICE 0x2000
87#define SSD_SERVICE 0x3000
88
89#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
90#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
91#define TWO 2
92#define QSEECOM_UFS_ICE_CE_NUM 10
93#define QSEECOM_SDCC_ICE_CE_NUM 20
94#define QSEECOM_ICE_FDE_KEY_INDEX 0
95
96#define PHY_ADDR_4G (1ULL<<32)
97
98#define QSEECOM_STATE_NOT_READY 0
99#define QSEECOM_STATE_SUSPEND 1
100#define QSEECOM_STATE_READY 2
101#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
102
103/*
104 * default ce info unit to 0 for
105 * services which
106 * support only single instance.
107 * Most of services are in this category.
108 */
109#define DEFAULT_CE_INFO_UNIT 0
110#define DEFAULT_NUM_CE_INFO_UNIT 1
111
112enum qseecom_clk_definitions {
113 CLK_DFAB = 0,
114 CLK_SFPB,
115};
116
117enum qseecom_ice_key_size_type {
118 QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
119 (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
120 QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
121 (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
122 QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
123 (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
124};
125
126enum qseecom_client_handle_type {
127 QSEECOM_CLIENT_APP = 1,
128 QSEECOM_LISTENER_SERVICE,
129 QSEECOM_SECURE_SERVICE,
130 QSEECOM_GENERIC,
131 QSEECOM_UNAVAILABLE_CLIENT_APP,
132};
133
134enum qseecom_ce_hw_instance {
135 CLK_QSEE = 0,
136 CLK_CE_DRV,
137 CLK_INVALID,
138};
139
140static struct class *driver_class;
141static dev_t qseecom_device_no;
142
143static DEFINE_MUTEX(qsee_bw_mutex);
144static DEFINE_MUTEX(app_access_lock);
145static DEFINE_MUTEX(clk_access_lock);
146
147struct sglist_info {
148 uint32_t indexAndFlags;
149 uint32_t sizeOrCount;
150};
151
152/*
153 * The 31th bit indicates only one or multiple physical address inside
154 * the request buffer. If it is set, the index locates a single physical addr
155 * inside the request buffer, and `sizeOrCount` is the size of the memory being
156 * shared at that physical address.
157 * Otherwise, the index locates an array of {start, len} pairs (a
158 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
159 * that array.
160 *
161 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
162 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
163 *
164 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
165 */
166#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
167 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
168
169#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
170
171#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
172
173#define MAKE_WHITELIST_VERSION(major, minor, patch) \
174 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
175
176struct qseecom_registered_listener_list {
177 struct list_head list;
178 struct qseecom_register_listener_req svc;
179 void *user_virt_sb_base;
180 u8 *sb_virt;
181 phys_addr_t sb_phys;
182 size_t sb_length;
183 struct ion_handle *ihandle; /* Retrieve phy addr */
184 wait_queue_head_t rcv_req_wq;
185 int rcv_req_flag;
186 int send_resp_flag;
187 bool listener_in_use;
188 /* wq for thread blocked on this listener*/
189 wait_queue_head_t listener_block_app_wq;
190 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
191 uint32_t sglist_cnt;
192};
193
194struct qseecom_registered_app_list {
195 struct list_head list;
196 u32 app_id;
197 u32 ref_cnt;
198 char app_name[MAX_APP_NAME_SIZE];
199 u32 app_arch;
200 bool app_blocked;
201 u32 blocked_on_listener_id;
202};
203
204struct qseecom_registered_kclient_list {
205 struct list_head list;
206 struct qseecom_handle *handle;
207};
208
209struct qseecom_ce_info_use {
210 unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
211 unsigned int unit_num;
212 unsigned int num_ce_pipe_entries;
213 struct qseecom_ce_pipe_entry *ce_pipe_entry;
214 bool alloc;
215 uint32_t type;
216};
217
218struct ce_hw_usage_info {
219 uint32_t qsee_ce_hw_instance;
220 uint32_t num_fde;
221 struct qseecom_ce_info_use *fde;
222 uint32_t num_pfe;
223 struct qseecom_ce_info_use *pfe;
224};
225
226struct qseecom_clk {
227 enum qseecom_ce_hw_instance instance;
228 struct clk *ce_core_clk;
229 struct clk *ce_clk;
230 struct clk *ce_core_src_clk;
231 struct clk *ce_bus_clk;
232 uint32_t clk_access_cnt;
233};
234
235struct qseecom_control {
236 struct ion_client *ion_clnt; /* Ion client */
237 struct list_head registered_listener_list_head;
238 spinlock_t registered_listener_list_lock;
239
240 struct list_head registered_app_list_head;
241 spinlock_t registered_app_list_lock;
242
243 struct list_head registered_kclient_list_head;
244 spinlock_t registered_kclient_list_lock;
245
246 wait_queue_head_t send_resp_wq;
247 int send_resp_flag;
248
249 uint32_t qseos_version;
250 uint32_t qsee_version;
251 struct device *pdev;
252 bool whitelist_support;
253 bool commonlib_loaded;
254 bool commonlib64_loaded;
255 struct ion_handle *cmnlib_ion_handle;
256 struct ce_hw_usage_info ce_info;
257
258 int qsee_bw_count;
259 int qsee_sfpb_bw_count;
260
261 uint32_t qsee_perf_client;
262 struct qseecom_clk qsee;
263 struct qseecom_clk ce_drv;
264
265 bool support_bus_scaling;
266 bool support_fde;
267 bool support_pfe;
268 bool fde_key_size;
269 uint32_t cumulative_mode;
270 enum qseecom_bandwidth_request_mode current_mode;
271 struct timer_list bw_scale_down_timer;
272 struct work_struct bw_inactive_req_ws;
273 struct cdev cdev;
274 bool timer_running;
275 bool no_clock_support;
276 unsigned int ce_opp_freq_hz;
277 bool appsbl_qseecom_support;
278 uint32_t qsee_reentrancy_support;
279
280 uint32_t app_block_ref_cnt;
281 wait_queue_head_t app_block_wq;
282 atomic_t qseecom_state;
283 int is_apps_region_protected;
284};
285
286struct qseecom_sec_buf_fd_info {
287 bool is_sec_buf_fd;
288 size_t size;
289 void *vbase;
290 dma_addr_t pbase;
291};
292
293struct qseecom_param_memref {
294 uint32_t buffer;
295 uint32_t size;
296};
297
298struct qseecom_client_handle {
299 u32 app_id;
300 u8 *sb_virt;
301 phys_addr_t sb_phys;
302 unsigned long user_virt_sb_base;
303 size_t sb_length;
304 struct ion_handle *ihandle; /* Retrieve phy addr */
305 char app_name[MAX_APP_NAME_SIZE];
306 u32 app_arch;
307 struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
308};
309
310struct qseecom_listener_handle {
311 u32 id;
312};
313
314static struct qseecom_control qseecom;
315
316struct qseecom_dev_handle {
317 enum qseecom_client_handle_type type;
318 union {
319 struct qseecom_client_handle client;
320 struct qseecom_listener_handle listener;
321 };
322 bool released;
323 int abort;
324 wait_queue_head_t abort_wq;
325 atomic_t ioctl_count;
326 bool perf_enabled;
327 bool fast_load_enabled;
328 enum qseecom_bandwidth_request_mode mode;
329 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
330 uint32_t sglist_cnt;
331 bool use_legacy_cmd;
332};
333
334struct qseecom_key_id_usage_desc {
335 uint8_t desc[QSEECOM_KEY_ID_SIZE];
336};
337
338struct qseecom_crypto_info {
339 unsigned int unit_num;
340 unsigned int ce;
341 unsigned int pipe_pair;
342};
343
344static struct qseecom_key_id_usage_desc key_id_array[] = {
345 {
346 .desc = "Undefined Usage Index",
347 },
348
349 {
350 .desc = "Full Disk Encryption",
351 },
352
353 {
354 .desc = "Per File Encryption",
355 },
356
357 {
358 .desc = "UFS ICE Full Disk Encryption",
359 },
360
361 {
362 .desc = "SDCC ICE Full Disk Encryption",
363 },
364};
365
366/* Function proto types */
367static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
368static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
369static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
370static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
371static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
372static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
373 char *cmnlib_name);
374static int qseecom_enable_ice_setup(int usage);
375static int qseecom_disable_ice_setup(int usage);
376static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
377static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
378 void __user *argp);
379static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
380 void __user *argp);
381static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
382 void __user *argp);
383
384static int get_qseecom_keymaster_status(char *str)
385{
386 get_option(&str, &qseecom.is_apps_region_protected);
387 return 1;
388}
389__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
390
391static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
392 const void *req_buf, void *resp_buf)
393{
394 int ret = 0;
395 uint32_t smc_id = 0;
396 uint32_t qseos_cmd_id = 0;
397 struct scm_desc desc = {0};
398 struct qseecom_command_scm_resp *scm_resp = NULL;
399
400 if (!req_buf || !resp_buf) {
401 pr_err("Invalid buffer pointer\n");
402 return -EINVAL;
403 }
404 qseos_cmd_id = *(uint32_t *)req_buf;
405 scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
406
407 switch (svc_id) {
408 case 6: {
409 if (tz_cmd_id == 3) {
410 smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
411 desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
412 desc.args[0] = *(uint32_t *)req_buf;
413 } else {
414 pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
415 svc_id, tz_cmd_id);
416 return -EINVAL;
417 }
418 ret = scm_call2(smc_id, &desc);
419 break;
420 }
421 case SCM_SVC_ES: {
422 switch (tz_cmd_id) {
423 case SCM_SAVE_PARTITION_HASH_ID: {
424 u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
425 struct qseecom_save_partition_hash_req *p_hash_req =
426 (struct qseecom_save_partition_hash_req *)
427 req_buf;
428 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
429
430 if (!tzbuf)
431 return -ENOMEM;
432 memset(tzbuf, 0, tzbuflen);
433 memcpy(tzbuf, p_hash_req->digest,
434 SHA256_DIGEST_LENGTH);
435 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
436 smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
437 desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
438 desc.args[0] = p_hash_req->partition_id;
439 desc.args[1] = virt_to_phys(tzbuf);
440 desc.args[2] = SHA256_DIGEST_LENGTH;
441 ret = scm_call2(smc_id, &desc);
442 kzfree(tzbuf);
443 break;
444 }
445 default: {
446 pr_err("tz_cmd_id %d is not supported by scm_call2\n",
447 tz_cmd_id);
448 ret = -EINVAL;
449 break;
450 }
451 } /* end of switch (tz_cmd_id) */
452 break;
453 } /* end of case SCM_SVC_ES */
454 case SCM_SVC_TZSCHEDULER: {
455 switch (qseos_cmd_id) {
456 case QSEOS_APP_START_COMMAND: {
457 struct qseecom_load_app_ireq *req;
458 struct qseecom_load_app_64bit_ireq *req_64bit;
459
460 smc_id = TZ_OS_APP_START_ID;
461 desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
462 if (qseecom.qsee_version < QSEE_VERSION_40) {
463 req = (struct qseecom_load_app_ireq *)req_buf;
464 desc.args[0] = req->mdt_len;
465 desc.args[1] = req->img_len;
466 desc.args[2] = req->phy_addr;
467 } else {
468 req_64bit =
469 (struct qseecom_load_app_64bit_ireq *)
470 req_buf;
471 desc.args[0] = req_64bit->mdt_len;
472 desc.args[1] = req_64bit->img_len;
473 desc.args[2] = req_64bit->phy_addr;
474 }
475 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
476 ret = scm_call2(smc_id, &desc);
477 break;
478 }
479 case QSEOS_APP_SHUTDOWN_COMMAND: {
480 struct qseecom_unload_app_ireq *req;
481
482 req = (struct qseecom_unload_app_ireq *)req_buf;
483 smc_id = TZ_OS_APP_SHUTDOWN_ID;
484 desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
485 desc.args[0] = req->app_id;
486 ret = scm_call2(smc_id, &desc);
487 break;
488 }
489 case QSEOS_APP_LOOKUP_COMMAND: {
490 struct qseecom_check_app_ireq *req;
491 u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
492 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
493
494 if (!tzbuf)
495 return -ENOMEM;
496 req = (struct qseecom_check_app_ireq *)req_buf;
497 pr_debug("Lookup app_name = %s\n", req->app_name);
498 strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
499 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
500 smc_id = TZ_OS_APP_LOOKUP_ID;
501 desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
502 desc.args[0] = virt_to_phys(tzbuf);
503 desc.args[1] = strlen(req->app_name);
504 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
505 ret = scm_call2(smc_id, &desc);
506 kzfree(tzbuf);
507 break;
508 }
509 case QSEOS_APP_REGION_NOTIFICATION: {
510 struct qsee_apps_region_info_ireq *req;
511 struct qsee_apps_region_info_64bit_ireq *req_64bit;
512
513 smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
514 desc.arginfo =
515 TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
516 if (qseecom.qsee_version < QSEE_VERSION_40) {
517 req = (struct qsee_apps_region_info_ireq *)
518 req_buf;
519 desc.args[0] = req->addr;
520 desc.args[1] = req->size;
521 } else {
522 req_64bit =
523 (struct qsee_apps_region_info_64bit_ireq *)
524 req_buf;
525 desc.args[0] = req_64bit->addr;
526 desc.args[1] = req_64bit->size;
527 }
528 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
529 ret = scm_call2(smc_id, &desc);
530 break;
531 }
532 case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
533 struct qseecom_load_lib_image_ireq *req;
534 struct qseecom_load_lib_image_64bit_ireq *req_64bit;
535
536 smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
537 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
538 if (qseecom.qsee_version < QSEE_VERSION_40) {
539 req = (struct qseecom_load_lib_image_ireq *)
540 req_buf;
541 desc.args[0] = req->mdt_len;
542 desc.args[1] = req->img_len;
543 desc.args[2] = req->phy_addr;
544 } else {
545 req_64bit =
546 (struct qseecom_load_lib_image_64bit_ireq *)
547 req_buf;
548 desc.args[0] = req_64bit->mdt_len;
549 desc.args[1] = req_64bit->img_len;
550 desc.args[2] = req_64bit->phy_addr;
551 }
552 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
553 ret = scm_call2(smc_id, &desc);
554 break;
555 }
556 case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
557 smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
558 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
559 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
560 ret = scm_call2(smc_id, &desc);
561 break;
562 }
563 case QSEOS_REGISTER_LISTENER: {
564 struct qseecom_register_listener_ireq *req;
565 struct qseecom_register_listener_64bit_ireq *req_64bit;
566
567 desc.arginfo =
568 TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
569 if (qseecom.qsee_version < QSEE_VERSION_40) {
570 req = (struct qseecom_register_listener_ireq *)
571 req_buf;
572 desc.args[0] = req->listener_id;
573 desc.args[1] = req->sb_ptr;
574 desc.args[2] = req->sb_len;
575 } else {
576 req_64bit =
577 (struct qseecom_register_listener_64bit_ireq *)
578 req_buf;
579 desc.args[0] = req_64bit->listener_id;
580 desc.args[1] = req_64bit->sb_ptr;
581 desc.args[2] = req_64bit->sb_len;
582 }
583 smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
584 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
585 ret = scm_call2(smc_id, &desc);
586 if (ret) {
587 smc_id = TZ_OS_REGISTER_LISTENER_ID;
588 __qseecom_reentrancy_check_if_no_app_blocked(
589 smc_id);
590 ret = scm_call2(smc_id, &desc);
591 }
592 break;
593 }
594 case QSEOS_DEREGISTER_LISTENER: {
595 struct qseecom_unregister_listener_ireq *req;
596
597 req = (struct qseecom_unregister_listener_ireq *)
598 req_buf;
599 smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
600 desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
601 desc.args[0] = req->listener_id;
602 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
603 ret = scm_call2(smc_id, &desc);
604 break;
605 }
606 case QSEOS_LISTENER_DATA_RSP_COMMAND: {
607 struct qseecom_client_listener_data_irsp *req;
608
609 req = (struct qseecom_client_listener_data_irsp *)
610 req_buf;
611 smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
612 desc.arginfo =
613 TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
614 desc.args[0] = req->listener_id;
615 desc.args[1] = req->status;
616 ret = scm_call2(smc_id, &desc);
617 break;
618 }
619 case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
620 struct qseecom_client_listener_data_irsp *req;
621 struct qseecom_client_listener_data_64bit_irsp *req_64;
622
623 smc_id =
624 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
625 desc.arginfo =
626 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
627 if (qseecom.qsee_version < QSEE_VERSION_40) {
628 req =
629 (struct qseecom_client_listener_data_irsp *)
630 req_buf;
631 desc.args[0] = req->listener_id;
632 desc.args[1] = req->status;
633 desc.args[2] = req->sglistinfo_ptr;
634 desc.args[3] = req->sglistinfo_len;
635 } else {
636 req_64 =
637 (struct qseecom_client_listener_data_64bit_irsp *)
638 req_buf;
639 desc.args[0] = req_64->listener_id;
640 desc.args[1] = req_64->status;
641 desc.args[2] = req_64->sglistinfo_ptr;
642 desc.args[3] = req_64->sglistinfo_len;
643 }
644 ret = scm_call2(smc_id, &desc);
645 break;
646 }
647 case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
648 struct qseecom_load_app_ireq *req;
649 struct qseecom_load_app_64bit_ireq *req_64bit;
650
651 smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
652 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
653 if (qseecom.qsee_version < QSEE_VERSION_40) {
654 req = (struct qseecom_load_app_ireq *)req_buf;
655 desc.args[0] = req->mdt_len;
656 desc.args[1] = req->img_len;
657 desc.args[2] = req->phy_addr;
658 } else {
659 req_64bit =
660 (struct qseecom_load_app_64bit_ireq *)req_buf;
661 desc.args[0] = req_64bit->mdt_len;
662 desc.args[1] = req_64bit->img_len;
663 desc.args[2] = req_64bit->phy_addr;
664 }
665 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
666 ret = scm_call2(smc_id, &desc);
667 break;
668 }
669 case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
670 smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
671 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
672 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
673 ret = scm_call2(smc_id, &desc);
674 break;
675 }
676
677 case QSEOS_CLIENT_SEND_DATA_COMMAND: {
678 struct qseecom_client_send_data_ireq *req;
679 struct qseecom_client_send_data_64bit_ireq *req_64bit;
680
681 smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
682 desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
683 if (qseecom.qsee_version < QSEE_VERSION_40) {
684 req = (struct qseecom_client_send_data_ireq *)
685 req_buf;
686 desc.args[0] = req->app_id;
687 desc.args[1] = req->req_ptr;
688 desc.args[2] = req->req_len;
689 desc.args[3] = req->rsp_ptr;
690 desc.args[4] = req->rsp_len;
691 } else {
692 req_64bit =
693 (struct qseecom_client_send_data_64bit_ireq *)
694 req_buf;
695 desc.args[0] = req_64bit->app_id;
696 desc.args[1] = req_64bit->req_ptr;
697 desc.args[2] = req_64bit->req_len;
698 desc.args[3] = req_64bit->rsp_ptr;
699 desc.args[4] = req_64bit->rsp_len;
700 }
701 ret = scm_call2(smc_id, &desc);
702 break;
703 }
704 case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
705 struct qseecom_client_send_data_ireq *req;
706 struct qseecom_client_send_data_64bit_ireq *req_64bit;
707
708 smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
709 desc.arginfo =
710 TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
711 if (qseecom.qsee_version < QSEE_VERSION_40) {
712 req = (struct qseecom_client_send_data_ireq *)
713 req_buf;
714 desc.args[0] = req->app_id;
715 desc.args[1] = req->req_ptr;
716 desc.args[2] = req->req_len;
717 desc.args[3] = req->rsp_ptr;
718 desc.args[4] = req->rsp_len;
719 desc.args[5] = req->sglistinfo_ptr;
720 desc.args[6] = req->sglistinfo_len;
721 } else {
722 req_64bit =
723 (struct qseecom_client_send_data_64bit_ireq *)
724 req_buf;
725 desc.args[0] = req_64bit->app_id;
726 desc.args[1] = req_64bit->req_ptr;
727 desc.args[2] = req_64bit->req_len;
728 desc.args[3] = req_64bit->rsp_ptr;
729 desc.args[4] = req_64bit->rsp_len;
730 desc.args[5] = req_64bit->sglistinfo_ptr;
731 desc.args[6] = req_64bit->sglistinfo_len;
732 }
733 ret = scm_call2(smc_id, &desc);
734 break;
735 }
736 case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
737 struct qseecom_client_send_service_ireq *req;
738
739 req = (struct qseecom_client_send_service_ireq *)
740 req_buf;
741 smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
742 desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
743 desc.args[0] = req->key_type;
744 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
745 ret = scm_call2(smc_id, &desc);
746 break;
747 }
748 case QSEOS_RPMB_ERASE_COMMAND: {
749 smc_id = TZ_OS_RPMB_ERASE_ID;
750 desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
751 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
752 ret = scm_call2(smc_id, &desc);
753 break;
754 }
755 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
756 smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
757 desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
758 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
759 ret = scm_call2(smc_id, &desc);
760 break;
761 }
762 case QSEOS_GENERATE_KEY: {
763 u32 tzbuflen = PAGE_ALIGN(sizeof
764 (struct qseecom_key_generate_ireq) -
765 sizeof(uint32_t));
766 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
767
768 if (!tzbuf)
769 return -ENOMEM;
770 memset(tzbuf, 0, tzbuflen);
771 memcpy(tzbuf, req_buf + sizeof(uint32_t),
772 (sizeof(struct qseecom_key_generate_ireq) -
773 sizeof(uint32_t)));
774 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
775 smc_id = TZ_OS_KS_GEN_KEY_ID;
776 desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
777 desc.args[0] = virt_to_phys(tzbuf);
778 desc.args[1] = tzbuflen;
779 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
780 ret = scm_call2(smc_id, &desc);
781 kzfree(tzbuf);
782 break;
783 }
784 case QSEOS_DELETE_KEY: {
785 u32 tzbuflen = PAGE_ALIGN(sizeof
786 (struct qseecom_key_delete_ireq) -
787 sizeof(uint32_t));
788 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
789
790 if (!tzbuf)
791 return -ENOMEM;
792 memset(tzbuf, 0, tzbuflen);
793 memcpy(tzbuf, req_buf + sizeof(uint32_t),
794 (sizeof(struct qseecom_key_delete_ireq) -
795 sizeof(uint32_t)));
796 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
797 smc_id = TZ_OS_KS_DEL_KEY_ID;
798 desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
799 desc.args[0] = virt_to_phys(tzbuf);
800 desc.args[1] = tzbuflen;
801 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
802 ret = scm_call2(smc_id, &desc);
803 kzfree(tzbuf);
804 break;
805 }
806 case QSEOS_SET_KEY: {
807 u32 tzbuflen = PAGE_ALIGN(sizeof
808 (struct qseecom_key_select_ireq) -
809 sizeof(uint32_t));
810 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
811
812 if (!tzbuf)
813 return -ENOMEM;
814 memset(tzbuf, 0, tzbuflen);
815 memcpy(tzbuf, req_buf + sizeof(uint32_t),
816 (sizeof(struct qseecom_key_select_ireq) -
817 sizeof(uint32_t)));
818 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
819 smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
820 desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
821 desc.args[0] = virt_to_phys(tzbuf);
822 desc.args[1] = tzbuflen;
823 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
824 ret = scm_call2(smc_id, &desc);
825 kzfree(tzbuf);
826 break;
827 }
828 case QSEOS_UPDATE_KEY_USERINFO: {
829 u32 tzbuflen = PAGE_ALIGN(sizeof
830 (struct qseecom_key_userinfo_update_ireq) -
831 sizeof(uint32_t));
832 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
833
834 if (!tzbuf)
835 return -ENOMEM;
836 memset(tzbuf, 0, tzbuflen);
837 memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
838 (struct qseecom_key_userinfo_update_ireq) -
839 sizeof(uint32_t)));
840 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
841 smc_id = TZ_OS_KS_UPDATE_KEY_ID;
842 desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
843 desc.args[0] = virt_to_phys(tzbuf);
844 desc.args[1] = tzbuflen;
845 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
846 ret = scm_call2(smc_id, &desc);
847 kzfree(tzbuf);
848 break;
849 }
850 case QSEOS_TEE_OPEN_SESSION: {
851 struct qseecom_qteec_ireq *req;
852 struct qseecom_qteec_64bit_ireq *req_64bit;
853
854 smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
855 desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
856 if (qseecom.qsee_version < QSEE_VERSION_40) {
857 req = (struct qseecom_qteec_ireq *)req_buf;
858 desc.args[0] = req->app_id;
859 desc.args[1] = req->req_ptr;
860 desc.args[2] = req->req_len;
861 desc.args[3] = req->resp_ptr;
862 desc.args[4] = req->resp_len;
863 } else {
864 req_64bit = (struct qseecom_qteec_64bit_ireq *)
865 req_buf;
866 desc.args[0] = req_64bit->app_id;
867 desc.args[1] = req_64bit->req_ptr;
868 desc.args[2] = req_64bit->req_len;
869 desc.args[3] = req_64bit->resp_ptr;
870 desc.args[4] = req_64bit->resp_len;
871 }
872 ret = scm_call2(smc_id, &desc);
873 break;
874 }
875 case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
876 struct qseecom_qteec_ireq *req;
877 struct qseecom_qteec_64bit_ireq *req_64bit;
878
879 smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
880 desc.arginfo =
881 TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
882 if (qseecom.qsee_version < QSEE_VERSION_40) {
883 req = (struct qseecom_qteec_ireq *)req_buf;
884 desc.args[0] = req->app_id;
885 desc.args[1] = req->req_ptr;
886 desc.args[2] = req->req_len;
887 desc.args[3] = req->resp_ptr;
888 desc.args[4] = req->resp_len;
889 desc.args[5] = req->sglistinfo_ptr;
890 desc.args[6] = req->sglistinfo_len;
891 } else {
892 req_64bit = (struct qseecom_qteec_64bit_ireq *)
893 req_buf;
894 desc.args[0] = req_64bit->app_id;
895 desc.args[1] = req_64bit->req_ptr;
896 desc.args[2] = req_64bit->req_len;
897 desc.args[3] = req_64bit->resp_ptr;
898 desc.args[4] = req_64bit->resp_len;
899 desc.args[5] = req_64bit->sglistinfo_ptr;
900 desc.args[6] = req_64bit->sglistinfo_len;
901 }
902 ret = scm_call2(smc_id, &desc);
903 break;
904 }
905 case QSEOS_TEE_INVOKE_COMMAND: {
906 struct qseecom_qteec_ireq *req;
907 struct qseecom_qteec_64bit_ireq *req_64bit;
908
909 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
910 desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
911 if (qseecom.qsee_version < QSEE_VERSION_40) {
912 req = (struct qseecom_qteec_ireq *)req_buf;
913 desc.args[0] = req->app_id;
914 desc.args[1] = req->req_ptr;
915 desc.args[2] = req->req_len;
916 desc.args[3] = req->resp_ptr;
917 desc.args[4] = req->resp_len;
918 } else {
919 req_64bit = (struct qseecom_qteec_64bit_ireq *)
920 req_buf;
921 desc.args[0] = req_64bit->app_id;
922 desc.args[1] = req_64bit->req_ptr;
923 desc.args[2] = req_64bit->req_len;
924 desc.args[3] = req_64bit->resp_ptr;
925 desc.args[4] = req_64bit->resp_len;
926 }
927 ret = scm_call2(smc_id, &desc);
928 break;
929 }
930 case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
931 struct qseecom_qteec_ireq *req;
932 struct qseecom_qteec_64bit_ireq *req_64bit;
933
934 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
935 desc.arginfo =
936 TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
937 if (qseecom.qsee_version < QSEE_VERSION_40) {
938 req = (struct qseecom_qteec_ireq *)req_buf;
939 desc.args[0] = req->app_id;
940 desc.args[1] = req->req_ptr;
941 desc.args[2] = req->req_len;
942 desc.args[3] = req->resp_ptr;
943 desc.args[4] = req->resp_len;
944 desc.args[5] = req->sglistinfo_ptr;
945 desc.args[6] = req->sglistinfo_len;
946 } else {
947 req_64bit = (struct qseecom_qteec_64bit_ireq *)
948 req_buf;
949 desc.args[0] = req_64bit->app_id;
950 desc.args[1] = req_64bit->req_ptr;
951 desc.args[2] = req_64bit->req_len;
952 desc.args[3] = req_64bit->resp_ptr;
953 desc.args[4] = req_64bit->resp_len;
954 desc.args[5] = req_64bit->sglistinfo_ptr;
955 desc.args[6] = req_64bit->sglistinfo_len;
956 }
957 ret = scm_call2(smc_id, &desc);
958 break;
959 }
960 case QSEOS_TEE_CLOSE_SESSION: {
961 struct qseecom_qteec_ireq *req;
962 struct qseecom_qteec_64bit_ireq *req_64bit;
963
964 smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
965 desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
966 if (qseecom.qsee_version < QSEE_VERSION_40) {
967 req = (struct qseecom_qteec_ireq *)req_buf;
968 desc.args[0] = req->app_id;
969 desc.args[1] = req->req_ptr;
970 desc.args[2] = req->req_len;
971 desc.args[3] = req->resp_ptr;
972 desc.args[4] = req->resp_len;
973 } else {
974 req_64bit = (struct qseecom_qteec_64bit_ireq *)
975 req_buf;
976 desc.args[0] = req_64bit->app_id;
977 desc.args[1] = req_64bit->req_ptr;
978 desc.args[2] = req_64bit->req_len;
979 desc.args[3] = req_64bit->resp_ptr;
980 desc.args[4] = req_64bit->resp_len;
981 }
982 ret = scm_call2(smc_id, &desc);
983 break;
984 }
985 case QSEOS_TEE_REQUEST_CANCELLATION: {
986 struct qseecom_qteec_ireq *req;
987 struct qseecom_qteec_64bit_ireq *req_64bit;
988
989 smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
990 desc.arginfo =
991 TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
992 if (qseecom.qsee_version < QSEE_VERSION_40) {
993 req = (struct qseecom_qteec_ireq *)req_buf;
994 desc.args[0] = req->app_id;
995 desc.args[1] = req->req_ptr;
996 desc.args[2] = req->req_len;
997 desc.args[3] = req->resp_ptr;
998 desc.args[4] = req->resp_len;
999 } else {
1000 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1001 req_buf;
1002 desc.args[0] = req_64bit->app_id;
1003 desc.args[1] = req_64bit->req_ptr;
1004 desc.args[2] = req_64bit->req_len;
1005 desc.args[3] = req_64bit->resp_ptr;
1006 desc.args[4] = req_64bit->resp_len;
1007 }
1008 ret = scm_call2(smc_id, &desc);
1009 break;
1010 }
1011 case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
1012 struct qseecom_continue_blocked_request_ireq *req =
1013 (struct qseecom_continue_blocked_request_ireq *)
1014 req_buf;
1015 smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
1016 desc.arginfo =
1017 TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
1018 desc.args[0] = req->app_id;
1019 ret = scm_call2(smc_id, &desc);
1020 break;
1021 }
1022 default: {
1023 pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
1024 qseos_cmd_id);
1025 ret = -EINVAL;
1026 break;
1027 }
1028 } /*end of switch (qsee_cmd_id) */
1029 break;
1030 } /*end of case SCM_SVC_TZSCHEDULER*/
1031 default: {
1032 pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
1033 svc_id);
1034 ret = -EINVAL;
1035 break;
1036 }
1037 } /*end of switch svc_id */
1038 scm_resp->result = desc.ret[0];
1039 scm_resp->resp_type = desc.ret[1];
1040 scm_resp->data = desc.ret[2];
1041 pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
1042 svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
1043 pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
1044 scm_resp->result, scm_resp->resp_type, scm_resp->data);
1045 return ret;
1046}
1047
1048
1049static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1050 size_t cmd_len, void *resp_buf, size_t resp_len)
1051{
1052 if (!is_scm_armv8())
1053 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1054 resp_buf, resp_len);
1055 else
1056 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1057}
1058
1059static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1060 struct qseecom_register_listener_req *svc)
1061{
1062 struct qseecom_registered_listener_list *ptr;
1063 int unique = 1;
1064 unsigned long flags;
1065
1066 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1067 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1068 if (ptr->svc.listener_id == svc->listener_id) {
1069 pr_err("Service id: %u is already registered\n",
1070 ptr->svc.listener_id);
1071 unique = 0;
1072 break;
1073 }
1074 }
1075 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1076 return unique;
1077}
1078
1079static struct qseecom_registered_listener_list *__qseecom_find_svc(
1080 int32_t listener_id)
1081{
1082 struct qseecom_registered_listener_list *entry = NULL;
1083 unsigned long flags;
1084
1085 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1086 list_for_each_entry(entry,
1087 &qseecom.registered_listener_list_head, list) {
1088 if (entry->svc.listener_id == listener_id)
1089 break;
1090 }
1091 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1092
1093 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1094 pr_err("Service id: %u is not found\n", listener_id);
1095 return NULL;
1096 }
1097
1098 return entry;
1099}
1100
1101static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1102 struct qseecom_dev_handle *handle,
1103 struct qseecom_register_listener_req *listener)
1104{
1105 int ret = 0;
1106 struct qseecom_register_listener_ireq req;
1107 struct qseecom_register_listener_64bit_ireq req_64bit;
1108 struct qseecom_command_scm_resp resp;
1109 ion_phys_addr_t pa;
1110 void *cmd_buf = NULL;
1111 size_t cmd_len;
1112
1113 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001114 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001115 listener->ifd_data_fd);
1116 if (IS_ERR_OR_NULL(svc->ihandle)) {
1117 pr_err("Ion client could not retrieve the handle\n");
1118 return -ENOMEM;
1119 }
1120
1121 /* Get the physical address of the ION BUF */
1122 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1123 if (ret) {
1124 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1125 ret);
1126 return ret;
1127 }
1128 /* Populate the structure for sending scm call to load image */
1129 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1130 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1131 pr_err("ION memory mapping for listener shared buffer failed\n");
1132 return -ENOMEM;
1133 }
1134 svc->sb_phys = (phys_addr_t)pa;
1135
1136 if (qseecom.qsee_version < QSEE_VERSION_40) {
1137 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1138 req.listener_id = svc->svc.listener_id;
1139 req.sb_len = svc->sb_length;
1140 req.sb_ptr = (uint32_t)svc->sb_phys;
1141 cmd_buf = (void *)&req;
1142 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1143 } else {
1144 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1145 req_64bit.listener_id = svc->svc.listener_id;
1146 req_64bit.sb_len = svc->sb_length;
1147 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1148 cmd_buf = (void *)&req_64bit;
1149 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1150 }
1151
1152 resp.result = QSEOS_RESULT_INCOMPLETE;
1153
1154 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1155 &resp, sizeof(resp));
1156 if (ret) {
1157 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1158 return -EINVAL;
1159 }
1160
1161 if (resp.result != QSEOS_RESULT_SUCCESS) {
1162 pr_err("Error SB registration req: resp.result = %d\n",
1163 resp.result);
1164 return -EPERM;
1165 }
1166 return 0;
1167}
1168
1169static int qseecom_register_listener(struct qseecom_dev_handle *data,
1170 void __user *argp)
1171{
1172 int ret = 0;
1173 unsigned long flags;
1174 struct qseecom_register_listener_req rcvd_lstnr;
1175 struct qseecom_registered_listener_list *new_entry;
1176
1177 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1178 if (ret) {
1179 pr_err("copy_from_user failed\n");
1180 return ret;
1181 }
1182 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1183 rcvd_lstnr.sb_size))
1184 return -EFAULT;
1185
1186 data->listener.id = 0;
1187 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
1188 pr_err("Service is not unique and is already registered\n");
1189 data->released = true;
1190 return -EBUSY;
1191 }
1192
1193 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1194 if (!new_entry)
1195 return -ENOMEM;
1196 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1197 new_entry->rcv_req_flag = 0;
1198
1199 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1200 new_entry->sb_length = rcvd_lstnr.sb_size;
1201 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1202 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
1203 pr_err("qseecom_set_sb_memoryfailed\n");
1204 kzfree(new_entry);
1205 return -ENOMEM;
1206 }
1207
1208 data->listener.id = rcvd_lstnr.listener_id;
1209 init_waitqueue_head(&new_entry->rcv_req_wq);
1210 init_waitqueue_head(&new_entry->listener_block_app_wq);
1211 new_entry->send_resp_flag = 0;
1212 new_entry->listener_in_use = false;
1213 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1214 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1215 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1216
1217 return ret;
1218}
1219
/*
 * Unregister-listener handler: deregister data->listener.id from QSEE,
 * abort and drain any threads blocked on this handle, unlink and free
 * the tracking entry, and finally release its ion mapping.
 *
 * Returns 0 on success, -EPERM if TZ rejects the deregistration, the
 * scm-call errno on transport failure, or -ERESTARTSYS if the drain
 * wait was interrupted.
 */
static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	int ret = 0;
	unsigned long flags;
	uint32_t unmap_mem = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL; /* Retrieve phy addr */

	/* Tell QSEE this listener service is going away */
	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
					sizeof(req), &resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		return ret;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		return -EPERM;
	}

	/* Mark the handle aborted and wake any thread waiting for requests */
	data->abort = 1;
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
			list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			wake_up_all(&ptr_svc->rcv_req_wq);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Wait until all other in-flight ioctls on this handle drain */
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			break;
		}
	}

	/*
	 * Unlink and free the tracking entry under the lock; the ion
	 * handle is stashed so the unmap/free can happen lock-free below.
	 */
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc,
			&qseecom.registered_listener_list_head, list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			if (ptr_svc->sb_virt) {
				unmap_mem = 1;
				ihandle = ptr_svc->ihandle;
			}
			list_del(&ptr_svc->list);
			kzfree(ptr_svc);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Unmap the memory */
	if (unmap_mem) {
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	data->released = true;
	return ret;
}
1293
/*
 * Apply bus bandwidth mode @mode through the msm_bus scaling client and
 * keep the QSEE core-src clock in step (clock off for INACTIVE, on for
 * any active mode).  Callers serialize via qsee_bw_mutex — TODO confirm
 * for all call sites.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* revert the clock transition performed above */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		/*
		 * NOTE(review): current_mode is updated even when the bus
		 * request above failed — verify this is intentional.
		 */
		qseecom.current_mode = mode;
	}
	return ret;
}
1330
/*
 * Deferred work scheduled by the bw scale-down timer: drop the bus
 * vote to INACTIVE (only if the timer is still marked running) and
 * clear the running flag.  Lock order app_access_lock -> qsee_bw_mutex
 * must match the rest of the driver.
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
1343
/*
 * Timer callback for bw_scale_down_timer: defers the actual INACTIVE
 * bus request to process context via the workqueue, since the work
 * handler takes mutexes which cannot be taken in timer context.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1348
1349static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1350{
1351 struct qseecom_clk *qclk;
1352 int ret = 0;
1353
1354 mutex_lock(&clk_access_lock);
1355 if (ce == CLK_QSEE)
1356 qclk = &qseecom.qsee;
1357 else
1358 qclk = &qseecom.ce_drv;
1359
1360 if (qclk->clk_access_cnt > 2) {
1361 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1362 ret = -EINVAL;
1363 goto err_dec_ref_cnt;
1364 }
1365 if (qclk->clk_access_cnt == 2)
1366 qclk->clk_access_cnt--;
1367
1368err_dec_ref_cnt:
1369 mutex_unlock(&clk_access_lock);
1370 return ret;
1371}
1372
1373
/*
 * Apply a bandwidth vote under qsee_bw_mutex.  @mode == 0 means
 * "restore the aggregated client votes" (cumulative_mode, capped at
 * HIGH); any other mode is applied directly.  If the scale-down timer
 * is pending, cancel it and drop the clock reference it held.
 *
 * Returns 0 on success or a negative errno.
 */
static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
{
	int32_t ret = 0;
	int32_t request_mode = INACTIVE;

	mutex_lock(&qsee_bw_mutex);
	if (mode == 0) {
		/* cap the cumulative vote at HIGH */
		if (qseecom.cumulative_mode > MEDIUM)
			request_mode = HIGH;
		else
			request_mode = qseecom.cumulative_mode;
	} else {
		request_mode = mode;
	}

	ret = __qseecom_set_msm_bus_request(request_mode);
	if (ret) {
		pr_err("set msm bus request failed (%d),request_mode (%d)\n",
			ret, request_mode);
		goto err_scale_timer;
	}

	if (qseecom.timer_running) {
		/* release the clock reference held on behalf of the timer */
		ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
		if (ret) {
			pr_err("Failed to decrease clk ref count.\n");
			goto err_scale_timer;
		}
		del_timer_sync(&(qseecom.bw_scale_down_timer));
		qseecom.timer_running = false;
	}
err_scale_timer:
	mutex_unlock(&qsee_bw_mutex);
	return ret;
}
1409
1410
1411static int qseecom_unregister_bus_bandwidth_needs(
1412 struct qseecom_dev_handle *data)
1413{
1414 int32_t ret = 0;
1415
1416 qseecom.cumulative_mode -= data->mode;
1417 data->mode = INACTIVE;
1418
1419 return ret;
1420}
1421
1422static int __qseecom_register_bus_bandwidth_needs(
1423 struct qseecom_dev_handle *data, uint32_t request_mode)
1424{
1425 int32_t ret = 0;
1426
1427 if (data->mode == INACTIVE) {
1428 qseecom.cumulative_mode += request_mode;
1429 data->mode = request_mode;
1430 } else {
1431 if (data->mode != request_mode) {
1432 qseecom.cumulative_mode -= data->mode;
1433 qseecom.cumulative_mode += request_mode;
1434 data->mode = request_mode;
1435 }
1436 }
1437 return ret;
1438}
1439
1440static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1441{
1442 int ret = 0;
1443
1444 ret = qsee_vote_for_clock(data, CLK_DFAB);
1445 if (ret) {
1446 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1447 goto perf_enable_exit;
1448 }
1449 ret = qsee_vote_for_clock(data, CLK_SFPB);
1450 if (ret) {
1451 qsee_disable_clock_vote(data, CLK_DFAB);
1452 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1453 goto perf_enable_exit;
1454 }
1455
1456perf_enable_exit:
1457 return ret;
1458}
1459
1460static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1461 void __user *argp)
1462{
1463 int32_t ret = 0;
1464 int32_t req_mode;
1465
1466 if (qseecom.no_clock_support)
1467 return 0;
1468
1469 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1470 if (ret) {
1471 pr_err("copy_from_user failed\n");
1472 return ret;
1473 }
1474 if (req_mode > HIGH) {
1475 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1476 return -EINVAL;
1477 }
1478
1479 /*
1480 * Register bus bandwidth needs if bus scaling feature is enabled;
1481 * otherwise, qseecom enable/disable clocks for the client directly.
1482 */
1483 if (qseecom.support_bus_scaling) {
1484 mutex_lock(&qsee_bw_mutex);
1485 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1486 mutex_unlock(&qsee_bw_mutex);
1487 } else {
1488 pr_debug("Bus scaling feature is NOT enabled\n");
1489 pr_debug("request bandwidth mode %d for the client\n",
1490 req_mode);
1491 if (req_mode != INACTIVE) {
1492 ret = qseecom_perf_enable(data);
1493 if (ret)
1494 pr_err("Failed to vote for clock with err %d\n",
1495 ret);
1496 } else {
1497 qsee_disable_clock_vote(data, CLK_DFAB);
1498 qsee_disable_clock_vote(data, CLK_SFPB);
1499 }
1500 }
1501 return ret;
1502}
1503
1504static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1505{
1506 if (qseecom.no_clock_support)
1507 return;
1508
1509 mutex_lock(&qsee_bw_mutex);
1510 qseecom.bw_scale_down_timer.expires = jiffies +
1511 msecs_to_jiffies(duration);
1512 mod_timer(&(qseecom.bw_scale_down_timer),
1513 qseecom.bw_scale_down_timer.expires);
1514 qseecom.timer_running = true;
1515 mutex_unlock(&qsee_bw_mutex);
1516}
1517
1518static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1519{
1520 if (!qseecom.support_bus_scaling)
1521 qsee_disable_clock_vote(data, CLK_SFPB);
1522 else
1523 __qseecom_add_bw_scale_down_timer(
1524 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1525}
1526
1527static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1528{
1529 int ret = 0;
1530
1531 if (qseecom.support_bus_scaling) {
1532 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1533 if (ret)
1534 pr_err("Failed to set bw MEDIUM.\n");
1535 } else {
1536 ret = qsee_vote_for_clock(data, CLK_SFPB);
1537 if (ret)
1538 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1539 }
1540 return ret;
1541}
1542
1543static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1544 void __user *argp)
1545{
1546 ion_phys_addr_t pa;
1547 int32_t ret;
1548 struct qseecom_set_sb_mem_param_req req;
1549 size_t len;
1550
1551 /* Copy the relevant information needed for loading the image */
1552 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1553 return -EFAULT;
1554
1555 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1556 (req.sb_len == 0)) {
1557 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1558 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1559 return -EFAULT;
1560 }
1561 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1562 req.sb_len))
1563 return -EFAULT;
1564
1565 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001566 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001567 req.ifd_data_fd);
1568 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1569 pr_err("Ion client could not retrieve the handle\n");
1570 return -ENOMEM;
1571 }
1572 /* Get the physical address of the ION BUF */
1573 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1574 if (ret) {
1575
1576 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1577 ret);
1578 return ret;
1579 }
1580
1581 if (len < req.sb_len) {
1582 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1583 req.sb_len, len);
1584 return -EINVAL;
1585 }
1586 /* Populate the structure for sending scm call to load image */
1587 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1588 data->client.ihandle);
1589 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1590 pr_err("ION memory mapping for client shared buf failed\n");
1591 return -ENOMEM;
1592 }
1593 data->client.sb_phys = (phys_addr_t)pa;
1594 data->client.sb_length = req.sb_len;
1595 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1596 return 0;
1597}
1598
1599static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
1600{
1601 int ret;
1602
1603 ret = (qseecom.send_resp_flag != 0);
1604 return ret || data->abort;
1605}
1606
1607static int __qseecom_reentrancy_listener_has_sent_rsp(
1608 struct qseecom_dev_handle *data,
1609 struct qseecom_registered_listener_list *ptr_svc)
1610{
1611 int ret;
1612
1613 ret = (ptr_svc->send_resp_flag != 0);
1614 return ret || data->abort;
1615}
1616
1617static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
1618 struct qseecom_command_scm_resp *resp,
1619 struct qseecom_client_listener_data_irsp *send_data_rsp,
1620 struct qseecom_registered_listener_list *ptr_svc,
1621 uint32_t lstnr) {
1622 int ret = 0;
1623
1624 send_data_rsp->status = QSEOS_RESULT_FAILURE;
1625 qseecom.send_resp_flag = 0;
1626 send_data_rsp->qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
1627 send_data_rsp->listener_id = lstnr;
1628 if (ptr_svc)
1629 pr_warn("listener_id:%x, lstnr: %x\n",
1630 ptr_svc->svc.listener_id, lstnr);
1631 if (ptr_svc && ptr_svc->ihandle) {
1632 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
1633 ptr_svc->sb_virt, ptr_svc->sb_length,
1634 ION_IOC_CLEAN_INV_CACHES);
1635 if (ret) {
1636 pr_err("cache operation failed %d\n", ret);
1637 return ret;
1638 }
1639 }
1640
1641 if (lstnr == RPMB_SERVICE) {
1642 ret = __qseecom_enable_clk(CLK_QSEE);
1643 if (ret)
1644 return ret;
1645 }
1646 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, send_data_rsp,
1647 sizeof(send_data_rsp), resp, sizeof(*resp));
1648 if (ret) {
1649 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1650 ret, data->client.app_id);
1651 if (lstnr == RPMB_SERVICE)
1652 __qseecom_disable_clk(CLK_QSEE);
1653 return ret;
1654 }
1655 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1656 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1657 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1658 resp->result, data->client.app_id, lstnr);
1659 ret = -EINVAL;
1660 }
1661 if (lstnr == RPMB_SERVICE)
1662 __qseecom_disable_clk(CLK_QSEE);
1663 return ret;
1664}
1665
1666static void __qseecom_clean_listener_sglistinfo(
1667 struct qseecom_registered_listener_list *ptr_svc)
1668{
1669 if (ptr_svc->sglist_cnt) {
1670 memset(ptr_svc->sglistinfo_ptr, 0,
1671 SGLISTINFO_TABLE_SIZE);
1672 ptr_svc->sglist_cnt = 0;
1673 }
1674}
1675
/*
 * Service QSEOS_RESULT_INCOMPLETE responses from TZ (non-reentrant
 * path).  Each round TZ names a listener in resp->data: wake that
 * listener, block (signals masked) until userspace posts a response,
 * then return the response — with the physical address of the sglist
 * table — to TZ, looping while TZ keeps reporting INCOMPLETE.
 *
 * Returns 0 on success, -ENODEV if the client aborted mid-wait, or a
 * negative errno on scm/cache-op failure.
 */
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp;
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;	/* TZ reports the listener id here */
		/*
		 * Wake up blocking listener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/* On any lookup failure, tell TZ so it does not stay blocked */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_warn("Service requested does not exist\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -ERESTARTSYS;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);
		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		do {
			/*
			 * When reentrancy is not supported, check global
			 * send_resp_flag; otherwise, check this listener's
			 * send_resp_flag.
			 */
			if (!qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_listener_has_sent_rsp(data))) {
				break;
			}

			if (qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}

		qseecom.send_resp_flag = 0;
		ptr_svc->send_resp_flag = 0;
		table = ptr_svc->sglistinfo_ptr;
		/* 32- vs 64-bit response layout depends on QSEE version */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id  = lstnr;
			send_data_rsp.status = status;
			send_data_rsp.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
			send_data_rsp.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id  = lstnr;
			send_data_rsp_64bit.status = status;
			send_data_rsp_64bit.sglistinfo_ptr =
				virt_to_phys(table);
			send_data_rsp_64bit.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* cmd id is the first u32 of both response layouts */
		if (qseecom.whitelist_support == false)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}

		/* RPMB/SSD listener traffic requires the QSEE clock held */
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		ptr_svc->listener_in_use = false;
		__qseecom_clean_listener_sglistinfo(ptr_svc);
		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
				__qseecom_disable_clk(CLK_QSEE);
			return ret;
		}
		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
		}
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
			__qseecom_disable_clk(CLK_QSEE);

	}
	if (rc)
		return rc;

	return ret;
}
1844
/*
 * Handle QSEOS_RESULT_BLOCKED_ON_LISTENER for a reentrant TZ app:
 * sleep (signals masked, app_access_lock dropped) until the listener
 * named in resp->data is free, then send
 * QSEOS_CONTINUE_BLOCKED_REQ_COMMAND so TZ unblocks the app.  On
 * success resp->result is rewritten to QSEOS_RESULT_INCOMPLETE so the
 * caller's incomplete-command loop continues.
 *
 * @ptr_app may be NULL, in which case the app is looked up from
 * data->client (app_id + app_name).
 */
int __qseecom_process_reentrancy_blocked_on_listener(
				struct qseecom_command_scm_resp *resp,
				struct qseecom_registered_app_list *ptr_app,
				struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	sigset_t new_sigset, old_sigset;
	unsigned long flags;
	bool found_app = false;

	if (!resp || !data) {
		pr_err("invalid resp or data pointer\n");
		ret = -EINVAL;
		goto exit;
	}

	/* find app_id & img_name from list */
	if (!ptr_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
					list) {
			if ((ptr_app->app_id == data->client.app_id) &&
				(!strcmp(ptr_app->app_name,
						data->client.app_name))) {
				found_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
					flags);
		if (!found_app) {
			pr_err("app_id %d (%s) is not found\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -ENOENT;
			goto exit;
		}
	}

	list_ptr = __qseecom_find_svc(resp->data);
	if (!list_ptr) {
		pr_err("Invalid listener ID\n");
		ret = -ENODATA;
		goto exit;
	}
	pr_debug("lsntr %d in_use = %d\n",
			resp->data, list_ptr->listener_in_use);
	ptr_svc = NULL; /* NOTE(review): no such local — comment only */
	ptr_app->blocked_on_listener_id = resp->data;
	/* sleep until listener is available */
	do {
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		/* block signals and drop app_access_lock across the wait */
		sigfillset(&new_sigset);
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
		mutex_unlock(&app_access_lock);
		do {
			if (!wait_event_freezable(
				list_ptr->listener_block_app_wq,
				!list_ptr->listener_in_use)) {
				break;
			}
		} while (1);
		mutex_lock(&app_access_lock);
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
	} while (list_ptr->listener_in_use == true);
	ptr_app->blocked_on_listener_id = 0;
	/* notify the blocked app that listener is available */
	pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
		resp->data, data->client.app_id,
		data->client.app_name);
	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
	ireq.app_id = data->client.app_id;
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
	if (ret) {
		pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
			data->client.app_id,
			data->client.app_name, ret);
		goto exit;
	}
	/*
	 * After TZ app is unblocked, then continue to next case
	 * for incomplete request processing
	 */
	resp->result = QSEOS_RESULT_INCOMPLETE;
exit:
	return ret;
}
1939
/*
 * Reentrancy-enabled variant of __qseecom_process_incomplete_cmd():
 * the same wake-listener / wait-for-userspace / respond-to-TZ loop,
 * but app_access_lock is dropped while waiting, and a TZ response of
 * QSEOS_RESULT_BLOCKED_ON_LISTENER is handled by blocking this app
 * until the named listener frees up.
 *
 * Returns 0 on success, -ENODEV if the client aborted mid-wait, or a
 * negative errno on scm/cache-op failure.
 */
static int __qseecom_reentrancy_process_incomplete_cmd(
					struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp;
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;	/* TZ reports the listener id here */
		/*
		 * Wake up blocking listener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			return -EINVAL;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			return -EINVAL;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_warn("Service requested does not exist\n");
			return -ERESTARTSYS;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);

		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		/* unlock mutex btw waking listener and sleep-wait */
		mutex_unlock(&app_access_lock);
		do {
			if (!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);
		/* lock mutex again after resp sent */
		mutex_lock(&app_access_lock);
		ptr_svc->send_resp_flag = 0;
		qseecom.send_resp_flag = 0;

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status  = QSEOS_RESULT_FAILURE;
		} else {
			status  = QSEOS_RESULT_SUCCESS;
		}
		table = ptr_svc->sglistinfo_ptr;
		/* 32- vs 64-bit response layout depends on QSEE version */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id  = lstnr;
			send_data_rsp.status = status;
			send_data_rsp.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
			send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id  = lstnr;
			send_data_rsp_64bit.status = status;
			send_data_rsp_64bit.sglistinfo_ptr =
				virt_to_phys(table);
			send_data_rsp_64bit.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* cmd id is the first u32 of both response layouts */
		if (qseecom.whitelist_support == false)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}
		if (lstnr == RPMB_SERVICE) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		ptr_svc->listener_in_use = false;
		__qseecom_clean_listener_sglistinfo(ptr_svc);
		/* listener is free again; wake any app blocked on it */
		wake_up_interruptible(&ptr_svc->listener_block_app_wq);

		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			goto exit;
		}

		switch (resp->result) {
		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
				lstnr, data->client.app_id, resp->data);
			if (lstnr == resp->data) {
				pr_err("lstnr %d should not be blocked!\n",
					lstnr);
				ret = -EINVAL;
				goto exit;
			}
			ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, NULL, data);
			if (ret) {
				pr_err("failed to process App(%d) %s blocked on listener %d\n",
					data->client.app_id,
					data->client.app_name, resp->data);
				goto exit;
			}
			/* fall through: resp->result is now INCOMPLETE */
		case QSEOS_RESULT_SUCCESS:
		case QSEOS_RESULT_INCOMPLETE:
			break;
		default:
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
			goto exit;
		}
exit:
		if (lstnr == RPMB_SERVICE)
			__qseecom_disable_clk(CLK_QSEE);

	}
	if (rc)
		return rc;

	return ret;
}
2116
2117/*
2118 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2119 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2120 * So, needs to first check if no app blocked before sending OS level scm call,
2121 * then wait until all apps are unblocked.
2122 */
2123static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2124{
2125 sigset_t new_sigset, old_sigset;
2126
2127 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2128 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2129 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2130 /* thread sleep until this app unblocked */
2131 while (qseecom.app_block_ref_cnt > 0) {
2132 sigfillset(&new_sigset);
2133 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2134 mutex_unlock(&app_access_lock);
2135 do {
2136 if (!wait_event_freezable(qseecom.app_block_wq,
2137 (qseecom.app_block_ref_cnt == 0)))
2138 break;
2139 } while (1);
2140 mutex_lock(&app_access_lock);
2141 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2142 }
2143 }
2144}
2145
2146/*
2147 * scm_call of send data will fail if this TA is blocked or there are more
2148 * than one TA requesting listener services; So, first check to see if need
2149 * to wait.
2150 */
2151static void __qseecom_reentrancy_check_if_this_app_blocked(
2152 struct qseecom_registered_app_list *ptr_app)
2153{
2154 sigset_t new_sigset, old_sigset;
2155
2156 if (qseecom.qsee_reentrancy_support) {
2157 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2158 /* thread sleep until this app unblocked */
2159 sigfillset(&new_sigset);
2160 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2161 mutex_unlock(&app_access_lock);
2162 do {
2163 if (!wait_event_freezable(qseecom.app_block_wq,
2164 (!ptr_app->app_blocked &&
2165 qseecom.app_block_ref_cnt <= 1)))
2166 break;
2167 } while (1);
2168 mutex_lock(&app_access_lock);
2169 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2170 }
2171 }
2172}
2173
/*
 * Resolve the QSEE app id for req.app_name.
 *
 * First scans the locally registered app list; if the app is not found
 * there, issues an APP_LOOKUP scm call to QSEE.  On return, *app_id holds
 * the app id, or 0 if the app is not loaded anywhere.
 * Returns 0 on success (including "not loaded"), negative errno on error.
 */
static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
					uint32_t *app_id)
{
	int32_t ret;
	struct qseecom_command_scm_resp resp;
	bool found_app = false;
	struct qseecom_registered_app_list *entry = NULL;
	unsigned long flags = 0;

	if (!app_id) {
		pr_err("Null pointer to app_id\n");
		return -EINVAL;
	}
	*app_id = 0;

	/* check if app exists and has been registered locally */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(entry,
			&qseecom.registered_app_list_head, list) {
		if (!strcmp(entry->app_name, req.app_name)) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (found_app) {
		/* entry still points at the matched node after unlock */
		pr_debug("Found app with id %d\n", entry->app_id);
		*app_id = entry->app_id;
		return 0;
	}

	memset((void *)&resp, 0, sizeof(resp));

	/* SCM_CALL to check if app_id for the mentioned app exists */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_check_app_ireq),
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to check if app is already loaded failed\n");
		return -EINVAL;
	}

	/* QSEOS_RESULT_FAILURE here means "app not loaded", not an error */
	if (resp.result == QSEOS_RESULT_FAILURE)
		return 0;

	switch (resp.resp_type) {
	/*qsee returned listener type response */
	case QSEOS_LISTENER_ID:
		pr_err("resp type is of listener type instead of app");
		return -EINVAL;
	case QSEOS_APP_ID:
		*app_id = resp.data;
		return 0;
	default:
		pr_err("invalid resp type (%d) from qsee",
				resp.resp_type);
		return -ENODEV;
	}
}
2233
2234static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2235{
2236 struct qseecom_registered_app_list *entry = NULL;
2237 unsigned long flags = 0;
2238 u32 app_id = 0;
2239 struct ion_handle *ihandle; /* Ion handle */
2240 struct qseecom_load_img_req load_img_req;
2241 int32_t ret = 0;
2242 ion_phys_addr_t pa = 0;
2243 size_t len;
2244 struct qseecom_command_scm_resp resp;
2245 struct qseecom_check_app_ireq req;
2246 struct qseecom_load_app_ireq load_req;
2247 struct qseecom_load_app_64bit_ireq load_req_64bit;
2248 void *cmd_buf = NULL;
2249 size_t cmd_len;
2250 bool first_time = false;
2251
2252 /* Copy the relevant information needed for loading the image */
2253 if (copy_from_user(&load_img_req,
2254 (void __user *)argp,
2255 sizeof(struct qseecom_load_img_req))) {
2256 pr_err("copy_from_user failed\n");
2257 return -EFAULT;
2258 }
2259
2260 /* Check and load cmnlib */
2261 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2262 if (!qseecom.commonlib_loaded &&
2263 load_img_req.app_arch == ELFCLASS32) {
2264 ret = qseecom_load_commonlib_image(data, "cmnlib");
2265 if (ret) {
2266 pr_err("failed to load cmnlib\n");
2267 return -EIO;
2268 }
2269 qseecom.commonlib_loaded = true;
2270 pr_debug("cmnlib is loaded\n");
2271 }
2272
2273 if (!qseecom.commonlib64_loaded &&
2274 load_img_req.app_arch == ELFCLASS64) {
2275 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2276 if (ret) {
2277 pr_err("failed to load cmnlib64\n");
2278 return -EIO;
2279 }
2280 qseecom.commonlib64_loaded = true;
2281 pr_debug("cmnlib64 is loaded\n");
2282 }
2283 }
2284
2285 if (qseecom.support_bus_scaling) {
2286 mutex_lock(&qsee_bw_mutex);
2287 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2288 mutex_unlock(&qsee_bw_mutex);
2289 if (ret)
2290 return ret;
2291 }
2292
2293 /* Vote for the SFPB clock */
2294 ret = __qseecom_enable_clk_scale_up(data);
2295 if (ret)
2296 goto enable_clk_err;
2297
2298 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2299 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2300 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2301
2302 ret = __qseecom_check_app_exists(req, &app_id);
2303 if (ret < 0)
2304 goto loadapp_err;
2305
2306 if (app_id) {
2307 pr_debug("App id %d (%s) already exists\n", app_id,
2308 (char *)(req.app_name));
2309 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2310 list_for_each_entry(entry,
2311 &qseecom.registered_app_list_head, list){
2312 if (entry->app_id == app_id) {
2313 entry->ref_cnt++;
2314 break;
2315 }
2316 }
2317 spin_unlock_irqrestore(
2318 &qseecom.registered_app_list_lock, flags);
2319 ret = 0;
2320 } else {
2321 first_time = true;
2322 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2323 (char *)(load_img_req.img_name));
2324 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002325 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002326 load_img_req.ifd_data_fd);
2327 if (IS_ERR_OR_NULL(ihandle)) {
2328 pr_err("Ion client could not retrieve the handle\n");
2329 ret = -ENOMEM;
2330 goto loadapp_err;
2331 }
2332
2333 /* Get the physical address of the ION BUF */
2334 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2335 if (ret) {
2336 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2337 ret);
2338 goto loadapp_err;
2339 }
2340 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2341 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2342 len, load_img_req.mdt_len,
2343 load_img_req.img_len);
2344 ret = -EINVAL;
2345 goto loadapp_err;
2346 }
2347 /* Populate the structure for sending scm call to load image */
2348 if (qseecom.qsee_version < QSEE_VERSION_40) {
2349 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2350 load_req.mdt_len = load_img_req.mdt_len;
2351 load_req.img_len = load_img_req.img_len;
2352 strlcpy(load_req.app_name, load_img_req.img_name,
2353 MAX_APP_NAME_SIZE);
2354 load_req.phy_addr = (uint32_t)pa;
2355 cmd_buf = (void *)&load_req;
2356 cmd_len = sizeof(struct qseecom_load_app_ireq);
2357 } else {
2358 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2359 load_req_64bit.mdt_len = load_img_req.mdt_len;
2360 load_req_64bit.img_len = load_img_req.img_len;
2361 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2362 MAX_APP_NAME_SIZE);
2363 load_req_64bit.phy_addr = (uint64_t)pa;
2364 cmd_buf = (void *)&load_req_64bit;
2365 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2366 }
2367
2368 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2369 ION_IOC_CLEAN_INV_CACHES);
2370 if (ret) {
2371 pr_err("cache operation failed %d\n", ret);
2372 goto loadapp_err;
2373 }
2374
2375 /* SCM_CALL to load the app and get the app_id back */
2376 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2377 cmd_len, &resp, sizeof(resp));
2378 if (ret) {
2379 pr_err("scm_call to load app failed\n");
2380 if (!IS_ERR_OR_NULL(ihandle))
2381 ion_free(qseecom.ion_clnt, ihandle);
2382 ret = -EINVAL;
2383 goto loadapp_err;
2384 }
2385
2386 if (resp.result == QSEOS_RESULT_FAILURE) {
2387 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2388 if (!IS_ERR_OR_NULL(ihandle))
2389 ion_free(qseecom.ion_clnt, ihandle);
2390 ret = -EFAULT;
2391 goto loadapp_err;
2392 }
2393
2394 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2395 ret = __qseecom_process_incomplete_cmd(data, &resp);
2396 if (ret) {
2397 pr_err("process_incomplete_cmd failed err: %d\n",
2398 ret);
2399 if (!IS_ERR_OR_NULL(ihandle))
2400 ion_free(qseecom.ion_clnt, ihandle);
2401 ret = -EFAULT;
2402 goto loadapp_err;
2403 }
2404 }
2405
2406 if (resp.result != QSEOS_RESULT_SUCCESS) {
2407 pr_err("scm_call failed resp.result unknown, %d\n",
2408 resp.result);
2409 if (!IS_ERR_OR_NULL(ihandle))
2410 ion_free(qseecom.ion_clnt, ihandle);
2411 ret = -EFAULT;
2412 goto loadapp_err;
2413 }
2414
2415 app_id = resp.data;
2416
2417 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2418 if (!entry) {
2419 ret = -ENOMEM;
2420 goto loadapp_err;
2421 }
2422 entry->app_id = app_id;
2423 entry->ref_cnt = 1;
2424 entry->app_arch = load_img_req.app_arch;
2425 /*
2426 * keymaster app may be first loaded as "keymaste" by qseecomd,
2427 * and then used as "keymaster" on some targets. To avoid app
2428 * name checking error, register "keymaster" into app_list and
2429 * thread private data.
2430 */
2431 if (!strcmp(load_img_req.img_name, "keymaste"))
2432 strlcpy(entry->app_name, "keymaster",
2433 MAX_APP_NAME_SIZE);
2434 else
2435 strlcpy(entry->app_name, load_img_req.img_name,
2436 MAX_APP_NAME_SIZE);
2437 entry->app_blocked = false;
2438 entry->blocked_on_listener_id = 0;
2439
2440 /* Deallocate the handle */
2441 if (!IS_ERR_OR_NULL(ihandle))
2442 ion_free(qseecom.ion_clnt, ihandle);
2443
2444 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2445 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2446 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2447 flags);
2448
2449 pr_warn("App with id %u (%s) now loaded\n", app_id,
2450 (char *)(load_img_req.img_name));
2451 }
2452 data->client.app_id = app_id;
2453 data->client.app_arch = load_img_req.app_arch;
2454 if (!strcmp(load_img_req.img_name, "keymaste"))
2455 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2456 else
2457 strlcpy(data->client.app_name, load_img_req.img_name,
2458 MAX_APP_NAME_SIZE);
2459 load_img_req.app_id = app_id;
2460 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2461 pr_err("copy_to_user failed\n");
2462 ret = -EFAULT;
2463 if (first_time == true) {
2464 spin_lock_irqsave(
2465 &qseecom.registered_app_list_lock, flags);
2466 list_del(&entry->list);
2467 spin_unlock_irqrestore(
2468 &qseecom.registered_app_list_lock, flags);
2469 kzfree(entry);
2470 }
2471 }
2472
2473loadapp_err:
2474 __qseecom_disable_clk_scale_down(data);
2475enable_clk_err:
2476 if (qseecom.support_bus_scaling) {
2477 mutex_lock(&qsee_bw_mutex);
2478 qseecom_unregister_bus_bandwidth_needs(data);
2479 mutex_unlock(&qsee_bw_mutex);
2480 }
2481 return ret;
2482}
2483
2484static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2485{
2486 int ret = 1; /* Set unload app */
2487
2488 wake_up_all(&qseecom.send_resp_wq);
2489 if (qseecom.qsee_reentrancy_support)
2490 mutex_unlock(&app_access_lock);
2491 while (atomic_read(&data->ioctl_count) > 1) {
2492 if (wait_event_freezable(data->abort_wq,
2493 atomic_read(&data->ioctl_count) <= 1)) {
2494 pr_err("Interrupted from abort\n");
2495 ret = -ERESTARTSYS;
2496 break;
2497 }
2498 }
2499 if (qseecom.qsee_reentrancy_support)
2500 mutex_lock(&app_access_lock);
2501 return ret;
2502}
2503
2504static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2505{
2506 int ret = 0;
2507
2508 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2509 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2510 ion_free(qseecom.ion_clnt, data->client.ihandle);
2511 data->client.ihandle = NULL;
2512 }
2513 return ret;
2514}
2515
/*
 * Unload the TZ app associated with this client handle.
 *
 * Skips keymaster entirely (kept resident in TZ).  Otherwise drains other
 * in-flight ioctls on the handle, waits until no app is blocked, and if
 * this is the last local reference (or the app crashed) sends the
 * APP_SHUTDOWN scm call and removes the entry from the registered app
 * list.  The client's ION shared buffer is always unmapped and the handle
 * marked released, even on error.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;
	bool found_app = false;
	bool found_dead_app = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* keymaster is never shut down in TZ; just release local resources */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	/* drain other ioctl threads, then wait until no TZ app is blocked */
	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/* last ref or crash: really unload */
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				/* id matches but name doesn't: stale entry */
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for sending scm call to load image */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
								req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
					data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
					data->client.app_id);
		/* QSEE may need listener round-trips to finish the shutdown */
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
									ret);
				goto unload_exit;
			}
		}
	}

	if (found_app) {
		/* adjust/clear the local ref count under the list lock */
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}
unload_exit:
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2636
2637static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2638 unsigned long virt)
2639{
2640 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2641}
2642
2643static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2644 unsigned long virt)
2645{
2646 return (uintptr_t)data->client.sb_virt +
2647 (virt - data->client.user_virt_sb_base);
2648}
2649
2650int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2651 struct qseecom_send_svc_cmd_req *req_ptr,
2652 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2653{
2654 int ret = 0;
2655 void *req_buf = NULL;
2656
2657 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2658 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2659 req_ptr, send_svc_ireq_ptr);
2660 return -EINVAL;
2661 }
2662
2663 /* Clients need to ensure req_buf is at base offset of shared buffer */
2664 if ((uintptr_t)req_ptr->cmd_req_buf !=
2665 data_ptr->client.user_virt_sb_base) {
2666 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2667 return -EINVAL;
2668 }
2669
2670 if (data_ptr->client.sb_length <
2671 sizeof(struct qseecom_rpmb_provision_key)) {
2672 pr_err("shared buffer is too small to hold key type\n");
2673 return -EINVAL;
2674 }
2675 req_buf = data_ptr->client.sb_virt;
2676
2677 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2678 send_svc_ireq_ptr->key_type =
2679 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2680 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2681 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2682 data_ptr, (uintptr_t)req_ptr->resp_buf));
2683 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2684
2685 return ret;
2686}
2687
2688int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2689 struct qseecom_send_svc_cmd_req *req_ptr,
2690 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2691{
2692 int ret = 0;
2693 uint32_t reqd_len_sb_in = 0;
2694
2695 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2696 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2697 req_ptr, send_svc_ireq_ptr);
2698 return -EINVAL;
2699 }
2700
2701 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2702 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2703 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2704 pr_err("Required: %u, Available: %zu\n",
2705 reqd_len_sb_in, data_ptr->client.sb_length);
2706 return -ENOMEM;
2707 }
2708
2709 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2710 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2711 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2712 data_ptr, (uintptr_t)req_ptr->resp_buf));
2713 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2714
2715 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2716 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2717
2718
2719 return ret;
2720}
2721
/*
 * Validate a userspace send-service request against the client's shared
 * buffer: non-NULL buffers, buffers inside the shared buffer, non-zero
 * lengths, and no integer overflow in any pointer + length combination.
 * Returns 0 if valid, -EINVAL on bad input, -ENOMEM if cmd+resp do not
 * fit in the shared buffer together.
 */
static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_svc_cmd_req *req)
{
	if (!req || !req->resp_buf || !req->cmd_req_buf) {
		pr_err("req or cmd buffer or response buffer is null\n");
		return -EINVAL;
	}

	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->client.sb_virt == NULL) {
		pr_err("sb_virt null\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base == 0) {
		pr_err("user_virt_sb_base is null\n");
		return -EINVAL;
	}

	if (data->client.sb_length == 0) {
		pr_err("sb_length is 0\n");
		return -EINVAL;
	}

	/* both buffers must start inside [sb_base, sb_base + sb_length) */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* guard the cmd_req_len + resp_len sum used below */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* guard all pointer + length sums used in the final range check */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* both buffer ends must also stay inside the shared buffer */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
2806
/*
 * Handler for the SEND_SVC_CMD ioctl.
 *
 * Validates the request, builds the matching RPMB or FSM-key service
 * request, votes for bandwidth/clocks, flushes the shared buffer, and
 * issues the scm call.  Cache is invalidated and bandwidth/clock votes
 * are dropped (or the scale-down timer armed) on every return path after
 * voting.  Returns 0 on success, negative errno on failure.
 */
static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	int ret = 0;
	struct qseecom_client_send_service_ireq send_svc_ireq;
	struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
	struct qseecom_command_scm_resp resp;
	struct qseecom_send_svc_cmd_req req;
	void *send_req_ptr;
	size_t req_buf_size;

	/*struct qseecom_command_scm_resp resp;*/

	if (copy_from_user(&req,
				(void __user *)argp,
				sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}

	if (__validate_send_service_cmd_inputs(data, &req))
		return -EINVAL;

	data->type = QSEECOM_SECURE_SERVICE;

	/* pick the request layout matching the command family */
	switch (req.cmd_id) {
	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
	case QSEOS_RPMB_ERASE_COMMAND:
	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
		send_req_ptr = &send_svc_ireq;
		req_buf_size = sizeof(send_svc_ireq);
		if (__qseecom_process_rpmb_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	case QSEOS_FSM_LTEOTA_REQ_CMD:
	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
	case QSEOS_FSM_IKE_REQ_CMD:
	case QSEOS_FSM_IKE_REQ_RSP_CMD:
	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
	case QSEOS_FSM_OEM_FUSE_READ_ROW:
	case QSEOS_FSM_ENCFS_REQ_CMD:
	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
		send_req_ptr = &send_fsm_key_svc_ireq;
		req_buf_size = sizeof(send_fsm_key_svc_ireq);
		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	default:
		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
		return -EINVAL;
	}

	/* vote for bandwidth/clocks before touching the crypto engine */
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
		if (ret) {
			pr_err("Fail to set bw HIGH\n");
			return ret;
		}
	} else {
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clocks with err %d\n", ret);
			goto exit;
		}
	}

	/* flush shared buffer so TZ sees the request contents */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				(const void *)send_req_ptr,
				req_buf_size, &resp, sizeof(resp));
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		}
		goto exit;
	}
	/* invalidate so the CPU sees TZ's response data */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		pr_debug("qseos_result_incomplete\n");
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd fail with result: %d\n",
				resp.result);
		}
		/* RPMB status query reports its status via resp.result */
		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
			pr_warn("RPMB key status is 0x%x\n", resp.result);
			*(uint32_t *)req.resp_buf = resp.result;
			ret = 0;
		}
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with resp.result: %d\n", resp.result);
		ret = -EINVAL;
		break;
	default:
		pr_err("Response result %d not supported\n",
				resp.result);
		ret = -EINVAL;
		break;
	}
	/* drop the clock vote or arm the bandwidth scale-down timer */
	if (!qseecom.support_bus_scaling) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	} else {
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
	}

exit:
	return ret;
}
2940
/*
 * Validate a userspace send-command request against the client's shared
 * buffer.  Unlike the service-command validator, a NULL resp_buf is
 * allowed when resp_len is 0.  Checks buffer containment, lengths, and
 * integer overflow in every pointer + length combination.
 * Returns 0 if valid, -EINVAL on bad input, -ENOMEM if cmd+resp do not
 * fit in the shared buffer together.
 */
static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)

{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}
	/* resp_buf may be NULL only for zero-length responses */
	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
					(req->cmd_req_buf == NULL)) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* both buffers must start inside [sb_base, sb_base + sb_length) */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* guard the cmd_req_len + resp_len sum used below */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* guard all pointer + length sums used in the final range check */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* both buffer ends must also stay inside the shared buffer */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
3010
/*
 * Post-scm-call reentrancy handling for a TZ app command.
 *
 * BLOCKED_ON_LISTENER: unblock the app via the listener path, then fall
 * through to INCOMPLETE processing.  INCOMPLETE: mark the app blocked
 * (bumping the global block ref count) while the incomplete command is
 * processed, then wake any waiters.  Returns 0 on success or SUCCESS
 * result, negative errno otherwise.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/* fall through to process the now-unblocked incomplete cmd */
	case QSEOS_RESULT_INCOMPLETE:
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
						resp->result);
		return -EINVAL;
	}
}
3049
/*
 * Marshal a client command/response pair into the QSEE "send data" SCM
 * request and issue it to TZ.
 *
 * The request/response buffers live inside the client's ion shared buffer;
 * their user virtual addresses are translated to physical addresses before
 * being handed to TZ.  The 32-bit ireq layout is used for QSEE < 4.0, the
 * 64-bit layout otherwise.  The shared buffer is cleaned/invalidated around
 * the SCM call so TZ and the CPU see coherent data.
 *
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	/* Per-client scatter-gather descriptor table passed along to TZ. */
	struct sglist_info *table = data->sglistinfo_ptr;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy 32-bit ireq: physical addresses must fit in u32. */
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* Flush the sglist table so TZ reads up-to-date entries. */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/*
	 * Both ireq layouts start with the command id, so it can be patched
	 * through the common cmd_buf pointer.
	 */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* Clean the shared buffer so TZ sees the client's request bytes. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	/* If the target app is blocked on a listener, wait here first. */
	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* Invalidate so the CPU re-reads the response TZ wrote back. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret)
		pr_err("cache operation failed %d\n", ret);
	return ret;
}
3180
3181static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3182{
3183 int ret = 0;
3184 struct qseecom_send_cmd_req req;
3185
3186 ret = copy_from_user(&req, argp, sizeof(req));
3187 if (ret) {
3188 pr_err("copy_from_user failed\n");
3189 return ret;
3190 }
3191
3192 if (__validate_send_cmd_inputs(data, &req))
3193 return -EINVAL;
3194
3195 ret = __qseecom_send_cmd(data, &req);
3196
3197 if (ret)
3198 return ret;
3199
3200 return ret;
3201}
3202
3203int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3204 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3205 struct qseecom_dev_handle *data, int i) {
3206
3207 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3208 (req->ifd_data[i].fd > 0)) {
3209 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3210 (req->ifd_data[i].cmd_buf_offset >
3211 req->cmd_req_len - sizeof(uint32_t))) {
3212 pr_err("Invalid offset (req len) 0x%x\n",
3213 req->ifd_data[i].cmd_buf_offset);
3214 return -EINVAL;
3215 }
3216 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3217 (lstnr_resp->ifd_data[i].fd > 0)) {
3218 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3219 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3220 lstnr_resp->resp_len - sizeof(uint32_t))) {
3221 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3222 lstnr_resp->ifd_data[i].cmd_buf_offset);
3223 return -EINVAL;
3224 }
3225 }
3226 return 0;
3227}
3228
/*
 * Patch (or scrub, when cleanup is true) ion-fd physical addresses into the
 * command/response buffer using 32-bit sg entries.
 *
 * For every valid ifd_data[] entry the referenced ion buffer's sg table is
 * resolved; a single-entry table writes one 32-bit physical address at the
 * requested offset, a multi-entry table writes an array of
 * qseecom_sg_entry records.  On the non-cleanup pass the buffers are
 * cache-cleaned and a sglistinfo descriptor is recorded for TZ; on the
 * cleanup pass addresses are zeroed and caches invalidated.
 *
 * @msg:     qseecom_send_modfd_cmd_req (client app) or
 *           qseecom_send_modfd_listener_resp (listener), chosen by data->type.
 * @cleanup: false before the SCM call (populate), true after (scrub).
 *
 * Returns 0 on success.  NOTE(review): the err path returns -ENOMEM for
 * every failure, including validation errors — kept as-is here.
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* Interpret msg according to the caller's role. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Resolve the ion handle and the patch location ("field"). */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: write one 32-bit PA at the offset. */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: write a qseecom_sg_entry array. */
			struct qseecom_sg_entry *update;
			int j = 0;

			/* Ensure the entry array fits within the buffer. */
			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			/* Post-call: invalidate so CPU sees TZ's writes. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Pre-call: clean caches and record sglist info. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3445
3446static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3447 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3448{
3449 struct scatterlist *sg = sg_ptr->sgl;
3450 struct qseecom_sg_entry_64bit *sg_entry;
3451 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3452 void *buf;
3453 uint i;
3454 size_t size;
3455 dma_addr_t coh_pmem;
3456
3457 if (fd_idx >= MAX_ION_FD) {
3458 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3459 return -ENOMEM;
3460 }
3461 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3462 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3463 /* Allocate a contiguous kernel buffer */
3464 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3465 size = (size + PAGE_SIZE) & PAGE_MASK;
3466 buf = dma_alloc_coherent(qseecom.pdev,
3467 size, &coh_pmem, GFP_KERNEL);
3468 if (buf == NULL) {
3469 pr_err("failed to alloc memory for sg buf\n");
3470 return -ENOMEM;
3471 }
3472 /* update qseecom_sg_list_buf_hdr_64bit */
3473 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3474 buf_hdr->new_buf_phys_addr = coh_pmem;
3475 buf_hdr->nents_total = sg_ptr->nents;
3476 /* save the left sg entries into new allocated buf */
3477 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3478 for (i = 0; i < sg_ptr->nents; i++) {
3479 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3480 sg_entry->len = sg->length;
3481 sg_entry++;
3482 sg = sg_next(sg);
3483 }
3484
3485 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3486 data->client.sec_buf_fd[fd_idx].vbase = buf;
3487 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3488 data->client.sec_buf_fd[fd_idx].size = size;
3489
3490 return 0;
3491}
3492
/*
 * 64-bit variant of __qseecom_update_cmd_buf(): patch (or scrub) ion-fd
 * physical addresses into the command/response buffer using 64-bit sg
 * entries.  Unlike the 32-bit path, an oversized sg table
 * (> QSEECOM_MAX_SG_ENTRY) is supported by spilling entries into a
 * DMA-coherent side buffer via __qseecom_allocate_sg_list_buffer().
 *
 * @msg:     qseecom_send_modfd_cmd_req or qseecom_send_modfd_listener_resp,
 *           chosen by data->type.
 * @cleanup: false before the SCM call (populate), true after (scrub/free).
 *
 * Returns 0 on success, -ENOMEM via the err path for all failures.
 * NOTE(review): the err path frees every tracked sec_buf_fd allocation but
 * does not clear is_sec_buf_fd/vbase — looks like a later cleanup could
 * double-free; kept as-is in this documentation pass, verify upstream.
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* Interpret msg according to the caller's role. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Resolve the ion handle and the patch location ("field"). */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			/* Too many entries for the inline array: spill them
			 * into a side buffer (or free it on cleanup).
			 */
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			/* Only the header lives in the cmd buffer now. */
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: write one 64-bit PA at the offset. */
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: write an entry array in place. */
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			/* Ensure the entry array fits within the buffer. */
			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
						(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			/* Post-call: invalidate so CPU sees TZ's writes. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Pre-call: clean caches and record sglist info. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3691
3692static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3693 void __user *argp,
3694 bool is_64bit_addr)
3695{
3696 int ret = 0;
3697 int i;
3698 struct qseecom_send_modfd_cmd_req req;
3699 struct qseecom_send_cmd_req send_cmd_req;
3700
3701 ret = copy_from_user(&req, argp, sizeof(req));
3702 if (ret) {
3703 pr_err("copy_from_user failed\n");
3704 return ret;
3705 }
3706
3707 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3708 send_cmd_req.cmd_req_len = req.cmd_req_len;
3709 send_cmd_req.resp_buf = req.resp_buf;
3710 send_cmd_req.resp_len = req.resp_len;
3711
3712 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3713 return -EINVAL;
3714
3715 /* validate offsets */
3716 for (i = 0; i < MAX_ION_FD; i++) {
3717 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3718 pr_err("Invalid offset %d = 0x%x\n",
3719 i, req.ifd_data[i].cmd_buf_offset);
3720 return -EINVAL;
3721 }
3722 }
3723 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3724 (uintptr_t)req.cmd_req_buf);
3725 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3726 (uintptr_t)req.resp_buf);
3727
3728 if (!is_64bit_addr) {
3729 ret = __qseecom_update_cmd_buf(&req, false, data);
3730 if (ret)
3731 return ret;
3732 ret = __qseecom_send_cmd(data, &send_cmd_req);
3733 if (ret)
3734 return ret;
3735 ret = __qseecom_update_cmd_buf(&req, true, data);
3736 if (ret)
3737 return ret;
3738 } else {
3739 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3740 if (ret)
3741 return ret;
3742 ret = __qseecom_send_cmd(data, &send_cmd_req);
3743 if (ret)
3744 return ret;
3745 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3746 if (ret)
3747 return ret;
3748 }
3749
3750 return ret;
3751}
3752
/* Ioctl entry point for the 32-bit-address modified-fd send command. */
static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, false);
}
3758
/* Ioctl entry point for the 64-bit-address modified-fd send command. */
static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, true);
}
3764
3765
3766
3767static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3768 struct qseecom_registered_listener_list *svc)
3769{
3770 int ret;
3771
3772 ret = (svc->rcv_req_flag != 0);
3773 return ret || data->abort;
3774}
3775
/*
 * Block a listener service until TZ posts a request for it (rcv_req_flag)
 * or the handle is aborted.  The wait is freezable so it does not block
 * system suspend.
 *
 * Returns 0 when a request is pending (the flag is consumed),
 * -ERESTARTSYS if the wait was interrupted, -ENODEV on abort, or
 * -ENODATA if the listener id is unknown.
 */
static int qseecom_receive_req(struct qseecom_dev_handle *data)
{
	int ret = 0;
	struct qseecom_registered_listener_list *this_lstnr;

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (!this_lstnr) {
		pr_err("Invalid listener ID\n");
		return -ENODATA;
	}

	while (1) {
		if (wait_event_freezable(this_lstnr->rcv_req_wq,
				__qseecom_listener_has_rcvd_req(data,
				this_lstnr))) {
			pr_debug("Interrupted: exiting Listener Service = %d\n",
					(uint32_t)data->listener.id);
			/* woken up for different reason */
			return -ERESTARTSYS;
		}

		/* Abort takes priority over a pending request. */
		if (data->abort) {
			pr_err("Aborting Listener Service = %d\n",
					(uint32_t)data->listener.id);
			return -ENODEV;
		}
		/* Consume the pending-request flag and return to caller. */
		this_lstnr->rcv_req_flag = 0;
		break;
	}
	return ret;
}
3807
3808static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3809{
3810 unsigned char app_arch = 0;
3811 struct elf32_hdr *ehdr;
3812 struct elf64_hdr *ehdr64;
3813
3814 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3815
3816 switch (app_arch) {
3817 case ELFCLASS32: {
3818 ehdr = (struct elf32_hdr *)fw_entry->data;
3819 if (fw_entry->size < sizeof(*ehdr)) {
3820 pr_err("%s: Not big enough to be an elf32 header\n",
3821 qseecom.pdev->init_name);
3822 return false;
3823 }
3824 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3825 pr_err("%s: Not an elf32 header\n",
3826 qseecom.pdev->init_name);
3827 return false;
3828 }
3829 if (ehdr->e_phnum == 0) {
3830 pr_err("%s: No loadable segments\n",
3831 qseecom.pdev->init_name);
3832 return false;
3833 }
3834 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3835 sizeof(struct elf32_hdr) > fw_entry->size) {
3836 pr_err("%s: Program headers not within mdt\n",
3837 qseecom.pdev->init_name);
3838 return false;
3839 }
3840 break;
3841 }
3842 case ELFCLASS64: {
3843 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3844 if (fw_entry->size < sizeof(*ehdr64)) {
3845 pr_err("%s: Not big enough to be an elf64 header\n",
3846 qseecom.pdev->init_name);
3847 return false;
3848 }
3849 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3850 pr_err("%s: Not an elf64 header\n",
3851 qseecom.pdev->init_name);
3852 return false;
3853 }
3854 if (ehdr64->e_phnum == 0) {
3855 pr_err("%s: No loadable segments\n",
3856 qseecom.pdev->init_name);
3857 return false;
3858 }
3859 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3860 sizeof(struct elf64_hdr) > fw_entry->size) {
3861 pr_err("%s: Program headers not within mdt\n",
3862 qseecom.pdev->init_name);
3863 return false;
3864 }
3865 break;
3866 }
3867 default: {
3868 pr_err("QSEE app arch %u is not supported\n", app_arch);
3869 return false;
3870 }
3871 }
3872 return true;
3873}
3874
/*
 * Compute the total size of a split trusted-app firmware image
 * ("<app>.mdt" plus one "<app>.bNN" blob per ELF program header) and
 * report its architecture (EI_CLASS byte: ELFCLASS32/ELFCLASS64).
 *
 * On success *fw_size holds the summed size and 0 is returned; on any
 * failure *fw_size is zeroed and a negative errno is returned.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with request_firmware\n");
		ret = -EIO;
		goto err;
	}
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	/* One split blob exists per ELF program header. */
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* Sum the sizes of all .bNN blobs, guarding against u32 overflow. */
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}

	/* ret is 0 here: set by the last successful request_firmware(). */
	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
3937
/*
 * Concatenate the split trusted-app firmware image ("<app>.mdt" followed
 * by each "<app>.bNN" blob) into the caller-provided buffer img_data of
 * capacity fw_size (as computed by __qseecom_get_fw_size()), filling in
 * load_req->mdt_len and load_req->img_len along the way.
 *
 * Returns 0 on success or a negative errno; every copy is bounds-checked
 * against fw_size so a firmware file that grew since sizing cannot
 * overflow the buffer.
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
				uint32_t fw_size,
				struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}

	load_req->img_len = fw_entry->size;
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/

	/* One split blob exists per ELF program header. */
	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, app_arch);
		ret = -EIO;
		goto err;
	}
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* Append each .bNN blob, checking overflow and buffer capacity. */
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	release_firmware(fw_entry);
	return ret;
}
4009
/*
 * Allocate an ion buffer of fw_size bytes from the QSECOM heap for
 * firmware loading, map it into the kernel, and look up its physical
 * address.  On success fills *pihandle, *data (kernel va), and *paddr;
 * ownership passes to the caller, who releases it with
 * __qseecom_free_img_data().
 *
 * Returns 0 on success, -ENOMEM or -EIO on failure (all intermediate
 * resources are unwound via the goto chain).
 */
static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
{
	size_t len = 0;
	int ret = 0;
	ion_phys_addr_t pa;
	struct ion_handle *ihandle = NULL;
	u8 *img_data = NULL;

	ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
			SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);

	if (IS_ERR_OR_NULL(ihandle)) {
		pr_err("ION alloc failed\n");
		return -ENOMEM;
	}
	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
					ihandle);

	if (IS_ERR_OR_NULL(img_data)) {
		pr_err("ION memory mapping for image loading failed\n");
		ret = -ENOMEM;
		goto exit_ion_free;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
	if (ret) {
		pr_err("physical memory retrieval failure\n");
		ret = -EIO;
		goto exit_ion_unmap_kernel;
	}

	/* Success: hand everything to the caller. */
	*pihandle = ihandle;
	*data = img_data;
	*paddr = pa;
	return ret;

exit_ion_unmap_kernel:
	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
exit_ion_free:
	ion_free(qseecom.ion_clnt, ihandle);
	ihandle = NULL;
	return ret;
}
4054
4055static void __qseecom_free_img_data(struct ion_handle **ihandle)
4056{
4057 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4058 ion_free(qseecom.ion_clnt, *ihandle);
4059 *ihandle = NULL;
4060}
4061
4062static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4063 uint32_t *app_id)
4064{
4065 int ret = -1;
4066 uint32_t fw_size = 0;
4067 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4068 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4069 struct qseecom_command_scm_resp resp;
4070 u8 *img_data = NULL;
4071 ion_phys_addr_t pa = 0;
4072 struct ion_handle *ihandle = NULL;
4073 void *cmd_buf = NULL;
4074 size_t cmd_len;
4075 uint32_t app_arch = 0;
4076
4077 if (!data || !appname || !app_id) {
4078 pr_err("Null pointer to data or appname or appid\n");
4079 return -EINVAL;
4080 }
4081 *app_id = 0;
4082 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4083 return -EIO;
4084 data->client.app_arch = app_arch;
4085
4086 /* Check and load cmnlib */
4087 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4088 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4089 ret = qseecom_load_commonlib_image(data, "cmnlib");
4090 if (ret) {
4091 pr_err("failed to load cmnlib\n");
4092 return -EIO;
4093 }
4094 qseecom.commonlib_loaded = true;
4095 pr_debug("cmnlib is loaded\n");
4096 }
4097
4098 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4099 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4100 if (ret) {
4101 pr_err("failed to load cmnlib64\n");
4102 return -EIO;
4103 }
4104 qseecom.commonlib64_loaded = true;
4105 pr_debug("cmnlib64 is loaded\n");
4106 }
4107 }
4108
4109 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4110 if (ret)
4111 return ret;
4112
4113 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4114 if (ret) {
4115 ret = -EIO;
4116 goto exit_free_img_data;
4117 }
4118
4119 /* Populate the load_req parameters */
4120 if (qseecom.qsee_version < QSEE_VERSION_40) {
4121 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4122 load_req.mdt_len = load_req.mdt_len;
4123 load_req.img_len = load_req.img_len;
4124 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4125 load_req.phy_addr = (uint32_t)pa;
4126 cmd_buf = (void *)&load_req;
4127 cmd_len = sizeof(struct qseecom_load_app_ireq);
4128 } else {
4129 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4130 load_req_64bit.mdt_len = load_req.mdt_len;
4131 load_req_64bit.img_len = load_req.img_len;
4132 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4133 load_req_64bit.phy_addr = (uint64_t)pa;
4134 cmd_buf = (void *)&load_req_64bit;
4135 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4136 }
4137
4138 if (qseecom.support_bus_scaling) {
4139 mutex_lock(&qsee_bw_mutex);
4140 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4141 mutex_unlock(&qsee_bw_mutex);
4142 if (ret) {
4143 ret = -EIO;
4144 goto exit_free_img_data;
4145 }
4146 }
4147
4148 ret = __qseecom_enable_clk_scale_up(data);
4149 if (ret) {
4150 ret = -EIO;
4151 goto exit_unregister_bus_bw_need;
4152 }
4153
4154 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4155 img_data, fw_size,
4156 ION_IOC_CLEAN_INV_CACHES);
4157 if (ret) {
4158 pr_err("cache operation failed %d\n", ret);
4159 goto exit_disable_clk_vote;
4160 }
4161
4162 /* SCM_CALL to load the image */
4163 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4164 &resp, sizeof(resp));
4165 if (ret) {
4166 pr_err("scm_call to load failed : ret %d\n", ret);
4167 ret = -EIO;
4168 goto exit_disable_clk_vote;
4169 }
4170
4171 switch (resp.result) {
4172 case QSEOS_RESULT_SUCCESS:
4173 *app_id = resp.data;
4174 break;
4175 case QSEOS_RESULT_INCOMPLETE:
4176 ret = __qseecom_process_incomplete_cmd(data, &resp);
4177 if (ret)
4178 pr_err("process_incomplete_cmd FAILED\n");
4179 else
4180 *app_id = resp.data;
4181 break;
4182 case QSEOS_RESULT_FAILURE:
4183 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4184 break;
4185 default:
4186 pr_err("scm call return unknown response %d\n", resp.result);
4187 ret = -EINVAL;
4188 break;
4189 }
4190
4191exit_disable_clk_vote:
4192 __qseecom_disable_clk_scale_down(data);
4193
4194exit_unregister_bus_bw_need:
4195 if (qseecom.support_bus_scaling) {
4196 mutex_lock(&qsee_bw_mutex);
4197 qseecom_unregister_bus_bandwidth_needs(data);
4198 mutex_unlock(&qsee_bw_mutex);
4199 }
4200
4201exit_free_img_data:
4202 __qseecom_free_img_data(&ihandle);
4203 return ret;
4204}
4205
/*
 * qseecom_load_commonlib_image() - load a QSEE common library image
 * ("cmnlib" or "cmnlib64") into the secure environment.
 *
 * @data:        client device handle used for bus/clock voting.
 * @cmnlib_name: firmware base name of the library to load; must be shorter
 *               than MAX_APP_NAME_SIZE.
 *
 * Stages the image in the global qseecom.cmnlib_ion_handle buffer, flushes
 * caches, and issues QSEOS_LOAD_SERV_IMAGE_COMMAND via SCM.  The staging
 * buffer is always freed before returning.  Returns 0 on success, negative
 * errno on failure.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	/* Staging buffer lives in the global handle, not a local one */
	ret = __qseecom_allocate_img_data(&qseecom.cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	/* Copies the image and fills load_req.mdt_len/img_len */
	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	/* Pick the 32-bit or 64-bit request layout based on QSEE version */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* TZ reads the buffer by physical address; clean caches first */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, qseecom.cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener services before it can finish the load */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&qseecom.cmnlib_ion_handle);
	return ret;
}
4325
4326static int qseecom_unload_commonlib_image(void)
4327{
4328 int ret = -EINVAL;
4329 struct qseecom_unload_lib_image_ireq unload_req = {0};
4330 struct qseecom_command_scm_resp resp;
4331
4332 /* Populate the remaining parameters */
4333 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4334
4335 /* SCM_CALL to load the image */
4336 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4337 sizeof(struct qseecom_unload_lib_image_ireq),
4338 &resp, sizeof(resp));
4339 if (ret) {
4340 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4341 ret = -EIO;
4342 } else {
4343 switch (resp.result) {
4344 case QSEOS_RESULT_SUCCESS:
4345 break;
4346 case QSEOS_RESULT_FAILURE:
4347 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4348 break;
4349 default:
4350 pr_err("scm call return unknown response %d\n",
4351 resp.result);
4352 ret = -EINVAL;
4353 break;
4354 }
4355 }
4356
4357 return ret;
4358}
4359
4360int qseecom_start_app(struct qseecom_handle **handle,
4361 char *app_name, uint32_t size)
4362{
4363 int32_t ret = 0;
4364 unsigned long flags = 0;
4365 struct qseecom_dev_handle *data = NULL;
4366 struct qseecom_check_app_ireq app_ireq;
4367 struct qseecom_registered_app_list *entry = NULL;
4368 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4369 bool found_app = false;
4370 size_t len;
4371 ion_phys_addr_t pa;
4372 uint32_t fw_size, app_arch;
4373 uint32_t app_id = 0;
4374
4375 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4376 pr_err("Not allowed to be called in %d state\n",
4377 atomic_read(&qseecom.qseecom_state));
4378 return -EPERM;
4379 }
4380 if (!app_name) {
4381 pr_err("failed to get the app name\n");
4382 return -EINVAL;
4383 }
4384
Zhen Kong64a6d7282017-06-16 11:55:07 -07004385 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004386 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004387 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004388 return -EINVAL;
4389 }
4390
4391 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4392 if (!(*handle))
4393 return -ENOMEM;
4394
4395 data = kzalloc(sizeof(*data), GFP_KERNEL);
4396 if (!data) {
4397 if (ret == 0) {
4398 kfree(*handle);
4399 *handle = NULL;
4400 }
4401 return -ENOMEM;
4402 }
4403 data->abort = 0;
4404 data->type = QSEECOM_CLIENT_APP;
4405 data->released = false;
4406 data->client.sb_length = size;
4407 data->client.user_virt_sb_base = 0;
4408 data->client.ihandle = NULL;
4409
4410 init_waitqueue_head(&data->abort_wq);
4411
4412 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4413 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4414 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4415 pr_err("Ion client could not retrieve the handle\n");
4416 kfree(data);
4417 kfree(*handle);
4418 *handle = NULL;
4419 return -EINVAL;
4420 }
4421 mutex_lock(&app_access_lock);
4422
4423 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4424 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4425 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4426 if (ret)
4427 goto err;
4428
4429 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4430 if (app_id) {
4431 pr_warn("App id %d for [%s] app exists\n", app_id,
4432 (char *)app_ireq.app_name);
4433 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4434 list_for_each_entry(entry,
4435 &qseecom.registered_app_list_head, list){
4436 if (entry->app_id == app_id) {
4437 entry->ref_cnt++;
4438 found_app = true;
4439 break;
4440 }
4441 }
4442 spin_unlock_irqrestore(
4443 &qseecom.registered_app_list_lock, flags);
4444 if (!found_app)
4445 pr_warn("App_id %d [%s] was loaded but not registered\n",
4446 ret, (char *)app_ireq.app_name);
4447 } else {
4448 /* load the app and get the app_id */
4449 pr_debug("%s: Loading app for the first time'\n",
4450 qseecom.pdev->init_name);
4451 ret = __qseecom_load_fw(data, app_name, &app_id);
4452 if (ret < 0)
4453 goto err;
4454 }
4455 data->client.app_id = app_id;
4456 if (!found_app) {
4457 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4458 if (!entry) {
4459 pr_err("kmalloc for app entry failed\n");
4460 ret = -ENOMEM;
4461 goto err;
4462 }
4463 entry->app_id = app_id;
4464 entry->ref_cnt = 1;
4465 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4466 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4467 ret = -EIO;
4468 kfree(entry);
4469 goto err;
4470 }
4471 entry->app_arch = app_arch;
4472 entry->app_blocked = false;
4473 entry->blocked_on_listener_id = 0;
4474 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4475 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4476 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4477 flags);
4478 }
4479
4480 /* Get the physical address of the ION BUF */
4481 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4482 if (ret) {
4483 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4484 ret);
4485 goto err;
4486 }
4487
4488 /* Populate the structure for sending scm call to load image */
4489 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4490 data->client.ihandle);
4491 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4492 pr_err("ION memory mapping for client shared buf failed\n");
4493 ret = -ENOMEM;
4494 goto err;
4495 }
4496 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4497 data->client.sb_phys = (phys_addr_t)pa;
4498 (*handle)->dev = (void *)data;
4499 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4500 (*handle)->sbuf_len = data->client.sb_length;
4501
4502 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4503 if (!kclient_entry) {
4504 ret = -ENOMEM;
4505 goto err;
4506 }
4507 kclient_entry->handle = *handle;
4508
4509 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4510 list_add_tail(&kclient_entry->list,
4511 &qseecom.registered_kclient_list_head);
4512 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4513
4514 mutex_unlock(&app_access_lock);
4515 return 0;
4516
4517err:
4518 kfree(data);
4519 kfree(*handle);
4520 *handle = NULL;
4521 mutex_unlock(&app_access_lock);
4522 return ret;
4523}
4524EXPORT_SYMBOL(qseecom_start_app);
4525
4526int qseecom_shutdown_app(struct qseecom_handle **handle)
4527{
4528 int ret = -EINVAL;
4529 struct qseecom_dev_handle *data;
4530
4531 struct qseecom_registered_kclient_list *kclient = NULL;
4532 unsigned long flags = 0;
4533 bool found_handle = false;
4534
4535 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4536 pr_err("Not allowed to be called in %d state\n",
4537 atomic_read(&qseecom.qseecom_state));
4538 return -EPERM;
4539 }
4540
4541 if ((handle == NULL) || (*handle == NULL)) {
4542 pr_err("Handle is not initialized\n");
4543 return -EINVAL;
4544 }
4545 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4546 mutex_lock(&app_access_lock);
4547
4548 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4549 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4550 list) {
4551 if (kclient->handle == (*handle)) {
4552 list_del(&kclient->list);
4553 found_handle = true;
4554 break;
4555 }
4556 }
4557 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4558 if (!found_handle)
4559 pr_err("Unable to find the handle, exiting\n");
4560 else
4561 ret = qseecom_unload_app(data, false);
4562
4563 mutex_unlock(&app_access_lock);
4564 if (ret == 0) {
4565 kzfree(data);
4566 kzfree(*handle);
4567 kzfree(kclient);
4568 *handle = NULL;
4569 }
4570
4571 return ret;
4572}
4573EXPORT_SYMBOL(qseecom_shutdown_app);
4574
/*
 * qseecom_send_command() - kernel-client entry point: send a command to a
 * trusted app using buffers inside the handle's shared buffer.
 *
 * @handle:   handle returned by qseecom_start_app().
 * @send_buf: request buffer (must lie within the shared buffer; validated
 *            by __validate_send_cmd_inputs()).
 * @sbuf_len: request length in bytes.
 * @resp_buf: response buffer (same constraint as send_buf).
 * @rbuf_len: response buffer length in bytes.
 *
 * Handles bus-bandwidth scaling and, when HLOS owns the crypto clocks and
 * none are voted yet, temporarily enables them around the TZ call.
 * Returns 0 on success, negative errno on failure.
 */
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
				atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
						ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		perf_enabled = true;
	}
	/* "securemm" uses the legacy command format for this call only */
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req);
	data->use_legacy_cmd = false;
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	/* Drop the clock votes we took above just for this command */
	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
EXPORT_SYMBOL(qseecom_send_command);
4654
4655int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4656{
4657 int ret = 0;
4658
4659 if ((handle == NULL) || (handle->dev == NULL)) {
4660 pr_err("No valid kernel client\n");
4661 return -EINVAL;
4662 }
4663 if (high) {
4664 if (qseecom.support_bus_scaling) {
4665 mutex_lock(&qsee_bw_mutex);
4666 __qseecom_register_bus_bandwidth_needs(handle->dev,
4667 HIGH);
4668 mutex_unlock(&qsee_bw_mutex);
4669 } else {
4670 ret = qseecom_perf_enable(handle->dev);
4671 if (ret)
4672 pr_err("Failed to vote for clock with err %d\n",
4673 ret);
4674 }
4675 } else {
4676 if (!qseecom.support_bus_scaling) {
4677 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4678 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4679 } else {
4680 mutex_lock(&qsee_bw_mutex);
4681 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4682 mutex_unlock(&qsee_bw_mutex);
4683 }
4684 }
4685 return ret;
4686}
4687EXPORT_SYMBOL(qseecom_set_bandwidth);
4688
/*
 * qseecom_process_listener_from_smcinvoke() - service a listener request
 * that arrived through the smcinvoke driver instead of qseecom's own SCM
 * path.
 *
 * @desc: SCM descriptor; ret[0..2] carry the request command, app id, and
 *        listener id on entry, and are overwritten with the final response
 *        (result, resp_type, data) on exit.
 *
 * Builds a qseecom response plus dummy app/client records around the
 * values in desc->ret[], runs the normal reentrancy handling under
 * app_access_lock, and writes the outcome back into desc->ret[].
 * Returns 0 on success, negative errno on failure.
 */
int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
{
	struct qseecom_registered_app_list dummy_app_entry = { {0} };
	struct qseecom_dev_handle dummy_private_data = {0};
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	if (!desc) {
		pr_err("desc is NULL\n");
		return -EINVAL;
	}

	/* Unpack the TZ return registers into a qseecom response */
	resp.result = desc->ret[0];	/*req_cmd*/
	resp.resp_type = desc->ret[1];	/*app_id*/
	resp.data = desc->ret[2];	/*listener_id*/

	dummy_private_data.client.app_id = desc->ret[1];
	dummy_app_entry.app_id = desc->ret[1];

	mutex_lock(&app_access_lock);
	ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
					&dummy_private_data);
	mutex_unlock(&app_access_lock);
	if (ret)
		pr_err("Failed to req cmd %d lsnr %d on app %d, ret = %d\n",
			(int)desc->ret[0], (int)desc->ret[2],
			(int)desc->ret[1], ret);
	/* Hand the updated response back through the descriptor */
	desc->ret[0] = resp.result;
	desc->ret[1] = resp.resp_type;
	desc->ret[2] = resp.data;
	return ret;
}
EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4722
/*
 * Signal that a listener response is available: set the global response
 * flag, then wake any waiter on send_resp_wq.  Always returns 0.
 */
static int qseecom_send_resp(void)
{
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4729
4730static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4731{
4732 struct qseecom_registered_listener_list *this_lstnr = NULL;
4733
4734 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4735 this_lstnr = __qseecom_find_svc(data->listener.id);
4736 if (this_lstnr == NULL)
4737 return -EINVAL;
4738 qseecom.send_resp_flag = 1;
4739 this_lstnr->send_resp_flag = 1;
4740 wake_up_interruptible(&qseecom.send_resp_wq);
4741 return 0;
4742}
4743
4744static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
4745 struct qseecom_send_modfd_listener_resp *resp,
4746 struct qseecom_registered_listener_list *this_lstnr)
4747{
4748 int i;
4749
4750 if (!data || !resp || !this_lstnr) {
4751 pr_err("listener handle or resp msg is null\n");
4752 return -EINVAL;
4753 }
4754
4755 if (resp->resp_buf_ptr == NULL) {
4756 pr_err("resp buffer is null\n");
4757 return -EINVAL;
4758 }
4759 /* validate resp buf length */
4760 if ((resp->resp_len == 0) ||
4761 (resp->resp_len > this_lstnr->sb_length)) {
4762 pr_err("resp buf length %d not valid\n", resp->resp_len);
4763 return -EINVAL;
4764 }
4765
4766 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
4767 pr_err("Integer overflow in resp_len & resp_buf\n");
4768 return -EINVAL;
4769 }
4770 if ((uintptr_t)this_lstnr->user_virt_sb_base >
4771 (ULONG_MAX - this_lstnr->sb_length)) {
4772 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
4773 return -EINVAL;
4774 }
4775 /* validate resp buf */
4776 if (((uintptr_t)resp->resp_buf_ptr <
4777 (uintptr_t)this_lstnr->user_virt_sb_base) ||
4778 ((uintptr_t)resp->resp_buf_ptr >=
4779 ((uintptr_t)this_lstnr->user_virt_sb_base +
4780 this_lstnr->sb_length)) ||
4781 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
4782 ((uintptr_t)this_lstnr->user_virt_sb_base +
4783 this_lstnr->sb_length))) {
4784 pr_err("resp buf is out of shared buffer region\n");
4785 return -EINVAL;
4786 }
4787
4788 /* validate offsets */
4789 for (i = 0; i < MAX_ION_FD; i++) {
4790 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
4791 pr_err("Invalid offset %d = 0x%x\n",
4792 i, resp->ifd_data[i].cmd_buf_offset);
4793 return -EINVAL;
4794 }
4795 }
4796
4797 return 0;
4798}
4799
/*
 * __qseecom_send_modfd_resp() - deliver a listener response whose payload
 * references ion fds, patching the fd addresses into the command buffer.
 *
 * @data:          listener's device handle (identifies the listener id).
 * @argp:          user pointer to a qseecom_send_modfd_listener_resp.
 * @is_64bit_addr: selects the 64-bit vs 32-bit command-buffer fixup path.
 *
 * Validates the user-supplied response, rebases its user-space buffer
 * pointer into the kernel mapping of the listener's shared buffer, applies
 * the ion-fd address fixups, and wakes the waiter.  Returns 0 on success,
 * -EINVAL on any validation or copy failure.
 */
static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp, bool is_64bit_addr)
{
	struct qseecom_send_modfd_listener_resp resp;
	struct qseecom_registered_listener_list *this_lstnr = NULL;

	if (copy_from_user(&resp, argp, sizeof(resp))) {
		pr_err("copy_from_user failed");
		return -EINVAL;
	}

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (this_lstnr == NULL)
		return -EINVAL;

	if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
		return -EINVAL;

	/*
	 * Translate the user-space buffer pointer to its kernel-mapped
	 * equivalent: same offset, relative to sb_virt instead of
	 * user_virt_sb_base.
	 */
	resp.resp_buf_ptr = this_lstnr->sb_virt +
		(uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);

	if (!is_64bit_addr)
		__qseecom_update_cmd_buf(&resp, false, data);
	else
		__qseecom_update_cmd_buf_64(&resp, false, data);
	/* Flags must be set before waking the waiter */
	qseecom.send_resp_flag = 1;
	this_lstnr->send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4830
/* ioctl wrapper: listener response with 32-bit buffer addresses. */
static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, false);
}
4836
/* ioctl wrapper: listener response with 64-bit buffer addresses. */
static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, true);
}
4842
4843static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
4844 void __user *argp)
4845{
4846 struct qseecom_qseos_version_req req;
4847
4848 if (copy_from_user(&req, argp, sizeof(req))) {
4849 pr_err("copy_from_user failed");
4850 return -EINVAL;
4851 }
4852 req.qseos_version = qseecom.qseos_version;
4853 if (copy_to_user(argp, &req, sizeof(req))) {
4854 pr_err("copy_to_user failed");
4855 return -EINVAL;
4856 }
4857 return 0;
4858}
4859
/*
 * __qseecom_enable_clk() - refcounted enable of the crypto-engine clocks
 * (core, interface, bus) for the selected CE instance.
 *
 * @ce: CLK_QSEE or CLK_CE_DRV; any other value is rejected.
 *
 * When the refcount is already non-zero, only the count is bumped.  On the
 * first enable the clocks are brought up in core -> iface -> bus order,
 * with partial unwinding (in reverse) if any step fails.  Protected by
 * clk_access_lock.  Returns 0 on success, -EINVAL for a bad CE type,
 * -EIO on any enable failure or refcount saturation.
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	/* Already enabled: just take another reference */
	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

/* Unwind in reverse order of enabling */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
4927
4928static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
4929{
4930 struct qseecom_clk *qclk;
4931
4932 if (qseecom.no_clock_support)
4933 return;
4934
4935 if (ce == CLK_QSEE)
4936 qclk = &qseecom.qsee;
4937 else
4938 qclk = &qseecom.ce_drv;
4939
4940 mutex_lock(&clk_access_lock);
4941
4942 if (qclk->clk_access_cnt == 0) {
4943 mutex_unlock(&clk_access_lock);
4944 return;
4945 }
4946
4947 if (qclk->clk_access_cnt == 1) {
4948 if (qclk->ce_clk != NULL)
4949 clk_disable_unprepare(qclk->ce_clk);
4950 if (qclk->ce_core_clk != NULL)
4951 clk_disable_unprepare(qclk->ce_core_clk);
4952 if (qclk->ce_bus_clk != NULL)
4953 clk_disable_unprepare(qclk->ce_bus_clk);
4954 }
4955 qclk->clk_access_cnt--;
4956 mutex_unlock(&clk_access_lock);
4957}
4958
/*
 * qsee_vote_for_clock() - take a DFAB or SFPB bus-bandwidth vote on behalf
 * of a client.
 *
 * @data:     client handle; its perf_enabled / fast_load_enabled flag is
 *            set when the vote succeeds.
 * @clk_type: CLK_DFAB or CLK_SFPB.
 *
 * The two vote counts share one msm_bus client; the request level encodes
 * which votes are active (1 = DFAB only, 2 = SFPB only, 3 = both).  On the
 * first vote of either kind the QSEE CE clock is enabled first when a core
 * source clock exists, and rolled back if the bus request fails.
 * Protected by qsee_bw_mutex.  Returns 0 on success or when bandwidth
 * voting is not applicable; negative errno on failure.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			/* First DFAB vote: level 3 if SFPB active, else 1 */
			if (qseecom.qsee_sfpb_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					/* Roll back clk enable on failure */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			/* First SFPB vote: level 3 if DFAB active, else 2 */
			if (qseecom.qsee_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					/* Roll back clk enable on failure */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}

			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
5042
5043static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5044 int32_t clk_type)
5045{
5046 int32_t ret = 0;
5047 struct qseecom_clk *qclk;
5048
5049 qclk = &qseecom.qsee;
5050
5051 if (qseecom.no_clock_support)
5052 return;
5053 if (!qseecom.qsee_perf_client)
5054 return;
5055
5056 switch (clk_type) {
5057 case CLK_DFAB:
5058 mutex_lock(&qsee_bw_mutex);
5059 if (qseecom.qsee_bw_count == 0) {
5060 pr_err("Client error.Extra call to disable DFAB clk\n");
5061 mutex_unlock(&qsee_bw_mutex);
5062 return;
5063 }
5064
5065 if (qseecom.qsee_bw_count == 1) {
5066 if (qseecom.qsee_sfpb_bw_count > 0)
5067 ret = msm_bus_scale_client_update_request(
5068 qseecom.qsee_perf_client, 2);
5069 else {
5070 ret = msm_bus_scale_client_update_request(
5071 qseecom.qsee_perf_client, 0);
5072 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5073 __qseecom_disable_clk(CLK_QSEE);
5074 }
5075 if (ret)
5076 pr_err("SFPB Bandwidth req fail (%d)\n",
5077 ret);
5078 else {
5079 qseecom.qsee_bw_count--;
5080 data->perf_enabled = false;
5081 }
5082 } else {
5083 qseecom.qsee_bw_count--;
5084 data->perf_enabled = false;
5085 }
5086 mutex_unlock(&qsee_bw_mutex);
5087 break;
5088 case CLK_SFPB:
5089 mutex_lock(&qsee_bw_mutex);
5090 if (qseecom.qsee_sfpb_bw_count == 0) {
5091 pr_err("Client error.Extra call to disable SFPB clk\n");
5092 mutex_unlock(&qsee_bw_mutex);
5093 return;
5094 }
5095 if (qseecom.qsee_sfpb_bw_count == 1) {
5096 if (qseecom.qsee_bw_count > 0)
5097 ret = msm_bus_scale_client_update_request(
5098 qseecom.qsee_perf_client, 1);
5099 else {
5100 ret = msm_bus_scale_client_update_request(
5101 qseecom.qsee_perf_client, 0);
5102 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5103 __qseecom_disable_clk(CLK_QSEE);
5104 }
5105 if (ret)
5106 pr_err("SFPB Bandwidth req fail (%d)\n",
5107 ret);
5108 else {
5109 qseecom.qsee_sfpb_bw_count--;
5110 data->fast_load_enabled = false;
5111 }
5112 } else {
5113 qseecom.qsee_sfpb_bw_count--;
5114 data->fast_load_enabled = false;
5115 }
5116 mutex_unlock(&qsee_bw_mutex);
5117 break;
5118 default:
5119 pr_err("Clock type not defined\n");
5120 break;
5121 }
5122
5123}
5124
5125static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5126 void __user *argp)
5127{
5128 struct ion_handle *ihandle; /* Ion handle */
5129 struct qseecom_load_img_req load_img_req;
5130 int uret = 0;
5131 int ret;
5132 ion_phys_addr_t pa = 0;
5133 size_t len;
5134 struct qseecom_load_app_ireq load_req;
5135 struct qseecom_load_app_64bit_ireq load_req_64bit;
5136 struct qseecom_command_scm_resp resp;
5137 void *cmd_buf = NULL;
5138 size_t cmd_len;
5139 /* Copy the relevant information needed for loading the image */
5140 if (copy_from_user(&load_img_req,
5141 (void __user *)argp,
5142 sizeof(struct qseecom_load_img_req))) {
5143 pr_err("copy_from_user failed\n");
5144 return -EFAULT;
5145 }
5146
5147 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005148 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005149 load_img_req.ifd_data_fd);
5150 if (IS_ERR_OR_NULL(ihandle)) {
5151 pr_err("Ion client could not retrieve the handle\n");
5152 return -ENOMEM;
5153 }
5154
5155 /* Get the physical address of the ION BUF */
5156 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5157 if (ret) {
5158 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5159 ret);
5160 return ret;
5161 }
5162 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5163 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5164 len, load_img_req.mdt_len,
5165 load_img_req.img_len);
5166 return ret;
5167 }
5168 /* Populate the structure for sending scm call to load image */
5169 if (qseecom.qsee_version < QSEE_VERSION_40) {
5170 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5171 load_req.mdt_len = load_img_req.mdt_len;
5172 load_req.img_len = load_img_req.img_len;
5173 load_req.phy_addr = (uint32_t)pa;
5174 cmd_buf = (void *)&load_req;
5175 cmd_len = sizeof(struct qseecom_load_app_ireq);
5176 } else {
5177 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5178 load_req_64bit.mdt_len = load_img_req.mdt_len;
5179 load_req_64bit.img_len = load_img_req.img_len;
5180 load_req_64bit.phy_addr = (uint64_t)pa;
5181 cmd_buf = (void *)&load_req_64bit;
5182 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5183 }
5184
5185 if (qseecom.support_bus_scaling) {
5186 mutex_lock(&qsee_bw_mutex);
5187 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5188 mutex_unlock(&qsee_bw_mutex);
5189 if (ret) {
5190 ret = -EIO;
5191 goto exit_cpu_restore;
5192 }
5193 }
5194
5195 /* Vote for the SFPB clock */
5196 ret = __qseecom_enable_clk_scale_up(data);
5197 if (ret) {
5198 ret = -EIO;
5199 goto exit_register_bus_bandwidth_needs;
5200 }
5201 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5202 ION_IOC_CLEAN_INV_CACHES);
5203 if (ret) {
5204 pr_err("cache operation failed %d\n", ret);
5205 goto exit_disable_clock;
5206 }
5207 /* SCM_CALL to load the external elf */
5208 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5209 &resp, sizeof(resp));
5210 if (ret) {
5211 pr_err("scm_call to load failed : ret %d\n",
5212 ret);
5213 ret = -EFAULT;
5214 goto exit_disable_clock;
5215 }
5216
5217 switch (resp.result) {
5218 case QSEOS_RESULT_SUCCESS:
5219 break;
5220 case QSEOS_RESULT_INCOMPLETE:
5221 pr_err("%s: qseos result incomplete\n", __func__);
5222 ret = __qseecom_process_incomplete_cmd(data, &resp);
5223 if (ret)
5224 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5225 break;
5226 case QSEOS_RESULT_FAILURE:
5227 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5228 ret = -EFAULT;
5229 break;
5230 default:
5231 pr_err("scm_call response result %d not supported\n",
5232 resp.result);
5233 ret = -EFAULT;
5234 break;
5235 }
5236
5237exit_disable_clock:
5238 __qseecom_disable_clk_scale_down(data);
5239
5240exit_register_bus_bandwidth_needs:
5241 if (qseecom.support_bus_scaling) {
5242 mutex_lock(&qsee_bw_mutex);
5243 uret = qseecom_unregister_bus_bandwidth_needs(data);
5244 mutex_unlock(&qsee_bw_mutex);
5245 if (uret)
5246 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5247 uret, ret);
5248 }
5249
5250exit_cpu_restore:
5251 /* Deallocate the handle */
5252 if (!IS_ERR_OR_NULL(ihandle))
5253 ion_free(qseecom.ion_clnt, ihandle);
5254 return ret;
5255}
5256
5257static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5258{
5259 int ret = 0;
5260 struct qseecom_command_scm_resp resp;
5261 struct qseecom_unload_app_ireq req;
5262
5263 /* unavailable client app */
5264 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5265
5266 /* Populate the structure for sending scm call to unload image */
5267 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5268
5269 /* SCM_CALL to unload the external elf */
5270 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5271 sizeof(struct qseecom_unload_app_ireq),
5272 &resp, sizeof(resp));
5273 if (ret) {
5274 pr_err("scm_call to unload failed : ret %d\n",
5275 ret);
5276 ret = -EFAULT;
5277 goto qseecom_unload_external_elf_scm_err;
5278 }
5279 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5280 ret = __qseecom_process_incomplete_cmd(data, &resp);
5281 if (ret)
5282 pr_err("process_incomplete_cmd fail err: %d\n",
5283 ret);
5284 } else {
5285 if (resp.result != QSEOS_RESULT_SUCCESS) {
5286 pr_err("scm_call to unload image failed resp.result =%d\n",
5287 resp.result);
5288 ret = -EFAULT;
5289 }
5290 }
5291
5292qseecom_unload_external_elf_scm_err:
5293
5294 return ret;
5295}
5296
5297static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5298 void __user *argp)
5299{
5300
5301 int32_t ret;
5302 struct qseecom_qseos_app_load_query query_req;
5303 struct qseecom_check_app_ireq req;
5304 struct qseecom_registered_app_list *entry = NULL;
5305 unsigned long flags = 0;
5306 uint32_t app_arch = 0, app_id = 0;
5307 bool found_app = false;
5308
5309 /* Copy the relevant information needed for loading the image */
5310 if (copy_from_user(&query_req,
5311 (void __user *)argp,
5312 sizeof(struct qseecom_qseos_app_load_query))) {
5313 pr_err("copy_from_user failed\n");
5314 return -EFAULT;
5315 }
5316
5317 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5318 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5319 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5320
5321 ret = __qseecom_check_app_exists(req, &app_id);
5322 if (ret) {
5323 pr_err(" scm call to check if app is loaded failed");
5324 return ret; /* scm call failed */
5325 }
5326 if (app_id) {
5327 pr_debug("App id %d (%s) already exists\n", app_id,
5328 (char *)(req.app_name));
5329 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5330 list_for_each_entry(entry,
5331 &qseecom.registered_app_list_head, list){
5332 if (entry->app_id == app_id) {
5333 app_arch = entry->app_arch;
5334 entry->ref_cnt++;
5335 found_app = true;
5336 break;
5337 }
5338 }
5339 spin_unlock_irqrestore(
5340 &qseecom.registered_app_list_lock, flags);
5341 data->client.app_id = app_id;
5342 query_req.app_id = app_id;
5343 if (app_arch) {
5344 data->client.app_arch = app_arch;
5345 query_req.app_arch = app_arch;
5346 } else {
5347 data->client.app_arch = 0;
5348 query_req.app_arch = 0;
5349 }
5350 strlcpy(data->client.app_name, query_req.app_name,
5351 MAX_APP_NAME_SIZE);
5352 /*
5353 * If app was loaded by appsbl before and was not registered,
5354 * regiser this app now.
5355 */
5356 if (!found_app) {
5357 pr_debug("Register app %d [%s] which was loaded before\n",
5358 ret, (char *)query_req.app_name);
5359 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5360 if (!entry) {
5361 pr_err("kmalloc for app entry failed\n");
5362 return -ENOMEM;
5363 }
5364 entry->app_id = app_id;
5365 entry->ref_cnt = 1;
5366 entry->app_arch = data->client.app_arch;
5367 strlcpy(entry->app_name, data->client.app_name,
5368 MAX_APP_NAME_SIZE);
5369 entry->app_blocked = false;
5370 entry->blocked_on_listener_id = 0;
5371 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5372 flags);
5373 list_add_tail(&entry->list,
5374 &qseecom.registered_app_list_head);
5375 spin_unlock_irqrestore(
5376 &qseecom.registered_app_list_lock, flags);
5377 }
5378 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5379 pr_err("copy_to_user failed\n");
5380 return -EFAULT;
5381 }
5382 return -EEXIST; /* app already loaded */
5383 } else {
5384 return 0; /* app not loaded */
5385 }
5386}
5387
5388static int __qseecom_get_ce_pipe_info(
5389 enum qseecom_key_management_usage_type usage,
5390 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5391{
5392 int ret = -EINVAL;
5393 int i, j;
5394 struct qseecom_ce_info_use *p = NULL;
5395 int total = 0;
5396 struct qseecom_ce_pipe_entry *pcepipe;
5397
5398 switch (usage) {
5399 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5400 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5401 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5402 if (qseecom.support_fde) {
5403 p = qseecom.ce_info.fde;
5404 total = qseecom.ce_info.num_fde;
5405 } else {
5406 pr_err("system does not support fde\n");
5407 return -EINVAL;
5408 }
5409 break;
5410 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5411 if (qseecom.support_pfe) {
5412 p = qseecom.ce_info.pfe;
5413 total = qseecom.ce_info.num_pfe;
5414 } else {
5415 pr_err("system does not support pfe\n");
5416 return -EINVAL;
5417 }
5418 break;
5419 default:
5420 pr_err("unsupported usage %d\n", usage);
5421 return -EINVAL;
5422 }
5423
5424 for (j = 0; j < total; j++) {
5425 if (p->unit_num == unit) {
5426 pcepipe = p->ce_pipe_entry;
5427 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5428 (*ce_hw)[i] = pcepipe->ce_num;
5429 *pipe = pcepipe->ce_pipe_pair;
5430 pcepipe++;
5431 }
5432 ret = 0;
5433 break;
5434 }
5435 p++;
5436 }
5437 return ret;
5438}
5439
/*
 * Generate and persist a storage key in QSEE for the given usage.
 *
 * Enables the QSEE clock around the SCM call.  A TZ response of
 * QSEOS_RESULT_FAIL_KEY_ID_EXISTS (key generated on a previous boot)
 * is treated as success.  Returns 0 on success, negative errno
 * otherwise.
 */
static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_generate_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_generate_ireq),
		&resp, sizeof(resp));
	if (ret) {
		/*
		 * -EINVAL together with FAIL_KEY_ID_EXISTS means the key
		 * was already generated earlier; report success.
		 */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
			pr_debug("Key ID exists.\n");
			ret = 0;
		} else {
			pr_err("scm call to generate key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto generate_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
		pr_debug("Key ID exists.\n");
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener service(s) before it can finish */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
				pr_debug("Key ID exists.\n");
				ret = 0;
			} else {
				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			}
		}
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("gen key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
generate_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5499
/*
 * Delete the saved storage key for the given key-management usage.
 *
 * Enables the QSEE clock around the SCM call.  A TZ response of
 * QSEOS_RESULT_FAIL_MAX_ATTEMPT (too many bad password attempts) is
 * mapped to -ERANGE so callers can distinguish it from other errors.
 * Returns 0 on success, negative errno otherwise.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener service(s) to complete the request */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5560
5561static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5562 enum qseecom_key_management_usage_type usage,
5563 struct qseecom_key_select_ireq *ireq)
5564{
5565 struct qseecom_command_scm_resp resp;
5566 int ret;
5567
5568 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5569 usage >= QSEOS_KM_USAGE_MAX) {
5570 pr_err("Error:: unsupported usage %d\n", usage);
5571 return -EFAULT;
5572 }
5573 ret = __qseecom_enable_clk(CLK_QSEE);
5574 if (ret)
5575 return ret;
5576
5577 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5578 ret = __qseecom_enable_clk(CLK_CE_DRV);
5579 if (ret)
5580 return ret;
5581 }
5582
5583 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5584 ireq, sizeof(struct qseecom_key_select_ireq),
5585 &resp, sizeof(struct qseecom_command_scm_resp));
5586 if (ret) {
5587 if (ret == -EINVAL &&
5588 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5589 pr_debug("Max attempts to input password reached.\n");
5590 ret = -ERANGE;
5591 } else if (ret == -EINVAL &&
5592 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5593 pr_debug("Set Key operation under processing...\n");
5594 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5595 } else {
5596 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5597 ret);
5598 ret = -EFAULT;
5599 }
5600 goto set_key_exit;
5601 }
5602
5603 switch (resp.result) {
5604 case QSEOS_RESULT_SUCCESS:
5605 break;
5606 case QSEOS_RESULT_INCOMPLETE:
5607 ret = __qseecom_process_incomplete_cmd(data, &resp);
5608 if (ret) {
5609 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5610 resp.result);
5611 if (resp.result ==
5612 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5613 pr_debug("Set Key operation under processing...\n");
5614 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5615 }
5616 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5617 pr_debug("Max attempts to input password reached.\n");
5618 ret = -ERANGE;
5619 }
5620 }
5621 break;
5622 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5623 pr_debug("Max attempts to input password reached.\n");
5624 ret = -ERANGE;
5625 break;
5626 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5627 pr_debug("Set Key operation under processing...\n");
5628 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5629 break;
5630 case QSEOS_RESULT_FAILURE:
5631 default:
5632 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5633 ret = -EINVAL;
5634 break;
5635 }
5636set_key_exit:
5637 __qseecom_disable_clk(CLK_QSEE);
5638 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5639 __qseecom_disable_clk(CLK_CE_DRV);
5640 return ret;
5641}
5642
/*
 * Update the user info (password hash binding) of the current key for
 * the given usage via an SCM call, with the QSEE clock enabled.
 *
 * QSEOS_RESULT_FAIL_PENDING_OPERATION is returned as-is (positive) so
 * the caller can retry.  Returns 0 on success, negative errno on
 * other failures.
 */
static int __qseecom_update_current_key_user_info(
		struct qseecom_dev_handle *data,
		enum qseecom_key_management_usage_type usage,
		struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/*
		 * Note: the "pending" case deliberately does NOT return
		 * here; it falls through to the result switch below,
		 * which re-derives the pending status from resp.result.
		 * Any other SCM failure disables the clock and bails out.
		 */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
				ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener service(s) to complete the request */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5704
5705
5706static int qseecom_enable_ice_setup(int usage)
5707{
5708 int ret = 0;
5709
5710 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5711 ret = qcom_ice_setup_ice_hw("ufs", true);
5712 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5713 ret = qcom_ice_setup_ice_hw("sdcc", true);
5714
5715 return ret;
5716}
5717
5718static int qseecom_disable_ice_setup(int usage)
5719{
5720 int ret = 0;
5721
5722 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5723 ret = qcom_ice_setup_ice_hw("ufs", false);
5724 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5725 ret = qcom_ice_setup_ice_hw("sdcc", false);
5726
5727 return ret;
5728}
5729
5730static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5731{
5732 struct qseecom_ce_info_use *pce_info_use, *p;
5733 int total = 0;
5734 int i;
5735
5736 switch (usage) {
5737 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5738 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5739 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5740 p = qseecom.ce_info.fde;
5741 total = qseecom.ce_info.num_fde;
5742 break;
5743 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5744 p = qseecom.ce_info.pfe;
5745 total = qseecom.ce_info.num_pfe;
5746 break;
5747 default:
5748 pr_err("unsupported usage %d\n", usage);
5749 return -EINVAL;
5750 }
5751
5752 pce_info_use = NULL;
5753
5754 for (i = 0; i < total; i++) {
5755 if (p->unit_num == unit) {
5756 pce_info_use = p;
5757 break;
5758 }
5759 p++;
5760 }
5761 if (!pce_info_use) {
5762 pr_err("can not find %d\n", unit);
5763 return -EINVAL;
5764 }
5765 return pce_info_use->num_ce_pipe_entries;
5766}
5767
5768static int qseecom_create_key(struct qseecom_dev_handle *data,
5769 void __user *argp)
5770{
5771 int i;
5772 uint32_t *ce_hw = NULL;
5773 uint32_t pipe = 0;
5774 int ret = 0;
5775 uint32_t flags = 0;
5776 struct qseecom_create_key_req create_key_req;
5777 struct qseecom_key_generate_ireq generate_key_ireq;
5778 struct qseecom_key_select_ireq set_key_ireq;
5779 uint32_t entries = 0;
5780
5781 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5782 if (ret) {
5783 pr_err("copy_from_user failed\n");
5784 return ret;
5785 }
5786
5787 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5788 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5789 pr_err("unsupported usage %d\n", create_key_req.usage);
5790 ret = -EFAULT;
5791 return ret;
5792 }
5793 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5794 create_key_req.usage);
5795 if (entries <= 0) {
5796 pr_err("no ce instance for usage %d instance %d\n",
5797 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5798 ret = -EINVAL;
5799 return ret;
5800 }
5801
5802 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5803 if (!ce_hw) {
5804 ret = -ENOMEM;
5805 return ret;
5806 }
5807 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5808 DEFAULT_CE_INFO_UNIT);
5809 if (ret) {
5810 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5811 ret = -EINVAL;
5812 goto free_buf;
5813 }
5814
5815 if (qseecom.fde_key_size)
5816 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5817 else
5818 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5819
5820 generate_key_ireq.flags = flags;
5821 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5822 memset((void *)generate_key_ireq.key_id,
5823 0, QSEECOM_KEY_ID_SIZE);
5824 memset((void *)generate_key_ireq.hash32,
5825 0, QSEECOM_HASH_SIZE);
5826 memcpy((void *)generate_key_ireq.key_id,
5827 (void *)key_id_array[create_key_req.usage].desc,
5828 QSEECOM_KEY_ID_SIZE);
5829 memcpy((void *)generate_key_ireq.hash32,
5830 (void *)create_key_req.hash32,
5831 QSEECOM_HASH_SIZE);
5832
5833 ret = __qseecom_generate_and_save_key(data,
5834 create_key_req.usage, &generate_key_ireq);
5835 if (ret) {
5836 pr_err("Failed to generate key on storage: %d\n", ret);
5837 goto free_buf;
5838 }
5839
5840 for (i = 0; i < entries; i++) {
5841 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5842 if (create_key_req.usage ==
5843 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5844 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5845 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5846
5847 } else if (create_key_req.usage ==
5848 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5849 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5850 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5851
5852 } else {
5853 set_key_ireq.ce = ce_hw[i];
5854 set_key_ireq.pipe = pipe;
5855 }
5856 set_key_ireq.flags = flags;
5857
5858 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5859 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5860 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5861 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5862 memcpy((void *)set_key_ireq.key_id,
5863 (void *)key_id_array[create_key_req.usage].desc,
5864 QSEECOM_KEY_ID_SIZE);
5865 memcpy((void *)set_key_ireq.hash32,
5866 (void *)create_key_req.hash32,
5867 QSEECOM_HASH_SIZE);
5868 /*
5869 * It will return false if it is GPCE based crypto instance or
5870 * ICE is setup properly
5871 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07005872 ret = qseecom_enable_ice_setup(create_key_req.usage);
5873 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005874 goto free_buf;
5875
5876 do {
5877 ret = __qseecom_set_clear_ce_key(data,
5878 create_key_req.usage,
5879 &set_key_ireq);
5880 /*
5881 * wait a little before calling scm again to let other
5882 * processes run
5883 */
5884 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
5885 msleep(50);
5886
5887 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
5888
5889 qseecom_disable_ice_setup(create_key_req.usage);
5890
5891 if (ret) {
5892 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
5893 pipe, ce_hw[i], ret);
5894 goto free_buf;
5895 } else {
5896 pr_err("Set the key successfully\n");
5897 if ((create_key_req.usage ==
5898 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
5899 (create_key_req.usage ==
5900 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
5901 goto free_buf;
5902 }
5903 }
5904
5905free_buf:
5906 kzfree(ce_hw);
5907 return ret;
5908}
5909
5910static int qseecom_wipe_key(struct qseecom_dev_handle *data,
5911 void __user *argp)
5912{
5913 uint32_t *ce_hw = NULL;
5914 uint32_t pipe = 0;
5915 int ret = 0;
5916 uint32_t flags = 0;
5917 int i, j;
5918 struct qseecom_wipe_key_req wipe_key_req;
5919 struct qseecom_key_delete_ireq delete_key_ireq;
5920 struct qseecom_key_select_ireq clear_key_ireq;
5921 uint32_t entries = 0;
5922
5923 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
5924 if (ret) {
5925 pr_err("copy_from_user failed\n");
5926 return ret;
5927 }
5928
5929 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5930 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5931 pr_err("unsupported usage %d\n", wipe_key_req.usage);
5932 ret = -EFAULT;
5933 return ret;
5934 }
5935
5936 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5937 wipe_key_req.usage);
5938 if (entries <= 0) {
5939 pr_err("no ce instance for usage %d instance %d\n",
5940 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
5941 ret = -EINVAL;
5942 return ret;
5943 }
5944
5945 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5946 if (!ce_hw) {
5947 ret = -ENOMEM;
5948 return ret;
5949 }
5950
5951 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
5952 DEFAULT_CE_INFO_UNIT);
5953 if (ret) {
5954 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5955 ret = -EINVAL;
5956 goto free_buf;
5957 }
5958
5959 if (wipe_key_req.wipe_key_flag) {
5960 delete_key_ireq.flags = flags;
5961 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
5962 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5963 memcpy((void *)delete_key_ireq.key_id,
5964 (void *)key_id_array[wipe_key_req.usage].desc,
5965 QSEECOM_KEY_ID_SIZE);
5966 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5967
5968 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
5969 &delete_key_ireq);
5970 if (ret) {
5971 pr_err("Failed to delete key from ssd storage: %d\n",
5972 ret);
5973 ret = -EFAULT;
5974 goto free_buf;
5975 }
5976 }
5977
5978 for (j = 0; j < entries; j++) {
5979 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5980 if (wipe_key_req.usage ==
5981 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5982 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5983 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5984 } else if (wipe_key_req.usage ==
5985 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5986 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5987 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5988 } else {
5989 clear_key_ireq.ce = ce_hw[j];
5990 clear_key_ireq.pipe = pipe;
5991 }
5992 clear_key_ireq.flags = flags;
5993 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5994 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
5995 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
5996 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5997
5998 /*
5999 * It will return false if it is GPCE based crypto instance or
6000 * ICE is setup properly
6001 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006002 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6003 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006004 goto free_buf;
6005
6006 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6007 &clear_key_ireq);
6008
6009 qseecom_disable_ice_setup(wipe_key_req.usage);
6010
6011 if (ret) {
6012 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6013 pipe, ce_hw[j], ret);
6014 ret = -EFAULT;
6015 goto free_buf;
6016 }
6017 }
6018
6019free_buf:
6020 kzfree(ce_hw);
6021 return ret;
6022}
6023
/*
 * Handle QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: re-bind the stored
 * key for the requested usage from the current password hash to a new
 * one.
 *
 * Builds the userinfo-update request from the per-usage key id and the
 * two hashes supplied by userspace, then retries while TZ reports the
 * operation as pending, sleeping 50 ms between attempts.
 * Returns 0 on success, negative errno on failure.
 */
static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
			void __user *argp)
{
	int ret = 0;
	uint32_t flags = 0;
	struct qseecom_update_key_userinfo_req update_key_req;
	struct qseecom_key_userinfo_update_ireq ireq;

	ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}

	/* Validate the usage range before using it as an array index */
	if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
		return -EFAULT;
	}

	ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;

	if (qseecom.fde_key_size)
		flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
	else
		flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;

	ireq.flags = flags;
	memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
	memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
	memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
	memcpy((void *)ireq.key_id,
		(void *)key_id_array[update_key_req.usage].desc,
		QSEECOM_KEY_ID_SIZE);
	memcpy((void *)ireq.current_hash32,
		(void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
	memcpy((void *)ireq.new_hash32,
		(void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);

	do {
		ret = __qseecom_update_current_key_user_info(data,
						update_key_req.usage,
						&ireq);
		/*
		 * wait a little before calling scm again to let other
		 * processes run
		 */
		if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
			msleep(50);

	} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
	if (ret) {
		pr_err("Failed to update key info: %d\n", ret);
		return ret;
	}
	return ret;

}
6082static int qseecom_is_es_activated(void __user *argp)
6083{
6084 struct qseecom_is_es_activated_req req;
6085 struct qseecom_command_scm_resp resp;
6086 int ret;
6087
6088 if (qseecom.qsee_version < QSEE_VERSION_04) {
6089 pr_err("invalid qsee version\n");
6090 return -ENODEV;
6091 }
6092
6093 if (argp == NULL) {
6094 pr_err("arg is null\n");
6095 return -EINVAL;
6096 }
6097
6098 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6099 &req, sizeof(req), &resp, sizeof(resp));
6100 if (ret) {
6101 pr_err("scm_call failed\n");
6102 return ret;
6103 }
6104
6105 req.is_activated = resp.result;
6106 ret = copy_to_user(argp, &req, sizeof(req));
6107 if (ret) {
6108 pr_err("copy_to_user failed\n");
6109 return ret;
6110 }
6111
6112 return 0;
6113}
6114
6115static int qseecom_save_partition_hash(void __user *argp)
6116{
6117 struct qseecom_save_partition_hash_req req;
6118 struct qseecom_command_scm_resp resp;
6119 int ret;
6120
6121 memset(&resp, 0x00, sizeof(resp));
6122
6123 if (qseecom.qsee_version < QSEE_VERSION_04) {
6124 pr_err("invalid qsee version\n");
6125 return -ENODEV;
6126 }
6127
6128 if (argp == NULL) {
6129 pr_err("arg is null\n");
6130 return -EINVAL;
6131 }
6132
6133 ret = copy_from_user(&req, argp, sizeof(req));
6134 if (ret) {
6135 pr_err("copy_from_user failed\n");
6136 return ret;
6137 }
6138
6139 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6140 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6141 if (ret) {
6142 pr_err("qseecom_scm_call failed\n");
6143 return ret;
6144 }
6145
6146 return 0;
6147}
6148
/*
 * Handle QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: cipher (encrypt/decrypt,
 * per req.direction) an MDTP DIP buffer in TZ.
 *
 * The user buffers are bounced through page-aligned, zeroed kernel
 * buffers that are cache-flushed around the SCM call, since TZ is
 * handed their physical addresses.  The do { } while (0) block is a
 * single-exit cleanup idiom: every failure breaks out to the common
 * kzfree() calls at the bottom.
 */
static int qseecom_mdtp_cipher_dip(void __user *argp)
{
	struct qseecom_mdtp_cipher_dip_req req;
	u32 tzbuflenin, tzbuflenout;
	char *tzbufin = NULL, *tzbufout = NULL;
	struct scm_desc desc = {0};
	int ret;

	do {
		/* Copy the parameters from userspace */
		if (argp == NULL) {
			pr_err("arg is null\n");
			ret = -EINVAL;
			break;
		}

		ret = copy_from_user(&req, argp, sizeof(req));
		if (ret) {
			pr_err("copy_from_user failed, ret= %d\n", ret);
			break;
		}

		/* Bound-check sizes and direction before any allocation */
		if (req.in_buf == NULL || req.out_buf == NULL ||
			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
				req.direction > 1) {
			pr_err("invalid parameters\n");
			ret = -EINVAL;
			break;
		}

		/* Copy the input buffer from userspace to kernel space */
		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
		if (!tzbufin) {
			pr_err("error allocating in buffer\n");
			ret = -ENOMEM;
			break;
		}

		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
		if (ret) {
			pr_err("copy_from_user failed, ret=%d\n", ret);
			break;
		}

		/* Flush so TZ sees the data via the physical address */
		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);

		/* Prepare the output buffer in kernel space */
		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
		if (!tzbufout) {
			pr_err("error allocating out buffer\n");
			ret = -ENOMEM;
			break;
		}

		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);

		/* Send the command to TZ */
		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
		desc.args[0] = virt_to_phys(tzbufin);
		desc.args[1] = req.in_buf_size;
		desc.args[2] = virt_to_phys(tzbufout);
		desc.args[3] = req.out_buf_size;
		desc.args[4] = req.direction;

		ret = __qseecom_enable_clk(CLK_QSEE);
		if (ret)
			break;

		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);

		__qseecom_disable_clk(CLK_QSEE);

		if (ret) {
			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
				ret);
			break;
		}

		/* Copy the output buffer from kernel space to userspace */
		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
		if (ret) {
			pr_err("copy_to_user failed, ret=%d\n", ret);
			break;
		}
	} while (0);

	/* kzfree(NULL) is a no-op, so this is safe on every exit path */
	kzfree(tzbufin);
	kzfree(tzbufout);

	return ret;
}
6244
/*
 * Validate a QTEEC request/response descriptor against the client's
 * registered shared buffer before it is handed to TZ.
 *
 * Checks, in order: client state; request type; integer overflow of
 * the combined lengths; combined fit within the shared buffer;
 * non-NULL buffer pointers; both pointers lying inside the user-mapped
 * shared-buffer window; non-zero lengths; pointer+length overflow; and
 * finally that each (ptr, len) span ends inside the shared buffer.
 * Returns 0 if the message is safe to use, negative errno otherwise.
 */
static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req)
{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->type != QSEECOM_CLIENT_APP)
		return -EFAULT;

	/* Guard the sum below against unsigned wraparound */
	if (req->req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if (req->req_len + req->resp_len > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->req_len + req->resp_len), data->client.sb_length);
		return -ENOMEM;
	}

	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->req_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->req_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if (((uintptr_t)req->resp_ptr <
			data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if ((req->req_len == 0) || (req->resp_len == 0)) {
		pr_err("cmd buf lengtgh/response buf length not valid\n");
		return -EINVAL;
	}

	/* Guard the end-of-span checks below against pointer wraparound */
	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
		pr_err("Integer overflow in req_len & req_ptr\n");
		return -EINVAL;
	}

	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_ptr\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base >
			(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	if ((((uintptr_t)req->req_ptr + req->req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
			data->client.sb_length)) ||
		(((uintptr_t)req->resp_ptr + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
			data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
6319
6320static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6321 uint32_t fd_idx, struct sg_table *sg_ptr)
6322{
6323 struct scatterlist *sg = sg_ptr->sgl;
6324 struct qseecom_sg_entry *sg_entry;
6325 void *buf;
6326 uint i;
6327 size_t size;
6328 dma_addr_t coh_pmem;
6329
6330 if (fd_idx >= MAX_ION_FD) {
6331 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6332 return -ENOMEM;
6333 }
6334 /*
6335 * Allocate a buffer, populate it with number of entry plus
6336 * each sg entry's phy addr and length; then return the
6337 * phy_addr of the buffer.
6338 */
6339 size = sizeof(uint32_t) +
6340 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6341 size = (size + PAGE_SIZE) & PAGE_MASK;
6342 buf = dma_alloc_coherent(qseecom.pdev,
6343 size, &coh_pmem, GFP_KERNEL);
6344 if (buf == NULL) {
6345 pr_err("failed to alloc memory for sg buf\n");
6346 return -ENOMEM;
6347 }
6348 *(uint32_t *)buf = sg_ptr->nents;
6349 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6350 for (i = 0; i < sg_ptr->nents; i++) {
6351 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6352 sg_entry->len = sg->length;
6353 sg_entry++;
6354 sg = sg_next(sg);
6355 }
6356 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6357 data->client.sec_buf_fd[fd_idx].vbase = buf;
6358 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6359 data->client.sec_buf_fd[fd_idx].size = size;
6360 return 0;
6361}
6362
6363static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6364 struct qseecom_dev_handle *data, bool cleanup)
6365{
6366 struct ion_handle *ihandle;
6367 int ret = 0;
6368 int i = 0;
6369 uint32_t *update;
6370 struct sg_table *sg_ptr = NULL;
6371 struct scatterlist *sg;
6372 struct qseecom_param_memref *memref;
6373
6374 if (req == NULL) {
6375 pr_err("Invalid address\n");
6376 return -EINVAL;
6377 }
6378 for (i = 0; i < MAX_ION_FD; i++) {
6379 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006380 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006381 req->ifd_data[i].fd);
6382 if (IS_ERR_OR_NULL(ihandle)) {
6383 pr_err("Ion client can't retrieve the handle\n");
6384 return -ENOMEM;
6385 }
6386 if ((req->req_len < sizeof(uint32_t)) ||
6387 (req->ifd_data[i].cmd_buf_offset >
6388 req->req_len - sizeof(uint32_t))) {
6389 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6390 req->req_len,
6391 req->ifd_data[i].cmd_buf_offset);
6392 return -EINVAL;
6393 }
6394 update = (uint32_t *)((char *) req->req_ptr +
6395 req->ifd_data[i].cmd_buf_offset);
6396 if (!update) {
6397 pr_err("update pointer is NULL\n");
6398 return -EINVAL;
6399 }
6400 } else {
6401 continue;
6402 }
6403 /* Populate the cmd data structure with the phys_addr */
6404 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6405 if (IS_ERR_OR_NULL(sg_ptr)) {
6406 pr_err("IOn client could not retrieve sg table\n");
6407 goto err;
6408 }
6409 sg = sg_ptr->sgl;
6410 if (sg == NULL) {
6411 pr_err("sg is NULL\n");
6412 goto err;
6413 }
6414 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6415 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6416 sg_ptr->nents, sg->length);
6417 goto err;
6418 }
6419 /* clean up buf for pre-allocated fd */
6420 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6421 (*update)) {
6422 if (data->client.sec_buf_fd[i].vbase)
6423 dma_free_coherent(qseecom.pdev,
6424 data->client.sec_buf_fd[i].size,
6425 data->client.sec_buf_fd[i].vbase,
6426 data->client.sec_buf_fd[i].pbase);
6427 memset((void *)update, 0,
6428 sizeof(struct qseecom_param_memref));
6429 memset(&(data->client.sec_buf_fd[i]), 0,
6430 sizeof(struct qseecom_sec_buf_fd_info));
6431 goto clean;
6432 }
6433
6434 if (*update == 0) {
6435 /* update buf for pre-allocated fd from secure heap*/
6436 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6437 sg_ptr);
6438 if (ret) {
6439 pr_err("Failed to handle buf for fd[%d]\n", i);
6440 goto err;
6441 }
6442 memref = (struct qseecom_param_memref *)update;
6443 memref->buffer =
6444 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6445 memref->size =
6446 (uint32_t)(data->client.sec_buf_fd[i].size);
6447 } else {
6448 /* update buf for fd from non-secure qseecom heap */
6449 if (sg_ptr->nents != 1) {
6450 pr_err("Num of scat entr (%d) invalid\n",
6451 sg_ptr->nents);
6452 goto err;
6453 }
6454 if (cleanup)
6455 *update = 0;
6456 else
6457 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6458 }
6459clean:
6460 if (cleanup) {
6461 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6462 ihandle, NULL, sg->length,
6463 ION_IOC_INV_CACHES);
6464 if (ret) {
6465 pr_err("cache operation failed %d\n", ret);
6466 goto err;
6467 }
6468 } else {
6469 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6470 ihandle, NULL, sg->length,
6471 ION_IOC_CLEAN_INV_CACHES);
6472 if (ret) {
6473 pr_err("cache operation failed %d\n", ret);
6474 goto err;
6475 }
6476 data->sglistinfo_ptr[i].indexAndFlags =
6477 SGLISTINFO_SET_INDEX_FLAG(
6478 (sg_ptr->nents == 1), 0,
6479 req->ifd_data[i].cmd_buf_offset);
6480 data->sglistinfo_ptr[i].sizeOrCount =
6481 (sg_ptr->nents == 1) ?
6482 sg->length : sg_ptr->nents;
6483 data->sglist_cnt = i + 1;
6484 }
6485 /* Deallocate the handle */
6486 if (!IS_ERR_OR_NULL(ihandle))
6487 ion_free(qseecom.ion_clnt, ihandle);
6488 }
6489 return ret;
6490err:
6491 if (!IS_ERR_OR_NULL(ihandle))
6492 ion_free(qseecom.ion_clnt, ihandle);
6493 return -ENOMEM;
6494}
6495
/*
 * Issue a QTEEC command (@cmd_id) on behalf of the trusted app bound to
 * @data.
 *
 * Steps: validate @req against the client's shared buffer, confirm the
 * app is registered, rewrite ion-fd references for open-session /
 * request-cancellation commands, build a 32- or 64-bit SCM request
 * depending on the QSEE version, clean caches, dispatch via
 * qseecom_scm_call(), and handle incomplete/reentrant responses before
 * invalidating caches and (for the same two commands) restoring the
 * request buffer.
 *
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
			struct qseecom_qteec_req *req, uint32_t cmd_id)
{
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	ret = __qseecom_qteec_validate_msg(data, req);
	if (ret)
		return ret;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* Only these two commands carry ion fds that need patching in. */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
		(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, false);
		if (ret)
			return ret;
	}

	/* Build the 32-bit or 64-bit SCM request per QSEE version. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req->req_ptr);
		ireq.req_len = req->req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req->resp_ptr);
		ireq.resp_len = req->resp_len;
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req->req_ptr);
		ireq_64bit.req_len = req->req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req->resp_ptr);
		ireq_64bit.resp_len = req->resp_len;
		/* A 32-bit TA cannot address buffers at/above 4 GB. */
		if ((data->client.app_arch == ELFCLASS32) &&
			((ireq_64bit.req_ptr >=
				PHY_ADDR_4G - ireq_64bit.req_len) ||
			(ireq_64bit.resp_ptr >=
				PHY_ADDR_4G - ireq_64bit.resp_len))){
			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
				data->client.app_name, data->client.app_id);
			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
				ireq_64bit.req_ptr, ireq_64bit.req_len,
				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
			return -EFAULT;
		}
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
	}
	/* First word of either ireq struct is the command id. */
	if (qseecom.whitelist_support == true
		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
	else
		*(uint32_t *)cmd_buf = cmd_id;

	/* Clean caches so TZ sees the request/response region. */
	reqd_len_sb_in = req->req_len + req->resp_len;
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}

	/* Drain listener round-trips / incomplete results as appropriate. */
	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
						resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* Invalidate so the CPU observes TZ's writes to the shared buffer. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	/* Undo the fd patching done before the call. */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, true);
		if (ret)
			return ret;
	}
	return 0;
}
6643
6644static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6645 void __user *argp)
6646{
6647 struct qseecom_qteec_modfd_req req;
6648 int ret = 0;
6649
6650 ret = copy_from_user(&req, argp,
6651 sizeof(struct qseecom_qteec_modfd_req));
6652 if (ret) {
6653 pr_err("copy_from_user failed\n");
6654 return ret;
6655 }
6656 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6657 QSEOS_TEE_OPEN_SESSION);
6658
6659 return ret;
6660}
6661
6662static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6663 void __user *argp)
6664{
6665 struct qseecom_qteec_req req;
6666 int ret = 0;
6667
6668 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6669 if (ret) {
6670 pr_err("copy_from_user failed\n");
6671 return ret;
6672 }
6673 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6674 return ret;
6675}
6676
/*
 * Ioctl backend for QTEEC invoke-command with modified fds.
 *
 * Copies the request from userspace, validates it against the client's
 * shared buffer, patches embedded ion-fd references, then builds the
 * 32- or 64-bit SCM request (using the ORIGINAL userspace pointers for
 * the phys-addr translation, while req.req_ptr/resp_ptr are converted
 * to kernel virtual addresses for the fd patching), cleans caches,
 * dispatches the command, handles incomplete/reentrant responses,
 * restores the request buffer, and finally invalidates caches.
 *
 * Returns 0 on success or a negative errno.
 */
static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	struct qseecom_qteec_modfd_req req;
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int i = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = copy_from_user(&req, argp,
				sizeof(struct qseecom_qteec_modfd_req));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}
	ret = __qseecom_qteec_validate_msg(data,
					(struct qseecom_qteec_req *)(&req));
	if (ret)
		return ret;
	/* Keep the userspace addresses; needed for uvirt->kphys below. */
	req_ptr = req.req_ptr;
	resp_ptr = req.resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (req.ifd_data[i].fd) {
			if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
				return -EINVAL;
		}
	}
	/* Switch to kernel-virtual addresses so the fd words can be patched. */
	req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.req_ptr);
	req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.resp_ptr);
	ret = __qseecom_update_qteec_req_buf(&req, data, false);
	if (ret)
		return ret;

	/* Build the 32-bit or 64-bit SCM request per QSEE version. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req.req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req.req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
	}
	reqd_len_sb_in = req.req_len + req.resp_len;
	/* First word of either ireq struct is the command id. */
	if (qseecom.whitelist_support == true)
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
	else
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;

	/* Clean caches so TZ sees the request/response region. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}

	/* Drain listener round-trips / incomplete results as appropriate. */
	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
						resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* Undo the fd patching done before the call. */
	ret = __qseecom_update_qteec_req_buf(&req, data, true);
	if (ret)
		return ret;

	/* Invalidate so the CPU observes TZ's writes to the shared buffer. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}
	return 0;
}
6827
6828static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6829 void __user *argp)
6830{
6831 struct qseecom_qteec_modfd_req req;
6832 int ret = 0;
6833
6834 ret = copy_from_user(&req, argp,
6835 sizeof(struct qseecom_qteec_modfd_req));
6836 if (ret) {
6837 pr_err("copy_from_user failed\n");
6838 return ret;
6839 }
6840 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6841 QSEOS_TEE_REQUEST_CANCELLATION);
6842
6843 return ret;
6844}
6845
6846static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6847{
6848 if (data->sglist_cnt) {
6849 memset(data->sglistinfo_ptr, 0,
6850 SGLISTINFO_TABLE_SIZE);
6851 data->sglist_cnt = 0;
6852 }
6853}
6854
6855static inline long qseecom_ioctl(struct file *file,
6856 unsigned int cmd, unsigned long arg)
6857{
6858 int ret = 0;
6859 struct qseecom_dev_handle *data = file->private_data;
6860 void __user *argp = (void __user *) arg;
6861 bool perf_enabled = false;
6862
6863 if (!data) {
6864 pr_err("Invalid/uninitialized device handle\n");
6865 return -EINVAL;
6866 }
6867
6868 if (data->abort) {
6869 pr_err("Aborting qseecom driver\n");
6870 return -ENODEV;
6871 }
6872
6873 switch (cmd) {
6874 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
6875 if (data->type != QSEECOM_GENERIC) {
6876 pr_err("reg lstnr req: invalid handle (%d)\n",
6877 data->type);
6878 ret = -EINVAL;
6879 break;
6880 }
6881 pr_debug("ioctl register_listener_req()\n");
6882 mutex_lock(&app_access_lock);
6883 atomic_inc(&data->ioctl_count);
6884 data->type = QSEECOM_LISTENER_SERVICE;
6885 ret = qseecom_register_listener(data, argp);
6886 atomic_dec(&data->ioctl_count);
6887 wake_up_all(&data->abort_wq);
6888 mutex_unlock(&app_access_lock);
6889 if (ret)
6890 pr_err("failed qseecom_register_listener: %d\n", ret);
6891 break;
6892 }
6893 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
6894 if ((data->listener.id == 0) ||
6895 (data->type != QSEECOM_LISTENER_SERVICE)) {
6896 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
6897 data->type, data->listener.id);
6898 ret = -EINVAL;
6899 break;
6900 }
6901 pr_debug("ioctl unregister_listener_req()\n");
6902 mutex_lock(&app_access_lock);
6903 atomic_inc(&data->ioctl_count);
6904 ret = qseecom_unregister_listener(data);
6905 atomic_dec(&data->ioctl_count);
6906 wake_up_all(&data->abort_wq);
6907 mutex_unlock(&app_access_lock);
6908 if (ret)
6909 pr_err("failed qseecom_unregister_listener: %d\n", ret);
6910 break;
6911 }
6912 case QSEECOM_IOCTL_SEND_CMD_REQ: {
6913 if ((data->client.app_id == 0) ||
6914 (data->type != QSEECOM_CLIENT_APP)) {
6915 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
6916 data->type, data->client.app_id);
6917 ret = -EINVAL;
6918 break;
6919 }
6920 /* Only one client allowed here at a time */
6921 mutex_lock(&app_access_lock);
6922 if (qseecom.support_bus_scaling) {
6923 /* register bus bw in case the client doesn't do it */
6924 if (!data->mode) {
6925 mutex_lock(&qsee_bw_mutex);
6926 __qseecom_register_bus_bandwidth_needs(
6927 data, HIGH);
6928 mutex_unlock(&qsee_bw_mutex);
6929 }
6930 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
6931 if (ret) {
6932 pr_err("Failed to set bw.\n");
6933 ret = -EINVAL;
6934 mutex_unlock(&app_access_lock);
6935 break;
6936 }
6937 }
6938 /*
6939 * On targets where crypto clock is handled by HLOS,
6940 * if clk_access_cnt is zero and perf_enabled is false,
6941 * then the crypto clock was not enabled before sending cmd to
6942 * tz, qseecom will enable the clock to avoid service failure.
6943 */
6944 if (!qseecom.no_clock_support &&
6945 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
6946 pr_debug("ce clock is not enabled!\n");
6947 ret = qseecom_perf_enable(data);
6948 if (ret) {
6949 pr_err("Failed to vote for clock with err %d\n",
6950 ret);
6951 mutex_unlock(&app_access_lock);
6952 ret = -EINVAL;
6953 break;
6954 }
6955 perf_enabled = true;
6956 }
6957 atomic_inc(&data->ioctl_count);
6958 ret = qseecom_send_cmd(data, argp);
6959 if (qseecom.support_bus_scaling)
6960 __qseecom_add_bw_scale_down_timer(
6961 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
6962 if (perf_enabled) {
6963 qsee_disable_clock_vote(data, CLK_DFAB);
6964 qsee_disable_clock_vote(data, CLK_SFPB);
6965 }
6966 atomic_dec(&data->ioctl_count);
6967 wake_up_all(&data->abort_wq);
6968 mutex_unlock(&app_access_lock);
6969 if (ret)
6970 pr_err("failed qseecom_send_cmd: %d\n", ret);
6971 break;
6972 }
6973 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
6974 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
6975 if ((data->client.app_id == 0) ||
6976 (data->type != QSEECOM_CLIENT_APP)) {
6977 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
6978 data->type, data->client.app_id);
6979 ret = -EINVAL;
6980 break;
6981 }
6982 /* Only one client allowed here at a time */
6983 mutex_lock(&app_access_lock);
6984 if (qseecom.support_bus_scaling) {
6985 if (!data->mode) {
6986 mutex_lock(&qsee_bw_mutex);
6987 __qseecom_register_bus_bandwidth_needs(
6988 data, HIGH);
6989 mutex_unlock(&qsee_bw_mutex);
6990 }
6991 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
6992 if (ret) {
6993 pr_err("Failed to set bw.\n");
6994 mutex_unlock(&app_access_lock);
6995 ret = -EINVAL;
6996 break;
6997 }
6998 }
6999 /*
7000 * On targets where crypto clock is handled by HLOS,
7001 * if clk_access_cnt is zero and perf_enabled is false,
7002 * then the crypto clock was not enabled before sending cmd to
7003 * tz, qseecom will enable the clock to avoid service failure.
7004 */
7005 if (!qseecom.no_clock_support &&
7006 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7007 pr_debug("ce clock is not enabled!\n");
7008 ret = qseecom_perf_enable(data);
7009 if (ret) {
7010 pr_err("Failed to vote for clock with err %d\n",
7011 ret);
7012 mutex_unlock(&app_access_lock);
7013 ret = -EINVAL;
7014 break;
7015 }
7016 perf_enabled = true;
7017 }
7018 atomic_inc(&data->ioctl_count);
7019 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7020 ret = qseecom_send_modfd_cmd(data, argp);
7021 else
7022 ret = qseecom_send_modfd_cmd_64(data, argp);
7023 if (qseecom.support_bus_scaling)
7024 __qseecom_add_bw_scale_down_timer(
7025 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7026 if (perf_enabled) {
7027 qsee_disable_clock_vote(data, CLK_DFAB);
7028 qsee_disable_clock_vote(data, CLK_SFPB);
7029 }
7030 atomic_dec(&data->ioctl_count);
7031 wake_up_all(&data->abort_wq);
7032 mutex_unlock(&app_access_lock);
7033 if (ret)
7034 pr_err("failed qseecom_send_cmd: %d\n", ret);
7035 __qseecom_clean_data_sglistinfo(data);
7036 break;
7037 }
7038 case QSEECOM_IOCTL_RECEIVE_REQ: {
7039 if ((data->listener.id == 0) ||
7040 (data->type != QSEECOM_LISTENER_SERVICE)) {
7041 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7042 data->type, data->listener.id);
7043 ret = -EINVAL;
7044 break;
7045 }
7046 atomic_inc(&data->ioctl_count);
7047 ret = qseecom_receive_req(data);
7048 atomic_dec(&data->ioctl_count);
7049 wake_up_all(&data->abort_wq);
7050 if (ret && (ret != -ERESTARTSYS))
7051 pr_err("failed qseecom_receive_req: %d\n", ret);
7052 break;
7053 }
7054 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7055 if ((data->listener.id == 0) ||
7056 (data->type != QSEECOM_LISTENER_SERVICE)) {
7057 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7058 data->type, data->listener.id);
7059 ret = -EINVAL;
7060 break;
7061 }
7062 atomic_inc(&data->ioctl_count);
7063 if (!qseecom.qsee_reentrancy_support)
7064 ret = qseecom_send_resp();
7065 else
7066 ret = qseecom_reentrancy_send_resp(data);
7067 atomic_dec(&data->ioctl_count);
7068 wake_up_all(&data->abort_wq);
7069 if (ret)
7070 pr_err("failed qseecom_send_resp: %d\n", ret);
7071 break;
7072 }
7073 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7074 if ((data->type != QSEECOM_CLIENT_APP) &&
7075 (data->type != QSEECOM_GENERIC) &&
7076 (data->type != QSEECOM_SECURE_SERVICE)) {
7077 pr_err("set mem param req: invalid handle (%d)\n",
7078 data->type);
7079 ret = -EINVAL;
7080 break;
7081 }
7082 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7083 mutex_lock(&app_access_lock);
7084 atomic_inc(&data->ioctl_count);
7085 ret = qseecom_set_client_mem_param(data, argp);
7086 atomic_dec(&data->ioctl_count);
7087 mutex_unlock(&app_access_lock);
7088 if (ret)
7089 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7090 ret);
7091 break;
7092 }
7093 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7094 if ((data->type != QSEECOM_GENERIC) &&
7095 (data->type != QSEECOM_CLIENT_APP)) {
7096 pr_err("load app req: invalid handle (%d)\n",
7097 data->type);
7098 ret = -EINVAL;
7099 break;
7100 }
7101 data->type = QSEECOM_CLIENT_APP;
7102 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7103 mutex_lock(&app_access_lock);
7104 atomic_inc(&data->ioctl_count);
7105 ret = qseecom_load_app(data, argp);
7106 atomic_dec(&data->ioctl_count);
7107 mutex_unlock(&app_access_lock);
7108 if (ret)
7109 pr_err("failed load_app request: %d\n", ret);
7110 break;
7111 }
7112 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7113 if ((data->client.app_id == 0) ||
7114 (data->type != QSEECOM_CLIENT_APP)) {
7115 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7116 data->type, data->client.app_id);
7117 ret = -EINVAL;
7118 break;
7119 }
7120 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7121 mutex_lock(&app_access_lock);
7122 atomic_inc(&data->ioctl_count);
7123 ret = qseecom_unload_app(data, false);
7124 atomic_dec(&data->ioctl_count);
7125 mutex_unlock(&app_access_lock);
7126 if (ret)
7127 pr_err("failed unload_app request: %d\n", ret);
7128 break;
7129 }
7130 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7131 atomic_inc(&data->ioctl_count);
7132 ret = qseecom_get_qseos_version(data, argp);
7133 if (ret)
7134 pr_err("qseecom_get_qseos_version: %d\n", ret);
7135 atomic_dec(&data->ioctl_count);
7136 break;
7137 }
7138 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7139 if ((data->type != QSEECOM_GENERIC) &&
7140 (data->type != QSEECOM_CLIENT_APP)) {
7141 pr_err("perf enable req: invalid handle (%d)\n",
7142 data->type);
7143 ret = -EINVAL;
7144 break;
7145 }
7146 if ((data->type == QSEECOM_CLIENT_APP) &&
7147 (data->client.app_id == 0)) {
7148 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7149 data->type, data->client.app_id);
7150 ret = -EINVAL;
7151 break;
7152 }
7153 atomic_inc(&data->ioctl_count);
7154 if (qseecom.support_bus_scaling) {
7155 mutex_lock(&qsee_bw_mutex);
7156 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7157 mutex_unlock(&qsee_bw_mutex);
7158 } else {
7159 ret = qseecom_perf_enable(data);
7160 if (ret)
7161 pr_err("Fail to vote for clocks %d\n", ret);
7162 }
7163 atomic_dec(&data->ioctl_count);
7164 break;
7165 }
7166 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7167 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7168 (data->type != QSEECOM_CLIENT_APP)) {
7169 pr_err("perf disable req: invalid handle (%d)\n",
7170 data->type);
7171 ret = -EINVAL;
7172 break;
7173 }
7174 if ((data->type == QSEECOM_CLIENT_APP) &&
7175 (data->client.app_id == 0)) {
7176 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7177 data->type, data->client.app_id);
7178 ret = -EINVAL;
7179 break;
7180 }
7181 atomic_inc(&data->ioctl_count);
7182 if (!qseecom.support_bus_scaling) {
7183 qsee_disable_clock_vote(data, CLK_DFAB);
7184 qsee_disable_clock_vote(data, CLK_SFPB);
7185 } else {
7186 mutex_lock(&qsee_bw_mutex);
7187 qseecom_unregister_bus_bandwidth_needs(data);
7188 mutex_unlock(&qsee_bw_mutex);
7189 }
7190 atomic_dec(&data->ioctl_count);
7191 break;
7192 }
7193
7194 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7195 /* If crypto clock is not handled by HLOS, return directly. */
7196 if (qseecom.no_clock_support) {
7197 pr_debug("crypto clock is not handled by HLOS\n");
7198 break;
7199 }
7200 if ((data->client.app_id == 0) ||
7201 (data->type != QSEECOM_CLIENT_APP)) {
7202 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7203 data->type, data->client.app_id);
7204 ret = -EINVAL;
7205 break;
7206 }
7207 atomic_inc(&data->ioctl_count);
7208 ret = qseecom_scale_bus_bandwidth(data, argp);
7209 atomic_dec(&data->ioctl_count);
7210 break;
7211 }
7212 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7213 if (data->type != QSEECOM_GENERIC) {
7214 pr_err("load ext elf req: invalid client handle (%d)\n",
7215 data->type);
7216 ret = -EINVAL;
7217 break;
7218 }
7219 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7220 data->released = true;
7221 mutex_lock(&app_access_lock);
7222 atomic_inc(&data->ioctl_count);
7223 ret = qseecom_load_external_elf(data, argp);
7224 atomic_dec(&data->ioctl_count);
7225 mutex_unlock(&app_access_lock);
7226 if (ret)
7227 pr_err("failed load_external_elf request: %d\n", ret);
7228 break;
7229 }
7230 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7231 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7232 pr_err("unload ext elf req: invalid handle (%d)\n",
7233 data->type);
7234 ret = -EINVAL;
7235 break;
7236 }
7237 data->released = true;
7238 mutex_lock(&app_access_lock);
7239 atomic_inc(&data->ioctl_count);
7240 ret = qseecom_unload_external_elf(data);
7241 atomic_dec(&data->ioctl_count);
7242 mutex_unlock(&app_access_lock);
7243 if (ret)
7244 pr_err("failed unload_app request: %d\n", ret);
7245 break;
7246 }
7247 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7248 data->type = QSEECOM_CLIENT_APP;
7249 mutex_lock(&app_access_lock);
7250 atomic_inc(&data->ioctl_count);
7251 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7252 ret = qseecom_query_app_loaded(data, argp);
7253 atomic_dec(&data->ioctl_count);
7254 mutex_unlock(&app_access_lock);
7255 break;
7256 }
7257 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7258 if (data->type != QSEECOM_GENERIC) {
7259 pr_err("send cmd svc req: invalid handle (%d)\n",
7260 data->type);
7261 ret = -EINVAL;
7262 break;
7263 }
7264 data->type = QSEECOM_SECURE_SERVICE;
7265 if (qseecom.qsee_version < QSEE_VERSION_03) {
7266 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7267 qseecom.qsee_version);
7268 return -EINVAL;
7269 }
7270 mutex_lock(&app_access_lock);
7271 atomic_inc(&data->ioctl_count);
7272 ret = qseecom_send_service_cmd(data, argp);
7273 atomic_dec(&data->ioctl_count);
7274 mutex_unlock(&app_access_lock);
7275 break;
7276 }
7277 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7278 if (!(qseecom.support_pfe || qseecom.support_fde))
7279 pr_err("Features requiring key init not supported\n");
7280 if (data->type != QSEECOM_GENERIC) {
7281 pr_err("create key req: invalid handle (%d)\n",
7282 data->type);
7283 ret = -EINVAL;
7284 break;
7285 }
7286 if (qseecom.qsee_version < QSEE_VERSION_05) {
7287 pr_err("Create Key feature unsupported: qsee ver %u\n",
7288 qseecom.qsee_version);
7289 return -EINVAL;
7290 }
7291 data->released = true;
7292 mutex_lock(&app_access_lock);
7293 atomic_inc(&data->ioctl_count);
7294 ret = qseecom_create_key(data, argp);
7295 if (ret)
7296 pr_err("failed to create encryption key: %d\n", ret);
7297
7298 atomic_dec(&data->ioctl_count);
7299 mutex_unlock(&app_access_lock);
7300 break;
7301 }
7302 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7303 if (!(qseecom.support_pfe || qseecom.support_fde))
7304 pr_err("Features requiring key init not supported\n");
7305 if (data->type != QSEECOM_GENERIC) {
7306 pr_err("wipe key req: invalid handle (%d)\n",
7307 data->type);
7308 ret = -EINVAL;
7309 break;
7310 }
7311 if (qseecom.qsee_version < QSEE_VERSION_05) {
7312 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7313 qseecom.qsee_version);
7314 return -EINVAL;
7315 }
7316 data->released = true;
7317 mutex_lock(&app_access_lock);
7318 atomic_inc(&data->ioctl_count);
7319 ret = qseecom_wipe_key(data, argp);
7320 if (ret)
7321 pr_err("failed to wipe encryption key: %d\n", ret);
7322 atomic_dec(&data->ioctl_count);
7323 mutex_unlock(&app_access_lock);
7324 break;
7325 }
7326 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7327 if (!(qseecom.support_pfe || qseecom.support_fde))
7328 pr_err("Features requiring key init not supported\n");
7329 if (data->type != QSEECOM_GENERIC) {
7330 pr_err("update key req: invalid handle (%d)\n",
7331 data->type);
7332 ret = -EINVAL;
7333 break;
7334 }
7335 if (qseecom.qsee_version < QSEE_VERSION_05) {
7336 pr_err("Update Key feature unsupported in qsee ver %u\n",
7337 qseecom.qsee_version);
7338 return -EINVAL;
7339 }
7340 data->released = true;
7341 mutex_lock(&app_access_lock);
7342 atomic_inc(&data->ioctl_count);
7343 ret = qseecom_update_key_user_info(data, argp);
7344 if (ret)
7345 pr_err("failed to update key user info: %d\n", ret);
7346 atomic_dec(&data->ioctl_count);
7347 mutex_unlock(&app_access_lock);
7348 break;
7349 }
7350 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7351 if (data->type != QSEECOM_GENERIC) {
7352 pr_err("save part hash req: invalid handle (%d)\n",
7353 data->type);
7354 ret = -EINVAL;
7355 break;
7356 }
7357 data->released = true;
7358 mutex_lock(&app_access_lock);
7359 atomic_inc(&data->ioctl_count);
7360 ret = qseecom_save_partition_hash(argp);
7361 atomic_dec(&data->ioctl_count);
7362 mutex_unlock(&app_access_lock);
7363 break;
7364 }
7365 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7366 if (data->type != QSEECOM_GENERIC) {
7367 pr_err("ES activated req: invalid handle (%d)\n",
7368 data->type);
7369 ret = -EINVAL;
7370 break;
7371 }
7372 data->released = true;
7373 mutex_lock(&app_access_lock);
7374 atomic_inc(&data->ioctl_count);
7375 ret = qseecom_is_es_activated(argp);
7376 atomic_dec(&data->ioctl_count);
7377 mutex_unlock(&app_access_lock);
7378 break;
7379 }
7380 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7381 if (data->type != QSEECOM_GENERIC) {
7382 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7383 data->type);
7384 ret = -EINVAL;
7385 break;
7386 }
7387 data->released = true;
7388 mutex_lock(&app_access_lock);
7389 atomic_inc(&data->ioctl_count);
7390 ret = qseecom_mdtp_cipher_dip(argp);
7391 atomic_dec(&data->ioctl_count);
7392 mutex_unlock(&app_access_lock);
7393 break;
7394 }
7395 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7396 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7397 if ((data->listener.id == 0) ||
7398 (data->type != QSEECOM_LISTENER_SERVICE)) {
7399 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7400 data->type, data->listener.id);
7401 ret = -EINVAL;
7402 break;
7403 }
7404 atomic_inc(&data->ioctl_count);
7405 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7406 ret = qseecom_send_modfd_resp(data, argp);
7407 else
7408 ret = qseecom_send_modfd_resp_64(data, argp);
7409 atomic_dec(&data->ioctl_count);
7410 wake_up_all(&data->abort_wq);
7411 if (ret)
7412 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7413 __qseecom_clean_data_sglistinfo(data);
7414 break;
7415 }
7416 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7417 if ((data->client.app_id == 0) ||
7418 (data->type != QSEECOM_CLIENT_APP)) {
7419 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7420 data->type, data->client.app_id);
7421 ret = -EINVAL;
7422 break;
7423 }
7424 if (qseecom.qsee_version < QSEE_VERSION_40) {
7425 pr_err("GP feature unsupported: qsee ver %u\n",
7426 qseecom.qsee_version);
7427 return -EINVAL;
7428 }
7429 /* Only one client allowed here at a time */
7430 mutex_lock(&app_access_lock);
7431 atomic_inc(&data->ioctl_count);
7432 ret = qseecom_qteec_open_session(data, argp);
7433 atomic_dec(&data->ioctl_count);
7434 wake_up_all(&data->abort_wq);
7435 mutex_unlock(&app_access_lock);
7436 if (ret)
7437 pr_err("failed open_session_cmd: %d\n", ret);
7438 __qseecom_clean_data_sglistinfo(data);
7439 break;
7440 }
7441 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7442 if ((data->client.app_id == 0) ||
7443 (data->type != QSEECOM_CLIENT_APP)) {
7444 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7445 data->type, data->client.app_id);
7446 ret = -EINVAL;
7447 break;
7448 }
7449 if (qseecom.qsee_version < QSEE_VERSION_40) {
7450 pr_err("GP feature unsupported: qsee ver %u\n",
7451 qseecom.qsee_version);
7452 return -EINVAL;
7453 }
7454 /* Only one client allowed here at a time */
7455 mutex_lock(&app_access_lock);
7456 atomic_inc(&data->ioctl_count);
7457 ret = qseecom_qteec_close_session(data, argp);
7458 atomic_dec(&data->ioctl_count);
7459 wake_up_all(&data->abort_wq);
7460 mutex_unlock(&app_access_lock);
7461 if (ret)
7462 pr_err("failed close_session_cmd: %d\n", ret);
7463 break;
7464 }
7465 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7466 if ((data->client.app_id == 0) ||
7467 (data->type != QSEECOM_CLIENT_APP)) {
7468 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7469 data->type, data->client.app_id);
7470 ret = -EINVAL;
7471 break;
7472 }
7473 if (qseecom.qsee_version < QSEE_VERSION_40) {
7474 pr_err("GP feature unsupported: qsee ver %u\n",
7475 qseecom.qsee_version);
7476 return -EINVAL;
7477 }
7478 /* Only one client allowed here at a time */
7479 mutex_lock(&app_access_lock);
7480 atomic_inc(&data->ioctl_count);
7481 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7482 atomic_dec(&data->ioctl_count);
7483 wake_up_all(&data->abort_wq);
7484 mutex_unlock(&app_access_lock);
7485 if (ret)
7486 pr_err("failed Invoke cmd: %d\n", ret);
7487 __qseecom_clean_data_sglistinfo(data);
7488 break;
7489 }
7490 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7491 if ((data->client.app_id == 0) ||
7492 (data->type != QSEECOM_CLIENT_APP)) {
7493 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7494 data->type, data->client.app_id);
7495 ret = -EINVAL;
7496 break;
7497 }
7498 if (qseecom.qsee_version < QSEE_VERSION_40) {
7499 pr_err("GP feature unsupported: qsee ver %u\n",
7500 qseecom.qsee_version);
7501 return -EINVAL;
7502 }
7503 /* Only one client allowed here at a time */
7504 mutex_lock(&app_access_lock);
7505 atomic_inc(&data->ioctl_count);
7506 ret = qseecom_qteec_request_cancellation(data, argp);
7507 atomic_dec(&data->ioctl_count);
7508 wake_up_all(&data->abort_wq);
7509 mutex_unlock(&app_access_lock);
7510 if (ret)
7511 pr_err("failed request_cancellation: %d\n", ret);
7512 break;
7513 }
7514 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7515 atomic_inc(&data->ioctl_count);
7516 ret = qseecom_get_ce_info(data, argp);
7517 if (ret)
7518 pr_err("failed get fde ce pipe info: %d\n", ret);
7519 atomic_dec(&data->ioctl_count);
7520 break;
7521 }
7522 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7523 atomic_inc(&data->ioctl_count);
7524 ret = qseecom_free_ce_info(data, argp);
7525 if (ret)
7526 pr_err("failed get fde ce pipe info: %d\n", ret);
7527 atomic_dec(&data->ioctl_count);
7528 break;
7529 }
7530 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7531 atomic_inc(&data->ioctl_count);
7532 ret = qseecom_query_ce_info(data, argp);
7533 if (ret)
7534 pr_err("failed get fde ce pipe info: %d\n", ret);
7535 atomic_dec(&data->ioctl_count);
7536 break;
7537 }
7538 default:
7539 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7540 return -EINVAL;
7541 }
7542 return ret;
7543}
7544
7545static int qseecom_open(struct inode *inode, struct file *file)
7546{
7547 int ret = 0;
7548 struct qseecom_dev_handle *data;
7549
7550 data = kzalloc(sizeof(*data), GFP_KERNEL);
7551 if (!data)
7552 return -ENOMEM;
7553 file->private_data = data;
7554 data->abort = 0;
7555 data->type = QSEECOM_GENERIC;
7556 data->released = false;
7557 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7558 data->mode = INACTIVE;
7559 init_waitqueue_head(&data->abort_wq);
7560 atomic_set(&data->ioctl_count, 0);
7561 return ret;
7562}
7563
7564static int qseecom_release(struct inode *inode, struct file *file)
7565{
7566 struct qseecom_dev_handle *data = file->private_data;
7567 int ret = 0;
7568
7569 if (data->released == false) {
7570 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
7571 data->type, data->mode, data);
7572 switch (data->type) {
7573 case QSEECOM_LISTENER_SERVICE:
7574 mutex_lock(&app_access_lock);
7575 ret = qseecom_unregister_listener(data);
7576 mutex_unlock(&app_access_lock);
7577 break;
7578 case QSEECOM_CLIENT_APP:
7579 mutex_lock(&app_access_lock);
7580 ret = qseecom_unload_app(data, true);
7581 mutex_unlock(&app_access_lock);
7582 break;
7583 case QSEECOM_SECURE_SERVICE:
7584 case QSEECOM_GENERIC:
7585 ret = qseecom_unmap_ion_allocated_memory(data);
7586 if (ret)
7587 pr_err("Ion Unmap failed\n");
7588 break;
7589 case QSEECOM_UNAVAILABLE_CLIENT_APP:
7590 break;
7591 default:
7592 pr_err("Unsupported clnt_handle_type %d",
7593 data->type);
7594 break;
7595 }
7596 }
7597
7598 if (qseecom.support_bus_scaling) {
7599 mutex_lock(&qsee_bw_mutex);
7600 if (data->mode != INACTIVE) {
7601 qseecom_unregister_bus_bandwidth_needs(data);
7602 if (qseecom.cumulative_mode == INACTIVE) {
7603 ret = __qseecom_set_msm_bus_request(INACTIVE);
7604 if (ret)
7605 pr_err("Fail to scale down bus\n");
7606 }
7607 }
7608 mutex_unlock(&qsee_bw_mutex);
7609 } else {
7610 if (data->fast_load_enabled == true)
7611 qsee_disable_clock_vote(data, CLK_SFPB);
7612 if (data->perf_enabled == true)
7613 qsee_disable_clock_vote(data, CLK_DFAB);
7614 }
7615 kfree(data);
7616
7617 return ret;
7618}
7619
7620#ifdef CONFIG_COMPAT
7621#include "compat_qseecom.c"
7622#else
7623#define compat_qseecom_ioctl NULL
7624#endif
7625
7626static const struct file_operations qseecom_fops = {
7627 .owner = THIS_MODULE,
7628 .unlocked_ioctl = qseecom_ioctl,
7629 .compat_ioctl = compat_qseecom_ioctl,
7630 .open = qseecom_open,
7631 .release = qseecom_release
7632};
7633
7634static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7635{
7636 int rc = 0;
7637 struct device *pdev;
7638 struct qseecom_clk *qclk;
7639 char *core_clk_src = NULL;
7640 char *core_clk = NULL;
7641 char *iface_clk = NULL;
7642 char *bus_clk = NULL;
7643
7644 switch (ce) {
7645 case CLK_QSEE: {
7646 core_clk_src = "core_clk_src";
7647 core_clk = "core_clk";
7648 iface_clk = "iface_clk";
7649 bus_clk = "bus_clk";
7650 qclk = &qseecom.qsee;
7651 qclk->instance = CLK_QSEE;
7652 break;
7653 };
7654 case CLK_CE_DRV: {
7655 core_clk_src = "ce_drv_core_clk_src";
7656 core_clk = "ce_drv_core_clk";
7657 iface_clk = "ce_drv_iface_clk";
7658 bus_clk = "ce_drv_bus_clk";
7659 qclk = &qseecom.ce_drv;
7660 qclk->instance = CLK_CE_DRV;
7661 break;
7662 };
7663 default:
7664 pr_err("Invalid ce hw instance: %d!\n", ce);
7665 return -EIO;
7666 }
7667
7668 if (qseecom.no_clock_support) {
7669 qclk->ce_core_clk = NULL;
7670 qclk->ce_clk = NULL;
7671 qclk->ce_bus_clk = NULL;
7672 qclk->ce_core_src_clk = NULL;
7673 return 0;
7674 }
7675
7676 pdev = qseecom.pdev;
7677
7678 /* Get CE3 src core clk. */
7679 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7680 if (!IS_ERR(qclk->ce_core_src_clk)) {
7681 rc = clk_set_rate(qclk->ce_core_src_clk,
7682 qseecom.ce_opp_freq_hz);
7683 if (rc) {
7684 clk_put(qclk->ce_core_src_clk);
7685 qclk->ce_core_src_clk = NULL;
7686 pr_err("Unable to set the core src clk @%uMhz.\n",
7687 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7688 return -EIO;
7689 }
7690 } else {
7691 pr_warn("Unable to get CE core src clk, set to NULL\n");
7692 qclk->ce_core_src_clk = NULL;
7693 }
7694
7695 /* Get CE core clk */
7696 qclk->ce_core_clk = clk_get(pdev, core_clk);
7697 if (IS_ERR(qclk->ce_core_clk)) {
7698 rc = PTR_ERR(qclk->ce_core_clk);
7699 pr_err("Unable to get CE core clk\n");
7700 if (qclk->ce_core_src_clk != NULL)
7701 clk_put(qclk->ce_core_src_clk);
7702 return -EIO;
7703 }
7704
7705 /* Get CE Interface clk */
7706 qclk->ce_clk = clk_get(pdev, iface_clk);
7707 if (IS_ERR(qclk->ce_clk)) {
7708 rc = PTR_ERR(qclk->ce_clk);
7709 pr_err("Unable to get CE interface clk\n");
7710 if (qclk->ce_core_src_clk != NULL)
7711 clk_put(qclk->ce_core_src_clk);
7712 clk_put(qclk->ce_core_clk);
7713 return -EIO;
7714 }
7715
7716 /* Get CE AXI clk */
7717 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7718 if (IS_ERR(qclk->ce_bus_clk)) {
7719 rc = PTR_ERR(qclk->ce_bus_clk);
7720 pr_err("Unable to get CE BUS interface clk\n");
7721 if (qclk->ce_core_src_clk != NULL)
7722 clk_put(qclk->ce_core_src_clk);
7723 clk_put(qclk->ce_core_clk);
7724 clk_put(qclk->ce_clk);
7725 return -EIO;
7726 }
7727
7728 return rc;
7729}
7730
7731static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7732{
7733 struct qseecom_clk *qclk;
7734
7735 if (ce == CLK_QSEE)
7736 qclk = &qseecom.qsee;
7737 else
7738 qclk = &qseecom.ce_drv;
7739
7740 if (qclk->ce_clk != NULL) {
7741 clk_put(qclk->ce_clk);
7742 qclk->ce_clk = NULL;
7743 }
7744 if (qclk->ce_core_clk != NULL) {
7745 clk_put(qclk->ce_core_clk);
7746 qclk->ce_core_clk = NULL;
7747 }
7748 if (qclk->ce_bus_clk != NULL) {
7749 clk_put(qclk->ce_bus_clk);
7750 qclk->ce_bus_clk = NULL;
7751 }
7752 if (qclk->ce_core_src_clk != NULL) {
7753 clk_put(qclk->ce_core_src_clk);
7754 qclk->ce_core_src_clk = NULL;
7755 }
7756 qclk->instance = CLK_INVALID;
7757}
7758
7759static int qseecom_retrieve_ce_data(struct platform_device *pdev)
7760{
7761 int rc = 0;
7762 uint32_t hlos_num_ce_hw_instances;
7763 uint32_t disk_encrypt_pipe;
7764 uint32_t file_encrypt_pipe;
7765 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT];
7766 int i;
7767 const int *tbl;
7768 int size;
7769 int entry;
7770 struct qseecom_crypto_info *pfde_tbl = NULL;
7771 struct qseecom_crypto_info *p;
7772 int tbl_size;
7773 int j;
7774 bool old_db = true;
7775 struct qseecom_ce_info_use *pce_info_use;
7776 uint32_t *unit_tbl = NULL;
7777 int total_units = 0;
7778 struct qseecom_ce_pipe_entry *pce_entry;
7779
7780 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
7781 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
7782
7783 if (of_property_read_u32((&pdev->dev)->of_node,
7784 "qcom,qsee-ce-hw-instance",
7785 &qseecom.ce_info.qsee_ce_hw_instance)) {
7786 pr_err("Fail to get qsee ce hw instance information.\n");
7787 rc = -EINVAL;
7788 goto out;
7789 } else {
7790 pr_debug("qsee-ce-hw-instance=0x%x\n",
7791 qseecom.ce_info.qsee_ce_hw_instance);
7792 }
7793
7794 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
7795 "qcom,support-fde");
7796 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
7797 "qcom,support-pfe");
7798
7799 if (!qseecom.support_pfe && !qseecom.support_fde) {
7800 pr_warn("Device does not support PFE/FDE");
7801 goto out;
7802 }
7803
7804 if (qseecom.support_fde)
7805 tbl = of_get_property((&pdev->dev)->of_node,
7806 "qcom,full-disk-encrypt-info", &size);
7807 else
7808 tbl = NULL;
7809 if (tbl) {
7810 old_db = false;
7811 if (size % sizeof(struct qseecom_crypto_info)) {
7812 pr_err("full-disk-encrypt-info tbl size(%d)\n",
7813 size);
7814 rc = -EINVAL;
7815 goto out;
7816 }
7817 tbl_size = size / sizeof
7818 (struct qseecom_crypto_info);
7819
7820 pfde_tbl = kzalloc(size, GFP_KERNEL);
7821 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
7822 total_units = 0;
7823
7824 if (!pfde_tbl || !unit_tbl) {
7825 pr_err("failed to alloc memory\n");
7826 rc = -ENOMEM;
7827 goto out;
7828 }
7829 if (of_property_read_u32_array((&pdev->dev)->of_node,
7830 "qcom,full-disk-encrypt-info",
7831 (u32 *)pfde_tbl, size/sizeof(u32))) {
7832 pr_err("failed to read full-disk-encrypt-info tbl\n");
7833 rc = -EINVAL;
7834 goto out;
7835 }
7836
7837 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7838 for (j = 0; j < total_units; j++) {
7839 if (p->unit_num == *(unit_tbl + j))
7840 break;
7841 }
7842 if (j == total_units) {
7843 *(unit_tbl + total_units) = p->unit_num;
7844 total_units++;
7845 }
7846 }
7847
7848 qseecom.ce_info.num_fde = total_units;
7849 pce_info_use = qseecom.ce_info.fde = kcalloc(
7850 total_units, sizeof(struct qseecom_ce_info_use),
7851 GFP_KERNEL);
7852 if (!pce_info_use) {
7853 pr_err("failed to alloc memory\n");
7854 rc = -ENOMEM;
7855 goto out;
7856 }
7857
7858 for (j = 0; j < total_units; j++, pce_info_use++) {
7859 pce_info_use->unit_num = *(unit_tbl + j);
7860 pce_info_use->alloc = false;
7861 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
7862 pce_info_use->num_ce_pipe_entries = 0;
7863 pce_info_use->ce_pipe_entry = NULL;
7864 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7865 if (p->unit_num == pce_info_use->unit_num)
7866 pce_info_use->num_ce_pipe_entries++;
7867 }
7868
7869 entry = pce_info_use->num_ce_pipe_entries;
7870 pce_entry = pce_info_use->ce_pipe_entry =
7871 kcalloc(entry,
7872 sizeof(struct qseecom_ce_pipe_entry),
7873 GFP_KERNEL);
7874 if (pce_entry == NULL) {
7875 pr_err("failed to alloc memory\n");
7876 rc = -ENOMEM;
7877 goto out;
7878 }
7879
7880 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7881 if (p->unit_num == pce_info_use->unit_num) {
7882 pce_entry->ce_num = p->ce;
7883 pce_entry->ce_pipe_pair =
7884 p->pipe_pair;
7885 pce_entry->valid = true;
7886 pce_entry++;
7887 }
7888 }
7889 }
7890 kfree(unit_tbl);
7891 unit_tbl = NULL;
7892 kfree(pfde_tbl);
7893 pfde_tbl = NULL;
7894 }
7895
7896 if (qseecom.support_pfe)
7897 tbl = of_get_property((&pdev->dev)->of_node,
7898 "qcom,per-file-encrypt-info", &size);
7899 else
7900 tbl = NULL;
7901 if (tbl) {
7902 old_db = false;
7903 if (size % sizeof(struct qseecom_crypto_info)) {
7904 pr_err("per-file-encrypt-info tbl size(%d)\n",
7905 size);
7906 rc = -EINVAL;
7907 goto out;
7908 }
7909 tbl_size = size / sizeof
7910 (struct qseecom_crypto_info);
7911
7912 pfde_tbl = kzalloc(size, GFP_KERNEL);
7913 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
7914 total_units = 0;
7915 if (!pfde_tbl || !unit_tbl) {
7916 pr_err("failed to alloc memory\n");
7917 rc = -ENOMEM;
7918 goto out;
7919 }
7920 if (of_property_read_u32_array((&pdev->dev)->of_node,
7921 "qcom,per-file-encrypt-info",
7922 (u32 *)pfde_tbl, size/sizeof(u32))) {
7923 pr_err("failed to read per-file-encrypt-info tbl\n");
7924 rc = -EINVAL;
7925 goto out;
7926 }
7927
7928 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7929 for (j = 0; j < total_units; j++) {
7930 if (p->unit_num == *(unit_tbl + j))
7931 break;
7932 }
7933 if (j == total_units) {
7934 *(unit_tbl + total_units) = p->unit_num;
7935 total_units++;
7936 }
7937 }
7938
7939 qseecom.ce_info.num_pfe = total_units;
7940 pce_info_use = qseecom.ce_info.pfe = kcalloc(
7941 total_units, sizeof(struct qseecom_ce_info_use),
7942 GFP_KERNEL);
7943 if (!pce_info_use) {
7944 pr_err("failed to alloc memory\n");
7945 rc = -ENOMEM;
7946 goto out;
7947 }
7948
7949 for (j = 0; j < total_units; j++, pce_info_use++) {
7950 pce_info_use->unit_num = *(unit_tbl + j);
7951 pce_info_use->alloc = false;
7952 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
7953 pce_info_use->num_ce_pipe_entries = 0;
7954 pce_info_use->ce_pipe_entry = NULL;
7955 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7956 if (p->unit_num == pce_info_use->unit_num)
7957 pce_info_use->num_ce_pipe_entries++;
7958 }
7959
7960 entry = pce_info_use->num_ce_pipe_entries;
7961 pce_entry = pce_info_use->ce_pipe_entry =
7962 kcalloc(entry,
7963 sizeof(struct qseecom_ce_pipe_entry),
7964 GFP_KERNEL);
7965 if (pce_entry == NULL) {
7966 pr_err("failed to alloc memory\n");
7967 rc = -ENOMEM;
7968 goto out;
7969 }
7970
7971 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7972 if (p->unit_num == pce_info_use->unit_num) {
7973 pce_entry->ce_num = p->ce;
7974 pce_entry->ce_pipe_pair =
7975 p->pipe_pair;
7976 pce_entry->valid = true;
7977 pce_entry++;
7978 }
7979 }
7980 }
7981 kfree(unit_tbl);
7982 unit_tbl = NULL;
7983 kfree(pfde_tbl);
7984 pfde_tbl = NULL;
7985 }
7986
7987 if (!old_db)
7988 goto out1;
7989
7990 if (of_property_read_bool((&pdev->dev)->of_node,
7991 "qcom,support-multiple-ce-hw-instance")) {
7992 if (of_property_read_u32((&pdev->dev)->of_node,
7993 "qcom,hlos-num-ce-hw-instances",
7994 &hlos_num_ce_hw_instances)) {
7995 pr_err("Fail: get hlos number of ce hw instance\n");
7996 rc = -EINVAL;
7997 goto out;
7998 }
7999 } else {
8000 hlos_num_ce_hw_instances = 1;
8001 }
8002
8003 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8004 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8005 MAX_CE_PIPE_PAIR_PER_UNIT);
8006 rc = -EINVAL;
8007 goto out;
8008 }
8009
8010 if (of_property_read_u32_array((&pdev->dev)->of_node,
8011 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8012 hlos_num_ce_hw_instances)) {
8013 pr_err("Fail: get hlos ce hw instance info\n");
8014 rc = -EINVAL;
8015 goto out;
8016 }
8017
8018 if (qseecom.support_fde) {
8019 pce_info_use = qseecom.ce_info.fde =
8020 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8021 if (!pce_info_use) {
8022 pr_err("failed to alloc memory\n");
8023 rc = -ENOMEM;
8024 goto out;
8025 }
8026 /* by default for old db */
8027 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8028 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8029 pce_info_use->alloc = false;
8030 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8031 pce_info_use->ce_pipe_entry = NULL;
8032 if (of_property_read_u32((&pdev->dev)->of_node,
8033 "qcom,disk-encrypt-pipe-pair",
8034 &disk_encrypt_pipe)) {
8035 pr_err("Fail to get FDE pipe information.\n");
8036 rc = -EINVAL;
8037 goto out;
8038 } else {
8039 pr_debug("disk-encrypt-pipe-pair=0x%x",
8040 disk_encrypt_pipe);
8041 }
8042 entry = pce_info_use->num_ce_pipe_entries =
8043 hlos_num_ce_hw_instances;
8044 pce_entry = pce_info_use->ce_pipe_entry =
8045 kcalloc(entry,
8046 sizeof(struct qseecom_ce_pipe_entry),
8047 GFP_KERNEL);
8048 if (pce_entry == NULL) {
8049 pr_err("failed to alloc memory\n");
8050 rc = -ENOMEM;
8051 goto out;
8052 }
8053 for (i = 0; i < entry; i++) {
8054 pce_entry->ce_num = hlos_ce_hw_instance[i];
8055 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8056 pce_entry->valid = 1;
8057 pce_entry++;
8058 }
8059 } else {
8060 pr_warn("Device does not support FDE");
8061 disk_encrypt_pipe = 0xff;
8062 }
8063 if (qseecom.support_pfe) {
8064 pce_info_use = qseecom.ce_info.pfe =
8065 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8066 if (!pce_info_use) {
8067 pr_err("failed to alloc memory\n");
8068 rc = -ENOMEM;
8069 goto out;
8070 }
8071 /* by default for old db */
8072 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8073 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8074 pce_info_use->alloc = false;
8075 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8076 pce_info_use->ce_pipe_entry = NULL;
8077
8078 if (of_property_read_u32((&pdev->dev)->of_node,
8079 "qcom,file-encrypt-pipe-pair",
8080 &file_encrypt_pipe)) {
8081 pr_err("Fail to get PFE pipe information.\n");
8082 rc = -EINVAL;
8083 goto out;
8084 } else {
8085 pr_debug("file-encrypt-pipe-pair=0x%x",
8086 file_encrypt_pipe);
8087 }
8088 entry = pce_info_use->num_ce_pipe_entries =
8089 hlos_num_ce_hw_instances;
8090 pce_entry = pce_info_use->ce_pipe_entry =
8091 kcalloc(entry,
8092 sizeof(struct qseecom_ce_pipe_entry),
8093 GFP_KERNEL);
8094 if (pce_entry == NULL) {
8095 pr_err("failed to alloc memory\n");
8096 rc = -ENOMEM;
8097 goto out;
8098 }
8099 for (i = 0; i < entry; i++) {
8100 pce_entry->ce_num = hlos_ce_hw_instance[i];
8101 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8102 pce_entry->valid = 1;
8103 pce_entry++;
8104 }
8105 } else {
8106 pr_warn("Device does not support PFE");
8107 file_encrypt_pipe = 0xff;
8108 }
8109
8110out1:
8111 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8112 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8113out:
8114 if (rc) {
8115 if (qseecom.ce_info.fde) {
8116 pce_info_use = qseecom.ce_info.fde;
8117 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8118 pce_entry = pce_info_use->ce_pipe_entry;
8119 kfree(pce_entry);
8120 pce_info_use++;
8121 }
8122 }
8123 kfree(qseecom.ce_info.fde);
8124 qseecom.ce_info.fde = NULL;
8125 if (qseecom.ce_info.pfe) {
8126 pce_info_use = qseecom.ce_info.pfe;
8127 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8128 pce_entry = pce_info_use->ce_pipe_entry;
8129 kfree(pce_entry);
8130 pce_info_use++;
8131 }
8132 }
8133 kfree(qseecom.ce_info.pfe);
8134 qseecom.ce_info.pfe = NULL;
8135 }
8136 kfree(unit_tbl);
8137 kfree(pfde_tbl);
8138 return rc;
8139}
8140
8141static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8142 void __user *argp)
8143{
8144 struct qseecom_ce_info_req req;
8145 struct qseecom_ce_info_req *pinfo = &req;
8146 int ret = 0;
8147 int i;
8148 unsigned int entries;
8149 struct qseecom_ce_info_use *pce_info_use, *p;
8150 int total = 0;
8151 bool found = false;
8152 struct qseecom_ce_pipe_entry *pce_entry;
8153
8154 ret = copy_from_user(pinfo, argp,
8155 sizeof(struct qseecom_ce_info_req));
8156 if (ret) {
8157 pr_err("copy_from_user failed\n");
8158 return ret;
8159 }
8160
8161 switch (pinfo->usage) {
8162 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8163 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8164 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8165 if (qseecom.support_fde) {
8166 p = qseecom.ce_info.fde;
8167 total = qseecom.ce_info.num_fde;
8168 } else {
8169 pr_err("system does not support fde\n");
8170 return -EINVAL;
8171 }
8172 break;
8173 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8174 if (qseecom.support_pfe) {
8175 p = qseecom.ce_info.pfe;
8176 total = qseecom.ce_info.num_pfe;
8177 } else {
8178 pr_err("system does not support pfe\n");
8179 return -EINVAL;
8180 }
8181 break;
8182 default:
8183 pr_err("unsupported usage %d\n", pinfo->usage);
8184 return -EINVAL;
8185 }
8186
8187 pce_info_use = NULL;
8188 for (i = 0; i < total; i++) {
8189 if (!p->alloc)
8190 pce_info_use = p;
8191 else if (!memcmp(p->handle, pinfo->handle,
8192 MAX_CE_INFO_HANDLE_SIZE)) {
8193 pce_info_use = p;
8194 found = true;
8195 break;
8196 }
8197 p++;
8198 }
8199
8200 if (pce_info_use == NULL)
8201 return -EBUSY;
8202
8203 pinfo->unit_num = pce_info_use->unit_num;
8204 if (!pce_info_use->alloc) {
8205 pce_info_use->alloc = true;
8206 memcpy(pce_info_use->handle,
8207 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8208 }
8209 if (pce_info_use->num_ce_pipe_entries >
8210 MAX_CE_PIPE_PAIR_PER_UNIT)
8211 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8212 else
8213 entries = pce_info_use->num_ce_pipe_entries;
8214 pinfo->num_ce_pipe_entries = entries;
8215 pce_entry = pce_info_use->ce_pipe_entry;
8216 for (i = 0; i < entries; i++, pce_entry++)
8217 pinfo->ce_pipe_entry[i] = *pce_entry;
8218 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8219 pinfo->ce_pipe_entry[i].valid = 0;
8220
8221 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8222 pr_err("copy_to_user failed\n");
8223 ret = -EFAULT;
8224 }
8225 return ret;
8226}
8227
8228static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8229 void __user *argp)
8230{
8231 struct qseecom_ce_info_req req;
8232 struct qseecom_ce_info_req *pinfo = &req;
8233 int ret = 0;
8234 struct qseecom_ce_info_use *p;
8235 int total = 0;
8236 int i;
8237 bool found = false;
8238
8239 ret = copy_from_user(pinfo, argp,
8240 sizeof(struct qseecom_ce_info_req));
8241 if (ret)
8242 return ret;
8243
8244 switch (pinfo->usage) {
8245 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8246 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8247 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8248 if (qseecom.support_fde) {
8249 p = qseecom.ce_info.fde;
8250 total = qseecom.ce_info.num_fde;
8251 } else {
8252 pr_err("system does not support fde\n");
8253 return -EINVAL;
8254 }
8255 break;
8256 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8257 if (qseecom.support_pfe) {
8258 p = qseecom.ce_info.pfe;
8259 total = qseecom.ce_info.num_pfe;
8260 } else {
8261 pr_err("system does not support pfe\n");
8262 return -EINVAL;
8263 }
8264 break;
8265 default:
8266 pr_err("unsupported usage %d\n", pinfo->usage);
8267 return -EINVAL;
8268 }
8269
8270 for (i = 0; i < total; i++) {
8271 if (p->alloc &&
8272 !memcmp(p->handle, pinfo->handle,
8273 MAX_CE_INFO_HANDLE_SIZE)) {
8274 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8275 p->alloc = false;
8276 found = true;
8277 break;
8278 }
8279 p++;
8280 }
8281 return ret;
8282}
8283
8284static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8285 void __user *argp)
8286{
8287 struct qseecom_ce_info_req req;
8288 struct qseecom_ce_info_req *pinfo = &req;
8289 int ret = 0;
8290 int i;
8291 unsigned int entries;
8292 struct qseecom_ce_info_use *pce_info_use, *p;
8293 int total = 0;
8294 bool found = false;
8295 struct qseecom_ce_pipe_entry *pce_entry;
8296
8297 ret = copy_from_user(pinfo, argp,
8298 sizeof(struct qseecom_ce_info_req));
8299 if (ret)
8300 return ret;
8301
8302 switch (pinfo->usage) {
8303 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8304 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8305 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8306 if (qseecom.support_fde) {
8307 p = qseecom.ce_info.fde;
8308 total = qseecom.ce_info.num_fde;
8309 } else {
8310 pr_err("system does not support fde\n");
8311 return -EINVAL;
8312 }
8313 break;
8314 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8315 if (qseecom.support_pfe) {
8316 p = qseecom.ce_info.pfe;
8317 total = qseecom.ce_info.num_pfe;
8318 } else {
8319 pr_err("system does not support pfe\n");
8320 return -EINVAL;
8321 }
8322 break;
8323 default:
8324 pr_err("unsupported usage %d\n", pinfo->usage);
8325 return -EINVAL;
8326 }
8327
8328 pce_info_use = NULL;
8329 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8330 pinfo->num_ce_pipe_entries = 0;
8331 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8332 pinfo->ce_pipe_entry[i].valid = 0;
8333
8334 for (i = 0; i < total; i++) {
8335
8336 if (p->alloc && !memcmp(p->handle,
8337 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8338 pce_info_use = p;
8339 found = true;
8340 break;
8341 }
8342 p++;
8343 }
8344 if (!pce_info_use)
8345 goto out;
8346 pinfo->unit_num = pce_info_use->unit_num;
8347 if (pce_info_use->num_ce_pipe_entries >
8348 MAX_CE_PIPE_PAIR_PER_UNIT)
8349 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8350 else
8351 entries = pce_info_use->num_ce_pipe_entries;
8352 pinfo->num_ce_pipe_entries = entries;
8353 pce_entry = pce_info_use->ce_pipe_entry;
8354 for (i = 0; i < entries; i++, pce_entry++)
8355 pinfo->ce_pipe_entry[i] = *pce_entry;
8356 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8357 pinfo->ce_pipe_entry[i].valid = 0;
8358out:
8359 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8360 pr_err("copy_to_user failed\n");
8361 ret = -EFAULT;
8362 }
8363 return ret;
8364}
8365
8366/*
8367 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8368 * then whitelist feature is not supported.
8369 */
8370static int qseecom_check_whitelist_feature(void)
8371{
8372 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8373
8374 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8375}
8376
8377static int qseecom_probe(struct platform_device *pdev)
8378{
8379 int rc;
8380 int i;
8381 uint32_t feature = 10;
8382 struct device *class_dev;
8383 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8384 struct qseecom_command_scm_resp resp;
8385 struct qseecom_ce_info_use *pce_info_use = NULL;
8386
8387 qseecom.qsee_bw_count = 0;
8388 qseecom.qsee_perf_client = 0;
8389 qseecom.qsee_sfpb_bw_count = 0;
8390
8391 qseecom.qsee.ce_core_clk = NULL;
8392 qseecom.qsee.ce_clk = NULL;
8393 qseecom.qsee.ce_core_src_clk = NULL;
8394 qseecom.qsee.ce_bus_clk = NULL;
8395
8396 qseecom.cumulative_mode = 0;
8397 qseecom.current_mode = INACTIVE;
8398 qseecom.support_bus_scaling = false;
8399 qseecom.support_fde = false;
8400 qseecom.support_pfe = false;
8401
8402 qseecom.ce_drv.ce_core_clk = NULL;
8403 qseecom.ce_drv.ce_clk = NULL;
8404 qseecom.ce_drv.ce_core_src_clk = NULL;
8405 qseecom.ce_drv.ce_bus_clk = NULL;
8406 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8407
8408 qseecom.app_block_ref_cnt = 0;
8409 init_waitqueue_head(&qseecom.app_block_wq);
8410 qseecom.whitelist_support = true;
8411
8412 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8413 if (rc < 0) {
8414 pr_err("alloc_chrdev_region failed %d\n", rc);
8415 return rc;
8416 }
8417
8418 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8419 if (IS_ERR(driver_class)) {
8420 rc = -ENOMEM;
8421 pr_err("class_create failed %d\n", rc);
8422 goto exit_unreg_chrdev_region;
8423 }
8424
8425 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8426 QSEECOM_DEV);
8427 if (IS_ERR(class_dev)) {
8428 pr_err("class_device_create failed %d\n", rc);
8429 rc = -ENOMEM;
8430 goto exit_destroy_class;
8431 }
8432
8433 cdev_init(&qseecom.cdev, &qseecom_fops);
8434 qseecom.cdev.owner = THIS_MODULE;
8435
8436 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8437 if (rc < 0) {
8438 pr_err("cdev_add failed %d\n", rc);
8439 goto exit_destroy_device;
8440 }
8441
8442 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
8443 spin_lock_init(&qseecom.registered_listener_list_lock);
8444 INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8445 spin_lock_init(&qseecom.registered_app_list_lock);
8446 INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8447 spin_lock_init(&qseecom.registered_kclient_list_lock);
8448 init_waitqueue_head(&qseecom.send_resp_wq);
8449 qseecom.send_resp_flag = 0;
8450
8451 qseecom.qsee_version = QSEEE_VERSION_00;
8452 rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8453 &resp, sizeof(resp));
8454 pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8455 if (rc) {
8456 pr_err("Failed to get QSEE version info %d\n", rc);
8457 goto exit_del_cdev;
8458 }
8459 qseecom.qsee_version = resp.result;
8460 qseecom.qseos_version = QSEOS_VERSION_14;
8461 qseecom.commonlib_loaded = false;
8462 qseecom.commonlib64_loaded = false;
8463 qseecom.pdev = class_dev;
8464 /* Create ION msm client */
8465 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8466 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8467 pr_err("Ion client cannot be created\n");
8468 rc = -ENOMEM;
8469 goto exit_del_cdev;
8470 }
8471
8472 /* register client for bus scaling */
8473 if (pdev->dev.of_node) {
8474 qseecom.pdev->of_node = pdev->dev.of_node;
8475 qseecom.support_bus_scaling =
8476 of_property_read_bool((&pdev->dev)->of_node,
8477 "qcom,support-bus-scaling");
8478 rc = qseecom_retrieve_ce_data(pdev);
8479 if (rc)
8480 goto exit_destroy_ion_client;
8481 qseecom.appsbl_qseecom_support =
8482 of_property_read_bool((&pdev->dev)->of_node,
8483 "qcom,appsbl-qseecom-support");
8484 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
8485 qseecom.appsbl_qseecom_support);
8486
8487 qseecom.commonlib64_loaded =
8488 of_property_read_bool((&pdev->dev)->of_node,
8489 "qcom,commonlib64-loaded-by-uefi");
8490 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
8491 qseecom.commonlib64_loaded);
8492 qseecom.fde_key_size =
8493 of_property_read_bool((&pdev->dev)->of_node,
8494 "qcom,fde-key-size");
8495 qseecom.no_clock_support =
8496 of_property_read_bool((&pdev->dev)->of_node,
8497 "qcom,no-clock-support");
8498 if (!qseecom.no_clock_support) {
8499 pr_info("qseecom clocks handled by other subsystem\n");
8500 } else {
8501 pr_info("no-clock-support=0x%x",
8502 qseecom.no_clock_support);
8503 }
8504
8505 if (of_property_read_u32((&pdev->dev)->of_node,
8506 "qcom,qsee-reentrancy-support",
8507 &qseecom.qsee_reentrancy_support)) {
8508 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
8509 qseecom.qsee_reentrancy_support = 0;
8510 } else {
8511 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
8512 qseecom.qsee_reentrancy_support);
8513 }
8514
8515 /*
8516 * The qseecom bus scaling flag can not be enabled when
8517 * crypto clock is not handled by HLOS.
8518 */
8519 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
8520 pr_err("support_bus_scaling flag can not be enabled.\n");
8521 rc = -EINVAL;
8522 goto exit_destroy_ion_client;
8523 }
8524
8525 if (of_property_read_u32((&pdev->dev)->of_node,
8526 "qcom,ce-opp-freq",
8527 &qseecom.ce_opp_freq_hz)) {
8528 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
8529 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
8530 }
8531 rc = __qseecom_init_clk(CLK_QSEE);
8532 if (rc)
8533 goto exit_destroy_ion_client;
8534
8535 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8536 (qseecom.support_pfe || qseecom.support_fde)) {
8537 rc = __qseecom_init_clk(CLK_CE_DRV);
8538 if (rc) {
8539 __qseecom_deinit_clk(CLK_QSEE);
8540 goto exit_destroy_ion_client;
8541 }
8542 } else {
8543 struct qseecom_clk *qclk;
8544
8545 qclk = &qseecom.qsee;
8546 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
8547 qseecom.ce_drv.ce_clk = qclk->ce_clk;
8548 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
8549 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
8550 }
8551
8552 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8553 msm_bus_cl_get_pdata(pdev);
8554 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
8555 (!qseecom.is_apps_region_protected &&
8556 !qseecom.appsbl_qseecom_support)) {
8557 struct resource *resource = NULL;
8558 struct qsee_apps_region_info_ireq req;
8559 struct qsee_apps_region_info_64bit_ireq req_64bit;
8560 struct qseecom_command_scm_resp resp;
8561 void *cmd_buf = NULL;
8562 size_t cmd_len;
8563
8564 resource = platform_get_resource_byname(pdev,
8565 IORESOURCE_MEM, "secapp-region");
8566 if (resource) {
8567 if (qseecom.qsee_version < QSEE_VERSION_40) {
8568 req.qsee_cmd_id =
8569 QSEOS_APP_REGION_NOTIFICATION;
8570 req.addr = (uint32_t)resource->start;
8571 req.size = resource_size(resource);
8572 cmd_buf = (void *)&req;
8573 cmd_len = sizeof(struct
8574 qsee_apps_region_info_ireq);
8575 pr_warn("secure app region addr=0x%x size=0x%x",
8576 req.addr, req.size);
8577 } else {
8578 req_64bit.qsee_cmd_id =
8579 QSEOS_APP_REGION_NOTIFICATION;
8580 req_64bit.addr = resource->start;
8581 req_64bit.size = resource_size(
8582 resource);
8583 cmd_buf = (void *)&req_64bit;
8584 cmd_len = sizeof(struct
8585 qsee_apps_region_info_64bit_ireq);
8586 pr_warn("secure app region addr=0x%llx size=0x%x",
8587 req_64bit.addr, req_64bit.size);
8588 }
8589 } else {
8590 pr_err("Fail to get secure app region info\n");
8591 rc = -EINVAL;
8592 goto exit_deinit_clock;
8593 }
8594 rc = __qseecom_enable_clk(CLK_QSEE);
8595 if (rc) {
8596 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
8597 rc = -EIO;
8598 goto exit_deinit_clock;
8599 }
8600 rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
8601 cmd_buf, cmd_len,
8602 &resp, sizeof(resp));
8603 __qseecom_disable_clk(CLK_QSEE);
8604 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
8605 pr_err("send secapp reg fail %d resp.res %d\n",
8606 rc, resp.result);
8607 rc = -EINVAL;
8608 goto exit_deinit_clock;
8609 }
8610 }
8611 /*
8612 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
8613 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
8614 * Pls add "qseecom.commonlib64_loaded = true" here too.
8615 */
8616 if (qseecom.is_apps_region_protected ||
8617 qseecom.appsbl_qseecom_support)
8618 qseecom.commonlib_loaded = true;
8619 } else {
8620 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8621 pdev->dev.platform_data;
8622 }
8623 if (qseecom.support_bus_scaling) {
8624 init_timer(&(qseecom.bw_scale_down_timer));
8625 INIT_WORK(&qseecom.bw_inactive_req_ws,
8626 qseecom_bw_inactive_req_work);
8627 qseecom.bw_scale_down_timer.function =
8628 qseecom_scale_bus_bandwidth_timer_callback;
8629 }
8630 qseecom.timer_running = false;
8631 qseecom.qsee_perf_client = msm_bus_scale_register_client(
8632 qseecom_platform_support);
8633
8634 qseecom.whitelist_support = qseecom_check_whitelist_feature();
8635 pr_warn("qseecom.whitelist_support = %d\n",
8636 qseecom.whitelist_support);
8637
8638 if (!qseecom.qsee_perf_client)
8639 pr_err("Unable to register bus client\n");
8640
8641 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8642 return 0;
8643
8644exit_deinit_clock:
8645 __qseecom_deinit_clk(CLK_QSEE);
8646 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8647 (qseecom.support_pfe || qseecom.support_fde))
8648 __qseecom_deinit_clk(CLK_CE_DRV);
8649exit_destroy_ion_client:
8650 if (qseecom.ce_info.fde) {
8651 pce_info_use = qseecom.ce_info.fde;
8652 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8653 kzfree(pce_info_use->ce_pipe_entry);
8654 pce_info_use++;
8655 }
8656 kfree(qseecom.ce_info.fde);
8657 }
8658 if (qseecom.ce_info.pfe) {
8659 pce_info_use = qseecom.ce_info.pfe;
8660 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8661 kzfree(pce_info_use->ce_pipe_entry);
8662 pce_info_use++;
8663 }
8664 kfree(qseecom.ce_info.pfe);
8665 }
8666 ion_client_destroy(qseecom.ion_clnt);
8667exit_del_cdev:
8668 cdev_del(&qseecom.cdev);
8669exit_destroy_device:
8670 device_destroy(driver_class, qseecom_device_no);
8671exit_destroy_class:
8672 class_destroy(driver_class);
8673exit_unreg_chrdev_region:
8674 unregister_chrdev_region(qseecom_device_no, 1);
8675 return rc;
8676}
8677
8678static int qseecom_remove(struct platform_device *pdev)
8679{
8680 struct qseecom_registered_kclient_list *kclient = NULL;
8681 unsigned long flags = 0;
8682 int ret = 0;
8683 int i;
8684 struct qseecom_ce_pipe_entry *pce_entry;
8685 struct qseecom_ce_info_use *pce_info_use;
8686
8687 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8688 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
8689
8690 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
8691 list) {
8692 if (!kclient)
8693 goto exit_irqrestore;
8694
8695 /* Break the loop if client handle is NULL */
8696 if (!kclient->handle)
8697 goto exit_free_kclient;
8698
8699 if (list_empty(&kclient->list))
8700 goto exit_free_kc_handle;
8701
8702 list_del(&kclient->list);
8703 mutex_lock(&app_access_lock);
8704 ret = qseecom_unload_app(kclient->handle->dev, false);
8705 mutex_unlock(&app_access_lock);
8706 if (!ret) {
8707 kzfree(kclient->handle->dev);
8708 kzfree(kclient->handle);
8709 kzfree(kclient);
8710 }
8711 }
8712
8713exit_free_kc_handle:
8714 kzfree(kclient->handle);
8715exit_free_kclient:
8716 kzfree(kclient);
8717exit_irqrestore:
8718 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
8719
8720 if (qseecom.qseos_version > QSEEE_VERSION_00)
8721 qseecom_unload_commonlib_image();
8722
8723 if (qseecom.qsee_perf_client)
8724 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
8725 0);
8726 if (pdev->dev.platform_data != NULL)
8727 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
8728
8729 if (qseecom.support_bus_scaling) {
8730 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8731 del_timer_sync(&qseecom.bw_scale_down_timer);
8732 }
8733
8734 if (qseecom.ce_info.fde) {
8735 pce_info_use = qseecom.ce_info.fde;
8736 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8737 pce_entry = pce_info_use->ce_pipe_entry;
8738 kfree(pce_entry);
8739 pce_info_use++;
8740 }
8741 }
8742 kfree(qseecom.ce_info.fde);
8743 if (qseecom.ce_info.pfe) {
8744 pce_info_use = qseecom.ce_info.pfe;
8745 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8746 pce_entry = pce_info_use->ce_pipe_entry;
8747 kfree(pce_entry);
8748 pce_info_use++;
8749 }
8750 }
8751 kfree(qseecom.ce_info.pfe);
8752
8753 /* register client for bus scaling */
8754 if (pdev->dev.of_node) {
8755 __qseecom_deinit_clk(CLK_QSEE);
8756 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8757 (qseecom.support_pfe || qseecom.support_fde))
8758 __qseecom_deinit_clk(CLK_CE_DRV);
8759 }
8760
8761 ion_client_destroy(qseecom.ion_clnt);
8762
8763 cdev_del(&qseecom.cdev);
8764
8765 device_destroy(driver_class, qseecom_device_no);
8766
8767 class_destroy(driver_class);
8768
8769 unregister_chrdev_region(qseecom_device_no, 1);
8770
8771 return ret;
8772}
8773
8774static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
8775{
8776 int ret = 0;
8777 struct qseecom_clk *qclk;
8778
8779 qclk = &qseecom.qsee;
8780 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
8781 if (qseecom.no_clock_support)
8782 return 0;
8783
8784 mutex_lock(&qsee_bw_mutex);
8785 mutex_lock(&clk_access_lock);
8786
8787 if (qseecom.current_mode != INACTIVE) {
8788 ret = msm_bus_scale_client_update_request(
8789 qseecom.qsee_perf_client, INACTIVE);
8790 if (ret)
8791 pr_err("Fail to scale down bus\n");
8792 else
8793 qseecom.current_mode = INACTIVE;
8794 }
8795
8796 if (qclk->clk_access_cnt) {
8797 if (qclk->ce_clk != NULL)
8798 clk_disable_unprepare(qclk->ce_clk);
8799 if (qclk->ce_core_clk != NULL)
8800 clk_disable_unprepare(qclk->ce_core_clk);
8801 if (qclk->ce_bus_clk != NULL)
8802 clk_disable_unprepare(qclk->ce_bus_clk);
8803 }
8804
8805 del_timer_sync(&(qseecom.bw_scale_down_timer));
8806 qseecom.timer_running = false;
8807
8808 mutex_unlock(&clk_access_lock);
8809 mutex_unlock(&qsee_bw_mutex);
8810 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8811
8812 return 0;
8813}
8814
8815static int qseecom_resume(struct platform_device *pdev)
8816{
8817 int mode = 0;
8818 int ret = 0;
8819 struct qseecom_clk *qclk;
8820
8821 qclk = &qseecom.qsee;
8822 if (qseecom.no_clock_support)
8823 goto exit;
8824
8825 mutex_lock(&qsee_bw_mutex);
8826 mutex_lock(&clk_access_lock);
8827 if (qseecom.cumulative_mode >= HIGH)
8828 mode = HIGH;
8829 else
8830 mode = qseecom.cumulative_mode;
8831
8832 if (qseecom.cumulative_mode != INACTIVE) {
8833 ret = msm_bus_scale_client_update_request(
8834 qseecom.qsee_perf_client, mode);
8835 if (ret)
8836 pr_err("Fail to scale up bus to %d\n", mode);
8837 else
8838 qseecom.current_mode = mode;
8839 }
8840
8841 if (qclk->clk_access_cnt) {
8842 if (qclk->ce_core_clk != NULL) {
8843 ret = clk_prepare_enable(qclk->ce_core_clk);
8844 if (ret) {
8845 pr_err("Unable to enable/prep CE core clk\n");
8846 qclk->clk_access_cnt = 0;
8847 goto err;
8848 }
8849 }
8850 if (qclk->ce_clk != NULL) {
8851 ret = clk_prepare_enable(qclk->ce_clk);
8852 if (ret) {
8853 pr_err("Unable to enable/prep CE iface clk\n");
8854 qclk->clk_access_cnt = 0;
8855 goto ce_clk_err;
8856 }
8857 }
8858 if (qclk->ce_bus_clk != NULL) {
8859 ret = clk_prepare_enable(qclk->ce_bus_clk);
8860 if (ret) {
8861 pr_err("Unable to enable/prep CE bus clk\n");
8862 qclk->clk_access_cnt = 0;
8863 goto ce_bus_clk_err;
8864 }
8865 }
8866 }
8867
8868 if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
8869 qseecom.bw_scale_down_timer.expires = jiffies +
8870 msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
8871 mod_timer(&(qseecom.bw_scale_down_timer),
8872 qseecom.bw_scale_down_timer.expires);
8873 qseecom.timer_running = true;
8874 }
8875
8876 mutex_unlock(&clk_access_lock);
8877 mutex_unlock(&qsee_bw_mutex);
8878 goto exit;
8879
8880ce_bus_clk_err:
8881 if (qclk->ce_clk)
8882 clk_disable_unprepare(qclk->ce_clk);
8883ce_clk_err:
8884 if (qclk->ce_core_clk)
8885 clk_disable_unprepare(qclk->ce_core_clk);
8886err:
8887 mutex_unlock(&clk_access_lock);
8888 mutex_unlock(&qsee_bw_mutex);
8889 ret = -EIO;
8890exit:
8891 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8892 return ret;
8893}
8894
/* Device-tree match table: binds this driver to "qcom,qseecom" nodes. */
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}
};
8901
/* Platform driver descriptor: probe/remove plus legacy suspend/resume PM. */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.owner = THIS_MODULE,
		.of_match_table = qseecom_match,
	},
};
8913
/* Module entry point: register the qseecom platform driver. */
static int qseecom_init(void)
{
	return platform_driver_register(&qseecom_plat_driver);
}
8918
/* Module exit point: unregister the qseecom platform driver. */
static void qseecom_exit(void)
{
	platform_driver_unregister(&qseecom_plat_driver);
}
8923
/* Module metadata and init/exit registration. */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");

module_init(qseecom_init);
module_exit(qseecom_exit);