blob: 7567f86c01e37fc9db660f63235bf2617c1d50d8 [file] [log] [blame]
/*
 * QTI Secure Execution Environment Communicator (QSEECOM) driver
 *
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
54#define QSEECOM_DEV "qseecom"
55#define QSEOS_VERSION_14 0x14
56#define QSEEE_VERSION_00 0x400000
57#define QSEE_VERSION_01 0x401000
58#define QSEE_VERSION_02 0x402000
59#define QSEE_VERSION_03 0x403000
60#define QSEE_VERSION_04 0x404000
61#define QSEE_VERSION_05 0x405000
62#define QSEE_VERSION_20 0x800000
63#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
64
65#define QSEE_CE_CLK_100MHZ 100000000
66#define CE_CLK_DIV 1000000
67
68#define QSEECOM_MAX_SG_ENTRY 512
69#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
70 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
71
72#define QSEECOM_INVALID_KEY_ID 0xff
73
74/* Save partition image hash for authentication check */
75#define SCM_SAVE_PARTITION_HASH_ID 0x01
76
77/* Check if enterprise security is activate */
78#define SCM_IS_ACTIVATED_ID 0x02
79
80/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
81#define SCM_MDTP_CIPHER_DIP 0x01
82
83/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
84#define MAX_DIP 0x20000
85
86#define RPMB_SERVICE 0x2000
87#define SSD_SERVICE 0x3000
88
89#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
90#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
91#define TWO 2
92#define QSEECOM_UFS_ICE_CE_NUM 10
93#define QSEECOM_SDCC_ICE_CE_NUM 20
94#define QSEECOM_ICE_FDE_KEY_INDEX 0
95
96#define PHY_ADDR_4G (1ULL<<32)
97
98#define QSEECOM_STATE_NOT_READY 0
99#define QSEECOM_STATE_SUSPEND 1
100#define QSEECOM_STATE_READY 2
101#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
102
/*
 * Default the ce info unit to 0 for services that support only a single
 * instance.  Most services are in this category.
 */
109#define DEFAULT_CE_INFO_UNIT 0
110#define DEFAULT_NUM_CE_INFO_UNIT 1
111
/* Bus-clock identifiers used by the bandwidth-voting paths. */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,	/* fabric clock */
	CLK_SFPB,	/* peripheral bus clock */
};
116
/*
 * ICE full-disk-encryption key-size flags, encoded as a bitfield shifted by
 * QSEECOM_ICE_FDE_KEY_SIZE_MASK (bit position 2).
 */
enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};
125
/* What kind of client a /dev/qseecom file handle represents. */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,		/* handle bound to a loaded TZ app */
	QSEECOM_LISTENER_SERVICE,	/* registered listener service */
	QSEECOM_SECURE_SERVICE,		/* secure service (e.g. RPMB/SSD) user */
	QSEECOM_GENERIC,		/* not yet specialized */
	QSEECOM_UNAVAILABLE_CLIENT_APP,	/* app could not be loaded/found */
};
133
/* Which crypto-engine hardware instance a clock set belongs to. */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,	/* CE used by QSEE */
	CLK_CE_DRV,	/* CE used by the driver */
	CLK_INVALID,
};
139
140static struct class *driver_class;
141static dev_t qseecom_device_no;
142
143static DEFINE_MUTEX(qsee_bw_mutex);
144static DEFINE_MUTEX(app_access_lock);
145static DEFINE_MUTEX(clk_access_lock);
146
/*
 * One entry of the scatter/gather info table shared with TZ.  The field
 * encoding (flag bits in indexAndFlags, meaning of sizeOrCount) is described
 * in the comment following this struct.
 */
struct sglist_info {
	uint32_t indexAndFlags;
	uint32_t sizeOrCount;
};
151
/*
 * Bit 31 of `indexAndFlags` indicates whether the request buffer holds one
 * or multiple physical addresses.  If it is set, the index locates a single
 * physical address inside the request buffer, and `sizeOrCount` is the size
 * of the memory being shared at that physical address.
 * Otherwise, the index locates an array of {start, len} pairs (a
 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
 * that array.
 *
 * Bit 30 selects 64- or 32-bit addressing; when it is set, physical
 * addresses and scatter/gather entry sizes are 64-bit values, otherwise
 * 32-bit values.
 *
 * Bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
 */
166#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
167 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
168
169#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
170
171#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
172
173#define MAKE_WHITELIST_VERSION(major, minor, patch) \
174 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
175
/*
 * Per-listener bookkeeping for a registered listener service: the ion-backed
 * shared buffer in its three views (kernel virtual, physical, user virtual),
 * the wait queues used to hand requests to the listener and to park app
 * threads blocked on it, and the cached sglist table sent to TZ with
 * listener responses.
 */
struct qseecom_registered_listener_list {
	struct list_head list;
	struct qseecom_register_listener_req svc;	/* id + user sb info */
	void *user_virt_sb_base;
	u8 *sb_virt;
	phys_addr_t sb_phys;
	size_t sb_length;
	struct ion_handle *ihandle;	/* used to retrieve the phys addr */
	wait_queue_head_t rcv_req_wq;	/* listener blocks here for requests */
	int rcv_req_flag;
	int send_resp_flag;
	bool listener_in_use;
	/* wq for threads blocked on this listener */
	wait_queue_head_t listener_block_app_wq;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
};
193
/*
 * One entry per TZ application currently loaded, reference-counted across
 * the client handles using it.  app_blocked/blocked_on_listener_id record
 * reentrancy state when the app is parked waiting on a listener.
 */
struct qseecom_registered_app_list {
	struct list_head list;
	u32 app_id;			/* id assigned by QSEE */
	u32 ref_cnt;
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;			/* ELF class of the app image */
	bool app_blocked;
	u32 blocked_on_listener_id;
};
203
/* Tracks handles handed out through the in-kernel qseecom API. */
struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};
208
/* One crypto-engine usage record (FDE or PFE) exposed to userspace. */
struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
	unsigned int unit_num;			/* ce info unit number */
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;				/* entry currently handed out */
	uint32_t type;				/* FDE vs PFE usage type */
};
217
/* Aggregate of all FDE and PFE crypto-engine usage records. */
struct ce_hw_usage_info {
	uint32_t qsee_ce_hw_instance;
	uint32_t num_fde;			/* entries in fde[] */
	struct qseecom_ce_info_use *fde;	/* full-disk encryption units */
	uint32_t num_pfe;			/* entries in pfe[] */
	struct qseecom_ce_info_use *pfe;	/* per-file encryption units */
};
225
/*
 * Clock handles for one crypto-engine instance, plus a reference count of
 * enable requests (guarded by clk_access_lock).
 */
struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;
};
234
/*
 * Driver-wide state singleton (instantiated as `qseecom` below): registered
 * listeners, loaded apps and kernel clients with their locks, TZ version
 * and capability flags discovered at probe, clock/bandwidth-scaling state,
 * and the char device itself.
 */
struct qseecom_control {
	struct ion_client *ion_clnt;		/* ion client */
	struct list_head registered_listener_list_head;
	spinlock_t registered_listener_list_lock;

	struct list_head registered_app_list_head;
	spinlock_t registered_app_list_lock;

	struct list_head registered_kclient_list_head;
	spinlock_t registered_kclient_list_lock;

	wait_queue_head_t send_resp_wq;
	int send_resp_flag;

	uint32_t qseos_version;
	uint32_t qsee_version;
	struct device *pdev;
	bool whitelist_support;
	bool commonlib_loaded;
	bool commonlib64_loaded;
	struct ion_handle *cmnlib_ion_handle;
	struct ce_hw_usage_info ce_info;

	int qsee_bw_count;
	int qsee_sfpb_bw_count;

	uint32_t qsee_perf_client;
	struct qseecom_clk qsee;
	struct qseecom_clk ce_drv;

	bool support_bus_scaling;
	bool support_fde;
	bool support_pfe;
	bool fde_key_size;
	uint32_t cumulative_mode;
	enum qseecom_bandwidth_request_mode current_mode;
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;

	uint32_t app_block_ref_cnt;
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;
	/* set from the androidboot.keymaster= boot parameter */
	int is_apps_region_protected;
	/* true once TZ accepted the smcinvoke listener registration SMC */
	bool smcinvoke_support;
};
286
/* Describes a DMA-allocated bounce buffer backing a secure-buffer ion fd. */
struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;
	size_t size;
	void *vbase;		/* kernel virtual address */
	dma_addr_t pbase;	/* DMA/physical address */
};
293
/* 32-bit {buffer, size} memory reference parameter passed to TZ. */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};
298
/*
 * Per-handle state for a client bound to a TZ application: the app identity
 * plus the shared buffer in its kernel-virtual, physical and user-virtual
 * views, and per-fd secure-buffer bounce info.
 */
struct qseecom_client_handle {
	u32 app_id;
	u8 *sb_virt;
	phys_addr_t sb_phys;
	unsigned long user_virt_sb_base;
	size_t sb_length;
	struct ion_handle *ihandle;	/* used to retrieve the phys addr */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
};
310
/* Per-handle state for a listener service: its listener id. */
struct qseecom_listener_handle {
	u32 id;
};
314
315static struct qseecom_control qseecom;
316
/*
 * State attached to each open /dev/qseecom file: the handle type selects
 * which union member is valid.  Also carries abort/ioctl accounting,
 * per-handle bandwidth-vote state and the cached sglist table for
 * whitelist-style send commands.
 */
struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;	/* QSEECOM_CLIENT_APP */
		struct qseecom_listener_handle listener; /* QSEECOM_LISTENER_SERVICE */
	};
	bool released;
	int abort;
	wait_queue_head_t abort_wq;
	atomic_t ioctl_count;		/* in-flight ioctls on this handle */
	bool perf_enabled;
	bool fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	bool use_legacy_cmd;
};
334
/* Human-readable description for one key-id usage index. */
struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};
338
/* Crypto-engine selection: info unit, CE number and pipe pair. */
struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};
344
/*
 * Descriptions indexed by key-id usage; array order is significant — the
 * position is the usage index (0 = undefined, then FDE, PFE, UFS-ICE FDE,
 * SDCC-ICE FDE).
 */
static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},

	{
		.desc = "Full Disk Encryption",
	},

	{
		.desc = "Per File Encryption",
	},

	{
		.desc = "UFS ICE Full Disk Encryption",
	},

	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};
366
367/* Function proto types */
368static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
369static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
370static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
371static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
372static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
373static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
374 char *cmnlib_name);
375static int qseecom_enable_ice_setup(int usage);
376static int qseecom_disable_ice_setup(int usage);
377static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
378static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
379 void __user *argp);
380static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
381 void __user *argp);
382static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
383 void __user *argp);
384
/*
 * Boot-parameter handler for "androidboot.keymaster=".
 * Parses the integer option into qseecom.is_apps_region_protected.
 * Returns 1 to mark the option as consumed.
 */
static int get_qseecom_keymaster_status(char *str)
{
	get_option(&str, &qseecom.is_apps_region_protected);
	return 1;
}
__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
391
/*
 * qseecom_scm_call2() - issue a QSEECOM request via the ARMv8 SMC calling
 * convention.
 * @svc_id:	legacy SCM service id (6, SCM_SVC_ES or SCM_SVC_TZSCHEDULER)
 * @tz_cmd_id:	legacy TZ command id (consulted for svc 6 and SCM_SVC_ES only)
 * @req_buf:	marshalled request; for SCM_SVC_TZSCHEDULER its first uint32_t
 *		is the QSEOS command id that selects the SMC id and arguments
 * @resp_buf:	receives desc.ret[0..2] as {result, resp_type, data}
 *
 * Maps each legacy (service, command) pair onto the corresponding TZ_* SMC
 * id, copies the request fields into the scm_desc argument slots — choosing
 * the 32-bit or 64-bit request layout from qseecom.qsee_version — and calls
 * scm_call2().  Buffers that TZ reads by physical address are bounce-copied
 * into a freshly allocated tzbuf and cache-flushed first.
 *
 * Return: 0 if the SMC itself succeeded (the TZ-level status is reported
 * through resp_buf), -EINVAL for bad pointers or unknown commands, -ENOMEM
 * on bounce-buffer allocation failure, or the scm_call2() error.
 */
static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
			const void *req_buf, void *resp_buf)
{
	int ret = 0;
	uint32_t smc_id = 0;
	uint32_t qseos_cmd_id = 0;
	struct scm_desc desc = {0};
	struct qseecom_command_scm_resp *scm_resp = NULL;

	if (!req_buf || !resp_buf) {
		pr_err("Invalid buffer pointer\n");
		return -EINVAL;
	}
	/* Every QSEOS request starts with its command id. */
	qseos_cmd_id = *(uint32_t *)req_buf;
	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;

	switch (svc_id) {
	case 6: {
		/* Legacy info service: only "get feature version" (cmd 3). */
		if (tz_cmd_id == 3) {
			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
			desc.args[0] = *(uint32_t *)req_buf;
		} else {
			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
				svc_id, tz_cmd_id);
			return -EINVAL;
		}
		ret = scm_call2(smc_id, &desc);
		break;
	}
	case SCM_SVC_ES: {
		switch (tz_cmd_id) {
		case SCM_SAVE_PARTITION_HASH_ID: {
			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
			struct qseecom_save_partition_hash_req *p_hash_req =
				(struct qseecom_save_partition_hash_req *)
				req_buf;
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			/* NOTE(review): memset is redundant after kzalloc */
			memset(tzbuf, 0, tzbuflen);
			/* Bounce the digest so TZ reads it by phys addr. */
			memcpy(tzbuf, p_hash_req->digest,
				SHA256_DIGEST_LENGTH);
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
			desc.args[0] = p_hash_req->partition_id;
			desc.args[1] = virt_to_phys(tzbuf);
			desc.args[2] = SHA256_DIGEST_LENGTH;
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		default: {
			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
						tz_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (tz_cmd_id) */
		break;
	} /* end of case SCM_SVC_ES */
	case SCM_SVC_TZSCHEDULER: {
		switch (qseos_cmd_id) {
		case QSEOS_APP_START_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_START_ID;
			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
			/* Pre-4.0 TZ takes the 32-bit request layout. */
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_app_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_SHUTDOWN_COMMAND: {
			struct qseecom_unload_app_ireq *req;

			req = (struct qseecom_unload_app_ireq *)req_buf;
			smc_id = TZ_OS_APP_SHUTDOWN_ID;
			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_LOOKUP_COMMAND: {
			struct qseecom_check_app_ireq *req;
			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			req = (struct qseecom_check_app_ireq *)req_buf;
			pr_debug("Lookup app_name = %s\n", req->app_name);
			/* Bounce the name; TZ reads it by phys addr. */
			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_APP_LOOKUP_ID;
			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = strlen(req->app_name);
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_APP_REGION_NOTIFICATION: {
			struct qsee_apps_region_info_ireq *req;
			struct qsee_apps_region_info_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
			desc.arginfo =
				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qsee_apps_region_info_ireq *)
					req_buf;
				desc.args[0] = req->addr;
				desc.args[1] = req->size;
			} else {
				req_64bit =
				(struct qsee_apps_region_info_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->addr;
				desc.args[1] = req_64bit->size;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
			struct qseecom_load_lib_image_ireq *req;
			struct qseecom_load_lib_image_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_lib_image_ireq *)
					req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_lib_image_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_REGISTER_LISTENER: {
			struct qseecom_register_listener_ireq *req;
			struct qseecom_register_listener_64bit_ireq *req_64bit;

			desc.arginfo =
				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_register_listener_ireq *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->sb_ptr;
				desc.args[2] = req->sb_len;
			} else {
				req_64bit =
				(struct qseecom_register_listener_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->listener_id;
				desc.args[1] = req_64bit->sb_ptr;
				desc.args[2] = req_64bit->sb_len;
			}
			/*
			 * Probe for smcinvoke support: try the smcinvoke
			 * registration SMC first and fall back to the legacy
			 * id (clearing the flag) if TZ rejects it.
			 */
			qseecom.smcinvoke_support = true;
			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			if (ret) {
				qseecom.smcinvoke_support = false;
				smc_id = TZ_OS_REGISTER_LISTENER_ID;
				__qseecom_reentrancy_check_if_no_app_blocked(
					smc_id);
				ret = scm_call2(smc_id, &desc);
			}
			break;
		}
		case QSEOS_DEREGISTER_LISTENER: {
			struct qseecom_unregister_listener_ireq *req;

			req = (struct qseecom_unregister_listener_ireq *)
				req_buf;
			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
			struct qseecom_client_listener_data_irsp *req;

			req = (struct qseecom_client_listener_data_irsp *)
				req_buf;
			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
			desc.arginfo =
				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			desc.args[1] = req->status;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
			/* Response variant that also carries the sglist. */
			struct qseecom_client_listener_data_irsp *req;
			struct qseecom_client_listener_data_64bit_irsp *req_64;

			smc_id =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req =
				(struct qseecom_client_listener_data_irsp *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->status;
				desc.args[2] = req->sglistinfo_ptr;
				desc.args[3] = req->sglistinfo_len;
			} else {
				req_64 =
			(struct qseecom_client_listener_data_64bit_irsp *)
					req_buf;
				desc.args[0] = req_64->listener_id;
				desc.args[1] = req_64->status;
				desc.args[2] = req_64->sglistinfo_ptr;
				desc.args[3] = req_64->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
			/*
			 * NOTE(review): reuses the LOAD_SERVICES param id —
			 * presumably the argument layouts are identical;
			 * confirm against the TZ interface definition.
			 */
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_app_64bit_ireq *)req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}

		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
			/* Send-data variant that also passes the sglist. */
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
			struct qseecom_client_send_service_ireq *req;

			req = (struct qseecom_client_send_service_ireq *)
				req_buf;
			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
			desc.args[0] = req->key_type;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_ERASE_COMMAND: {
			smc_id = TZ_OS_RPMB_ERASE_ID;
			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_GENERATE_KEY: {
			/*
			 * Key-service requests strip the leading command id
			 * and bounce the remainder through tzbuf so TZ can
			 * read it by physical address.
			 */
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			/* NOTE(review): memset is redundant after kzalloc */
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_GEN_KEY_ID;
			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_DELETE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			/* NOTE(review): memset is redundant after kzalloc */
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_DEL_KEY_ID;
			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_SET_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_select_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			/* NOTE(review): memset is redundant after kzalloc */
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_select_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_UPDATE_KEY_USERINFO: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			/* NOTE(review): memset is redundant after kzalloc */
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_CLOSE_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_REQUEST_CANCELLATION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
			desc.arginfo =
				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
			struct qseecom_continue_blocked_request_ireq *req =
				(struct qseecom_continue_blocked_request_ireq *)
				req_buf;
			/* smcinvoke-capable TZ uses a dedicated SMC id */
			if (qseecom.smcinvoke_support)
				smc_id =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
			else
				smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
			desc.arginfo =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
			desc.args[0] = req->app_or_session_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		default: {
			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
						qseos_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /*end of switch (qsee_cmd_id)  */
	break;
	} /*end of case SCM_SVC_TZSCHEDULER*/
	default: {
		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
					svc_id);
		ret = -EINVAL;
		break;
	}
	} /*end of switch svc_id */
	/* Propagate the SMC return registers back to the caller. */
	scm_resp->result = desc.ret[0];
	scm_resp->resp_type = desc.ret[1];
	scm_resp->data = desc.ret[2];
	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
		scm_resp->result, scm_resp->resp_type, scm_resp->data);
	return ret;
}
1054
1055
1056static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1057 size_t cmd_len, void *resp_buf, size_t resp_len)
1058{
1059 if (!is_scm_armv8())
1060 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1061 resp_buf, resp_len);
1062 else
1063 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1064}
1065
1066static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1067 struct qseecom_register_listener_req *svc)
1068{
1069 struct qseecom_registered_listener_list *ptr;
1070 int unique = 1;
1071 unsigned long flags;
1072
1073 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1074 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1075 if (ptr->svc.listener_id == svc->listener_id) {
1076 pr_err("Service id: %u is already registered\n",
1077 ptr->svc.listener_id);
1078 unique = 0;
1079 break;
1080 }
1081 }
1082 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1083 return unique;
1084}
1085
1086static struct qseecom_registered_listener_list *__qseecom_find_svc(
1087 int32_t listener_id)
1088{
1089 struct qseecom_registered_listener_list *entry = NULL;
1090 unsigned long flags;
1091
1092 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1093 list_for_each_entry(entry,
1094 &qseecom.registered_listener_list_head, list) {
1095 if (entry->svc.listener_id == listener_id)
1096 break;
1097 }
1098 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1099
1100 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1101 pr_err("Service id: %u is not found\n", listener_id);
1102 return NULL;
1103 }
1104
1105 return entry;
1106}
1107
1108static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1109 struct qseecom_dev_handle *handle,
1110 struct qseecom_register_listener_req *listener)
1111{
1112 int ret = 0;
1113 struct qseecom_register_listener_ireq req;
1114 struct qseecom_register_listener_64bit_ireq req_64bit;
1115 struct qseecom_command_scm_resp resp;
1116 ion_phys_addr_t pa;
1117 void *cmd_buf = NULL;
1118 size_t cmd_len;
1119
1120 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001121 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001122 listener->ifd_data_fd);
1123 if (IS_ERR_OR_NULL(svc->ihandle)) {
1124 pr_err("Ion client could not retrieve the handle\n");
1125 return -ENOMEM;
1126 }
1127
1128 /* Get the physical address of the ION BUF */
1129 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1130 if (ret) {
1131 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1132 ret);
1133 return ret;
1134 }
1135 /* Populate the structure for sending scm call to load image */
1136 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1137 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1138 pr_err("ION memory mapping for listener shared buffer failed\n");
1139 return -ENOMEM;
1140 }
1141 svc->sb_phys = (phys_addr_t)pa;
1142
1143 if (qseecom.qsee_version < QSEE_VERSION_40) {
1144 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1145 req.listener_id = svc->svc.listener_id;
1146 req.sb_len = svc->sb_length;
1147 req.sb_ptr = (uint32_t)svc->sb_phys;
1148 cmd_buf = (void *)&req;
1149 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1150 } else {
1151 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1152 req_64bit.listener_id = svc->svc.listener_id;
1153 req_64bit.sb_len = svc->sb_length;
1154 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1155 cmd_buf = (void *)&req_64bit;
1156 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1157 }
1158
1159 resp.result = QSEOS_RESULT_INCOMPLETE;
1160
1161 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1162 &resp, sizeof(resp));
1163 if (ret) {
1164 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1165 return -EINVAL;
1166 }
1167
1168 if (resp.result != QSEOS_RESULT_SUCCESS) {
1169 pr_err("Error SB registration req: resp.result = %d\n",
1170 resp.result);
1171 return -EPERM;
1172 }
1173 return 0;
1174}
1175
1176static int qseecom_register_listener(struct qseecom_dev_handle *data,
1177 void __user *argp)
1178{
1179 int ret = 0;
1180 unsigned long flags;
1181 struct qseecom_register_listener_req rcvd_lstnr;
1182 struct qseecom_registered_listener_list *new_entry;
1183
1184 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1185 if (ret) {
1186 pr_err("copy_from_user failed\n");
1187 return ret;
1188 }
1189 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1190 rcvd_lstnr.sb_size))
1191 return -EFAULT;
1192
1193 data->listener.id = 0;
1194 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
1195 pr_err("Service is not unique and is already registered\n");
1196 data->released = true;
1197 return -EBUSY;
1198 }
1199
1200 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1201 if (!new_entry)
1202 return -ENOMEM;
1203 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1204 new_entry->rcv_req_flag = 0;
1205
1206 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1207 new_entry->sb_length = rcvd_lstnr.sb_size;
1208 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1209 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
1210 pr_err("qseecom_set_sb_memoryfailed\n");
1211 kzfree(new_entry);
1212 return -ENOMEM;
1213 }
1214
1215 data->listener.id = rcvd_lstnr.listener_id;
1216 init_waitqueue_head(&new_entry->rcv_req_wq);
1217 init_waitqueue_head(&new_entry->listener_block_app_wq);
1218 new_entry->send_resp_flag = 0;
1219 new_entry->listener_in_use = false;
1220 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1221 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1222 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1223
1224 return ret;
1225}
1226
/*
 * Deregister @data's listener from QSEE and tear down its kernel state.
 *
 * Sequence is order-sensitive:
 *  1. Tell TZ to drop the listener (SCM call); bail out on failure
 *     without touching local state.
 *  2. Set the abort flag and wake any thread blocked waiting for a
 *     listener request, so it can observe the abort.
 *  3. Wait for outstanding ioctls on this handle to drain (ours counts
 *     as one, hence "> 1").
 *  4. Unlink and free the list entry, remembering the ION handle so the
 *     shared buffer can be unmapped/freed outside the spinlock.
 *
 * Returns 0 on success or a negative errno.
 */
static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	int ret = 0;
	unsigned long flags;
	uint32_t unmap_mem = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL;	/* saved for unmap after list_del */

	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
					sizeof(req), &resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		return ret;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		return -EPERM;
	}

	/* Abort any waiter on this handle, then wake its receive queue */
	data->abort = 1;
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
			list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			wake_up_all(&ptr_svc->rcv_req_wq);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Wait until our ioctl is the only one left on this handle */
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			return ret;
		}
	}

	/* Unlink and free the entry; defer ION teardown past the lock */
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc,
			&qseecom.registered_listener_list_head, list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			if (ptr_svc->sb_virt) {
				unmap_mem = 1;
				ihandle = ptr_svc->ihandle;
			}
			list_del(&ptr_svc->list);
			kzfree(ptr_svc);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Unmap the memory */
	if (unmap_mem) {
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	data->released = true;
	return ret;
}
1300
/*
 * Apply bus-bandwidth mode @mode: toggle the QSEE core clock to match
 * (off for INACTIVE, on otherwise) and, when the mode actually changes,
 * submit the new bus-scaling vote.
 *
 * Returns 0 on success or a negative errno from the clock/bus layers.
 *
 * NOTE(review): when msm_bus_scale_client_update_request() fails, the
 * clock change is rolled back but qseecom.current_mode is still set to
 * @mode, so the cached mode can disagree with the actual bus vote —
 * looks intentional-or-latent; confirm before relying on current_mode.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* Undo the clock transition done above */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		qseecom.current_mode = mode;
	}
	return ret;
}
1337
/*
 * Workqueue handler deferred from the bandwidth scale-down timer: drop
 * the bus vote to INACTIVE if the timer is still pending.
 * Lock order (app_access_lock before qsee_bw_mutex) matches the rest of
 * the driver; timer_running is cleared under qsee_bw_mutex so a racing
 * qseecom_scale_bus_bandwidth_timer() sees a consistent state.
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
1350
/*
 * bw_scale_down_timer expiry callback.  Runs in timer (atomic) context,
 * so the actual vote removal — which takes mutexes — is deferred to the
 * bw_inactive_req_ws workqueue item.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1355
1356static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1357{
1358 struct qseecom_clk *qclk;
1359 int ret = 0;
1360
1361 mutex_lock(&clk_access_lock);
1362 if (ce == CLK_QSEE)
1363 qclk = &qseecom.qsee;
1364 else
1365 qclk = &qseecom.ce_drv;
1366
1367 if (qclk->clk_access_cnt > 2) {
1368 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1369 ret = -EINVAL;
1370 goto err_dec_ref_cnt;
1371 }
1372 if (qclk->clk_access_cnt == 2)
1373 qclk->clk_access_cnt--;
1374
1375err_dec_ref_cnt:
1376 mutex_unlock(&clk_access_lock);
1377 return ret;
1378}
1379
1380
/*
 * Apply a bandwidth vote and cancel any pending scale-down timer.
 *
 * @mode: requested mode; 0 means "recompute from the cumulative votes"
 *        (capped at HIGH when the sum exceeds MEDIUM).
 *
 * If the scale-down timer was armed, the extra clock reference it holds
 * must be dropped before del_timer_sync(); failure to drop it aborts
 * with the timer still armed.  Returns 0 on success or a negative errno.
 */
static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
{
	int32_t ret = 0;
	int32_t request_mode = INACTIVE;

	mutex_lock(&qsee_bw_mutex);
	if (mode == 0) {
		/* Derive the effective mode from all clients' votes */
		if (qseecom.cumulative_mode > MEDIUM)
			request_mode = HIGH;
		else
			request_mode = qseecom.cumulative_mode;
	} else {
		request_mode = mode;
	}

	ret = __qseecom_set_msm_bus_request(request_mode);
	if (ret) {
		pr_err("set msm bus request failed (%d),request_mode (%d)\n",
			ret, request_mode);
		goto err_scale_timer;
	}

	if (qseecom.timer_running) {
		ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
		if (ret) {
			pr_err("Failed to decrease clk ref count.\n");
			goto err_scale_timer;
		}
		del_timer_sync(&(qseecom.bw_scale_down_timer));
		qseecom.timer_running = false;
	}
err_scale_timer:
	mutex_unlock(&qsee_bw_mutex);
	return ret;
}
1416
1417
1418static int qseecom_unregister_bus_bandwidth_needs(
1419 struct qseecom_dev_handle *data)
1420{
1421 int32_t ret = 0;
1422
1423 qseecom.cumulative_mode -= data->mode;
1424 data->mode = INACTIVE;
1425
1426 return ret;
1427}
1428
1429static int __qseecom_register_bus_bandwidth_needs(
1430 struct qseecom_dev_handle *data, uint32_t request_mode)
1431{
1432 int32_t ret = 0;
1433
1434 if (data->mode == INACTIVE) {
1435 qseecom.cumulative_mode += request_mode;
1436 data->mode = request_mode;
1437 } else {
1438 if (data->mode != request_mode) {
1439 qseecom.cumulative_mode -= data->mode;
1440 qseecom.cumulative_mode += request_mode;
1441 data->mode = request_mode;
1442 }
1443 }
1444 return ret;
1445}
1446
1447static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1448{
1449 int ret = 0;
1450
1451 ret = qsee_vote_for_clock(data, CLK_DFAB);
1452 if (ret) {
1453 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1454 goto perf_enable_exit;
1455 }
1456 ret = qsee_vote_for_clock(data, CLK_SFPB);
1457 if (ret) {
1458 qsee_disable_clock_vote(data, CLK_DFAB);
1459 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1460 goto perf_enable_exit;
1461 }
1462
1463perf_enable_exit:
1464 return ret;
1465}
1466
1467static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1468 void __user *argp)
1469{
1470 int32_t ret = 0;
1471 int32_t req_mode;
1472
1473 if (qseecom.no_clock_support)
1474 return 0;
1475
1476 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1477 if (ret) {
1478 pr_err("copy_from_user failed\n");
1479 return ret;
1480 }
1481 if (req_mode > HIGH) {
1482 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1483 return -EINVAL;
1484 }
1485
1486 /*
1487 * Register bus bandwidth needs if bus scaling feature is enabled;
1488 * otherwise, qseecom enable/disable clocks for the client directly.
1489 */
1490 if (qseecom.support_bus_scaling) {
1491 mutex_lock(&qsee_bw_mutex);
1492 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1493 mutex_unlock(&qsee_bw_mutex);
1494 } else {
1495 pr_debug("Bus scaling feature is NOT enabled\n");
1496 pr_debug("request bandwidth mode %d for the client\n",
1497 req_mode);
1498 if (req_mode != INACTIVE) {
1499 ret = qseecom_perf_enable(data);
1500 if (ret)
1501 pr_err("Failed to vote for clock with err %d\n",
1502 ret);
1503 } else {
1504 qsee_disable_clock_vote(data, CLK_DFAB);
1505 qsee_disable_clock_vote(data, CLK_SFPB);
1506 }
1507 }
1508 return ret;
1509}
1510
1511static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1512{
1513 if (qseecom.no_clock_support)
1514 return;
1515
1516 mutex_lock(&qsee_bw_mutex);
1517 qseecom.bw_scale_down_timer.expires = jiffies +
1518 msecs_to_jiffies(duration);
1519 mod_timer(&(qseecom.bw_scale_down_timer),
1520 qseecom.bw_scale_down_timer.expires);
1521 qseecom.timer_running = true;
1522 mutex_unlock(&qsee_bw_mutex);
1523}
1524
1525static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1526{
1527 if (!qseecom.support_bus_scaling)
1528 qsee_disable_clock_vote(data, CLK_SFPB);
1529 else
1530 __qseecom_add_bw_scale_down_timer(
1531 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1532}
1533
1534static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1535{
1536 int ret = 0;
1537
1538 if (qseecom.support_bus_scaling) {
1539 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1540 if (ret)
1541 pr_err("Failed to set bw MEDIUM.\n");
1542 } else {
1543 ret = qsee_vote_for_clock(data, CLK_SFPB);
1544 if (ret)
1545 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1546 }
1547 return ret;
1548}
1549
1550static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1551 void __user *argp)
1552{
1553 ion_phys_addr_t pa;
1554 int32_t ret;
1555 struct qseecom_set_sb_mem_param_req req;
1556 size_t len;
1557
1558 /* Copy the relevant information needed for loading the image */
1559 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1560 return -EFAULT;
1561
1562 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1563 (req.sb_len == 0)) {
1564 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1565 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1566 return -EFAULT;
1567 }
1568 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1569 req.sb_len))
1570 return -EFAULT;
1571
1572 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001573 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001574 req.ifd_data_fd);
1575 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1576 pr_err("Ion client could not retrieve the handle\n");
1577 return -ENOMEM;
1578 }
1579 /* Get the physical address of the ION BUF */
1580 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1581 if (ret) {
1582
1583 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1584 ret);
1585 return ret;
1586 }
1587
1588 if (len < req.sb_len) {
1589 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1590 req.sb_len, len);
1591 return -EINVAL;
1592 }
1593 /* Populate the structure for sending scm call to load image */
1594 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1595 data->client.ihandle);
1596 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1597 pr_err("ION memory mapping for client shared buf failed\n");
1598 return -ENOMEM;
1599 }
1600 data->client.sb_phys = (phys_addr_t)pa;
1601 data->client.sb_length = req.sb_len;
1602 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1603 return 0;
1604}
1605
1606static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
1607{
1608 int ret;
1609
1610 ret = (qseecom.send_resp_flag != 0);
1611 return ret || data->abort;
1612}
1613
1614static int __qseecom_reentrancy_listener_has_sent_rsp(
1615 struct qseecom_dev_handle *data,
1616 struct qseecom_registered_listener_list *ptr_svc)
1617{
1618 int ret;
1619
1620 ret = (ptr_svc->send_resp_flag != 0);
1621 return ret || data->abort;
1622}
1623
1624static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
1625 struct qseecom_command_scm_resp *resp,
1626 struct qseecom_client_listener_data_irsp *send_data_rsp,
1627 struct qseecom_registered_listener_list *ptr_svc,
1628 uint32_t lstnr) {
1629 int ret = 0;
1630
1631 send_data_rsp->status = QSEOS_RESULT_FAILURE;
1632 qseecom.send_resp_flag = 0;
1633 send_data_rsp->qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
1634 send_data_rsp->listener_id = lstnr;
1635 if (ptr_svc)
1636 pr_warn("listener_id:%x, lstnr: %x\n",
1637 ptr_svc->svc.listener_id, lstnr);
1638 if (ptr_svc && ptr_svc->ihandle) {
1639 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
1640 ptr_svc->sb_virt, ptr_svc->sb_length,
1641 ION_IOC_CLEAN_INV_CACHES);
1642 if (ret) {
1643 pr_err("cache operation failed %d\n", ret);
1644 return ret;
1645 }
1646 }
1647
1648 if (lstnr == RPMB_SERVICE) {
1649 ret = __qseecom_enable_clk(CLK_QSEE);
1650 if (ret)
1651 return ret;
1652 }
1653 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, send_data_rsp,
1654 sizeof(send_data_rsp), resp, sizeof(*resp));
1655 if (ret) {
1656 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1657 ret, data->client.app_id);
1658 if (lstnr == RPMB_SERVICE)
1659 __qseecom_disable_clk(CLK_QSEE);
1660 return ret;
1661 }
1662 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1663 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1664 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1665 resp->result, data->client.app_id, lstnr);
1666 ret = -EINVAL;
1667 }
1668 if (lstnr == RPMB_SERVICE)
1669 __qseecom_disable_clk(CLK_QSEE);
1670 return ret;
1671}
1672
1673static void __qseecom_clean_listener_sglistinfo(
1674 struct qseecom_registered_listener_list *ptr_svc)
1675{
1676 if (ptr_svc->sglist_cnt) {
1677 memset(ptr_svc->sglistinfo_ptr, 0,
1678 SGLISTINFO_TABLE_SIZE);
1679 ptr_svc->sglist_cnt = 0;
1680 }
1681}
1682
/*
 * Service QSEOS_RESULT_INCOMPLETE responses from TZ: each iteration
 * wakes the listener named in resp->data, blocks (signals masked,
 * freezable) until that listener posts its response or the handle is
 * aborted, then forwards the listener's status back to TZ via another
 * SCM call, looping while TZ keeps answering INCOMPLETE.
 *
 * Returns 0 on success, -ENODEV if aborted, or a negative errno from
 * the cache/clock/SCM steps.
 *
 * NOTE(review): after a full list traversal without a match,
 * list_for_each_entry() leaves ptr_svc at the list-head container, so
 * the "ptr_svc == NULL" branch is unreachable on a populated list; the
 * listener_id mismatch check below is what actually catches a missing
 * listener — confirm before relying on the NULL branch.
 */
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp;
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/* Tell TZ the request failed if the listener is unusable */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_warn("Service requested does not exist\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -ERESTARTSYS;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);
		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		do {
			/*
			 * When reentrancy is not supported, check global
			 * send_resp_flag; otherwise, check this listener's
			 * send_resp_flag.
			 */
			if (!qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_listener_has_sent_rsp(data))) {
				break;
			}

			if (qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}

		qseecom.send_resp_flag = 0;
		ptr_svc->send_resp_flag = 0;
		table = ptr_svc->sglistinfo_ptr;
		/* Build the 32-bit or 64-bit response for the QSEE version */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			send_data_rsp.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
			send_data_rsp.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			send_data_rsp_64bit.sglistinfo_ptr =
				virt_to_phys(table);
			send_data_rsp_64bit.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* First field of both response layouts is the command id */
		if (qseecom.whitelist_support == false)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}

		/* RPMB/SSD listeners need the QSEE clock held for the call */
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		ptr_svc->listener_in_use = false;
		__qseecom_clean_listener_sglistinfo(ptr_svc);
		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
				__qseecom_disable_clk(CLK_QSEE);
			return ret;
		}
		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
		}
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
			__qseecom_disable_clk(CLK_QSEE);

	}
	/* An abort (rc) takes precedence over the last SCM status (ret) */
	if (rc)
		return rc;

	return ret;
}
1851
/*
 * Legacy (non-smcinvoke) handling of a TZ app blocked on a busy
 * listener: find the owning app entry, sleep (with app_access_lock
 * dropped) until the listener becomes free, then issue the
 * CONTINUE_BLOCKED_REQ command identified by app_id so TZ unblocks the
 * app.  On success resp->result is set back to INCOMPLETE so the
 * caller resumes normal incomplete-command processing.
 *
 * Returns 0 on success or a negative errno; -ERESTARTSYS is returned
 * with app_access_lock NOT reacquired if the freezable wait is
 * interrupted (caller-visible lock state follows the original code).
 */
static int __qseecom_process_blocked_on_listener_legacy(
				struct qseecom_command_scm_resp *resp,
				struct qseecom_registered_app_list *ptr_app,
				struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	bool found_app = false;
	unsigned long flags;

	if (!resp || !data) {
		pr_err("invalid resp or data pointer\n");
		ret = -EINVAL;
		goto exit;
	}

	/* find app_id & img_name from list */
	if (!ptr_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
					list) {
			if ((ptr_app->app_id == data->client.app_id) &&
				(!strcmp(ptr_app->app_name,
					data->client.app_name))) {
				found_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
					flags);
		if (!found_app) {
			pr_err("app_id %d (%s) is not found\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -ENOENT;
			goto exit;
		}
	}

	list_ptr = __qseecom_find_svc(resp->data);
	if (!list_ptr) {
		pr_err("Invalid listener ID\n");
		ret = -ENODATA;
		goto exit;
	}
	pr_debug("lsntr %d in_use = %d\n",
			resp->data, list_ptr->listener_in_use);
	ptr_app->blocked_on_listener_id = resp->data;

	/* sleep until listener is available */
	qseecom.app_block_ref_cnt++;
	ptr_app->app_blocked = true;
	/* drop the big lock so the listener thread can make progress */
	mutex_unlock(&app_access_lock);
	if (wait_event_freezable(
		list_ptr->listener_block_app_wq,
		!list_ptr->listener_in_use)) {
		pr_err("Interrupted: listener_id %d, app_id %d\n",
			resp->data, ptr_app->app_id);
		ret = -ERESTARTSYS;
		goto exit;
	}
	mutex_lock(&app_access_lock);
	ptr_app->app_blocked = false;
	qseecom.app_block_ref_cnt--;

	ptr_app->blocked_on_listener_id = 0;
	/* notify the blocked app that listener is available */
	pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
		resp->data, data->client.app_id,
		data->client.app_name);
	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
	/* legacy path identifies the blocked entity by app id */
	ireq.app_or_session_id = data->client.app_id;
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
	if (ret) {
		pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
			data->client.app_id,
			data->client.app_name, ret);
		goto exit;
	}
	/*
	 * After TZ app is unblocked, then continue to next case
	 * for incomplete request processing
	 */
	resp->result = QSEOS_RESULT_INCOMPLETE;
exit:
	return ret;
}
1943
/*
 * smcinvoke-capable handling of a TZ session blocked on a busy
 * listener: the blocked entity is identified by the session id TZ
 * returned in resp->resp_type (not by app id).  Sleep with
 * app_access_lock dropped until the listener frees up, then issue
 * CONTINUE_BLOCKED_REQ with the session id and reset resp->result to
 * INCOMPLETE so the caller resumes processing.
 *
 * Returns 0 on success or a negative errno; -ERESTARTSYS is returned
 * with app_access_lock NOT reacquired if the freezable wait is
 * interrupted (matches the legacy variant's lock behavior).
 */
static int __qseecom_process_blocked_on_listener_smcinvoke(
			struct qseecom_command_scm_resp *resp)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	unsigned int session_id;

	if (!resp) {
		pr_err("invalid resp pointer\n");
		ret = -EINVAL;
		goto exit;
	}
	session_id = resp->resp_type;
	list_ptr = __qseecom_find_svc(resp->data);
	if (!list_ptr) {
		pr_err("Invalid listener ID\n");
		ret = -ENODATA;
		goto exit;
	}
	pr_debug("lsntr %d in_use = %d\n",
			resp->data, list_ptr->listener_in_use);
	/* sleep until listener is available */
	qseecom.app_block_ref_cnt++;
	mutex_unlock(&app_access_lock);
	if (wait_event_freezable(
		list_ptr->listener_block_app_wq,
		!list_ptr->listener_in_use)) {
		pr_err("Interrupted: listener_id %d, session_id %d\n",
			resp->data, session_id);
		ret = -ERESTARTSYS;
		goto exit;
	}
	mutex_lock(&app_access_lock);
	qseecom.app_block_ref_cnt--;

	/* notify TZ that listener is available */
	pr_warn("Lsntr %d is available, unblock session(%d) in TZ\n",
		resp->data, session_id);
	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
	ireq.app_or_session_id = session_id;
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
	if (ret) {
		pr_err("scm_call for continue blocked req for session %d failed, ret %d\n",
			session_id, ret);
		goto exit;
	}
	resp->result = QSEOS_RESULT_INCOMPLETE;
exit:
	return ret;
}
1998
1999static int __qseecom_process_reentrancy_blocked_on_listener(
2000 struct qseecom_command_scm_resp *resp,
2001 struct qseecom_registered_app_list *ptr_app,
2002 struct qseecom_dev_handle *data)
2003{
2004 if (!qseecom.smcinvoke_support)
2005 return __qseecom_process_blocked_on_listener_legacy(
2006 resp, ptr_app, data);
2007 else
2008 return __qseecom_process_blocked_on_listener_smcinvoke(
2009 resp);
2010}
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002011static int __qseecom_reentrancy_process_incomplete_cmd(
2012 struct qseecom_dev_handle *data,
2013 struct qseecom_command_scm_resp *resp)
2014{
2015 int ret = 0;
2016 int rc = 0;
2017 uint32_t lstnr;
2018 unsigned long flags;
2019 struct qseecom_client_listener_data_irsp send_data_rsp;
2020 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
2021 struct qseecom_registered_listener_list *ptr_svc = NULL;
2022 sigset_t new_sigset;
2023 sigset_t old_sigset;
2024 uint32_t status;
2025 void *cmd_buf = NULL;
2026 size_t cmd_len;
2027 struct sglist_info *table = NULL;
2028
2029 while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
2030 lstnr = resp->data;
2031 /*
2032 * Wake up blocking lsitener service with the lstnr id
2033 */
2034 spin_lock_irqsave(&qseecom.registered_listener_list_lock,
2035 flags);
2036 list_for_each_entry(ptr_svc,
2037 &qseecom.registered_listener_list_head, list) {
2038 if (ptr_svc->svc.listener_id == lstnr) {
2039 ptr_svc->listener_in_use = true;
2040 ptr_svc->rcv_req_flag = 1;
2041 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2042 break;
2043 }
2044 }
2045 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
2046 flags);
2047
2048 if (ptr_svc == NULL) {
2049 pr_err("Listener Svc %d does not exist\n", lstnr);
2050 return -EINVAL;
2051 }
2052
2053 if (!ptr_svc->ihandle) {
2054 pr_err("Client handle is not initialized\n");
2055 return -EINVAL;
2056 }
2057
2058 if (ptr_svc->svc.listener_id != lstnr) {
2059 pr_warn("Service requested does not exist\n");
2060 return -ERESTARTSYS;
2061 }
2062 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2063
2064 /* initialize the new signal mask with all signals*/
2065 sigfillset(&new_sigset);
2066
2067 /* block all signals */
2068 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2069
2070 /* unlock mutex btw waking listener and sleep-wait */
2071 mutex_unlock(&app_access_lock);
2072 do {
2073 if (!wait_event_freezable(qseecom.send_resp_wq,
2074 __qseecom_reentrancy_listener_has_sent_rsp(
2075 data, ptr_svc))) {
2076 break;
2077 }
2078 } while (1);
2079 /* lock mutex again after resp sent */
2080 mutex_lock(&app_access_lock);
2081 ptr_svc->send_resp_flag = 0;
2082 qseecom.send_resp_flag = 0;
2083
2084 /* restore signal mask */
2085 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2086 if (data->abort) {
2087 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
2088 data->client.app_id, lstnr, ret);
2089 rc = -ENODEV;
2090 status = QSEOS_RESULT_FAILURE;
2091 } else {
2092 status = QSEOS_RESULT_SUCCESS;
2093 }
2094 table = ptr_svc->sglistinfo_ptr;
2095 if (qseecom.qsee_version < QSEE_VERSION_40) {
2096 send_data_rsp.listener_id = lstnr;
2097 send_data_rsp.status = status;
2098 send_data_rsp.sglistinfo_ptr =
2099 (uint32_t)virt_to_phys(table);
2100 send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
2101 dmac_flush_range((void *)table,
2102 (void *)table + SGLISTINFO_TABLE_SIZE);
2103 cmd_buf = (void *)&send_data_rsp;
2104 cmd_len = sizeof(send_data_rsp);
2105 } else {
2106 send_data_rsp_64bit.listener_id = lstnr;
2107 send_data_rsp_64bit.status = status;
2108 send_data_rsp_64bit.sglistinfo_ptr =
2109 virt_to_phys(table);
2110 send_data_rsp_64bit.sglistinfo_len =
2111 SGLISTINFO_TABLE_SIZE;
2112 dmac_flush_range((void *)table,
2113 (void *)table + SGLISTINFO_TABLE_SIZE);
2114 cmd_buf = (void *)&send_data_rsp_64bit;
2115 cmd_len = sizeof(send_data_rsp_64bit);
2116 }
2117 if (qseecom.whitelist_support == false)
2118 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2119 else
2120 *(uint32_t *)cmd_buf =
2121 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
2122 if (ptr_svc) {
2123 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2124 ptr_svc->ihandle,
2125 ptr_svc->sb_virt, ptr_svc->sb_length,
2126 ION_IOC_CLEAN_INV_CACHES);
2127 if (ret) {
2128 pr_err("cache operation failed %d\n", ret);
2129 return ret;
2130 }
2131 }
2132 if (lstnr == RPMB_SERVICE) {
2133 ret = __qseecom_enable_clk(CLK_QSEE);
2134 if (ret)
2135 return ret;
2136 }
2137
2138 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2139 cmd_buf, cmd_len, resp, sizeof(*resp));
2140 ptr_svc->listener_in_use = false;
2141 __qseecom_clean_listener_sglistinfo(ptr_svc);
2142 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2143
2144 if (ret) {
2145 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2146 ret, data->client.app_id);
2147 goto exit;
2148 }
2149
2150 switch (resp->result) {
2151 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2152 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2153 lstnr, data->client.app_id, resp->data);
2154 if (lstnr == resp->data) {
2155 pr_err("lstnr %d should not be blocked!\n",
2156 lstnr);
2157 ret = -EINVAL;
2158 goto exit;
2159 }
2160 ret = __qseecom_process_reentrancy_blocked_on_listener(
2161 resp, NULL, data);
2162 if (ret) {
2163 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2164 data->client.app_id,
2165 data->client.app_name, resp->data);
2166 goto exit;
2167 }
2168 case QSEOS_RESULT_SUCCESS:
2169 case QSEOS_RESULT_INCOMPLETE:
2170 break;
2171 default:
2172 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2173 resp->result, data->client.app_id, lstnr);
2174 ret = -EINVAL;
2175 goto exit;
2176 }
2177exit:
2178 if (lstnr == RPMB_SERVICE)
2179 __qseecom_disable_clk(CLK_QSEE);
2180
2181 }
2182 if (rc)
2183 return rc;
2184
2185 return ret;
2186}
2187
2188/*
2189 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2190 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2191 * So, needs to first check if no app blocked before sending OS level scm call,
2192 * then wait until all apps are unblocked.
2193 */
static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
{
	sigset_t new_sigset, old_sigset;

	/*
	 * Only Trusted-OS-owned (OS level) calls are gated, and only on
	 * reentrancy phases 1 and 2; phase 3 firmware handles blocked apps
	 * by itself.
	 */
	if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
		qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
		IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
		/* thread sleep until this app unblocked */
		while (qseecom.app_block_ref_cnt > 0) {
			/*
			 * Mask all signals so the freezable wait below is not
			 * interrupted, and drop app_access_lock while
			 * sleeping so blocked apps can make progress.
			 */
			sigfillset(&new_sigset);
			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
			mutex_unlock(&app_access_lock);
			do {
				if (!wait_event_freezable(qseecom.app_block_wq,
					(qseecom.app_block_ref_cnt == 0)))
					break;
			} while (1);
			mutex_lock(&app_access_lock);
			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		}
	}
}
2216
2217/*
2218 * scm_call of send data will fail if this TA is blocked or there are more
2219 * than one TA requesting listener services; So, first check to see if need
2220 * to wait.
2221 */
static void __qseecom_reentrancy_check_if_this_app_blocked(
			struct qseecom_registered_app_list *ptr_app)
{
	sigset_t new_sigset, old_sigset;

	if (qseecom.qsee_reentrancy_support) {
		/*
		 * Wait while this app is blocked in TZ, or while another app
		 * is already blocked on a listener (app_block_ref_cnt > 1).
		 */
		while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
			/* thread sleep until this app unblocked */
			/*
			 * Mask all signals so the freezable wait cannot be
			 * interrupted; app_access_lock is released during the
			 * sleep so the blocking app can be unblocked.
			 */
			sigfillset(&new_sigset);
			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
			mutex_unlock(&app_access_lock);
			do {
				if (!wait_event_freezable(qseecom.app_block_wq,
					(!ptr_app->app_blocked &&
					qseecom.app_block_ref_cnt <= 1)))
					break;
			} while (1);
			mutex_lock(&app_access_lock);
			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		}
	}
}
2244
2245static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2246 uint32_t *app_id)
2247{
2248 int32_t ret;
2249 struct qseecom_command_scm_resp resp;
2250 bool found_app = false;
2251 struct qseecom_registered_app_list *entry = NULL;
2252 unsigned long flags = 0;
2253
2254 if (!app_id) {
2255 pr_err("Null pointer to app_id\n");
2256 return -EINVAL;
2257 }
2258 *app_id = 0;
2259
2260 /* check if app exists and has been registered locally */
2261 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2262 list_for_each_entry(entry,
2263 &qseecom.registered_app_list_head, list) {
2264 if (!strcmp(entry->app_name, req.app_name)) {
2265 found_app = true;
2266 break;
2267 }
2268 }
2269 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2270 if (found_app) {
2271 pr_debug("Found app with id %d\n", entry->app_id);
2272 *app_id = entry->app_id;
2273 return 0;
2274 }
2275
2276 memset((void *)&resp, 0, sizeof(resp));
2277
2278 /* SCM_CALL to check if app_id for the mentioned app exists */
2279 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2280 sizeof(struct qseecom_check_app_ireq),
2281 &resp, sizeof(resp));
2282 if (ret) {
2283 pr_err("scm_call to check if app is already loaded failed\n");
2284 return -EINVAL;
2285 }
2286
2287 if (resp.result == QSEOS_RESULT_FAILURE)
2288 return 0;
2289
2290 switch (resp.resp_type) {
2291 /*qsee returned listener type response */
2292 case QSEOS_LISTENER_ID:
2293 pr_err("resp type is of listener type instead of app");
2294 return -EINVAL;
2295 case QSEOS_APP_ID:
2296 *app_id = resp.data;
2297 return 0;
2298 default:
2299 pr_err("invalid resp type (%d) from qsee",
2300 resp.resp_type);
2301 return -ENODEV;
2302 }
2303}
2304
2305static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2306{
2307 struct qseecom_registered_app_list *entry = NULL;
2308 unsigned long flags = 0;
2309 u32 app_id = 0;
2310 struct ion_handle *ihandle; /* Ion handle */
2311 struct qseecom_load_img_req load_img_req;
2312 int32_t ret = 0;
2313 ion_phys_addr_t pa = 0;
2314 size_t len;
2315 struct qseecom_command_scm_resp resp;
2316 struct qseecom_check_app_ireq req;
2317 struct qseecom_load_app_ireq load_req;
2318 struct qseecom_load_app_64bit_ireq load_req_64bit;
2319 void *cmd_buf = NULL;
2320 size_t cmd_len;
2321 bool first_time = false;
2322
2323 /* Copy the relevant information needed for loading the image */
2324 if (copy_from_user(&load_img_req,
2325 (void __user *)argp,
2326 sizeof(struct qseecom_load_img_req))) {
2327 pr_err("copy_from_user failed\n");
2328 return -EFAULT;
2329 }
2330
2331 /* Check and load cmnlib */
2332 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2333 if (!qseecom.commonlib_loaded &&
2334 load_img_req.app_arch == ELFCLASS32) {
2335 ret = qseecom_load_commonlib_image(data, "cmnlib");
2336 if (ret) {
2337 pr_err("failed to load cmnlib\n");
2338 return -EIO;
2339 }
2340 qseecom.commonlib_loaded = true;
2341 pr_debug("cmnlib is loaded\n");
2342 }
2343
2344 if (!qseecom.commonlib64_loaded &&
2345 load_img_req.app_arch == ELFCLASS64) {
2346 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2347 if (ret) {
2348 pr_err("failed to load cmnlib64\n");
2349 return -EIO;
2350 }
2351 qseecom.commonlib64_loaded = true;
2352 pr_debug("cmnlib64 is loaded\n");
2353 }
2354 }
2355
2356 if (qseecom.support_bus_scaling) {
2357 mutex_lock(&qsee_bw_mutex);
2358 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2359 mutex_unlock(&qsee_bw_mutex);
2360 if (ret)
2361 return ret;
2362 }
2363
2364 /* Vote for the SFPB clock */
2365 ret = __qseecom_enable_clk_scale_up(data);
2366 if (ret)
2367 goto enable_clk_err;
2368
2369 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2370 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2371 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2372
2373 ret = __qseecom_check_app_exists(req, &app_id);
2374 if (ret < 0)
2375 goto loadapp_err;
2376
2377 if (app_id) {
2378 pr_debug("App id %d (%s) already exists\n", app_id,
2379 (char *)(req.app_name));
2380 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2381 list_for_each_entry(entry,
2382 &qseecom.registered_app_list_head, list){
2383 if (entry->app_id == app_id) {
2384 entry->ref_cnt++;
2385 break;
2386 }
2387 }
2388 spin_unlock_irqrestore(
2389 &qseecom.registered_app_list_lock, flags);
2390 ret = 0;
2391 } else {
2392 first_time = true;
2393 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2394 (char *)(load_img_req.img_name));
2395 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002396 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002397 load_img_req.ifd_data_fd);
2398 if (IS_ERR_OR_NULL(ihandle)) {
2399 pr_err("Ion client could not retrieve the handle\n");
2400 ret = -ENOMEM;
2401 goto loadapp_err;
2402 }
2403
2404 /* Get the physical address of the ION BUF */
2405 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2406 if (ret) {
2407 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2408 ret);
2409 goto loadapp_err;
2410 }
2411 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2412 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2413 len, load_img_req.mdt_len,
2414 load_img_req.img_len);
2415 ret = -EINVAL;
2416 goto loadapp_err;
2417 }
2418 /* Populate the structure for sending scm call to load image */
2419 if (qseecom.qsee_version < QSEE_VERSION_40) {
2420 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2421 load_req.mdt_len = load_img_req.mdt_len;
2422 load_req.img_len = load_img_req.img_len;
2423 strlcpy(load_req.app_name, load_img_req.img_name,
2424 MAX_APP_NAME_SIZE);
2425 load_req.phy_addr = (uint32_t)pa;
2426 cmd_buf = (void *)&load_req;
2427 cmd_len = sizeof(struct qseecom_load_app_ireq);
2428 } else {
2429 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2430 load_req_64bit.mdt_len = load_img_req.mdt_len;
2431 load_req_64bit.img_len = load_img_req.img_len;
2432 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2433 MAX_APP_NAME_SIZE);
2434 load_req_64bit.phy_addr = (uint64_t)pa;
2435 cmd_buf = (void *)&load_req_64bit;
2436 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2437 }
2438
2439 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2440 ION_IOC_CLEAN_INV_CACHES);
2441 if (ret) {
2442 pr_err("cache operation failed %d\n", ret);
2443 goto loadapp_err;
2444 }
2445
2446 /* SCM_CALL to load the app and get the app_id back */
2447 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2448 cmd_len, &resp, sizeof(resp));
2449 if (ret) {
2450 pr_err("scm_call to load app failed\n");
2451 if (!IS_ERR_OR_NULL(ihandle))
2452 ion_free(qseecom.ion_clnt, ihandle);
2453 ret = -EINVAL;
2454 goto loadapp_err;
2455 }
2456
2457 if (resp.result == QSEOS_RESULT_FAILURE) {
2458 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2459 if (!IS_ERR_OR_NULL(ihandle))
2460 ion_free(qseecom.ion_clnt, ihandle);
2461 ret = -EFAULT;
2462 goto loadapp_err;
2463 }
2464
2465 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2466 ret = __qseecom_process_incomplete_cmd(data, &resp);
2467 if (ret) {
2468 pr_err("process_incomplete_cmd failed err: %d\n",
2469 ret);
2470 if (!IS_ERR_OR_NULL(ihandle))
2471 ion_free(qseecom.ion_clnt, ihandle);
2472 ret = -EFAULT;
2473 goto loadapp_err;
2474 }
2475 }
2476
2477 if (resp.result != QSEOS_RESULT_SUCCESS) {
2478 pr_err("scm_call failed resp.result unknown, %d\n",
2479 resp.result);
2480 if (!IS_ERR_OR_NULL(ihandle))
2481 ion_free(qseecom.ion_clnt, ihandle);
2482 ret = -EFAULT;
2483 goto loadapp_err;
2484 }
2485
2486 app_id = resp.data;
2487
2488 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2489 if (!entry) {
2490 ret = -ENOMEM;
2491 goto loadapp_err;
2492 }
2493 entry->app_id = app_id;
2494 entry->ref_cnt = 1;
2495 entry->app_arch = load_img_req.app_arch;
2496 /*
2497 * keymaster app may be first loaded as "keymaste" by qseecomd,
2498 * and then used as "keymaster" on some targets. To avoid app
2499 * name checking error, register "keymaster" into app_list and
2500 * thread private data.
2501 */
2502 if (!strcmp(load_img_req.img_name, "keymaste"))
2503 strlcpy(entry->app_name, "keymaster",
2504 MAX_APP_NAME_SIZE);
2505 else
2506 strlcpy(entry->app_name, load_img_req.img_name,
2507 MAX_APP_NAME_SIZE);
2508 entry->app_blocked = false;
2509 entry->blocked_on_listener_id = 0;
2510
2511 /* Deallocate the handle */
2512 if (!IS_ERR_OR_NULL(ihandle))
2513 ion_free(qseecom.ion_clnt, ihandle);
2514
2515 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2516 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2517 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2518 flags);
2519
2520 pr_warn("App with id %u (%s) now loaded\n", app_id,
2521 (char *)(load_img_req.img_name));
2522 }
2523 data->client.app_id = app_id;
2524 data->client.app_arch = load_img_req.app_arch;
2525 if (!strcmp(load_img_req.img_name, "keymaste"))
2526 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2527 else
2528 strlcpy(data->client.app_name, load_img_req.img_name,
2529 MAX_APP_NAME_SIZE);
2530 load_img_req.app_id = app_id;
2531 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2532 pr_err("copy_to_user failed\n");
2533 ret = -EFAULT;
2534 if (first_time == true) {
2535 spin_lock_irqsave(
2536 &qseecom.registered_app_list_lock, flags);
2537 list_del(&entry->list);
2538 spin_unlock_irqrestore(
2539 &qseecom.registered_app_list_lock, flags);
2540 kzfree(entry);
2541 }
2542 }
2543
2544loadapp_err:
2545 __qseecom_disable_clk_scale_down(data);
2546enable_clk_err:
2547 if (qseecom.support_bus_scaling) {
2548 mutex_lock(&qsee_bw_mutex);
2549 qseecom_unregister_bus_bandwidth_needs(data);
2550 mutex_unlock(&qsee_bw_mutex);
2551 }
2552 return ret;
2553}
2554
/*
 * Wake all waiters on this handle and block until every other in-flight
 * ioctl has drained (ioctl_count drops to 1, i.e. only the caller).
 * Returns 1 ("proceed to unload the app") or -ERESTARTSYS if the wait
 * was interrupted.
 */
static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
{
	int ret = 1;	/* Set unload app */

	wake_up_all(&qseecom.send_resp_wq);
	/* drop app_access_lock while waiting so blocked ioctls can finish */
	if (qseecom.qsee_reentrancy_support)
		mutex_unlock(&app_access_lock);
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
					atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			break;
		}
	}
	if (qseecom.qsee_reentrancy_support)
		mutex_lock(&app_access_lock);
	return ret;
}
2574
2575static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2576{
2577 int ret = 0;
2578
2579 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2580 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2581 ion_free(qseecom.ion_clnt, data->client.ihandle);
2582 data->client.ihandle = NULL;
2583 }
2584 return ret;
2585}
2586
/*
 * Unload the app bound to this handle: drain other ioctls, tell TZ to
 * shut the app down (unless other handles still reference it, or it is
 * the keymaster app which is never unloaded), drop the registered-app
 * list entry, and release the client's ion memory.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;		/* send shutdown command to TZ */
	bool found_app = false;		/* id + name matched a list entry */
	bool found_dead_app = false;	/* id matched but name did not */

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* keymaster is kept resident; only release local resources */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/* last reference (or crash): unload */
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for sending scm call to load image */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
								req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
					data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
					data->client.app_id);
		/* TZ may need listener round-trips to finish the shutdown */
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
									ret);
				goto unload_exit;
			}
		}
	}

	if (found_app) {
		/* drop the local reference; remove the entry if unloaded */
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}
unload_exit:
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2707
2708static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2709 unsigned long virt)
2710{
2711 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2712}
2713
2714static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2715 unsigned long virt)
2716{
2717 return (uintptr_t)data->client.sb_virt +
2718 (virt - data->client.user_virt_sb_base);
2719}
2720
2721int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2722 struct qseecom_send_svc_cmd_req *req_ptr,
2723 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2724{
2725 int ret = 0;
2726 void *req_buf = NULL;
2727
2728 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2729 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2730 req_ptr, send_svc_ireq_ptr);
2731 return -EINVAL;
2732 }
2733
2734 /* Clients need to ensure req_buf is at base offset of shared buffer */
2735 if ((uintptr_t)req_ptr->cmd_req_buf !=
2736 data_ptr->client.user_virt_sb_base) {
2737 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2738 return -EINVAL;
2739 }
2740
2741 if (data_ptr->client.sb_length <
2742 sizeof(struct qseecom_rpmb_provision_key)) {
2743 pr_err("shared buffer is too small to hold key type\n");
2744 return -EINVAL;
2745 }
2746 req_buf = data_ptr->client.sb_virt;
2747
2748 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2749 send_svc_ireq_ptr->key_type =
2750 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2751 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2752 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2753 data_ptr, (uintptr_t)req_ptr->resp_buf));
2754 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2755
2756 return ret;
2757}
2758
2759int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2760 struct qseecom_send_svc_cmd_req *req_ptr,
2761 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2762{
2763 int ret = 0;
2764 uint32_t reqd_len_sb_in = 0;
2765
2766 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2767 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2768 req_ptr, send_svc_ireq_ptr);
2769 return -EINVAL;
2770 }
2771
2772 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2773 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2774 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2775 pr_err("Required: %u, Available: %zu\n",
2776 reqd_len_sb_in, data_ptr->client.sb_length);
2777 return -ENOMEM;
2778 }
2779
2780 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2781 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2782 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2783 data_ptr, (uintptr_t)req_ptr->resp_buf));
2784 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2785
2786 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2787 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2788
2789
2790 return ret;
2791}
2792
/*
 * Validate a send-service-cmd request against the client's shared buffer:
 * non-NULL buffers, both buffers fully inside the shared buffer, non-zero
 * lengths, and no integer overflow in any address/length arithmetic.
 * Returns 0 when valid, -EINVAL or -ENOMEM otherwise.
 */
static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_svc_cmd_req *req)
{
	if (!req || !req->resp_buf || !req->cmd_req_buf) {
		pr_err("req or cmd buffer or response buffer is null\n");
		return -EINVAL;
	}

	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->client.sb_virt == NULL) {
		pr_err("sb_virt null\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base == 0) {
		pr_err("user_virt_sb_base is null\n");
		return -EINVAL;
	}

	if (data->client.sb_length == 0) {
		pr_err("sb_length is 0\n");
		return -EINVAL;
	}

	/* both buffers must start inside [sb_base, sb_base + sb_length) */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* guard the cmd_req_len + resp_len sum computed below */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* guard the end-address sums computed below */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* both buffers must also end inside the shared buffer */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
2877
/*
 * Handle a send-service-cmd ioctl: validate the request, build the RPMB
 * or FSM-key service command, vote for bus/clocks, flush the shared
 * buffer, issue the scm call, and map the TZ result to an errno.
 */
static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	int ret = 0;
	struct qseecom_client_send_service_ireq send_svc_ireq;
	struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
	struct qseecom_command_scm_resp resp;
	struct qseecom_send_svc_cmd_req req;
	void *send_req_ptr;
	size_t req_buf_size;

	if (copy_from_user(&req,
				(void __user *)argp,
				sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}

	if (__validate_send_service_cmd_inputs(data, &req))
		return -EINVAL;

	data->type = QSEECOM_SECURE_SERVICE;

	/* pick the request layout matching the command family */
	switch (req.cmd_id) {
	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
	case QSEOS_RPMB_ERASE_COMMAND:
	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
		send_req_ptr = &send_svc_ireq;
		req_buf_size = sizeof(send_svc_ireq);
		if (__qseecom_process_rpmb_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	case QSEOS_FSM_LTEOTA_REQ_CMD:
	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
	case QSEOS_FSM_IKE_REQ_CMD:
	case QSEOS_FSM_IKE_REQ_RSP_CMD:
	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
	case QSEOS_FSM_OEM_FUSE_READ_ROW:
	case QSEOS_FSM_ENCFS_REQ_CMD:
	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
		send_req_ptr = &send_fsm_key_svc_ireq;
		req_buf_size = sizeof(send_fsm_key_svc_ireq);
		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	default:
		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
		return -EINVAL;
	}

	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
		if (ret) {
			pr_err("Fail to set bw HIGH\n");
			return ret;
		}
	} else {
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clocks with err %d\n", ret);
			goto exit;
		}
	}

	/* flush the shared buffer before TZ reads it */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				(const void *)send_req_ptr,
				req_buf_size, &resp, sizeof(resp));
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		/* release the clock/bandwidth vote taken above */
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		}
		goto exit;
	}
	/* invalidate so the CPU sees TZ's writes to the shared buffer */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		pr_debug("qseos_result_incomplete\n");
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd fail with result: %d\n",
				resp.result);
		}
		/* RPMB status probe reports its result via resp.result */
		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
			pr_warn("RPMB key status is 0x%x\n", resp.result);
			if (put_user(resp.result,
				(uint32_t __user *)req.resp_buf)) {
				ret = -EINVAL;
				goto exit;
			}
			ret = 0;
		}
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with resp.result: %d\n", resp.result);
		ret = -EINVAL;
		break;
	default:
		pr_err("Response result %d not supported\n",
				resp.result);
		ret = -EINVAL;
		break;
	}
	if (!qseecom.support_bus_scaling) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	} else {
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
	}

exit:
	return ret;
}
3015
/*
 * Validate a send-cmd request against the client's shared buffer.  The
 * cmd buffer must be non-NULL and inside the shared buffer; resp_buf may
 * be NULL only when resp_len is 0; all address/length arithmetic is
 * checked for overflow.  Returns 0 when valid.
 */
static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)

{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}
	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
						(req->cmd_req_buf == NULL)) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->cmd_req_buf <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	/*
	 * NOTE(review): a NULL resp_buf with resp_len == 0 passes the check
	 * above but is still rejected here, since 0 is below the (non-zero)
	 * shared-buffer base -- confirm whether callers rely on that.
	 */
	if (((uintptr_t)req->resp_buf <
			data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* guard the cmd_req_len + resp_len sum computed below */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* guard the end-address sums computed below */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* both buffers must also end inside the shared buffer */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
3085
/*
 * Post-process a TZ response when reentrancy is supported.
 *
 * @resp:    SCM response from the secure world; resp->result selects the
 *           handling path, resp->data carries the listener id when blocked.
 * @ptr_app: registered-app entry for the caller; its app_blocked flag is
 *           toggled while an INCOMPLETE command is resumed.
 * @data:    per-fd client handle (used for logging identifiers here).
 *
 * Returns 0 on success, a negative errno on failure, or the error from the
 * incomplete-command resume path.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
			resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
				data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/*
		 * Intentional fall through (no break): once the listener
		 * block is resolved the command is still incomplete and
		 * must be resumed by the INCOMPLETE path below.
		 */

	case QSEOS_RESULT_INCOMPLETE:
		/* Mark the app blocked while the command is re-driven so
		 * concurrent senders can wait on app_block_wq.
		 */
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
				ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
						resp->result);
		return -EINVAL;
	}
}
3124
/*
 * Send a client command to the trusted app over an SCM call.
 *
 * Flow: look up the registered app, build the 32-bit or 64-bit ireq
 * (depending on QSEE version), clean+invalidate the shared buffer, issue
 * the SCM call, then post-process the response (reentrancy-aware or
 * legacy).  The shared buffer is cache-invalidated on every exit path via
 * the 'exit' label so the client sees coherent response data.
 *
 * Returns 0 on success or a negative errno (-ENOENT unknown app, -EFAULT
 * bad 32-bit app physical range, or the SCM/cache-op failure code).
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	int ret2 = 0;	/* result of the mandatory cache-invalidate at exit */
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy QSEE: 32-bit physical addresses in the ireq. */
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* Flush the sglist table so TZ reads current entries. */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* First field of both ireq layouts is the command id. */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* Make the request region visible to the secure world. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
			ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/*
	 * Always invalidate the whole shared buffer on the way out so the
	 * non-secure side reads the response written by TZ, even when the
	 * SCM call itself failed.
	 */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret2);
		return ret2;
	}
	return ret;
}
3262
3263static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3264{
3265 int ret = 0;
3266 struct qseecom_send_cmd_req req;
3267
3268 ret = copy_from_user(&req, argp, sizeof(req));
3269 if (ret) {
3270 pr_err("copy_from_user failed\n");
3271 return ret;
3272 }
3273
3274 if (__validate_send_cmd_inputs(data, &req))
3275 return -EINVAL;
3276
3277 ret = __qseecom_send_cmd(data, &req);
3278
3279 if (ret)
3280 return ret;
3281
3282 return ret;
3283}
3284
3285int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3286 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3287 struct qseecom_dev_handle *data, int i) {
3288
3289 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3290 (req->ifd_data[i].fd > 0)) {
3291 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3292 (req->ifd_data[i].cmd_buf_offset >
3293 req->cmd_req_len - sizeof(uint32_t))) {
3294 pr_err("Invalid offset (req len) 0x%x\n",
3295 req->ifd_data[i].cmd_buf_offset);
3296 return -EINVAL;
3297 }
3298 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3299 (lstnr_resp->ifd_data[i].fd > 0)) {
3300 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3301 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3302 lstnr_resp->resp_len - sizeof(uint32_t))) {
3303 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3304 lstnr_resp->ifd_data[i].cmd_buf_offset);
3305 return -EINVAL;
3306 }
3307 }
3308 return 0;
3309}
3310
/*
 * Patch (or un-patch) ion-fd physical addresses into a command/response
 * buffer, 32-bit sg-entry format.
 *
 * @msg:     qseecom_send_modfd_cmd_req (client app) or
 *           qseecom_send_modfd_listener_resp (listener), selected by
 *           data->type.
 * @cleanup: false = write phys addrs into the buffer and clean caches;
 *           true  = zero the previously written addrs and invalidate.
 * @data:    per-fd client handle; sglistinfo bookkeeping is recorded here
 *           (or on the listener entry) on the non-cleanup pass.
 *
 * Returns 0 on success; -EFAULT/-EINVAL for bad caller type or msg;
 * -ENOMEM on every other failure path (err label), including bad offsets.
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	/* One pass per possible ion fd; entries with fd <= 0 are skipped. */
	for (i = 0; i < MAX_ION_FD; i++) {
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			/* Location inside the cmd buffer to patch. */
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch one 32-bit phys address. */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: patch an sg-entry array. */
			struct qseecom_sg_entry *update;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
						(req->cmd_req_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
				((lstnr_resp->ifd_data[i].cmd_buf_offset >
					(lstnr_resp->resp_len -
					SG_ENTRY_SZ * sg_ptr->nents)))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			/* Undo pass: just make CPU view coherent again. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Patch pass: push buffers to memory for TZ and
			 * record sglist bookkeeping for this fd slot.
			 */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* NOTE(review): all failures collapse to -ENOMEM here, even offset
	 * validation errors — matches historical driver behavior.
	 */
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3527
/*
 * Spill an oversized scatterlist into a freshly allocated DMA-coherent
 * buffer (version-2 sg-list format).
 *
 * Writes a qseecom_sg_list_buf_hdr_64bit at @field pointing at the new
 * buffer, copies every (phys_addr, len) pair into it, and records the
 * allocation in data->client.sec_buf_fd[fd_idx] so the caller can free it
 * later.
 *
 * Returns 0 on success, -ENOMEM on bad fd_idx or allocation failure.
 */
static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
		char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
{
	struct scatterlist *sg = sg_ptr->sgl;
	struct qseecom_sg_entry_64bit *sg_entry;
	struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
	void *buf;
	uint i;
	size_t size;
	dma_addr_t coh_pmem;

	if (fd_idx >= MAX_ION_FD) {
		pr_err("fd_idx [%d] is invalid\n", fd_idx);
		return -ENOMEM;
	}
	buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
	memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
	/* Allocate a contiguous kernel buffer */
	size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
	/* Round up to a whole page.  NOTE(review): adding a full PAGE_SIZE
	 * over-allocates one page when size is already page-aligned;
	 * harmless but intentionally left as-is.
	 */
	size = (size + PAGE_SIZE) & PAGE_MASK;
	buf = dma_alloc_coherent(qseecom.pdev,
			size, &coh_pmem, GFP_KERNEL);
	if (buf == NULL) {
		pr_err("failed to alloc memory for sg buf\n");
		return -ENOMEM;
	}
	/* update qseecom_sg_list_buf_hdr_64bit */
	buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
	buf_hdr->new_buf_phys_addr = coh_pmem;
	buf_hdr->nents_total = sg_ptr->nents;
	/* save the left sg entries into new allocated buf */
	sg_entry = (struct qseecom_sg_entry_64bit *)buf;
	for (i = 0; i < sg_ptr->nents; i++) {
		sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
		sg_entry->len = sg->length;
		sg_entry++;
		sg = sg_next(sg);
	}

	/* Remember the allocation for the later cleanup pass. */
	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
	data->client.sec_buf_fd[fd_idx].vbase = buf;
	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
	data->client.sec_buf_fd[fd_idx].size = size;

	return 0;
}
3574
/*
 * 64-bit variant of __qseecom_update_cmd_buf(): patch (or un-patch)
 * ion-fd physical addresses using 64-bit sg-entry layout.
 *
 * Differences from the 32-bit path: addresses are written as 64-bit
 * values (so no 4GB range check), and a scatterlist larger than
 * QSEECOM_MAX_SG_ENTRY is spilled to a side buffer via
 * __qseecom_allocate_sg_list_buffer() instead of being rejected.
 *
 * @msg / @cleanup / @data: as for __qseecom_update_cmd_buf().
 * Returns 0 on success; -EFAULT/-EINVAL for bad caller type or msg;
 * -ENOMEM on any other failure (err label also frees spilled buffers).
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	/* One pass per possible ion fd; entries with fd <= 0 are skipped. */
	for (i = 0; i < MAX_ION_FD; i++) {
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			/* Oversized list: spill to a side buffer (patch
			 * pass) or free the earlier spill (cleanup pass).
			 */
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch one 64-bit phys address. */
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: patch a 64-bit sg-entry array. */
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
				((lstnr_resp->ifd_data[i].cmd_buf_offset >
					(lstnr_resp->resp_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents)))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
						(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record sglist bookkeeping for this fd slot
			 * (flag bit 1 marks the 64-bit entry format).
			 */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* Free every spilled sg-list side buffer before bailing out. */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3773
3774static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3775 void __user *argp,
3776 bool is_64bit_addr)
3777{
3778 int ret = 0;
3779 int i;
3780 struct qseecom_send_modfd_cmd_req req;
3781 struct qseecom_send_cmd_req send_cmd_req;
3782
3783 ret = copy_from_user(&req, argp, sizeof(req));
3784 if (ret) {
3785 pr_err("copy_from_user failed\n");
3786 return ret;
3787 }
3788
3789 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3790 send_cmd_req.cmd_req_len = req.cmd_req_len;
3791 send_cmd_req.resp_buf = req.resp_buf;
3792 send_cmd_req.resp_len = req.resp_len;
3793
3794 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3795 return -EINVAL;
3796
3797 /* validate offsets */
3798 for (i = 0; i < MAX_ION_FD; i++) {
3799 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3800 pr_err("Invalid offset %d = 0x%x\n",
3801 i, req.ifd_data[i].cmd_buf_offset);
3802 return -EINVAL;
3803 }
3804 }
3805 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3806 (uintptr_t)req.cmd_req_buf);
3807 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3808 (uintptr_t)req.resp_buf);
3809
3810 if (!is_64bit_addr) {
3811 ret = __qseecom_update_cmd_buf(&req, false, data);
3812 if (ret)
3813 return ret;
3814 ret = __qseecom_send_cmd(data, &send_cmd_req);
3815 if (ret)
3816 return ret;
3817 ret = __qseecom_update_cmd_buf(&req, true, data);
3818 if (ret)
3819 return ret;
3820 } else {
3821 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3822 if (ret)
3823 return ret;
3824 ret = __qseecom_send_cmd(data, &send_cmd_req);
3825 if (ret)
3826 return ret;
3827 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3828 if (ret)
3829 return ret;
3830 }
3831
3832 return ret;
3833}
3834
/* Ioctl entry point: send-modfd-cmd using the 32-bit sg-entry format. */
static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, false);
}
3840
/* Ioctl entry point: send-modfd-cmd using the 64-bit sg-entry format. */
static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, true);
}
3846
3847
3848
3849static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3850 struct qseecom_registered_listener_list *svc)
3851{
3852 int ret;
3853
3854 ret = (svc->rcv_req_flag != 0);
3855 return ret || data->abort;
3856}
3857
/*
 * Block a listener until a request arrives from the secure world.
 *
 * Sleeps freezably on the listener's wait queue until rcv_req_flag is set
 * or the client is aborted, then consumes the flag.
 *
 * Returns 0 when a request is pending, -ENODATA for an unknown listener
 * id, -ERESTARTSYS if the wait was interrupted (e.g. by a signal or
 * freezer), or -ENODEV when the client is being aborted.
 */
static int qseecom_receive_req(struct qseecom_dev_handle *data)
{
	int ret = 0;
	struct qseecom_registered_listener_list *this_lstnr;

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (!this_lstnr) {
		pr_err("Invalid listener ID\n");
		return -ENODATA;
	}

	while (1) {
		if (wait_event_freezable(this_lstnr->rcv_req_wq,
				__qseecom_listener_has_rcvd_req(data,
				this_lstnr))) {
			pr_debug("Interrupted: exiting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			/* woken up for different reason */
			return -ERESTARTSYS;
		}

		/* Abort may also satisfy the wake condition. */
		if (data->abort) {
			pr_err("Aborting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			return -ENODEV;
		}
		/* Consume the pending-request flag before returning. */
		this_lstnr->rcv_req_flag = 0;
		break;
	}
	return ret;
}
3889
3890static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3891{
3892 unsigned char app_arch = 0;
3893 struct elf32_hdr *ehdr;
3894 struct elf64_hdr *ehdr64;
3895
3896 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3897
3898 switch (app_arch) {
3899 case ELFCLASS32: {
3900 ehdr = (struct elf32_hdr *)fw_entry->data;
3901 if (fw_entry->size < sizeof(*ehdr)) {
3902 pr_err("%s: Not big enough to be an elf32 header\n",
3903 qseecom.pdev->init_name);
3904 return false;
3905 }
3906 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3907 pr_err("%s: Not an elf32 header\n",
3908 qseecom.pdev->init_name);
3909 return false;
3910 }
3911 if (ehdr->e_phnum == 0) {
3912 pr_err("%s: No loadable segments\n",
3913 qseecom.pdev->init_name);
3914 return false;
3915 }
3916 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3917 sizeof(struct elf32_hdr) > fw_entry->size) {
3918 pr_err("%s: Program headers not within mdt\n",
3919 qseecom.pdev->init_name);
3920 return false;
3921 }
3922 break;
3923 }
3924 case ELFCLASS64: {
3925 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3926 if (fw_entry->size < sizeof(*ehdr64)) {
3927 pr_err("%s: Not big enough to be an elf64 header\n",
3928 qseecom.pdev->init_name);
3929 return false;
3930 }
3931 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3932 pr_err("%s: Not an elf64 header\n",
3933 qseecom.pdev->init_name);
3934 return false;
3935 }
3936 if (ehdr64->e_phnum == 0) {
3937 pr_err("%s: No loadable segments\n",
3938 qseecom.pdev->init_name);
3939 return false;
3940 }
3941 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3942 sizeof(struct elf64_hdr) > fw_entry->size) {
3943 pr_err("%s: Program headers not within mdt\n",
3944 qseecom.pdev->init_name);
3945 return false;
3946 }
3947 break;
3948 }
3949 default: {
3950 pr_err("QSEE app arch %u is not supported\n", app_arch);
3951 return false;
3952 }
3953 }
3954 return true;
3955}
3956
/*
 * Compute the total size of a split TZ app firmware image.
 *
 * Loads "<appname>.mdt" to validate it and learn the ELF class and the
 * number of program headers, then sums the sizes of the mdt plus every
 * "<appname>.bNN" segment blob with overflow checking.
 *
 * @fw_size:  out — total bytes across all blobs (0 on failure).
 * @app_arch: out — ELFCLASS32 or ELFCLASS64 from the mdt's e_ident.
 *
 * Returns 0 on success, -EIO on firmware load/validation errors,
 * -EINVAL on size overflow.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with request_firmware\n");
		ret = -EIO;
		goto err;
	}
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* Sum each segment blob; guard the accumulator against overflow. */
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}

	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
4019
/*
 * Load a split TZ app firmware image into a pre-allocated buffer.
 *
 * Copies "<appname>.mdt" followed by every "<appname>.bNN" segment into
 * @img_data, bounding every copy by @fw_size (as computed by
 * __qseecom_get_fw_size()), and fills @load_req with the mdt length and
 * the running total image length.
 *
 * Returns 0 on success, -EIO on firmware load / unsupported-arch errors,
 * -EINVAL when a blob would exceed @fw_size or overflow the length.
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
					uint32_t fw_size,
					struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;	/* write cursor into the buffer */
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}

	load_req->img_len = fw_entry->size;
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/

	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, app_arch);
		ret = -EIO;
		goto err;
	}
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* Append each segment blob, re-checking bounds on every copy. */
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	/* release_firmware(NULL) is a no-op, safe on every path. */
	release_firmware(fw_entry);
	return ret;
}
4091
4092static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4093 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4094{
4095 size_t len = 0;
4096 int ret = 0;
4097 ion_phys_addr_t pa;
4098 struct ion_handle *ihandle = NULL;
4099 u8 *img_data = NULL;
4100
4101 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
4102 SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4103
4104 if (IS_ERR_OR_NULL(ihandle)) {
4105 pr_err("ION alloc failed\n");
4106 return -ENOMEM;
4107 }
4108 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4109 ihandle);
4110
4111 if (IS_ERR_OR_NULL(img_data)) {
4112 pr_err("ION memory mapping for image loading failed\n");
4113 ret = -ENOMEM;
4114 goto exit_ion_free;
4115 }
4116 /* Get the physical address of the ION BUF */
4117 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4118 if (ret) {
4119 pr_err("physical memory retrieval failure\n");
4120 ret = -EIO;
4121 goto exit_ion_unmap_kernel;
4122 }
4123
4124 *pihandle = ihandle;
4125 *data = img_data;
4126 *paddr = pa;
4127 return ret;
4128
4129exit_ion_unmap_kernel:
4130 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4131exit_ion_free:
4132 ion_free(qseecom.ion_clnt, ihandle);
4133 ihandle = NULL;
4134 return ret;
4135}
4136
4137static void __qseecom_free_img_data(struct ion_handle **ihandle)
4138{
4139 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4140 ion_free(qseecom.ion_clnt, *ihandle);
4141 *ihandle = NULL;
4142}
4143
4144static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4145 uint32_t *app_id)
4146{
4147 int ret = -1;
4148 uint32_t fw_size = 0;
4149 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4150 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4151 struct qseecom_command_scm_resp resp;
4152 u8 *img_data = NULL;
4153 ion_phys_addr_t pa = 0;
4154 struct ion_handle *ihandle = NULL;
4155 void *cmd_buf = NULL;
4156 size_t cmd_len;
4157 uint32_t app_arch = 0;
4158
4159 if (!data || !appname || !app_id) {
4160 pr_err("Null pointer to data or appname or appid\n");
4161 return -EINVAL;
4162 }
4163 *app_id = 0;
4164 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4165 return -EIO;
4166 data->client.app_arch = app_arch;
4167
4168 /* Check and load cmnlib */
4169 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4170 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4171 ret = qseecom_load_commonlib_image(data, "cmnlib");
4172 if (ret) {
4173 pr_err("failed to load cmnlib\n");
4174 return -EIO;
4175 }
4176 qseecom.commonlib_loaded = true;
4177 pr_debug("cmnlib is loaded\n");
4178 }
4179
4180 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4181 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4182 if (ret) {
4183 pr_err("failed to load cmnlib64\n");
4184 return -EIO;
4185 }
4186 qseecom.commonlib64_loaded = true;
4187 pr_debug("cmnlib64 is loaded\n");
4188 }
4189 }
4190
4191 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4192 if (ret)
4193 return ret;
4194
4195 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4196 if (ret) {
4197 ret = -EIO;
4198 goto exit_free_img_data;
4199 }
4200
4201 /* Populate the load_req parameters */
4202 if (qseecom.qsee_version < QSEE_VERSION_40) {
4203 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4204 load_req.mdt_len = load_req.mdt_len;
4205 load_req.img_len = load_req.img_len;
4206 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4207 load_req.phy_addr = (uint32_t)pa;
4208 cmd_buf = (void *)&load_req;
4209 cmd_len = sizeof(struct qseecom_load_app_ireq);
4210 } else {
4211 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4212 load_req_64bit.mdt_len = load_req.mdt_len;
4213 load_req_64bit.img_len = load_req.img_len;
4214 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4215 load_req_64bit.phy_addr = (uint64_t)pa;
4216 cmd_buf = (void *)&load_req_64bit;
4217 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4218 }
4219
4220 if (qseecom.support_bus_scaling) {
4221 mutex_lock(&qsee_bw_mutex);
4222 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4223 mutex_unlock(&qsee_bw_mutex);
4224 if (ret) {
4225 ret = -EIO;
4226 goto exit_free_img_data;
4227 }
4228 }
4229
4230 ret = __qseecom_enable_clk_scale_up(data);
4231 if (ret) {
4232 ret = -EIO;
4233 goto exit_unregister_bus_bw_need;
4234 }
4235
4236 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4237 img_data, fw_size,
4238 ION_IOC_CLEAN_INV_CACHES);
4239 if (ret) {
4240 pr_err("cache operation failed %d\n", ret);
4241 goto exit_disable_clk_vote;
4242 }
4243
4244 /* SCM_CALL to load the image */
4245 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4246 &resp, sizeof(resp));
4247 if (ret) {
4248 pr_err("scm_call to load failed : ret %d\n", ret);
4249 ret = -EIO;
4250 goto exit_disable_clk_vote;
4251 }
4252
4253 switch (resp.result) {
4254 case QSEOS_RESULT_SUCCESS:
4255 *app_id = resp.data;
4256 break;
4257 case QSEOS_RESULT_INCOMPLETE:
4258 ret = __qseecom_process_incomplete_cmd(data, &resp);
4259 if (ret)
4260 pr_err("process_incomplete_cmd FAILED\n");
4261 else
4262 *app_id = resp.data;
4263 break;
4264 case QSEOS_RESULT_FAILURE:
4265 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4266 break;
4267 default:
4268 pr_err("scm call return unknown response %d\n", resp.result);
4269 ret = -EINVAL;
4270 break;
4271 }
4272
4273exit_disable_clk_vote:
4274 __qseecom_disable_clk_scale_down(data);
4275
4276exit_unregister_bus_bw_need:
4277 if (qseecom.support_bus_scaling) {
4278 mutex_lock(&qsee_bw_mutex);
4279 qseecom_unregister_bus_bandwidth_needs(data);
4280 mutex_unlock(&qsee_bw_mutex);
4281 }
4282
4283exit_free_img_data:
4284 __qseecom_free_img_data(&ihandle);
4285 return ret;
4286}
4287
/*
 * qseecom_load_commonlib_image() - load a QSEE common library (cmnlib or
 *                                  cmnlib64) into the secure world.
 * @data:        client handle used for bus-bandwidth and clock voting.
 * @cmnlib_name: library base name; must be shorter than MAX_APP_NAME_SIZE.
 *
 * Stages the split firmware image in an ION buffer owned by
 * qseecom.cmnlib_ion_handle, votes for MEDIUM bandwidth and scaled-up
 * clocks, flushes the cache, and issues the LOAD_SERV_IMAGE SCM call.
 * The ION buffer is always released before returning.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_allocate_img_data(&qseecom.cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	/* Pick the 32- or 64-bit request layout based on QSEE version. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* Make the staged image visible to TZ before the SCM call. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, qseecom.cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener services before it can finish. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&qseecom.cmnlib_ion_handle);
	return ret;
}
4407
4408static int qseecom_unload_commonlib_image(void)
4409{
4410 int ret = -EINVAL;
4411 struct qseecom_unload_lib_image_ireq unload_req = {0};
4412 struct qseecom_command_scm_resp resp;
4413
4414 /* Populate the remaining parameters */
4415 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4416
4417 /* SCM_CALL to load the image */
4418 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4419 sizeof(struct qseecom_unload_lib_image_ireq),
4420 &resp, sizeof(resp));
4421 if (ret) {
4422 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4423 ret = -EIO;
4424 } else {
4425 switch (resp.result) {
4426 case QSEOS_RESULT_SUCCESS:
4427 break;
4428 case QSEOS_RESULT_FAILURE:
4429 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4430 break;
4431 default:
4432 pr_err("scm call return unknown response %d\n",
4433 resp.result);
4434 ret = -EINVAL;
4435 break;
4436 }
4437 }
4438
4439 return ret;
4440}
4441
4442int qseecom_start_app(struct qseecom_handle **handle,
4443 char *app_name, uint32_t size)
4444{
4445 int32_t ret = 0;
4446 unsigned long flags = 0;
4447 struct qseecom_dev_handle *data = NULL;
4448 struct qseecom_check_app_ireq app_ireq;
4449 struct qseecom_registered_app_list *entry = NULL;
4450 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4451 bool found_app = false;
4452 size_t len;
4453 ion_phys_addr_t pa;
4454 uint32_t fw_size, app_arch;
4455 uint32_t app_id = 0;
4456
4457 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4458 pr_err("Not allowed to be called in %d state\n",
4459 atomic_read(&qseecom.qseecom_state));
4460 return -EPERM;
4461 }
4462 if (!app_name) {
4463 pr_err("failed to get the app name\n");
4464 return -EINVAL;
4465 }
4466
Zhen Kong64a6d7282017-06-16 11:55:07 -07004467 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004468 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004469 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004470 return -EINVAL;
4471 }
4472
4473 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4474 if (!(*handle))
4475 return -ENOMEM;
4476
4477 data = kzalloc(sizeof(*data), GFP_KERNEL);
4478 if (!data) {
4479 if (ret == 0) {
4480 kfree(*handle);
4481 *handle = NULL;
4482 }
4483 return -ENOMEM;
4484 }
4485 data->abort = 0;
4486 data->type = QSEECOM_CLIENT_APP;
4487 data->released = false;
4488 data->client.sb_length = size;
4489 data->client.user_virt_sb_base = 0;
4490 data->client.ihandle = NULL;
4491
4492 init_waitqueue_head(&data->abort_wq);
4493
4494 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4495 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4496 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4497 pr_err("Ion client could not retrieve the handle\n");
4498 kfree(data);
4499 kfree(*handle);
4500 *handle = NULL;
4501 return -EINVAL;
4502 }
4503 mutex_lock(&app_access_lock);
4504
4505 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4506 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4507 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4508 if (ret)
4509 goto err;
4510
4511 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4512 if (app_id) {
4513 pr_warn("App id %d for [%s] app exists\n", app_id,
4514 (char *)app_ireq.app_name);
4515 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4516 list_for_each_entry(entry,
4517 &qseecom.registered_app_list_head, list){
4518 if (entry->app_id == app_id) {
4519 entry->ref_cnt++;
4520 found_app = true;
4521 break;
4522 }
4523 }
4524 spin_unlock_irqrestore(
4525 &qseecom.registered_app_list_lock, flags);
4526 if (!found_app)
4527 pr_warn("App_id %d [%s] was loaded but not registered\n",
4528 ret, (char *)app_ireq.app_name);
4529 } else {
4530 /* load the app and get the app_id */
4531 pr_debug("%s: Loading app for the first time'\n",
4532 qseecom.pdev->init_name);
4533 ret = __qseecom_load_fw(data, app_name, &app_id);
4534 if (ret < 0)
4535 goto err;
4536 }
4537 data->client.app_id = app_id;
4538 if (!found_app) {
4539 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4540 if (!entry) {
4541 pr_err("kmalloc for app entry failed\n");
4542 ret = -ENOMEM;
4543 goto err;
4544 }
4545 entry->app_id = app_id;
4546 entry->ref_cnt = 1;
4547 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4548 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4549 ret = -EIO;
4550 kfree(entry);
4551 goto err;
4552 }
4553 entry->app_arch = app_arch;
4554 entry->app_blocked = false;
4555 entry->blocked_on_listener_id = 0;
4556 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4557 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4558 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4559 flags);
4560 }
4561
4562 /* Get the physical address of the ION BUF */
4563 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4564 if (ret) {
4565 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4566 ret);
4567 goto err;
4568 }
4569
4570 /* Populate the structure for sending scm call to load image */
4571 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4572 data->client.ihandle);
4573 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4574 pr_err("ION memory mapping for client shared buf failed\n");
4575 ret = -ENOMEM;
4576 goto err;
4577 }
4578 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4579 data->client.sb_phys = (phys_addr_t)pa;
4580 (*handle)->dev = (void *)data;
4581 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4582 (*handle)->sbuf_len = data->client.sb_length;
4583
4584 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4585 if (!kclient_entry) {
4586 ret = -ENOMEM;
4587 goto err;
4588 }
4589 kclient_entry->handle = *handle;
4590
4591 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4592 list_add_tail(&kclient_entry->list,
4593 &qseecom.registered_kclient_list_head);
4594 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4595
4596 mutex_unlock(&app_access_lock);
4597 return 0;
4598
4599err:
4600 kfree(data);
4601 kfree(*handle);
4602 *handle = NULL;
4603 mutex_unlock(&app_access_lock);
4604 return ret;
4605}
4606EXPORT_SYMBOL(qseecom_start_app);
4607
/*
 * qseecom_shutdown_app() - kernel-client API to detach/unload a TZ app.
 * @handle: pointer to the handle returned by qseecom_start_app(); set to
 *          NULL on successful shutdown.
 *
 * Removes the client from the registered kclient list, then unloads the
 * app via qseecom_unload_app(). The handle, device data and list entry
 * are freed only when the unload succeeded.
 *
 * Return: 0 on success, -EPERM/-EINVAL or the unload error on failure.
 */
int qseecom_shutdown_app(struct qseecom_handle **handle)
{
	int ret = -EINVAL;
	struct qseecom_dev_handle *data;

	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	bool found_handle = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
			atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if ((handle == NULL) || (*handle == NULL)) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = (struct qseecom_dev_handle *) ((*handle)->dev);
	mutex_lock(&app_access_lock);

	/* Unlink this client from the kclient list under the spinlock. */
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
				list) {
		if (kclient->handle == (*handle)) {
			list_del(&kclient->list);
			found_handle = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
	if (!found_handle)
		pr_err("Unable to find the handle, exiting\n");
	else
		ret = qseecom_unload_app(data, false);

	mutex_unlock(&app_access_lock);
	/* Free only on success; ret stays -EINVAL when handle not found. */
	if (ret == 0) {
		kzfree(data);
		kzfree(*handle);
		kzfree(kclient);
		*handle = NULL;
	}

	return ret;
}
EXPORT_SYMBOL(qseecom_shutdown_app);
4655EXPORT_SYMBOL(qseecom_shutdown_app);
4656
/*
 * qseecom_send_command() - kernel-client API to send a command to a TZ app.
 * @handle:   handle from qseecom_start_app().
 * @send_buf: request buffer (inside the handle's shared buffer).
 * @sbuf_len: request length.
 * @resp_buf: response buffer (inside the handle's shared buffer).
 * @rbuf_len: response buffer length.
 *
 * Validates the buffers, votes for bus bandwidth and (when HLOS owns the
 * crypto clock and no vote is active) enables the clocks for the duration
 * of the call, then issues the command under app_access_lock.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;	/* true if we voted clocks here */

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
			atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
						ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		perf_enabled = true;
	}
	/* "securemm" app requires the legacy command format. */
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req);
	data->use_legacy_cmd = false;
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	/* Drop the clock votes we took above, if any. */
	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
EXPORT_SYMBOL(qseecom_send_command);
4735EXPORT_SYMBOL(qseecom_send_command);
4736
4737int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4738{
4739 int ret = 0;
4740
4741 if ((handle == NULL) || (handle->dev == NULL)) {
4742 pr_err("No valid kernel client\n");
4743 return -EINVAL;
4744 }
4745 if (high) {
4746 if (qseecom.support_bus_scaling) {
4747 mutex_lock(&qsee_bw_mutex);
4748 __qseecom_register_bus_bandwidth_needs(handle->dev,
4749 HIGH);
4750 mutex_unlock(&qsee_bw_mutex);
4751 } else {
4752 ret = qseecom_perf_enable(handle->dev);
4753 if (ret)
4754 pr_err("Failed to vote for clock with err %d\n",
4755 ret);
4756 }
4757 } else {
4758 if (!qseecom.support_bus_scaling) {
4759 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4760 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4761 } else {
4762 mutex_lock(&qsee_bw_mutex);
4763 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4764 mutex_unlock(&qsee_bw_mutex);
4765 }
4766 }
4767 return ret;
4768}
4769EXPORT_SYMBOL(qseecom_set_bandwidth);
4770
/*
 * qseecom_process_listener_from_smcinvoke() - service a listener request
 * that arrived through the smcinvoke driver.
 * @desc: SCM descriptor; ret[0]=result/req_cmd, ret[1]=resp_type
 *        (session id for blocked case), ret[2]=listener id. Overwritten
 *        with the processed response before returning.
 *
 * Runs the standard reentrancy path with dummy app/client records since
 * there is no real qseecom client behind this request.
 *
 * Return: 0 on success, negative errno from the reentrancy handler.
 */
int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
{
	struct qseecom_registered_app_list dummy_app_entry = { {0} };
	struct qseecom_dev_handle dummy_private_data = {0};
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	if (!desc) {
		pr_err("desc is NULL\n");
		return -EINVAL;
	}

	resp.result = desc->ret[0];	/*req_cmd*/
	resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
	resp.data = desc->ret[2];	/*listener_id*/

	mutex_lock(&app_access_lock);
	ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
					&dummy_private_data);
	mutex_unlock(&app_access_lock);
	if (ret)
		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
			(int)desc->ret[0], (int)desc->ret[2],
			(int)desc->ret[1], ret);
	/* Hand the processed response back through the descriptor. */
	desc->ret[0] = resp.result;
	desc->ret[1] = resp.resp_type;
	desc->ret[2] = resp.data;
	return ret;
}
EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4800EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4801
4802static int qseecom_send_resp(void)
4803{
4804 qseecom.send_resp_flag = 1;
4805 wake_up_interruptible(&qseecom.send_resp_wq);
4806 return 0;
4807}
4808
4809static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4810{
4811 struct qseecom_registered_listener_list *this_lstnr = NULL;
4812
4813 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4814 this_lstnr = __qseecom_find_svc(data->listener.id);
4815 if (this_lstnr == NULL)
4816 return -EINVAL;
4817 qseecom.send_resp_flag = 1;
4818 this_lstnr->send_resp_flag = 1;
4819 wake_up_interruptible(&qseecom.send_resp_wq);
4820 return 0;
4821}
4822
/*
 * __validate_send_modfd_resp_inputs() - sanity-check a listener response
 * that carries modified ion fds.
 * @data:       listener client (null-checked only).
 * @resp:       response copied from userspace; resp_buf_ptr is still a
 *              user virtual address at this point.
 * @this_lstnr: listener record holding the shared buffer bounds.
 *
 * Rejects null/zero-length responses, guards the pointer arithmetic
 * against unsigned overflow, requires the response buffer to lie fully
 * inside the listener's shared buffer, and bounds every ion-fd offset
 * by the response length.
 *
 * Return: 0 when valid, -EINVAL otherwise.
 */
static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
			struct qseecom_send_modfd_listener_resp *resp,
			struct qseecom_registered_listener_list *this_lstnr)
{
	int i;

	if (!data || !resp || !this_lstnr) {
		pr_err("listener handle or resp msg is null\n");
		return -EINVAL;
	}

	if (resp->resp_buf_ptr == NULL) {
		pr_err("resp buffer is null\n");
		return -EINVAL;
	}
	/* validate resp buf length */
	if ((resp->resp_len == 0) ||
			(resp->resp_len > this_lstnr->sb_length)) {
		pr_err("resp buf length %d not valid\n", resp->resp_len);
		return -EINVAL;
	}

	/* Overflow guards before doing base+length comparisons below. */
	if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)this_lstnr->user_virt_sb_base >
					(ULONG_MAX - this_lstnr->sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* validate resp buf */
	if (((uintptr_t)resp->resp_buf_ptr <
		(uintptr_t)this_lstnr->user_virt_sb_base) ||
		((uintptr_t)resp->resp_buf_ptr >=
		((uintptr_t)this_lstnr->user_virt_sb_base +
				this_lstnr->sb_length)) ||
		(((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
		((uintptr_t)this_lstnr->user_virt_sb_base +
						this_lstnr->sb_length))) {
		pr_err("resp buf is out of shared buffer region\n");
		return -EINVAL;
	}

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
			pr_err("Invalid offset %d = 0x%x\n",
				i, resp->ifd_data[i].cmd_buf_offset);
			return -EINVAL;
		}
	}

	return 0;
}
4878
/*
 * __qseecom_send_modfd_resp() - deliver a listener response containing
 * modified ion fds back to TZ.
 * @data:          listener client.
 * @argp:          user pointer to a qseecom_send_modfd_listener_resp.
 * @is_64bit_addr: selects the 32-/64-bit cmd-buf update helper.
 *
 * Copies and validates the response, rebases resp_buf_ptr from the
 * listener's user virtual address to the kernel mapping of the shared
 * buffer, patches the ion-fd addresses, then raises the response flags
 * and wakes the waiters.
 *
 * Return: 0 on success, -EINVAL on any validation/copy failure.
 */
static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp, bool is_64bit_addr)
{
	struct qseecom_send_modfd_listener_resp resp;
	struct qseecom_registered_listener_list *this_lstnr = NULL;

	if (copy_from_user(&resp, argp, sizeof(resp))) {
		pr_err("copy_from_user failed");
		return -EINVAL;
	}

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (this_lstnr == NULL)
		return -EINVAL;

	if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
		return -EINVAL;

	/* Translate the user VA into the kernel mapping of the buffer. */
	resp.resp_buf_ptr = this_lstnr->sb_virt +
		(uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);

	if (!is_64bit_addr)
		__qseecom_update_cmd_buf(&resp, false, data);
	else
		__qseecom_update_cmd_buf_64(&resp, false, data);
	qseecom.send_resp_flag = 1;
	this_lstnr->send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4909
4910static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4911 void __user *argp)
4912{
4913 return __qseecom_send_modfd_resp(data, argp, false);
4914}
4915
4916static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
4917 void __user *argp)
4918{
4919 return __qseecom_send_modfd_resp(data, argp, true);
4920}
4921
4922static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
4923 void __user *argp)
4924{
4925 struct qseecom_qseos_version_req req;
4926
4927 if (copy_from_user(&req, argp, sizeof(req))) {
4928 pr_err("copy_from_user failed");
4929 return -EINVAL;
4930 }
4931 req.qseos_version = qseecom.qseos_version;
4932 if (copy_to_user(argp, &req, sizeof(req))) {
4933 pr_err("copy_to_user failed");
4934 return -EINVAL;
4935 }
4936 return 0;
4937}
4938
/*
 * __qseecom_enable_clk() - reference-counted enable of the crypto engine
 * clocks (core, interface, bus) for the given CE instance.
 * @ce: CLK_QSEE or CLK_CE_DRV.
 *
 * Only the first reference actually turns the clocks on; later callers
 * just bump clk_access_cnt. On a partial failure the clocks enabled so
 * far are rolled back in reverse order.
 *
 * Return: 0 on success, -EINVAL for an unknown CE, -EIO on clk failure
 * or when the refcount would overflow.
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	/* Already on: just take another reference. */
	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

/* Roll back in reverse order of what was enabled above. */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
5006
5007static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5008{
5009 struct qseecom_clk *qclk;
5010
5011 if (qseecom.no_clock_support)
5012 return;
5013
5014 if (ce == CLK_QSEE)
5015 qclk = &qseecom.qsee;
5016 else
5017 qclk = &qseecom.ce_drv;
5018
5019 mutex_lock(&clk_access_lock);
5020
5021 if (qclk->clk_access_cnt == 0) {
5022 mutex_unlock(&clk_access_lock);
5023 return;
5024 }
5025
5026 if (qclk->clk_access_cnt == 1) {
5027 if (qclk->ce_clk != NULL)
5028 clk_disable_unprepare(qclk->ce_clk);
5029 if (qclk->ce_core_clk != NULL)
5030 clk_disable_unprepare(qclk->ce_core_clk);
5031 if (qclk->ce_bus_clk != NULL)
5032 clk_disable_unprepare(qclk->ce_bus_clk);
5033 }
5034 qclk->clk_access_cnt--;
5035 mutex_unlock(&clk_access_lock);
5036}
5037
/*
 * qsee_vote_for_clock() - take a DFAB or SFPB bandwidth vote for a client.
 * @data:     client; its perf_enabled/fast_load_enabled flag is set on
 *            success so the matching disable can be tracked.
 * @clk_type: CLK_DFAB or CLK_SFPB.
 *
 * First vote of a kind switches the bus-scale client to the level that
 * covers the currently-active vote types (levels 1/2 appear to map to a
 * single vote type and 3 to both — mirrors qsee_disable_clock_vote();
 * confirm against the msm_bus usecase table). When the other vote type
 * is idle, the QSEE core clocks are also enabled around the request.
 *
 * Return: 0 on success or when bus scaling is unused; bus/clk errno
 * otherwise.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			/* SFPB already voted: bump to the combined level. */
			if (qseecom.qsee_sfpb_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					/* Undo clk enable if the vote failed */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			/* Vote already active; just add a reference. */
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			/* DFAB already voted: bump to the combined level. */
			if (qseecom.qsee_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					/* Undo clk enable if the vote failed */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}

			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			/* Vote already active; just add a reference. */
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
5121
5122static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5123 int32_t clk_type)
5124{
5125 int32_t ret = 0;
5126 struct qseecom_clk *qclk;
5127
5128 qclk = &qseecom.qsee;
5129
5130 if (qseecom.no_clock_support)
5131 return;
5132 if (!qseecom.qsee_perf_client)
5133 return;
5134
5135 switch (clk_type) {
5136 case CLK_DFAB:
5137 mutex_lock(&qsee_bw_mutex);
5138 if (qseecom.qsee_bw_count == 0) {
5139 pr_err("Client error.Extra call to disable DFAB clk\n");
5140 mutex_unlock(&qsee_bw_mutex);
5141 return;
5142 }
5143
5144 if (qseecom.qsee_bw_count == 1) {
5145 if (qseecom.qsee_sfpb_bw_count > 0)
5146 ret = msm_bus_scale_client_update_request(
5147 qseecom.qsee_perf_client, 2);
5148 else {
5149 ret = msm_bus_scale_client_update_request(
5150 qseecom.qsee_perf_client, 0);
5151 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5152 __qseecom_disable_clk(CLK_QSEE);
5153 }
5154 if (ret)
5155 pr_err("SFPB Bandwidth req fail (%d)\n",
5156 ret);
5157 else {
5158 qseecom.qsee_bw_count--;
5159 data->perf_enabled = false;
5160 }
5161 } else {
5162 qseecom.qsee_bw_count--;
5163 data->perf_enabled = false;
5164 }
5165 mutex_unlock(&qsee_bw_mutex);
5166 break;
5167 case CLK_SFPB:
5168 mutex_lock(&qsee_bw_mutex);
5169 if (qseecom.qsee_sfpb_bw_count == 0) {
5170 pr_err("Client error.Extra call to disable SFPB clk\n");
5171 mutex_unlock(&qsee_bw_mutex);
5172 return;
5173 }
5174 if (qseecom.qsee_sfpb_bw_count == 1) {
5175 if (qseecom.qsee_bw_count > 0)
5176 ret = msm_bus_scale_client_update_request(
5177 qseecom.qsee_perf_client, 1);
5178 else {
5179 ret = msm_bus_scale_client_update_request(
5180 qseecom.qsee_perf_client, 0);
5181 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5182 __qseecom_disable_clk(CLK_QSEE);
5183 }
5184 if (ret)
5185 pr_err("SFPB Bandwidth req fail (%d)\n",
5186 ret);
5187 else {
5188 qseecom.qsee_sfpb_bw_count--;
5189 data->fast_load_enabled = false;
5190 }
5191 } else {
5192 qseecom.qsee_sfpb_bw_count--;
5193 data->fast_load_enabled = false;
5194 }
5195 mutex_unlock(&qsee_bw_mutex);
5196 break;
5197 default:
5198 pr_err("Clock type not defined\n");
5199 break;
5200 }
5201
5202}
5203
5204static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5205 void __user *argp)
5206{
5207 struct ion_handle *ihandle; /* Ion handle */
5208 struct qseecom_load_img_req load_img_req;
5209 int uret = 0;
5210 int ret;
5211 ion_phys_addr_t pa = 0;
5212 size_t len;
5213 struct qseecom_load_app_ireq load_req;
5214 struct qseecom_load_app_64bit_ireq load_req_64bit;
5215 struct qseecom_command_scm_resp resp;
5216 void *cmd_buf = NULL;
5217 size_t cmd_len;
5218 /* Copy the relevant information needed for loading the image */
5219 if (copy_from_user(&load_img_req,
5220 (void __user *)argp,
5221 sizeof(struct qseecom_load_img_req))) {
5222 pr_err("copy_from_user failed\n");
5223 return -EFAULT;
5224 }
5225
5226 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005227 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005228 load_img_req.ifd_data_fd);
5229 if (IS_ERR_OR_NULL(ihandle)) {
5230 pr_err("Ion client could not retrieve the handle\n");
5231 return -ENOMEM;
5232 }
5233
5234 /* Get the physical address of the ION BUF */
5235 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5236 if (ret) {
5237 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5238 ret);
5239 return ret;
5240 }
5241 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5242 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5243 len, load_img_req.mdt_len,
5244 load_img_req.img_len);
5245 return ret;
5246 }
5247 /* Populate the structure for sending scm call to load image */
5248 if (qseecom.qsee_version < QSEE_VERSION_40) {
5249 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5250 load_req.mdt_len = load_img_req.mdt_len;
5251 load_req.img_len = load_img_req.img_len;
5252 load_req.phy_addr = (uint32_t)pa;
5253 cmd_buf = (void *)&load_req;
5254 cmd_len = sizeof(struct qseecom_load_app_ireq);
5255 } else {
5256 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5257 load_req_64bit.mdt_len = load_img_req.mdt_len;
5258 load_req_64bit.img_len = load_img_req.img_len;
5259 load_req_64bit.phy_addr = (uint64_t)pa;
5260 cmd_buf = (void *)&load_req_64bit;
5261 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5262 }
5263
5264 if (qseecom.support_bus_scaling) {
5265 mutex_lock(&qsee_bw_mutex);
5266 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5267 mutex_unlock(&qsee_bw_mutex);
5268 if (ret) {
5269 ret = -EIO;
5270 goto exit_cpu_restore;
5271 }
5272 }
5273
5274 /* Vote for the SFPB clock */
5275 ret = __qseecom_enable_clk_scale_up(data);
5276 if (ret) {
5277 ret = -EIO;
5278 goto exit_register_bus_bandwidth_needs;
5279 }
5280 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5281 ION_IOC_CLEAN_INV_CACHES);
5282 if (ret) {
5283 pr_err("cache operation failed %d\n", ret);
5284 goto exit_disable_clock;
5285 }
5286 /* SCM_CALL to load the external elf */
5287 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5288 &resp, sizeof(resp));
5289 if (ret) {
5290 pr_err("scm_call to load failed : ret %d\n",
5291 ret);
5292 ret = -EFAULT;
5293 goto exit_disable_clock;
5294 }
5295
5296 switch (resp.result) {
5297 case QSEOS_RESULT_SUCCESS:
5298 break;
5299 case QSEOS_RESULT_INCOMPLETE:
5300 pr_err("%s: qseos result incomplete\n", __func__);
5301 ret = __qseecom_process_incomplete_cmd(data, &resp);
5302 if (ret)
5303 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5304 break;
5305 case QSEOS_RESULT_FAILURE:
5306 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5307 ret = -EFAULT;
5308 break;
5309 default:
5310 pr_err("scm_call response result %d not supported\n",
5311 resp.result);
5312 ret = -EFAULT;
5313 break;
5314 }
5315
5316exit_disable_clock:
5317 __qseecom_disable_clk_scale_down(data);
5318
5319exit_register_bus_bandwidth_needs:
5320 if (qseecom.support_bus_scaling) {
5321 mutex_lock(&qsee_bw_mutex);
5322 uret = qseecom_unregister_bus_bandwidth_needs(data);
5323 mutex_unlock(&qsee_bw_mutex);
5324 if (uret)
5325 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5326 uret, ret);
5327 }
5328
5329exit_cpu_restore:
5330 /* Deallocate the handle */
5331 if (!IS_ERR_OR_NULL(ihandle))
5332 ion_free(qseecom.ion_clnt, ihandle);
5333 return ret;
5334}
5335
5336static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5337{
5338 int ret = 0;
5339 struct qseecom_command_scm_resp resp;
5340 struct qseecom_unload_app_ireq req;
5341
5342 /* unavailable client app */
5343 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5344
5345 /* Populate the structure for sending scm call to unload image */
5346 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5347
5348 /* SCM_CALL to unload the external elf */
5349 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5350 sizeof(struct qseecom_unload_app_ireq),
5351 &resp, sizeof(resp));
5352 if (ret) {
5353 pr_err("scm_call to unload failed : ret %d\n",
5354 ret);
5355 ret = -EFAULT;
5356 goto qseecom_unload_external_elf_scm_err;
5357 }
5358 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5359 ret = __qseecom_process_incomplete_cmd(data, &resp);
5360 if (ret)
5361 pr_err("process_incomplete_cmd fail err: %d\n",
5362 ret);
5363 } else {
5364 if (resp.result != QSEOS_RESULT_SUCCESS) {
5365 pr_err("scm_call to unload image failed resp.result =%d\n",
5366 resp.result);
5367 ret = -EFAULT;
5368 }
5369 }
5370
5371qseecom_unload_external_elf_scm_err:
5372
5373 return ret;
5374}
5375
5376static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5377 void __user *argp)
5378{
5379
5380 int32_t ret;
5381 struct qseecom_qseos_app_load_query query_req;
5382 struct qseecom_check_app_ireq req;
5383 struct qseecom_registered_app_list *entry = NULL;
5384 unsigned long flags = 0;
5385 uint32_t app_arch = 0, app_id = 0;
5386 bool found_app = false;
5387
5388 /* Copy the relevant information needed for loading the image */
5389 if (copy_from_user(&query_req,
5390 (void __user *)argp,
5391 sizeof(struct qseecom_qseos_app_load_query))) {
5392 pr_err("copy_from_user failed\n");
5393 return -EFAULT;
5394 }
5395
5396 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5397 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5398 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5399
5400 ret = __qseecom_check_app_exists(req, &app_id);
5401 if (ret) {
5402 pr_err(" scm call to check if app is loaded failed");
5403 return ret; /* scm call failed */
5404 }
5405 if (app_id) {
5406 pr_debug("App id %d (%s) already exists\n", app_id,
5407 (char *)(req.app_name));
5408 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5409 list_for_each_entry(entry,
5410 &qseecom.registered_app_list_head, list){
5411 if (entry->app_id == app_id) {
5412 app_arch = entry->app_arch;
5413 entry->ref_cnt++;
5414 found_app = true;
5415 break;
5416 }
5417 }
5418 spin_unlock_irqrestore(
5419 &qseecom.registered_app_list_lock, flags);
5420 data->client.app_id = app_id;
5421 query_req.app_id = app_id;
5422 if (app_arch) {
5423 data->client.app_arch = app_arch;
5424 query_req.app_arch = app_arch;
5425 } else {
5426 data->client.app_arch = 0;
5427 query_req.app_arch = 0;
5428 }
5429 strlcpy(data->client.app_name, query_req.app_name,
5430 MAX_APP_NAME_SIZE);
5431 /*
5432 * If app was loaded by appsbl before and was not registered,
5433 * regiser this app now.
5434 */
5435 if (!found_app) {
5436 pr_debug("Register app %d [%s] which was loaded before\n",
5437 ret, (char *)query_req.app_name);
5438 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5439 if (!entry) {
5440 pr_err("kmalloc for app entry failed\n");
5441 return -ENOMEM;
5442 }
5443 entry->app_id = app_id;
5444 entry->ref_cnt = 1;
5445 entry->app_arch = data->client.app_arch;
5446 strlcpy(entry->app_name, data->client.app_name,
5447 MAX_APP_NAME_SIZE);
5448 entry->app_blocked = false;
5449 entry->blocked_on_listener_id = 0;
5450 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5451 flags);
5452 list_add_tail(&entry->list,
5453 &qseecom.registered_app_list_head);
5454 spin_unlock_irqrestore(
5455 &qseecom.registered_app_list_lock, flags);
5456 }
5457 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5458 pr_err("copy_to_user failed\n");
5459 return -EFAULT;
5460 }
5461 return -EEXIST; /* app already loaded */
5462 } else {
5463 return 0; /* app not loaded */
5464 }
5465}
5466
5467static int __qseecom_get_ce_pipe_info(
5468 enum qseecom_key_management_usage_type usage,
5469 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5470{
5471 int ret = -EINVAL;
5472 int i, j;
5473 struct qseecom_ce_info_use *p = NULL;
5474 int total = 0;
5475 struct qseecom_ce_pipe_entry *pcepipe;
5476
5477 switch (usage) {
5478 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5479 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5480 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5481 if (qseecom.support_fde) {
5482 p = qseecom.ce_info.fde;
5483 total = qseecom.ce_info.num_fde;
5484 } else {
5485 pr_err("system does not support fde\n");
5486 return -EINVAL;
5487 }
5488 break;
5489 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5490 if (qseecom.support_pfe) {
5491 p = qseecom.ce_info.pfe;
5492 total = qseecom.ce_info.num_pfe;
5493 } else {
5494 pr_err("system does not support pfe\n");
5495 return -EINVAL;
5496 }
5497 break;
5498 default:
5499 pr_err("unsupported usage %d\n", usage);
5500 return -EINVAL;
5501 }
5502
5503 for (j = 0; j < total; j++) {
5504 if (p->unit_num == unit) {
5505 pcepipe = p->ce_pipe_entry;
5506 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5507 (*ce_hw)[i] = pcepipe->ce_num;
5508 *pipe = pcepipe->ce_pipe_pair;
5509 pcepipe++;
5510 }
5511 ret = 0;
5512 break;
5513 }
5514 p++;
5515 }
5516 return ret;
5517}
5518
/*
 * __qseecom_generate_and_save_key() - ask TZ to generate and persist a
 * hardware key.
 * @data:  client handle, used to service incomplete-command callbacks.
 * @usage: key-management usage slot being provisioned.
 * @ireq:  fully populated QSEOS_GENERATE_KEY request.
 *
 * A QSEOS_RESULT_FAIL_KEY_ID_EXISTS outcome is treated as success
 * (the key was generated on a previous boot).  The QSEE clock is held
 * across the SCM call.
 *
 * Return: 0 on success / key-already-exists, negative error otherwise.
 */
static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_generate_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_generate_ireq),
		&resp, sizeof(resp));
	if (ret) {
		/*
		 * TZ reports "key already exists" as -EINVAL plus a
		 * specific result code; translate that to success.
		 */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
			pr_debug("Key ID exists.\n");
			ret = 0;
		} else {
			pr_err("scm call to generate key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto generate_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
		pr_debug("Key ID exists.\n");
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* A listener service must run before TZ can finish. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
				pr_debug("Key ID exists.\n");
				ret = 0;
			} else {
				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			}
		}
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("gen key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
generate_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5578
/*
 * __qseecom_delete_saved_key() - ask TZ to delete a previously saved
 * hardware key.
 * @data:  client handle, used to service incomplete-command callbacks.
 * @usage: key-management usage slot whose key is deleted.
 * @ireq:  fully populated QSEOS_DELETE_KEY request.
 *
 * QSEOS_RESULT_FAIL_MAX_ATTEMPT (too many bad-password attempts) is
 * mapped to -ERANGE so callers can distinguish it.  The QSEE clock is
 * held across the SCM call.
 *
 * Return: 0 on success, -ERANGE on max attempts, negative error
 * otherwise.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* -EINVAL + MAX_ATTEMPT result means lockout, not EFAULT */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* A listener service must run before TZ can finish. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5639
5640static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5641 enum qseecom_key_management_usage_type usage,
5642 struct qseecom_key_select_ireq *ireq)
5643{
5644 struct qseecom_command_scm_resp resp;
5645 int ret;
5646
5647 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5648 usage >= QSEOS_KM_USAGE_MAX) {
5649 pr_err("Error:: unsupported usage %d\n", usage);
5650 return -EFAULT;
5651 }
5652 ret = __qseecom_enable_clk(CLK_QSEE);
5653 if (ret)
5654 return ret;
5655
5656 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5657 ret = __qseecom_enable_clk(CLK_CE_DRV);
5658 if (ret)
5659 return ret;
5660 }
5661
5662 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5663 ireq, sizeof(struct qseecom_key_select_ireq),
5664 &resp, sizeof(struct qseecom_command_scm_resp));
5665 if (ret) {
5666 if (ret == -EINVAL &&
5667 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5668 pr_debug("Max attempts to input password reached.\n");
5669 ret = -ERANGE;
5670 } else if (ret == -EINVAL &&
5671 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5672 pr_debug("Set Key operation under processing...\n");
5673 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5674 } else {
5675 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5676 ret);
5677 ret = -EFAULT;
5678 }
5679 goto set_key_exit;
5680 }
5681
5682 switch (resp.result) {
5683 case QSEOS_RESULT_SUCCESS:
5684 break;
5685 case QSEOS_RESULT_INCOMPLETE:
5686 ret = __qseecom_process_incomplete_cmd(data, &resp);
5687 if (ret) {
5688 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5689 resp.result);
5690 if (resp.result ==
5691 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5692 pr_debug("Set Key operation under processing...\n");
5693 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5694 }
5695 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5696 pr_debug("Max attempts to input password reached.\n");
5697 ret = -ERANGE;
5698 }
5699 }
5700 break;
5701 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5702 pr_debug("Max attempts to input password reached.\n");
5703 ret = -ERANGE;
5704 break;
5705 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5706 pr_debug("Set Key operation under processing...\n");
5707 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5708 break;
5709 case QSEOS_RESULT_FAILURE:
5710 default:
5711 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5712 ret = -EINVAL;
5713 break;
5714 }
5715set_key_exit:
5716 __qseecom_disable_clk(CLK_QSEE);
5717 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5718 __qseecom_disable_clk(CLK_CE_DRV);
5719 return ret;
5720}
5721
/*
 * __qseecom_update_current_key_user_info() - ask TZ to update the user
 * info (password hash) bound to a stored key.
 * @data:  client handle, used to service incomplete-command callbacks.
 * @usage: key-management usage slot.
 * @ireq:  fully populated QSEOS_UPDATE_KEY_USERINFO request carrying
 *         the current and new hashes.
 *
 * Propagates QSEOS_RESULT_FAIL_PENDING_OPERATION so the caller can
 * retry while TZ is still processing.  The QSEE clock is held across
 * the SCM call.
 *
 * Return: 0 on success, QSEOS_RESULT_FAIL_PENDING_OPERATION if busy,
 * negative error otherwise.
 */
static int __qseecom_update_current_key_user_info(
		struct qseecom_dev_handle *data,
		enum qseecom_key_management_usage_type usage,
		struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* -EINVAL + PENDING result means "retry later", not fatal */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
				ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* A listener service must run before TZ can finish. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5783
5784
5785static int qseecom_enable_ice_setup(int usage)
5786{
5787 int ret = 0;
5788
5789 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5790 ret = qcom_ice_setup_ice_hw("ufs", true);
5791 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5792 ret = qcom_ice_setup_ice_hw("sdcc", true);
5793
5794 return ret;
5795}
5796
5797static int qseecom_disable_ice_setup(int usage)
5798{
5799 int ret = 0;
5800
5801 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5802 ret = qcom_ice_setup_ice_hw("ufs", false);
5803 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5804 ret = qcom_ice_setup_ice_hw("sdcc", false);
5805
5806 return ret;
5807}
5808
5809static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5810{
5811 struct qseecom_ce_info_use *pce_info_use, *p;
5812 int total = 0;
5813 int i;
5814
5815 switch (usage) {
5816 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5817 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5818 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5819 p = qseecom.ce_info.fde;
5820 total = qseecom.ce_info.num_fde;
5821 break;
5822 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5823 p = qseecom.ce_info.pfe;
5824 total = qseecom.ce_info.num_pfe;
5825 break;
5826 default:
5827 pr_err("unsupported usage %d\n", usage);
5828 return -EINVAL;
5829 }
5830
5831 pce_info_use = NULL;
5832
5833 for (i = 0; i < total; i++) {
5834 if (p->unit_num == unit) {
5835 pce_info_use = p;
5836 break;
5837 }
5838 p++;
5839 }
5840 if (!pce_info_use) {
5841 pr_err("can not find %d\n", unit);
5842 return -EINVAL;
5843 }
5844 return pce_info_use->num_ce_pipe_entries;
5845}
5846
5847static int qseecom_create_key(struct qseecom_dev_handle *data,
5848 void __user *argp)
5849{
5850 int i;
5851 uint32_t *ce_hw = NULL;
5852 uint32_t pipe = 0;
5853 int ret = 0;
5854 uint32_t flags = 0;
5855 struct qseecom_create_key_req create_key_req;
5856 struct qseecom_key_generate_ireq generate_key_ireq;
5857 struct qseecom_key_select_ireq set_key_ireq;
5858 uint32_t entries = 0;
5859
5860 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5861 if (ret) {
5862 pr_err("copy_from_user failed\n");
5863 return ret;
5864 }
5865
5866 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5867 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5868 pr_err("unsupported usage %d\n", create_key_req.usage);
5869 ret = -EFAULT;
5870 return ret;
5871 }
5872 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5873 create_key_req.usage);
5874 if (entries <= 0) {
5875 pr_err("no ce instance for usage %d instance %d\n",
5876 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5877 ret = -EINVAL;
5878 return ret;
5879 }
5880
5881 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5882 if (!ce_hw) {
5883 ret = -ENOMEM;
5884 return ret;
5885 }
5886 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5887 DEFAULT_CE_INFO_UNIT);
5888 if (ret) {
5889 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5890 ret = -EINVAL;
5891 goto free_buf;
5892 }
5893
5894 if (qseecom.fde_key_size)
5895 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5896 else
5897 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5898
5899 generate_key_ireq.flags = flags;
5900 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5901 memset((void *)generate_key_ireq.key_id,
5902 0, QSEECOM_KEY_ID_SIZE);
5903 memset((void *)generate_key_ireq.hash32,
5904 0, QSEECOM_HASH_SIZE);
5905 memcpy((void *)generate_key_ireq.key_id,
5906 (void *)key_id_array[create_key_req.usage].desc,
5907 QSEECOM_KEY_ID_SIZE);
5908 memcpy((void *)generate_key_ireq.hash32,
5909 (void *)create_key_req.hash32,
5910 QSEECOM_HASH_SIZE);
5911
5912 ret = __qseecom_generate_and_save_key(data,
5913 create_key_req.usage, &generate_key_ireq);
5914 if (ret) {
5915 pr_err("Failed to generate key on storage: %d\n", ret);
5916 goto free_buf;
5917 }
5918
5919 for (i = 0; i < entries; i++) {
5920 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5921 if (create_key_req.usage ==
5922 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5923 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5924 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5925
5926 } else if (create_key_req.usage ==
5927 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5928 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5929 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5930
5931 } else {
5932 set_key_ireq.ce = ce_hw[i];
5933 set_key_ireq.pipe = pipe;
5934 }
5935 set_key_ireq.flags = flags;
5936
5937 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5938 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5939 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5940 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5941 memcpy((void *)set_key_ireq.key_id,
5942 (void *)key_id_array[create_key_req.usage].desc,
5943 QSEECOM_KEY_ID_SIZE);
5944 memcpy((void *)set_key_ireq.hash32,
5945 (void *)create_key_req.hash32,
5946 QSEECOM_HASH_SIZE);
5947 /*
5948 * It will return false if it is GPCE based crypto instance or
5949 * ICE is setup properly
5950 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07005951 ret = qseecom_enable_ice_setup(create_key_req.usage);
5952 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005953 goto free_buf;
5954
5955 do {
5956 ret = __qseecom_set_clear_ce_key(data,
5957 create_key_req.usage,
5958 &set_key_ireq);
5959 /*
5960 * wait a little before calling scm again to let other
5961 * processes run
5962 */
5963 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
5964 msleep(50);
5965
5966 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
5967
5968 qseecom_disable_ice_setup(create_key_req.usage);
5969
5970 if (ret) {
5971 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
5972 pipe, ce_hw[i], ret);
5973 goto free_buf;
5974 } else {
5975 pr_err("Set the key successfully\n");
5976 if ((create_key_req.usage ==
5977 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
5978 (create_key_req.usage ==
5979 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
5980 goto free_buf;
5981 }
5982 }
5983
5984free_buf:
5985 kzfree(ce_hw);
5986 return ret;
5987}
5988
5989static int qseecom_wipe_key(struct qseecom_dev_handle *data,
5990 void __user *argp)
5991{
5992 uint32_t *ce_hw = NULL;
5993 uint32_t pipe = 0;
5994 int ret = 0;
5995 uint32_t flags = 0;
5996 int i, j;
5997 struct qseecom_wipe_key_req wipe_key_req;
5998 struct qseecom_key_delete_ireq delete_key_ireq;
5999 struct qseecom_key_select_ireq clear_key_ireq;
6000 uint32_t entries = 0;
6001
6002 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6003 if (ret) {
6004 pr_err("copy_from_user failed\n");
6005 return ret;
6006 }
6007
6008 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6009 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6010 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6011 ret = -EFAULT;
6012 return ret;
6013 }
6014
6015 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6016 wipe_key_req.usage);
6017 if (entries <= 0) {
6018 pr_err("no ce instance for usage %d instance %d\n",
6019 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6020 ret = -EINVAL;
6021 return ret;
6022 }
6023
6024 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6025 if (!ce_hw) {
6026 ret = -ENOMEM;
6027 return ret;
6028 }
6029
6030 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6031 DEFAULT_CE_INFO_UNIT);
6032 if (ret) {
6033 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6034 ret = -EINVAL;
6035 goto free_buf;
6036 }
6037
6038 if (wipe_key_req.wipe_key_flag) {
6039 delete_key_ireq.flags = flags;
6040 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6041 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6042 memcpy((void *)delete_key_ireq.key_id,
6043 (void *)key_id_array[wipe_key_req.usage].desc,
6044 QSEECOM_KEY_ID_SIZE);
6045 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6046
6047 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6048 &delete_key_ireq);
6049 if (ret) {
6050 pr_err("Failed to delete key from ssd storage: %d\n",
6051 ret);
6052 ret = -EFAULT;
6053 goto free_buf;
6054 }
6055 }
6056
6057 for (j = 0; j < entries; j++) {
6058 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6059 if (wipe_key_req.usage ==
6060 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6061 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6062 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6063 } else if (wipe_key_req.usage ==
6064 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6065 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6066 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6067 } else {
6068 clear_key_ireq.ce = ce_hw[j];
6069 clear_key_ireq.pipe = pipe;
6070 }
6071 clear_key_ireq.flags = flags;
6072 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6073 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6074 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6075 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6076
6077 /*
6078 * It will return false if it is GPCE based crypto instance or
6079 * ICE is setup properly
6080 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006081 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6082 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006083 goto free_buf;
6084
6085 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6086 &clear_key_ireq);
6087
6088 qseecom_disable_ice_setup(wipe_key_req.usage);
6089
6090 if (ret) {
6091 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6092 pipe, ce_hw[j], ret);
6093 ret = -EFAULT;
6094 goto free_buf;
6095 }
6096 }
6097
6098free_buf:
6099 kzfree(ce_hw);
6100 return ret;
6101}
6102
/*
 * qseecom_update_key_user_info() - ioctl backend to change the user
 * info (password hash) bound to a stored key.
 * @data: client handle, used to service incomplete-command callbacks.
 * @argp: userspace pointer to struct qseecom_update_key_userinfo_req
 *        carrying the current and new hashes.
 *
 * Builds a QSEOS_UPDATE_KEY_USERINFO request for the key identified by
 * key_id_array[usage] and retries while TZ reports a pending
 * operation.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
			void __user *argp)
{
	int ret = 0;
	uint32_t flags = 0;
	struct qseecom_update_key_userinfo_req update_key_req;
	struct qseecom_key_userinfo_update_ireq ireq;

	ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}

	if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
		return -EFAULT;
	}

	ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;

	/* Key size is a global target property, advertised via flags. */
	if (qseecom.fde_key_size)
		flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
	else
		flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;

	ireq.flags = flags;
	memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
	memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
	memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
	memcpy((void *)ireq.key_id,
		(void *)key_id_array[update_key_req.usage].desc,
		QSEECOM_KEY_ID_SIZE);
	memcpy((void *)ireq.current_hash32,
		(void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
	memcpy((void *)ireq.new_hash32,
		(void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);

	do {
		ret = __qseecom_update_current_key_user_info(data,
						update_key_req.usage,
						&ireq);
		/*
		 * wait a little before calling scm again to let other
		 * processes run
		 */
		if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
			msleep(50);

	} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
	if (ret) {
		pr_err("Failed to update key info: %d\n", ret);
		return ret;
	}
	return ret;

}
6161static int qseecom_is_es_activated(void __user *argp)
6162{
6163 struct qseecom_is_es_activated_req req;
6164 struct qseecom_command_scm_resp resp;
6165 int ret;
6166
6167 if (qseecom.qsee_version < QSEE_VERSION_04) {
6168 pr_err("invalid qsee version\n");
6169 return -ENODEV;
6170 }
6171
6172 if (argp == NULL) {
6173 pr_err("arg is null\n");
6174 return -EINVAL;
6175 }
6176
6177 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6178 &req, sizeof(req), &resp, sizeof(resp));
6179 if (ret) {
6180 pr_err("scm_call failed\n");
6181 return ret;
6182 }
6183
6184 req.is_activated = resp.result;
6185 ret = copy_to_user(argp, &req, sizeof(req));
6186 if (ret) {
6187 pr_err("copy_to_user failed\n");
6188 return ret;
6189 }
6190
6191 return 0;
6192}
6193
6194static int qseecom_save_partition_hash(void __user *argp)
6195{
6196 struct qseecom_save_partition_hash_req req;
6197 struct qseecom_command_scm_resp resp;
6198 int ret;
6199
6200 memset(&resp, 0x00, sizeof(resp));
6201
6202 if (qseecom.qsee_version < QSEE_VERSION_04) {
6203 pr_err("invalid qsee version\n");
6204 return -ENODEV;
6205 }
6206
6207 if (argp == NULL) {
6208 pr_err("arg is null\n");
6209 return -EINVAL;
6210 }
6211
6212 ret = copy_from_user(&req, argp, sizeof(req));
6213 if (ret) {
6214 pr_err("copy_from_user failed\n");
6215 return ret;
6216 }
6217
6218 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6219 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6220 if (ret) {
6221 pr_err("qseecom_scm_call failed\n");
6222 return ret;
6223 }
6224
6225 return 0;
6226}
6227
/*
 * qseecom_mdtp_cipher_dip() - cipher a Device Integrity Partition (DIP)
 * buffer through TZ's MDTP service.
 * @argp: userspace pointer to a struct qseecom_mdtp_cipher_dip_req
 *        carrying the in/out userspace buffers, their sizes, and the
 *        cipher direction (0 or 1).
 *
 * Input and output are bounced through page-aligned kernel buffers whose
 * physical addresses are handed to TZ via scm_call2(); cache maintenance
 * (dmac_flush_range) around the call keeps the CPU view coherent with
 * TZ's non-coherent access. The do/while(0) block is a single-exit
 * error-handling idiom: every failure breaks out to the common kzfree
 * cleanup at the bottom (kzfree(NULL) is a no-op).
 *
 * Returns 0 on success, a negative errno on validation/allocation/SCM
 * failure, or a positive uncopied-byte count from copy_*_user.
 */
static int qseecom_mdtp_cipher_dip(void __user *argp)
{
	struct qseecom_mdtp_cipher_dip_req req;
	u32 tzbuflenin, tzbuflenout;
	char *tzbufin = NULL, *tzbufout = NULL;
	struct scm_desc desc = {0};
	int ret;

	do {
		/* Copy the parameters from userspace */
		if (argp == NULL) {
			pr_err("arg is null\n");
			ret = -EINVAL;
			break;
		}

		ret = copy_from_user(&req, argp, sizeof(req));
		if (ret) {
			pr_err("copy_from_user failed, ret= %d\n", ret);
			break;
		}

		/* Bound both sizes by MAX_DIP before any allocation */
		if (req.in_buf == NULL || req.out_buf == NULL ||
			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
				req.direction > 1) {
			pr_err("invalid parameters\n");
			ret = -EINVAL;
			break;
		}

		/* Copy the input buffer from userspace to kernel space */
		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
		if (!tzbufin) {
			pr_err("error allocating in buffer\n");
			ret = -ENOMEM;
			break;
		}

		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
		if (ret) {
			pr_err("copy_from_user failed, ret=%d\n", ret);
			break;
		}

		/* Push the plaintext out of the CPU caches before TZ reads it */
		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);

		/* Prepare the output buffer in kernel space */
		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
		if (!tzbufout) {
			pr_err("error allocating out buffer\n");
			ret = -ENOMEM;
			break;
		}

		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);

		/* Send the command to TZ */
		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
		desc.args[0] = virt_to_phys(tzbufin);
		desc.args[1] = req.in_buf_size;
		desc.args[2] = virt_to_phys(tzbufout);
		desc.args[3] = req.out_buf_size;
		desc.args[4] = req.direction;

		/* QSEE crypto clock must be voted on around the SCM call */
		ret = __qseecom_enable_clk(CLK_QSEE);
		if (ret)
			break;

		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);

		__qseecom_disable_clk(CLK_QSEE);

		if (ret) {
			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
				ret);
			break;
		}

		/* Copy the output buffer from kernel space to userspace */
		/* second flush discards stale cache lines so we read TZ's data */
		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
		if (ret) {
			pr_err("copy_to_user failed, ret=%d\n", ret);
			break;
		}
	} while (0);

	/* kzfree zeroizes before freeing — these buffers may hold key material */
	kzfree(tzbufin);
	kzfree(tzbufout);

	return ret;
}
6323
/*
 * __qseecom_qteec_validate_msg() - sanity-check a QTEEC request before
 * it is translated and sent to TZ.
 * @data: client handle; must be a QSEECOM_CLIENT_APP with a mapped
 *        shared buffer (client.ihandle).
 * @req:  request whose req_ptr/resp_ptr are still *userspace* virtual
 *        addresses that must lie fully inside the client's shared
 *        buffer [user_virt_sb_base, user_virt_sb_base + sb_length).
 *
 * Checks, in order: handle validity, client type, req_len+resp_len
 * overflow, combined length vs. shared-buffer size, NULL pointers,
 * both pointers starting inside the shared buffer, non-zero lengths,
 * pointer+length overflow, base+length overflow, and finally that both
 * [ptr, ptr+len) ranges end inside the shared buffer. The overflow
 * checks must precede the range checks they protect.
 *
 * Returns 0 when the request is valid, -EINVAL/-EFAULT/-ENOMEM on the
 * first failed check.
 */
static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req)
{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->type != QSEECOM_CLIENT_APP)
		return -EFAULT;

	/* guard the sum used in the shared-buffer size check below */
	if (req->req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if (req->req_len + req->resp_len > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
		(req->req_len + req->resp_len), data->client.sb_length);
		return -ENOMEM;
	}

	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* request buffer must start within the client's shared buffer */
	if (((uintptr_t)req->req_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->req_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	/* response buffer must start within the client's shared buffer */
	if (((uintptr_t)req->resp_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if ((req->req_len == 0) || (req->resp_len == 0)) {
		pr_err("cmd buf lengtgh/response buf length not valid\n");
		return -EINVAL;
	}

	/* guard ptr + len arithmetic in the final range check */
	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
		pr_err("Integer overflow in req_len & req_ptr\n");
		return -EINVAL;
	}

	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_ptr\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* both [ptr, ptr+len) ranges must end inside the shared buffer */
	if ((((uintptr_t)req->req_ptr + req->req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_ptr + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
6398
6399static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6400 uint32_t fd_idx, struct sg_table *sg_ptr)
6401{
6402 struct scatterlist *sg = sg_ptr->sgl;
6403 struct qseecom_sg_entry *sg_entry;
6404 void *buf;
6405 uint i;
6406 size_t size;
6407 dma_addr_t coh_pmem;
6408
6409 if (fd_idx >= MAX_ION_FD) {
6410 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6411 return -ENOMEM;
6412 }
6413 /*
6414 * Allocate a buffer, populate it with number of entry plus
6415 * each sg entry's phy addr and length; then return the
6416 * phy_addr of the buffer.
6417 */
6418 size = sizeof(uint32_t) +
6419 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6420 size = (size + PAGE_SIZE) & PAGE_MASK;
6421 buf = dma_alloc_coherent(qseecom.pdev,
6422 size, &coh_pmem, GFP_KERNEL);
6423 if (buf == NULL) {
6424 pr_err("failed to alloc memory for sg buf\n");
6425 return -ENOMEM;
6426 }
6427 *(uint32_t *)buf = sg_ptr->nents;
6428 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6429 for (i = 0; i < sg_ptr->nents; i++) {
6430 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6431 sg_entry->len = sg->length;
6432 sg_entry++;
6433 sg = sg_next(sg);
6434 }
6435 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6436 data->client.sec_buf_fd[fd_idx].vbase = buf;
6437 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6438 data->client.sec_buf_fd[fd_idx].size = size;
6439 return 0;
6440}
6441
6442static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6443 struct qseecom_dev_handle *data, bool cleanup)
6444{
6445 struct ion_handle *ihandle;
6446 int ret = 0;
6447 int i = 0;
6448 uint32_t *update;
6449 struct sg_table *sg_ptr = NULL;
6450 struct scatterlist *sg;
6451 struct qseecom_param_memref *memref;
6452
6453 if (req == NULL) {
6454 pr_err("Invalid address\n");
6455 return -EINVAL;
6456 }
6457 for (i = 0; i < MAX_ION_FD; i++) {
6458 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006459 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006460 req->ifd_data[i].fd);
6461 if (IS_ERR_OR_NULL(ihandle)) {
6462 pr_err("Ion client can't retrieve the handle\n");
6463 return -ENOMEM;
6464 }
6465 if ((req->req_len < sizeof(uint32_t)) ||
6466 (req->ifd_data[i].cmd_buf_offset >
6467 req->req_len - sizeof(uint32_t))) {
6468 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6469 req->req_len,
6470 req->ifd_data[i].cmd_buf_offset);
6471 return -EINVAL;
6472 }
6473 update = (uint32_t *)((char *) req->req_ptr +
6474 req->ifd_data[i].cmd_buf_offset);
6475 if (!update) {
6476 pr_err("update pointer is NULL\n");
6477 return -EINVAL;
6478 }
6479 } else {
6480 continue;
6481 }
6482 /* Populate the cmd data structure with the phys_addr */
6483 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6484 if (IS_ERR_OR_NULL(sg_ptr)) {
6485 pr_err("IOn client could not retrieve sg table\n");
6486 goto err;
6487 }
6488 sg = sg_ptr->sgl;
6489 if (sg == NULL) {
6490 pr_err("sg is NULL\n");
6491 goto err;
6492 }
6493 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6494 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6495 sg_ptr->nents, sg->length);
6496 goto err;
6497 }
6498 /* clean up buf for pre-allocated fd */
6499 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6500 (*update)) {
6501 if (data->client.sec_buf_fd[i].vbase)
6502 dma_free_coherent(qseecom.pdev,
6503 data->client.sec_buf_fd[i].size,
6504 data->client.sec_buf_fd[i].vbase,
6505 data->client.sec_buf_fd[i].pbase);
6506 memset((void *)update, 0,
6507 sizeof(struct qseecom_param_memref));
6508 memset(&(data->client.sec_buf_fd[i]), 0,
6509 sizeof(struct qseecom_sec_buf_fd_info));
6510 goto clean;
6511 }
6512
6513 if (*update == 0) {
6514 /* update buf for pre-allocated fd from secure heap*/
6515 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6516 sg_ptr);
6517 if (ret) {
6518 pr_err("Failed to handle buf for fd[%d]\n", i);
6519 goto err;
6520 }
6521 memref = (struct qseecom_param_memref *)update;
6522 memref->buffer =
6523 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6524 memref->size =
6525 (uint32_t)(data->client.sec_buf_fd[i].size);
6526 } else {
6527 /* update buf for fd from non-secure qseecom heap */
6528 if (sg_ptr->nents != 1) {
6529 pr_err("Num of scat entr (%d) invalid\n",
6530 sg_ptr->nents);
6531 goto err;
6532 }
6533 if (cleanup)
6534 *update = 0;
6535 else
6536 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6537 }
6538clean:
6539 if (cleanup) {
6540 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6541 ihandle, NULL, sg->length,
6542 ION_IOC_INV_CACHES);
6543 if (ret) {
6544 pr_err("cache operation failed %d\n", ret);
6545 goto err;
6546 }
6547 } else {
6548 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6549 ihandle, NULL, sg->length,
6550 ION_IOC_CLEAN_INV_CACHES);
6551 if (ret) {
6552 pr_err("cache operation failed %d\n", ret);
6553 goto err;
6554 }
6555 data->sglistinfo_ptr[i].indexAndFlags =
6556 SGLISTINFO_SET_INDEX_FLAG(
6557 (sg_ptr->nents == 1), 0,
6558 req->ifd_data[i].cmd_buf_offset);
6559 data->sglistinfo_ptr[i].sizeOrCount =
6560 (sg_ptr->nents == 1) ?
6561 sg->length : sg_ptr->nents;
6562 data->sglist_cnt = i + 1;
6563 }
6564 /* Deallocate the handle */
6565 if (!IS_ERR_OR_NULL(ihandle))
6566 ion_free(qseecom.ion_clnt, ihandle);
6567 }
6568 return ret;
6569err:
6570 if (!IS_ERR_OR_NULL(ihandle))
6571 ion_free(qseecom.ion_clnt, ihandle);
6572 return -ENOMEM;
6573}
6574
/*
 * __qseecom_qteec_issue_cmd() - common path for QTEEC open-session,
 * close-session and request-cancellation commands.
 * @data:   client handle (must be a registered QSEECOM_CLIENT_APP).
 * @req:    request with *userspace* req_ptr/resp_ptr, already expected
 *          to lie inside the client's shared buffer.
 * @cmd_id: QSEOS_TEE_* command to issue.
 *
 * The original userspace pointers are saved in req_ptr/resp_ptr before
 * req's fields are overwritten with kernel virtual addresses, because
 * the phys-address translation for the TZ request must start from the
 * user virtual addresses. For open-session/cancellation, ion fds in the
 * modfd request are patched in before the call and restored afterwards.
 * The exit label performs cleanup (cache invalidate + modfd restore) on
 * both success and failure; ret2 keeps cleanup errors from clobbering
 * the primary error in ret.
 *
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req, uint32_t cmd_id)
{
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int ret2 = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = __qseecom_qteec_validate_msg(data, req);
	if (ret)
		return ret;

	/* keep the user virtual addresses for the phys translation below */
	req_ptr = req->req_ptr;
	resp_ptr = req->resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* switch req to kernel virtual addresses for in-kernel patching */
	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->req_ptr);
	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->resp_ptr);

	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, false);
		if (ret)
			return ret;
	}

	/* build the 32-bit or 64-bit TZ request depending on QSEE version */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req->req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req->resp_len;
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req->req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req->resp_len;
		/* a 32-bit TA cannot address buffers at or above 4G */
		if ((data->client.app_arch == ELFCLASS32) &&
			((ireq_64bit.req_ptr >=
				PHY_ADDR_4G - ireq_64bit.req_len) ||
			(ireq_64bit.resp_ptr >=
				PHY_ADDR_4G - ireq_64bit.resp_len))){
			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
				data->client.app_name, data->client.app_id);
			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
				ireq_64bit.req_ptr, ireq_64bit.req_len,
				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
			return -EFAULT;
		}
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
	}
	/* first word of the request selects the TZ command */
	if (qseecom.whitelist_support == true
		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
	else
		*(uint32_t *)cmd_buf = cmd_id;

	reqd_len_sb_in = req->req_len + req->resp_len;
	/* flush shared buffer so TZ sees the request contents */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
						resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/* cleanup runs on success and failure; ret2 preserves ret */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
			data->client.sb_virt, data->client.sb_length,
			ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret);
		return ret2;
	}

	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret2 = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, true);
		if (ret2)
			return ret2;
	}
	return ret;
}
6737
6738static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6739 void __user *argp)
6740{
6741 struct qseecom_qteec_modfd_req req;
6742 int ret = 0;
6743
6744 ret = copy_from_user(&req, argp,
6745 sizeof(struct qseecom_qteec_modfd_req));
6746 if (ret) {
6747 pr_err("copy_from_user failed\n");
6748 return ret;
6749 }
6750 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6751 QSEOS_TEE_OPEN_SESSION);
6752
6753 return ret;
6754}
6755
6756static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6757 void __user *argp)
6758{
6759 struct qseecom_qteec_req req;
6760 int ret = 0;
6761
6762 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6763 if (ret) {
6764 pr_err("copy_from_user failed\n");
6765 return ret;
6766 }
6767 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6768 return ret;
6769}
6770
6771static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6772 void __user *argp)
6773{
6774 struct qseecom_qteec_modfd_req req;
6775 struct qseecom_command_scm_resp resp;
6776 struct qseecom_qteec_ireq ireq;
6777 struct qseecom_qteec_64bit_ireq ireq_64bit;
6778 struct qseecom_registered_app_list *ptr_app;
6779 bool found_app = false;
6780 unsigned long flags;
6781 int ret = 0;
6782 int i = 0;
6783 uint32_t reqd_len_sb_in = 0;
6784 void *cmd_buf = NULL;
6785 size_t cmd_len;
6786 struct sglist_info *table = data->sglistinfo_ptr;
6787 void *req_ptr = NULL;
6788 void *resp_ptr = NULL;
6789
6790 ret = copy_from_user(&req, argp,
6791 sizeof(struct qseecom_qteec_modfd_req));
6792 if (ret) {
6793 pr_err("copy_from_user failed\n");
6794 return ret;
6795 }
6796 ret = __qseecom_qteec_validate_msg(data,
6797 (struct qseecom_qteec_req *)(&req));
6798 if (ret)
6799 return ret;
6800 req_ptr = req.req_ptr;
6801 resp_ptr = req.resp_ptr;
6802
6803 /* find app_id & img_name from list */
6804 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6805 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6806 list) {
6807 if ((ptr_app->app_id == data->client.app_id) &&
6808 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6809 found_app = true;
6810 break;
6811 }
6812 }
6813 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6814 if (!found_app) {
6815 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6816 (char *)data->client.app_name);
6817 return -ENOENT;
6818 }
6819
6820 /* validate offsets */
6821 for (i = 0; i < MAX_ION_FD; i++) {
6822 if (req.ifd_data[i].fd) {
6823 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
6824 return -EINVAL;
6825 }
6826 }
6827 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6828 (uintptr_t)req.req_ptr);
6829 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6830 (uintptr_t)req.resp_ptr);
6831 ret = __qseecom_update_qteec_req_buf(&req, data, false);
6832 if (ret)
6833 return ret;
6834
6835 if (qseecom.qsee_version < QSEE_VERSION_40) {
6836 ireq.app_id = data->client.app_id;
6837 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6838 (uintptr_t)req_ptr);
6839 ireq.req_len = req.req_len;
6840 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6841 (uintptr_t)resp_ptr);
6842 ireq.resp_len = req.resp_len;
6843 cmd_buf = (void *)&ireq;
6844 cmd_len = sizeof(struct qseecom_qteec_ireq);
6845 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6846 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6847 dmac_flush_range((void *)table,
6848 (void *)table + SGLISTINFO_TABLE_SIZE);
6849 } else {
6850 ireq_64bit.app_id = data->client.app_id;
6851 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6852 (uintptr_t)req_ptr);
6853 ireq_64bit.req_len = req.req_len;
6854 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6855 (uintptr_t)resp_ptr);
6856 ireq_64bit.resp_len = req.resp_len;
6857 cmd_buf = (void *)&ireq_64bit;
6858 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6859 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6860 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6861 dmac_flush_range((void *)table,
6862 (void *)table + SGLISTINFO_TABLE_SIZE);
6863 }
6864 reqd_len_sb_in = req.req_len + req.resp_len;
6865 if (qseecom.whitelist_support == true)
6866 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
6867 else
6868 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
6869
6870 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6871 data->client.sb_virt,
6872 reqd_len_sb_in,
6873 ION_IOC_CLEAN_INV_CACHES);
6874 if (ret) {
6875 pr_err("cache operation failed %d\n", ret);
6876 return ret;
6877 }
6878
6879 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6880
6881 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6882 cmd_buf, cmd_len,
6883 &resp, sizeof(resp));
6884 if (ret) {
6885 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6886 ret, data->client.app_id);
6887 return ret;
6888 }
6889
6890 if (qseecom.qsee_reentrancy_support) {
6891 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
6892 } else {
6893 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6894 ret = __qseecom_process_incomplete_cmd(data, &resp);
6895 if (ret) {
6896 pr_err("process_incomplete_cmd failed err: %d\n",
6897 ret);
6898 return ret;
6899 }
6900 } else {
6901 if (resp.result != QSEOS_RESULT_SUCCESS) {
6902 pr_err("Response result %d not supported\n",
6903 resp.result);
6904 ret = -EINVAL;
6905 }
6906 }
6907 }
6908 ret = __qseecom_update_qteec_req_buf(&req, data, true);
6909 if (ret)
6910 return ret;
6911
6912 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6913 data->client.sb_virt, data->client.sb_length,
6914 ION_IOC_INV_CACHES);
6915 if (ret) {
6916 pr_err("cache operation failed %d\n", ret);
6917 return ret;
6918 }
6919 return 0;
6920}
6921
6922static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6923 void __user *argp)
6924{
6925 struct qseecom_qteec_modfd_req req;
6926 int ret = 0;
6927
6928 ret = copy_from_user(&req, argp,
6929 sizeof(struct qseecom_qteec_modfd_req));
6930 if (ret) {
6931 pr_err("copy_from_user failed\n");
6932 return ret;
6933 }
6934 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6935 QSEOS_TEE_REQUEST_CANCELLATION);
6936
6937 return ret;
6938}
6939
6940static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6941{
6942 if (data->sglist_cnt) {
6943 memset(data->sglistinfo_ptr, 0,
6944 SGLISTINFO_TABLE_SIZE);
6945 data->sglist_cnt = 0;
6946 }
6947}
6948
6949static inline long qseecom_ioctl(struct file *file,
6950 unsigned int cmd, unsigned long arg)
6951{
6952 int ret = 0;
6953 struct qseecom_dev_handle *data = file->private_data;
6954 void __user *argp = (void __user *) arg;
6955 bool perf_enabled = false;
6956
6957 if (!data) {
6958 pr_err("Invalid/uninitialized device handle\n");
6959 return -EINVAL;
6960 }
6961
6962 if (data->abort) {
6963 pr_err("Aborting qseecom driver\n");
6964 return -ENODEV;
6965 }
6966
6967 switch (cmd) {
6968 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
6969 if (data->type != QSEECOM_GENERIC) {
6970 pr_err("reg lstnr req: invalid handle (%d)\n",
6971 data->type);
6972 ret = -EINVAL;
6973 break;
6974 }
6975 pr_debug("ioctl register_listener_req()\n");
6976 mutex_lock(&app_access_lock);
6977 atomic_inc(&data->ioctl_count);
6978 data->type = QSEECOM_LISTENER_SERVICE;
6979 ret = qseecom_register_listener(data, argp);
6980 atomic_dec(&data->ioctl_count);
6981 wake_up_all(&data->abort_wq);
6982 mutex_unlock(&app_access_lock);
6983 if (ret)
6984 pr_err("failed qseecom_register_listener: %d\n", ret);
6985 break;
6986 }
6987 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
6988 if ((data->listener.id == 0) ||
6989 (data->type != QSEECOM_LISTENER_SERVICE)) {
6990 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
6991 data->type, data->listener.id);
6992 ret = -EINVAL;
6993 break;
6994 }
6995 pr_debug("ioctl unregister_listener_req()\n");
6996 mutex_lock(&app_access_lock);
6997 atomic_inc(&data->ioctl_count);
6998 ret = qseecom_unregister_listener(data);
6999 atomic_dec(&data->ioctl_count);
7000 wake_up_all(&data->abort_wq);
7001 mutex_unlock(&app_access_lock);
7002 if (ret)
7003 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7004 break;
7005 }
7006 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7007 if ((data->client.app_id == 0) ||
7008 (data->type != QSEECOM_CLIENT_APP)) {
7009 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7010 data->type, data->client.app_id);
7011 ret = -EINVAL;
7012 break;
7013 }
7014 /* Only one client allowed here at a time */
7015 mutex_lock(&app_access_lock);
7016 if (qseecom.support_bus_scaling) {
7017 /* register bus bw in case the client doesn't do it */
7018 if (!data->mode) {
7019 mutex_lock(&qsee_bw_mutex);
7020 __qseecom_register_bus_bandwidth_needs(
7021 data, HIGH);
7022 mutex_unlock(&qsee_bw_mutex);
7023 }
7024 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7025 if (ret) {
7026 pr_err("Failed to set bw.\n");
7027 ret = -EINVAL;
7028 mutex_unlock(&app_access_lock);
7029 break;
7030 }
7031 }
7032 /*
7033 * On targets where crypto clock is handled by HLOS,
7034 * if clk_access_cnt is zero and perf_enabled is false,
7035 * then the crypto clock was not enabled before sending cmd to
7036 * tz, qseecom will enable the clock to avoid service failure.
7037 */
7038 if (!qseecom.no_clock_support &&
7039 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7040 pr_debug("ce clock is not enabled!\n");
7041 ret = qseecom_perf_enable(data);
7042 if (ret) {
7043 pr_err("Failed to vote for clock with err %d\n",
7044 ret);
7045 mutex_unlock(&app_access_lock);
7046 ret = -EINVAL;
7047 break;
7048 }
7049 perf_enabled = true;
7050 }
7051 atomic_inc(&data->ioctl_count);
7052 ret = qseecom_send_cmd(data, argp);
7053 if (qseecom.support_bus_scaling)
7054 __qseecom_add_bw_scale_down_timer(
7055 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7056 if (perf_enabled) {
7057 qsee_disable_clock_vote(data, CLK_DFAB);
7058 qsee_disable_clock_vote(data, CLK_SFPB);
7059 }
7060 atomic_dec(&data->ioctl_count);
7061 wake_up_all(&data->abort_wq);
7062 mutex_unlock(&app_access_lock);
7063 if (ret)
7064 pr_err("failed qseecom_send_cmd: %d\n", ret);
7065 break;
7066 }
7067 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7068 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7069 if ((data->client.app_id == 0) ||
7070 (data->type != QSEECOM_CLIENT_APP)) {
7071 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7072 data->type, data->client.app_id);
7073 ret = -EINVAL;
7074 break;
7075 }
7076 /* Only one client allowed here at a time */
7077 mutex_lock(&app_access_lock);
7078 if (qseecom.support_bus_scaling) {
7079 if (!data->mode) {
7080 mutex_lock(&qsee_bw_mutex);
7081 __qseecom_register_bus_bandwidth_needs(
7082 data, HIGH);
7083 mutex_unlock(&qsee_bw_mutex);
7084 }
7085 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7086 if (ret) {
7087 pr_err("Failed to set bw.\n");
7088 mutex_unlock(&app_access_lock);
7089 ret = -EINVAL;
7090 break;
7091 }
7092 }
7093 /*
7094 * On targets where crypto clock is handled by HLOS,
7095 * if clk_access_cnt is zero and perf_enabled is false,
7096 * then the crypto clock was not enabled before sending cmd to
7097 * tz, qseecom will enable the clock to avoid service failure.
7098 */
7099 if (!qseecom.no_clock_support &&
7100 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7101 pr_debug("ce clock is not enabled!\n");
7102 ret = qseecom_perf_enable(data);
7103 if (ret) {
7104 pr_err("Failed to vote for clock with err %d\n",
7105 ret);
7106 mutex_unlock(&app_access_lock);
7107 ret = -EINVAL;
7108 break;
7109 }
7110 perf_enabled = true;
7111 }
7112 atomic_inc(&data->ioctl_count);
7113 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7114 ret = qseecom_send_modfd_cmd(data, argp);
7115 else
7116 ret = qseecom_send_modfd_cmd_64(data, argp);
7117 if (qseecom.support_bus_scaling)
7118 __qseecom_add_bw_scale_down_timer(
7119 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7120 if (perf_enabled) {
7121 qsee_disable_clock_vote(data, CLK_DFAB);
7122 qsee_disable_clock_vote(data, CLK_SFPB);
7123 }
7124 atomic_dec(&data->ioctl_count);
7125 wake_up_all(&data->abort_wq);
7126 mutex_unlock(&app_access_lock);
7127 if (ret)
7128 pr_err("failed qseecom_send_cmd: %d\n", ret);
7129 __qseecom_clean_data_sglistinfo(data);
7130 break;
7131 }
7132 case QSEECOM_IOCTL_RECEIVE_REQ: {
7133 if ((data->listener.id == 0) ||
7134 (data->type != QSEECOM_LISTENER_SERVICE)) {
7135 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7136 data->type, data->listener.id);
7137 ret = -EINVAL;
7138 break;
7139 }
7140 atomic_inc(&data->ioctl_count);
7141 ret = qseecom_receive_req(data);
7142 atomic_dec(&data->ioctl_count);
7143 wake_up_all(&data->abort_wq);
7144 if (ret && (ret != -ERESTARTSYS))
7145 pr_err("failed qseecom_receive_req: %d\n", ret);
7146 break;
7147 }
7148 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7149 if ((data->listener.id == 0) ||
7150 (data->type != QSEECOM_LISTENER_SERVICE)) {
7151 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7152 data->type, data->listener.id);
7153 ret = -EINVAL;
7154 break;
7155 }
7156 atomic_inc(&data->ioctl_count);
7157 if (!qseecom.qsee_reentrancy_support)
7158 ret = qseecom_send_resp();
7159 else
7160 ret = qseecom_reentrancy_send_resp(data);
7161 atomic_dec(&data->ioctl_count);
7162 wake_up_all(&data->abort_wq);
7163 if (ret)
7164 pr_err("failed qseecom_send_resp: %d\n", ret);
7165 break;
7166 }
7167 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7168 if ((data->type != QSEECOM_CLIENT_APP) &&
7169 (data->type != QSEECOM_GENERIC) &&
7170 (data->type != QSEECOM_SECURE_SERVICE)) {
7171 pr_err("set mem param req: invalid handle (%d)\n",
7172 data->type);
7173 ret = -EINVAL;
7174 break;
7175 }
7176 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7177 mutex_lock(&app_access_lock);
7178 atomic_inc(&data->ioctl_count);
7179 ret = qseecom_set_client_mem_param(data, argp);
7180 atomic_dec(&data->ioctl_count);
7181 mutex_unlock(&app_access_lock);
7182 if (ret)
7183 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7184 ret);
7185 break;
7186 }
7187 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7188 if ((data->type != QSEECOM_GENERIC) &&
7189 (data->type != QSEECOM_CLIENT_APP)) {
7190 pr_err("load app req: invalid handle (%d)\n",
7191 data->type);
7192 ret = -EINVAL;
7193 break;
7194 }
7195 data->type = QSEECOM_CLIENT_APP;
7196 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7197 mutex_lock(&app_access_lock);
7198 atomic_inc(&data->ioctl_count);
7199 ret = qseecom_load_app(data, argp);
7200 atomic_dec(&data->ioctl_count);
7201 mutex_unlock(&app_access_lock);
7202 if (ret)
7203 pr_err("failed load_app request: %d\n", ret);
7204 break;
7205 }
7206 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7207 if ((data->client.app_id == 0) ||
7208 (data->type != QSEECOM_CLIENT_APP)) {
7209 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7210 data->type, data->client.app_id);
7211 ret = -EINVAL;
7212 break;
7213 }
7214 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7215 mutex_lock(&app_access_lock);
7216 atomic_inc(&data->ioctl_count);
7217 ret = qseecom_unload_app(data, false);
7218 atomic_dec(&data->ioctl_count);
7219 mutex_unlock(&app_access_lock);
7220 if (ret)
7221 pr_err("failed unload_app request: %d\n", ret);
7222 break;
7223 }
7224 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7225 atomic_inc(&data->ioctl_count);
7226 ret = qseecom_get_qseos_version(data, argp);
7227 if (ret)
7228 pr_err("qseecom_get_qseos_version: %d\n", ret);
7229 atomic_dec(&data->ioctl_count);
7230 break;
7231 }
7232 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7233 if ((data->type != QSEECOM_GENERIC) &&
7234 (data->type != QSEECOM_CLIENT_APP)) {
7235 pr_err("perf enable req: invalid handle (%d)\n",
7236 data->type);
7237 ret = -EINVAL;
7238 break;
7239 }
7240 if ((data->type == QSEECOM_CLIENT_APP) &&
7241 (data->client.app_id == 0)) {
7242 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7243 data->type, data->client.app_id);
7244 ret = -EINVAL;
7245 break;
7246 }
7247 atomic_inc(&data->ioctl_count);
7248 if (qseecom.support_bus_scaling) {
7249 mutex_lock(&qsee_bw_mutex);
7250 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7251 mutex_unlock(&qsee_bw_mutex);
7252 } else {
7253 ret = qseecom_perf_enable(data);
7254 if (ret)
7255 pr_err("Fail to vote for clocks %d\n", ret);
7256 }
7257 atomic_dec(&data->ioctl_count);
7258 break;
7259 }
7260 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7261 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7262 (data->type != QSEECOM_CLIENT_APP)) {
7263 pr_err("perf disable req: invalid handle (%d)\n",
7264 data->type);
7265 ret = -EINVAL;
7266 break;
7267 }
7268 if ((data->type == QSEECOM_CLIENT_APP) &&
7269 (data->client.app_id == 0)) {
7270 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7271 data->type, data->client.app_id);
7272 ret = -EINVAL;
7273 break;
7274 }
7275 atomic_inc(&data->ioctl_count);
7276 if (!qseecom.support_bus_scaling) {
7277 qsee_disable_clock_vote(data, CLK_DFAB);
7278 qsee_disable_clock_vote(data, CLK_SFPB);
7279 } else {
7280 mutex_lock(&qsee_bw_mutex);
7281 qseecom_unregister_bus_bandwidth_needs(data);
7282 mutex_unlock(&qsee_bw_mutex);
7283 }
7284 atomic_dec(&data->ioctl_count);
7285 break;
7286 }
7287
7288 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7289 /* If crypto clock is not handled by HLOS, return directly. */
7290 if (qseecom.no_clock_support) {
7291 pr_debug("crypto clock is not handled by HLOS\n");
7292 break;
7293 }
7294 if ((data->client.app_id == 0) ||
7295 (data->type != QSEECOM_CLIENT_APP)) {
7296 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7297 data->type, data->client.app_id);
7298 ret = -EINVAL;
7299 break;
7300 }
7301 atomic_inc(&data->ioctl_count);
7302 ret = qseecom_scale_bus_bandwidth(data, argp);
7303 atomic_dec(&data->ioctl_count);
7304 break;
7305 }
7306 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7307 if (data->type != QSEECOM_GENERIC) {
7308 pr_err("load ext elf req: invalid client handle (%d)\n",
7309 data->type);
7310 ret = -EINVAL;
7311 break;
7312 }
7313 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7314 data->released = true;
7315 mutex_lock(&app_access_lock);
7316 atomic_inc(&data->ioctl_count);
7317 ret = qseecom_load_external_elf(data, argp);
7318 atomic_dec(&data->ioctl_count);
7319 mutex_unlock(&app_access_lock);
7320 if (ret)
7321 pr_err("failed load_external_elf request: %d\n", ret);
7322 break;
7323 }
7324 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7325 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7326 pr_err("unload ext elf req: invalid handle (%d)\n",
7327 data->type);
7328 ret = -EINVAL;
7329 break;
7330 }
7331 data->released = true;
7332 mutex_lock(&app_access_lock);
7333 atomic_inc(&data->ioctl_count);
7334 ret = qseecom_unload_external_elf(data);
7335 atomic_dec(&data->ioctl_count);
7336 mutex_unlock(&app_access_lock);
7337 if (ret)
7338 pr_err("failed unload_app request: %d\n", ret);
7339 break;
7340 }
7341 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7342 data->type = QSEECOM_CLIENT_APP;
7343 mutex_lock(&app_access_lock);
7344 atomic_inc(&data->ioctl_count);
7345 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7346 ret = qseecom_query_app_loaded(data, argp);
7347 atomic_dec(&data->ioctl_count);
7348 mutex_unlock(&app_access_lock);
7349 break;
7350 }
7351 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7352 if (data->type != QSEECOM_GENERIC) {
7353 pr_err("send cmd svc req: invalid handle (%d)\n",
7354 data->type);
7355 ret = -EINVAL;
7356 break;
7357 }
7358 data->type = QSEECOM_SECURE_SERVICE;
7359 if (qseecom.qsee_version < QSEE_VERSION_03) {
7360 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7361 qseecom.qsee_version);
7362 return -EINVAL;
7363 }
7364 mutex_lock(&app_access_lock);
7365 atomic_inc(&data->ioctl_count);
7366 ret = qseecom_send_service_cmd(data, argp);
7367 atomic_dec(&data->ioctl_count);
7368 mutex_unlock(&app_access_lock);
7369 break;
7370 }
7371 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7372 if (!(qseecom.support_pfe || qseecom.support_fde))
7373 pr_err("Features requiring key init not supported\n");
7374 if (data->type != QSEECOM_GENERIC) {
7375 pr_err("create key req: invalid handle (%d)\n",
7376 data->type);
7377 ret = -EINVAL;
7378 break;
7379 }
7380 if (qseecom.qsee_version < QSEE_VERSION_05) {
7381 pr_err("Create Key feature unsupported: qsee ver %u\n",
7382 qseecom.qsee_version);
7383 return -EINVAL;
7384 }
7385 data->released = true;
7386 mutex_lock(&app_access_lock);
7387 atomic_inc(&data->ioctl_count);
7388 ret = qseecom_create_key(data, argp);
7389 if (ret)
7390 pr_err("failed to create encryption key: %d\n", ret);
7391
7392 atomic_dec(&data->ioctl_count);
7393 mutex_unlock(&app_access_lock);
7394 break;
7395 }
7396 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7397 if (!(qseecom.support_pfe || qseecom.support_fde))
7398 pr_err("Features requiring key init not supported\n");
7399 if (data->type != QSEECOM_GENERIC) {
7400 pr_err("wipe key req: invalid handle (%d)\n",
7401 data->type);
7402 ret = -EINVAL;
7403 break;
7404 }
7405 if (qseecom.qsee_version < QSEE_VERSION_05) {
7406 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7407 qseecom.qsee_version);
7408 return -EINVAL;
7409 }
7410 data->released = true;
7411 mutex_lock(&app_access_lock);
7412 atomic_inc(&data->ioctl_count);
7413 ret = qseecom_wipe_key(data, argp);
7414 if (ret)
7415 pr_err("failed to wipe encryption key: %d\n", ret);
7416 atomic_dec(&data->ioctl_count);
7417 mutex_unlock(&app_access_lock);
7418 break;
7419 }
7420 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7421 if (!(qseecom.support_pfe || qseecom.support_fde))
7422 pr_err("Features requiring key init not supported\n");
7423 if (data->type != QSEECOM_GENERIC) {
7424 pr_err("update key req: invalid handle (%d)\n",
7425 data->type);
7426 ret = -EINVAL;
7427 break;
7428 }
7429 if (qseecom.qsee_version < QSEE_VERSION_05) {
7430 pr_err("Update Key feature unsupported in qsee ver %u\n",
7431 qseecom.qsee_version);
7432 return -EINVAL;
7433 }
7434 data->released = true;
7435 mutex_lock(&app_access_lock);
7436 atomic_inc(&data->ioctl_count);
7437 ret = qseecom_update_key_user_info(data, argp);
7438 if (ret)
7439 pr_err("failed to update key user info: %d\n", ret);
7440 atomic_dec(&data->ioctl_count);
7441 mutex_unlock(&app_access_lock);
7442 break;
7443 }
7444 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7445 if (data->type != QSEECOM_GENERIC) {
7446 pr_err("save part hash req: invalid handle (%d)\n",
7447 data->type);
7448 ret = -EINVAL;
7449 break;
7450 }
7451 data->released = true;
7452 mutex_lock(&app_access_lock);
7453 atomic_inc(&data->ioctl_count);
7454 ret = qseecom_save_partition_hash(argp);
7455 atomic_dec(&data->ioctl_count);
7456 mutex_unlock(&app_access_lock);
7457 break;
7458 }
7459 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7460 if (data->type != QSEECOM_GENERIC) {
7461 pr_err("ES activated req: invalid handle (%d)\n",
7462 data->type);
7463 ret = -EINVAL;
7464 break;
7465 }
7466 data->released = true;
7467 mutex_lock(&app_access_lock);
7468 atomic_inc(&data->ioctl_count);
7469 ret = qseecom_is_es_activated(argp);
7470 atomic_dec(&data->ioctl_count);
7471 mutex_unlock(&app_access_lock);
7472 break;
7473 }
7474 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7475 if (data->type != QSEECOM_GENERIC) {
7476 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7477 data->type);
7478 ret = -EINVAL;
7479 break;
7480 }
7481 data->released = true;
7482 mutex_lock(&app_access_lock);
7483 atomic_inc(&data->ioctl_count);
7484 ret = qseecom_mdtp_cipher_dip(argp);
7485 atomic_dec(&data->ioctl_count);
7486 mutex_unlock(&app_access_lock);
7487 break;
7488 }
7489 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7490 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7491 if ((data->listener.id == 0) ||
7492 (data->type != QSEECOM_LISTENER_SERVICE)) {
7493 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7494 data->type, data->listener.id);
7495 ret = -EINVAL;
7496 break;
7497 }
7498 atomic_inc(&data->ioctl_count);
7499 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7500 ret = qseecom_send_modfd_resp(data, argp);
7501 else
7502 ret = qseecom_send_modfd_resp_64(data, argp);
7503 atomic_dec(&data->ioctl_count);
7504 wake_up_all(&data->abort_wq);
7505 if (ret)
7506 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7507 __qseecom_clean_data_sglistinfo(data);
7508 break;
7509 }
7510 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7511 if ((data->client.app_id == 0) ||
7512 (data->type != QSEECOM_CLIENT_APP)) {
7513 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7514 data->type, data->client.app_id);
7515 ret = -EINVAL;
7516 break;
7517 }
7518 if (qseecom.qsee_version < QSEE_VERSION_40) {
7519 pr_err("GP feature unsupported: qsee ver %u\n",
7520 qseecom.qsee_version);
7521 return -EINVAL;
7522 }
7523 /* Only one client allowed here at a time */
7524 mutex_lock(&app_access_lock);
7525 atomic_inc(&data->ioctl_count);
7526 ret = qseecom_qteec_open_session(data, argp);
7527 atomic_dec(&data->ioctl_count);
7528 wake_up_all(&data->abort_wq);
7529 mutex_unlock(&app_access_lock);
7530 if (ret)
7531 pr_err("failed open_session_cmd: %d\n", ret);
7532 __qseecom_clean_data_sglistinfo(data);
7533 break;
7534 }
7535 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7536 if ((data->client.app_id == 0) ||
7537 (data->type != QSEECOM_CLIENT_APP)) {
7538 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7539 data->type, data->client.app_id);
7540 ret = -EINVAL;
7541 break;
7542 }
7543 if (qseecom.qsee_version < QSEE_VERSION_40) {
7544 pr_err("GP feature unsupported: qsee ver %u\n",
7545 qseecom.qsee_version);
7546 return -EINVAL;
7547 }
7548 /* Only one client allowed here at a time */
7549 mutex_lock(&app_access_lock);
7550 atomic_inc(&data->ioctl_count);
7551 ret = qseecom_qteec_close_session(data, argp);
7552 atomic_dec(&data->ioctl_count);
7553 wake_up_all(&data->abort_wq);
7554 mutex_unlock(&app_access_lock);
7555 if (ret)
7556 pr_err("failed close_session_cmd: %d\n", ret);
7557 break;
7558 }
7559 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7560 if ((data->client.app_id == 0) ||
7561 (data->type != QSEECOM_CLIENT_APP)) {
7562 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7563 data->type, data->client.app_id);
7564 ret = -EINVAL;
7565 break;
7566 }
7567 if (qseecom.qsee_version < QSEE_VERSION_40) {
7568 pr_err("GP feature unsupported: qsee ver %u\n",
7569 qseecom.qsee_version);
7570 return -EINVAL;
7571 }
7572 /* Only one client allowed here at a time */
7573 mutex_lock(&app_access_lock);
7574 atomic_inc(&data->ioctl_count);
7575 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7576 atomic_dec(&data->ioctl_count);
7577 wake_up_all(&data->abort_wq);
7578 mutex_unlock(&app_access_lock);
7579 if (ret)
7580 pr_err("failed Invoke cmd: %d\n", ret);
7581 __qseecom_clean_data_sglistinfo(data);
7582 break;
7583 }
7584 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7585 if ((data->client.app_id == 0) ||
7586 (data->type != QSEECOM_CLIENT_APP)) {
7587 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7588 data->type, data->client.app_id);
7589 ret = -EINVAL;
7590 break;
7591 }
7592 if (qseecom.qsee_version < QSEE_VERSION_40) {
7593 pr_err("GP feature unsupported: qsee ver %u\n",
7594 qseecom.qsee_version);
7595 return -EINVAL;
7596 }
7597 /* Only one client allowed here at a time */
7598 mutex_lock(&app_access_lock);
7599 atomic_inc(&data->ioctl_count);
7600 ret = qseecom_qteec_request_cancellation(data, argp);
7601 atomic_dec(&data->ioctl_count);
7602 wake_up_all(&data->abort_wq);
7603 mutex_unlock(&app_access_lock);
7604 if (ret)
7605 pr_err("failed request_cancellation: %d\n", ret);
7606 break;
7607 }
7608 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7609 atomic_inc(&data->ioctl_count);
7610 ret = qseecom_get_ce_info(data, argp);
7611 if (ret)
7612 pr_err("failed get fde ce pipe info: %d\n", ret);
7613 atomic_dec(&data->ioctl_count);
7614 break;
7615 }
7616 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7617 atomic_inc(&data->ioctl_count);
7618 ret = qseecom_free_ce_info(data, argp);
7619 if (ret)
7620 pr_err("failed get fde ce pipe info: %d\n", ret);
7621 atomic_dec(&data->ioctl_count);
7622 break;
7623 }
7624 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7625 atomic_inc(&data->ioctl_count);
7626 ret = qseecom_query_ce_info(data, argp);
7627 if (ret)
7628 pr_err("failed get fde ce pipe info: %d\n", ret);
7629 atomic_dec(&data->ioctl_count);
7630 break;
7631 }
7632 default:
7633 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7634 return -EINVAL;
7635 }
7636 return ret;
7637}
7638
7639static int qseecom_open(struct inode *inode, struct file *file)
7640{
7641 int ret = 0;
7642 struct qseecom_dev_handle *data;
7643
7644 data = kzalloc(sizeof(*data), GFP_KERNEL);
7645 if (!data)
7646 return -ENOMEM;
7647 file->private_data = data;
7648 data->abort = 0;
7649 data->type = QSEECOM_GENERIC;
7650 data->released = false;
7651 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7652 data->mode = INACTIVE;
7653 init_waitqueue_head(&data->abort_wq);
7654 atomic_set(&data->ioctl_count, 0);
7655 return ret;
7656}
7657
/*
 * qseecom_release() - release handler for /dev/qseecom file descriptors.
 *
 * If userspace exited without an explicit teardown (data->released still
 * false), undo whatever this handle type acquired: unregister a listener,
 * unload the client app, or unmap the client's ion memory.  Afterwards,
 * drop any bus-bandwidth registration or clock votes still held by this
 * fd, then free the handle.
 */
static int qseecom_release(struct inode *inode, struct file *file)
{
	struct qseecom_dev_handle *data = file->private_data;
	int ret = 0;

	if (data->released == false) {
		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
			data->type, data->mode, data);
		switch (data->type) {
		case QSEECOM_LISTENER_SERVICE:
			/* Listener/app teardown is serialized by app_access_lock */
			mutex_lock(&app_access_lock);
			ret = qseecom_unregister_listener(data);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_CLIENT_APP:
			mutex_lock(&app_access_lock);
			ret = qseecom_unload_app(data, true);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_SECURE_SERVICE:
		case QSEECOM_GENERIC:
			ret = qseecom_unmap_ion_allocated_memory(data);
			if (ret)
				pr_err("Ion Unmap failed\n");
			break;
		case QSEECOM_UNAVAILABLE_CLIENT_APP:
			/* Nothing to release for this type */
			break;
		default:
			pr_err("Unsupported clnt_handle_type %d",
				data->type);
			break;
		}
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		if (data->mode != INACTIVE) {
			qseecom_unregister_bus_bandwidth_needs(data);
			/* If this was the last voter, scale the bus back down */
			if (qseecom.cumulative_mode == INACTIVE) {
				ret = __qseecom_set_msm_bus_request(INACTIVE);
				if (ret)
					pr_err("Fail to scale down bus\n");
			}
		}
		mutex_unlock(&qsee_bw_mutex);
	} else {
		/* Drop clock votes taken via the fast-load/perf-enable paths */
		if (data->fast_load_enabled == true)
			qsee_disable_clock_vote(data, CLK_SFPB);
		if (data->perf_enabled == true)
			qsee_disable_clock_vote(data, CLK_DFAB);
	}
	kfree(data);

	return ret;
}
7713
7714#ifdef CONFIG_COMPAT
7715#include "compat_qseecom.c"
7716#else
7717#define compat_qseecom_ioctl NULL
7718#endif
7719
/* Character-device entry points for /dev/qseecom. */
static const struct file_operations qseecom_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qseecom_ioctl,
	.compat_ioctl = compat_qseecom_ioctl,
	.open = qseecom_open,
	.release = qseecom_release
};
7727
7728static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7729{
7730 int rc = 0;
7731 struct device *pdev;
7732 struct qseecom_clk *qclk;
7733 char *core_clk_src = NULL;
7734 char *core_clk = NULL;
7735 char *iface_clk = NULL;
7736 char *bus_clk = NULL;
7737
7738 switch (ce) {
7739 case CLK_QSEE: {
7740 core_clk_src = "core_clk_src";
7741 core_clk = "core_clk";
7742 iface_clk = "iface_clk";
7743 bus_clk = "bus_clk";
7744 qclk = &qseecom.qsee;
7745 qclk->instance = CLK_QSEE;
7746 break;
7747 };
7748 case CLK_CE_DRV: {
7749 core_clk_src = "ce_drv_core_clk_src";
7750 core_clk = "ce_drv_core_clk";
7751 iface_clk = "ce_drv_iface_clk";
7752 bus_clk = "ce_drv_bus_clk";
7753 qclk = &qseecom.ce_drv;
7754 qclk->instance = CLK_CE_DRV;
7755 break;
7756 };
7757 default:
7758 pr_err("Invalid ce hw instance: %d!\n", ce);
7759 return -EIO;
7760 }
7761
7762 if (qseecom.no_clock_support) {
7763 qclk->ce_core_clk = NULL;
7764 qclk->ce_clk = NULL;
7765 qclk->ce_bus_clk = NULL;
7766 qclk->ce_core_src_clk = NULL;
7767 return 0;
7768 }
7769
7770 pdev = qseecom.pdev;
7771
7772 /* Get CE3 src core clk. */
7773 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7774 if (!IS_ERR(qclk->ce_core_src_clk)) {
7775 rc = clk_set_rate(qclk->ce_core_src_clk,
7776 qseecom.ce_opp_freq_hz);
7777 if (rc) {
7778 clk_put(qclk->ce_core_src_clk);
7779 qclk->ce_core_src_clk = NULL;
7780 pr_err("Unable to set the core src clk @%uMhz.\n",
7781 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7782 return -EIO;
7783 }
7784 } else {
7785 pr_warn("Unable to get CE core src clk, set to NULL\n");
7786 qclk->ce_core_src_clk = NULL;
7787 }
7788
7789 /* Get CE core clk */
7790 qclk->ce_core_clk = clk_get(pdev, core_clk);
7791 if (IS_ERR(qclk->ce_core_clk)) {
7792 rc = PTR_ERR(qclk->ce_core_clk);
7793 pr_err("Unable to get CE core clk\n");
7794 if (qclk->ce_core_src_clk != NULL)
7795 clk_put(qclk->ce_core_src_clk);
7796 return -EIO;
7797 }
7798
7799 /* Get CE Interface clk */
7800 qclk->ce_clk = clk_get(pdev, iface_clk);
7801 if (IS_ERR(qclk->ce_clk)) {
7802 rc = PTR_ERR(qclk->ce_clk);
7803 pr_err("Unable to get CE interface clk\n");
7804 if (qclk->ce_core_src_clk != NULL)
7805 clk_put(qclk->ce_core_src_clk);
7806 clk_put(qclk->ce_core_clk);
7807 return -EIO;
7808 }
7809
7810 /* Get CE AXI clk */
7811 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7812 if (IS_ERR(qclk->ce_bus_clk)) {
7813 rc = PTR_ERR(qclk->ce_bus_clk);
7814 pr_err("Unable to get CE BUS interface clk\n");
7815 if (qclk->ce_core_src_clk != NULL)
7816 clk_put(qclk->ce_core_src_clk);
7817 clk_put(qclk->ce_core_clk);
7818 clk_put(qclk->ce_clk);
7819 return -EIO;
7820 }
7821
7822 return rc;
7823}
7824
7825static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7826{
7827 struct qseecom_clk *qclk;
7828
7829 if (ce == CLK_QSEE)
7830 qclk = &qseecom.qsee;
7831 else
7832 qclk = &qseecom.ce_drv;
7833
7834 if (qclk->ce_clk != NULL) {
7835 clk_put(qclk->ce_clk);
7836 qclk->ce_clk = NULL;
7837 }
7838 if (qclk->ce_core_clk != NULL) {
7839 clk_put(qclk->ce_core_clk);
7840 qclk->ce_core_clk = NULL;
7841 }
7842 if (qclk->ce_bus_clk != NULL) {
7843 clk_put(qclk->ce_bus_clk);
7844 qclk->ce_bus_clk = NULL;
7845 }
7846 if (qclk->ce_core_src_clk != NULL) {
7847 clk_put(qclk->ce_core_src_clk);
7848 qclk->ce_core_src_clk = NULL;
7849 }
7850 qclk->instance = CLK_INVALID;
7851}
7852
7853static int qseecom_retrieve_ce_data(struct platform_device *pdev)
7854{
7855 int rc = 0;
7856 uint32_t hlos_num_ce_hw_instances;
7857 uint32_t disk_encrypt_pipe;
7858 uint32_t file_encrypt_pipe;
7859 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT];
7860 int i;
7861 const int *tbl;
7862 int size;
7863 int entry;
7864 struct qseecom_crypto_info *pfde_tbl = NULL;
7865 struct qseecom_crypto_info *p;
7866 int tbl_size;
7867 int j;
7868 bool old_db = true;
7869 struct qseecom_ce_info_use *pce_info_use;
7870 uint32_t *unit_tbl = NULL;
7871 int total_units = 0;
7872 struct qseecom_ce_pipe_entry *pce_entry;
7873
7874 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
7875 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
7876
7877 if (of_property_read_u32((&pdev->dev)->of_node,
7878 "qcom,qsee-ce-hw-instance",
7879 &qseecom.ce_info.qsee_ce_hw_instance)) {
7880 pr_err("Fail to get qsee ce hw instance information.\n");
7881 rc = -EINVAL;
7882 goto out;
7883 } else {
7884 pr_debug("qsee-ce-hw-instance=0x%x\n",
7885 qseecom.ce_info.qsee_ce_hw_instance);
7886 }
7887
7888 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
7889 "qcom,support-fde");
7890 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
7891 "qcom,support-pfe");
7892
7893 if (!qseecom.support_pfe && !qseecom.support_fde) {
7894 pr_warn("Device does not support PFE/FDE");
7895 goto out;
7896 }
7897
7898 if (qseecom.support_fde)
7899 tbl = of_get_property((&pdev->dev)->of_node,
7900 "qcom,full-disk-encrypt-info", &size);
7901 else
7902 tbl = NULL;
7903 if (tbl) {
7904 old_db = false;
7905 if (size % sizeof(struct qseecom_crypto_info)) {
7906 pr_err("full-disk-encrypt-info tbl size(%d)\n",
7907 size);
7908 rc = -EINVAL;
7909 goto out;
7910 }
7911 tbl_size = size / sizeof
7912 (struct qseecom_crypto_info);
7913
7914 pfde_tbl = kzalloc(size, GFP_KERNEL);
7915 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
7916 total_units = 0;
7917
7918 if (!pfde_tbl || !unit_tbl) {
7919 pr_err("failed to alloc memory\n");
7920 rc = -ENOMEM;
7921 goto out;
7922 }
7923 if (of_property_read_u32_array((&pdev->dev)->of_node,
7924 "qcom,full-disk-encrypt-info",
7925 (u32 *)pfde_tbl, size/sizeof(u32))) {
7926 pr_err("failed to read full-disk-encrypt-info tbl\n");
7927 rc = -EINVAL;
7928 goto out;
7929 }
7930
7931 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7932 for (j = 0; j < total_units; j++) {
7933 if (p->unit_num == *(unit_tbl + j))
7934 break;
7935 }
7936 if (j == total_units) {
7937 *(unit_tbl + total_units) = p->unit_num;
7938 total_units++;
7939 }
7940 }
7941
7942 qseecom.ce_info.num_fde = total_units;
7943 pce_info_use = qseecom.ce_info.fde = kcalloc(
7944 total_units, sizeof(struct qseecom_ce_info_use),
7945 GFP_KERNEL);
7946 if (!pce_info_use) {
7947 pr_err("failed to alloc memory\n");
7948 rc = -ENOMEM;
7949 goto out;
7950 }
7951
7952 for (j = 0; j < total_units; j++, pce_info_use++) {
7953 pce_info_use->unit_num = *(unit_tbl + j);
7954 pce_info_use->alloc = false;
7955 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
7956 pce_info_use->num_ce_pipe_entries = 0;
7957 pce_info_use->ce_pipe_entry = NULL;
7958 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7959 if (p->unit_num == pce_info_use->unit_num)
7960 pce_info_use->num_ce_pipe_entries++;
7961 }
7962
7963 entry = pce_info_use->num_ce_pipe_entries;
7964 pce_entry = pce_info_use->ce_pipe_entry =
7965 kcalloc(entry,
7966 sizeof(struct qseecom_ce_pipe_entry),
7967 GFP_KERNEL);
7968 if (pce_entry == NULL) {
7969 pr_err("failed to alloc memory\n");
7970 rc = -ENOMEM;
7971 goto out;
7972 }
7973
7974 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7975 if (p->unit_num == pce_info_use->unit_num) {
7976 pce_entry->ce_num = p->ce;
7977 pce_entry->ce_pipe_pair =
7978 p->pipe_pair;
7979 pce_entry->valid = true;
7980 pce_entry++;
7981 }
7982 }
7983 }
7984 kfree(unit_tbl);
7985 unit_tbl = NULL;
7986 kfree(pfde_tbl);
7987 pfde_tbl = NULL;
7988 }
7989
7990 if (qseecom.support_pfe)
7991 tbl = of_get_property((&pdev->dev)->of_node,
7992 "qcom,per-file-encrypt-info", &size);
7993 else
7994 tbl = NULL;
7995 if (tbl) {
7996 old_db = false;
7997 if (size % sizeof(struct qseecom_crypto_info)) {
7998 pr_err("per-file-encrypt-info tbl size(%d)\n",
7999 size);
8000 rc = -EINVAL;
8001 goto out;
8002 }
8003 tbl_size = size / sizeof
8004 (struct qseecom_crypto_info);
8005
8006 pfde_tbl = kzalloc(size, GFP_KERNEL);
8007 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8008 total_units = 0;
8009 if (!pfde_tbl || !unit_tbl) {
8010 pr_err("failed to alloc memory\n");
8011 rc = -ENOMEM;
8012 goto out;
8013 }
8014 if (of_property_read_u32_array((&pdev->dev)->of_node,
8015 "qcom,per-file-encrypt-info",
8016 (u32 *)pfde_tbl, size/sizeof(u32))) {
8017 pr_err("failed to read per-file-encrypt-info tbl\n");
8018 rc = -EINVAL;
8019 goto out;
8020 }
8021
8022 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8023 for (j = 0; j < total_units; j++) {
8024 if (p->unit_num == *(unit_tbl + j))
8025 break;
8026 }
8027 if (j == total_units) {
8028 *(unit_tbl + total_units) = p->unit_num;
8029 total_units++;
8030 }
8031 }
8032
8033 qseecom.ce_info.num_pfe = total_units;
8034 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8035 total_units, sizeof(struct qseecom_ce_info_use),
8036 GFP_KERNEL);
8037 if (!pce_info_use) {
8038 pr_err("failed to alloc memory\n");
8039 rc = -ENOMEM;
8040 goto out;
8041 }
8042
8043 for (j = 0; j < total_units; j++, pce_info_use++) {
8044 pce_info_use->unit_num = *(unit_tbl + j);
8045 pce_info_use->alloc = false;
8046 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8047 pce_info_use->num_ce_pipe_entries = 0;
8048 pce_info_use->ce_pipe_entry = NULL;
8049 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8050 if (p->unit_num == pce_info_use->unit_num)
8051 pce_info_use->num_ce_pipe_entries++;
8052 }
8053
8054 entry = pce_info_use->num_ce_pipe_entries;
8055 pce_entry = pce_info_use->ce_pipe_entry =
8056 kcalloc(entry,
8057 sizeof(struct qseecom_ce_pipe_entry),
8058 GFP_KERNEL);
8059 if (pce_entry == NULL) {
8060 pr_err("failed to alloc memory\n");
8061 rc = -ENOMEM;
8062 goto out;
8063 }
8064
8065 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8066 if (p->unit_num == pce_info_use->unit_num) {
8067 pce_entry->ce_num = p->ce;
8068 pce_entry->ce_pipe_pair =
8069 p->pipe_pair;
8070 pce_entry->valid = true;
8071 pce_entry++;
8072 }
8073 }
8074 }
8075 kfree(unit_tbl);
8076 unit_tbl = NULL;
8077 kfree(pfde_tbl);
8078 pfde_tbl = NULL;
8079 }
8080
8081 if (!old_db)
8082 goto out1;
8083
8084 if (of_property_read_bool((&pdev->dev)->of_node,
8085 "qcom,support-multiple-ce-hw-instance")) {
8086 if (of_property_read_u32((&pdev->dev)->of_node,
8087 "qcom,hlos-num-ce-hw-instances",
8088 &hlos_num_ce_hw_instances)) {
8089 pr_err("Fail: get hlos number of ce hw instance\n");
8090 rc = -EINVAL;
8091 goto out;
8092 }
8093 } else {
8094 hlos_num_ce_hw_instances = 1;
8095 }
8096
8097 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8098 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8099 MAX_CE_PIPE_PAIR_PER_UNIT);
8100 rc = -EINVAL;
8101 goto out;
8102 }
8103
8104 if (of_property_read_u32_array((&pdev->dev)->of_node,
8105 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8106 hlos_num_ce_hw_instances)) {
8107 pr_err("Fail: get hlos ce hw instance info\n");
8108 rc = -EINVAL;
8109 goto out;
8110 }
8111
8112 if (qseecom.support_fde) {
8113 pce_info_use = qseecom.ce_info.fde =
8114 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8115 if (!pce_info_use) {
8116 pr_err("failed to alloc memory\n");
8117 rc = -ENOMEM;
8118 goto out;
8119 }
8120 /* by default for old db */
8121 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8122 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8123 pce_info_use->alloc = false;
8124 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8125 pce_info_use->ce_pipe_entry = NULL;
8126 if (of_property_read_u32((&pdev->dev)->of_node,
8127 "qcom,disk-encrypt-pipe-pair",
8128 &disk_encrypt_pipe)) {
8129 pr_err("Fail to get FDE pipe information.\n");
8130 rc = -EINVAL;
8131 goto out;
8132 } else {
8133 pr_debug("disk-encrypt-pipe-pair=0x%x",
8134 disk_encrypt_pipe);
8135 }
8136 entry = pce_info_use->num_ce_pipe_entries =
8137 hlos_num_ce_hw_instances;
8138 pce_entry = pce_info_use->ce_pipe_entry =
8139 kcalloc(entry,
8140 sizeof(struct qseecom_ce_pipe_entry),
8141 GFP_KERNEL);
8142 if (pce_entry == NULL) {
8143 pr_err("failed to alloc memory\n");
8144 rc = -ENOMEM;
8145 goto out;
8146 }
8147 for (i = 0; i < entry; i++) {
8148 pce_entry->ce_num = hlos_ce_hw_instance[i];
8149 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8150 pce_entry->valid = 1;
8151 pce_entry++;
8152 }
8153 } else {
8154 pr_warn("Device does not support FDE");
8155 disk_encrypt_pipe = 0xff;
8156 }
8157 if (qseecom.support_pfe) {
8158 pce_info_use = qseecom.ce_info.pfe =
8159 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8160 if (!pce_info_use) {
8161 pr_err("failed to alloc memory\n");
8162 rc = -ENOMEM;
8163 goto out;
8164 }
8165 /* by default for old db */
8166 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8167 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8168 pce_info_use->alloc = false;
8169 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8170 pce_info_use->ce_pipe_entry = NULL;
8171
8172 if (of_property_read_u32((&pdev->dev)->of_node,
8173 "qcom,file-encrypt-pipe-pair",
8174 &file_encrypt_pipe)) {
8175 pr_err("Fail to get PFE pipe information.\n");
8176 rc = -EINVAL;
8177 goto out;
8178 } else {
8179 pr_debug("file-encrypt-pipe-pair=0x%x",
8180 file_encrypt_pipe);
8181 }
8182 entry = pce_info_use->num_ce_pipe_entries =
8183 hlos_num_ce_hw_instances;
8184 pce_entry = pce_info_use->ce_pipe_entry =
8185 kcalloc(entry,
8186 sizeof(struct qseecom_ce_pipe_entry),
8187 GFP_KERNEL);
8188 if (pce_entry == NULL) {
8189 pr_err("failed to alloc memory\n");
8190 rc = -ENOMEM;
8191 goto out;
8192 }
8193 for (i = 0; i < entry; i++) {
8194 pce_entry->ce_num = hlos_ce_hw_instance[i];
8195 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8196 pce_entry->valid = 1;
8197 pce_entry++;
8198 }
8199 } else {
8200 pr_warn("Device does not support PFE");
8201 file_encrypt_pipe = 0xff;
8202 }
8203
8204out1:
8205 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8206 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8207out:
8208 if (rc) {
8209 if (qseecom.ce_info.fde) {
8210 pce_info_use = qseecom.ce_info.fde;
8211 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8212 pce_entry = pce_info_use->ce_pipe_entry;
8213 kfree(pce_entry);
8214 pce_info_use++;
8215 }
8216 }
8217 kfree(qseecom.ce_info.fde);
8218 qseecom.ce_info.fde = NULL;
8219 if (qseecom.ce_info.pfe) {
8220 pce_info_use = qseecom.ce_info.pfe;
8221 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8222 pce_entry = pce_info_use->ce_pipe_entry;
8223 kfree(pce_entry);
8224 pce_info_use++;
8225 }
8226 }
8227 kfree(qseecom.ce_info.pfe);
8228 qseecom.ce_info.pfe = NULL;
8229 }
8230 kfree(unit_tbl);
8231 kfree(pfde_tbl);
8232 return rc;
8233}
8234
8235static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8236 void __user *argp)
8237{
8238 struct qseecom_ce_info_req req;
8239 struct qseecom_ce_info_req *pinfo = &req;
8240 int ret = 0;
8241 int i;
8242 unsigned int entries;
8243 struct qseecom_ce_info_use *pce_info_use, *p;
8244 int total = 0;
8245 bool found = false;
8246 struct qseecom_ce_pipe_entry *pce_entry;
8247
8248 ret = copy_from_user(pinfo, argp,
8249 sizeof(struct qseecom_ce_info_req));
8250 if (ret) {
8251 pr_err("copy_from_user failed\n");
8252 return ret;
8253 }
8254
8255 switch (pinfo->usage) {
8256 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8257 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8258 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8259 if (qseecom.support_fde) {
8260 p = qseecom.ce_info.fde;
8261 total = qseecom.ce_info.num_fde;
8262 } else {
8263 pr_err("system does not support fde\n");
8264 return -EINVAL;
8265 }
8266 break;
8267 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8268 if (qseecom.support_pfe) {
8269 p = qseecom.ce_info.pfe;
8270 total = qseecom.ce_info.num_pfe;
8271 } else {
8272 pr_err("system does not support pfe\n");
8273 return -EINVAL;
8274 }
8275 break;
8276 default:
8277 pr_err("unsupported usage %d\n", pinfo->usage);
8278 return -EINVAL;
8279 }
8280
8281 pce_info_use = NULL;
8282 for (i = 0; i < total; i++) {
8283 if (!p->alloc)
8284 pce_info_use = p;
8285 else if (!memcmp(p->handle, pinfo->handle,
8286 MAX_CE_INFO_HANDLE_SIZE)) {
8287 pce_info_use = p;
8288 found = true;
8289 break;
8290 }
8291 p++;
8292 }
8293
8294 if (pce_info_use == NULL)
8295 return -EBUSY;
8296
8297 pinfo->unit_num = pce_info_use->unit_num;
8298 if (!pce_info_use->alloc) {
8299 pce_info_use->alloc = true;
8300 memcpy(pce_info_use->handle,
8301 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8302 }
8303 if (pce_info_use->num_ce_pipe_entries >
8304 MAX_CE_PIPE_PAIR_PER_UNIT)
8305 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8306 else
8307 entries = pce_info_use->num_ce_pipe_entries;
8308 pinfo->num_ce_pipe_entries = entries;
8309 pce_entry = pce_info_use->ce_pipe_entry;
8310 for (i = 0; i < entries; i++, pce_entry++)
8311 pinfo->ce_pipe_entry[i] = *pce_entry;
8312 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8313 pinfo->ce_pipe_entry[i].valid = 0;
8314
8315 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8316 pr_err("copy_to_user failed\n");
8317 ret = -EFAULT;
8318 }
8319 return ret;
8320}
8321
8322static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8323 void __user *argp)
8324{
8325 struct qseecom_ce_info_req req;
8326 struct qseecom_ce_info_req *pinfo = &req;
8327 int ret = 0;
8328 struct qseecom_ce_info_use *p;
8329 int total = 0;
8330 int i;
8331 bool found = false;
8332
8333 ret = copy_from_user(pinfo, argp,
8334 sizeof(struct qseecom_ce_info_req));
8335 if (ret)
8336 return ret;
8337
8338 switch (pinfo->usage) {
8339 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8340 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8341 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8342 if (qseecom.support_fde) {
8343 p = qseecom.ce_info.fde;
8344 total = qseecom.ce_info.num_fde;
8345 } else {
8346 pr_err("system does not support fde\n");
8347 return -EINVAL;
8348 }
8349 break;
8350 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8351 if (qseecom.support_pfe) {
8352 p = qseecom.ce_info.pfe;
8353 total = qseecom.ce_info.num_pfe;
8354 } else {
8355 pr_err("system does not support pfe\n");
8356 return -EINVAL;
8357 }
8358 break;
8359 default:
8360 pr_err("unsupported usage %d\n", pinfo->usage);
8361 return -EINVAL;
8362 }
8363
8364 for (i = 0; i < total; i++) {
8365 if (p->alloc &&
8366 !memcmp(p->handle, pinfo->handle,
8367 MAX_CE_INFO_HANDLE_SIZE)) {
8368 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8369 p->alloc = false;
8370 found = true;
8371 break;
8372 }
8373 p++;
8374 }
8375 return ret;
8376}
8377
8378static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8379 void __user *argp)
8380{
8381 struct qseecom_ce_info_req req;
8382 struct qseecom_ce_info_req *pinfo = &req;
8383 int ret = 0;
8384 int i;
8385 unsigned int entries;
8386 struct qseecom_ce_info_use *pce_info_use, *p;
8387 int total = 0;
8388 bool found = false;
8389 struct qseecom_ce_pipe_entry *pce_entry;
8390
8391 ret = copy_from_user(pinfo, argp,
8392 sizeof(struct qseecom_ce_info_req));
8393 if (ret)
8394 return ret;
8395
8396 switch (pinfo->usage) {
8397 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8398 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8399 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8400 if (qseecom.support_fde) {
8401 p = qseecom.ce_info.fde;
8402 total = qseecom.ce_info.num_fde;
8403 } else {
8404 pr_err("system does not support fde\n");
8405 return -EINVAL;
8406 }
8407 break;
8408 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8409 if (qseecom.support_pfe) {
8410 p = qseecom.ce_info.pfe;
8411 total = qseecom.ce_info.num_pfe;
8412 } else {
8413 pr_err("system does not support pfe\n");
8414 return -EINVAL;
8415 }
8416 break;
8417 default:
8418 pr_err("unsupported usage %d\n", pinfo->usage);
8419 return -EINVAL;
8420 }
8421
8422 pce_info_use = NULL;
8423 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8424 pinfo->num_ce_pipe_entries = 0;
8425 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8426 pinfo->ce_pipe_entry[i].valid = 0;
8427
8428 for (i = 0; i < total; i++) {
8429
8430 if (p->alloc && !memcmp(p->handle,
8431 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8432 pce_info_use = p;
8433 found = true;
8434 break;
8435 }
8436 p++;
8437 }
8438 if (!pce_info_use)
8439 goto out;
8440 pinfo->unit_num = pce_info_use->unit_num;
8441 if (pce_info_use->num_ce_pipe_entries >
8442 MAX_CE_PIPE_PAIR_PER_UNIT)
8443 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8444 else
8445 entries = pce_info_use->num_ce_pipe_entries;
8446 pinfo->num_ce_pipe_entries = entries;
8447 pce_entry = pce_info_use->ce_pipe_entry;
8448 for (i = 0; i < entries; i++, pce_entry++)
8449 pinfo->ce_pipe_entry[i] = *pce_entry;
8450 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8451 pinfo->ce_pipe_entry[i].valid = 0;
8452out:
8453 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8454 pr_err("copy_to_user failed\n");
8455 ret = -EFAULT;
8456 }
8457 return ret;
8458}
8459
8460/*
8461 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8462 * then whitelist feature is not supported.
8463 */
8464static int qseecom_check_whitelist_feature(void)
8465{
8466 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8467
8468 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8469}
8470
/*
 * qseecom_probe() - platform driver probe.
 *
 * Creates the /dev/qseecom character device, queries TZ for the QSEE
 * version, parses device-tree properties (bus scaling, CE/clock info,
 * PFE/FDE support), initializes crypto-engine clocks, notifies TZ of the
 * secure app region, and registers the bus-scaling client. Every
 * partially acquired resource is released through the goto cleanup chain
 * at the bottom.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qseecom_probe(struct platform_device *pdev)
{
	int rc;
	int i;
	uint32_t feature = 10;	/* feature id sent in the TZ version query */
	struct device *class_dev;
	struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
	struct qseecom_command_scm_resp resp;
	struct qseecom_ce_info_use *pce_info_use = NULL;

	/* Reset global driver state before touching hardware or TZ. */
	qseecom.qsee_bw_count = 0;
	qseecom.qsee_perf_client = 0;
	qseecom.qsee_sfpb_bw_count = 0;

	qseecom.qsee.ce_core_clk = NULL;
	qseecom.qsee.ce_clk = NULL;
	qseecom.qsee.ce_core_src_clk = NULL;
	qseecom.qsee.ce_bus_clk = NULL;

	qseecom.cumulative_mode = 0;
	qseecom.current_mode = INACTIVE;
	qseecom.support_bus_scaling = false;
	qseecom.support_fde = false;
	qseecom.support_pfe = false;

	qseecom.ce_drv.ce_core_clk = NULL;
	qseecom.ce_drv.ce_clk = NULL;
	qseecom.ce_drv.ce_core_src_clk = NULL;
	qseecom.ce_drv.ce_bus_clk = NULL;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);

	qseecom.app_block_ref_cnt = 0;
	init_waitqueue_head(&qseecom.app_block_wq);
	qseecom.whitelist_support = true;

	/* Create the qseecom character device node. */
	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
			QSEECOM_DEV);
	if (IS_ERR(class_dev)) {
		/*
		 * NOTE(review): rc is still 0 (from the chrdev allocation)
		 * at this point, so this message prints a stale value; rc
		 * is only set to -ENOMEM on the next line.
		 */
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&qseecom.cdev, &qseecom_fops);
	qseecom.cdev.owner = THIS_MODULE;

	rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}

	/* Listener/app/kernel-client bookkeeping used by the ioctl paths. */
	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
	spin_lock_init(&qseecom.registered_listener_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
	spin_lock_init(&qseecom.registered_app_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
	spin_lock_init(&qseecom.registered_kclient_list_lock);
	init_waitqueue_head(&qseecom.send_resp_wq);
	qseecom.send_resp_flag = 0;

	/* Query TZ (SCM svc 6, cmd 3) for the QSEE firmware version. */
	qseecom.qsee_version = QSEEE_VERSION_00;
	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
		&resp, sizeof(resp));
	/*
	 * NOTE(review): resp.result is logged before rc is checked; on an
	 * SCM failure this may print an uninitialized value.
	 */
	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
	if (rc) {
		pr_err("Failed to get QSEE version info %d\n", rc);
		goto exit_del_cdev;
	}
	qseecom.qsee_version = resp.result;
	qseecom.qseos_version = QSEOS_VERSION_14;
	qseecom.commonlib_loaded = false;
	qseecom.commonlib64_loaded = false;
	qseecom.pdev = class_dev;
	/* Create ION msm client */
	qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
	if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
		pr_err("Ion client cannot be created\n");
		rc = -ENOMEM;
		goto exit_del_cdev;
	}

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		qseecom.pdev->of_node = pdev->dev.of_node;
		qseecom.support_bus_scaling =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,support-bus-scaling");
		/* Parse CE hardware/pipe info (FDE/PFE tables) from DT. */
		rc = qseecom_retrieve_ce_data(pdev);
		if (rc)
			goto exit_destroy_ion_client;
		qseecom.appsbl_qseecom_support =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,appsbl-qseecom-support");
		pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
				qseecom.appsbl_qseecom_support);

		qseecom.commonlib64_loaded =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,commonlib64-loaded-by-uefi");
		pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
				qseecom.commonlib64_loaded);
		/*
		 * NOTE(review): of_property_read_bool() only reports
		 * presence of the property, so fde_key_size is a flag,
		 * not an actual size value.
		 */
		qseecom.fde_key_size =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,fde-key-size");
		qseecom.no_clock_support =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,no-clock-support");
		if (!qseecom.no_clock_support) {
			/*
			 * NOTE(review): the two log messages below look
			 * swapped relative to the condition — confirm
			 * against the DT binding documentation.
			 */
			pr_info("qseecom clocks handled by other subsystem\n");
		} else {
			pr_info("no-clock-support=0x%x",
					qseecom.no_clock_support);
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,qsee-reentrancy-support",
					&qseecom.qsee_reentrancy_support)) {
			pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
			qseecom.qsee_reentrancy_support = 0;
		} else {
			pr_warn("qseecom.qsee_reentrancy_support = %d\n",
				qseecom.qsee_reentrancy_support);
		}

		/*
		 * The qseecom bus scaling flag can not be enabled when
		 * crypto clock is not handled by HLOS.
		 */
		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
			pr_err("support_bus_scaling flag can not be enabled.\n");
			rc = -EINVAL;
			goto exit_destroy_ion_client;
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,ce-opp-freq",
					&qseecom.ce_opp_freq_hz)) {
			pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
			qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
		}
		rc = __qseecom_init_clk(CLK_QSEE);
		if (rc)
			goto exit_destroy_ion_client;

		/*
		 * A separate CE driver clock set is only needed when the
		 * HLOS CE instance differs from the QSEE one; otherwise
		 * the QSEE clock handles are shared.
		 */
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde)) {
			rc = __qseecom_init_clk(CLK_CE_DRV);
			if (rc) {
				__qseecom_deinit_clk(CLK_QSEE);
				goto exit_destroy_ion_client;
			}
		} else {
			struct qseecom_clk *qclk;

			qclk = &qseecom.qsee;
			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
			qseecom.ce_drv.ce_clk = qclk->ce_clk;
			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
		}

		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						msm_bus_cl_get_pdata(pdev);
		/*
		 * Notify TZ of the secure app region unless the bootloader
		 * already protected it or appsbl handles qseecom.
		 */
		if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
			(!qseecom.is_apps_region_protected &&
			!qseecom.appsbl_qseecom_support)) {
			struct resource *resource = NULL;
			struct qsee_apps_region_info_ireq req;
			struct qsee_apps_region_info_64bit_ireq req_64bit;
			struct qseecom_command_scm_resp resp;
			void *cmd_buf = NULL;
			size_t cmd_len;

			resource = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "secapp-region");
			if (resource) {
				/* 32/64-bit request layout depends on QSEE. */
				if (qseecom.qsee_version < QSEE_VERSION_40) {
					req.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req.addr = (uint32_t)resource->start;
					req.size = resource_size(resource);
					cmd_buf = (void *)&req;
					cmd_len = sizeof(struct
						qsee_apps_region_info_ireq);
					pr_warn("secure app region addr=0x%x size=0x%x",
							req.addr, req.size);
				} else {
					req_64bit.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req_64bit.addr = resource->start;
					req_64bit.size = resource_size(
							resource);
					cmd_buf = (void *)&req_64bit;
					cmd_len = sizeof(struct
					qsee_apps_region_info_64bit_ireq);
					pr_warn("secure app region addr=0x%llx size=0x%x",
						req_64bit.addr, req_64bit.size);
				}
			} else {
				pr_err("Fail to get secure app region info\n");
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
			rc = __qseecom_enable_clk(CLK_QSEE);
			if (rc) {
				pr_err("CLK_QSEE enabling failed (%d)\n", rc);
				rc = -EIO;
				goto exit_deinit_clock;
			}
			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len,
					&resp, sizeof(resp));
			__qseecom_disable_clk(CLK_QSEE);
			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
				pr_err("send secapp reg fail %d resp.res %d\n",
							rc, resp.result);
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
		}
		/*
		 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
		 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
		 * Pls add "qseecom.commonlib64_loaded = true" here too.
		 */
		if (qseecom.is_apps_region_protected ||
					qseecom.appsbl_qseecom_support)
			qseecom.commonlib_loaded = true;
	} else {
		/* Non-DT boot: take bus-scale pdata from platform data. */
		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						pdev->dev.platform_data;
	}
	if (qseecom.support_bus_scaling) {
		/* Timer + work used to drop the bandwidth vote when idle. */
		init_timer(&(qseecom.bw_scale_down_timer));
		INIT_WORK(&qseecom.bw_inactive_req_ws,
				qseecom_bw_inactive_req_work);
		qseecom.bw_scale_down_timer.function =
				qseecom_scale_bus_bandwidth_timer_callback;
	}
	qseecom.timer_running = false;
	qseecom.qsee_perf_client = msm_bus_scale_register_client(
					qseecom_platform_support);

	qseecom.whitelist_support = qseecom_check_whitelist_feature();
	pr_warn("qseecom.whitelist_support = %d\n",
				qseecom.whitelist_support);

	if (!qseecom.qsee_perf_client)
		pr_err("Unable to register bus client\n");

	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return 0;

exit_deinit_clock:
	__qseecom_deinit_clk(CLK_QSEE);
	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
		(qseecom.support_pfe || qseecom.support_fde))
		__qseecom_deinit_clk(CLK_CE_DRV);
exit_destroy_ion_client:
	/* Free the per-unit pipe tables built by qseecom_retrieve_ce_data. */
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.fde);
	}
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.pfe);
	}
	ion_client_destroy(qseecom.ion_clnt);
exit_del_cdev:
	cdev_del(&qseecom.cdev);
exit_destroy_device:
	device_destroy(driver_class, qseecom_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qseecom_device_no, 1);
	return rc;
}
8771
/*
 * qseecom_remove() - platform driver remove.
 *
 * Unloads every registered kernel client's TA, unloads the common
 * library, releases the bus-scaling vote/client, frees the CE info
 * tables, de-inits the CE clocks, and tears down the character device.
 *
 * Return: 0, or the last error from qseecom_unload_app().
 */
static int qseecom_remove(struct platform_device *pdev)
{
	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	int ret = 0;
	int i;
	struct qseecom_ce_pipe_entry *pce_entry;
	struct qseecom_ce_info_use *pce_info_use;

	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);

	/*
	 * NOTE(review): this loop frees entries while iterating with
	 * list_for_each_entry() (not the _safe variant) after list_del(),
	 * and the `!kclient` check below can never be true for a
	 * list_for_each_entry() cursor. It also calls qseecom_unload_app()
	 * (which takes a mutex) while holding a spinlock. Flagged for a
	 * follow-up; behavior left unchanged here.
	 */
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
								list) {
		if (!kclient)
			goto exit_irqrestore;

		/* Break the loop if client handle is NULL */
		if (!kclient->handle)
			goto exit_free_kclient;

		if (list_empty(&kclient->list))
			goto exit_free_kc_handle;

		list_del(&kclient->list);
		mutex_lock(&app_access_lock);
		ret = qseecom_unload_app(kclient->handle->dev, false);
		mutex_unlock(&app_access_lock);
		if (!ret) {
			kzfree(kclient->handle->dev);
			kzfree(kclient->handle);
			kzfree(kclient);
		}
	}

/* NOTE(review): these labels free the loop cursor; only reached via the
 * gotos above, never by falling out of the loop normally.
 */
exit_free_kc_handle:
	kzfree(kclient->handle);
exit_free_kclient:
	kzfree(kclient);
exit_irqrestore:
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);

	/* Unload cmnlib from TZ if it was ever loaded. */
	if (qseecom.qseos_version > QSEEE_VERSION_00)
		qseecom_unload_commonlib_image();

	/* Drop the bandwidth vote and release the bus client. */
	if (qseecom.qsee_perf_client)
		msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
									0);
	if (pdev->dev.platform_data != NULL)
		msm_bus_scale_unregister_client(qseecom.qsee_perf_client);

	if (qseecom.support_bus_scaling) {
		cancel_work_sync(&qseecom.bw_inactive_req_ws);
		del_timer_sync(&qseecom.bw_scale_down_timer);
	}

	/* Free the per-unit CE pipe tables (FDE then PFE). */
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.fde);
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.pfe);

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		__qseecom_deinit_clk(CLK_QSEE);
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde))
			__qseecom_deinit_clk(CLK_CE_DRV);
	}

	ion_client_destroy(qseecom.ion_clnt);

	cdev_del(&qseecom.cdev);

	device_destroy(driver_class, qseecom_device_no);

	class_destroy(driver_class);

	unregister_chrdev_region(qseecom_device_no, 1);

	return ret;
}
8867
8868static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
8869{
8870 int ret = 0;
8871 struct qseecom_clk *qclk;
8872
8873 qclk = &qseecom.qsee;
8874 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
8875 if (qseecom.no_clock_support)
8876 return 0;
8877
8878 mutex_lock(&qsee_bw_mutex);
8879 mutex_lock(&clk_access_lock);
8880
8881 if (qseecom.current_mode != INACTIVE) {
8882 ret = msm_bus_scale_client_update_request(
8883 qseecom.qsee_perf_client, INACTIVE);
8884 if (ret)
8885 pr_err("Fail to scale down bus\n");
8886 else
8887 qseecom.current_mode = INACTIVE;
8888 }
8889
8890 if (qclk->clk_access_cnt) {
8891 if (qclk->ce_clk != NULL)
8892 clk_disable_unprepare(qclk->ce_clk);
8893 if (qclk->ce_core_clk != NULL)
8894 clk_disable_unprepare(qclk->ce_core_clk);
8895 if (qclk->ce_bus_clk != NULL)
8896 clk_disable_unprepare(qclk->ce_bus_clk);
8897 }
8898
8899 del_timer_sync(&(qseecom.bw_scale_down_timer));
8900 qseecom.timer_running = false;
8901
8902 mutex_unlock(&clk_access_lock);
8903 mutex_unlock(&qsee_bw_mutex);
8904 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8905
8906 return 0;
8907}
8908
/*
 * qseecom_resume() - platform resume hook.
 *
 * Restores the pre-suspend bus bandwidth vote (capped at HIGH),
 * re-enables any CE clocks that were active at suspend time, and
 * re-arms the idle scale-down timer. On a clock failure the clocks
 * already enabled are rolled back in reverse order via the goto chain.
 *
 * Return: 0 on success, -EIO if re-enabling a clock fails.
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qseecom.no_clock_support)
		goto exit;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	/* Restore the accumulated vote, capped at HIGH. */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	if (qseecom.cumulative_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	/*
	 * clk_access_cnt non-zero means clocks were held at suspend;
	 * re-enable in core -> iface -> bus order, rolling back on error.
	 */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}

	/*
	 * NOTE(review): mod_timer() here assumes the timer was initialized
	 * in probe (only done when bus scaling is supported) — confirm
	 * this path cannot run with support_bus_scaling false.
	 */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;

/* Rollback chain: undo clocks in reverse order of enabling. */
ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
8988
/* Device-tree match table: binds this driver to "qcom,qseecom" nodes. */
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}
};
8995
/*
 * Platform driver hooks: probe/remove manage the char device, clocks and
 * TZ state; suspend/resume manage CE clocks and bus bandwidth votes.
 */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.owner = THIS_MODULE,
		.of_match_table = qseecom_match,
	},
};
9007
9008static int qseecom_init(void)
9009{
9010 return platform_driver_register(&qseecom_plat_driver);
9011}
9012
/* Module exit point: unregister the qseecom platform driver. */
static void qseecom_exit(void)
{
	platform_driver_unregister(&qseecom_plat_driver);
}
9017
9018MODULE_LICENSE("GPL v2");
9019MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9020
9021module_init(qseecom_init);
9022module_exit(qseecom_exit);