blob: 1c6b315c14f527c0ef4076e3912c921cd5741ab8 [file] [log] [blame]
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
4 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
54#define QSEECOM_DEV "qseecom"
55#define QSEOS_VERSION_14 0x14
56#define QSEEE_VERSION_00 0x400000
57#define QSEE_VERSION_01 0x401000
58#define QSEE_VERSION_02 0x402000
59#define QSEE_VERSION_03 0x403000
60#define QSEE_VERSION_04 0x404000
61#define QSEE_VERSION_05 0x405000
62#define QSEE_VERSION_20 0x800000
63#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
64
65#define QSEE_CE_CLK_100MHZ 100000000
66#define CE_CLK_DIV 1000000
67
68#define QSEECOM_MAX_SG_ENTRY 512
69#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
70 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
71
72#define QSEECOM_INVALID_KEY_ID 0xff
73
74/* Save partition image hash for authentication check */
75#define SCM_SAVE_PARTITION_HASH_ID 0x01
76
77/* Check if enterprise security is activate */
78#define SCM_IS_ACTIVATED_ID 0x02
79
80/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
81#define SCM_MDTP_CIPHER_DIP 0x01
82
83/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
84#define MAX_DIP 0x20000
85
86#define RPMB_SERVICE 0x2000
87#define SSD_SERVICE 0x3000
88
89#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
90#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
91#define TWO 2
92#define QSEECOM_UFS_ICE_CE_NUM 10
93#define QSEECOM_SDCC_ICE_CE_NUM 20
94#define QSEECOM_ICE_FDE_KEY_INDEX 0
95
96#define PHY_ADDR_4G (1ULL<<32)
97
98#define QSEECOM_STATE_NOT_READY 0
99#define QSEECOM_STATE_SUSPEND 1
100#define QSEECOM_STATE_READY 2
101#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
102
103/*
104 * default ce info unit to 0 for
105 * services which
106 * support only single instance.
107 * Most of services are in this category.
108 */
109#define DEFAULT_CE_INFO_UNIT 0
110#define DEFAULT_NUM_CE_INFO_UNIT 1
111
/* Bus/clock vote identifiers used by the bandwidth-scaling code. */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};

/*
 * ICE full-disk-encryption key-size values, pre-shifted by
 * QSEECOM_ICE_FDE_KEY_SIZE_MASK so they can be OR-ed directly into
 * the key flags word.
 */
enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};

/* Role of the entity behind an open qseecom device handle. */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,
	QSEECOM_LISTENER_SERVICE,
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};

/* Crypto-engine clock instances managed by this driver. */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,
	CLK_CE_DRV,
	CLK_INVALID,
};
139
140static struct class *driver_class;
141static dev_t qseecom_device_no;
142
143static DEFINE_MUTEX(qsee_bw_mutex);
144static DEFINE_MUTEX(app_access_lock);
145static DEFINE_MUTEX(clk_access_lock);
146
/*
 * One scatter/gather descriptor handed to TZ alongside a request buffer.
 * The encoding of indexAndFlags (single-address vs. table, 32- vs. 64-bit,
 * offset into the request buffer) is described in the block comment that
 * accompanies SGLISTINFO_SET_INDEX_FLAG() below.
 */
struct sglist_info {
	uint32_t indexAndFlags;
	uint32_t sizeOrCount;
};
151
152/*
153 * The 31th bit indicates only one or multiple physical address inside
154 * the request buffer. If it is set, the index locates a single physical addr
155 * inside the request buffer, and `sizeOrCount` is the size of the memory being
156 * shared at that physical address.
157 * Otherwise, the index locates an array of {start, len} pairs (a
158 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
159 * that array.
160 *
161 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
162 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
163 *
164 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
165 */
166#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
167 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
168
169#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
170
171#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
172
173#define MAKE_WHITELIST_VERSION(major, minor, patch) \
174 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
175
/* Bookkeeping for one registered listener service (one list node each). */
struct qseecom_registered_listener_list {
	struct list_head list;
	struct qseecom_register_listener_req svc;	/* listener id + user sb info */
	void *user_virt_sb_base;	/* shared buffer base in user VA */
	u8 *sb_virt;			/* kernel mapping of the shared buffer */
	phys_addr_t sb_phys;		/* physical address given to TZ */
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	wait_queue_head_t rcv_req_wq;	/* listener thread waits here for requests */
	int rcv_req_flag;
	int send_resp_flag;
	bool listener_in_use;
	/* wq for thread blocked on this listener*/
	wait_queue_head_t listener_block_app_wq;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
};

/* One loaded TZ application, shared (refcounted) across client handles. */
struct qseecom_registered_app_list {
	struct list_head list;
	u32 app_id;		/* id assigned by QSEE at load time */
	u32 ref_cnt;		/* number of handles using this app */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;		/* ELF class (32/64) of the loaded image */
	bool app_blocked;	/* app is blocked waiting on a listener */
	u32 blocked_on_listener_id;
};

/* Tracks in-kernel clients obtained via the qseecom_kernel.h API. */
struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};

/* Description of one crypto-engine "info unit" (FDE or PFE usage). */
struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
	unsigned int unit_num;
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;		/* unit currently handed out to a client */
	uint32_t type;
};

/* Crypto-engine hardware usage topology (FDE and PFE unit tables). */
struct ce_hw_usage_info {
	uint32_t qsee_ce_hw_instance;
	uint32_t num_fde;	/* number of full-disk-encryption units */
	struct qseecom_ce_info_use *fde;
	uint32_t num_pfe;	/* number of per-file-encryption units */
	struct qseecom_ce_info_use *pfe;
};

/* Clock handles for one CE instance (see enum qseecom_ce_hw_instance). */
struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;	/* enable/disable reference count */
};
234
/* Global driver state; there is a single static instance ("qseecom"). */
struct qseecom_control {
	struct ion_client *ion_clnt; /* Ion client */
	struct list_head registered_listener_list_head;
	spinlock_t registered_listener_list_lock;

	struct list_head registered_app_list_head;
	spinlock_t registered_app_list_lock;

	struct list_head registered_kclient_list_head;
	spinlock_t registered_kclient_list_lock;

	wait_queue_head_t send_resp_wq;
	int send_resp_flag;

	uint32_t qseos_version;
	uint32_t qsee_version;	/* selects 32- vs 64-bit request layouts */
	struct device *pdev;
	bool whitelist_support;
	bool commonlib_loaded;
	bool commonlib64_loaded;
	struct ion_handle *cmnlib_ion_handle;
	struct ce_hw_usage_info ce_info;

	int qsee_bw_count;
	int qsee_sfpb_bw_count;

	uint32_t qsee_perf_client;
	struct qseecom_clk qsee;
	struct qseecom_clk ce_drv;

	bool support_bus_scaling;
	bool support_fde;
	bool support_pfe;
	bool fde_key_size;
	uint32_t cumulative_mode;
	enum qseecom_bandwidth_request_mode current_mode;
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;

	uint32_t app_block_ref_cnt;
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;	/* QSEECOM_STATE_NOT_READY/SUSPEND/READY */
	int is_apps_region_protected;	/* from androidboot.keymaster= */
	bool smcinvoke_support;	/* TZ accepted the SMCINVOKE listener SMC */
};

/* Per-ion-fd record for secure buffers allocated on behalf of an app. */
struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;
	size_t size;
	void *vbase;
	dma_addr_t pbase;
};

/* 32-bit {buffer, size} pair as laid out in TEE request parameters. */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};
298
/* Client-app side of an open qseecom handle. */
struct qseecom_client_handle {
	u32 app_id;
	u8 *sb_virt;		/* kernel VA of the app's shared buffer */
	phys_addr_t sb_phys;
	unsigned long user_virt_sb_base;
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
};

/* Listener-service side of an open qseecom handle. */
struct qseecom_listener_handle {
	u32 id;
};

/* The single global driver-state instance. */
static struct qseecom_control qseecom;

/* Per-open-file state; 'type' selects which union member is valid. */
struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;
		struct qseecom_listener_handle listener;
	};
	bool released;
	int abort;
	wait_queue_head_t abort_wq;
	atomic_t ioctl_count;	/* in-flight ioctls (drained on release/abort) */
	bool perf_enabled;
	bool fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	bool use_legacy_cmd;
};

/* Human-readable description for one key-id usage slot. */
struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};

/* Crypto-engine / pipe-pair selection for one unit. */
struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};
344
/*
 * Descriptions of key usages, one per usage index.
 * NOTE(review): entries are presumably indexed by the key-usage enum from
 * the qseecom UAPI -- confirm ordering against that enum before editing.
 */
static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},

	{
		.desc = "Full Disk Encryption",
	},

	{
		.desc = "Per File Encryption",
	},

	{
		.desc = "UFS ICE Full Disk Encryption",
	},

	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};
366
367/* Function proto types */
368static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
369static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
370static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
371static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
372static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
373static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
374 char *cmnlib_name);
375static int qseecom_enable_ice_setup(int usage);
376static int qseecom_disable_ice_setup(int usage);
377static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
378static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
379 void __user *argp);
380static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
381 void __user *argp);
382static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
383 void __user *argp);
384
/*
 * Early handler for the "androidboot.keymaster=<n>" kernel command-line
 * option: parses the integer into qseecom.is_apps_region_protected.
 * Returning 1 tells the __setup machinery the option was consumed.
 * NOTE(review): nonzero presumably means the bootloader already protected
 * the TZ apps region -- confirm against the bootloader documentation.
 */
static int get_qseecom_keymaster_status(char *str)
{
	get_option(&str, &qseecom.is_apps_region_protected);
	return 1;
}
__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
391
/*
 * qseecom_scm_call2() - translate a legacy QSEECOM request into the armv8
 * SMC calling convention and issue it via scm_call2().
 *
 * @svc_id:    legacy SCM service id (6, SCM_SVC_ES or SCM_SVC_TZSCHEDULER)
 * @tz_cmd_id: legacy command id within @svc_id (used by non-QSEOS services)
 * @req_buf:   request structure; for SCM_SVC_TZSCHEDULER its first uint32_t
 *             is the QSEOS command id, which selects the SMC encoding and
 *             the cast applied to the rest of the buffer
 * @resp_buf:  filled from the three SMC return words on exit
 *
 * Many QSEOS commands come in a 32-bit and a 64-bit request layout; the
 * 64-bit layout is used when qseecom.qsee_version >= QSEE_VERSION_40.
 * Commands that pass pointers to TZ stage the data in a kzalloc'd bounce
 * buffer, flush it from the cache, and hand TZ the physical address.
 *
 * Return: 0 if the SMC was issued (TZ status is reported via @resp_buf),
 * -EINVAL for unsupported ids, -ENOMEM on bounce-buffer allocation failure.
 */
static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
			const void *req_buf, void *resp_buf)
{
	int ret = 0;
	uint32_t smc_id = 0;
	uint32_t qseos_cmd_id = 0;
	struct scm_desc desc = {0};
	struct qseecom_command_scm_resp *scm_resp = NULL;

	if (!req_buf || !resp_buf) {
		pr_err("Invalid buffer pointer\n");
		return -EINVAL;
	}
	/* First word of every QSEOS request is the command id. */
	qseos_cmd_id = *(uint32_t *)req_buf;
	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;

	switch (svc_id) {
	case 6: {
		/* Legacy info service: only the feature-version query maps. */
		if (tz_cmd_id == 3) {
			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
			desc.args[0] = *(uint32_t *)req_buf;
		} else {
			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
				svc_id, tz_cmd_id);
			return -EINVAL;
		}
		ret = scm_call2(smc_id, &desc);
		break;
	}
	case SCM_SVC_ES: {
		switch (tz_cmd_id) {
		case SCM_SAVE_PARTITION_HASH_ID: {
			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
			struct qseecom_save_partition_hash_req *p_hash_req =
				(struct qseecom_save_partition_hash_req *)
				req_buf;
			/* Bounce buffer: TZ reads the digest via phys addr. */
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			/* Redundant after kzalloc(), kept for safety. */
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, p_hash_req->digest,
				SHA256_DIGEST_LENGTH);
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
			desc.args[0] = p_hash_req->partition_id;
			desc.args[1] = virt_to_phys(tzbuf);
			desc.args[2] = SHA256_DIGEST_LENGTH;
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		default: {
			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
				tz_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (tz_cmd_id) */
		break;
	} /* end of case SCM_SVC_ES */
	case SCM_SVC_TZSCHEDULER: {
		switch (qseos_cmd_id) {
		case QSEOS_APP_START_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_START_ID;
			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_app_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_SHUTDOWN_COMMAND: {
			struct qseecom_unload_app_ireq *req;

			req = (struct qseecom_unload_app_ireq *)req_buf;
			smc_id = TZ_OS_APP_SHUTDOWN_ID;
			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_LOOKUP_COMMAND: {
			struct qseecom_check_app_ireq *req;
			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			req = (struct qseecom_check_app_ireq *)req_buf;
			pr_debug("Lookup app_name = %s\n", req->app_name);
			/* Copy the name into a flushed bounce buffer. */
			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_APP_LOOKUP_ID;
			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = strlen(req->app_name);
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_APP_REGION_NOTIFICATION: {
			struct qsee_apps_region_info_ireq *req;
			struct qsee_apps_region_info_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
			desc.arginfo =
				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qsee_apps_region_info_ireq *)
					req_buf;
				desc.args[0] = req->addr;
				desc.args[1] = req->size;
			} else {
				req_64bit =
				(struct qsee_apps_region_info_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->addr;
				desc.args[1] = req_64bit->size;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
			struct qseecom_load_lib_image_ireq *req;
			struct qseecom_load_lib_image_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_lib_image_ireq *)
					req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_lib_image_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_REGISTER_LISTENER: {
			struct qseecom_register_listener_ireq *req;
			struct qseecom_register_listener_64bit_ireq *req_64bit;

			desc.arginfo =
				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_register_listener_ireq *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->sb_ptr;
				desc.args[2] = req->sb_len;
			} else {
				req_64bit =
				(struct qseecom_register_listener_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->listener_id;
				desc.args[1] = req_64bit->sb_ptr;
				desc.args[2] = req_64bit->sb_len;
			}
			/*
			 * Probe the smcinvoke-capable registration first;
			 * fall back to the legacy SMC id (and remember the
			 * outcome in qseecom.smcinvoke_support, consumed by
			 * QSEOS_CONTINUE_BLOCKED_REQ_COMMAND below).
			 */
			qseecom.smcinvoke_support = true;
			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			if (ret) {
				qseecom.smcinvoke_support = false;
				smc_id = TZ_OS_REGISTER_LISTENER_ID;
				__qseecom_reentrancy_check_if_no_app_blocked(
					smc_id);
				ret = scm_call2(smc_id, &desc);
			}
			break;
		}
		case QSEOS_DEREGISTER_LISTENER: {
			struct qseecom_unregister_listener_ireq *req;

			req = (struct qseecom_unregister_listener_ireq *)
				req_buf;
			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
			struct qseecom_client_listener_data_irsp *req;

			req = (struct qseecom_client_listener_data_irsp *)
				req_buf;
			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
			desc.arginfo =
				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			desc.args[1] = req->status;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
			struct qseecom_client_listener_data_irsp *req;
			struct qseecom_client_listener_data_64bit_irsp *req_64;

			smc_id =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req =
				(struct qseecom_client_listener_data_irsp *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->status;
				desc.args[2] = req->sglistinfo_ptr;
				desc.args[3] = req->sglistinfo_len;
			} else {
				req_64 =
			(struct qseecom_client_listener_data_64bit_irsp *)
					req_buf;
				desc.args[0] = req_64->listener_id;
				desc.args[1] = req_64->status;
				desc.args[2] = req_64->sglistinfo_ptr;
				desc.args[3] = req_64->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_app_64bit_ireq *)req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}

		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
			struct qseecom_client_send_service_ireq *req;

			req = (struct qseecom_client_send_service_ireq *)
				req_buf;
			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
			desc.args[0] = req->key_type;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_ERASE_COMMAND: {
			smc_id = TZ_OS_RPMB_ERASE_ID;
			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_GENERATE_KEY: {
			/*
			 * Key-service requests strip the leading command-id
			 * word and pass the remainder via a flushed bounce
			 * buffer (same pattern for DELETE/SET/UPDATE below).
			 */
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_GEN_KEY_ID;
			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_DELETE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_DEL_KEY_ID;
			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_SET_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_select_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_select_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_UPDATE_KEY_USERINFO: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_CLOSE_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_REQUEST_CANCELLATION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
			desc.arginfo =
				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
			struct qseecom_continue_blocked_request_ireq *req =
				(struct qseecom_continue_blocked_request_ireq *)
				req_buf;
			/* SMC id depends on how the listener was registered */
			if (qseecom.smcinvoke_support)
				smc_id =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
			else
				smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
			desc.arginfo =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
			desc.args[0] = req->app_or_session_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		default: {
			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
				qseos_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /*end of switch (qsee_cmd_id)  */
		break;
	} /*end of case SCM_SVC_TZSCHEDULER*/
	default: {
		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
			svc_id);
		ret = -EINVAL;
		break;
	}
	} /*end of switch svc_id */
	/* Propagate the three SMC return words back to the caller. */
	scm_resp->result = desc.ret[0];
	scm_resp->resp_type = desc.ret[1];
	scm_resp->data = desc.ret[2];
	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
		scm_resp->result, scm_resp->resp_type, scm_resp->data);
	return ret;
}
1054
1055
1056static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1057 size_t cmd_len, void *resp_buf, size_t resp_len)
1058{
1059 if (!is_scm_armv8())
1060 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1061 resp_buf, resp_len);
1062 else
1063 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1064}
1065
1066static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1067 struct qseecom_register_listener_req *svc)
1068{
1069 struct qseecom_registered_listener_list *ptr;
1070 int unique = 1;
1071 unsigned long flags;
1072
1073 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1074 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1075 if (ptr->svc.listener_id == svc->listener_id) {
1076 pr_err("Service id: %u is already registered\n",
1077 ptr->svc.listener_id);
1078 unique = 0;
1079 break;
1080 }
1081 }
1082 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1083 return unique;
1084}
1085
1086static struct qseecom_registered_listener_list *__qseecom_find_svc(
1087 int32_t listener_id)
1088{
1089 struct qseecom_registered_listener_list *entry = NULL;
1090 unsigned long flags;
1091
1092 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1093 list_for_each_entry(entry,
1094 &qseecom.registered_listener_list_head, list) {
1095 if (entry->svc.listener_id == listener_id)
1096 break;
1097 }
1098 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1099
1100 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1101 pr_err("Service id: %u is not found\n", listener_id);
1102 return NULL;
1103 }
1104
1105 return entry;
1106}
1107
/*
 * Import and map the listener's shared buffer, then register it with
 * QSEE via an SCM call.
 *
 * Imports the ion buffer identified by listener->ifd_data_fd, resolves
 * its physical address and kernel mapping into @svc, and issues a
 * QSEOS_REGISTER_LISTENER request (32-bit or 64-bit layout depending on
 * the QSEE version).
 *
 * Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): on the error paths below the imported ion handle stored
 * in svc->ihandle is not freed here -- presumably the caller's cleanup
 * (kzfree of the entry) relies on teardown elsewhere; confirm the handle
 * is not leaked when registration fails.
 */
static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
				struct qseecom_dev_handle *handle,
				struct qseecom_register_listener_req *listener)
{
	int ret = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_register_listener_64bit_ireq req_64bit;
	struct qseecom_command_scm_resp resp;
	ion_phys_addr_t pa;
	void *cmd_buf = NULL;
	size_t cmd_len;

	/* Get the handle of the shared fd */
	svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					listener->ifd_data_fd);
	if (IS_ERR_OR_NULL(svc->ihandle)) {
		pr_err("Ion client could not retrieve the handle\n");
		return -ENOMEM;
	}

	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
	if (ret) {
		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
			ret);
		return ret;
	}
	/* Populate the structure for sending scm call to load image */
	svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
	if (IS_ERR_OR_NULL(svc->sb_virt)) {
		pr_err("ION memory mapping for listener shared buffer failed\n");
		return -ENOMEM;
	}
	svc->sb_phys = (phys_addr_t)pa;

	/* QSEE < 4.0 expects the 32-bit request layout */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
		req.listener_id = svc->svc.listener_id;
		req.sb_len = svc->sb_length;
		req.sb_ptr = (uint32_t)svc->sb_phys;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(struct qseecom_register_listener_ireq);
	} else {
		req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
		req_64bit.listener_id = svc->svc.listener_id;
		req_64bit.sb_len = svc->sb_length;
		req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
		cmd_buf = (void *)&req_64bit;
		cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
	}

	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
					&resp, sizeof(resp));
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		return -EINVAL;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Error SB registration req: resp.result = %d\n",
			resp.result);
		return -EPERM;
	}
	return 0;
}
1175
1176static int qseecom_register_listener(struct qseecom_dev_handle *data,
1177 void __user *argp)
1178{
1179 int ret = 0;
1180 unsigned long flags;
1181 struct qseecom_register_listener_req rcvd_lstnr;
1182 struct qseecom_registered_listener_list *new_entry;
1183
1184 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1185 if (ret) {
1186 pr_err("copy_from_user failed\n");
1187 return ret;
1188 }
1189 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1190 rcvd_lstnr.sb_size))
1191 return -EFAULT;
1192
1193 data->listener.id = 0;
1194 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
1195 pr_err("Service is not unique and is already registered\n");
1196 data->released = true;
1197 return -EBUSY;
1198 }
1199
1200 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1201 if (!new_entry)
1202 return -ENOMEM;
1203 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1204 new_entry->rcv_req_flag = 0;
1205
1206 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1207 new_entry->sb_length = rcvd_lstnr.sb_size;
1208 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1209 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
1210 pr_err("qseecom_set_sb_memoryfailed\n");
1211 kzfree(new_entry);
1212 return -ENOMEM;
1213 }
1214
1215 data->listener.id = rcvd_lstnr.listener_id;
1216 init_waitqueue_head(&new_entry->rcv_req_wq);
1217 init_waitqueue_head(&new_entry->listener_block_app_wq);
1218 new_entry->send_resp_flag = 0;
1219 new_entry->listener_in_use = false;
1220 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1221 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1222 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1223
1224 return ret;
1225}
1226
/*
 * IOCTL/release handler: deregister this handle's listener from QSEE
 * and tear down its resources.
 *
 * Sequence: tell TZ to deregister, wake any waiter on the listener's
 * receive queue, wait for in-flight ioctls on this handle to drain,
 * then unlink and free the list entry and unmap/free the ion buffer.
 *
 * Returns 0 on success, negative errno on failure (entry is left
 * registered if the TZ deregister call fails).
 */
static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	int ret = 0;
	unsigned long flags;
	uint32_t unmap_mem = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL; /* Retrieve phy addr */

	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
					sizeof(req), &resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		return ret;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		return -EPERM;
	}

	/* abort any pending receive and wake the listener thread */
	data->abort = 1;
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
			list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			wake_up_all(&ptr_svc->rcv_req_wq);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* wait until only this ioctl remains in flight on the handle */
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			return ret;
		}
	}

	/* unlink and free the entry; remember the ion handle for unmap */
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc,
			&qseecom.registered_listener_list_head, list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			if (ptr_svc->sb_virt) {
				unmap_mem = 1;
				ihandle = ptr_svc->ihandle;
			}
			list_del(&ptr_svc->list);
			kzfree(ptr_svc);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Unmap the memory */
	if (unmap_mem) {
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	data->released = true;
	return ret;
}
1300
1301static int __qseecom_set_msm_bus_request(uint32_t mode)
1302{
1303 int ret = 0;
1304 struct qseecom_clk *qclk;
1305
1306 qclk = &qseecom.qsee;
1307 if (qclk->ce_core_src_clk != NULL) {
1308 if (mode == INACTIVE) {
1309 __qseecom_disable_clk(CLK_QSEE);
1310 } else {
1311 ret = __qseecom_enable_clk(CLK_QSEE);
1312 if (ret)
1313 pr_err("CLK enabling failed (%d) MODE (%d)\n",
1314 ret, mode);
1315 }
1316 }
1317
1318 if ((!ret) && (qseecom.current_mode != mode)) {
1319 ret = msm_bus_scale_client_update_request(
1320 qseecom.qsee_perf_client, mode);
1321 if (ret) {
1322 pr_err("Bandwidth req failed(%d) MODE (%d)\n",
1323 ret, mode);
1324 if (qclk->ce_core_src_clk != NULL) {
1325 if (mode == INACTIVE) {
1326 ret = __qseecom_enable_clk(CLK_QSEE);
1327 if (ret)
1328 pr_err("CLK enable failed\n");
1329 } else
1330 __qseecom_disable_clk(CLK_QSEE);
1331 }
1332 }
1333 qseecom.current_mode = mode;
1334 }
1335 return ret;
1336}
1337
/*
 * Deferred work: drop the bus bandwidth vote to INACTIVE after the
 * scale-down timer fires.  Runs in process context so it may take the
 * mutexes; only downgrades if the timer is still marked running
 * (i.e. nothing re-voted bandwidth in the meantime).
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
1350
/*
 * Scale-down timer callback: defer the actual INACTIVE bus vote to the
 * workqueue, since the work handler takes mutexes that cannot be held
 * in timer (atomic) context.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1355
1356static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1357{
1358 struct qseecom_clk *qclk;
1359 int ret = 0;
1360
1361 mutex_lock(&clk_access_lock);
1362 if (ce == CLK_QSEE)
1363 qclk = &qseecom.qsee;
1364 else
1365 qclk = &qseecom.ce_drv;
1366
1367 if (qclk->clk_access_cnt > 2) {
1368 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1369 ret = -EINVAL;
1370 goto err_dec_ref_cnt;
1371 }
1372 if (qclk->clk_access_cnt == 2)
1373 qclk->clk_access_cnt--;
1374
1375err_dec_ref_cnt:
1376 mutex_unlock(&clk_access_lock);
1377 return ret;
1378}
1379
1380
1381static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1382{
1383 int32_t ret = 0;
1384 int32_t request_mode = INACTIVE;
1385
1386 mutex_lock(&qsee_bw_mutex);
1387 if (mode == 0) {
1388 if (qseecom.cumulative_mode > MEDIUM)
1389 request_mode = HIGH;
1390 else
1391 request_mode = qseecom.cumulative_mode;
1392 } else {
1393 request_mode = mode;
1394 }
1395
1396 ret = __qseecom_set_msm_bus_request(request_mode);
1397 if (ret) {
1398 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1399 ret, request_mode);
1400 goto err_scale_timer;
1401 }
1402
1403 if (qseecom.timer_running) {
1404 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1405 if (ret) {
1406 pr_err("Failed to decrease clk ref count.\n");
1407 goto err_scale_timer;
1408 }
1409 del_timer_sync(&(qseecom.bw_scale_down_timer));
1410 qseecom.timer_running = false;
1411 }
1412err_scale_timer:
1413 mutex_unlock(&qsee_bw_mutex);
1414 return ret;
1415}
1416
1417
1418static int qseecom_unregister_bus_bandwidth_needs(
1419 struct qseecom_dev_handle *data)
1420{
1421 int32_t ret = 0;
1422
1423 qseecom.cumulative_mode -= data->mode;
1424 data->mode = INACTIVE;
1425
1426 return ret;
1427}
1428
1429static int __qseecom_register_bus_bandwidth_needs(
1430 struct qseecom_dev_handle *data, uint32_t request_mode)
1431{
1432 int32_t ret = 0;
1433
1434 if (data->mode == INACTIVE) {
1435 qseecom.cumulative_mode += request_mode;
1436 data->mode = request_mode;
1437 } else {
1438 if (data->mode != request_mode) {
1439 qseecom.cumulative_mode -= data->mode;
1440 qseecom.cumulative_mode += request_mode;
1441 data->mode = request_mode;
1442 }
1443 }
1444 return ret;
1445}
1446
1447static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1448{
1449 int ret = 0;
1450
1451 ret = qsee_vote_for_clock(data, CLK_DFAB);
1452 if (ret) {
1453 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1454 goto perf_enable_exit;
1455 }
1456 ret = qsee_vote_for_clock(data, CLK_SFPB);
1457 if (ret) {
1458 qsee_disable_clock_vote(data, CLK_DFAB);
1459 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1460 goto perf_enable_exit;
1461 }
1462
1463perf_enable_exit:
1464 return ret;
1465}
1466
1467static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1468 void __user *argp)
1469{
1470 int32_t ret = 0;
1471 int32_t req_mode;
1472
1473 if (qseecom.no_clock_support)
1474 return 0;
1475
1476 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1477 if (ret) {
1478 pr_err("copy_from_user failed\n");
1479 return ret;
1480 }
1481 if (req_mode > HIGH) {
1482 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1483 return -EINVAL;
1484 }
1485
1486 /*
1487 * Register bus bandwidth needs if bus scaling feature is enabled;
1488 * otherwise, qseecom enable/disable clocks for the client directly.
1489 */
1490 if (qseecom.support_bus_scaling) {
1491 mutex_lock(&qsee_bw_mutex);
1492 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1493 mutex_unlock(&qsee_bw_mutex);
1494 } else {
1495 pr_debug("Bus scaling feature is NOT enabled\n");
1496 pr_debug("request bandwidth mode %d for the client\n",
1497 req_mode);
1498 if (req_mode != INACTIVE) {
1499 ret = qseecom_perf_enable(data);
1500 if (ret)
1501 pr_err("Failed to vote for clock with err %d\n",
1502 ret);
1503 } else {
1504 qsee_disable_clock_vote(data, CLK_DFAB);
1505 qsee_disable_clock_vote(data, CLK_SFPB);
1506 }
1507 }
1508 return ret;
1509}
1510
1511static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1512{
1513 if (qseecom.no_clock_support)
1514 return;
1515
1516 mutex_lock(&qsee_bw_mutex);
1517 qseecom.bw_scale_down_timer.expires = jiffies +
1518 msecs_to_jiffies(duration);
1519 mod_timer(&(qseecom.bw_scale_down_timer),
1520 qseecom.bw_scale_down_timer.expires);
1521 qseecom.timer_running = true;
1522 mutex_unlock(&qsee_bw_mutex);
1523}
1524
1525static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1526{
1527 if (!qseecom.support_bus_scaling)
1528 qsee_disable_clock_vote(data, CLK_SFPB);
1529 else
1530 __qseecom_add_bw_scale_down_timer(
1531 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1532}
1533
1534static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1535{
1536 int ret = 0;
1537
1538 if (qseecom.support_bus_scaling) {
1539 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1540 if (ret)
1541 pr_err("Failed to set bw MEDIUM.\n");
1542 } else {
1543 ret = qsee_vote_for_clock(data, CLK_SFPB);
1544 if (ret)
1545 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1546 }
1547 return ret;
1548}
1549
/*
 * IOCTL handler: attach a client's shared ion buffer to this handle.
 *
 * Validates the request, imports the ion buffer by fd, resolves its
 * physical address, checks the requested size fits the allocation, and
 * records the kernel mapping plus user/phys addresses in data->client.
 *
 * Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): on the error paths after a successful import, the ion
 * handle remains in data->client.ihandle without being freed here --
 * presumably released on handle teardown; confirm there is no leak when
 * ion_phys()/ion_map_kernel() fail.
 */
static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
						void __user *argp)
{
	ion_phys_addr_t pa;
	int32_t ret;
	struct qseecom_set_sb_mem_param_req req;
	size_t len;

	/* Copy the relevant information needed for loading the image */
	if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
		return -EFAULT;

	if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
					(req.sb_len == 0)) {
		pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
			req.ifd_data_fd, req.sb_len, req.virt_sb_base);
		return -EFAULT;
	}
	/* the shared buffer must be a writable user range */
	if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
			req.sb_len))
		return -EFAULT;

	/* Get the handle of the shared fd */
	data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						req.ifd_data_fd);
	if (IS_ERR_OR_NULL(data->client.ihandle)) {
		pr_err("Ion client could not retrieve the handle\n");
		return -ENOMEM;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
	if (ret) {

		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
			ret);
		return ret;
	}

	/* requested window must fit inside the actual allocation */
	if (len < req.sb_len) {
		pr_err("Requested length (0x%x) is > allocated (%zu)\n",
			req.sb_len, len);
		return -EINVAL;
	}
	/* Populate the structure for sending scm call to load image */
	data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
							data->client.ihandle);
	if (IS_ERR_OR_NULL(data->client.sb_virt)) {
		pr_err("ION memory mapping for client shared buf failed\n");
		return -ENOMEM;
	}
	data->client.sb_phys = (phys_addr_t)pa;
	data->client.sb_length = req.sb_len;
	data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
	return 0;
}
1605
1606static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
1607{
1608 int ret;
1609
1610 ret = (qseecom.send_resp_flag != 0);
1611 return ret || data->abort;
1612}
1613
1614static int __qseecom_reentrancy_listener_has_sent_rsp(
1615 struct qseecom_dev_handle *data,
1616 struct qseecom_registered_listener_list *ptr_svc)
1617{
1618 int ret;
1619
1620 ret = (ptr_svc->send_resp_flag != 0);
1621 return ret || data->abort;
1622}
1623
1624static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
1625 struct qseecom_command_scm_resp *resp,
1626 struct qseecom_client_listener_data_irsp *send_data_rsp,
1627 struct qseecom_registered_listener_list *ptr_svc,
1628 uint32_t lstnr) {
1629 int ret = 0;
1630
1631 send_data_rsp->status = QSEOS_RESULT_FAILURE;
1632 qseecom.send_resp_flag = 0;
1633 send_data_rsp->qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
1634 send_data_rsp->listener_id = lstnr;
1635 if (ptr_svc)
1636 pr_warn("listener_id:%x, lstnr: %x\n",
1637 ptr_svc->svc.listener_id, lstnr);
1638 if (ptr_svc && ptr_svc->ihandle) {
1639 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
1640 ptr_svc->sb_virt, ptr_svc->sb_length,
1641 ION_IOC_CLEAN_INV_CACHES);
1642 if (ret) {
1643 pr_err("cache operation failed %d\n", ret);
1644 return ret;
1645 }
1646 }
1647
1648 if (lstnr == RPMB_SERVICE) {
1649 ret = __qseecom_enable_clk(CLK_QSEE);
1650 if (ret)
1651 return ret;
1652 }
1653 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, send_data_rsp,
1654 sizeof(send_data_rsp), resp, sizeof(*resp));
1655 if (ret) {
1656 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1657 ret, data->client.app_id);
1658 if (lstnr == RPMB_SERVICE)
1659 __qseecom_disable_clk(CLK_QSEE);
1660 return ret;
1661 }
1662 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1663 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1664 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1665 resp->result, data->client.app_id, lstnr);
1666 ret = -EINVAL;
1667 }
1668 if (lstnr == RPMB_SERVICE)
1669 __qseecom_disable_clk(CLK_QSEE);
1670 return ret;
1671}
1672
1673static void __qseecom_clean_listener_sglistinfo(
1674 struct qseecom_registered_listener_list *ptr_svc)
1675{
1676 if (ptr_svc->sglist_cnt) {
1677 memset(ptr_svc->sglistinfo_ptr, 0,
1678 SGLISTINFO_TABLE_SIZE);
1679 ptr_svc->sglist_cnt = 0;
1680 }
1681}
1682
1683static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1684 struct qseecom_command_scm_resp *resp)
1685{
1686 int ret = 0;
1687 int rc = 0;
1688 uint32_t lstnr;
1689 unsigned long flags;
1690 struct qseecom_client_listener_data_irsp send_data_rsp;
1691 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
1692 struct qseecom_registered_listener_list *ptr_svc = NULL;
1693 sigset_t new_sigset;
1694 sigset_t old_sigset;
1695 uint32_t status;
1696 void *cmd_buf = NULL;
1697 size_t cmd_len;
1698 struct sglist_info *table = NULL;
1699
1700 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1701 lstnr = resp->data;
1702 /*
1703 * Wake up blocking lsitener service with the lstnr id
1704 */
1705 spin_lock_irqsave(&qseecom.registered_listener_list_lock,
1706 flags);
1707 list_for_each_entry(ptr_svc,
1708 &qseecom.registered_listener_list_head, list) {
1709 if (ptr_svc->svc.listener_id == lstnr) {
1710 ptr_svc->listener_in_use = true;
1711 ptr_svc->rcv_req_flag = 1;
1712 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1713 break;
1714 }
1715 }
1716 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
1717 flags);
1718
1719 if (ptr_svc == NULL) {
1720 pr_err("Listener Svc %d does not exist\n", lstnr);
1721 __qseecom_qseos_fail_return_resp_tz(data, resp,
1722 &send_data_rsp, ptr_svc, lstnr);
1723 return -EINVAL;
1724 }
1725
1726 if (!ptr_svc->ihandle) {
1727 pr_err("Client handle is not initialized\n");
1728 __qseecom_qseos_fail_return_resp_tz(data, resp,
1729 &send_data_rsp, ptr_svc, lstnr);
1730 return -EINVAL;
1731 }
1732
1733 if (ptr_svc->svc.listener_id != lstnr) {
1734 pr_warn("Service requested does not exist\n");
1735 __qseecom_qseos_fail_return_resp_tz(data, resp,
1736 &send_data_rsp, ptr_svc, lstnr);
1737 return -ERESTARTSYS;
1738 }
1739 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1740
1741 /* initialize the new signal mask with all signals*/
1742 sigfillset(&new_sigset);
1743 /* block all signals */
1744 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1745
1746 do {
1747 /*
1748 * When reentrancy is not supported, check global
1749 * send_resp_flag; otherwise, check this listener's
1750 * send_resp_flag.
1751 */
1752 if (!qseecom.qsee_reentrancy_support &&
1753 !wait_event_freezable(qseecom.send_resp_wq,
1754 __qseecom_listener_has_sent_rsp(data))) {
1755 break;
1756 }
1757
1758 if (qseecom.qsee_reentrancy_support &&
1759 !wait_event_freezable(qseecom.send_resp_wq,
1760 __qseecom_reentrancy_listener_has_sent_rsp(
1761 data, ptr_svc))) {
1762 break;
1763 }
1764 } while (1);
1765
1766 /* restore signal mask */
1767 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
1768 if (data->abort) {
1769 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
1770 data->client.app_id, lstnr, ret);
1771 rc = -ENODEV;
1772 status = QSEOS_RESULT_FAILURE;
1773 } else {
1774 status = QSEOS_RESULT_SUCCESS;
1775 }
1776
1777 qseecom.send_resp_flag = 0;
1778 ptr_svc->send_resp_flag = 0;
1779 table = ptr_svc->sglistinfo_ptr;
1780 if (qseecom.qsee_version < QSEE_VERSION_40) {
1781 send_data_rsp.listener_id = lstnr;
1782 send_data_rsp.status = status;
1783 send_data_rsp.sglistinfo_ptr =
1784 (uint32_t)virt_to_phys(table);
1785 send_data_rsp.sglistinfo_len =
1786 SGLISTINFO_TABLE_SIZE;
1787 dmac_flush_range((void *)table,
1788 (void *)table + SGLISTINFO_TABLE_SIZE);
1789 cmd_buf = (void *)&send_data_rsp;
1790 cmd_len = sizeof(send_data_rsp);
1791 } else {
1792 send_data_rsp_64bit.listener_id = lstnr;
1793 send_data_rsp_64bit.status = status;
1794 send_data_rsp_64bit.sglistinfo_ptr =
1795 virt_to_phys(table);
1796 send_data_rsp_64bit.sglistinfo_len =
1797 SGLISTINFO_TABLE_SIZE;
1798 dmac_flush_range((void *)table,
1799 (void *)table + SGLISTINFO_TABLE_SIZE);
1800 cmd_buf = (void *)&send_data_rsp_64bit;
1801 cmd_len = sizeof(send_data_rsp_64bit);
1802 }
1803 if (qseecom.whitelist_support == false)
1804 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1805 else
1806 *(uint32_t *)cmd_buf =
1807 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
1808 if (ptr_svc) {
1809 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1810 ptr_svc->ihandle,
1811 ptr_svc->sb_virt, ptr_svc->sb_length,
1812 ION_IOC_CLEAN_INV_CACHES);
1813 if (ret) {
1814 pr_err("cache operation failed %d\n", ret);
1815 return ret;
1816 }
1817 }
1818
1819 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1820 ret = __qseecom_enable_clk(CLK_QSEE);
1821 if (ret)
1822 return ret;
1823 }
1824
1825 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1826 cmd_buf, cmd_len, resp, sizeof(*resp));
1827 ptr_svc->listener_in_use = false;
1828 __qseecom_clean_listener_sglistinfo(ptr_svc);
1829 if (ret) {
1830 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1831 ret, data->client.app_id);
1832 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1833 __qseecom_disable_clk(CLK_QSEE);
1834 return ret;
1835 }
1836 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1837 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1838 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1839 resp->result, data->client.app_id, lstnr);
1840 ret = -EINVAL;
1841 }
1842 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1843 __qseecom_disable_clk(CLK_QSEE);
1844
1845 }
1846 if (rc)
1847 return rc;
1848
1849 return ret;
1850}
1851
/*
 * Legacy (non-smcinvoke) handling of a TZ app blocked on a busy
 * listener.
 *
 * Resolves the blocked app (if not supplied), waits -- with
 * app_access_lock dropped -- until the listener identified by
 * resp->data is free, then issues QSEOS_CONTINUE_BLOCKED_REQ_COMMAND
 * to TZ and marks resp INCOMPLETE so the caller resumes normal
 * incomplete-command processing.
 *
 * Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): on the interrupted-wait path (-ERESTARTSYS) this
 * returns without re-acquiring app_access_lock, without decrementing
 * app_block_ref_cnt, and with ptr_app->app_blocked still true --
 * confirm the caller tolerates this, or that this matches the
 * upstream fix.
 */
static int __qseecom_process_blocked_on_listener_legacy(
		struct qseecom_command_scm_resp *resp,
		struct qseecom_registered_app_list *ptr_app,
		struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	bool found_app = false;
	unsigned long flags;

	if (!resp || !data) {
		pr_err("invalid resp or data pointer\n");
		ret = -EINVAL;
		goto exit;
	}

	/* find app_id & img_name from list */
	if (!ptr_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
					list) {
			if ((ptr_app->app_id == data->client.app_id) &&
				(!strcmp(ptr_app->app_name,
					data->client.app_name))) {
				found_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
					flags);
		if (!found_app) {
			pr_err("app_id %d (%s) is not found\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -ENOENT;
			goto exit;
		}
	}

	list_ptr = __qseecom_find_svc(resp->data);
	if (!list_ptr) {
		pr_err("Invalid listener ID\n");
		ret = -ENODATA;
		goto exit;
	}
	pr_debug("lsntr %d in_use = %d\n",
			resp->data, list_ptr->listener_in_use);
	ptr_app->blocked_on_listener_id = resp->data;

	/* sleep until listener is available */
	qseecom.app_block_ref_cnt++;
	ptr_app->app_blocked = true;
	mutex_unlock(&app_access_lock);
	if (wait_event_freezable(
			list_ptr->listener_block_app_wq,
			!list_ptr->listener_in_use)) {
		pr_err("Interrupted: listener_id %d, app_id %d\n",
			resp->data, ptr_app->app_id);
		ret = -ERESTARTSYS;
		goto exit;
	}
	mutex_lock(&app_access_lock);
	ptr_app->app_blocked = false;
	qseecom.app_block_ref_cnt--;

	ptr_app->blocked_on_listener_id = 0;
	/* notify the blocked app that listener is available */
	pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
		resp->data, data->client.app_id,
		data->client.app_name);
	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
	ireq.app_or_session_id = data->client.app_id;
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
	if (ret) {
		pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
			data->client.app_id,
			data->client.app_name, ret);
		goto exit;
	}
	/*
	 * After TZ app is unblocked, then continue to next case
	 * for incomplete request processing
	 */
	resp->result = QSEOS_RESULT_INCOMPLETE;
exit:
	return ret;
}
1943
/*
 * Smcinvoke-mode handling of a TZ session blocked on a busy listener.
 *
 * The blocked session id arrives in resp->resp_type.  Waits -- with
 * app_access_lock dropped -- until the listener identified by
 * resp->data is free, then issues QSEOS_CONTINUE_BLOCKED_REQ_COMMAND
 * (keyed by session id) and marks resp INCOMPLETE for the caller.
 *
 * Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): like the legacy variant, the interrupted-wait path
 * (-ERESTARTSYS) returns without re-taking app_access_lock or
 * decrementing app_block_ref_cnt -- confirm this is handled by the
 * caller.
 */
static int __qseecom_process_blocked_on_listener_smcinvoke(
			struct qseecom_command_scm_resp *resp)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	unsigned int session_id;

	if (!resp) {
		pr_err("invalid resp pointer\n");
		ret = -EINVAL;
		goto exit;
	}
	session_id = resp->resp_type;
	list_ptr = __qseecom_find_svc(resp->data);
	if (!list_ptr) {
		pr_err("Invalid listener ID\n");
		ret = -ENODATA;
		goto exit;
	}
	pr_debug("lsntr %d in_use = %d\n",
			resp->data, list_ptr->listener_in_use);
	/* sleep until listener is available */
	qseecom.app_block_ref_cnt++;
	mutex_unlock(&app_access_lock);
	if (wait_event_freezable(
			list_ptr->listener_block_app_wq,
			!list_ptr->listener_in_use)) {
		pr_err("Interrupted: listener_id %d, session_id %d\n",
			resp->data, session_id);
		ret = -ERESTARTSYS;
		goto exit;
	}
	mutex_lock(&app_access_lock);
	qseecom.app_block_ref_cnt--;

	/* notify TZ that listener is available */
	pr_warn("Lsntr %d is available, unblock session(%d) in TZ\n",
			resp->data, session_id);
	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
	ireq.app_or_session_id = session_id;
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
	if (ret) {
		pr_err("scm_call for continue blocked req for session %d failed, ret %d\n",
			session_id, ret);
		goto exit;
	}
	resp->result = QSEOS_RESULT_INCOMPLETE;
exit:
	return ret;
}
1998
1999static int __qseecom_process_reentrancy_blocked_on_listener(
2000 struct qseecom_command_scm_resp *resp,
2001 struct qseecom_registered_app_list *ptr_app,
2002 struct qseecom_dev_handle *data)
2003{
2004 if (!qseecom.smcinvoke_support)
2005 return __qseecom_process_blocked_on_listener_legacy(
2006 resp, ptr_app, data);
2007 else
2008 return __qseecom_process_blocked_on_listener_smcinvoke(
2009 resp);
2010}
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002011static int __qseecom_reentrancy_process_incomplete_cmd(
2012 struct qseecom_dev_handle *data,
2013 struct qseecom_command_scm_resp *resp)
2014{
2015 int ret = 0;
2016 int rc = 0;
2017 uint32_t lstnr;
2018 unsigned long flags;
2019 struct qseecom_client_listener_data_irsp send_data_rsp;
2020 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
2021 struct qseecom_registered_listener_list *ptr_svc = NULL;
2022 sigset_t new_sigset;
2023 sigset_t old_sigset;
2024 uint32_t status;
2025 void *cmd_buf = NULL;
2026 size_t cmd_len;
2027 struct sglist_info *table = NULL;
2028
2029 while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
2030 lstnr = resp->data;
2031 /*
2032 * Wake up blocking lsitener service with the lstnr id
2033 */
2034 spin_lock_irqsave(&qseecom.registered_listener_list_lock,
2035 flags);
2036 list_for_each_entry(ptr_svc,
2037 &qseecom.registered_listener_list_head, list) {
2038 if (ptr_svc->svc.listener_id == lstnr) {
2039 ptr_svc->listener_in_use = true;
2040 ptr_svc->rcv_req_flag = 1;
2041 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2042 break;
2043 }
2044 }
2045 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
2046 flags);
2047
2048 if (ptr_svc == NULL) {
2049 pr_err("Listener Svc %d does not exist\n", lstnr);
2050 return -EINVAL;
2051 }
2052
2053 if (!ptr_svc->ihandle) {
2054 pr_err("Client handle is not initialized\n");
2055 return -EINVAL;
2056 }
2057
2058 if (ptr_svc->svc.listener_id != lstnr) {
2059 pr_warn("Service requested does not exist\n");
2060 return -ERESTARTSYS;
2061 }
2062 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2063
2064 /* initialize the new signal mask with all signals*/
2065 sigfillset(&new_sigset);
2066
2067 /* block all signals */
2068 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2069
2070 /* unlock mutex btw waking listener and sleep-wait */
2071 mutex_unlock(&app_access_lock);
2072 do {
2073 if (!wait_event_freezable(qseecom.send_resp_wq,
2074 __qseecom_reentrancy_listener_has_sent_rsp(
2075 data, ptr_svc))) {
2076 break;
2077 }
2078 } while (1);
2079 /* lock mutex again after resp sent */
2080 mutex_lock(&app_access_lock);
2081 ptr_svc->send_resp_flag = 0;
2082 qseecom.send_resp_flag = 0;
2083
2084 /* restore signal mask */
2085 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2086 if (data->abort) {
2087 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
2088 data->client.app_id, lstnr, ret);
2089 rc = -ENODEV;
2090 status = QSEOS_RESULT_FAILURE;
2091 } else {
2092 status = QSEOS_RESULT_SUCCESS;
2093 }
2094 table = ptr_svc->sglistinfo_ptr;
2095 if (qseecom.qsee_version < QSEE_VERSION_40) {
2096 send_data_rsp.listener_id = lstnr;
2097 send_data_rsp.status = status;
2098 send_data_rsp.sglistinfo_ptr =
2099 (uint32_t)virt_to_phys(table);
2100 send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
2101 dmac_flush_range((void *)table,
2102 (void *)table + SGLISTINFO_TABLE_SIZE);
2103 cmd_buf = (void *)&send_data_rsp;
2104 cmd_len = sizeof(send_data_rsp);
2105 } else {
2106 send_data_rsp_64bit.listener_id = lstnr;
2107 send_data_rsp_64bit.status = status;
2108 send_data_rsp_64bit.sglistinfo_ptr =
2109 virt_to_phys(table);
2110 send_data_rsp_64bit.sglistinfo_len =
2111 SGLISTINFO_TABLE_SIZE;
2112 dmac_flush_range((void *)table,
2113 (void *)table + SGLISTINFO_TABLE_SIZE);
2114 cmd_buf = (void *)&send_data_rsp_64bit;
2115 cmd_len = sizeof(send_data_rsp_64bit);
2116 }
2117 if (qseecom.whitelist_support == false)
2118 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2119 else
2120 *(uint32_t *)cmd_buf =
2121 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
2122 if (ptr_svc) {
2123 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2124 ptr_svc->ihandle,
2125 ptr_svc->sb_virt, ptr_svc->sb_length,
2126 ION_IOC_CLEAN_INV_CACHES);
2127 if (ret) {
2128 pr_err("cache operation failed %d\n", ret);
2129 return ret;
2130 }
2131 }
2132 if (lstnr == RPMB_SERVICE) {
2133 ret = __qseecom_enable_clk(CLK_QSEE);
2134 if (ret)
2135 return ret;
2136 }
2137
2138 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2139 cmd_buf, cmd_len, resp, sizeof(*resp));
2140 ptr_svc->listener_in_use = false;
2141 __qseecom_clean_listener_sglistinfo(ptr_svc);
2142 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2143
2144 if (ret) {
2145 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2146 ret, data->client.app_id);
2147 goto exit;
2148 }
2149
2150 switch (resp->result) {
2151 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2152 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2153 lstnr, data->client.app_id, resp->data);
2154 if (lstnr == resp->data) {
2155 pr_err("lstnr %d should not be blocked!\n",
2156 lstnr);
2157 ret = -EINVAL;
2158 goto exit;
2159 }
2160 ret = __qseecom_process_reentrancy_blocked_on_listener(
2161 resp, NULL, data);
2162 if (ret) {
2163 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2164 data->client.app_id,
2165 data->client.app_name, resp->data);
2166 goto exit;
2167 }
2168 case QSEOS_RESULT_SUCCESS:
2169 case QSEOS_RESULT_INCOMPLETE:
2170 break;
2171 default:
2172 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2173 resp->result, data->client.app_id, lstnr);
2174 ret = -EINVAL;
2175 goto exit;
2176 }
2177exit:
2178 if (lstnr == RPMB_SERVICE)
2179 __qseecom_disable_clk(CLK_QSEE);
2180
2181 }
2182 if (rc)
2183 return rc;
2184
2185 return ret;
2186}
2187
/*
 * QSEE does not support reentrancy for OS-level commands until RE phase-3,
 * and an OS-level scm_call will fail if any TZ app is currently blocked.
 * Therefore, before sending an OS-level scm call, check whether any app is
 * blocked and, if so, wait until all apps are unblocked.
 */
static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
{
	sigset_t new_sigset, old_sigset;

	/* Guard is only needed for Trusted-OS-owned calls on reentrancy
	 * phase 1/2 firmware; phase 0 and phase 3 skip it entirely. */
	if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
		qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
		IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
		/* thread sleep until this app unblocked */
		while (qseecom.app_block_ref_cnt > 0) {
			/* block all signals so only the wait condition or the
			 * freezer can end the freezable wait below */
			sigfillset(&new_sigset);
			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
			/* drop the global lock while sleeping so listener
			 * threads can run and unblock the apps */
			mutex_unlock(&app_access_lock);
			do {
				if (!wait_event_freezable(qseecom.app_block_wq,
					(qseecom.app_block_ref_cnt == 0)))
					break;
			} while (1);
			/* re-take lock, then restore the caller's mask */
			mutex_lock(&app_access_lock);
			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		}
	}
}
2216
/*
 * An scm_call that sends data will fail if this TA is blocked, or if more
 * than one TA is requesting listener services; so first check whether this
 * call needs to wait.
 */
static void __qseecom_reentrancy_check_if_this_app_blocked(
			struct qseecom_registered_app_list *ptr_app)
{
	sigset_t new_sigset, old_sigset;

	if (qseecom.qsee_reentrancy_support) {
		/* wait while this app is marked blocked, or while more than
		 * one app is waiting on listener service (ref cnt > 1) */
		while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
			/* thread sleep until this app unblocked */
			/* block all signals so only the wait condition or the
			 * freezer can end the freezable wait below */
			sigfillset(&new_sigset);
			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
			/* drop the global lock while sleeping so listener
			 * threads can run and clear the blocked state */
			mutex_unlock(&app_access_lock);
			do {
				if (!wait_event_freezable(qseecom.app_block_wq,
					(!ptr_app->app_blocked &&
					qseecom.app_block_ref_cnt <= 1)))
					break;
			} while (1);
			/* re-take lock, then restore the caller's mask */
			mutex_lock(&app_access_lock);
			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		}
	}
}
2244
2245static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2246 uint32_t *app_id)
2247{
2248 int32_t ret;
2249 struct qseecom_command_scm_resp resp;
2250 bool found_app = false;
2251 struct qseecom_registered_app_list *entry = NULL;
2252 unsigned long flags = 0;
2253
2254 if (!app_id) {
2255 pr_err("Null pointer to app_id\n");
2256 return -EINVAL;
2257 }
2258 *app_id = 0;
2259
2260 /* check if app exists and has been registered locally */
2261 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2262 list_for_each_entry(entry,
2263 &qseecom.registered_app_list_head, list) {
2264 if (!strcmp(entry->app_name, req.app_name)) {
2265 found_app = true;
2266 break;
2267 }
2268 }
2269 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2270 if (found_app) {
2271 pr_debug("Found app with id %d\n", entry->app_id);
2272 *app_id = entry->app_id;
2273 return 0;
2274 }
2275
2276 memset((void *)&resp, 0, sizeof(resp));
2277
2278 /* SCM_CALL to check if app_id for the mentioned app exists */
2279 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2280 sizeof(struct qseecom_check_app_ireq),
2281 &resp, sizeof(resp));
2282 if (ret) {
2283 pr_err("scm_call to check if app is already loaded failed\n");
2284 return -EINVAL;
2285 }
2286
2287 if (resp.result == QSEOS_RESULT_FAILURE)
2288 return 0;
2289
2290 switch (resp.resp_type) {
2291 /*qsee returned listener type response */
2292 case QSEOS_LISTENER_ID:
2293 pr_err("resp type is of listener type instead of app");
2294 return -EINVAL;
2295 case QSEOS_APP_ID:
2296 *app_id = resp.data;
2297 return 0;
2298 default:
2299 pr_err("invalid resp type (%d) from qsee",
2300 resp.resp_type);
2301 return -ENODEV;
2302 }
2303}
2304
2305static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2306{
2307 struct qseecom_registered_app_list *entry = NULL;
2308 unsigned long flags = 0;
2309 u32 app_id = 0;
2310 struct ion_handle *ihandle; /* Ion handle */
2311 struct qseecom_load_img_req load_img_req;
2312 int32_t ret = 0;
2313 ion_phys_addr_t pa = 0;
2314 size_t len;
2315 struct qseecom_command_scm_resp resp;
2316 struct qseecom_check_app_ireq req;
2317 struct qseecom_load_app_ireq load_req;
2318 struct qseecom_load_app_64bit_ireq load_req_64bit;
2319 void *cmd_buf = NULL;
2320 size_t cmd_len;
2321 bool first_time = false;
2322
2323 /* Copy the relevant information needed for loading the image */
2324 if (copy_from_user(&load_img_req,
2325 (void __user *)argp,
2326 sizeof(struct qseecom_load_img_req))) {
2327 pr_err("copy_from_user failed\n");
2328 return -EFAULT;
2329 }
2330
2331 /* Check and load cmnlib */
2332 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2333 if (!qseecom.commonlib_loaded &&
2334 load_img_req.app_arch == ELFCLASS32) {
2335 ret = qseecom_load_commonlib_image(data, "cmnlib");
2336 if (ret) {
2337 pr_err("failed to load cmnlib\n");
2338 return -EIO;
2339 }
2340 qseecom.commonlib_loaded = true;
2341 pr_debug("cmnlib is loaded\n");
2342 }
2343
2344 if (!qseecom.commonlib64_loaded &&
2345 load_img_req.app_arch == ELFCLASS64) {
2346 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2347 if (ret) {
2348 pr_err("failed to load cmnlib64\n");
2349 return -EIO;
2350 }
2351 qseecom.commonlib64_loaded = true;
2352 pr_debug("cmnlib64 is loaded\n");
2353 }
2354 }
2355
2356 if (qseecom.support_bus_scaling) {
2357 mutex_lock(&qsee_bw_mutex);
2358 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2359 mutex_unlock(&qsee_bw_mutex);
2360 if (ret)
2361 return ret;
2362 }
2363
2364 /* Vote for the SFPB clock */
2365 ret = __qseecom_enable_clk_scale_up(data);
2366 if (ret)
2367 goto enable_clk_err;
2368
2369 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2370 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2371 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2372
2373 ret = __qseecom_check_app_exists(req, &app_id);
2374 if (ret < 0)
2375 goto loadapp_err;
2376
2377 if (app_id) {
2378 pr_debug("App id %d (%s) already exists\n", app_id,
2379 (char *)(req.app_name));
2380 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2381 list_for_each_entry(entry,
2382 &qseecom.registered_app_list_head, list){
2383 if (entry->app_id == app_id) {
2384 entry->ref_cnt++;
2385 break;
2386 }
2387 }
2388 spin_unlock_irqrestore(
2389 &qseecom.registered_app_list_lock, flags);
2390 ret = 0;
2391 } else {
2392 first_time = true;
2393 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2394 (char *)(load_img_req.img_name));
2395 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002396 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002397 load_img_req.ifd_data_fd);
2398 if (IS_ERR_OR_NULL(ihandle)) {
2399 pr_err("Ion client could not retrieve the handle\n");
2400 ret = -ENOMEM;
2401 goto loadapp_err;
2402 }
2403
2404 /* Get the physical address of the ION BUF */
2405 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2406 if (ret) {
2407 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2408 ret);
2409 goto loadapp_err;
2410 }
2411 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2412 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2413 len, load_img_req.mdt_len,
2414 load_img_req.img_len);
2415 ret = -EINVAL;
2416 goto loadapp_err;
2417 }
2418 /* Populate the structure for sending scm call to load image */
2419 if (qseecom.qsee_version < QSEE_VERSION_40) {
2420 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2421 load_req.mdt_len = load_img_req.mdt_len;
2422 load_req.img_len = load_img_req.img_len;
2423 strlcpy(load_req.app_name, load_img_req.img_name,
2424 MAX_APP_NAME_SIZE);
2425 load_req.phy_addr = (uint32_t)pa;
2426 cmd_buf = (void *)&load_req;
2427 cmd_len = sizeof(struct qseecom_load_app_ireq);
2428 } else {
2429 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2430 load_req_64bit.mdt_len = load_img_req.mdt_len;
2431 load_req_64bit.img_len = load_img_req.img_len;
2432 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2433 MAX_APP_NAME_SIZE);
2434 load_req_64bit.phy_addr = (uint64_t)pa;
2435 cmd_buf = (void *)&load_req_64bit;
2436 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2437 }
2438
2439 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2440 ION_IOC_CLEAN_INV_CACHES);
2441 if (ret) {
2442 pr_err("cache operation failed %d\n", ret);
2443 goto loadapp_err;
2444 }
2445
2446 /* SCM_CALL to load the app and get the app_id back */
2447 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2448 cmd_len, &resp, sizeof(resp));
2449 if (ret) {
2450 pr_err("scm_call to load app failed\n");
2451 if (!IS_ERR_OR_NULL(ihandle))
2452 ion_free(qseecom.ion_clnt, ihandle);
2453 ret = -EINVAL;
2454 goto loadapp_err;
2455 }
2456
2457 if (resp.result == QSEOS_RESULT_FAILURE) {
2458 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2459 if (!IS_ERR_OR_NULL(ihandle))
2460 ion_free(qseecom.ion_clnt, ihandle);
2461 ret = -EFAULT;
2462 goto loadapp_err;
2463 }
2464
2465 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2466 ret = __qseecom_process_incomplete_cmd(data, &resp);
2467 if (ret) {
2468 pr_err("process_incomplete_cmd failed err: %d\n",
2469 ret);
2470 if (!IS_ERR_OR_NULL(ihandle))
2471 ion_free(qseecom.ion_clnt, ihandle);
2472 ret = -EFAULT;
2473 goto loadapp_err;
2474 }
2475 }
2476
2477 if (resp.result != QSEOS_RESULT_SUCCESS) {
2478 pr_err("scm_call failed resp.result unknown, %d\n",
2479 resp.result);
2480 if (!IS_ERR_OR_NULL(ihandle))
2481 ion_free(qseecom.ion_clnt, ihandle);
2482 ret = -EFAULT;
2483 goto loadapp_err;
2484 }
2485
2486 app_id = resp.data;
2487
2488 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2489 if (!entry) {
2490 ret = -ENOMEM;
2491 goto loadapp_err;
2492 }
2493 entry->app_id = app_id;
2494 entry->ref_cnt = 1;
2495 entry->app_arch = load_img_req.app_arch;
2496 /*
2497 * keymaster app may be first loaded as "keymaste" by qseecomd,
2498 * and then used as "keymaster" on some targets. To avoid app
2499 * name checking error, register "keymaster" into app_list and
2500 * thread private data.
2501 */
2502 if (!strcmp(load_img_req.img_name, "keymaste"))
2503 strlcpy(entry->app_name, "keymaster",
2504 MAX_APP_NAME_SIZE);
2505 else
2506 strlcpy(entry->app_name, load_img_req.img_name,
2507 MAX_APP_NAME_SIZE);
2508 entry->app_blocked = false;
2509 entry->blocked_on_listener_id = 0;
2510
2511 /* Deallocate the handle */
2512 if (!IS_ERR_OR_NULL(ihandle))
2513 ion_free(qseecom.ion_clnt, ihandle);
2514
2515 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2516 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2517 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2518 flags);
2519
2520 pr_warn("App with id %u (%s) now loaded\n", app_id,
2521 (char *)(load_img_req.img_name));
2522 }
2523 data->client.app_id = app_id;
2524 data->client.app_arch = load_img_req.app_arch;
2525 if (!strcmp(load_img_req.img_name, "keymaste"))
2526 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2527 else
2528 strlcpy(data->client.app_name, load_img_req.img_name,
2529 MAX_APP_NAME_SIZE);
2530 load_img_req.app_id = app_id;
2531 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2532 pr_err("copy_to_user failed\n");
2533 ret = -EFAULT;
2534 if (first_time == true) {
2535 spin_lock_irqsave(
2536 &qseecom.registered_app_list_lock, flags);
2537 list_del(&entry->list);
2538 spin_unlock_irqrestore(
2539 &qseecom.registered_app_list_lock, flags);
2540 kzfree(entry);
2541 }
2542 }
2543
2544loadapp_err:
2545 __qseecom_disable_clk_scale_down(data);
2546enable_clk_err:
2547 if (qseecom.support_bus_scaling) {
2548 mutex_lock(&qsee_bw_mutex);
2549 qseecom_unregister_bus_bandwidth_needs(data);
2550 mutex_unlock(&qsee_bw_mutex);
2551 }
2552 return ret;
2553}
2554
/*
 * Prepare a client handle for unload: wake any thread waiting for a
 * listener response, then wait until this handle's other in-flight ioctls
 * have drained (the caller's own ioctl holds one count, hence "> 1").
 * Returns 1 ("proceed with unload") or -ERESTARTSYS if interrupted.
 */
static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
{
	int ret = 1;	/* Set unload app */

	wake_up_all(&qseecom.send_resp_wq);
	/* with reentrancy support the caller holds app_access_lock; drop it
	 * so the other ioctls can finish, re-take it before returning */
	if (qseecom.qsee_reentrancy_support)
		mutex_unlock(&app_access_lock);
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			break;
		}
	}
	if (qseecom.qsee_reentrancy_support)
		mutex_lock(&app_access_lock);
	return ret;
}
2574
2575static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2576{
2577 int ret = 0;
2578
2579 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2580 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2581 ion_free(qseecom.ion_clnt, data->client.ihandle);
2582 data->client.ihandle = NULL;
2583 }
2584 return ret;
2585}
2586
/*
 * Unload a trusted app for this client handle. The app is actually shut
 * down in TZ only when it crashed or this is the last local reference;
 * otherwise only the reference count is decremented. The keymaster app is
 * never unloaded from TZ. The handle's shared memory is always released
 * and the handle marked released.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;		/* send APP_SHUTDOWN to TZ */
	bool found_app = false;		/* id+name matched a list entry */
	bool found_dead_app = false;	/* id matched but name differed */

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* keymaster stays resident in TZ; only release local resources */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	/* drain this handle's ioctls, then wait until no app is blocked */
	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/* a blocked app must not be treated
					 * as crashed for unload purposes */
					if (ptr_app->app_blocked)
						app_crash = false;
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				/* same id but different name: stale entry */
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for sending scm call to unload app */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
								req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
					data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
					data->client.app_id);
		/* TZ may need listener round-trips to finish the shutdown */
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
									ret);
				goto unload_exit;
			}
		}
	}

	if (found_app) {
		/* update/remove the local registration under the lock */
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}
unload_exit:
	/* always release shared memory and mark the handle dead */
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2709
2710static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2711 unsigned long virt)
2712{
2713 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2714}
2715
2716static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2717 unsigned long virt)
2718{
2719 return (uintptr_t)data->client.sb_virt +
2720 (virt - data->client.user_virt_sb_base);
2721}
2722
2723int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2724 struct qseecom_send_svc_cmd_req *req_ptr,
2725 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2726{
2727 int ret = 0;
2728 void *req_buf = NULL;
2729
2730 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2731 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2732 req_ptr, send_svc_ireq_ptr);
2733 return -EINVAL;
2734 }
2735
2736 /* Clients need to ensure req_buf is at base offset of shared buffer */
2737 if ((uintptr_t)req_ptr->cmd_req_buf !=
2738 data_ptr->client.user_virt_sb_base) {
2739 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2740 return -EINVAL;
2741 }
2742
2743 if (data_ptr->client.sb_length <
2744 sizeof(struct qseecom_rpmb_provision_key)) {
2745 pr_err("shared buffer is too small to hold key type\n");
2746 return -EINVAL;
2747 }
2748 req_buf = data_ptr->client.sb_virt;
2749
2750 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2751 send_svc_ireq_ptr->key_type =
2752 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2753 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2754 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2755 data_ptr, (uintptr_t)req_ptr->resp_buf));
2756 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2757
2758 return ret;
2759}
2760
2761int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2762 struct qseecom_send_svc_cmd_req *req_ptr,
2763 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2764{
2765 int ret = 0;
2766 uint32_t reqd_len_sb_in = 0;
2767
2768 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2769 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2770 req_ptr, send_svc_ireq_ptr);
2771 return -EINVAL;
2772 }
2773
2774 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2775 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2776 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2777 pr_err("Required: %u, Available: %zu\n",
2778 reqd_len_sb_in, data_ptr->client.sb_length);
2779 return -ENOMEM;
2780 }
2781
2782 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2783 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2784 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2785 data_ptr, (uintptr_t)req_ptr->resp_buf));
2786 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2787
2788 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2789 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2790
2791
2792 return ret;
2793}
2794
/*
 * Validate a user send-service-cmd request against the client's shared
 * buffer: non-NULL pointers, both buffers starting and ending inside the
 * shared buffer, non-zero lengths, and no integer overflow in any
 * pointer+length sum. Returns 0 if valid, -EINVAL/-ENOMEM otherwise.
 * The order of checks is part of the observable behavior (which error
 * message fires first) — do not reorder.
 */
static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_svc_cmd_req *req)
{
	if (!req || !req->resp_buf || !req->cmd_req_buf) {
		pr_err("req or cmd buffer or response buffer is null\n");
		return -EINVAL;
	}

	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->client.sb_virt == NULL) {
		pr_err("sb_virt null\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base == 0) {
		pr_err("user_virt_sb_base is null\n");
		return -EINVAL;
	}

	if (data->client.sb_length == 0) {
		pr_err("sb_length is 0\n");
		return -EINVAL;
	}

	/* both user buffers must start inside the shared buffer */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* guards the cmd_req_len + resp_len sum just below */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
				data->client.sb_length);
		return -ENOMEM;
	}
	/* guard each pointer + length sum used in the final range check */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* both buffers must also end inside the shared buffer */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
2879
2880static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
2881 void __user *argp)
2882{
2883 int ret = 0;
2884 struct qseecom_client_send_service_ireq send_svc_ireq;
2885 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
2886 struct qseecom_command_scm_resp resp;
2887 struct qseecom_send_svc_cmd_req req;
2888 void *send_req_ptr;
2889 size_t req_buf_size;
2890
2891 /*struct qseecom_command_scm_resp resp;*/
2892
2893 if (copy_from_user(&req,
2894 (void __user *)argp,
2895 sizeof(req))) {
2896 pr_err("copy_from_user failed\n");
2897 return -EFAULT;
2898 }
2899
2900 if (__validate_send_service_cmd_inputs(data, &req))
2901 return -EINVAL;
2902
2903 data->type = QSEECOM_SECURE_SERVICE;
2904
2905 switch (req.cmd_id) {
2906 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
2907 case QSEOS_RPMB_ERASE_COMMAND:
2908 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
2909 send_req_ptr = &send_svc_ireq;
2910 req_buf_size = sizeof(send_svc_ireq);
2911 if (__qseecom_process_rpmb_svc_cmd(data, &req,
2912 send_req_ptr))
2913 return -EINVAL;
2914 break;
2915 case QSEOS_FSM_LTEOTA_REQ_CMD:
2916 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
2917 case QSEOS_FSM_IKE_REQ_CMD:
2918 case QSEOS_FSM_IKE_REQ_RSP_CMD:
2919 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
2920 case QSEOS_FSM_OEM_FUSE_READ_ROW:
2921 case QSEOS_FSM_ENCFS_REQ_CMD:
2922 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
2923 send_req_ptr = &send_fsm_key_svc_ireq;
2924 req_buf_size = sizeof(send_fsm_key_svc_ireq);
2925 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
2926 send_req_ptr))
2927 return -EINVAL;
2928 break;
2929 default:
2930 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
2931 return -EINVAL;
2932 }
2933
2934 if (qseecom.support_bus_scaling) {
2935 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
2936 if (ret) {
2937 pr_err("Fail to set bw HIGH\n");
2938 return ret;
2939 }
2940 } else {
2941 ret = qseecom_perf_enable(data);
2942 if (ret) {
2943 pr_err("Failed to vote for clocks with err %d\n", ret);
2944 goto exit;
2945 }
2946 }
2947
2948 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
2949 data->client.sb_virt, data->client.sb_length,
2950 ION_IOC_CLEAN_INV_CACHES);
2951 if (ret) {
2952 pr_err("cache operation failed %d\n", ret);
2953 goto exit;
2954 }
2955 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2956 (const void *)send_req_ptr,
2957 req_buf_size, &resp, sizeof(resp));
2958 if (ret) {
2959 pr_err("qseecom_scm_call failed with err: %d\n", ret);
2960 if (!qseecom.support_bus_scaling) {
2961 qsee_disable_clock_vote(data, CLK_DFAB);
2962 qsee_disable_clock_vote(data, CLK_SFPB);
2963 } else {
2964 __qseecom_add_bw_scale_down_timer(
2965 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
2966 }
2967 goto exit;
2968 }
2969 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
2970 data->client.sb_virt, data->client.sb_length,
2971 ION_IOC_INV_CACHES);
2972 if (ret) {
2973 pr_err("cache operation failed %d\n", ret);
2974 goto exit;
2975 }
2976 switch (resp.result) {
2977 case QSEOS_RESULT_SUCCESS:
2978 break;
2979 case QSEOS_RESULT_INCOMPLETE:
2980 pr_debug("qseos_result_incomplete\n");
2981 ret = __qseecom_process_incomplete_cmd(data, &resp);
2982 if (ret) {
2983 pr_err("process_incomplete_cmd fail with result: %d\n",
2984 resp.result);
2985 }
2986 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
2987 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05302988 if (put_user(resp.result,
2989 (uint32_t __user *)req.resp_buf)) {
2990 ret = -EINVAL;
2991 goto exit;
2992 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002993 ret = 0;
2994 }
2995 break;
2996 case QSEOS_RESULT_FAILURE:
2997 pr_err("scm call failed with resp.result: %d\n", resp.result);
2998 ret = -EINVAL;
2999 break;
3000 default:
3001 pr_err("Response result %d not supported\n",
3002 resp.result);
3003 ret = -EINVAL;
3004 break;
3005 }
3006 if (!qseecom.support_bus_scaling) {
3007 qsee_disable_clock_vote(data, CLK_DFAB);
3008 qsee_disable_clock_vote(data, CLK_SFPB);
3009 } else {
3010 __qseecom_add_bw_scale_down_timer(
3011 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3012 }
3013
3014exit:
3015 return ret;
3016}
3017
/*
 * Validate a user send-cmd request against the client's shared buffer.
 * Unlike the service-cmd validator, this variant's first check permits a
 * NULL resp_buf when resp_len is 0 and allows resp_len == 0.
 * NOTE(review): a NULL resp_buf (resp_len == 0) passes the first check but
 * is then rejected by the range check below, since 0 is below
 * user_virt_sb_base — confirm whether that is intended.
 * The order of checks is observable (which error message fires first) —
 * do not reorder.
 */
static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)

{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}
	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
						(req->cmd_req_buf == NULL)) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* both user buffers must start inside the shared buffer */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* guards the cmd_req_len + resp_len sum just below */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
				data->client.sb_length);
		return -ENOMEM;
	}
	/* guard each pointer + length sum used in the final range check */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* both buffers must also end inside the shared buffer */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
3087
/*
 * Handle the TZ response for a command issued while QSEE reentrancy
 * support is enabled.
 *
 * QSEOS_RESULT_BLOCKED_ON_LISTENER: the app is blocked waiting on a
 * listener; unblock handling runs first and, on success, execution
 * deliberately falls through to the INCOMPLETE path to finish the
 * original command.
 * QSEOS_RESULT_INCOMPLETE: finish the command while marking the app
 * blocked and holding the global app_block_ref_cnt, then wake any
 * waiters on app_block_wq.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}

		/* fall through: resume the now-unblocked command */
	case QSEOS_RESULT_INCOMPLETE:
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
				ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
						resp->result);
		return -EINVAL;
	}
}
3126
/*
 * Send a validated client command to its trusted app over SCM.
 *
 * Looks up the registered app for this client, builds the 32-bit or
 * 64-bit send-data request (selected by QSEE version), flushes the
 * sglist info table and the shared buffer to memory, makes the SCM
 * call, then processes the response (reentrancy-aware when the
 * firmware supports it).  On every exit path the shared buffer cache
 * is invalidated so the CPU observes TZ's writes.
 *
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	int ret2 = 0;	/* result of the final cache invalidate */
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
								list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* legacy interface: physical addresses carried as u32 */
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* make the sglist table visible to TZ before the SCM call */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* the first field of either ireq struct is the command id */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* write back client request data so TZ reads what the client wrote */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/* invalidate so the CPU observes TZ's writes to the shared buffer */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret2);
		return ret2;
	}
	return ret;
}
3264
3265static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3266{
3267 int ret = 0;
3268 struct qseecom_send_cmd_req req;
3269
3270 ret = copy_from_user(&req, argp, sizeof(req));
3271 if (ret) {
3272 pr_err("copy_from_user failed\n");
3273 return ret;
3274 }
3275
3276 if (__validate_send_cmd_inputs(data, &req))
3277 return -EINVAL;
3278
3279 ret = __qseecom_send_cmd(data, &req);
3280
3281 if (ret)
3282 return ret;
3283
3284 return ret;
3285}
3286
3287int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3288 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3289 struct qseecom_dev_handle *data, int i) {
3290
3291 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3292 (req->ifd_data[i].fd > 0)) {
3293 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3294 (req->ifd_data[i].cmd_buf_offset >
3295 req->cmd_req_len - sizeof(uint32_t))) {
3296 pr_err("Invalid offset (req len) 0x%x\n",
3297 req->ifd_data[i].cmd_buf_offset);
3298 return -EINVAL;
3299 }
3300 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3301 (lstnr_resp->ifd_data[i].fd > 0)) {
3302 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3303 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3304 lstnr_resp->resp_len - sizeof(uint32_t))) {
3305 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3306 lstnr_resp->ifd_data[i].cmd_buf_offset);
3307 return -EINVAL;
3308 }
3309 }
3310 return 0;
3311}
3312
/*
 * Patch (or, with @cleanup set, un-patch) ION fd physical addresses
 * into a modified-fd command/response buffer using 32-bit sg entries.
 *
 * @msg is a qseecom_send_modfd_cmd_req for client apps, or a
 * qseecom_send_modfd_listener_resp for listener services.  For each
 * ifd_data[] slot with a positive fd, the ION buffer's physical
 * address (single sg entry) or an array of qseecom_sg_entry (multiple
 * entries) is written at cmd_buf_offset.  When @cleanup is set the
 * previously patched fields are zeroed and the ION buffer's cache is
 * invalidated; otherwise the cache is cleaned and the patch location
 * is recorded in the sglist info table for the whitelist SCM call.
 *
 * Returns 0 on success; -EFAULT/-EINVAL/-ENOMEM on failure.
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* pick the fd and patch target for this slot, if any */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* single entry: patch one 32-bit physical address */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* multiple entries: patch a qseecom_sg_entry array */
			struct qseecom_sg_entry *update;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
						(req->cmd_req_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
				(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			/* drop any stale CPU-cached view of TZ's writes */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* push the client's data to memory for TZ */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* record patch location for whitelist SCM calls */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3529
3530static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3531 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3532{
3533 struct scatterlist *sg = sg_ptr->sgl;
3534 struct qseecom_sg_entry_64bit *sg_entry;
3535 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3536 void *buf;
3537 uint i;
3538 size_t size;
3539 dma_addr_t coh_pmem;
3540
3541 if (fd_idx >= MAX_ION_FD) {
3542 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3543 return -ENOMEM;
3544 }
3545 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3546 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3547 /* Allocate a contiguous kernel buffer */
3548 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3549 size = (size + PAGE_SIZE) & PAGE_MASK;
3550 buf = dma_alloc_coherent(qseecom.pdev,
3551 size, &coh_pmem, GFP_KERNEL);
3552 if (buf == NULL) {
3553 pr_err("failed to alloc memory for sg buf\n");
3554 return -ENOMEM;
3555 }
3556 /* update qseecom_sg_list_buf_hdr_64bit */
3557 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3558 buf_hdr->new_buf_phys_addr = coh_pmem;
3559 buf_hdr->nents_total = sg_ptr->nents;
3560 /* save the left sg entries into new allocated buf */
3561 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3562 for (i = 0; i < sg_ptr->nents; i++) {
3563 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3564 sg_entry->len = sg->length;
3565 sg_entry++;
3566 sg = sg_next(sg);
3567 }
3568
3569 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3570 data->client.sec_buf_fd[fd_idx].vbase = buf;
3571 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3572 data->client.sec_buf_fd[fd_idx].size = size;
3573
3574 return 0;
3575}
3576
/*
 * 64-bit variant of __qseecom_update_cmd_buf(): patch (or un-patch,
 * with @cleanup) ION fd physical addresses into a modified-fd buffer
 * using 64-bit sg entries.
 *
 * Differences from the 32-bit path: when an fd maps to more entries
 * than QSEECOM_MAX_SG_ENTRY, the list is spilled into a separately
 * allocated DMA buffer via __qseecom_allocate_sg_list_buffer() (and
 * freed again on cleanup); addresses are written as 64-bit values.
 *
 * Returns 0 on success; -EFAULT/-EINVAL/-ENOMEM on failure.  On the
 * error path all spill buffers recorded in sec_buf_fd[] are freed.
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* pick the fd and patch target for this slot, if any */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			/*
			 * Too many entries for the inline format: spill
			 * the SG list into a separate DMA buffer (or
			 * free that buffer again when cleaning up).
			 */
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* single entry: patch one 64-bit physical address */
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			/* multiple entries: patch a 64-bit sg-entry array */
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
				(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
					(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			/* drop any stale CPU-cached view of TZ's writes */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* push the client's data to memory for TZ */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* record patch location for whitelist SCM calls */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* free every spill buffer allocated for any fd so far */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3775
3776static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3777 void __user *argp,
3778 bool is_64bit_addr)
3779{
3780 int ret = 0;
3781 int i;
3782 struct qseecom_send_modfd_cmd_req req;
3783 struct qseecom_send_cmd_req send_cmd_req;
3784
3785 ret = copy_from_user(&req, argp, sizeof(req));
3786 if (ret) {
3787 pr_err("copy_from_user failed\n");
3788 return ret;
3789 }
3790
3791 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3792 send_cmd_req.cmd_req_len = req.cmd_req_len;
3793 send_cmd_req.resp_buf = req.resp_buf;
3794 send_cmd_req.resp_len = req.resp_len;
3795
3796 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3797 return -EINVAL;
3798
3799 /* validate offsets */
3800 for (i = 0; i < MAX_ION_FD; i++) {
3801 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3802 pr_err("Invalid offset %d = 0x%x\n",
3803 i, req.ifd_data[i].cmd_buf_offset);
3804 return -EINVAL;
3805 }
3806 }
3807 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3808 (uintptr_t)req.cmd_req_buf);
3809 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3810 (uintptr_t)req.resp_buf);
3811
3812 if (!is_64bit_addr) {
3813 ret = __qseecom_update_cmd_buf(&req, false, data);
3814 if (ret)
3815 return ret;
3816 ret = __qseecom_send_cmd(data, &send_cmd_req);
3817 if (ret)
3818 return ret;
3819 ret = __qseecom_update_cmd_buf(&req, true, data);
3820 if (ret)
3821 return ret;
3822 } else {
3823 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3824 if (ret)
3825 return ret;
3826 ret = __qseecom_send_cmd(data, &send_cmd_req);
3827 if (ret)
3828 return ret;
3829 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3830 if (ret)
3831 return ret;
3832 }
3833
3834 return ret;
3835}
3836
/* Modified-fd send-cmd ioctl using the 32-bit sg-entry address format. */
static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, false);
}
3842
/* Modified-fd send-cmd ioctl using the 64-bit sg-entry address format. */
static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, true);
}
3848
3849
3850
3851static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3852 struct qseecom_registered_listener_list *svc)
3853{
3854 int ret;
3855
3856 ret = (svc->rcv_req_flag != 0);
3857 return ret || data->abort;
3858}
3859
/*
 * Block the calling listener thread until TZ posts a request for it
 * (rcv_req_flag set by the listener dispatch path) or the client is
 * aborted.  The wait is freezable so it does not block suspend.
 *
 * Returns 0 when a request is pending (the flag is consumed here),
 * -ENODATA for an unknown listener id, -ERESTARTSYS when interrupted,
 * -ENODEV on abort.
 */
static int qseecom_receive_req(struct qseecom_dev_handle *data)
{
	int ret = 0;
	struct qseecom_registered_listener_list *this_lstnr;

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (!this_lstnr) {
		pr_err("Invalid listener ID\n");
		return -ENODATA;
	}

	while (1) {
		if (wait_event_freezable(this_lstnr->rcv_req_wq,
				__qseecom_listener_has_rcvd_req(data,
				this_lstnr))) {
			pr_debug("Interrupted: exiting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			/* woken up for different reason */
			return -ERESTARTSYS;
		}

		if (data->abort) {
			pr_err("Aborting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			return -ENODEV;
		}
		/* consume the pending-request flag before servicing it */
		this_lstnr->rcv_req_flag = 0;
		break;
	}
	return ret;
}
3891
3892static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3893{
3894 unsigned char app_arch = 0;
3895 struct elf32_hdr *ehdr;
3896 struct elf64_hdr *ehdr64;
3897
3898 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3899
3900 switch (app_arch) {
3901 case ELFCLASS32: {
3902 ehdr = (struct elf32_hdr *)fw_entry->data;
3903 if (fw_entry->size < sizeof(*ehdr)) {
3904 pr_err("%s: Not big enough to be an elf32 header\n",
3905 qseecom.pdev->init_name);
3906 return false;
3907 }
3908 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3909 pr_err("%s: Not an elf32 header\n",
3910 qseecom.pdev->init_name);
3911 return false;
3912 }
3913 if (ehdr->e_phnum == 0) {
3914 pr_err("%s: No loadable segments\n",
3915 qseecom.pdev->init_name);
3916 return false;
3917 }
3918 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3919 sizeof(struct elf32_hdr) > fw_entry->size) {
3920 pr_err("%s: Program headers not within mdt\n",
3921 qseecom.pdev->init_name);
3922 return false;
3923 }
3924 break;
3925 }
3926 case ELFCLASS64: {
3927 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3928 if (fw_entry->size < sizeof(*ehdr64)) {
3929 pr_err("%s: Not big enough to be an elf64 header\n",
3930 qseecom.pdev->init_name);
3931 return false;
3932 }
3933 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3934 pr_err("%s: Not an elf64 header\n",
3935 qseecom.pdev->init_name);
3936 return false;
3937 }
3938 if (ehdr64->e_phnum == 0) {
3939 pr_err("%s: No loadable segments\n",
3940 qseecom.pdev->init_name);
3941 return false;
3942 }
3943 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3944 sizeof(struct elf64_hdr) > fw_entry->size) {
3945 pr_err("%s: Program headers not within mdt\n",
3946 qseecom.pdev->init_name);
3947 return false;
3948 }
3949 break;
3950 }
3951 default: {
3952 pr_err("QSEE app arch %u is not supported\n", app_arch);
3953 return false;
3954 }
3955 }
3956 return true;
3957}
3958
/*
 * Compute the total size of a split firmware image ("<app>.mdt" plus
 * "<app>.b00".."<app>.bNN") and report the app's ELF class.
 *
 * The .mdt is validated, then parsed for the number of program
 * headers (one .bXX blob per header); each blob's size is accumulated
 * into *fw_size with overflow checking.
 *
 * Returns 0 on success; on failure returns a negative errno and sets
 * *fw_size to 0.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with request_firmware\n");
		ret = -EIO;
		goto err;
	}
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* one split .bXX blob per ELF program header */
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		/* guard the *fw_size accumulation against u32 overflow */
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}

	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
4021
/*
 * Load a split firmware image ("<app>.mdt" followed by "<app>.bXX"
 * blobs) into the caller-supplied buffer @img_data of capacity
 * @fw_size, filling in load_req->img_len and load_req->mdt_len.
 *
 * Each blob's size is checked (with overflow checking) against the
 * remaining buffer capacity before it is copied.
 *
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
					uint32_t fw_size,
					struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}

	load_req->img_len = fw_entry->size;
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/

	/* assumes the .mdt already passed __qseecom_is_fw_image_valid() */
	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, app_arch);
		ret = -EIO;
		goto err;
	}
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* append each .bXX blob right after the .mdt in the buffer */
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		/* overflow-safe remaining-capacity check */
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	release_firmware(fw_entry);
	return ret;
}
4093
4094static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4095 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4096{
4097 size_t len = 0;
4098 int ret = 0;
4099 ion_phys_addr_t pa;
4100 struct ion_handle *ihandle = NULL;
4101 u8 *img_data = NULL;
4102
4103 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
4104 SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4105
4106 if (IS_ERR_OR_NULL(ihandle)) {
4107 pr_err("ION alloc failed\n");
4108 return -ENOMEM;
4109 }
4110 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4111 ihandle);
4112
4113 if (IS_ERR_OR_NULL(img_data)) {
4114 pr_err("ION memory mapping for image loading failed\n");
4115 ret = -ENOMEM;
4116 goto exit_ion_free;
4117 }
4118 /* Get the physical address of the ION BUF */
4119 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4120 if (ret) {
4121 pr_err("physical memory retrieval failure\n");
4122 ret = -EIO;
4123 goto exit_ion_unmap_kernel;
4124 }
4125
4126 *pihandle = ihandle;
4127 *data = img_data;
4128 *paddr = pa;
4129 return ret;
4130
4131exit_ion_unmap_kernel:
4132 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4133exit_ion_free:
4134 ion_free(qseecom.ion_clnt, ihandle);
4135 ihandle = NULL;
4136 return ret;
4137}
4138
4139static void __qseecom_free_img_data(struct ion_handle **ihandle)
4140{
4141 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4142 ion_free(qseecom.ion_clnt, *ihandle);
4143 *ihandle = NULL;
4144}
4145
4146static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4147 uint32_t *app_id)
4148{
4149 int ret = -1;
4150 uint32_t fw_size = 0;
4151 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4152 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4153 struct qseecom_command_scm_resp resp;
4154 u8 *img_data = NULL;
4155 ion_phys_addr_t pa = 0;
4156 struct ion_handle *ihandle = NULL;
4157 void *cmd_buf = NULL;
4158 size_t cmd_len;
4159 uint32_t app_arch = 0;
4160
4161 if (!data || !appname || !app_id) {
4162 pr_err("Null pointer to data or appname or appid\n");
4163 return -EINVAL;
4164 }
4165 *app_id = 0;
4166 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4167 return -EIO;
4168 data->client.app_arch = app_arch;
4169
4170 /* Check and load cmnlib */
4171 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4172 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4173 ret = qseecom_load_commonlib_image(data, "cmnlib");
4174 if (ret) {
4175 pr_err("failed to load cmnlib\n");
4176 return -EIO;
4177 }
4178 qseecom.commonlib_loaded = true;
4179 pr_debug("cmnlib is loaded\n");
4180 }
4181
4182 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4183 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4184 if (ret) {
4185 pr_err("failed to load cmnlib64\n");
4186 return -EIO;
4187 }
4188 qseecom.commonlib64_loaded = true;
4189 pr_debug("cmnlib64 is loaded\n");
4190 }
4191 }
4192
4193 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4194 if (ret)
4195 return ret;
4196
4197 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4198 if (ret) {
4199 ret = -EIO;
4200 goto exit_free_img_data;
4201 }
4202
4203 /* Populate the load_req parameters */
4204 if (qseecom.qsee_version < QSEE_VERSION_40) {
4205 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4206 load_req.mdt_len = load_req.mdt_len;
4207 load_req.img_len = load_req.img_len;
4208 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4209 load_req.phy_addr = (uint32_t)pa;
4210 cmd_buf = (void *)&load_req;
4211 cmd_len = sizeof(struct qseecom_load_app_ireq);
4212 } else {
4213 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4214 load_req_64bit.mdt_len = load_req.mdt_len;
4215 load_req_64bit.img_len = load_req.img_len;
4216 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4217 load_req_64bit.phy_addr = (uint64_t)pa;
4218 cmd_buf = (void *)&load_req_64bit;
4219 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4220 }
4221
4222 if (qseecom.support_bus_scaling) {
4223 mutex_lock(&qsee_bw_mutex);
4224 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4225 mutex_unlock(&qsee_bw_mutex);
4226 if (ret) {
4227 ret = -EIO;
4228 goto exit_free_img_data;
4229 }
4230 }
4231
4232 ret = __qseecom_enable_clk_scale_up(data);
4233 if (ret) {
4234 ret = -EIO;
4235 goto exit_unregister_bus_bw_need;
4236 }
4237
4238 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4239 img_data, fw_size,
4240 ION_IOC_CLEAN_INV_CACHES);
4241 if (ret) {
4242 pr_err("cache operation failed %d\n", ret);
4243 goto exit_disable_clk_vote;
4244 }
4245
4246 /* SCM_CALL to load the image */
4247 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4248 &resp, sizeof(resp));
4249 if (ret) {
4250 pr_err("scm_call to load failed : ret %d\n", ret);
4251 ret = -EIO;
4252 goto exit_disable_clk_vote;
4253 }
4254
4255 switch (resp.result) {
4256 case QSEOS_RESULT_SUCCESS:
4257 *app_id = resp.data;
4258 break;
4259 case QSEOS_RESULT_INCOMPLETE:
4260 ret = __qseecom_process_incomplete_cmd(data, &resp);
4261 if (ret)
4262 pr_err("process_incomplete_cmd FAILED\n");
4263 else
4264 *app_id = resp.data;
4265 break;
4266 case QSEOS_RESULT_FAILURE:
4267 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4268 break;
4269 default:
4270 pr_err("scm call return unknown response %d\n", resp.result);
4271 ret = -EINVAL;
4272 break;
4273 }
4274
4275exit_disable_clk_vote:
4276 __qseecom_disable_clk_scale_down(data);
4277
4278exit_unregister_bus_bw_need:
4279 if (qseecom.support_bus_scaling) {
4280 mutex_lock(&qsee_bw_mutex);
4281 qseecom_unregister_bus_bandwidth_needs(data);
4282 mutex_unlock(&qsee_bw_mutex);
4283 }
4284
4285exit_free_img_data:
4286 __qseecom_free_img_data(&ihandle);
4287 return ret;
4288}
4289
/*
 * qseecom_load_commonlib_image() - load the QSEE common library
 * ("cmnlib" or "cmnlib64") and hand it to the secure side via
 * QSEOS_LOAD_SERV_IMAGE_COMMAND.
 * @data:        client context used for bus/clock voting and for
 *               servicing incomplete-command listener callbacks
 * @cmnlib_name: firmware base name; must be shorter than
 *               MAX_APP_NAME_SIZE
 *
 * The image is staged in qseecom.cmnlib_ion_handle and the buffer is
 * released again on every path (success included) once QSEE has
 * consumed it.  Returns 0 on success, -EINVAL on bad input or bad SCM
 * response, -EIO on any load/vote failure.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_allocate_img_data(&qseecom.cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	/* pre-4.0 QSEE takes a 32-bit request, later versions a 64-bit one */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* flush the staged image so QSEE sees coherent memory */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, qseecom.cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* a listener must be serviced before the load finishes */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&qseecom.cmnlib_ion_handle);
	return ret;
}
4409
4410static int qseecom_unload_commonlib_image(void)
4411{
4412 int ret = -EINVAL;
4413 struct qseecom_unload_lib_image_ireq unload_req = {0};
4414 struct qseecom_command_scm_resp resp;
4415
4416 /* Populate the remaining parameters */
4417 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4418
4419 /* SCM_CALL to load the image */
4420 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4421 sizeof(struct qseecom_unload_lib_image_ireq),
4422 &resp, sizeof(resp));
4423 if (ret) {
4424 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4425 ret = -EIO;
4426 } else {
4427 switch (resp.result) {
4428 case QSEOS_RESULT_SUCCESS:
4429 break;
4430 case QSEOS_RESULT_FAILURE:
4431 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4432 break;
4433 default:
4434 pr_err("scm call return unknown response %d\n",
4435 resp.result);
4436 ret = -EINVAL;
4437 break;
4438 }
4439 }
4440
4441 return ret;
4442}
4443
4444int qseecom_start_app(struct qseecom_handle **handle,
4445 char *app_name, uint32_t size)
4446{
4447 int32_t ret = 0;
4448 unsigned long flags = 0;
4449 struct qseecom_dev_handle *data = NULL;
4450 struct qseecom_check_app_ireq app_ireq;
4451 struct qseecom_registered_app_list *entry = NULL;
4452 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4453 bool found_app = false;
4454 size_t len;
4455 ion_phys_addr_t pa;
4456 uint32_t fw_size, app_arch;
4457 uint32_t app_id = 0;
4458
4459 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4460 pr_err("Not allowed to be called in %d state\n",
4461 atomic_read(&qseecom.qseecom_state));
4462 return -EPERM;
4463 }
4464 if (!app_name) {
4465 pr_err("failed to get the app name\n");
4466 return -EINVAL;
4467 }
4468
Zhen Kong64a6d7282017-06-16 11:55:07 -07004469 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004470 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004471 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004472 return -EINVAL;
4473 }
4474
4475 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4476 if (!(*handle))
4477 return -ENOMEM;
4478
4479 data = kzalloc(sizeof(*data), GFP_KERNEL);
4480 if (!data) {
4481 if (ret == 0) {
4482 kfree(*handle);
4483 *handle = NULL;
4484 }
4485 return -ENOMEM;
4486 }
4487 data->abort = 0;
4488 data->type = QSEECOM_CLIENT_APP;
4489 data->released = false;
4490 data->client.sb_length = size;
4491 data->client.user_virt_sb_base = 0;
4492 data->client.ihandle = NULL;
4493
4494 init_waitqueue_head(&data->abort_wq);
4495
4496 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4497 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4498 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4499 pr_err("Ion client could not retrieve the handle\n");
4500 kfree(data);
4501 kfree(*handle);
4502 *handle = NULL;
4503 return -EINVAL;
4504 }
4505 mutex_lock(&app_access_lock);
4506
4507 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4508 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4509 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4510 if (ret)
4511 goto err;
4512
4513 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4514 if (app_id) {
4515 pr_warn("App id %d for [%s] app exists\n", app_id,
4516 (char *)app_ireq.app_name);
4517 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4518 list_for_each_entry(entry,
4519 &qseecom.registered_app_list_head, list){
4520 if (entry->app_id == app_id) {
4521 entry->ref_cnt++;
4522 found_app = true;
4523 break;
4524 }
4525 }
4526 spin_unlock_irqrestore(
4527 &qseecom.registered_app_list_lock, flags);
4528 if (!found_app)
4529 pr_warn("App_id %d [%s] was loaded but not registered\n",
4530 ret, (char *)app_ireq.app_name);
4531 } else {
4532 /* load the app and get the app_id */
4533 pr_debug("%s: Loading app for the first time'\n",
4534 qseecom.pdev->init_name);
4535 ret = __qseecom_load_fw(data, app_name, &app_id);
4536 if (ret < 0)
4537 goto err;
4538 }
4539 data->client.app_id = app_id;
4540 if (!found_app) {
4541 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4542 if (!entry) {
4543 pr_err("kmalloc for app entry failed\n");
4544 ret = -ENOMEM;
4545 goto err;
4546 }
4547 entry->app_id = app_id;
4548 entry->ref_cnt = 1;
4549 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4550 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4551 ret = -EIO;
4552 kfree(entry);
4553 goto err;
4554 }
4555 entry->app_arch = app_arch;
4556 entry->app_blocked = false;
4557 entry->blocked_on_listener_id = 0;
4558 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4559 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4560 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4561 flags);
4562 }
4563
4564 /* Get the physical address of the ION BUF */
4565 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4566 if (ret) {
4567 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4568 ret);
4569 goto err;
4570 }
4571
4572 /* Populate the structure for sending scm call to load image */
4573 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4574 data->client.ihandle);
4575 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4576 pr_err("ION memory mapping for client shared buf failed\n");
4577 ret = -ENOMEM;
4578 goto err;
4579 }
4580 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4581 data->client.sb_phys = (phys_addr_t)pa;
4582 (*handle)->dev = (void *)data;
4583 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4584 (*handle)->sbuf_len = data->client.sb_length;
4585
4586 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4587 if (!kclient_entry) {
4588 ret = -ENOMEM;
4589 goto err;
4590 }
4591 kclient_entry->handle = *handle;
4592
4593 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4594 list_add_tail(&kclient_entry->list,
4595 &qseecom.registered_kclient_list_head);
4596 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4597
4598 mutex_unlock(&app_access_lock);
4599 return 0;
4600
4601err:
4602 kfree(data);
4603 kfree(*handle);
4604 *handle = NULL;
4605 mutex_unlock(&app_access_lock);
4606 return ret;
4607}
4608EXPORT_SYMBOL(qseecom_start_app);
4609
4610int qseecom_shutdown_app(struct qseecom_handle **handle)
4611{
4612 int ret = -EINVAL;
4613 struct qseecom_dev_handle *data;
4614
4615 struct qseecom_registered_kclient_list *kclient = NULL;
4616 unsigned long flags = 0;
4617 bool found_handle = false;
4618
4619 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4620 pr_err("Not allowed to be called in %d state\n",
4621 atomic_read(&qseecom.qseecom_state));
4622 return -EPERM;
4623 }
4624
4625 if ((handle == NULL) || (*handle == NULL)) {
4626 pr_err("Handle is not initialized\n");
4627 return -EINVAL;
4628 }
4629 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4630 mutex_lock(&app_access_lock);
4631
4632 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4633 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4634 list) {
4635 if (kclient->handle == (*handle)) {
4636 list_del(&kclient->list);
4637 found_handle = true;
4638 break;
4639 }
4640 }
4641 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4642 if (!found_handle)
4643 pr_err("Unable to find the handle, exiting\n");
4644 else
4645 ret = qseecom_unload_app(data, false);
4646
4647 mutex_unlock(&app_access_lock);
4648 if (ret == 0) {
4649 kzfree(data);
4650 kzfree(*handle);
4651 kzfree(kclient);
4652 *handle = NULL;
4653 }
4654
4655 return ret;
4656}
4657EXPORT_SYMBOL(qseecom_shutdown_app);
4658
/*
 * qseecom_send_command() - kernel-client API: send a command to a
 * trusted app and wait for its response.
 * @handle:   client handle from qseecom_start_app()
 * @send_buf: request buffer (validated against the client's shared
 *            buffer by __validate_send_cmd_inputs())
 * @sbuf_len: request length
 * @resp_buf: response buffer
 * @rbuf_len: response buffer length
 *
 * Serialized under app_access_lock.  Takes a temporary bandwidth/clock
 * vote when the crypto clock is not already on, and drops it again
 * after the command completes.  Returns 0 on success or a negative
 * errno.
 */
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
			atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
				ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		perf_enabled = true;
	}
	/* the "securemm" app is driven with the legacy command format */
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req);
	data->use_legacy_cmd = false;
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	/* drop the temporary clock vote taken above, if any */
	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
4737EXPORT_SYMBOL(qseecom_send_command);
4738
4739int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4740{
4741 int ret = 0;
4742
4743 if ((handle == NULL) || (handle->dev == NULL)) {
4744 pr_err("No valid kernel client\n");
4745 return -EINVAL;
4746 }
4747 if (high) {
4748 if (qseecom.support_bus_scaling) {
4749 mutex_lock(&qsee_bw_mutex);
4750 __qseecom_register_bus_bandwidth_needs(handle->dev,
4751 HIGH);
4752 mutex_unlock(&qsee_bw_mutex);
4753 } else {
4754 ret = qseecom_perf_enable(handle->dev);
4755 if (ret)
4756 pr_err("Failed to vote for clock with err %d\n",
4757 ret);
4758 }
4759 } else {
4760 if (!qseecom.support_bus_scaling) {
4761 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4762 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4763 } else {
4764 mutex_lock(&qsee_bw_mutex);
4765 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4766 mutex_unlock(&qsee_bw_mutex);
4767 }
4768 }
4769 return ret;
4770}
4771EXPORT_SYMBOL(qseecom_set_bandwidth);
4772
/*
 * qseecom_process_listener_from_smcinvoke() - handle a listener request
 * that arrived through the smcinvoke driver instead of a qseecom SCM
 * return.  desc->ret[0..2] are repackaged into a
 * qseecom_command_scm_resp, run through the normal reentrancy listener
 * path (under app_access_lock, with dummy app/client contexts since
 * there is no real qseecom session), and the final response is written
 * back into desc->ret[] for the caller.  Returns the processing result.
 */
int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
{
	struct qseecom_registered_app_list dummy_app_entry = { {0} };
	struct qseecom_dev_handle dummy_private_data = {0};
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	if (!desc) {
		pr_err("desc is NULL\n");
		return -EINVAL;
	}

	resp.result = desc->ret[0];	/*req_cmd*/
	resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
	resp.data = desc->ret[2];	/*listener_id*/

	mutex_lock(&app_access_lock);
	ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
					&dummy_private_data);
	mutex_unlock(&app_access_lock);
	if (ret)
		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
			(int)desc->ret[0], (int)desc->ret[2],
			(int)desc->ret[1], ret);
	/* hand the (possibly updated) response back to smcinvoke */
	desc->ret[0] = resp.result;
	desc->ret[1] = resp.resp_type;
	desc->ret[2] = resp.data;
	return ret;
}
4802EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4803
/*
 * User space signalled that a listener response is ready: set the
 * global flag and wake whoever is blocked on send_resp_wq.
 * Always returns 0.
 */
static int qseecom_send_resp(void)
{
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4810
4811static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4812{
4813 struct qseecom_registered_listener_list *this_lstnr = NULL;
4814
4815 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4816 this_lstnr = __qseecom_find_svc(data->listener.id);
4817 if (this_lstnr == NULL)
4818 return -EINVAL;
4819 qseecom.send_resp_flag = 1;
4820 this_lstnr->send_resp_flag = 1;
4821 wake_up_interruptible(&qseecom.send_resp_wq);
4822 return 0;
4823}
4824
4825static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
4826 struct qseecom_send_modfd_listener_resp *resp,
4827 struct qseecom_registered_listener_list *this_lstnr)
4828{
4829 int i;
4830
4831 if (!data || !resp || !this_lstnr) {
4832 pr_err("listener handle or resp msg is null\n");
4833 return -EINVAL;
4834 }
4835
4836 if (resp->resp_buf_ptr == NULL) {
4837 pr_err("resp buffer is null\n");
4838 return -EINVAL;
4839 }
4840 /* validate resp buf length */
4841 if ((resp->resp_len == 0) ||
4842 (resp->resp_len > this_lstnr->sb_length)) {
4843 pr_err("resp buf length %d not valid\n", resp->resp_len);
4844 return -EINVAL;
4845 }
4846
4847 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
4848 pr_err("Integer overflow in resp_len & resp_buf\n");
4849 return -EINVAL;
4850 }
4851 if ((uintptr_t)this_lstnr->user_virt_sb_base >
4852 (ULONG_MAX - this_lstnr->sb_length)) {
4853 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
4854 return -EINVAL;
4855 }
4856 /* validate resp buf */
4857 if (((uintptr_t)resp->resp_buf_ptr <
4858 (uintptr_t)this_lstnr->user_virt_sb_base) ||
4859 ((uintptr_t)resp->resp_buf_ptr >=
4860 ((uintptr_t)this_lstnr->user_virt_sb_base +
4861 this_lstnr->sb_length)) ||
4862 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
4863 ((uintptr_t)this_lstnr->user_virt_sb_base +
4864 this_lstnr->sb_length))) {
4865 pr_err("resp buf is out of shared buffer region\n");
4866 return -EINVAL;
4867 }
4868
4869 /* validate offsets */
4870 for (i = 0; i < MAX_ION_FD; i++) {
4871 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
4872 pr_err("Invalid offset %d = 0x%x\n",
4873 i, resp->ifd_data[i].cmd_buf_offset);
4874 return -EINVAL;
4875 }
4876 }
4877
4878 return 0;
4879}
4880
4881static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4882 void __user *argp, bool is_64bit_addr)
4883{
4884 struct qseecom_send_modfd_listener_resp resp;
4885 struct qseecom_registered_listener_list *this_lstnr = NULL;
4886
4887 if (copy_from_user(&resp, argp, sizeof(resp))) {
4888 pr_err("copy_from_user failed");
4889 return -EINVAL;
4890 }
4891
4892 this_lstnr = __qseecom_find_svc(data->listener.id);
4893 if (this_lstnr == NULL)
4894 return -EINVAL;
4895
4896 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
4897 return -EINVAL;
4898
4899 resp.resp_buf_ptr = this_lstnr->sb_virt +
4900 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
4901
4902 if (!is_64bit_addr)
4903 __qseecom_update_cmd_buf(&resp, false, data);
4904 else
4905 __qseecom_update_cmd_buf_64(&resp, false, data);
4906 qseecom.send_resp_flag = 1;
4907 this_lstnr->send_resp_flag = 1;
4908 wake_up_interruptible(&qseecom.send_resp_wq);
4909 return 0;
4910}
4911
/* 32-bit address variant of the SEND_MODFD_RESP ioctl; the real work
 * is done in __qseecom_send_modfd_resp(). */
static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, false);
}
4917
/* 64-bit address variant of the SEND_MODFD_RESP ioctl; the real work
 * is done in __qseecom_send_modfd_resp(). */
static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, true);
}
4923
4924static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
4925 void __user *argp)
4926{
4927 struct qseecom_qseos_version_req req;
4928
4929 if (copy_from_user(&req, argp, sizeof(req))) {
4930 pr_err("copy_from_user failed");
4931 return -EINVAL;
4932 }
4933 req.qseos_version = qseecom.qseos_version;
4934 if (copy_to_user(argp, &req, sizeof(req))) {
4935 pr_err("copy_to_user failed");
4936 return -EINVAL;
4937 }
4938 return 0;
4939}
4940
/*
 * __qseecom_enable_clk() - take a reference on a crypto-engine clock
 * bundle.
 * @ce: CLK_QSEE or CLK_CE_DRV.
 *
 * Reference-counted under clk_access_lock: only the first user actually
 * prepares/enables the core, interface and bus clocks (in that order);
 * later callers just bump the count.  Partially-enabled clocks are
 * rolled back on failure.  Returns 0 on success, -EINVAL for an
 * unsupported @ce, -EIO if a clock fails to enable or the count would
 * overflow.
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	/* clocks already on: just add a reference */
	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

/* unwind in reverse order of the enables above */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
5008
5009static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5010{
5011 struct qseecom_clk *qclk;
5012
5013 if (qseecom.no_clock_support)
5014 return;
5015
5016 if (ce == CLK_QSEE)
5017 qclk = &qseecom.qsee;
5018 else
5019 qclk = &qseecom.ce_drv;
5020
5021 mutex_lock(&clk_access_lock);
5022
5023 if (qclk->clk_access_cnt == 0) {
5024 mutex_unlock(&clk_access_lock);
5025 return;
5026 }
5027
5028 if (qclk->clk_access_cnt == 1) {
5029 if (qclk->ce_clk != NULL)
5030 clk_disable_unprepare(qclk->ce_clk);
5031 if (qclk->ce_core_clk != NULL)
5032 clk_disable_unprepare(qclk->ce_core_clk);
5033 if (qclk->ce_bus_clk != NULL)
5034 clk_disable_unprepare(qclk->ce_bus_clk);
5035 }
5036 qclk->clk_access_cnt--;
5037 mutex_unlock(&clk_access_lock);
5038}
5039
/*
 * qsee_vote_for_clock() - take a DFAB or SFPB bandwidth vote for a
 * client.
 * @data:     client whose perf_enabled/fast_load_enabled flag is set so
 *            the vote can be dropped later
 * @clk_type: CLK_DFAB or CLK_SFPB
 *
 * Votes are reference-counted under qsee_bw_mutex.  Only the first
 * voter of a kind issues a bus-scale request: from the request values
 * used below, level 1 appears to mean DFAB only, 2 SFPB only and 3
 * both active (TODO confirm against the bus-scale table in the DT).
 * The first voter also enables the QSEE clocks when a core source
 * clock exists, and rolls that back if the bus request fails.
 * Returns 0 on success or the msm_bus_scale error.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			/* first DFAB vote: 3 if SFPB already voted, else 1 */
			if (qseecom.qsee_sfpb_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			/* first SFPB vote: 3 if DFAB already voted, else 2 */
			if (qseecom.qsee_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}

			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
5123
5124static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5125 int32_t clk_type)
5126{
5127 int32_t ret = 0;
5128 struct qseecom_clk *qclk;
5129
5130 qclk = &qseecom.qsee;
5131
5132 if (qseecom.no_clock_support)
5133 return;
5134 if (!qseecom.qsee_perf_client)
5135 return;
5136
5137 switch (clk_type) {
5138 case CLK_DFAB:
5139 mutex_lock(&qsee_bw_mutex);
5140 if (qseecom.qsee_bw_count == 0) {
5141 pr_err("Client error.Extra call to disable DFAB clk\n");
5142 mutex_unlock(&qsee_bw_mutex);
5143 return;
5144 }
5145
5146 if (qseecom.qsee_bw_count == 1) {
5147 if (qseecom.qsee_sfpb_bw_count > 0)
5148 ret = msm_bus_scale_client_update_request(
5149 qseecom.qsee_perf_client, 2);
5150 else {
5151 ret = msm_bus_scale_client_update_request(
5152 qseecom.qsee_perf_client, 0);
5153 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5154 __qseecom_disable_clk(CLK_QSEE);
5155 }
5156 if (ret)
5157 pr_err("SFPB Bandwidth req fail (%d)\n",
5158 ret);
5159 else {
5160 qseecom.qsee_bw_count--;
5161 data->perf_enabled = false;
5162 }
5163 } else {
5164 qseecom.qsee_bw_count--;
5165 data->perf_enabled = false;
5166 }
5167 mutex_unlock(&qsee_bw_mutex);
5168 break;
5169 case CLK_SFPB:
5170 mutex_lock(&qsee_bw_mutex);
5171 if (qseecom.qsee_sfpb_bw_count == 0) {
5172 pr_err("Client error.Extra call to disable SFPB clk\n");
5173 mutex_unlock(&qsee_bw_mutex);
5174 return;
5175 }
5176 if (qseecom.qsee_sfpb_bw_count == 1) {
5177 if (qseecom.qsee_bw_count > 0)
5178 ret = msm_bus_scale_client_update_request(
5179 qseecom.qsee_perf_client, 1);
5180 else {
5181 ret = msm_bus_scale_client_update_request(
5182 qseecom.qsee_perf_client, 0);
5183 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5184 __qseecom_disable_clk(CLK_QSEE);
5185 }
5186 if (ret)
5187 pr_err("SFPB Bandwidth req fail (%d)\n",
5188 ret);
5189 else {
5190 qseecom.qsee_sfpb_bw_count--;
5191 data->fast_load_enabled = false;
5192 }
5193 } else {
5194 qseecom.qsee_sfpb_bw_count--;
5195 data->fast_load_enabled = false;
5196 }
5197 mutex_unlock(&qsee_bw_mutex);
5198 break;
5199 default:
5200 pr_err("Clock type not defined\n");
5201 break;
5202 }
5203
5204}
5205
5206static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5207 void __user *argp)
5208{
5209 struct ion_handle *ihandle; /* Ion handle */
5210 struct qseecom_load_img_req load_img_req;
5211 int uret = 0;
5212 int ret;
5213 ion_phys_addr_t pa = 0;
5214 size_t len;
5215 struct qseecom_load_app_ireq load_req;
5216 struct qseecom_load_app_64bit_ireq load_req_64bit;
5217 struct qseecom_command_scm_resp resp;
5218 void *cmd_buf = NULL;
5219 size_t cmd_len;
5220 /* Copy the relevant information needed for loading the image */
5221 if (copy_from_user(&load_img_req,
5222 (void __user *)argp,
5223 sizeof(struct qseecom_load_img_req))) {
5224 pr_err("copy_from_user failed\n");
5225 return -EFAULT;
5226 }
5227
5228 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005229 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005230 load_img_req.ifd_data_fd);
5231 if (IS_ERR_OR_NULL(ihandle)) {
5232 pr_err("Ion client could not retrieve the handle\n");
5233 return -ENOMEM;
5234 }
5235
5236 /* Get the physical address of the ION BUF */
5237 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5238 if (ret) {
5239 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5240 ret);
5241 return ret;
5242 }
5243 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5244 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5245 len, load_img_req.mdt_len,
5246 load_img_req.img_len);
5247 return ret;
5248 }
5249 /* Populate the structure for sending scm call to load image */
5250 if (qseecom.qsee_version < QSEE_VERSION_40) {
5251 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5252 load_req.mdt_len = load_img_req.mdt_len;
5253 load_req.img_len = load_img_req.img_len;
5254 load_req.phy_addr = (uint32_t)pa;
5255 cmd_buf = (void *)&load_req;
5256 cmd_len = sizeof(struct qseecom_load_app_ireq);
5257 } else {
5258 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5259 load_req_64bit.mdt_len = load_img_req.mdt_len;
5260 load_req_64bit.img_len = load_img_req.img_len;
5261 load_req_64bit.phy_addr = (uint64_t)pa;
5262 cmd_buf = (void *)&load_req_64bit;
5263 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5264 }
5265
5266 if (qseecom.support_bus_scaling) {
5267 mutex_lock(&qsee_bw_mutex);
5268 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5269 mutex_unlock(&qsee_bw_mutex);
5270 if (ret) {
5271 ret = -EIO;
5272 goto exit_cpu_restore;
5273 }
5274 }
5275
5276 /* Vote for the SFPB clock */
5277 ret = __qseecom_enable_clk_scale_up(data);
5278 if (ret) {
5279 ret = -EIO;
5280 goto exit_register_bus_bandwidth_needs;
5281 }
5282 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5283 ION_IOC_CLEAN_INV_CACHES);
5284 if (ret) {
5285 pr_err("cache operation failed %d\n", ret);
5286 goto exit_disable_clock;
5287 }
5288 /* SCM_CALL to load the external elf */
5289 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5290 &resp, sizeof(resp));
5291 if (ret) {
5292 pr_err("scm_call to load failed : ret %d\n",
5293 ret);
5294 ret = -EFAULT;
5295 goto exit_disable_clock;
5296 }
5297
5298 switch (resp.result) {
5299 case QSEOS_RESULT_SUCCESS:
5300 break;
5301 case QSEOS_RESULT_INCOMPLETE:
5302 pr_err("%s: qseos result incomplete\n", __func__);
5303 ret = __qseecom_process_incomplete_cmd(data, &resp);
5304 if (ret)
5305 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5306 break;
5307 case QSEOS_RESULT_FAILURE:
5308 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5309 ret = -EFAULT;
5310 break;
5311 default:
5312 pr_err("scm_call response result %d not supported\n",
5313 resp.result);
5314 ret = -EFAULT;
5315 break;
5316 }
5317
5318exit_disable_clock:
5319 __qseecom_disable_clk_scale_down(data);
5320
5321exit_register_bus_bandwidth_needs:
5322 if (qseecom.support_bus_scaling) {
5323 mutex_lock(&qsee_bw_mutex);
5324 uret = qseecom_unregister_bus_bandwidth_needs(data);
5325 mutex_unlock(&qsee_bw_mutex);
5326 if (uret)
5327 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5328 uret, ret);
5329 }
5330
5331exit_cpu_restore:
5332 /* Deallocate the handle */
5333 if (!IS_ERR_OR_NULL(ihandle))
5334 ion_free(qseecom.ion_clnt, ihandle);
5335 return ret;
5336}
5337
5338static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5339{
5340 int ret = 0;
5341 struct qseecom_command_scm_resp resp;
5342 struct qseecom_unload_app_ireq req;
5343
5344 /* unavailable client app */
5345 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5346
5347 /* Populate the structure for sending scm call to unload image */
5348 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5349
5350 /* SCM_CALL to unload the external elf */
5351 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5352 sizeof(struct qseecom_unload_app_ireq),
5353 &resp, sizeof(resp));
5354 if (ret) {
5355 pr_err("scm_call to unload failed : ret %d\n",
5356 ret);
5357 ret = -EFAULT;
5358 goto qseecom_unload_external_elf_scm_err;
5359 }
5360 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5361 ret = __qseecom_process_incomplete_cmd(data, &resp);
5362 if (ret)
5363 pr_err("process_incomplete_cmd fail err: %d\n",
5364 ret);
5365 } else {
5366 if (resp.result != QSEOS_RESULT_SUCCESS) {
5367 pr_err("scm_call to unload image failed resp.result =%d\n",
5368 resp.result);
5369 ret = -EFAULT;
5370 }
5371 }
5372
5373qseecom_unload_external_elf_scm_err:
5374
5375 return ret;
5376}
5377
5378static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5379 void __user *argp)
5380{
5381
5382 int32_t ret;
5383 struct qseecom_qseos_app_load_query query_req;
5384 struct qseecom_check_app_ireq req;
5385 struct qseecom_registered_app_list *entry = NULL;
5386 unsigned long flags = 0;
5387 uint32_t app_arch = 0, app_id = 0;
5388 bool found_app = false;
5389
5390 /* Copy the relevant information needed for loading the image */
5391 if (copy_from_user(&query_req,
5392 (void __user *)argp,
5393 sizeof(struct qseecom_qseos_app_load_query))) {
5394 pr_err("copy_from_user failed\n");
5395 return -EFAULT;
5396 }
5397
5398 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5399 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5400 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5401
5402 ret = __qseecom_check_app_exists(req, &app_id);
5403 if (ret) {
5404 pr_err(" scm call to check if app is loaded failed");
5405 return ret; /* scm call failed */
5406 }
5407 if (app_id) {
5408 pr_debug("App id %d (%s) already exists\n", app_id,
5409 (char *)(req.app_name));
5410 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5411 list_for_each_entry(entry,
5412 &qseecom.registered_app_list_head, list){
5413 if (entry->app_id == app_id) {
5414 app_arch = entry->app_arch;
5415 entry->ref_cnt++;
5416 found_app = true;
5417 break;
5418 }
5419 }
5420 spin_unlock_irqrestore(
5421 &qseecom.registered_app_list_lock, flags);
5422 data->client.app_id = app_id;
5423 query_req.app_id = app_id;
5424 if (app_arch) {
5425 data->client.app_arch = app_arch;
5426 query_req.app_arch = app_arch;
5427 } else {
5428 data->client.app_arch = 0;
5429 query_req.app_arch = 0;
5430 }
5431 strlcpy(data->client.app_name, query_req.app_name,
5432 MAX_APP_NAME_SIZE);
5433 /*
5434 * If app was loaded by appsbl before and was not registered,
5435 * regiser this app now.
5436 */
5437 if (!found_app) {
5438 pr_debug("Register app %d [%s] which was loaded before\n",
5439 ret, (char *)query_req.app_name);
5440 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5441 if (!entry) {
5442 pr_err("kmalloc for app entry failed\n");
5443 return -ENOMEM;
5444 }
5445 entry->app_id = app_id;
5446 entry->ref_cnt = 1;
5447 entry->app_arch = data->client.app_arch;
5448 strlcpy(entry->app_name, data->client.app_name,
5449 MAX_APP_NAME_SIZE);
5450 entry->app_blocked = false;
5451 entry->blocked_on_listener_id = 0;
5452 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5453 flags);
5454 list_add_tail(&entry->list,
5455 &qseecom.registered_app_list_head);
5456 spin_unlock_irqrestore(
5457 &qseecom.registered_app_list_lock, flags);
5458 }
5459 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5460 pr_err("copy_to_user failed\n");
5461 return -EFAULT;
5462 }
5463 return -EEXIST; /* app already loaded */
5464 } else {
5465 return 0; /* app not loaded */
5466 }
5467}
5468
5469static int __qseecom_get_ce_pipe_info(
5470 enum qseecom_key_management_usage_type usage,
5471 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5472{
5473 int ret = -EINVAL;
5474 int i, j;
5475 struct qseecom_ce_info_use *p = NULL;
5476 int total = 0;
5477 struct qseecom_ce_pipe_entry *pcepipe;
5478
5479 switch (usage) {
5480 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5481 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5482 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5483 if (qseecom.support_fde) {
5484 p = qseecom.ce_info.fde;
5485 total = qseecom.ce_info.num_fde;
5486 } else {
5487 pr_err("system does not support fde\n");
5488 return -EINVAL;
5489 }
5490 break;
5491 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5492 if (qseecom.support_pfe) {
5493 p = qseecom.ce_info.pfe;
5494 total = qseecom.ce_info.num_pfe;
5495 } else {
5496 pr_err("system does not support pfe\n");
5497 return -EINVAL;
5498 }
5499 break;
5500 default:
5501 pr_err("unsupported usage %d\n", usage);
5502 return -EINVAL;
5503 }
5504
5505 for (j = 0; j < total; j++) {
5506 if (p->unit_num == unit) {
5507 pcepipe = p->ce_pipe_entry;
5508 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5509 (*ce_hw)[i] = pcepipe->ce_num;
5510 *pipe = pcepipe->ce_pipe_pair;
5511 pcepipe++;
5512 }
5513 ret = 0;
5514 break;
5515 }
5516 p++;
5517 }
5518 return ret;
5519}
5520
/*
 * __qseecom_generate_and_save_key() - ask TZ to generate and persist a
 * storage encryption key for the given usage.
 * @data:  client handle, used to service incomplete-command callbacks.
 * @usage: key-management usage slot; must be a valid QSEOS_KM_USAGE_*.
 * @ireq:  pre-filled QSEOS_GENERATE_KEY request (key id and hash set by
 *         the caller).
 *
 * A "key ID already exists" response from TZ is treated as success.
 * The QSEE clock is held for the duration of the SCM call.
 *
 * Return: 0 on success (or existing key), -EFAULT on SCM failure,
 * -EINVAL on an explicit TZ failure result.
 */
static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_generate_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_generate_ireq),
		&resp, sizeof(resp));
	if (ret) {
		/* TZ signals "key already exists" through the response word
		 * even when the SCM call itself returns -EINVAL.
		 */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
			pr_debug("Key ID exists.\n");
			ret = 0;
		} else {
			pr_err("scm call to generate key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto generate_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
		/* Key already provisioned: not an error. */
		pr_debug("Key ID exists.\n");
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener services before it can finish. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
				pr_debug("Key ID exists.\n");
				ret = 0;
			} else {
				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			}
		}
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("gen key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
generate_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5580
/*
 * __qseecom_delete_saved_key() - ask TZ to delete a previously generated
 * storage encryption key.
 * @data:  client handle, used to service incomplete-command callbacks.
 * @usage: key-management usage slot; must be a valid QSEOS_KM_USAGE_*.
 * @ireq:  pre-filled QSEOS_DELETE_KEY request.
 *
 * The QSEE clock is held for the duration of the SCM call.
 *
 * Return: 0 on success, -ERANGE when TZ reports the maximum password
 * attempts were reached, -EFAULT on SCM failure, -EINVAL on an explicit
 * TZ failure result.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* TZ reports "max attempts" through the response word even
		 * when the SCM call itself returns -EINVAL.
		 */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener services before it can finish. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5641
5642static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5643 enum qseecom_key_management_usage_type usage,
5644 struct qseecom_key_select_ireq *ireq)
5645{
5646 struct qseecom_command_scm_resp resp;
5647 int ret;
5648
5649 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5650 usage >= QSEOS_KM_USAGE_MAX) {
5651 pr_err("Error:: unsupported usage %d\n", usage);
5652 return -EFAULT;
5653 }
5654 ret = __qseecom_enable_clk(CLK_QSEE);
5655 if (ret)
5656 return ret;
5657
5658 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5659 ret = __qseecom_enable_clk(CLK_CE_DRV);
5660 if (ret)
5661 return ret;
5662 }
5663
5664 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5665 ireq, sizeof(struct qseecom_key_select_ireq),
5666 &resp, sizeof(struct qseecom_command_scm_resp));
5667 if (ret) {
5668 if (ret == -EINVAL &&
5669 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5670 pr_debug("Max attempts to input password reached.\n");
5671 ret = -ERANGE;
5672 } else if (ret == -EINVAL &&
5673 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5674 pr_debug("Set Key operation under processing...\n");
5675 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5676 } else {
5677 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5678 ret);
5679 ret = -EFAULT;
5680 }
5681 goto set_key_exit;
5682 }
5683
5684 switch (resp.result) {
5685 case QSEOS_RESULT_SUCCESS:
5686 break;
5687 case QSEOS_RESULT_INCOMPLETE:
5688 ret = __qseecom_process_incomplete_cmd(data, &resp);
5689 if (ret) {
5690 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5691 resp.result);
5692 if (resp.result ==
5693 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5694 pr_debug("Set Key operation under processing...\n");
5695 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5696 }
5697 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5698 pr_debug("Max attempts to input password reached.\n");
5699 ret = -ERANGE;
5700 }
5701 }
5702 break;
5703 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5704 pr_debug("Max attempts to input password reached.\n");
5705 ret = -ERANGE;
5706 break;
5707 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5708 pr_debug("Set Key operation under processing...\n");
5709 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5710 break;
5711 case QSEOS_RESULT_FAILURE:
5712 default:
5713 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5714 ret = -EINVAL;
5715 break;
5716 }
5717set_key_exit:
5718 __qseecom_disable_clk(CLK_QSEE);
5719 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5720 __qseecom_disable_clk(CLK_CE_DRV);
5721 return ret;
5722}
5723
/*
 * __qseecom_update_current_key_user_info() - ask TZ to update the user
 * info (password hash) bound to an existing storage key.
 * @data:  client handle, used to service incomplete-command callbacks.
 * @usage: key-management usage slot; must be a valid QSEOS_KM_USAGE_*.
 * @ireq:  pre-filled QSEOS_UPDATE_KEY_USERINFO request with current and
 *         new hashes.
 *
 * The QSEE clock is held for the duration of the SCM call.
 *
 * Return: 0 on success, QSEOS_RESULT_FAIL_PENDING_OPERATION (positive)
 * when the caller should retry later, -EFAULT on SCM failure, -EINVAL on
 * an explicit TZ failure result.
 */
static int __qseecom_update_current_key_user_info(
			struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* TZ reports "pending" through the response word even when
		 * the SCM call itself returns -EINVAL.
		 */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
					ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener services before it can finish. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5785
5786
5787static int qseecom_enable_ice_setup(int usage)
5788{
5789 int ret = 0;
5790
5791 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5792 ret = qcom_ice_setup_ice_hw("ufs", true);
5793 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5794 ret = qcom_ice_setup_ice_hw("sdcc", true);
5795
5796 return ret;
5797}
5798
5799static int qseecom_disable_ice_setup(int usage)
5800{
5801 int ret = 0;
5802
5803 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5804 ret = qcom_ice_setup_ice_hw("ufs", false);
5805 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5806 ret = qcom_ice_setup_ice_hw("sdcc", false);
5807
5808 return ret;
5809}
5810
5811static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5812{
5813 struct qseecom_ce_info_use *pce_info_use, *p;
5814 int total = 0;
5815 int i;
5816
5817 switch (usage) {
5818 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5819 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5820 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5821 p = qseecom.ce_info.fde;
5822 total = qseecom.ce_info.num_fde;
5823 break;
5824 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5825 p = qseecom.ce_info.pfe;
5826 total = qseecom.ce_info.num_pfe;
5827 break;
5828 default:
5829 pr_err("unsupported usage %d\n", usage);
5830 return -EINVAL;
5831 }
5832
5833 pce_info_use = NULL;
5834
5835 for (i = 0; i < total; i++) {
5836 if (p->unit_num == unit) {
5837 pce_info_use = p;
5838 break;
5839 }
5840 p++;
5841 }
5842 if (!pce_info_use) {
5843 pr_err("can not find %d\n", unit);
5844 return -EINVAL;
5845 }
5846 return pce_info_use->num_ce_pipe_entries;
5847}
5848
5849static int qseecom_create_key(struct qseecom_dev_handle *data,
5850 void __user *argp)
5851{
5852 int i;
5853 uint32_t *ce_hw = NULL;
5854 uint32_t pipe = 0;
5855 int ret = 0;
5856 uint32_t flags = 0;
5857 struct qseecom_create_key_req create_key_req;
5858 struct qseecom_key_generate_ireq generate_key_ireq;
5859 struct qseecom_key_select_ireq set_key_ireq;
5860 uint32_t entries = 0;
5861
5862 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5863 if (ret) {
5864 pr_err("copy_from_user failed\n");
5865 return ret;
5866 }
5867
5868 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5869 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5870 pr_err("unsupported usage %d\n", create_key_req.usage);
5871 ret = -EFAULT;
5872 return ret;
5873 }
5874 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5875 create_key_req.usage);
5876 if (entries <= 0) {
5877 pr_err("no ce instance for usage %d instance %d\n",
5878 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5879 ret = -EINVAL;
5880 return ret;
5881 }
5882
5883 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5884 if (!ce_hw) {
5885 ret = -ENOMEM;
5886 return ret;
5887 }
5888 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5889 DEFAULT_CE_INFO_UNIT);
5890 if (ret) {
5891 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5892 ret = -EINVAL;
5893 goto free_buf;
5894 }
5895
5896 if (qseecom.fde_key_size)
5897 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5898 else
5899 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5900
5901 generate_key_ireq.flags = flags;
5902 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5903 memset((void *)generate_key_ireq.key_id,
5904 0, QSEECOM_KEY_ID_SIZE);
5905 memset((void *)generate_key_ireq.hash32,
5906 0, QSEECOM_HASH_SIZE);
5907 memcpy((void *)generate_key_ireq.key_id,
5908 (void *)key_id_array[create_key_req.usage].desc,
5909 QSEECOM_KEY_ID_SIZE);
5910 memcpy((void *)generate_key_ireq.hash32,
5911 (void *)create_key_req.hash32,
5912 QSEECOM_HASH_SIZE);
5913
5914 ret = __qseecom_generate_and_save_key(data,
5915 create_key_req.usage, &generate_key_ireq);
5916 if (ret) {
5917 pr_err("Failed to generate key on storage: %d\n", ret);
5918 goto free_buf;
5919 }
5920
5921 for (i = 0; i < entries; i++) {
5922 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5923 if (create_key_req.usage ==
5924 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5925 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5926 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5927
5928 } else if (create_key_req.usage ==
5929 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5930 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5931 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5932
5933 } else {
5934 set_key_ireq.ce = ce_hw[i];
5935 set_key_ireq.pipe = pipe;
5936 }
5937 set_key_ireq.flags = flags;
5938
5939 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5940 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5941 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5942 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5943 memcpy((void *)set_key_ireq.key_id,
5944 (void *)key_id_array[create_key_req.usage].desc,
5945 QSEECOM_KEY_ID_SIZE);
5946 memcpy((void *)set_key_ireq.hash32,
5947 (void *)create_key_req.hash32,
5948 QSEECOM_HASH_SIZE);
5949 /*
5950 * It will return false if it is GPCE based crypto instance or
5951 * ICE is setup properly
5952 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07005953 ret = qseecom_enable_ice_setup(create_key_req.usage);
5954 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005955 goto free_buf;
5956
5957 do {
5958 ret = __qseecom_set_clear_ce_key(data,
5959 create_key_req.usage,
5960 &set_key_ireq);
5961 /*
5962 * wait a little before calling scm again to let other
5963 * processes run
5964 */
5965 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
5966 msleep(50);
5967
5968 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
5969
5970 qseecom_disable_ice_setup(create_key_req.usage);
5971
5972 if (ret) {
5973 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
5974 pipe, ce_hw[i], ret);
5975 goto free_buf;
5976 } else {
5977 pr_err("Set the key successfully\n");
5978 if ((create_key_req.usage ==
5979 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
5980 (create_key_req.usage ==
5981 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
5982 goto free_buf;
5983 }
5984 }
5985
5986free_buf:
5987 kzfree(ce_hw);
5988 return ret;
5989}
5990
5991static int qseecom_wipe_key(struct qseecom_dev_handle *data,
5992 void __user *argp)
5993{
5994 uint32_t *ce_hw = NULL;
5995 uint32_t pipe = 0;
5996 int ret = 0;
5997 uint32_t flags = 0;
5998 int i, j;
5999 struct qseecom_wipe_key_req wipe_key_req;
6000 struct qseecom_key_delete_ireq delete_key_ireq;
6001 struct qseecom_key_select_ireq clear_key_ireq;
6002 uint32_t entries = 0;
6003
6004 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6005 if (ret) {
6006 pr_err("copy_from_user failed\n");
6007 return ret;
6008 }
6009
6010 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6011 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6012 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6013 ret = -EFAULT;
6014 return ret;
6015 }
6016
6017 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6018 wipe_key_req.usage);
6019 if (entries <= 0) {
6020 pr_err("no ce instance for usage %d instance %d\n",
6021 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6022 ret = -EINVAL;
6023 return ret;
6024 }
6025
6026 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6027 if (!ce_hw) {
6028 ret = -ENOMEM;
6029 return ret;
6030 }
6031
6032 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6033 DEFAULT_CE_INFO_UNIT);
6034 if (ret) {
6035 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6036 ret = -EINVAL;
6037 goto free_buf;
6038 }
6039
6040 if (wipe_key_req.wipe_key_flag) {
6041 delete_key_ireq.flags = flags;
6042 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6043 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6044 memcpy((void *)delete_key_ireq.key_id,
6045 (void *)key_id_array[wipe_key_req.usage].desc,
6046 QSEECOM_KEY_ID_SIZE);
6047 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6048
6049 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6050 &delete_key_ireq);
6051 if (ret) {
6052 pr_err("Failed to delete key from ssd storage: %d\n",
6053 ret);
6054 ret = -EFAULT;
6055 goto free_buf;
6056 }
6057 }
6058
6059 for (j = 0; j < entries; j++) {
6060 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6061 if (wipe_key_req.usage ==
6062 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6063 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6064 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6065 } else if (wipe_key_req.usage ==
6066 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6067 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6068 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6069 } else {
6070 clear_key_ireq.ce = ce_hw[j];
6071 clear_key_ireq.pipe = pipe;
6072 }
6073 clear_key_ireq.flags = flags;
6074 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6075 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6076 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6077 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6078
6079 /*
6080 * It will return false if it is GPCE based crypto instance or
6081 * ICE is setup properly
6082 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006083 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6084 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006085 goto free_buf;
6086
6087 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6088 &clear_key_ireq);
6089
6090 qseecom_disable_ice_setup(wipe_key_req.usage);
6091
6092 if (ret) {
6093 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6094 pipe, ce_hw[j], ret);
6095 ret = -EFAULT;
6096 goto free_buf;
6097 }
6098 }
6099
6100free_buf:
6101 kzfree(ce_hw);
6102 return ret;
6103}
6104
6105static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6106 void __user *argp)
6107{
6108 int ret = 0;
6109 uint32_t flags = 0;
6110 struct qseecom_update_key_userinfo_req update_key_req;
6111 struct qseecom_key_userinfo_update_ireq ireq;
6112
6113 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6114 if (ret) {
6115 pr_err("copy_from_user failed\n");
6116 return ret;
6117 }
6118
6119 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6120 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6121 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6122 return -EFAULT;
6123 }
6124
6125 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6126
6127 if (qseecom.fde_key_size)
6128 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6129 else
6130 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6131
6132 ireq.flags = flags;
6133 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6134 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6135 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6136 memcpy((void *)ireq.key_id,
6137 (void *)key_id_array[update_key_req.usage].desc,
6138 QSEECOM_KEY_ID_SIZE);
6139 memcpy((void *)ireq.current_hash32,
6140 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6141 memcpy((void *)ireq.new_hash32,
6142 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6143
6144 do {
6145 ret = __qseecom_update_current_key_user_info(data,
6146 update_key_req.usage,
6147 &ireq);
6148 /*
6149 * wait a little before calling scm again to let other
6150 * processes run
6151 */
6152 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6153 msleep(50);
6154
6155 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6156 if (ret) {
6157 pr_err("Failed to update key info: %d\n", ret);
6158 return ret;
6159 }
6160 return ret;
6161
6162}
6163static int qseecom_is_es_activated(void __user *argp)
6164{
6165 struct qseecom_is_es_activated_req req;
6166 struct qseecom_command_scm_resp resp;
6167 int ret;
6168
6169 if (qseecom.qsee_version < QSEE_VERSION_04) {
6170 pr_err("invalid qsee version\n");
6171 return -ENODEV;
6172 }
6173
6174 if (argp == NULL) {
6175 pr_err("arg is null\n");
6176 return -EINVAL;
6177 }
6178
6179 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6180 &req, sizeof(req), &resp, sizeof(resp));
6181 if (ret) {
6182 pr_err("scm_call failed\n");
6183 return ret;
6184 }
6185
6186 req.is_activated = resp.result;
6187 ret = copy_to_user(argp, &req, sizeof(req));
6188 if (ret) {
6189 pr_err("copy_to_user failed\n");
6190 return ret;
6191 }
6192
6193 return 0;
6194}
6195
6196static int qseecom_save_partition_hash(void __user *argp)
6197{
6198 struct qseecom_save_partition_hash_req req;
6199 struct qseecom_command_scm_resp resp;
6200 int ret;
6201
6202 memset(&resp, 0x00, sizeof(resp));
6203
6204 if (qseecom.qsee_version < QSEE_VERSION_04) {
6205 pr_err("invalid qsee version\n");
6206 return -ENODEV;
6207 }
6208
6209 if (argp == NULL) {
6210 pr_err("arg is null\n");
6211 return -EINVAL;
6212 }
6213
6214 ret = copy_from_user(&req, argp, sizeof(req));
6215 if (ret) {
6216 pr_err("copy_from_user failed\n");
6217 return ret;
6218 }
6219
6220 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6221 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6222 if (ret) {
6223 pr_err("qseecom_scm_call failed\n");
6224 return ret;
6225 }
6226
6227 return 0;
6228}
6229
/*
 * qseecom_mdtp_cipher_dip() - cipher an MDTP DIP blob through TZ.
 * @argp: userspace pointer to a struct qseecom_mdtp_cipher_dip_req
 *        carrying in/out user buffers, their sizes, and a direction flag
 *        (0 or 1; encrypt vs decrypt is decided by TZ).
 *
 * Bounces the user input into a kernel buffer, invokes the
 * TZ_MDTP_CIPHER_DIP_ID SCM call with physical addresses, then copies
 * the result back to the user's output buffer. The do { } while (0)
 * block is used as a single-exit cleanup ladder: every failure breaks
 * out to the common kzfree() tail.
 *
 * Return: 0 on success, a negative errno, or (per this driver's
 * convention) the non-zero remainder from copy_from_user/copy_to_user.
 */
static int qseecom_mdtp_cipher_dip(void __user *argp)
{
	struct qseecom_mdtp_cipher_dip_req req;
	u32 tzbuflenin, tzbuflenout;
	char *tzbufin = NULL, *tzbufout = NULL;
	struct scm_desc desc = {0};
	int ret;

	do {
		/* Copy the parameters from userspace */
		if (argp == NULL) {
			pr_err("arg is null\n");
			ret = -EINVAL;
			break;
		}

		ret = copy_from_user(&req, argp, sizeof(req));
		if (ret) {
			pr_err("copy_from_user failed, ret= %d\n", ret);
			break;
		}

		/* Bound both sizes by MAX_DIP and reject bad direction. */
		if (req.in_buf == NULL || req.out_buf == NULL ||
			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
			req.direction > 1) {
			pr_err("invalid parameters\n");
			ret = -EINVAL;
			break;
		}

		/* Copy the input buffer from userspace to kernel space */
		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
		if (!tzbufin) {
			pr_err("error allocating in buffer\n");
			ret = -ENOMEM;
			break;
		}

		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
		if (ret) {
			pr_err("copy_from_user failed, ret=%d\n", ret);
			break;
		}

		/* Make the input visible to TZ, which accesses it by phys. */
		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);

		/* Prepare the output buffer in kernel space */
		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
		if (!tzbufout) {
			pr_err("error allocating out buffer\n");
			ret = -ENOMEM;
			break;
		}

		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);

		/* Send the command to TZ */
		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
		desc.args[0] = virt_to_phys(tzbufin);
		desc.args[1] = req.in_buf_size;
		desc.args[2] = virt_to_phys(tzbufout);
		desc.args[3] = req.out_buf_size;
		desc.args[4] = req.direction;

		/* Crypto clock must be on for the call; off again after. */
		ret = __qseecom_enable_clk(CLK_QSEE);
		if (ret)
			break;

		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);

		__qseecom_disable_clk(CLK_QSEE);

		if (ret) {
			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
				ret);
			break;
		}

		/* Copy the output buffer from kernel space to userspace */
		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
		if (ret) {
			pr_err("copy_to_user failed, ret=%d\n", ret);
			break;
		}
	} while (0);

	/* kzfree zeroizes before freeing and tolerates NULL. */
	kzfree(tzbufin);
	kzfree(tzbufout);

	return ret;
}
6325
6326static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6327 struct qseecom_qteec_req *req)
6328{
6329 if (!data || !data->client.ihandle) {
6330 pr_err("Client or client handle is not initialized\n");
6331 return -EINVAL;
6332 }
6333
6334 if (data->type != QSEECOM_CLIENT_APP)
6335 return -EFAULT;
6336
6337 if (req->req_len > UINT_MAX - req->resp_len) {
6338 pr_err("Integer overflow detected in req_len & rsp_len\n");
6339 return -EINVAL;
6340 }
6341
6342 if (req->req_len + req->resp_len > data->client.sb_length) {
6343 pr_debug("Not enough memory to fit cmd_buf.\n");
6344 pr_debug("resp_buf. Required: %u, Available: %zu\n",
6345 (req->req_len + req->resp_len), data->client.sb_length);
6346 return -ENOMEM;
6347 }
6348
6349 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6350 pr_err("cmd buffer or response buffer is null\n");
6351 return -EINVAL;
6352 }
6353 if (((uintptr_t)req->req_ptr <
6354 data->client.user_virt_sb_base) ||
6355 ((uintptr_t)req->req_ptr >=
6356 (data->client.user_virt_sb_base + data->client.sb_length))) {
6357 pr_err("cmd buffer address not within shared bufffer\n");
6358 return -EINVAL;
6359 }
6360
6361 if (((uintptr_t)req->resp_ptr <
6362 data->client.user_virt_sb_base) ||
6363 ((uintptr_t)req->resp_ptr >=
6364 (data->client.user_virt_sb_base + data->client.sb_length))) {
6365 pr_err("response buffer address not within shared bufffer\n");
6366 return -EINVAL;
6367 }
6368
6369 if ((req->req_len == 0) || (req->resp_len == 0)) {
6370 pr_err("cmd buf lengtgh/response buf length not valid\n");
6371 return -EINVAL;
6372 }
6373
6374 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6375 pr_err("Integer overflow in req_len & req_ptr\n");
6376 return -EINVAL;
6377 }
6378
6379 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6380 pr_err("Integer overflow in resp_len & resp_ptr\n");
6381 return -EINVAL;
6382 }
6383
6384 if (data->client.user_virt_sb_base >
6385 (ULONG_MAX - data->client.sb_length)) {
6386 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6387 return -EINVAL;
6388 }
6389 if ((((uintptr_t)req->req_ptr + req->req_len) >
6390 ((uintptr_t)data->client.user_virt_sb_base +
6391 data->client.sb_length)) ||
6392 (((uintptr_t)req->resp_ptr + req->resp_len) >
6393 ((uintptr_t)data->client.user_virt_sb_base +
6394 data->client.sb_length))) {
6395 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6396 return -EINVAL;
6397 }
6398 return 0;
6399}
6400
/*
 * __qseecom_qteec_handle_pre_alc_fd() - build a physically-addressed
 * scatter-gather descriptor table for a pre-allocated (secure-heap) fd.
 * @data:   per-client handle; the result is recorded in
 *          data->client.sec_buf_fd[fd_idx].
 * @fd_idx: slot index; must be < MAX_ION_FD.
 * @sg_ptr: scatter-gather table of the imported ION buffer.
 *
 * Allocates a coherent buffer laid out as:
 *   [uint32_t nents][struct qseecom_sg_entry * nents]
 * with each entry holding a 32-bit physical address and length, and
 * stores the buffer's vbase/pbase/size in the client's slot so the
 * caller can patch its physical address into the command and later
 * free it during cleanup.
 *
 * Return: 0 on success, -ENOMEM on a bad index or allocation failure.
 */
static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
				uint32_t fd_idx, struct sg_table *sg_ptr)
{
	struct scatterlist *sg = sg_ptr->sgl;
	struct qseecom_sg_entry *sg_entry;
	void *buf;
	uint i;
	size_t size;
	dma_addr_t coh_pmem;

	if (fd_idx >= MAX_ION_FD) {
		pr_err("fd_idx [%d] is invalid\n", fd_idx);
		return -ENOMEM;
	}
	/*
	 * Allocate a buffer, populate it with number of entry plus
	 * each sg entry's phy addr and length; then return the
	 * phy_addr of the buffer.
	 */
	size = sizeof(uint32_t) +
		sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
	/*
	 * NOTE(review): this rounds up to the NEXT page even when size is
	 * already page-aligned (unlike PAGE_ALIGN, which is
	 * (size + PAGE_SIZE - 1) & PAGE_MASK). Harmless over-allocation,
	 * but confirm whether PAGE_ALIGN was intended.
	 */
	size = (size + PAGE_SIZE) & PAGE_MASK;
	buf = dma_alloc_coherent(qseecom.pdev,
			size, &coh_pmem, GFP_KERNEL);
	if (buf == NULL) {
		pr_err("failed to alloc memory for sg buf\n");
		return -ENOMEM;
	}
	/* Header: entry count, then the packed addr/len entries. */
	*(uint32_t *)buf = sg_ptr->nents;
	sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
	for (i = 0; i < sg_ptr->nents; i++) {
		sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
		sg_entry->len = sg->length;
		sg_entry++;
		sg = sg_next(sg);
	}
	/* Record ownership so cleanup can dma_free_coherent() it later. */
	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
	data->client.sec_buf_fd[fd_idx].vbase = buf;
	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
	data->client.sec_buf_fd[fd_idx].size = size;
	return 0;
}
6443
6444static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6445 struct qseecom_dev_handle *data, bool cleanup)
6446{
6447 struct ion_handle *ihandle;
6448 int ret = 0;
6449 int i = 0;
6450 uint32_t *update;
6451 struct sg_table *sg_ptr = NULL;
6452 struct scatterlist *sg;
6453 struct qseecom_param_memref *memref;
6454
6455 if (req == NULL) {
6456 pr_err("Invalid address\n");
6457 return -EINVAL;
6458 }
6459 for (i = 0; i < MAX_ION_FD; i++) {
6460 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006461 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006462 req->ifd_data[i].fd);
6463 if (IS_ERR_OR_NULL(ihandle)) {
6464 pr_err("Ion client can't retrieve the handle\n");
6465 return -ENOMEM;
6466 }
6467 if ((req->req_len < sizeof(uint32_t)) ||
6468 (req->ifd_data[i].cmd_buf_offset >
6469 req->req_len - sizeof(uint32_t))) {
6470 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6471 req->req_len,
6472 req->ifd_data[i].cmd_buf_offset);
6473 return -EINVAL;
6474 }
6475 update = (uint32_t *)((char *) req->req_ptr +
6476 req->ifd_data[i].cmd_buf_offset);
6477 if (!update) {
6478 pr_err("update pointer is NULL\n");
6479 return -EINVAL;
6480 }
6481 } else {
6482 continue;
6483 }
6484 /* Populate the cmd data structure with the phys_addr */
6485 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6486 if (IS_ERR_OR_NULL(sg_ptr)) {
6487 pr_err("IOn client could not retrieve sg table\n");
6488 goto err;
6489 }
6490 sg = sg_ptr->sgl;
6491 if (sg == NULL) {
6492 pr_err("sg is NULL\n");
6493 goto err;
6494 }
6495 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6496 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6497 sg_ptr->nents, sg->length);
6498 goto err;
6499 }
6500 /* clean up buf for pre-allocated fd */
6501 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6502 (*update)) {
6503 if (data->client.sec_buf_fd[i].vbase)
6504 dma_free_coherent(qseecom.pdev,
6505 data->client.sec_buf_fd[i].size,
6506 data->client.sec_buf_fd[i].vbase,
6507 data->client.sec_buf_fd[i].pbase);
6508 memset((void *)update, 0,
6509 sizeof(struct qseecom_param_memref));
6510 memset(&(data->client.sec_buf_fd[i]), 0,
6511 sizeof(struct qseecom_sec_buf_fd_info));
6512 goto clean;
6513 }
6514
6515 if (*update == 0) {
6516 /* update buf for pre-allocated fd from secure heap*/
6517 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6518 sg_ptr);
6519 if (ret) {
6520 pr_err("Failed to handle buf for fd[%d]\n", i);
6521 goto err;
6522 }
6523 memref = (struct qseecom_param_memref *)update;
6524 memref->buffer =
6525 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6526 memref->size =
6527 (uint32_t)(data->client.sec_buf_fd[i].size);
6528 } else {
6529 /* update buf for fd from non-secure qseecom heap */
6530 if (sg_ptr->nents != 1) {
6531 pr_err("Num of scat entr (%d) invalid\n",
6532 sg_ptr->nents);
6533 goto err;
6534 }
6535 if (cleanup)
6536 *update = 0;
6537 else
6538 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6539 }
6540clean:
6541 if (cleanup) {
6542 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6543 ihandle, NULL, sg->length,
6544 ION_IOC_INV_CACHES);
6545 if (ret) {
6546 pr_err("cache operation failed %d\n", ret);
6547 goto err;
6548 }
6549 } else {
6550 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6551 ihandle, NULL, sg->length,
6552 ION_IOC_CLEAN_INV_CACHES);
6553 if (ret) {
6554 pr_err("cache operation failed %d\n", ret);
6555 goto err;
6556 }
6557 data->sglistinfo_ptr[i].indexAndFlags =
6558 SGLISTINFO_SET_INDEX_FLAG(
6559 (sg_ptr->nents == 1), 0,
6560 req->ifd_data[i].cmd_buf_offset);
6561 data->sglistinfo_ptr[i].sizeOrCount =
6562 (sg_ptr->nents == 1) ?
6563 sg->length : sg_ptr->nents;
6564 data->sglist_cnt = i + 1;
6565 }
6566 /* Deallocate the handle */
6567 if (!IS_ERR_OR_NULL(ihandle))
6568 ion_free(qseecom.ion_clnt, ihandle);
6569 }
6570 return ret;
6571err:
6572 if (!IS_ERR_OR_NULL(ihandle))
6573 ion_free(qseecom.ion_clnt, ihandle);
6574 return -ENOMEM;
6575}
6576
/*
 * __qseecom_qteec_issue_cmd() - common path for QTEEC open-session,
 * close-session and request-cancellation commands.
 * @data:   per-client handle; its app must be registered.
 * @req:    validated request; req_ptr/resp_ptr are rewritten in place
 *          from user virtual to kernel virtual addresses (the original
 *          user pointers are preserved in req_ptr/resp_ptr locals and
 *          used for the phys translation handed to TZ).
 * @cmd_id: QSEOS_TEE_* command; OPEN_SESSION and REQUEST_CANCELLATION
 *          additionally carry modfd data that is patched in/out via
 *          __qseecom_update_qteec_req_buf().
 *
 * Builds a 32- or 64-bit ireq depending on the QSEE version, cleans the
 * shared buffer cache, issues the scm call, and processes incomplete/
 * reentrancy responses. All post-scm failures fall through the exit:
 * label so cache invalidation and modfd cleanup always run (ret2 keeps
 * the cleanup status separate from the command status).
 *
 * Return: 0 on success, a negative errno, or a cleanup failure code.
 */
static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req, uint32_t cmd_id)
{
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int ret2 = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	/* Original user-space pointers, kept for uvirt->kphys below. */
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = __qseecom_qteec_validate_msg(data, req);
	if (ret)
		return ret;

	req_ptr = req->req_ptr;
	resp_ptr = req->resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* Rewrite req's pointers to kernel VAs for local patching. */
	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->req_ptr);
	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->resp_ptr);

	/* Only these two commands carry modfd payloads to substitute. */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, false);
		if (ret)
			return ret;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req->req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req->resp_len;
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req->req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req->resp_len;
		/* 32-bit apps cannot address buffers at/above 4G. */
		if ((data->client.app_arch == ELFCLASS32) &&
			((ireq_64bit.req_ptr >=
				PHY_ADDR_4G - ireq_64bit.req_len) ||
			(ireq_64bit.resp_ptr >=
				PHY_ADDR_4G - ireq_64bit.resp_len))){
			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
				data->client.app_name, data->client.app_id);
			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
				ireq_64bit.req_ptr, ireq_64bit.req_len,
				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
			return -EFAULT;
		}
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
	}
	/* First word of the ireq selects the command variant. */
	if (qseecom.whitelist_support == true
		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
	else
		*(uint32_t *)cmd_buf = cmd_id;

	reqd_len_sb_in = req->req_len + req->resp_len;
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
						resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/* Cleanup always runs; ret2 keeps it distinct from cmd status. */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
			data->client.sb_virt, data->client.sb_length,
			ION_IOC_INV_CACHES);
	if (ret2) {
		/* NOTE(review): logs ret, but ret2 is the failing value. */
		pr_err("cache operation failed %d\n", ret);
		return ret2;
	}

	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret2 = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, true);
		if (ret2)
			return ret2;
	}
	return ret;
}
6739
6740static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6741 void __user *argp)
6742{
6743 struct qseecom_qteec_modfd_req req;
6744 int ret = 0;
6745
6746 ret = copy_from_user(&req, argp,
6747 sizeof(struct qseecom_qteec_modfd_req));
6748 if (ret) {
6749 pr_err("copy_from_user failed\n");
6750 return ret;
6751 }
6752 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6753 QSEOS_TEE_OPEN_SESSION);
6754
6755 return ret;
6756}
6757
6758static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6759 void __user *argp)
6760{
6761 struct qseecom_qteec_req req;
6762 int ret = 0;
6763
6764 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6765 if (ret) {
6766 pr_err("copy_from_user failed\n");
6767 return ret;
6768 }
6769 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6770 return ret;
6771}
6772
6773static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6774 void __user *argp)
6775{
6776 struct qseecom_qteec_modfd_req req;
6777 struct qseecom_command_scm_resp resp;
6778 struct qseecom_qteec_ireq ireq;
6779 struct qseecom_qteec_64bit_ireq ireq_64bit;
6780 struct qseecom_registered_app_list *ptr_app;
6781 bool found_app = false;
6782 unsigned long flags;
6783 int ret = 0;
6784 int i = 0;
6785 uint32_t reqd_len_sb_in = 0;
6786 void *cmd_buf = NULL;
6787 size_t cmd_len;
6788 struct sglist_info *table = data->sglistinfo_ptr;
6789 void *req_ptr = NULL;
6790 void *resp_ptr = NULL;
6791
6792 ret = copy_from_user(&req, argp,
6793 sizeof(struct qseecom_qteec_modfd_req));
6794 if (ret) {
6795 pr_err("copy_from_user failed\n");
6796 return ret;
6797 }
6798 ret = __qseecom_qteec_validate_msg(data,
6799 (struct qseecom_qteec_req *)(&req));
6800 if (ret)
6801 return ret;
6802 req_ptr = req.req_ptr;
6803 resp_ptr = req.resp_ptr;
6804
6805 /* find app_id & img_name from list */
6806 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6807 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6808 list) {
6809 if ((ptr_app->app_id == data->client.app_id) &&
6810 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6811 found_app = true;
6812 break;
6813 }
6814 }
6815 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6816 if (!found_app) {
6817 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6818 (char *)data->client.app_name);
6819 return -ENOENT;
6820 }
6821
6822 /* validate offsets */
6823 for (i = 0; i < MAX_ION_FD; i++) {
6824 if (req.ifd_data[i].fd) {
6825 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
6826 return -EINVAL;
6827 }
6828 }
6829 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6830 (uintptr_t)req.req_ptr);
6831 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6832 (uintptr_t)req.resp_ptr);
6833 ret = __qseecom_update_qteec_req_buf(&req, data, false);
6834 if (ret)
6835 return ret;
6836
6837 if (qseecom.qsee_version < QSEE_VERSION_40) {
6838 ireq.app_id = data->client.app_id;
6839 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6840 (uintptr_t)req_ptr);
6841 ireq.req_len = req.req_len;
6842 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6843 (uintptr_t)resp_ptr);
6844 ireq.resp_len = req.resp_len;
6845 cmd_buf = (void *)&ireq;
6846 cmd_len = sizeof(struct qseecom_qteec_ireq);
6847 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6848 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6849 dmac_flush_range((void *)table,
6850 (void *)table + SGLISTINFO_TABLE_SIZE);
6851 } else {
6852 ireq_64bit.app_id = data->client.app_id;
6853 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6854 (uintptr_t)req_ptr);
6855 ireq_64bit.req_len = req.req_len;
6856 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6857 (uintptr_t)resp_ptr);
6858 ireq_64bit.resp_len = req.resp_len;
6859 cmd_buf = (void *)&ireq_64bit;
6860 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6861 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6862 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6863 dmac_flush_range((void *)table,
6864 (void *)table + SGLISTINFO_TABLE_SIZE);
6865 }
6866 reqd_len_sb_in = req.req_len + req.resp_len;
6867 if (qseecom.whitelist_support == true)
6868 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
6869 else
6870 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
6871
6872 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6873 data->client.sb_virt,
6874 reqd_len_sb_in,
6875 ION_IOC_CLEAN_INV_CACHES);
6876 if (ret) {
6877 pr_err("cache operation failed %d\n", ret);
6878 return ret;
6879 }
6880
6881 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6882
6883 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6884 cmd_buf, cmd_len,
6885 &resp, sizeof(resp));
6886 if (ret) {
6887 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6888 ret, data->client.app_id);
6889 return ret;
6890 }
6891
6892 if (qseecom.qsee_reentrancy_support) {
6893 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
6894 } else {
6895 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6896 ret = __qseecom_process_incomplete_cmd(data, &resp);
6897 if (ret) {
6898 pr_err("process_incomplete_cmd failed err: %d\n",
6899 ret);
6900 return ret;
6901 }
6902 } else {
6903 if (resp.result != QSEOS_RESULT_SUCCESS) {
6904 pr_err("Response result %d not supported\n",
6905 resp.result);
6906 ret = -EINVAL;
6907 }
6908 }
6909 }
6910 ret = __qseecom_update_qteec_req_buf(&req, data, true);
6911 if (ret)
6912 return ret;
6913
6914 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6915 data->client.sb_virt, data->client.sb_length,
6916 ION_IOC_INV_CACHES);
6917 if (ret) {
6918 pr_err("cache operation failed %d\n", ret);
6919 return ret;
6920 }
6921 return 0;
6922}
6923
6924static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6925 void __user *argp)
6926{
6927 struct qseecom_qteec_modfd_req req;
6928 int ret = 0;
6929
6930 ret = copy_from_user(&req, argp,
6931 sizeof(struct qseecom_qteec_modfd_req));
6932 if (ret) {
6933 pr_err("copy_from_user failed\n");
6934 return ret;
6935 }
6936 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6937 QSEOS_TEE_REQUEST_CANCELLATION);
6938
6939 return ret;
6940}
6941
6942static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6943{
6944 if (data->sglist_cnt) {
6945 memset(data->sglistinfo_ptr, 0,
6946 SGLISTINFO_TABLE_SIZE);
6947 data->sglist_cnt = 0;
6948 }
6949}
6950
6951static inline long qseecom_ioctl(struct file *file,
6952 unsigned int cmd, unsigned long arg)
6953{
6954 int ret = 0;
6955 struct qseecom_dev_handle *data = file->private_data;
6956 void __user *argp = (void __user *) arg;
6957 bool perf_enabled = false;
6958
6959 if (!data) {
6960 pr_err("Invalid/uninitialized device handle\n");
6961 return -EINVAL;
6962 }
6963
6964 if (data->abort) {
6965 pr_err("Aborting qseecom driver\n");
6966 return -ENODEV;
6967 }
6968
6969 switch (cmd) {
6970 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
6971 if (data->type != QSEECOM_GENERIC) {
6972 pr_err("reg lstnr req: invalid handle (%d)\n",
6973 data->type);
6974 ret = -EINVAL;
6975 break;
6976 }
6977 pr_debug("ioctl register_listener_req()\n");
6978 mutex_lock(&app_access_lock);
6979 atomic_inc(&data->ioctl_count);
6980 data->type = QSEECOM_LISTENER_SERVICE;
6981 ret = qseecom_register_listener(data, argp);
6982 atomic_dec(&data->ioctl_count);
6983 wake_up_all(&data->abort_wq);
6984 mutex_unlock(&app_access_lock);
6985 if (ret)
6986 pr_err("failed qseecom_register_listener: %d\n", ret);
6987 break;
6988 }
6989 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
6990 if ((data->listener.id == 0) ||
6991 (data->type != QSEECOM_LISTENER_SERVICE)) {
6992 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
6993 data->type, data->listener.id);
6994 ret = -EINVAL;
6995 break;
6996 }
6997 pr_debug("ioctl unregister_listener_req()\n");
6998 mutex_lock(&app_access_lock);
6999 atomic_inc(&data->ioctl_count);
7000 ret = qseecom_unregister_listener(data);
7001 atomic_dec(&data->ioctl_count);
7002 wake_up_all(&data->abort_wq);
7003 mutex_unlock(&app_access_lock);
7004 if (ret)
7005 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7006 break;
7007 }
7008 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7009 if ((data->client.app_id == 0) ||
7010 (data->type != QSEECOM_CLIENT_APP)) {
7011 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7012 data->type, data->client.app_id);
7013 ret = -EINVAL;
7014 break;
7015 }
7016 /* Only one client allowed here at a time */
7017 mutex_lock(&app_access_lock);
7018 if (qseecom.support_bus_scaling) {
7019 /* register bus bw in case the client doesn't do it */
7020 if (!data->mode) {
7021 mutex_lock(&qsee_bw_mutex);
7022 __qseecom_register_bus_bandwidth_needs(
7023 data, HIGH);
7024 mutex_unlock(&qsee_bw_mutex);
7025 }
7026 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7027 if (ret) {
7028 pr_err("Failed to set bw.\n");
7029 ret = -EINVAL;
7030 mutex_unlock(&app_access_lock);
7031 break;
7032 }
7033 }
7034 /*
7035 * On targets where crypto clock is handled by HLOS,
7036 * if clk_access_cnt is zero and perf_enabled is false,
7037 * then the crypto clock was not enabled before sending cmd to
7038 * tz, qseecom will enable the clock to avoid service failure.
7039 */
7040 if (!qseecom.no_clock_support &&
7041 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7042 pr_debug("ce clock is not enabled!\n");
7043 ret = qseecom_perf_enable(data);
7044 if (ret) {
7045 pr_err("Failed to vote for clock with err %d\n",
7046 ret);
7047 mutex_unlock(&app_access_lock);
7048 ret = -EINVAL;
7049 break;
7050 }
7051 perf_enabled = true;
7052 }
7053 atomic_inc(&data->ioctl_count);
7054 ret = qseecom_send_cmd(data, argp);
7055 if (qseecom.support_bus_scaling)
7056 __qseecom_add_bw_scale_down_timer(
7057 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7058 if (perf_enabled) {
7059 qsee_disable_clock_vote(data, CLK_DFAB);
7060 qsee_disable_clock_vote(data, CLK_SFPB);
7061 }
7062 atomic_dec(&data->ioctl_count);
7063 wake_up_all(&data->abort_wq);
7064 mutex_unlock(&app_access_lock);
7065 if (ret)
7066 pr_err("failed qseecom_send_cmd: %d\n", ret);
7067 break;
7068 }
7069 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7070 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7071 if ((data->client.app_id == 0) ||
7072 (data->type != QSEECOM_CLIENT_APP)) {
7073 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7074 data->type, data->client.app_id);
7075 ret = -EINVAL;
7076 break;
7077 }
7078 /* Only one client allowed here at a time */
7079 mutex_lock(&app_access_lock);
7080 if (qseecom.support_bus_scaling) {
7081 if (!data->mode) {
7082 mutex_lock(&qsee_bw_mutex);
7083 __qseecom_register_bus_bandwidth_needs(
7084 data, HIGH);
7085 mutex_unlock(&qsee_bw_mutex);
7086 }
7087 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7088 if (ret) {
7089 pr_err("Failed to set bw.\n");
7090 mutex_unlock(&app_access_lock);
7091 ret = -EINVAL;
7092 break;
7093 }
7094 }
7095 /*
7096 * On targets where crypto clock is handled by HLOS,
7097 * if clk_access_cnt is zero and perf_enabled is false,
7098 * then the crypto clock was not enabled before sending cmd to
7099 * tz, qseecom will enable the clock to avoid service failure.
7100 */
7101 if (!qseecom.no_clock_support &&
7102 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7103 pr_debug("ce clock is not enabled!\n");
7104 ret = qseecom_perf_enable(data);
7105 if (ret) {
7106 pr_err("Failed to vote for clock with err %d\n",
7107 ret);
7108 mutex_unlock(&app_access_lock);
7109 ret = -EINVAL;
7110 break;
7111 }
7112 perf_enabled = true;
7113 }
7114 atomic_inc(&data->ioctl_count);
7115 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7116 ret = qseecom_send_modfd_cmd(data, argp);
7117 else
7118 ret = qseecom_send_modfd_cmd_64(data, argp);
7119 if (qseecom.support_bus_scaling)
7120 __qseecom_add_bw_scale_down_timer(
7121 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7122 if (perf_enabled) {
7123 qsee_disable_clock_vote(data, CLK_DFAB);
7124 qsee_disable_clock_vote(data, CLK_SFPB);
7125 }
7126 atomic_dec(&data->ioctl_count);
7127 wake_up_all(&data->abort_wq);
7128 mutex_unlock(&app_access_lock);
7129 if (ret)
7130 pr_err("failed qseecom_send_cmd: %d\n", ret);
7131 __qseecom_clean_data_sglistinfo(data);
7132 break;
7133 }
7134 case QSEECOM_IOCTL_RECEIVE_REQ: {
7135 if ((data->listener.id == 0) ||
7136 (data->type != QSEECOM_LISTENER_SERVICE)) {
7137 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7138 data->type, data->listener.id);
7139 ret = -EINVAL;
7140 break;
7141 }
7142 atomic_inc(&data->ioctl_count);
7143 ret = qseecom_receive_req(data);
7144 atomic_dec(&data->ioctl_count);
7145 wake_up_all(&data->abort_wq);
7146 if (ret && (ret != -ERESTARTSYS))
7147 pr_err("failed qseecom_receive_req: %d\n", ret);
7148 break;
7149 }
7150 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7151 if ((data->listener.id == 0) ||
7152 (data->type != QSEECOM_LISTENER_SERVICE)) {
7153 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7154 data->type, data->listener.id);
7155 ret = -EINVAL;
7156 break;
7157 }
7158 atomic_inc(&data->ioctl_count);
7159 if (!qseecom.qsee_reentrancy_support)
7160 ret = qseecom_send_resp();
7161 else
7162 ret = qseecom_reentrancy_send_resp(data);
7163 atomic_dec(&data->ioctl_count);
7164 wake_up_all(&data->abort_wq);
7165 if (ret)
7166 pr_err("failed qseecom_send_resp: %d\n", ret);
7167 break;
7168 }
7169 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7170 if ((data->type != QSEECOM_CLIENT_APP) &&
7171 (data->type != QSEECOM_GENERIC) &&
7172 (data->type != QSEECOM_SECURE_SERVICE)) {
7173 pr_err("set mem param req: invalid handle (%d)\n",
7174 data->type);
7175 ret = -EINVAL;
7176 break;
7177 }
7178 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7179 mutex_lock(&app_access_lock);
7180 atomic_inc(&data->ioctl_count);
7181 ret = qseecom_set_client_mem_param(data, argp);
7182 atomic_dec(&data->ioctl_count);
7183 mutex_unlock(&app_access_lock);
7184 if (ret)
7185 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7186 ret);
7187 break;
7188 }
7189 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7190 if ((data->type != QSEECOM_GENERIC) &&
7191 (data->type != QSEECOM_CLIENT_APP)) {
7192 pr_err("load app req: invalid handle (%d)\n",
7193 data->type);
7194 ret = -EINVAL;
7195 break;
7196 }
7197 data->type = QSEECOM_CLIENT_APP;
7198 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7199 mutex_lock(&app_access_lock);
7200 atomic_inc(&data->ioctl_count);
7201 ret = qseecom_load_app(data, argp);
7202 atomic_dec(&data->ioctl_count);
7203 mutex_unlock(&app_access_lock);
7204 if (ret)
7205 pr_err("failed load_app request: %d\n", ret);
7206 break;
7207 }
7208 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7209 if ((data->client.app_id == 0) ||
7210 (data->type != QSEECOM_CLIENT_APP)) {
7211 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7212 data->type, data->client.app_id);
7213 ret = -EINVAL;
7214 break;
7215 }
7216 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7217 mutex_lock(&app_access_lock);
7218 atomic_inc(&data->ioctl_count);
7219 ret = qseecom_unload_app(data, false);
7220 atomic_dec(&data->ioctl_count);
7221 mutex_unlock(&app_access_lock);
7222 if (ret)
7223 pr_err("failed unload_app request: %d\n", ret);
7224 break;
7225 }
7226 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7227 atomic_inc(&data->ioctl_count);
7228 ret = qseecom_get_qseos_version(data, argp);
7229 if (ret)
7230 pr_err("qseecom_get_qseos_version: %d\n", ret);
7231 atomic_dec(&data->ioctl_count);
7232 break;
7233 }
7234 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7235 if ((data->type != QSEECOM_GENERIC) &&
7236 (data->type != QSEECOM_CLIENT_APP)) {
7237 pr_err("perf enable req: invalid handle (%d)\n",
7238 data->type);
7239 ret = -EINVAL;
7240 break;
7241 }
7242 if ((data->type == QSEECOM_CLIENT_APP) &&
7243 (data->client.app_id == 0)) {
7244 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7245 data->type, data->client.app_id);
7246 ret = -EINVAL;
7247 break;
7248 }
7249 atomic_inc(&data->ioctl_count);
7250 if (qseecom.support_bus_scaling) {
7251 mutex_lock(&qsee_bw_mutex);
7252 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7253 mutex_unlock(&qsee_bw_mutex);
7254 } else {
7255 ret = qseecom_perf_enable(data);
7256 if (ret)
7257 pr_err("Fail to vote for clocks %d\n", ret);
7258 }
7259 atomic_dec(&data->ioctl_count);
7260 break;
7261 }
7262 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7263 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7264 (data->type != QSEECOM_CLIENT_APP)) {
7265 pr_err("perf disable req: invalid handle (%d)\n",
7266 data->type);
7267 ret = -EINVAL;
7268 break;
7269 }
7270 if ((data->type == QSEECOM_CLIENT_APP) &&
7271 (data->client.app_id == 0)) {
7272 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7273 data->type, data->client.app_id);
7274 ret = -EINVAL;
7275 break;
7276 }
7277 atomic_inc(&data->ioctl_count);
7278 if (!qseecom.support_bus_scaling) {
7279 qsee_disable_clock_vote(data, CLK_DFAB);
7280 qsee_disable_clock_vote(data, CLK_SFPB);
7281 } else {
7282 mutex_lock(&qsee_bw_mutex);
7283 qseecom_unregister_bus_bandwidth_needs(data);
7284 mutex_unlock(&qsee_bw_mutex);
7285 }
7286 atomic_dec(&data->ioctl_count);
7287 break;
7288 }
7289
7290 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7291 /* If crypto clock is not handled by HLOS, return directly. */
7292 if (qseecom.no_clock_support) {
7293 pr_debug("crypto clock is not handled by HLOS\n");
7294 break;
7295 }
7296 if ((data->client.app_id == 0) ||
7297 (data->type != QSEECOM_CLIENT_APP)) {
7298 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7299 data->type, data->client.app_id);
7300 ret = -EINVAL;
7301 break;
7302 }
7303 atomic_inc(&data->ioctl_count);
7304 ret = qseecom_scale_bus_bandwidth(data, argp);
7305 atomic_dec(&data->ioctl_count);
7306 break;
7307 }
7308 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7309 if (data->type != QSEECOM_GENERIC) {
7310 pr_err("load ext elf req: invalid client handle (%d)\n",
7311 data->type);
7312 ret = -EINVAL;
7313 break;
7314 }
7315 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7316 data->released = true;
7317 mutex_lock(&app_access_lock);
7318 atomic_inc(&data->ioctl_count);
7319 ret = qseecom_load_external_elf(data, argp);
7320 atomic_dec(&data->ioctl_count);
7321 mutex_unlock(&app_access_lock);
7322 if (ret)
7323 pr_err("failed load_external_elf request: %d\n", ret);
7324 break;
7325 }
7326 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7327 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7328 pr_err("unload ext elf req: invalid handle (%d)\n",
7329 data->type);
7330 ret = -EINVAL;
7331 break;
7332 }
7333 data->released = true;
7334 mutex_lock(&app_access_lock);
7335 atomic_inc(&data->ioctl_count);
7336 ret = qseecom_unload_external_elf(data);
7337 atomic_dec(&data->ioctl_count);
7338 mutex_unlock(&app_access_lock);
7339 if (ret)
7340 pr_err("failed unload_app request: %d\n", ret);
7341 break;
7342 }
7343 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7344 data->type = QSEECOM_CLIENT_APP;
7345 mutex_lock(&app_access_lock);
7346 atomic_inc(&data->ioctl_count);
7347 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7348 ret = qseecom_query_app_loaded(data, argp);
7349 atomic_dec(&data->ioctl_count);
7350 mutex_unlock(&app_access_lock);
7351 break;
7352 }
7353 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7354 if (data->type != QSEECOM_GENERIC) {
7355 pr_err("send cmd svc req: invalid handle (%d)\n",
7356 data->type);
7357 ret = -EINVAL;
7358 break;
7359 }
7360 data->type = QSEECOM_SECURE_SERVICE;
7361 if (qseecom.qsee_version < QSEE_VERSION_03) {
7362 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7363 qseecom.qsee_version);
7364 return -EINVAL;
7365 }
7366 mutex_lock(&app_access_lock);
7367 atomic_inc(&data->ioctl_count);
7368 ret = qseecom_send_service_cmd(data, argp);
7369 atomic_dec(&data->ioctl_count);
7370 mutex_unlock(&app_access_lock);
7371 break;
7372 }
7373 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7374 if (!(qseecom.support_pfe || qseecom.support_fde))
7375 pr_err("Features requiring key init not supported\n");
7376 if (data->type != QSEECOM_GENERIC) {
7377 pr_err("create key req: invalid handle (%d)\n",
7378 data->type);
7379 ret = -EINVAL;
7380 break;
7381 }
7382 if (qseecom.qsee_version < QSEE_VERSION_05) {
7383 pr_err("Create Key feature unsupported: qsee ver %u\n",
7384 qseecom.qsee_version);
7385 return -EINVAL;
7386 }
7387 data->released = true;
7388 mutex_lock(&app_access_lock);
7389 atomic_inc(&data->ioctl_count);
7390 ret = qseecom_create_key(data, argp);
7391 if (ret)
7392 pr_err("failed to create encryption key: %d\n", ret);
7393
7394 atomic_dec(&data->ioctl_count);
7395 mutex_unlock(&app_access_lock);
7396 break;
7397 }
7398 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7399 if (!(qseecom.support_pfe || qseecom.support_fde))
7400 pr_err("Features requiring key init not supported\n");
7401 if (data->type != QSEECOM_GENERIC) {
7402 pr_err("wipe key req: invalid handle (%d)\n",
7403 data->type);
7404 ret = -EINVAL;
7405 break;
7406 }
7407 if (qseecom.qsee_version < QSEE_VERSION_05) {
7408 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7409 qseecom.qsee_version);
7410 return -EINVAL;
7411 }
7412 data->released = true;
7413 mutex_lock(&app_access_lock);
7414 atomic_inc(&data->ioctl_count);
7415 ret = qseecom_wipe_key(data, argp);
7416 if (ret)
7417 pr_err("failed to wipe encryption key: %d\n", ret);
7418 atomic_dec(&data->ioctl_count);
7419 mutex_unlock(&app_access_lock);
7420 break;
7421 }
7422 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7423 if (!(qseecom.support_pfe || qseecom.support_fde))
7424 pr_err("Features requiring key init not supported\n");
7425 if (data->type != QSEECOM_GENERIC) {
7426 pr_err("update key req: invalid handle (%d)\n",
7427 data->type);
7428 ret = -EINVAL;
7429 break;
7430 }
7431 if (qseecom.qsee_version < QSEE_VERSION_05) {
7432 pr_err("Update Key feature unsupported in qsee ver %u\n",
7433 qseecom.qsee_version);
7434 return -EINVAL;
7435 }
7436 data->released = true;
7437 mutex_lock(&app_access_lock);
7438 atomic_inc(&data->ioctl_count);
7439 ret = qseecom_update_key_user_info(data, argp);
7440 if (ret)
7441 pr_err("failed to update key user info: %d\n", ret);
7442 atomic_dec(&data->ioctl_count);
7443 mutex_unlock(&app_access_lock);
7444 break;
7445 }
7446 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7447 if (data->type != QSEECOM_GENERIC) {
7448 pr_err("save part hash req: invalid handle (%d)\n",
7449 data->type);
7450 ret = -EINVAL;
7451 break;
7452 }
7453 data->released = true;
7454 mutex_lock(&app_access_lock);
7455 atomic_inc(&data->ioctl_count);
7456 ret = qseecom_save_partition_hash(argp);
7457 atomic_dec(&data->ioctl_count);
7458 mutex_unlock(&app_access_lock);
7459 break;
7460 }
7461 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7462 if (data->type != QSEECOM_GENERIC) {
7463 pr_err("ES activated req: invalid handle (%d)\n",
7464 data->type);
7465 ret = -EINVAL;
7466 break;
7467 }
7468 data->released = true;
7469 mutex_lock(&app_access_lock);
7470 atomic_inc(&data->ioctl_count);
7471 ret = qseecom_is_es_activated(argp);
7472 atomic_dec(&data->ioctl_count);
7473 mutex_unlock(&app_access_lock);
7474 break;
7475 }
7476 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7477 if (data->type != QSEECOM_GENERIC) {
7478 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7479 data->type);
7480 ret = -EINVAL;
7481 break;
7482 }
7483 data->released = true;
7484 mutex_lock(&app_access_lock);
7485 atomic_inc(&data->ioctl_count);
7486 ret = qseecom_mdtp_cipher_dip(argp);
7487 atomic_dec(&data->ioctl_count);
7488 mutex_unlock(&app_access_lock);
7489 break;
7490 }
7491 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7492 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7493 if ((data->listener.id == 0) ||
7494 (data->type != QSEECOM_LISTENER_SERVICE)) {
7495 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7496 data->type, data->listener.id);
7497 ret = -EINVAL;
7498 break;
7499 }
7500 atomic_inc(&data->ioctl_count);
7501 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7502 ret = qseecom_send_modfd_resp(data, argp);
7503 else
7504 ret = qseecom_send_modfd_resp_64(data, argp);
7505 atomic_dec(&data->ioctl_count);
7506 wake_up_all(&data->abort_wq);
7507 if (ret)
7508 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7509 __qseecom_clean_data_sglistinfo(data);
7510 break;
7511 }
7512 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7513 if ((data->client.app_id == 0) ||
7514 (data->type != QSEECOM_CLIENT_APP)) {
7515 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7516 data->type, data->client.app_id);
7517 ret = -EINVAL;
7518 break;
7519 }
7520 if (qseecom.qsee_version < QSEE_VERSION_40) {
7521 pr_err("GP feature unsupported: qsee ver %u\n",
7522 qseecom.qsee_version);
7523 return -EINVAL;
7524 }
7525 /* Only one client allowed here at a time */
7526 mutex_lock(&app_access_lock);
7527 atomic_inc(&data->ioctl_count);
7528 ret = qseecom_qteec_open_session(data, argp);
7529 atomic_dec(&data->ioctl_count);
7530 wake_up_all(&data->abort_wq);
7531 mutex_unlock(&app_access_lock);
7532 if (ret)
7533 pr_err("failed open_session_cmd: %d\n", ret);
7534 __qseecom_clean_data_sglistinfo(data);
7535 break;
7536 }
7537 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7538 if ((data->client.app_id == 0) ||
7539 (data->type != QSEECOM_CLIENT_APP)) {
7540 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7541 data->type, data->client.app_id);
7542 ret = -EINVAL;
7543 break;
7544 }
7545 if (qseecom.qsee_version < QSEE_VERSION_40) {
7546 pr_err("GP feature unsupported: qsee ver %u\n",
7547 qseecom.qsee_version);
7548 return -EINVAL;
7549 }
7550 /* Only one client allowed here at a time */
7551 mutex_lock(&app_access_lock);
7552 atomic_inc(&data->ioctl_count);
7553 ret = qseecom_qteec_close_session(data, argp);
7554 atomic_dec(&data->ioctl_count);
7555 wake_up_all(&data->abort_wq);
7556 mutex_unlock(&app_access_lock);
7557 if (ret)
7558 pr_err("failed close_session_cmd: %d\n", ret);
7559 break;
7560 }
7561 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7562 if ((data->client.app_id == 0) ||
7563 (data->type != QSEECOM_CLIENT_APP)) {
7564 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7565 data->type, data->client.app_id);
7566 ret = -EINVAL;
7567 break;
7568 }
7569 if (qseecom.qsee_version < QSEE_VERSION_40) {
7570 pr_err("GP feature unsupported: qsee ver %u\n",
7571 qseecom.qsee_version);
7572 return -EINVAL;
7573 }
7574 /* Only one client allowed here at a time */
7575 mutex_lock(&app_access_lock);
7576 atomic_inc(&data->ioctl_count);
7577 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7578 atomic_dec(&data->ioctl_count);
7579 wake_up_all(&data->abort_wq);
7580 mutex_unlock(&app_access_lock);
7581 if (ret)
7582 pr_err("failed Invoke cmd: %d\n", ret);
7583 __qseecom_clean_data_sglistinfo(data);
7584 break;
7585 }
7586 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7587 if ((data->client.app_id == 0) ||
7588 (data->type != QSEECOM_CLIENT_APP)) {
7589 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7590 data->type, data->client.app_id);
7591 ret = -EINVAL;
7592 break;
7593 }
7594 if (qseecom.qsee_version < QSEE_VERSION_40) {
7595 pr_err("GP feature unsupported: qsee ver %u\n",
7596 qseecom.qsee_version);
7597 return -EINVAL;
7598 }
7599 /* Only one client allowed here at a time */
7600 mutex_lock(&app_access_lock);
7601 atomic_inc(&data->ioctl_count);
7602 ret = qseecom_qteec_request_cancellation(data, argp);
7603 atomic_dec(&data->ioctl_count);
7604 wake_up_all(&data->abort_wq);
7605 mutex_unlock(&app_access_lock);
7606 if (ret)
7607 pr_err("failed request_cancellation: %d\n", ret);
7608 break;
7609 }
7610 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7611 atomic_inc(&data->ioctl_count);
7612 ret = qseecom_get_ce_info(data, argp);
7613 if (ret)
7614 pr_err("failed get fde ce pipe info: %d\n", ret);
7615 atomic_dec(&data->ioctl_count);
7616 break;
7617 }
7618 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7619 atomic_inc(&data->ioctl_count);
7620 ret = qseecom_free_ce_info(data, argp);
7621 if (ret)
7622 pr_err("failed get fde ce pipe info: %d\n", ret);
7623 atomic_dec(&data->ioctl_count);
7624 break;
7625 }
7626 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7627 atomic_inc(&data->ioctl_count);
7628 ret = qseecom_query_ce_info(data, argp);
7629 if (ret)
7630 pr_err("failed get fde ce pipe info: %d\n", ret);
7631 atomic_dec(&data->ioctl_count);
7632 break;
7633 }
7634 default:
7635 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7636 return -EINVAL;
7637 }
7638 return ret;
7639}
7640
7641static int qseecom_open(struct inode *inode, struct file *file)
7642{
7643 int ret = 0;
7644 struct qseecom_dev_handle *data;
7645
7646 data = kzalloc(sizeof(*data), GFP_KERNEL);
7647 if (!data)
7648 return -ENOMEM;
7649 file->private_data = data;
7650 data->abort = 0;
7651 data->type = QSEECOM_GENERIC;
7652 data->released = false;
7653 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7654 data->mode = INACTIVE;
7655 init_waitqueue_head(&data->abort_wq);
7656 atomic_set(&data->ioctl_count, 0);
7657 return ret;
7658}
7659
7660static int qseecom_release(struct inode *inode, struct file *file)
7661{
7662 struct qseecom_dev_handle *data = file->private_data;
7663 int ret = 0;
7664
7665 if (data->released == false) {
7666 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
7667 data->type, data->mode, data);
7668 switch (data->type) {
7669 case QSEECOM_LISTENER_SERVICE:
7670 mutex_lock(&app_access_lock);
7671 ret = qseecom_unregister_listener(data);
7672 mutex_unlock(&app_access_lock);
7673 break;
7674 case QSEECOM_CLIENT_APP:
7675 mutex_lock(&app_access_lock);
7676 ret = qseecom_unload_app(data, true);
7677 mutex_unlock(&app_access_lock);
7678 break;
7679 case QSEECOM_SECURE_SERVICE:
7680 case QSEECOM_GENERIC:
7681 ret = qseecom_unmap_ion_allocated_memory(data);
7682 if (ret)
7683 pr_err("Ion Unmap failed\n");
7684 break;
7685 case QSEECOM_UNAVAILABLE_CLIENT_APP:
7686 break;
7687 default:
7688 pr_err("Unsupported clnt_handle_type %d",
7689 data->type);
7690 break;
7691 }
7692 }
7693
7694 if (qseecom.support_bus_scaling) {
7695 mutex_lock(&qsee_bw_mutex);
7696 if (data->mode != INACTIVE) {
7697 qseecom_unregister_bus_bandwidth_needs(data);
7698 if (qseecom.cumulative_mode == INACTIVE) {
7699 ret = __qseecom_set_msm_bus_request(INACTIVE);
7700 if (ret)
7701 pr_err("Fail to scale down bus\n");
7702 }
7703 }
7704 mutex_unlock(&qsee_bw_mutex);
7705 } else {
7706 if (data->fast_load_enabled == true)
7707 qsee_disable_clock_vote(data, CLK_SFPB);
7708 if (data->perf_enabled == true)
7709 qsee_disable_clock_vote(data, CLK_DFAB);
7710 }
7711 kfree(data);
7712
7713 return ret;
7714}
7715
7716#ifdef CONFIG_COMPAT
7717#include "compat_qseecom.c"
7718#else
7719#define compat_qseecom_ioctl NULL
7720#endif
7721
/* Character-device entry points for the qseecom node. */
static const struct file_operations qseecom_fops = {
		.owner = THIS_MODULE,
		.unlocked_ioctl = qseecom_ioctl,
		.compat_ioctl = compat_qseecom_ioctl,
		.open = qseecom_open,
		.release = qseecom_release
};
7729
7730static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7731{
7732 int rc = 0;
7733 struct device *pdev;
7734 struct qseecom_clk *qclk;
7735 char *core_clk_src = NULL;
7736 char *core_clk = NULL;
7737 char *iface_clk = NULL;
7738 char *bus_clk = NULL;
7739
7740 switch (ce) {
7741 case CLK_QSEE: {
7742 core_clk_src = "core_clk_src";
7743 core_clk = "core_clk";
7744 iface_clk = "iface_clk";
7745 bus_clk = "bus_clk";
7746 qclk = &qseecom.qsee;
7747 qclk->instance = CLK_QSEE;
7748 break;
7749 };
7750 case CLK_CE_DRV: {
7751 core_clk_src = "ce_drv_core_clk_src";
7752 core_clk = "ce_drv_core_clk";
7753 iface_clk = "ce_drv_iface_clk";
7754 bus_clk = "ce_drv_bus_clk";
7755 qclk = &qseecom.ce_drv;
7756 qclk->instance = CLK_CE_DRV;
7757 break;
7758 };
7759 default:
7760 pr_err("Invalid ce hw instance: %d!\n", ce);
7761 return -EIO;
7762 }
7763
7764 if (qseecom.no_clock_support) {
7765 qclk->ce_core_clk = NULL;
7766 qclk->ce_clk = NULL;
7767 qclk->ce_bus_clk = NULL;
7768 qclk->ce_core_src_clk = NULL;
7769 return 0;
7770 }
7771
7772 pdev = qseecom.pdev;
7773
7774 /* Get CE3 src core clk. */
7775 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7776 if (!IS_ERR(qclk->ce_core_src_clk)) {
7777 rc = clk_set_rate(qclk->ce_core_src_clk,
7778 qseecom.ce_opp_freq_hz);
7779 if (rc) {
7780 clk_put(qclk->ce_core_src_clk);
7781 qclk->ce_core_src_clk = NULL;
7782 pr_err("Unable to set the core src clk @%uMhz.\n",
7783 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7784 return -EIO;
7785 }
7786 } else {
7787 pr_warn("Unable to get CE core src clk, set to NULL\n");
7788 qclk->ce_core_src_clk = NULL;
7789 }
7790
7791 /* Get CE core clk */
7792 qclk->ce_core_clk = clk_get(pdev, core_clk);
7793 if (IS_ERR(qclk->ce_core_clk)) {
7794 rc = PTR_ERR(qclk->ce_core_clk);
7795 pr_err("Unable to get CE core clk\n");
7796 if (qclk->ce_core_src_clk != NULL)
7797 clk_put(qclk->ce_core_src_clk);
7798 return -EIO;
7799 }
7800
7801 /* Get CE Interface clk */
7802 qclk->ce_clk = clk_get(pdev, iface_clk);
7803 if (IS_ERR(qclk->ce_clk)) {
7804 rc = PTR_ERR(qclk->ce_clk);
7805 pr_err("Unable to get CE interface clk\n");
7806 if (qclk->ce_core_src_clk != NULL)
7807 clk_put(qclk->ce_core_src_clk);
7808 clk_put(qclk->ce_core_clk);
7809 return -EIO;
7810 }
7811
7812 /* Get CE AXI clk */
7813 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7814 if (IS_ERR(qclk->ce_bus_clk)) {
7815 rc = PTR_ERR(qclk->ce_bus_clk);
7816 pr_err("Unable to get CE BUS interface clk\n");
7817 if (qclk->ce_core_src_clk != NULL)
7818 clk_put(qclk->ce_core_src_clk);
7819 clk_put(qclk->ce_core_clk);
7820 clk_put(qclk->ce_clk);
7821 return -EIO;
7822 }
7823
7824 return rc;
7825}
7826
7827static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7828{
7829 struct qseecom_clk *qclk;
7830
7831 if (ce == CLK_QSEE)
7832 qclk = &qseecom.qsee;
7833 else
7834 qclk = &qseecom.ce_drv;
7835
7836 if (qclk->ce_clk != NULL) {
7837 clk_put(qclk->ce_clk);
7838 qclk->ce_clk = NULL;
7839 }
7840 if (qclk->ce_core_clk != NULL) {
7841 clk_put(qclk->ce_core_clk);
7842 qclk->ce_core_clk = NULL;
7843 }
7844 if (qclk->ce_bus_clk != NULL) {
7845 clk_put(qclk->ce_bus_clk);
7846 qclk->ce_bus_clk = NULL;
7847 }
7848 if (qclk->ce_core_src_clk != NULL) {
7849 clk_put(qclk->ce_core_src_clk);
7850 qclk->ce_core_src_clk = NULL;
7851 }
7852 qclk->instance = CLK_INVALID;
7853}
7854
7855static int qseecom_retrieve_ce_data(struct platform_device *pdev)
7856{
7857 int rc = 0;
7858 uint32_t hlos_num_ce_hw_instances;
7859 uint32_t disk_encrypt_pipe;
7860 uint32_t file_encrypt_pipe;
7861 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT];
7862 int i;
7863 const int *tbl;
7864 int size;
7865 int entry;
7866 struct qseecom_crypto_info *pfde_tbl = NULL;
7867 struct qseecom_crypto_info *p;
7868 int tbl_size;
7869 int j;
7870 bool old_db = true;
7871 struct qseecom_ce_info_use *pce_info_use;
7872 uint32_t *unit_tbl = NULL;
7873 int total_units = 0;
7874 struct qseecom_ce_pipe_entry *pce_entry;
7875
7876 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
7877 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
7878
7879 if (of_property_read_u32((&pdev->dev)->of_node,
7880 "qcom,qsee-ce-hw-instance",
7881 &qseecom.ce_info.qsee_ce_hw_instance)) {
7882 pr_err("Fail to get qsee ce hw instance information.\n");
7883 rc = -EINVAL;
7884 goto out;
7885 } else {
7886 pr_debug("qsee-ce-hw-instance=0x%x\n",
7887 qseecom.ce_info.qsee_ce_hw_instance);
7888 }
7889
7890 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
7891 "qcom,support-fde");
7892 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
7893 "qcom,support-pfe");
7894
7895 if (!qseecom.support_pfe && !qseecom.support_fde) {
7896 pr_warn("Device does not support PFE/FDE");
7897 goto out;
7898 }
7899
7900 if (qseecom.support_fde)
7901 tbl = of_get_property((&pdev->dev)->of_node,
7902 "qcom,full-disk-encrypt-info", &size);
7903 else
7904 tbl = NULL;
7905 if (tbl) {
7906 old_db = false;
7907 if (size % sizeof(struct qseecom_crypto_info)) {
7908 pr_err("full-disk-encrypt-info tbl size(%d)\n",
7909 size);
7910 rc = -EINVAL;
7911 goto out;
7912 }
7913 tbl_size = size / sizeof
7914 (struct qseecom_crypto_info);
7915
7916 pfde_tbl = kzalloc(size, GFP_KERNEL);
7917 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
7918 total_units = 0;
7919
7920 if (!pfde_tbl || !unit_tbl) {
7921 pr_err("failed to alloc memory\n");
7922 rc = -ENOMEM;
7923 goto out;
7924 }
7925 if (of_property_read_u32_array((&pdev->dev)->of_node,
7926 "qcom,full-disk-encrypt-info",
7927 (u32 *)pfde_tbl, size/sizeof(u32))) {
7928 pr_err("failed to read full-disk-encrypt-info tbl\n");
7929 rc = -EINVAL;
7930 goto out;
7931 }
7932
7933 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7934 for (j = 0; j < total_units; j++) {
7935 if (p->unit_num == *(unit_tbl + j))
7936 break;
7937 }
7938 if (j == total_units) {
7939 *(unit_tbl + total_units) = p->unit_num;
7940 total_units++;
7941 }
7942 }
7943
7944 qseecom.ce_info.num_fde = total_units;
7945 pce_info_use = qseecom.ce_info.fde = kcalloc(
7946 total_units, sizeof(struct qseecom_ce_info_use),
7947 GFP_KERNEL);
7948 if (!pce_info_use) {
7949 pr_err("failed to alloc memory\n");
7950 rc = -ENOMEM;
7951 goto out;
7952 }
7953
7954 for (j = 0; j < total_units; j++, pce_info_use++) {
7955 pce_info_use->unit_num = *(unit_tbl + j);
7956 pce_info_use->alloc = false;
7957 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
7958 pce_info_use->num_ce_pipe_entries = 0;
7959 pce_info_use->ce_pipe_entry = NULL;
7960 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7961 if (p->unit_num == pce_info_use->unit_num)
7962 pce_info_use->num_ce_pipe_entries++;
7963 }
7964
7965 entry = pce_info_use->num_ce_pipe_entries;
7966 pce_entry = pce_info_use->ce_pipe_entry =
7967 kcalloc(entry,
7968 sizeof(struct qseecom_ce_pipe_entry),
7969 GFP_KERNEL);
7970 if (pce_entry == NULL) {
7971 pr_err("failed to alloc memory\n");
7972 rc = -ENOMEM;
7973 goto out;
7974 }
7975
7976 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7977 if (p->unit_num == pce_info_use->unit_num) {
7978 pce_entry->ce_num = p->ce;
7979 pce_entry->ce_pipe_pair =
7980 p->pipe_pair;
7981 pce_entry->valid = true;
7982 pce_entry++;
7983 }
7984 }
7985 }
7986 kfree(unit_tbl);
7987 unit_tbl = NULL;
7988 kfree(pfde_tbl);
7989 pfde_tbl = NULL;
7990 }
7991
7992 if (qseecom.support_pfe)
7993 tbl = of_get_property((&pdev->dev)->of_node,
7994 "qcom,per-file-encrypt-info", &size);
7995 else
7996 tbl = NULL;
7997 if (tbl) {
7998 old_db = false;
7999 if (size % sizeof(struct qseecom_crypto_info)) {
8000 pr_err("per-file-encrypt-info tbl size(%d)\n",
8001 size);
8002 rc = -EINVAL;
8003 goto out;
8004 }
8005 tbl_size = size / sizeof
8006 (struct qseecom_crypto_info);
8007
8008 pfde_tbl = kzalloc(size, GFP_KERNEL);
8009 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8010 total_units = 0;
8011 if (!pfde_tbl || !unit_tbl) {
8012 pr_err("failed to alloc memory\n");
8013 rc = -ENOMEM;
8014 goto out;
8015 }
8016 if (of_property_read_u32_array((&pdev->dev)->of_node,
8017 "qcom,per-file-encrypt-info",
8018 (u32 *)pfde_tbl, size/sizeof(u32))) {
8019 pr_err("failed to read per-file-encrypt-info tbl\n");
8020 rc = -EINVAL;
8021 goto out;
8022 }
8023
8024 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8025 for (j = 0; j < total_units; j++) {
8026 if (p->unit_num == *(unit_tbl + j))
8027 break;
8028 }
8029 if (j == total_units) {
8030 *(unit_tbl + total_units) = p->unit_num;
8031 total_units++;
8032 }
8033 }
8034
8035 qseecom.ce_info.num_pfe = total_units;
8036 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8037 total_units, sizeof(struct qseecom_ce_info_use),
8038 GFP_KERNEL);
8039 if (!pce_info_use) {
8040 pr_err("failed to alloc memory\n");
8041 rc = -ENOMEM;
8042 goto out;
8043 }
8044
8045 for (j = 0; j < total_units; j++, pce_info_use++) {
8046 pce_info_use->unit_num = *(unit_tbl + j);
8047 pce_info_use->alloc = false;
8048 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8049 pce_info_use->num_ce_pipe_entries = 0;
8050 pce_info_use->ce_pipe_entry = NULL;
8051 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8052 if (p->unit_num == pce_info_use->unit_num)
8053 pce_info_use->num_ce_pipe_entries++;
8054 }
8055
8056 entry = pce_info_use->num_ce_pipe_entries;
8057 pce_entry = pce_info_use->ce_pipe_entry =
8058 kcalloc(entry,
8059 sizeof(struct qseecom_ce_pipe_entry),
8060 GFP_KERNEL);
8061 if (pce_entry == NULL) {
8062 pr_err("failed to alloc memory\n");
8063 rc = -ENOMEM;
8064 goto out;
8065 }
8066
8067 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8068 if (p->unit_num == pce_info_use->unit_num) {
8069 pce_entry->ce_num = p->ce;
8070 pce_entry->ce_pipe_pair =
8071 p->pipe_pair;
8072 pce_entry->valid = true;
8073 pce_entry++;
8074 }
8075 }
8076 }
8077 kfree(unit_tbl);
8078 unit_tbl = NULL;
8079 kfree(pfde_tbl);
8080 pfde_tbl = NULL;
8081 }
8082
8083 if (!old_db)
8084 goto out1;
8085
8086 if (of_property_read_bool((&pdev->dev)->of_node,
8087 "qcom,support-multiple-ce-hw-instance")) {
8088 if (of_property_read_u32((&pdev->dev)->of_node,
8089 "qcom,hlos-num-ce-hw-instances",
8090 &hlos_num_ce_hw_instances)) {
8091 pr_err("Fail: get hlos number of ce hw instance\n");
8092 rc = -EINVAL;
8093 goto out;
8094 }
8095 } else {
8096 hlos_num_ce_hw_instances = 1;
8097 }
8098
8099 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8100 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8101 MAX_CE_PIPE_PAIR_PER_UNIT);
8102 rc = -EINVAL;
8103 goto out;
8104 }
8105
8106 if (of_property_read_u32_array((&pdev->dev)->of_node,
8107 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8108 hlos_num_ce_hw_instances)) {
8109 pr_err("Fail: get hlos ce hw instance info\n");
8110 rc = -EINVAL;
8111 goto out;
8112 }
8113
8114 if (qseecom.support_fde) {
8115 pce_info_use = qseecom.ce_info.fde =
8116 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8117 if (!pce_info_use) {
8118 pr_err("failed to alloc memory\n");
8119 rc = -ENOMEM;
8120 goto out;
8121 }
8122 /* by default for old db */
8123 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8124 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8125 pce_info_use->alloc = false;
8126 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8127 pce_info_use->ce_pipe_entry = NULL;
8128 if (of_property_read_u32((&pdev->dev)->of_node,
8129 "qcom,disk-encrypt-pipe-pair",
8130 &disk_encrypt_pipe)) {
8131 pr_err("Fail to get FDE pipe information.\n");
8132 rc = -EINVAL;
8133 goto out;
8134 } else {
8135 pr_debug("disk-encrypt-pipe-pair=0x%x",
8136 disk_encrypt_pipe);
8137 }
8138 entry = pce_info_use->num_ce_pipe_entries =
8139 hlos_num_ce_hw_instances;
8140 pce_entry = pce_info_use->ce_pipe_entry =
8141 kcalloc(entry,
8142 sizeof(struct qseecom_ce_pipe_entry),
8143 GFP_KERNEL);
8144 if (pce_entry == NULL) {
8145 pr_err("failed to alloc memory\n");
8146 rc = -ENOMEM;
8147 goto out;
8148 }
8149 for (i = 0; i < entry; i++) {
8150 pce_entry->ce_num = hlos_ce_hw_instance[i];
8151 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8152 pce_entry->valid = 1;
8153 pce_entry++;
8154 }
8155 } else {
8156 pr_warn("Device does not support FDE");
8157 disk_encrypt_pipe = 0xff;
8158 }
8159 if (qseecom.support_pfe) {
8160 pce_info_use = qseecom.ce_info.pfe =
8161 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8162 if (!pce_info_use) {
8163 pr_err("failed to alloc memory\n");
8164 rc = -ENOMEM;
8165 goto out;
8166 }
8167 /* by default for old db */
8168 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8169 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8170 pce_info_use->alloc = false;
8171 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8172 pce_info_use->ce_pipe_entry = NULL;
8173
8174 if (of_property_read_u32((&pdev->dev)->of_node,
8175 "qcom,file-encrypt-pipe-pair",
8176 &file_encrypt_pipe)) {
8177 pr_err("Fail to get PFE pipe information.\n");
8178 rc = -EINVAL;
8179 goto out;
8180 } else {
8181 pr_debug("file-encrypt-pipe-pair=0x%x",
8182 file_encrypt_pipe);
8183 }
8184 entry = pce_info_use->num_ce_pipe_entries =
8185 hlos_num_ce_hw_instances;
8186 pce_entry = pce_info_use->ce_pipe_entry =
8187 kcalloc(entry,
8188 sizeof(struct qseecom_ce_pipe_entry),
8189 GFP_KERNEL);
8190 if (pce_entry == NULL) {
8191 pr_err("failed to alloc memory\n");
8192 rc = -ENOMEM;
8193 goto out;
8194 }
8195 for (i = 0; i < entry; i++) {
8196 pce_entry->ce_num = hlos_ce_hw_instance[i];
8197 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8198 pce_entry->valid = 1;
8199 pce_entry++;
8200 }
8201 } else {
8202 pr_warn("Device does not support PFE");
8203 file_encrypt_pipe = 0xff;
8204 }
8205
8206out1:
8207 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8208 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8209out:
8210 if (rc) {
8211 if (qseecom.ce_info.fde) {
8212 pce_info_use = qseecom.ce_info.fde;
8213 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8214 pce_entry = pce_info_use->ce_pipe_entry;
8215 kfree(pce_entry);
8216 pce_info_use++;
8217 }
8218 }
8219 kfree(qseecom.ce_info.fde);
8220 qseecom.ce_info.fde = NULL;
8221 if (qseecom.ce_info.pfe) {
8222 pce_info_use = qseecom.ce_info.pfe;
8223 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8224 pce_entry = pce_info_use->ce_pipe_entry;
8225 kfree(pce_entry);
8226 pce_info_use++;
8227 }
8228 }
8229 kfree(qseecom.ce_info.pfe);
8230 qseecom.ce_info.pfe = NULL;
8231 }
8232 kfree(unit_tbl);
8233 kfree(pfde_tbl);
8234 return rc;
8235}
8236
8237static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8238 void __user *argp)
8239{
8240 struct qseecom_ce_info_req req;
8241 struct qseecom_ce_info_req *pinfo = &req;
8242 int ret = 0;
8243 int i;
8244 unsigned int entries;
8245 struct qseecom_ce_info_use *pce_info_use, *p;
8246 int total = 0;
8247 bool found = false;
8248 struct qseecom_ce_pipe_entry *pce_entry;
8249
8250 ret = copy_from_user(pinfo, argp,
8251 sizeof(struct qseecom_ce_info_req));
8252 if (ret) {
8253 pr_err("copy_from_user failed\n");
8254 return ret;
8255 }
8256
8257 switch (pinfo->usage) {
8258 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8259 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8260 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8261 if (qseecom.support_fde) {
8262 p = qseecom.ce_info.fde;
8263 total = qseecom.ce_info.num_fde;
8264 } else {
8265 pr_err("system does not support fde\n");
8266 return -EINVAL;
8267 }
8268 break;
8269 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8270 if (qseecom.support_pfe) {
8271 p = qseecom.ce_info.pfe;
8272 total = qseecom.ce_info.num_pfe;
8273 } else {
8274 pr_err("system does not support pfe\n");
8275 return -EINVAL;
8276 }
8277 break;
8278 default:
8279 pr_err("unsupported usage %d\n", pinfo->usage);
8280 return -EINVAL;
8281 }
8282
8283 pce_info_use = NULL;
8284 for (i = 0; i < total; i++) {
8285 if (!p->alloc)
8286 pce_info_use = p;
8287 else if (!memcmp(p->handle, pinfo->handle,
8288 MAX_CE_INFO_HANDLE_SIZE)) {
8289 pce_info_use = p;
8290 found = true;
8291 break;
8292 }
8293 p++;
8294 }
8295
8296 if (pce_info_use == NULL)
8297 return -EBUSY;
8298
8299 pinfo->unit_num = pce_info_use->unit_num;
8300 if (!pce_info_use->alloc) {
8301 pce_info_use->alloc = true;
8302 memcpy(pce_info_use->handle,
8303 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8304 }
8305 if (pce_info_use->num_ce_pipe_entries >
8306 MAX_CE_PIPE_PAIR_PER_UNIT)
8307 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8308 else
8309 entries = pce_info_use->num_ce_pipe_entries;
8310 pinfo->num_ce_pipe_entries = entries;
8311 pce_entry = pce_info_use->ce_pipe_entry;
8312 for (i = 0; i < entries; i++, pce_entry++)
8313 pinfo->ce_pipe_entry[i] = *pce_entry;
8314 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8315 pinfo->ce_pipe_entry[i].valid = 0;
8316
8317 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8318 pr_err("copy_to_user failed\n");
8319 ret = -EFAULT;
8320 }
8321 return ret;
8322}
8323
8324static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8325 void __user *argp)
8326{
8327 struct qseecom_ce_info_req req;
8328 struct qseecom_ce_info_req *pinfo = &req;
8329 int ret = 0;
8330 struct qseecom_ce_info_use *p;
8331 int total = 0;
8332 int i;
8333 bool found = false;
8334
8335 ret = copy_from_user(pinfo, argp,
8336 sizeof(struct qseecom_ce_info_req));
8337 if (ret)
8338 return ret;
8339
8340 switch (pinfo->usage) {
8341 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8342 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8343 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8344 if (qseecom.support_fde) {
8345 p = qseecom.ce_info.fde;
8346 total = qseecom.ce_info.num_fde;
8347 } else {
8348 pr_err("system does not support fde\n");
8349 return -EINVAL;
8350 }
8351 break;
8352 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8353 if (qseecom.support_pfe) {
8354 p = qseecom.ce_info.pfe;
8355 total = qseecom.ce_info.num_pfe;
8356 } else {
8357 pr_err("system does not support pfe\n");
8358 return -EINVAL;
8359 }
8360 break;
8361 default:
8362 pr_err("unsupported usage %d\n", pinfo->usage);
8363 return -EINVAL;
8364 }
8365
8366 for (i = 0; i < total; i++) {
8367 if (p->alloc &&
8368 !memcmp(p->handle, pinfo->handle,
8369 MAX_CE_INFO_HANDLE_SIZE)) {
8370 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8371 p->alloc = false;
8372 found = true;
8373 break;
8374 }
8375 p++;
8376 }
8377 return ret;
8378}
8379
8380static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8381 void __user *argp)
8382{
8383 struct qseecom_ce_info_req req;
8384 struct qseecom_ce_info_req *pinfo = &req;
8385 int ret = 0;
8386 int i;
8387 unsigned int entries;
8388 struct qseecom_ce_info_use *pce_info_use, *p;
8389 int total = 0;
8390 bool found = false;
8391 struct qseecom_ce_pipe_entry *pce_entry;
8392
8393 ret = copy_from_user(pinfo, argp,
8394 sizeof(struct qseecom_ce_info_req));
8395 if (ret)
8396 return ret;
8397
8398 switch (pinfo->usage) {
8399 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8400 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8401 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8402 if (qseecom.support_fde) {
8403 p = qseecom.ce_info.fde;
8404 total = qseecom.ce_info.num_fde;
8405 } else {
8406 pr_err("system does not support fde\n");
8407 return -EINVAL;
8408 }
8409 break;
8410 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8411 if (qseecom.support_pfe) {
8412 p = qseecom.ce_info.pfe;
8413 total = qseecom.ce_info.num_pfe;
8414 } else {
8415 pr_err("system does not support pfe\n");
8416 return -EINVAL;
8417 }
8418 break;
8419 default:
8420 pr_err("unsupported usage %d\n", pinfo->usage);
8421 return -EINVAL;
8422 }
8423
8424 pce_info_use = NULL;
8425 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8426 pinfo->num_ce_pipe_entries = 0;
8427 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8428 pinfo->ce_pipe_entry[i].valid = 0;
8429
8430 for (i = 0; i < total; i++) {
8431
8432 if (p->alloc && !memcmp(p->handle,
8433 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8434 pce_info_use = p;
8435 found = true;
8436 break;
8437 }
8438 p++;
8439 }
8440 if (!pce_info_use)
8441 goto out;
8442 pinfo->unit_num = pce_info_use->unit_num;
8443 if (pce_info_use->num_ce_pipe_entries >
8444 MAX_CE_PIPE_PAIR_PER_UNIT)
8445 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8446 else
8447 entries = pce_info_use->num_ce_pipe_entries;
8448 pinfo->num_ce_pipe_entries = entries;
8449 pce_entry = pce_info_use->ce_pipe_entry;
8450 for (i = 0; i < entries; i++, pce_entry++)
8451 pinfo->ce_pipe_entry[i] = *pce_entry;
8452 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8453 pinfo->ce_pipe_entry[i].valid = 0;
8454out:
8455 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8456 pr_err("copy_to_user failed\n");
8457 ret = -EFAULT;
8458 }
8459 return ret;
8460}
8461
8462/*
8463 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8464 * then whitelist feature is not supported.
8465 */
8466static int qseecom_check_whitelist_feature(void)
8467{
8468 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8469
8470 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8471}
8472
/*
 * qseecom_probe() - platform driver probe: create the /dev/qseecom char
 * device, query the QSEE version, set up ION, crypto-engine data, clocks,
 * bus scaling, and notify TZ of the secure app region.
 *
 * On any failure the goto chain at the bottom unwinds exactly the
 * resources acquired so far, in reverse order of acquisition.
 */
static int qseecom_probe(struct platform_device *pdev)
{
	int rc;
	int i;
	uint32_t feature = 10;
	struct device *class_dev;
	struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
	struct qseecom_command_scm_resp resp;
	struct qseecom_ce_info_use *pce_info_use = NULL;

	/* Reset all driver-global state before any setup is attempted. */
	qseecom.qsee_bw_count = 0;
	qseecom.qsee_perf_client = 0;
	qseecom.qsee_sfpb_bw_count = 0;

	qseecom.qsee.ce_core_clk = NULL;
	qseecom.qsee.ce_clk = NULL;
	qseecom.qsee.ce_core_src_clk = NULL;
	qseecom.qsee.ce_bus_clk = NULL;

	qseecom.cumulative_mode = 0;
	qseecom.current_mode = INACTIVE;
	qseecom.support_bus_scaling = false;
	qseecom.support_fde = false;
	qseecom.support_pfe = false;

	qseecom.ce_drv.ce_core_clk = NULL;
	qseecom.ce_drv.ce_clk = NULL;
	qseecom.ce_drv.ce_core_src_clk = NULL;
	qseecom.ce_drv.ce_bus_clk = NULL;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);

	qseecom.app_block_ref_cnt = 0;
	init_waitqueue_head(&qseecom.app_block_wq);
	qseecom.whitelist_support = true;

	/* Reserve a char device region and create the device node. */
	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
			QSEECOM_DEV);
	if (IS_ERR(class_dev)) {
		/*
		 * NOTE(review): rc is still 0 (stale) when this message is
		 * printed; the real error is in PTR_ERR(class_dev).
		 */
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&qseecom.cdev, &qseecom_fops);
	qseecom.cdev.owner = THIS_MODULE;

	rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}

	/* Initialize listener/app/kclient tracking lists and locks. */
	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
	spin_lock_init(&qseecom.registered_listener_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
	spin_lock_init(&qseecom.registered_app_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
	spin_lock_init(&qseecom.registered_kclient_list_lock);
	init_waitqueue_head(&qseecom.send_resp_wq);
	qseecom.send_resp_flag = 0;

	/* Ask TZ for the QSEE version via an SCM call. */
	qseecom.qsee_version = QSEEE_VERSION_00;
	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
		&resp, sizeof(resp));
	/*
	 * NOTE(review): resp.result is printed before rc is checked, so a
	 * failed SCM call may log an uninitialized value.
	 */
	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
	if (rc) {
		pr_err("Failed to get QSEE version info %d\n", rc);
		goto exit_del_cdev;
	}
	qseecom.qsee_version = resp.result;
	qseecom.qseos_version = QSEOS_VERSION_14;
	qseecom.commonlib_loaded = false;
	qseecom.commonlib64_loaded = false;
	qseecom.pdev = class_dev;
	/* Create ION msm client */
	qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
	if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
		pr_err("Ion client cannot be created\n");
		rc = -ENOMEM;
		goto exit_del_cdev;
	}

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		/* Device-tree path: read feature flags and CE topology. */
		qseecom.pdev->of_node = pdev->dev.of_node;
		qseecom.support_bus_scaling =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,support-bus-scaling");
		rc = qseecom_retrieve_ce_data(pdev);
		if (rc)
			goto exit_destroy_ion_client;
		qseecom.appsbl_qseecom_support =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,appsbl-qseecom-support");
		pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
				qseecom.appsbl_qseecom_support);

		qseecom.commonlib64_loaded =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,commonlib64-loaded-by-uefi");
		pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
				qseecom.commonlib64_loaded);
		qseecom.fde_key_size =
			of_property_read_bool((&pdev->dev)->of_node,
						"qcom,fde-key-size");
		qseecom.no_clock_support =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,no-clock-support");
		if (!qseecom.no_clock_support) {
			/*
			 * NOTE(review): this message looks inverted — it is
			 * printed when HLOS *does* handle the clocks. Confirm
			 * intended wording against other qseecom branches.
			 */
			pr_info("qseecom clocks handled by other subsystem\n");
		} else {
			pr_info("no-clock-support=0x%x",
			qseecom.no_clock_support);
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,qsee-reentrancy-support",
					&qseecom.qsee_reentrancy_support)) {
			pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
			qseecom.qsee_reentrancy_support = 0;
		} else {
			pr_warn("qseecom.qsee_reentrancy_support = %d\n",
				qseecom.qsee_reentrancy_support);
		}

		/*
		 * The qseecom bus scaling flag can not be enabled when
		 * crypto clock is not handled by HLOS.
		 */
		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
			pr_err("support_bus_scaling flag can not be enabled.\n");
			rc = -EINVAL;
			goto exit_destroy_ion_client;
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,ce-opp-freq",
					&qseecom.ce_opp_freq_hz)) {
			pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
			qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
		}
		rc = __qseecom_init_clk(CLK_QSEE);
		if (rc)
			goto exit_destroy_ion_client;

		/*
		 * A distinct CE_DRV clock set is only needed when the FDE/PFE
		 * engine instance differs from the QSEE one; otherwise the
		 * QSEE clock handles are shared.
		 */
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde)) {
			rc = __qseecom_init_clk(CLK_CE_DRV);
			if (rc) {
				__qseecom_deinit_clk(CLK_QSEE);
				goto exit_destroy_ion_client;
			}
		} else {
			struct qseecom_clk *qclk;

			qclk = &qseecom.qsee;
			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
			qseecom.ce_drv.ce_clk = qclk->ce_clk;
			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
		}

		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						msm_bus_cl_get_pdata(pdev);
		/*
		 * Notify TZ of the secure app region unless the bootloader
		 * already protected it or appsbl handles qseecom setup.
		 */
		if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
			(!qseecom.is_apps_region_protected &&
			!qseecom.appsbl_qseecom_support)) {
			struct resource *resource = NULL;
			struct qsee_apps_region_info_ireq req;
			struct qsee_apps_region_info_64bit_ireq req_64bit;
			struct qseecom_command_scm_resp resp;
			void *cmd_buf = NULL;
			size_t cmd_len;

			resource = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "secapp-region");
			if (resource) {
				/* 32-bit vs 64-bit request layout by QSEE version. */
				if (qseecom.qsee_version < QSEE_VERSION_40) {
					req.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req.addr = (uint32_t)resource->start;
					req.size = resource_size(resource);
					cmd_buf = (void *)&req;
					cmd_len = sizeof(struct
						qsee_apps_region_info_ireq);
					pr_warn("secure app region addr=0x%x size=0x%x",
							req.addr, req.size);
				} else {
					req_64bit.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req_64bit.addr = resource->start;
					req_64bit.size = resource_size(
							resource);
					cmd_buf = (void *)&req_64bit;
					cmd_len = sizeof(struct
						qsee_apps_region_info_64bit_ireq);
					pr_warn("secure app region addr=0x%llx size=0x%x",
						req_64bit.addr, req_64bit.size);
				}
			} else {
				pr_err("Fail to get secure app region info\n");
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
			/* Clock must be on for the duration of the SCM call. */
			rc = __qseecom_enable_clk(CLK_QSEE);
			if (rc) {
				pr_err("CLK_QSEE enabling failed (%d)\n", rc);
				rc = -EIO;
				goto exit_deinit_clock;
			}
			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len,
					&resp, sizeof(resp));
			__qseecom_disable_clk(CLK_QSEE);
			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
				pr_err("send secapp reg fail %d resp.res %d\n",
							rc, resp.result);
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
		}
	/*
	 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
	 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
	 * Pls add "qseecom.commonlib64_loaded = true" here too.
	 */
		if (qseecom.is_apps_region_protected ||
					qseecom.appsbl_qseecom_support)
			qseecom.commonlib_loaded = true;
	} else {
		/* Non-DT path: bus scaling data comes from platform_data. */
		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						pdev->dev.platform_data;
	}
	if (qseecom.support_bus_scaling) {
		/* Timer + work pair scales bus bandwidth down when idle. */
		init_timer(&(qseecom.bw_scale_down_timer));
		INIT_WORK(&qseecom.bw_inactive_req_ws,
				qseecom_bw_inactive_req_work);
		qseecom.bw_scale_down_timer.function =
				qseecom_scale_bus_bandwidth_timer_callback;
	}
	qseecom.timer_running = false;
	qseecom.qsee_perf_client = msm_bus_scale_register_client(
					qseecom_platform_support);

	qseecom.whitelist_support = qseecom_check_whitelist_feature();
	pr_warn("qseecom.whitelist_support = %d\n",
				qseecom.whitelist_support);

	if (!qseecom.qsee_perf_client)
		pr_err("Unable to register bus client\n");

	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return 0;

/* Error unwinding: release resources in reverse order of acquisition. */
exit_deinit_clock:
	__qseecom_deinit_clk(CLK_QSEE);
	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
		(qseecom.support_pfe || qseecom.support_fde))
		__qseecom_deinit_clk(CLK_CE_DRV);
exit_destroy_ion_client:
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.fde);
	}
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.pfe);
	}
	ion_client_destroy(qseecom.ion_clnt);
exit_del_cdev:
	cdev_del(&qseecom.cdev);
exit_destroy_device:
	device_destroy(driver_class, qseecom_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qseecom_device_no, 1);
	return rc;
}
8773
/*
 * qseecom_remove() - platform driver remove: unload kernel-client apps,
 * the common library, and tear down bus scaling, clocks, CE info tables,
 * ION client, and the char device (reverse of qseecom_probe()).
 */
static int qseecom_remove(struct platform_device *pdev)
{
	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	int ret = 0;
	int i;
	struct qseecom_ce_pipe_entry *pce_entry;
	struct qseecom_ce_info_use *pce_info_use;

	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);

	/*
	 * Walk the registered kernel-client list, unloading each client's
	 * TZ app and freeing its bookkeeping.
	 *
	 * NOTE(review): several hazards here worth confirming upstream:
	 *  - list_for_each_entry() never yields a NULL cursor, so the
	 *    "!kclient" check below cannot fire;
	 *  - if the loop completes normally, control falls through into
	 *    the exit_free_kc_handle label and frees via a cursor derived
	 *    from the list head, which is not a valid entry;
	 *  - mutex_lock(&app_access_lock) is taken while holding a
	 *    spinlock with IRQs disabled (sleeping in atomic context).
	 */
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
								list) {
		if (!kclient)
			goto exit_irqrestore;

		/* Break the loop if client handle is NULL */
		if (!kclient->handle)
			goto exit_free_kclient;

		if (list_empty(&kclient->list))
			goto exit_free_kc_handle;

		list_del(&kclient->list);
		mutex_lock(&app_access_lock);
		ret = qseecom_unload_app(kclient->handle->dev, false);
		mutex_unlock(&app_access_lock);
		if (!ret) {
			kzfree(kclient->handle->dev);
			kzfree(kclient->handle);
			kzfree(kclient);
		}
	}

exit_free_kc_handle:
	kzfree(kclient->handle);
exit_free_kclient:
	kzfree(kclient);
exit_irqrestore:
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);

	/* Unload the common library if a QSEE (not legacy QSEOS) image ran. */
	if (qseecom.qseos_version > QSEEE_VERSION_00)
		qseecom_unload_commonlib_image();

	if (qseecom.qsee_perf_client)
		msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
									0);
	if (pdev->dev.platform_data != NULL)
		msm_bus_scale_unregister_client(qseecom.qsee_perf_client);

	if (qseecom.support_bus_scaling) {
		cancel_work_sync(&qseecom.bw_inactive_req_ws);
		del_timer_sync(&qseecom.bw_scale_down_timer);
	}

	/* Free the per-unit CE pipe-entry arrays, then the unit tables. */
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.fde);
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.pfe);

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		__qseecom_deinit_clk(CLK_QSEE);
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde))
			__qseecom_deinit_clk(CLK_CE_DRV);
	}

	ion_client_destroy(qseecom.ion_clnt);

	cdev_del(&qseecom.cdev);

	device_destroy(driver_class, qseecom_device_no);

	class_destroy(driver_class);

	unregister_chrdev_region(qseecom_device_no, 1);

	return ret;
}
8869
/*
 * qseecom_suspend() - legacy PM suspend hook: scale the bus to INACTIVE,
 * gate the CE clocks if clients still hold them, and stop the bandwidth
 * scale-down timer.
 *
 * Always returns 0; suspend is not allowed to fail here.
 */
static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
	/* Nothing to do when clocks are owned by another subsystem. */
	if (qseecom.no_clock_support)
		return 0;

	/* Lock order (matches resume): qsee_bw_mutex then clk_access_lock. */
	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);

	if (qseecom.current_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, INACTIVE);
		if (ret)
			pr_err("Fail to scale down bus\n");
		else
			qseecom.current_mode = INACTIVE;
	}

	/* Gate clocks only if some client still has them enabled. */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
		if (qclk->ce_core_clk != NULL)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
	}

	del_timer_sync(&(qseecom.bw_scale_down_timer));
	qseecom.timer_running = false;

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	cancel_work_sync(&qseecom.bw_inactive_req_ws);

	return 0;
}
8910
/*
 * qseecom_resume() - legacy PM resume hook: restore the pre-suspend bus
 * vote, re-enable any CE clocks clients had held, and restart the
 * bandwidth scale-down timer.
 *
 * On clock failure the goto chain unwinds the clocks already enabled,
 * in reverse order, and returns -EIO.  The driver state is marked READY
 * in all cases so clients are not blocked forever.
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qseecom.no_clock_support)
		goto exit;

	/* Same lock order as suspend: qsee_bw_mutex then clk_access_lock. */
	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	/* Cap the restored vote at HIGH. */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	if (qseecom.cumulative_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	/*
	 * Re-enable clocks in the order core -> iface -> bus; each failure
	 * jumps to a label that unwinds only what was already enabled.
	 */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}

	/* Re-arm the idle scale-down timer if anything is active. */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;

ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
8990
/* Device-tree match table: binds this driver to "qcom,qseecom" nodes. */
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}
};
8997
/* Platform driver descriptor wiring probe/remove and legacy PM hooks. */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.owner = THIS_MODULE,
		.of_match_table = qseecom_match,
	},
};
9009
9010static int qseecom_init(void)
9011{
9012 return platform_driver_register(&qseecom_plat_driver);
9013}
9014
/* Module exit point: unregister the qseecom platform driver. */
static void qseecom_exit(void)
{
	platform_driver_unregister(&qseecom_plat_driver);
}
9019
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");

/* Hook driver registration/unregistration into module load/unload. */
module_init(qseecom_init);
module_exit(qseecom_exit);