blob: b5ad125b3b497c8200474922d31b535686881e73 [file] [log] [blame]
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
Zhen Kong3d1d92f2018-02-02 17:21:04 -08004 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
54#define QSEECOM_DEV "qseecom"
55#define QSEOS_VERSION_14 0x14
56#define QSEEE_VERSION_00 0x400000
57#define QSEE_VERSION_01 0x401000
58#define QSEE_VERSION_02 0x402000
59#define QSEE_VERSION_03 0x403000
60#define QSEE_VERSION_04 0x404000
61#define QSEE_VERSION_05 0x405000
62#define QSEE_VERSION_20 0x800000
63#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
64
65#define QSEE_CE_CLK_100MHZ 100000000
66#define CE_CLK_DIV 1000000
67
68#define QSEECOM_MAX_SG_ENTRY 512
69#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
70 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
71
72#define QSEECOM_INVALID_KEY_ID 0xff
73
74/* Save partition image hash for authentication check */
75#define SCM_SAVE_PARTITION_HASH_ID 0x01
76
77/* Check if enterprise security is activate */
78#define SCM_IS_ACTIVATED_ID 0x02
79
80/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
81#define SCM_MDTP_CIPHER_DIP 0x01
82
83/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
84#define MAX_DIP 0x20000
85
86#define RPMB_SERVICE 0x2000
87#define SSD_SERVICE 0x3000
88
89#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
90#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
91#define TWO 2
92#define QSEECOM_UFS_ICE_CE_NUM 10
93#define QSEECOM_SDCC_ICE_CE_NUM 20
94#define QSEECOM_ICE_FDE_KEY_INDEX 0
95
96#define PHY_ADDR_4G (1ULL<<32)
97
98#define QSEECOM_STATE_NOT_READY 0
99#define QSEECOM_STATE_SUSPEND 1
100#define QSEECOM_STATE_READY 2
101#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
102
103/*
104 * default ce info unit to 0 for
105 * services which
106 * support only single instance.
107 * Most of services are in this category.
108 */
109#define DEFAULT_CE_INFO_UNIT 0
110#define DEFAULT_NUM_CE_INFO_UNIT 1
111
/* Bus/clock vote identifiers used by the bandwidth voting helpers. */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};

/*
 * ICE full-disk-encryption key-size encodings.  The key size is carried
 * shifted left by QSEECOM_ICE_FDE_KEY_SIZE_MASK (= 2) bits.
 */
enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};

/* Role of a qseecom device handle (see qseecom_dev_handle.type). */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,
	QSEECOM_LISTENER_SERVICE,
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};

/* Crypto-engine (CE) clock instances managed by this driver. */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,
	CLK_CE_DRV,
	CLK_INVALID,
};
139
140static struct class *driver_class;
141static dev_t qseecom_device_no;
142
143static DEFINE_MUTEX(qsee_bw_mutex);
144static DEFINE_MUTEX(app_access_lock);
145static DEFINE_MUTEX(clk_access_lock);
146
/*
 * One entry of the scatter/gather table shared with TZ.  The encoding of
 * `indexAndFlags` and `sizeOrCount` is described in the comment below.
 */
struct sglist_info {
	uint32_t indexAndFlags;
	uint32_t sizeOrCount;
};
151
152/*
153 * The 31th bit indicates only one or multiple physical address inside
154 * the request buffer. If it is set, the index locates a single physical addr
155 * inside the request buffer, and `sizeOrCount` is the size of the memory being
156 * shared at that physical address.
157 * Otherwise, the index locates an array of {start, len} pairs (a
158 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
159 * that array.
160 *
161 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
162 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
163 *
164 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
165 */
166#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
167 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
168
169#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
170
171#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
172
173#define MAKE_WHITELIST_VERSION(major, minor, patch) \
174 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
175
/*
 * One registered TZ listener service.  Tracks the shared buffer registered
 * with TZ (kernel mapping, physical address, and the user-space base used
 * for address translation) plus the wait queues used to hand requests to,
 * and collect responses from, the user-space listener.
 */
struct qseecom_registered_listener_list {
	struct list_head list;
	struct qseecom_register_listener_req svc;
	void *user_virt_sb_base;
	u8 *sb_virt;
	phys_addr_t sb_phys;
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	wait_queue_head_t rcv_req_wq;
	int rcv_req_flag;
	int send_resp_flag;
	bool listener_in_use;
	/* wq for thread blocked on this listener*/
	wait_queue_head_t listener_block_app_wq;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
};

/* One loaded TZ application, reference-counted across handles. */
struct qseecom_registered_app_list {
	struct list_head list;
	u32 app_id;
	u32 ref_cnt;
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	bool app_blocked;
	u32 blocked_on_listener_id;
};

/* In-kernel client (qseecom_kernel.h API) tracked in a global list. */
struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};

/* Usage description of one crypto-engine (CE) pipe set. */
struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
	unsigned int unit_num;
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;
	uint32_t type;
};

/* CE usage tables for this target: FDE and PFE pipe entries. */
struct ce_hw_usage_info {
	uint32_t qsee_ce_hw_instance;
	uint32_t num_fde;
	struct qseecom_ce_info_use *fde;
	uint32_t num_pfe;
	struct qseecom_ce_info_use *pfe;
};

/* Clock handles and access count for one CE instance. */
struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;
};
234
/*
 * Global driver state (one instance: the static `qseecom` variable).
 * Holds the registered listener/app/kclient lists with their locks,
 * clock and bus-scaling bookkeeping, and feature flags.
 */
struct qseecom_control {
	struct ion_client *ion_clnt; /* Ion client */
	struct list_head registered_listener_list_head;
	spinlock_t registered_listener_list_lock;

	struct list_head registered_app_list_head;
	spinlock_t registered_app_list_lock;

	struct list_head registered_kclient_list_head;
	spinlock_t registered_kclient_list_lock;

	wait_queue_head_t send_resp_wq;
	int send_resp_flag;

	uint32_t qseos_version;
	uint32_t qsee_version;
	struct device *pdev;
	bool whitelist_support;
	bool commonlib_loaded;
	bool commonlib64_loaded;
	struct ce_hw_usage_info ce_info;

	int qsee_bw_count;
	int qsee_sfpb_bw_count;

	uint32_t qsee_perf_client;
	struct qseecom_clk qsee;
	struct qseecom_clk ce_drv;

	bool support_bus_scaling;
	bool support_fde;
	bool support_pfe;
	bool fde_key_size;
	uint32_t cumulative_mode;
	enum qseecom_bandwidth_request_mode current_mode;
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;

	uint32_t app_block_ref_cnt;
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;
	int is_apps_region_protected;
	/* set true once TZ accepts the SMCINVOKE listener registration */
	bool smcinvoke_support;
};

/* Per-ion-fd bookkeeping for DMA-allocated secure buffers. */
struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;
	size_t size;
	void *vbase;
	dma_addr_t pbase;
};

/* A {buffer, size} memory-reference pair with 32-bit fields. */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};
297
/* Per-fd state for a handle acting as a TZ client application. */
struct qseecom_client_handle {
	u32 app_id;
	u8 *sb_virt;
	phys_addr_t sb_phys;
	unsigned long user_virt_sb_base;
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
};

/* Per-fd state for a handle acting as a listener service. */
struct qseecom_listener_handle {
	u32 id;
};

/* The single global driver instance. */
static struct qseecom_control qseecom;

/* Per-open-file state; `type` selects which union member is valid. */
struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;
		struct qseecom_listener_handle listener;
	};
	bool released;
	int abort;
	wait_queue_head_t abort_wq;
	atomic_t ioctl_count;
	bool perf_enabled;
	bool fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	bool use_legacy_cmd;
};

/* Human-readable description of one key-ID usage slot. */
struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};

/* Crypto-engine selection: unit number, CE number, and pipe pair. */
struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};
343
/* Key-ID usage descriptions, indexed by usage (index 0 = undefined). */
static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},

	{
		.desc = "Full Disk Encryption",
	},

	{
		.desc = "Per File Encryption",
	},

	{
		.desc = "UFS ICE Full Disk Encryption",
	},

	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};
365
366/* Function proto types */
367static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
368static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
369static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
370static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
371static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
372static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
373 char *cmnlib_name);
374static int qseecom_enable_ice_setup(int usage);
375static int qseecom_disable_ice_setup(int usage);
376static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
377static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
378 void __user *argp);
379static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
380 void __user *argp);
381static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
382 void __user *argp);
383
/*
 * Parse the "androidboot.keymaster=" kernel command-line option into
 * qseecom.is_apps_region_protected.  Registered via __setup(), so this
 * runs during early boot; returning 1 marks the option as handled.
 */
static int get_qseecom_keymaster_status(char *str)
{
	get_option(&str, &qseecom.is_apps_region_protected);
	return 1;
}
__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
390
/*
 * qseecom_scm_call2() - issue a QSEECOM request to TZ using the ARMv8
 * SMC calling convention (scm_call2).
 *
 * @svc_id:    legacy SCM service id (6, SCM_SVC_ES, or SCM_SVC_TZSCHEDULER)
 * @tz_cmd_id: legacy SCM command id (consulted for svc 6 and SCM_SVC_ES)
 * @req_buf:   request structure; for SCM_SVC_TZSCHEDULER its first u32 is
 *             the QSEOS command id that selects the SMC id and arguments
 * @resp_buf:  filled with TZ's {result, resp_type, data} return triple
 *
 * Translates each legacy (svc_id, tz_cmd_id) / QSEOS command into the
 * corresponding TZ SMC id and scm_desc argument layout.  Requests that
 * exist in both 32-bit and 64-bit layouts are split on QSEE_VERSION_40.
 * Data passed to TZ by physical address is staged in a kzalloc'd bounce
 * buffer and cache-flushed (dmac_flush_range) before the SMC.
 *
 * Return: scm_call2() status on the normal path, -EINVAL for unsupported
 * commands, -ENOMEM if a bounce buffer cannot be allocated.
 */
static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
		const void *req_buf, void *resp_buf)
{
	int ret = 0;
	uint32_t smc_id = 0;
	uint32_t qseos_cmd_id = 0;
	struct scm_desc desc = {0};
	struct qseecom_command_scm_resp *scm_resp = NULL;

	if (!req_buf || !resp_buf) {
		pr_err("Invalid buffer pointer\n");
		return -EINVAL;
	}
	/* Every QSEOS request begins with its command id. */
	qseos_cmd_id = *(uint32_t *)req_buf;
	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;

	switch (svc_id) {
	case 6: {
		/* Feature-version query is the only supported svc-6 call. */
		if (tz_cmd_id == 3) {
			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
			desc.args[0] = *(uint32_t *)req_buf;
		} else {
			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
				svc_id, tz_cmd_id);
			return -EINVAL;
		}
		ret = scm_call2(smc_id, &desc);
		break;
	}
	case SCM_SVC_ES: {
		switch (tz_cmd_id) {
		case SCM_SAVE_PARTITION_HASH_ID: {
			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
			struct qseecom_save_partition_hash_req *p_hash_req =
				(struct qseecom_save_partition_hash_req *)
				req_buf;
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			/* NOTE(review): kzalloc already zeroes the buffer. */
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, p_hash_req->digest,
				SHA256_DIGEST_LENGTH);
			/* TZ reads the hash via its physical address. */
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
			desc.args[0] = p_hash_req->partition_id;
			desc.args[1] = virt_to_phys(tzbuf);
			desc.args[2] = SHA256_DIGEST_LENGTH;
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		default: {
			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
				tz_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (tz_cmd_id) */
		break;
	} /* end of case SCM_SVC_ES */
	case SCM_SVC_TZSCHEDULER: {
		switch (qseos_cmd_id) {
		case QSEOS_APP_START_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_START_ID;
			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_app_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_SHUTDOWN_COMMAND: {
			struct qseecom_unload_app_ireq *req;

			req = (struct qseecom_unload_app_ireq *)req_buf;
			smc_id = TZ_OS_APP_SHUTDOWN_ID;
			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_LOOKUP_COMMAND: {
			struct qseecom_check_app_ireq *req;
			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			req = (struct qseecom_check_app_ireq *)req_buf;
			pr_debug("Lookup app_name = %s\n", req->app_name);
			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_APP_LOOKUP_ID;
			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = strlen(req->app_name);
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_APP_REGION_NOTIFICATION: {
			struct qsee_apps_region_info_ireq *req;
			struct qsee_apps_region_info_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
			desc.arginfo =
				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qsee_apps_region_info_ireq *)
					req_buf;
				desc.args[0] = req->addr;
				desc.args[1] = req->size;
			} else {
				req_64bit =
				(struct qsee_apps_region_info_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->addr;
				desc.args[1] = req_64bit->size;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
			struct qseecom_load_lib_image_ireq *req;
			struct qseecom_load_lib_image_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_lib_image_ireq *)
					req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_lib_image_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_REGISTER_LISTENER: {
			struct qseecom_register_listener_ireq *req;
			struct qseecom_register_listener_64bit_ireq *req_64bit;

			desc.arginfo =
				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_register_listener_ireq *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->sb_ptr;
				desc.args[2] = req->sb_len;
			} else {
				req_64bit =
				(struct qseecom_register_listener_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->listener_id;
				desc.args[1] = req_64bit->sb_ptr;
				desc.args[2] = req_64bit->sb_len;
			}
			/*
			 * Try the SMCINVOKE-capable registration first;
			 * fall back to the legacy id (and clear the flag)
			 * if TZ rejects it.
			 */
			qseecom.smcinvoke_support = true;
			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			if (ret) {
				qseecom.smcinvoke_support = false;
				smc_id = TZ_OS_REGISTER_LISTENER_ID;
				__qseecom_reentrancy_check_if_no_app_blocked(
					smc_id);
				ret = scm_call2(smc_id, &desc);
			}
			break;
		}
		case QSEOS_DEREGISTER_LISTENER: {
			struct qseecom_unregister_listener_ireq *req;

			req = (struct qseecom_unregister_listener_ireq *)
				req_buf;
			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
			struct qseecom_client_listener_data_irsp *req;

			req = (struct qseecom_client_listener_data_irsp *)
				req_buf;
			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
			desc.arginfo =
				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			desc.args[1] = req->status;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
			struct qseecom_client_listener_data_irsp *req;
			struct qseecom_client_listener_data_64bit_irsp *req_64;

			smc_id =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req =
				(struct qseecom_client_listener_data_irsp *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->status;
				desc.args[2] = req->sglistinfo_ptr;
				desc.args[3] = req->sglistinfo_len;
			} else {
				req_64 =
			(struct qseecom_client_listener_data_64bit_irsp *)
					req_buf;
				desc.args[0] = req_64->listener_id;
				desc.args[1] = req_64->status;
				desc.args[2] = req_64->sglistinfo_ptr;
				desc.args[3] = req_64->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
			/* Reuses the services-image parameter layout. */
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_app_64bit_ireq *)req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}

		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
			struct qseecom_client_send_service_ireq *req;

			req = (struct qseecom_client_send_service_ireq *)
				req_buf;
			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
			desc.args[0] = req->key_type;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_ERASE_COMMAND: {
			smc_id = TZ_OS_RPMB_ERASE_ID;
			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_GENERATE_KEY: {
			/* Key requests are bounced minus the leading cmd id. */
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_GEN_KEY_ID;
			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_DELETE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_DEL_KEY_ID;
			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_SET_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_select_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_select_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_UPDATE_KEY_USERINFO: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_CLOSE_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_REQUEST_CANCELLATION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
			desc.arginfo =
				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
			struct qseecom_continue_blocked_request_ireq *req =
				(struct qseecom_continue_blocked_request_ireq *)
				req_buf;
			/* SMC id depends on how the listener registered. */
			if (qseecom.smcinvoke_support)
				smc_id =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
			else
				smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
			desc.arginfo =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
			desc.args[0] = req->app_or_session_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		default: {
			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
				qseos_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /*end of switch (qsee_cmd_id) */
		break;
	} /*end of case SCM_SVC_TZSCHEDULER*/
	default: {
		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
			svc_id);
		ret = -EINVAL;
		break;
	}
	} /*end of switch svc_id */
	scm_resp->result = desc.ret[0];
	scm_resp->resp_type = desc.ret[1];
	scm_resp->data = desc.ret[2];
	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
		scm_resp->result, scm_resp->resp_type, scm_resp->data);
	return ret;
}
1053
1054
1055static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1056 size_t cmd_len, void *resp_buf, size_t resp_len)
1057{
1058 if (!is_scm_armv8())
1059 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1060 resp_buf, resp_len);
1061 else
1062 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1063}
1064
1065static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1066 struct qseecom_register_listener_req *svc)
1067{
1068 struct qseecom_registered_listener_list *ptr;
1069 int unique = 1;
1070 unsigned long flags;
1071
1072 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1073 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1074 if (ptr->svc.listener_id == svc->listener_id) {
1075 pr_err("Service id: %u is already registered\n",
1076 ptr->svc.listener_id);
1077 unique = 0;
1078 break;
1079 }
1080 }
1081 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1082 return unique;
1083}
1084
1085static struct qseecom_registered_listener_list *__qseecom_find_svc(
1086 int32_t listener_id)
1087{
1088 struct qseecom_registered_listener_list *entry = NULL;
1089 unsigned long flags;
1090
1091 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1092 list_for_each_entry(entry,
1093 &qseecom.registered_listener_list_head, list) {
1094 if (entry->svc.listener_id == listener_id)
1095 break;
1096 }
1097 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1098
1099 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1100 pr_err("Service id: %u is not found\n", listener_id);
1101 return NULL;
1102 }
1103
1104 return entry;
1105}
1106
1107static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1108 struct qseecom_dev_handle *handle,
1109 struct qseecom_register_listener_req *listener)
1110{
1111 int ret = 0;
1112 struct qseecom_register_listener_ireq req;
1113 struct qseecom_register_listener_64bit_ireq req_64bit;
1114 struct qseecom_command_scm_resp resp;
1115 ion_phys_addr_t pa;
1116 void *cmd_buf = NULL;
1117 size_t cmd_len;
1118
1119 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001120 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001121 listener->ifd_data_fd);
1122 if (IS_ERR_OR_NULL(svc->ihandle)) {
1123 pr_err("Ion client could not retrieve the handle\n");
1124 return -ENOMEM;
1125 }
1126
1127 /* Get the physical address of the ION BUF */
1128 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1129 if (ret) {
1130 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1131 ret);
1132 return ret;
1133 }
1134 /* Populate the structure for sending scm call to load image */
1135 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1136 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1137 pr_err("ION memory mapping for listener shared buffer failed\n");
1138 return -ENOMEM;
1139 }
1140 svc->sb_phys = (phys_addr_t)pa;
1141
1142 if (qseecom.qsee_version < QSEE_VERSION_40) {
1143 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1144 req.listener_id = svc->svc.listener_id;
1145 req.sb_len = svc->sb_length;
1146 req.sb_ptr = (uint32_t)svc->sb_phys;
1147 cmd_buf = (void *)&req;
1148 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1149 } else {
1150 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1151 req_64bit.listener_id = svc->svc.listener_id;
1152 req_64bit.sb_len = svc->sb_length;
1153 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1154 cmd_buf = (void *)&req_64bit;
1155 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1156 }
1157
1158 resp.result = QSEOS_RESULT_INCOMPLETE;
1159
1160 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1161 &resp, sizeof(resp));
1162 if (ret) {
1163 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1164 return -EINVAL;
1165 }
1166
1167 if (resp.result != QSEOS_RESULT_SUCCESS) {
1168 pr_err("Error SB registration req: resp.result = %d\n",
1169 resp.result);
1170 return -EPERM;
1171 }
1172 return 0;
1173}
1174
1175static int qseecom_register_listener(struct qseecom_dev_handle *data,
1176 void __user *argp)
1177{
1178 int ret = 0;
1179 unsigned long flags;
1180 struct qseecom_register_listener_req rcvd_lstnr;
1181 struct qseecom_registered_listener_list *new_entry;
1182
1183 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1184 if (ret) {
1185 pr_err("copy_from_user failed\n");
1186 return ret;
1187 }
1188 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1189 rcvd_lstnr.sb_size))
1190 return -EFAULT;
1191
1192 data->listener.id = 0;
1193 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
1194 pr_err("Service is not unique and is already registered\n");
1195 data->released = true;
1196 return -EBUSY;
1197 }
1198
1199 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1200 if (!new_entry)
1201 return -ENOMEM;
1202 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1203 new_entry->rcv_req_flag = 0;
1204
1205 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1206 new_entry->sb_length = rcvd_lstnr.sb_size;
1207 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1208 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
1209 pr_err("qseecom_set_sb_memoryfailed\n");
1210 kzfree(new_entry);
1211 return -ENOMEM;
1212 }
1213
1214 data->listener.id = rcvd_lstnr.listener_id;
1215 init_waitqueue_head(&new_entry->rcv_req_wq);
1216 init_waitqueue_head(&new_entry->listener_block_app_wq);
1217 new_entry->send_resp_flag = 0;
1218 new_entry->listener_in_use = false;
1219 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1220 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1221 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1222
1223 return ret;
1224}
1225
/*
 * Tear down the listener registered on @data: ask TZ to deregister it,
 * abort and wake any threads blocked on this handle, wait for in-flight
 * ioctls to drain, then unlink the entry and release its shared ION
 * buffer.
 *
 * Returns 0 on success, the scm_call error, -EPERM when TZ rejects the
 * deregister request, or -ERESTARTSYS if interrupted while draining.
 */
static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	int ret = 0;
	unsigned long flags;
	uint32_t unmap_mem = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL; /* Retrieve phy addr */

	/* Deregister on the TZ side first; keep local state on failure. */
	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
					sizeof(req), &resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		return ret;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		return -EPERM;
	}

	/* Abort waiters on this handle and wake the listener's rcv queue. */
	data->abort = 1;
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
			list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			wake_up_all(&ptr_svc->rcv_req_wq);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/*
	 * Wait until this is the only ioctl still running on the handle
	 * before freeing the listener entry out from under other threads.
	 */
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			return ret;
		}
	}

	/*
	 * Unlink and free the entry under the lock; the ION handle is
	 * stashed so the (possibly sleeping) unmap/free happens after
	 * the spinlock is dropped.
	 */
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc,
			&qseecom.registered_listener_list_head, list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			if (ptr_svc->sb_virt) {
				unmap_mem = 1;
				ihandle = ptr_svc->ihandle;
			}
			list_del(&ptr_svc->list);
			kzfree(ptr_svc);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Unmap the memory */
	if (unmap_mem) {
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	data->released = true;
	return ret;
}
1299
/*
 * Vote the QSEE bus bandwidth to @mode via the msm-bus scaling client,
 * keeping the QSEE core source clock enabled for any non-INACTIVE mode
 * and disabled for INACTIVE.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* Roll the clock vote back to its previous state. */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		/*
		 * NOTE(review): current_mode is recorded even when the bus
		 * request above failed, so a later call with the same mode
		 * will skip the retry -- confirm this is intentional.
		 */
		qseecom.current_mode = mode;
	}
	return ret;
}
1336
/*
 * Deferred work: drop the bus bandwidth vote to INACTIVE after the
 * scale-down timer has fired.  Takes app_access_lock before
 * qsee_bw_mutex, matching the locking order used by the callers of
 * qseecom_scale_bus_bandwidth_timer().
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	/* Only scale down if nobody cancelled the timer in the meantime. */
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
1349
/*
 * Timer callback for bw_scale_down_timer.  Runs in timer (atomic)
 * context, so the actual scale-down -- which takes mutexes -- is
 * deferred to the bw_inactive_req_ws workqueue item.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1354
1355static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1356{
1357 struct qseecom_clk *qclk;
1358 int ret = 0;
1359
1360 mutex_lock(&clk_access_lock);
1361 if (ce == CLK_QSEE)
1362 qclk = &qseecom.qsee;
1363 else
1364 qclk = &qseecom.ce_drv;
1365
1366 if (qclk->clk_access_cnt > 2) {
1367 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1368 ret = -EINVAL;
1369 goto err_dec_ref_cnt;
1370 }
1371 if (qclk->clk_access_cnt == 2)
1372 qclk->clk_access_cnt--;
1373
1374err_dec_ref_cnt:
1375 mutex_unlock(&clk_access_lock);
1376 return ret;
1377}
1378
1379
/*
 * Apply a bus bandwidth vote and cancel any pending scale-down timer.
 * @mode == 0 means "restore the clients' cumulative vote" (capped at
 * HIGH); any other value is voted directly.
 */
static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
{
	int32_t ret = 0;
	int32_t request_mode = INACTIVE;

	mutex_lock(&qsee_bw_mutex);
	if (mode == 0) {
		/* Re-apply whatever the clients have collectively voted. */
		if (qseecom.cumulative_mode > MEDIUM)
			request_mode = HIGH;
		else
			request_mode = qseecom.cumulative_mode;
	} else {
		request_mode = mode;
	}

	ret = __qseecom_set_msm_bus_request(request_mode);
	if (ret) {
		pr_err("set msm bus request failed (%d),request_mode (%d)\n",
			ret, request_mode);
		goto err_scale_timer;
	}

	/*
	 * If a scale-down timer is armed, drop the duplicated QSEE clock
	 * reference (see __qseecom_decrease_clk_ref_count) and stop the
	 * timer so the fresh vote is not scaled down behind our back.
	 */
	if (qseecom.timer_running) {
		ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
		if (ret) {
			pr_err("Failed to decrease clk ref count.\n");
			goto err_scale_timer;
		}
		del_timer_sync(&(qseecom.bw_scale_down_timer));
		qseecom.timer_running = false;
	}
err_scale_timer:
	mutex_unlock(&qsee_bw_mutex);
	return ret;
}
1415
1416
1417static int qseecom_unregister_bus_bandwidth_needs(
1418 struct qseecom_dev_handle *data)
1419{
1420 int32_t ret = 0;
1421
1422 qseecom.cumulative_mode -= data->mode;
1423 data->mode = INACTIVE;
1424
1425 return ret;
1426}
1427
1428static int __qseecom_register_bus_bandwidth_needs(
1429 struct qseecom_dev_handle *data, uint32_t request_mode)
1430{
1431 int32_t ret = 0;
1432
1433 if (data->mode == INACTIVE) {
1434 qseecom.cumulative_mode += request_mode;
1435 data->mode = request_mode;
1436 } else {
1437 if (data->mode != request_mode) {
1438 qseecom.cumulative_mode -= data->mode;
1439 qseecom.cumulative_mode += request_mode;
1440 data->mode = request_mode;
1441 }
1442 }
1443 return ret;
1444}
1445
1446static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1447{
1448 int ret = 0;
1449
1450 ret = qsee_vote_for_clock(data, CLK_DFAB);
1451 if (ret) {
1452 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1453 goto perf_enable_exit;
1454 }
1455 ret = qsee_vote_for_clock(data, CLK_SFPB);
1456 if (ret) {
1457 qsee_disable_clock_vote(data, CLK_DFAB);
1458 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1459 goto perf_enable_exit;
1460 }
1461
1462perf_enable_exit:
1463 return ret;
1464}
1465
1466static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1467 void __user *argp)
1468{
1469 int32_t ret = 0;
1470 int32_t req_mode;
1471
1472 if (qseecom.no_clock_support)
1473 return 0;
1474
1475 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1476 if (ret) {
1477 pr_err("copy_from_user failed\n");
1478 return ret;
1479 }
1480 if (req_mode > HIGH) {
1481 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1482 return -EINVAL;
1483 }
1484
1485 /*
1486 * Register bus bandwidth needs if bus scaling feature is enabled;
1487 * otherwise, qseecom enable/disable clocks for the client directly.
1488 */
1489 if (qseecom.support_bus_scaling) {
1490 mutex_lock(&qsee_bw_mutex);
1491 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1492 mutex_unlock(&qsee_bw_mutex);
1493 } else {
1494 pr_debug("Bus scaling feature is NOT enabled\n");
1495 pr_debug("request bandwidth mode %d for the client\n",
1496 req_mode);
1497 if (req_mode != INACTIVE) {
1498 ret = qseecom_perf_enable(data);
1499 if (ret)
1500 pr_err("Failed to vote for clock with err %d\n",
1501 ret);
1502 } else {
1503 qsee_disable_clock_vote(data, CLK_DFAB);
1504 qsee_disable_clock_vote(data, CLK_SFPB);
1505 }
1506 }
1507 return ret;
1508}
1509
1510static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1511{
1512 if (qseecom.no_clock_support)
1513 return;
1514
1515 mutex_lock(&qsee_bw_mutex);
1516 qseecom.bw_scale_down_timer.expires = jiffies +
1517 msecs_to_jiffies(duration);
1518 mod_timer(&(qseecom.bw_scale_down_timer),
1519 qseecom.bw_scale_down_timer.expires);
1520 qseecom.timer_running = true;
1521 mutex_unlock(&qsee_bw_mutex);
1522}
1523
1524static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1525{
1526 if (!qseecom.support_bus_scaling)
1527 qsee_disable_clock_vote(data, CLK_SFPB);
1528 else
1529 __qseecom_add_bw_scale_down_timer(
1530 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1531}
1532
1533static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1534{
1535 int ret = 0;
1536
1537 if (qseecom.support_bus_scaling) {
1538 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1539 if (ret)
1540 pr_err("Failed to set bw MEDIUM.\n");
1541 } else {
1542 ret = qsee_vote_for_clock(data, CLK_SFPB);
1543 if (ret)
1544 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1545 }
1546 return ret;
1547}
1548
1549static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1550 void __user *argp)
1551{
1552 ion_phys_addr_t pa;
1553 int32_t ret;
1554 struct qseecom_set_sb_mem_param_req req;
1555 size_t len;
1556
1557 /* Copy the relevant information needed for loading the image */
1558 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1559 return -EFAULT;
1560
1561 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1562 (req.sb_len == 0)) {
1563 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1564 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1565 return -EFAULT;
1566 }
1567 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1568 req.sb_len))
1569 return -EFAULT;
1570
1571 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001572 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001573 req.ifd_data_fd);
1574 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1575 pr_err("Ion client could not retrieve the handle\n");
1576 return -ENOMEM;
1577 }
1578 /* Get the physical address of the ION BUF */
1579 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1580 if (ret) {
1581
1582 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1583 ret);
1584 return ret;
1585 }
1586
1587 if (len < req.sb_len) {
1588 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1589 req.sb_len, len);
1590 return -EINVAL;
1591 }
1592 /* Populate the structure for sending scm call to load image */
1593 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1594 data->client.ihandle);
1595 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1596 pr_err("ION memory mapping for client shared buf failed\n");
1597 return -ENOMEM;
1598 }
1599 data->client.sb_phys = (phys_addr_t)pa;
1600 data->client.sb_length = req.sb_len;
1601 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1602 return 0;
1603}
1604
1605static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
1606{
1607 int ret;
1608
1609 ret = (qseecom.send_resp_flag != 0);
1610 return ret || data->abort;
1611}
1612
1613static int __qseecom_reentrancy_listener_has_sent_rsp(
1614 struct qseecom_dev_handle *data,
1615 struct qseecom_registered_listener_list *ptr_svc)
1616{
1617 int ret;
1618
1619 ret = (ptr_svc->send_resp_flag != 0);
1620 return ret || data->abort;
1621}
1622
1623static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
1624 struct qseecom_command_scm_resp *resp,
1625 struct qseecom_client_listener_data_irsp *send_data_rsp,
1626 struct qseecom_registered_listener_list *ptr_svc,
1627 uint32_t lstnr) {
1628 int ret = 0;
1629
1630 send_data_rsp->status = QSEOS_RESULT_FAILURE;
1631 qseecom.send_resp_flag = 0;
1632 send_data_rsp->qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
1633 send_data_rsp->listener_id = lstnr;
1634 if (ptr_svc)
1635 pr_warn("listener_id:%x, lstnr: %x\n",
1636 ptr_svc->svc.listener_id, lstnr);
1637 if (ptr_svc && ptr_svc->ihandle) {
1638 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
1639 ptr_svc->sb_virt, ptr_svc->sb_length,
1640 ION_IOC_CLEAN_INV_CACHES);
1641 if (ret) {
1642 pr_err("cache operation failed %d\n", ret);
1643 return ret;
1644 }
1645 }
1646
1647 if (lstnr == RPMB_SERVICE) {
1648 ret = __qseecom_enable_clk(CLK_QSEE);
1649 if (ret)
1650 return ret;
1651 }
1652 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, send_data_rsp,
1653 sizeof(send_data_rsp), resp, sizeof(*resp));
1654 if (ret) {
1655 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1656 ret, data->client.app_id);
1657 if (lstnr == RPMB_SERVICE)
1658 __qseecom_disable_clk(CLK_QSEE);
1659 return ret;
1660 }
1661 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1662 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1663 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1664 resp->result, data->client.app_id, lstnr);
1665 ret = -EINVAL;
1666 }
1667 if (lstnr == RPMB_SERVICE)
1668 __qseecom_disable_clk(CLK_QSEE);
1669 return ret;
1670}
1671
1672static void __qseecom_clean_listener_sglistinfo(
1673 struct qseecom_registered_listener_list *ptr_svc)
1674{
1675 if (ptr_svc->sglist_cnt) {
1676 memset(ptr_svc->sglistinfo_ptr, 0,
1677 SGLISTINFO_TABLE_SIZE);
1678 ptr_svc->sglist_cnt = 0;
1679 }
1680}
1681
/*
 * Service QSEOS_RESULT_INCOMPLETE responses from TZ.  Each iteration
 * wakes the listener named in resp->data, waits (signals blocked) for
 * userspace to post a response, then sends that response back to TZ,
 * looping until TZ returns a final result.
 *
 * Returns 0 on success, -ENODEV if the client was aborted while
 * waiting, -EINVAL/-ERESTARTSYS for a bad listener, or an scm/cache-op
 * error code.
 */
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp;
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up the blocking listener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/*
		 * NOTE(review): when the loop above finds no match, the
		 * cursor is left pointing at the list-head container, not
		 * NULL, so this NULL test cannot fire; the "does not
		 * exist" case is actually caught by the listener_id
		 * comparison below -- confirm against upstream fixes.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_warn("Service requested does not exist\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, NULL, lstnr);
			return -ERESTARTSYS;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);
		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		do {
			/*
			 * When reentrancy is not supported, check global
			 * send_resp_flag; otherwise, check this listener's
			 * send_resp_flag.
			 */
			if (!qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_listener_has_sent_rsp(data))) {
				break;
			}

			if (qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort) {
			/* Client aborted: still answer TZ, but as FAILURE. */
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}

		qseecom.send_resp_flag = 0;
		ptr_svc->send_resp_flag = 0;
		table = ptr_svc->sglistinfo_ptr;
		/* Build the 32- or 64-bit response depending on QSEE version. */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			send_data_rsp.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
			send_data_rsp.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			send_data_rsp_64bit.sglistinfo_ptr =
				virt_to_phys(table);
			send_data_rsp_64bit.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* The first field of both response layouts is the command id. */
		if (qseecom.whitelist_support == false)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			/* Make the shared buffer coherent before TZ reads it. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}

		/* RPMB/SSD listeners need the QSEE clock across the call. */
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		ptr_svc->listener_in_use = false;
		__qseecom_clean_listener_sglistinfo(ptr_svc);
		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
				__qseecom_disable_clk(CLK_QSEE);
			return ret;
		}
		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
		}
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
			__qseecom_disable_clk(CLK_QSEE);

	}
	/* An abort recorded in rc takes precedence over the last scm ret. */
	if (rc)
		return rc;

	return ret;
}
1850
/*
 * Legacy (pre-smcinvoke) handling of a TZ app blocked on a listener:
 * sleep (signals blocked) until the listener in resp->data becomes
 * free, then send QSEOS_CONTINUE_BLOCKED_REQ_COMMAND so TZ resumes the
 * app, and flag the response INCOMPLETE for the caller's normal
 * incomplete-command processing.
 *
 * Called with app_access_lock held; the lock is dropped while sleeping
 * so the listener thread can make progress.
 */
static int __qseecom_process_blocked_on_listener_legacy(
				struct qseecom_command_scm_resp *resp,
				struct qseecom_registered_app_list *ptr_app,
				struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	bool found_app = false;
	unsigned long flags;
	sigset_t new_sigset;
	sigset_t old_sigset;

	if (!resp || !data) {
		pr_err("invalid resp or data pointer\n");
		ret = -EINVAL;
		goto exit;
	}

	/* find app_id & img_name from list */
	if (!ptr_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
					list) {
			if ((ptr_app->app_id == data->client.app_id) &&
				(!strcmp(ptr_app->app_name,
						data->client.app_name))) {
				found_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
					flags);
		if (!found_app) {
			pr_err("app_id %d (%s) is not found\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -ENOENT;
			goto exit;
		}
	}

	list_ptr = __qseecom_find_svc(resp->data);
	if (!list_ptr) {
		pr_err("Invalid listener ID\n");
		ret = -ENODATA;
		goto exit;
	}
	pr_debug("lsntr %d in_use = %d\n",
			resp->data, list_ptr->listener_in_use);
	ptr_app->blocked_on_listener_id = resp->data;

	/* sleep until listener is available */
	sigfillset(&new_sigset);
	sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

	do {
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		/* Drop the lock while sleeping so the listener can run. */
		mutex_unlock(&app_access_lock);
		wait_event_freezable(
			list_ptr->listener_block_app_wq,
			!list_ptr->listener_in_use);
		mutex_lock(&app_access_lock);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
	} while (list_ptr->listener_in_use);

	sigprocmask(SIG_SETMASK, &old_sigset, NULL);

	ptr_app->blocked_on_listener_id = 0;
	/* notify the blocked app that listener is available */
	pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
		resp->data, data->client.app_id,
		data->client.app_name);
	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
	ireq.app_or_session_id = data->client.app_id;
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
	if (ret) {
		pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
			data->client.app_id,
			data->client.app_name, ret);
		goto exit;
	}
	/*
	 * After TZ app is unblocked, then continue to next case
	 * for incomplete request processing
	 */
	resp->result = QSEOS_RESULT_INCOMPLETE;
exit:
	return ret;
}
1946
/*
 * smcinvoke-aware handling of a TZ session blocked on a listener: sleep
 * (signals blocked) until the listener in resp->data becomes free, then
 * send QSEOS_CONTINUE_BLOCKED_REQ_COMMAND keyed by the session id taken
 * from resp->resp_type.  If that fails, retry once in legacy mode keyed
 * by @app_id, temporarily clearing qseecom.smcinvoke_support so the scm
 * dispatcher picks the legacy SMC id.
 *
 * Called with app_access_lock held; the lock is dropped while sleeping.
 */
static int __qseecom_process_blocked_on_listener_smcinvoke(
			struct qseecom_command_scm_resp *resp, uint32_t app_id)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	unsigned int session_id;
	sigset_t new_sigset;
	sigset_t old_sigset;

	if (!resp) {
		pr_err("invalid resp pointer\n");
		ret = -EINVAL;
		goto exit;
	}
	session_id = resp->resp_type;
	list_ptr = __qseecom_find_svc(resp->data);
	if (!list_ptr) {
		pr_err("Invalid listener ID\n");
		ret = -ENODATA;
		goto exit;
	}
	pr_debug("lsntr %d in_use = %d\n",
			resp->data, list_ptr->listener_in_use);

	/* sleep until listener is available */
	sigfillset(&new_sigset);
	sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

	do {
		qseecom.app_block_ref_cnt++;
		/* Drop the lock while sleeping so the listener can run. */
		mutex_unlock(&app_access_lock);
		wait_event_freezable(
			list_ptr->listener_block_app_wq,
			!list_ptr->listener_in_use);
		mutex_lock(&app_access_lock);
		qseecom.app_block_ref_cnt--;
	} while (list_ptr->listener_in_use);

	sigprocmask(SIG_SETMASK, &old_sigset, NULL);

	/* notify TZ that listener is available */
	pr_warn("Lsntr %d is available, unblock session(%d) in TZ\n",
			resp->data, session_id);
	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
	ireq.app_or_session_id = session_id;
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
	if (ret) {
		/* retry with legacy cmd */
		qseecom.smcinvoke_support = false;
		ireq.app_or_session_id = app_id;
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
		qseecom.smcinvoke_support = true;
		if (ret) {
			pr_err("cont block req for app %d or session %d fail\n",
				app_id, session_id);
			goto exit;
		}
	}
	/* Let the caller run normal incomplete-request processing next. */
	resp->result = QSEOS_RESULT_INCOMPLETE;
exit:
	return ret;
}
2015
2016static int __qseecom_process_reentrancy_blocked_on_listener(
2017 struct qseecom_command_scm_resp *resp,
2018 struct qseecom_registered_app_list *ptr_app,
2019 struct qseecom_dev_handle *data)
2020{
2021 if (!qseecom.smcinvoke_support)
2022 return __qseecom_process_blocked_on_listener_legacy(
2023 resp, ptr_app, data);
2024 else
2025 return __qseecom_process_blocked_on_listener_smcinvoke(
Zhen Konge7f525f2017-12-01 18:26:25 -08002026 resp, data->client.app_id);
Zhen Kong2f60f492017-06-29 15:22:14 -07002027}
/*
 * __qseecom_reentrancy_process_incomplete_cmd() - reentrancy-aware handling
 * of QSEOS_RESULT_INCOMPLETE: wake the listener TZ asked for, wait for its
 * response, and send that response back to TZ; repeat until TZ returns a
 * final result.
 * @data: client handle of the app whose command is incomplete.
 * @resp: SCM response buffer; updated in place by each scm call.
 *
 * Returns 0 on success, negative errno on failure (rc takes priority when
 * the client aborted). Called with app_access_lock held; the lock is dropped
 * while waiting for the listener response.
 */
static int __qseecom_reentrancy_process_incomplete_cmd(
					struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp;
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up the blocking listener service identified by lstnr
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/* NOTE: if the loop completes, ptr_svc points at the list
		 * head container, not NULL; the id re-check below is what
		 * actually catches an unregistered listener.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			return -EINVAL;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			return -EINVAL;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_warn("Service requested does not exist\n");
			return -ERESTARTSYS;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals */
		sigfillset(&new_sigset);

		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		/* unlock mutex btw waking listener and sleep-wait */
		mutex_unlock(&app_access_lock);
		do {
			if (!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);
		/* lock mutex again after resp sent */
		mutex_lock(&app_access_lock);
		ptr_svc->send_resp_flag = 0;
		qseecom.send_resp_flag = 0;

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			/* report failure to TZ but keep looping state in rc */
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
		/* hand the listener's scatter-gather table to TZ, flushed */
		table = ptr_svc->sglistinfo_ptr;
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			send_data_rsp.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
			send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			send_data_rsp_64bit.sglistinfo_ptr =
				virt_to_phys(table);
			send_data_rsp_64bit.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* first field of both rsp structs is the command id */
		if (qseecom.whitelist_support == false)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}
		/* RPMB traffic needs the QSEE clock voted on */
		if (lstnr == RPMB_SERVICE) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		/* listener served this round; let blocked apps retry */
		ptr_svc->listener_in_use = false;
		__qseecom_clean_listener_sglistinfo(ptr_svc);
		wake_up_interruptible(&ptr_svc->listener_block_app_wq);

		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			goto exit;
		}

		switch (resp->result) {
		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
				lstnr, data->client.app_id, resp->data);
			if (lstnr == resp->data) {
				pr_err("lstnr %d should not be blocked!\n",
					lstnr);
				ret = -EINVAL;
				goto exit;
			}
			ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, NULL, data);
			if (ret) {
				pr_err("failed to process App(%d) %s blocked on listener %d\n",
					data->client.app_id,
					data->client.app_name, resp->data);
				goto exit;
			}
			/* fall through: unblocked, treat like INCOMPLETE */
		case QSEOS_RESULT_SUCCESS:
		case QSEOS_RESULT_INCOMPLETE:
			break;
		default:
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
			goto exit;
		}
exit:
		/* balance the per-iteration RPMB clock vote */
		if (lstnr == RPMB_SERVICE)
			__qseecom_disable_clk(CLK_QSEE);

	}
	/* abort (rc) takes priority over the last scm status */
	if (rc)
		return rc;

	return ret;
}
2204
/*
 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
 * So, needs to first check if no app blocked before sending OS level scm call,
 * then wait until all apps are unblocked.
 */
static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
{
	sigset_t new_sigset, old_sigset;

	/* Only applies to TZ-OS-owned calls in reentrancy phases 1 and 2 */
	if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
		qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
		IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
		/* thread sleeps until every app is unblocked */
		while (qseecom.app_block_ref_cnt > 0) {
			/* block all signals so the wait is not interrupted */
			sigfillset(&new_sigset);
			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
			/* drop the lock so blocked apps can make progress */
			mutex_unlock(&app_access_lock);
			do {
				if (!wait_event_freezable(qseecom.app_block_wq,
					(qseecom.app_block_ref_cnt == 0)))
					break;
			} while (1);
			mutex_lock(&app_access_lock);
			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		}
	}
}
2233
/*
 * scm_call of send data will fail if this TA is blocked or there are more
 * than one TA requesting listener services; So, first check to see if need
 * to wait.
 */
static void __qseecom_reentrancy_check_if_this_app_blocked(
			struct qseecom_registered_app_list *ptr_app)
{
	sigset_t new_sigset, old_sigset;

	if (qseecom.qsee_reentrancy_support) {
		/* wait while this app is blocked, or >1 app awaits listeners */
		while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
			/* block all signals, then sleep until unblocked */
			sigfillset(&new_sigset);
			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
			/* drop the lock so the blocker can finish */
			mutex_unlock(&app_access_lock);
			do {
				if (!wait_event_freezable(qseecom.app_block_wq,
					(!ptr_app->app_blocked &&
					qseecom.app_block_ref_cnt <= 1)))
					break;
			} while (1);
			mutex_lock(&app_access_lock);
			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		}
	}
}
2261
2262static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2263 uint32_t *app_id)
2264{
2265 int32_t ret;
2266 struct qseecom_command_scm_resp resp;
2267 bool found_app = false;
2268 struct qseecom_registered_app_list *entry = NULL;
2269 unsigned long flags = 0;
2270
2271 if (!app_id) {
2272 pr_err("Null pointer to app_id\n");
2273 return -EINVAL;
2274 }
2275 *app_id = 0;
2276
2277 /* check if app exists and has been registered locally */
2278 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2279 list_for_each_entry(entry,
2280 &qseecom.registered_app_list_head, list) {
2281 if (!strcmp(entry->app_name, req.app_name)) {
2282 found_app = true;
2283 break;
2284 }
2285 }
2286 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2287 if (found_app) {
2288 pr_debug("Found app with id %d\n", entry->app_id);
2289 *app_id = entry->app_id;
2290 return 0;
2291 }
2292
2293 memset((void *)&resp, 0, sizeof(resp));
2294
2295 /* SCM_CALL to check if app_id for the mentioned app exists */
2296 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2297 sizeof(struct qseecom_check_app_ireq),
2298 &resp, sizeof(resp));
2299 if (ret) {
2300 pr_err("scm_call to check if app is already loaded failed\n");
2301 return -EINVAL;
2302 }
2303
2304 if (resp.result == QSEOS_RESULT_FAILURE)
2305 return 0;
2306
2307 switch (resp.resp_type) {
2308 /*qsee returned listener type response */
2309 case QSEOS_LISTENER_ID:
2310 pr_err("resp type is of listener type instead of app");
2311 return -EINVAL;
2312 case QSEOS_APP_ID:
2313 *app_id = resp.data;
2314 return 0;
2315 default:
2316 pr_err("invalid resp type (%d) from qsee",
2317 resp.resp_type);
2318 return -ENODEV;
2319 }
2320}
2321
2322static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2323{
2324 struct qseecom_registered_app_list *entry = NULL;
2325 unsigned long flags = 0;
2326 u32 app_id = 0;
2327 struct ion_handle *ihandle; /* Ion handle */
2328 struct qseecom_load_img_req load_img_req;
2329 int32_t ret = 0;
2330 ion_phys_addr_t pa = 0;
2331 size_t len;
2332 struct qseecom_command_scm_resp resp;
2333 struct qseecom_check_app_ireq req;
2334 struct qseecom_load_app_ireq load_req;
2335 struct qseecom_load_app_64bit_ireq load_req_64bit;
2336 void *cmd_buf = NULL;
2337 size_t cmd_len;
2338 bool first_time = false;
2339
2340 /* Copy the relevant information needed for loading the image */
2341 if (copy_from_user(&load_img_req,
2342 (void __user *)argp,
2343 sizeof(struct qseecom_load_img_req))) {
2344 pr_err("copy_from_user failed\n");
2345 return -EFAULT;
2346 }
2347
2348 /* Check and load cmnlib */
2349 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2350 if (!qseecom.commonlib_loaded &&
2351 load_img_req.app_arch == ELFCLASS32) {
2352 ret = qseecom_load_commonlib_image(data, "cmnlib");
2353 if (ret) {
2354 pr_err("failed to load cmnlib\n");
2355 return -EIO;
2356 }
2357 qseecom.commonlib_loaded = true;
2358 pr_debug("cmnlib is loaded\n");
2359 }
2360
2361 if (!qseecom.commonlib64_loaded &&
2362 load_img_req.app_arch == ELFCLASS64) {
2363 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2364 if (ret) {
2365 pr_err("failed to load cmnlib64\n");
2366 return -EIO;
2367 }
2368 qseecom.commonlib64_loaded = true;
2369 pr_debug("cmnlib64 is loaded\n");
2370 }
2371 }
2372
2373 if (qseecom.support_bus_scaling) {
2374 mutex_lock(&qsee_bw_mutex);
2375 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2376 mutex_unlock(&qsee_bw_mutex);
2377 if (ret)
2378 return ret;
2379 }
2380
2381 /* Vote for the SFPB clock */
2382 ret = __qseecom_enable_clk_scale_up(data);
2383 if (ret)
2384 goto enable_clk_err;
2385
2386 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2387 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2388 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2389
2390 ret = __qseecom_check_app_exists(req, &app_id);
2391 if (ret < 0)
2392 goto loadapp_err;
2393
2394 if (app_id) {
2395 pr_debug("App id %d (%s) already exists\n", app_id,
2396 (char *)(req.app_name));
2397 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2398 list_for_each_entry(entry,
2399 &qseecom.registered_app_list_head, list){
2400 if (entry->app_id == app_id) {
2401 entry->ref_cnt++;
2402 break;
2403 }
2404 }
2405 spin_unlock_irqrestore(
2406 &qseecom.registered_app_list_lock, flags);
2407 ret = 0;
2408 } else {
2409 first_time = true;
2410 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2411 (char *)(load_img_req.img_name));
2412 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002413 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002414 load_img_req.ifd_data_fd);
2415 if (IS_ERR_OR_NULL(ihandle)) {
2416 pr_err("Ion client could not retrieve the handle\n");
2417 ret = -ENOMEM;
2418 goto loadapp_err;
2419 }
2420
2421 /* Get the physical address of the ION BUF */
2422 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2423 if (ret) {
2424 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2425 ret);
2426 goto loadapp_err;
2427 }
2428 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2429 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2430 len, load_img_req.mdt_len,
2431 load_img_req.img_len);
2432 ret = -EINVAL;
2433 goto loadapp_err;
2434 }
2435 /* Populate the structure for sending scm call to load image */
2436 if (qseecom.qsee_version < QSEE_VERSION_40) {
2437 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2438 load_req.mdt_len = load_img_req.mdt_len;
2439 load_req.img_len = load_img_req.img_len;
2440 strlcpy(load_req.app_name, load_img_req.img_name,
2441 MAX_APP_NAME_SIZE);
2442 load_req.phy_addr = (uint32_t)pa;
2443 cmd_buf = (void *)&load_req;
2444 cmd_len = sizeof(struct qseecom_load_app_ireq);
2445 } else {
2446 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2447 load_req_64bit.mdt_len = load_img_req.mdt_len;
2448 load_req_64bit.img_len = load_img_req.img_len;
2449 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2450 MAX_APP_NAME_SIZE);
2451 load_req_64bit.phy_addr = (uint64_t)pa;
2452 cmd_buf = (void *)&load_req_64bit;
2453 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2454 }
2455
2456 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2457 ION_IOC_CLEAN_INV_CACHES);
2458 if (ret) {
2459 pr_err("cache operation failed %d\n", ret);
2460 goto loadapp_err;
2461 }
2462
2463 /* SCM_CALL to load the app and get the app_id back */
2464 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2465 cmd_len, &resp, sizeof(resp));
2466 if (ret) {
2467 pr_err("scm_call to load app failed\n");
2468 if (!IS_ERR_OR_NULL(ihandle))
2469 ion_free(qseecom.ion_clnt, ihandle);
2470 ret = -EINVAL;
2471 goto loadapp_err;
2472 }
2473
2474 if (resp.result == QSEOS_RESULT_FAILURE) {
2475 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2476 if (!IS_ERR_OR_NULL(ihandle))
2477 ion_free(qseecom.ion_clnt, ihandle);
2478 ret = -EFAULT;
2479 goto loadapp_err;
2480 }
2481
2482 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2483 ret = __qseecom_process_incomplete_cmd(data, &resp);
2484 if (ret) {
2485 pr_err("process_incomplete_cmd failed err: %d\n",
2486 ret);
2487 if (!IS_ERR_OR_NULL(ihandle))
2488 ion_free(qseecom.ion_clnt, ihandle);
2489 ret = -EFAULT;
2490 goto loadapp_err;
2491 }
2492 }
2493
2494 if (resp.result != QSEOS_RESULT_SUCCESS) {
2495 pr_err("scm_call failed resp.result unknown, %d\n",
2496 resp.result);
2497 if (!IS_ERR_OR_NULL(ihandle))
2498 ion_free(qseecom.ion_clnt, ihandle);
2499 ret = -EFAULT;
2500 goto loadapp_err;
2501 }
2502
2503 app_id = resp.data;
2504
2505 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2506 if (!entry) {
2507 ret = -ENOMEM;
2508 goto loadapp_err;
2509 }
2510 entry->app_id = app_id;
2511 entry->ref_cnt = 1;
2512 entry->app_arch = load_img_req.app_arch;
2513 /*
2514 * keymaster app may be first loaded as "keymaste" by qseecomd,
2515 * and then used as "keymaster" on some targets. To avoid app
2516 * name checking error, register "keymaster" into app_list and
2517 * thread private data.
2518 */
2519 if (!strcmp(load_img_req.img_name, "keymaste"))
2520 strlcpy(entry->app_name, "keymaster",
2521 MAX_APP_NAME_SIZE);
2522 else
2523 strlcpy(entry->app_name, load_img_req.img_name,
2524 MAX_APP_NAME_SIZE);
2525 entry->app_blocked = false;
2526 entry->blocked_on_listener_id = 0;
2527
2528 /* Deallocate the handle */
2529 if (!IS_ERR_OR_NULL(ihandle))
2530 ion_free(qseecom.ion_clnt, ihandle);
2531
2532 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2533 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2534 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2535 flags);
2536
2537 pr_warn("App with id %u (%s) now loaded\n", app_id,
2538 (char *)(load_img_req.img_name));
2539 }
2540 data->client.app_id = app_id;
2541 data->client.app_arch = load_img_req.app_arch;
2542 if (!strcmp(load_img_req.img_name, "keymaste"))
2543 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2544 else
2545 strlcpy(data->client.app_name, load_img_req.img_name,
2546 MAX_APP_NAME_SIZE);
2547 load_img_req.app_id = app_id;
2548 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2549 pr_err("copy_to_user failed\n");
2550 ret = -EFAULT;
2551 if (first_time == true) {
2552 spin_lock_irqsave(
2553 &qseecom.registered_app_list_lock, flags);
2554 list_del(&entry->list);
2555 spin_unlock_irqrestore(
2556 &qseecom.registered_app_list_lock, flags);
2557 kzfree(entry);
2558 }
2559 }
2560
2561loadapp_err:
2562 __qseecom_disable_clk_scale_down(data);
2563enable_clk_err:
2564 if (qseecom.support_bus_scaling) {
2565 mutex_lock(&qsee_bw_mutex);
2566 qseecom_unregister_bus_bandwidth_needs(data);
2567 mutex_unlock(&qsee_bw_mutex);
2568 }
2569 return ret;
2570}
2571
/*
 * __qseecom_cleanup_app() - wait for all in-flight ioctls on this handle to
 * drain before the app is unloaded.
 * @data: client handle being torn down (caller holds one ioctl_count ref,
 *        hence the "> 1" threshold).
 *
 * Returns 1 ("unload the app") on a clean drain, -ERESTARTSYS if the wait
 * was interrupted. With reentrancy support, app_access_lock is dropped
 * while waiting so the other ioctls can finish.
 */
static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
{
	int ret = 1;	/* Set unload app */

	/* kick any thread stuck waiting for a listener response */
	wake_up_all(&qseecom.send_resp_wq);
	if (qseecom.qsee_reentrancy_support)
		mutex_unlock(&app_access_lock);
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
					atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			break;
		}
	}
	if (qseecom.qsee_reentrancy_support)
		mutex_lock(&app_access_lock);
	return ret;
}
2591
2592static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2593{
2594 int ret = 0;
2595
2596 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2597 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2598 ion_free(qseecom.ion_clnt, data->client.ihandle);
2599 data->client.ihandle = NULL;
2600 }
2601 return ret;
2602}
2603
/*
 * qseecom_unload_app() - drop a reference on the client's trusted app and
 * unload it from QSEE when no references remain (or on app crash).
 * @data:      client handle identifying the app (app_id/app_name).
 * @app_crash: true when called from a crash/teardown path; forces unload
 *             unless the app is currently blocked on a listener.
 *
 * Returns 0 on success, negative errno on failure. Always unmaps the
 * client's ion shared buffer and marks the handle released, even on error.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;
	bool found_app = false;
	bool found_dead_app = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* keymaster stays resident; only release local resources */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	/* drain in-flight ioctls, then wait for OS-level scm to be safe */
	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/* don't force-unload an app that is
					 * still blocked on a listener
					 */
					if (ptr_app->app_blocked)
						app_crash = false;
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				/* same id but different name: stale entry */
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for sending scm call to unload */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
								req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
					data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
					data->client.app_id);
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
									ret);
				goto unload_exit;
			}
		}
	}

	/* update (or remove) the list entry under the lock */
	if (found_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}
unload_exit:
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2726
2727static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2728 unsigned long virt)
2729{
2730 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2731}
2732
2733static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2734 unsigned long virt)
2735{
2736 return (uintptr_t)data->client.sb_virt +
2737 (virt - data->client.user_virt_sb_base);
2738}
2739
2740int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2741 struct qseecom_send_svc_cmd_req *req_ptr,
2742 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2743{
2744 int ret = 0;
2745 void *req_buf = NULL;
2746
2747 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2748 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2749 req_ptr, send_svc_ireq_ptr);
2750 return -EINVAL;
2751 }
2752
2753 /* Clients need to ensure req_buf is at base offset of shared buffer */
2754 if ((uintptr_t)req_ptr->cmd_req_buf !=
2755 data_ptr->client.user_virt_sb_base) {
2756 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2757 return -EINVAL;
2758 }
2759
2760 if (data_ptr->client.sb_length <
2761 sizeof(struct qseecom_rpmb_provision_key)) {
2762 pr_err("shared buffer is too small to hold key type\n");
2763 return -EINVAL;
2764 }
2765 req_buf = data_ptr->client.sb_virt;
2766
2767 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2768 send_svc_ireq_ptr->key_type =
2769 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2770 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2771 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2772 data_ptr, (uintptr_t)req_ptr->resp_buf));
2773 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2774
2775 return ret;
2776}
2777
2778int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2779 struct qseecom_send_svc_cmd_req *req_ptr,
2780 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2781{
2782 int ret = 0;
2783 uint32_t reqd_len_sb_in = 0;
2784
2785 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2786 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2787 req_ptr, send_svc_ireq_ptr);
2788 return -EINVAL;
2789 }
2790
2791 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2792 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2793 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2794 pr_err("Required: %u, Available: %zu\n",
2795 reqd_len_sb_in, data_ptr->client.sb_length);
2796 return -ENOMEM;
2797 }
2798
2799 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2800 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2801 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2802 data_ptr, (uintptr_t)req_ptr->resp_buf));
2803 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2804
2805 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2806 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2807
2808
2809 return ret;
2810}
2811
/*
 * __validate_send_service_cmd_inputs() - sanity-check a send-service request
 * from user space before it is forwarded to QSEE.
 * @data: client handle whose shared buffer bounds the request.
 * @req:  user-supplied command descriptor.
 *
 * Verifies that the client state is initialized, that both cmd and resp
 * buffers lie entirely within the shared buffer, and that no length or
 * pointer arithmetic can overflow. Returns 0 if valid, -EINVAL on any
 * malformed input, -ENOMEM if the buffers do not fit.
 */
static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_svc_cmd_req *req)
{
	/* basic non-NULL checks on the request and client state */
	if (!req || !req->resp_buf || !req->cmd_req_buf) {
		pr_err("req or cmd buffer or response buffer is null\n");
		return -EINVAL;
	}

	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->client.sb_virt == NULL) {
		pr_err("sb_virt null\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base == 0) {
		pr_err("user_virt_sb_base is null\n");
		return -EINVAL;
	}

	if (data->client.sb_length == 0) {
		pr_err("sb_length is 0\n");
		return -EINVAL;
	}

	/* both buffers must start inside the shared buffer */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	/* lengths must be non-zero and individually fit the buffer */
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* rule out pointer-arithmetic wraparound before the range checks */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* both buffers must also end inside the shared buffer */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
2896
/*
 * qseecom_send_service_cmd() - QSEECOM_IOCTL_SEND_SVC_CMD_REQ handler: send
 * an RPMB or FSM service command to QSEE via the client's shared buffer.
 * @data: client handle (becomes QSEECOM_SECURE_SERVICE for this request).
 * @argp: user pointer to struct qseecom_send_svc_cmd_req.
 *
 * Validates the request, builds the per-command ireq, votes bandwidth and
 * clocks around the SCM call, and maintains cache coherency of the shared
 * buffer before and after. Returns 0 on success, negative errno on failure.
 */
static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	int ret = 0;
	struct qseecom_client_send_service_ireq send_svc_ireq;
	struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
	struct qseecom_command_scm_resp resp;
	struct qseecom_send_svc_cmd_req req;
	void *send_req_ptr;
	size_t req_buf_size;

	/*struct qseecom_command_scm_resp resp;*/

	if (copy_from_user(&req,
				(void __user *)argp,
				sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}

	if (__validate_send_service_cmd_inputs(data, &req))
		return -EINVAL;

	data->type = QSEECOM_SECURE_SERVICE;

	/* pick the ireq layout matching the command family */
	switch (req.cmd_id) {
	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
	case QSEOS_RPMB_ERASE_COMMAND:
	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
		send_req_ptr = &send_svc_ireq;
		req_buf_size = sizeof(send_svc_ireq);
		if (__qseecom_process_rpmb_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	case QSEOS_FSM_LTEOTA_REQ_CMD:
	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
	case QSEOS_FSM_IKE_REQ_CMD:
	case QSEOS_FSM_IKE_REQ_RSP_CMD:
	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
	case QSEOS_FSM_OEM_FUSE_READ_ROW:
	case QSEOS_FSM_ENCFS_REQ_CMD:
	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
		send_req_ptr = &send_fsm_key_svc_ireq;
		req_buf_size = sizeof(send_fsm_key_svc_ireq);
		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	default:
		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
		return -EINVAL;
	}

	/* vote bus bandwidth (timer-based) or clocks (direct) for the call */
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
		if (ret) {
			pr_err("Fail to set bw HIGH\n");
			return ret;
		}
	} else {
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clocks with err %d\n", ret);
			goto exit;
		}
	}

	/* make the shared buffer visible to the secure side */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				(const void *)send_req_ptr,
				req_buf_size, &resp, sizeof(resp));
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		/* balance the vote taken above before bailing out */
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		}
		goto exit;
	}
	/* pick up any data the secure side wrote into the shared buffer */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		pr_debug("qseos_result_incomplete\n");
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd fail with result: %d\n",
				resp.result);
		}
		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
			/* provision status is reported via resp.result */
			pr_warn("RPMB key status is 0x%x\n", resp.result);
			if (put_user(resp.result,
				(uint32_t __user *)req.resp_buf)) {
				ret = -EINVAL;
				goto exit;
			}
			ret = 0;
		}
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with resp.result: %d\n", resp.result);
		ret = -EINVAL;
		break;
	default:
		pr_err("Response result %d not supported\n",
						resp.result);
		ret = -EINVAL;
		break;
	}
	/* release the bandwidth/clock vote taken before the call */
	if (!qseecom.support_bus_scaling) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	} else {
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
	}

exit:
	return ret;
}
3034
/*
 * __validate_send_cmd_inputs() - sanity-check a user-supplied send-cmd request.
 *
 * Verifies that the command and response buffers both lie entirely inside
 * the client's shared buffer (user-virtual window), that the lengths fit,
 * and that none of the pointer/length additions can wrap around.  All
 * comparisons are done on the user-virtual addresses; translation to
 * kernel/phys addresses happens later in __qseecom_send_cmd().
 *
 * Return: 0 when the request is acceptable, -EINVAL on any malformed
 * field, -ENOMEM when cmd_req_len + resp_len does not fit in the shared
 * buffer.
 */
static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)

{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}
	/* resp_buf may be NULL only when resp_len is 0; cmd_req_buf never. */
	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
						(req->cmd_req_buf == NULL)) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	/*
	 * NOTE(review): this range check rejects resp_buf == NULL even in
	 * the (resp_buf == NULL && resp_len == 0) case the first check
	 * explicitly allows — confirm whether any caller relies on a NULL
	 * response buffer before tightening/relaxing either check.
	 */
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* Guard the end-of-buffer additions below against pointer wrap. */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Both buffers must also END inside the shared buffer. */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
3104
/*
 * __qseecom_process_reentrancy() - handle the TZ response for a command
 * when QSEE reentrancy is supported.
 *
 * For QSEOS_RESULT_BLOCKED_ON_LISTENER the app is first unblocked via the
 * listener path and then execution deliberately falls through to the
 * INCOMPLETE case to resume the interrupted command.
 *
 * Return: 0 on success, negative errno on failure.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/* fallthrough: resume the command that was blocked */

	case QSEOS_RESULT_INCOMPLETE:
		/*
		 * Mark the app blocked while TZ finishes the command so
		 * concurrent senders wait; wake them when done.
		 */
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
				ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
						resp->result);
		return -EINVAL;
	}
}
3143
/*
 * __qseecom_send_cmd() - deliver an already-validated command to a loaded
 * trusted app via an SCM call.
 *
 * Builds the 32-bit or 64-bit ireq (depending on QSEE version), performs
 * the required cache maintenance on the shared buffer and the sglist info
 * table, issues the SCM call, and post-processes the response (reentrancy
 * or legacy INCOMPLETE handling).  The shared buffer is invalidated on
 * every exit path past the SCM call so the client reads fresh data.
 *
 * Return: 0 on success, negative errno on failure (-ENOENT if the app is
 * not in the registered list, -EFAULT for a 32-bit app whose physical
 * buffers exceed 4 GB, or the SCM/cache-op error).
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	int ret2 = 0;	/* result of the final cache invalidate */
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Pre-4.0 QSEE takes 32-bit physical addresses. */
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* TZ reads the table via its phys addr: flush it out. */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* First field of both ireq layouts is the command id. */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* Push the client's request data out to memory before TZ reads it. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
			ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/*
	 * Invalidate the shared buffer unconditionally so the client sees
	 * whatever TZ wrote, even on the error paths above; ret2 keeps the
	 * cache-op status separate from the command status.
	 */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret2);
		return ret2;
	}
	return ret;
}
3281
3282static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3283{
3284 int ret = 0;
3285 struct qseecom_send_cmd_req req;
3286
3287 ret = copy_from_user(&req, argp, sizeof(req));
3288 if (ret) {
3289 pr_err("copy_from_user failed\n");
3290 return ret;
3291 }
3292
3293 if (__validate_send_cmd_inputs(data, &req))
3294 return -EINVAL;
3295
3296 ret = __qseecom_send_cmd(data, &req);
3297
3298 if (ret)
3299 return ret;
3300
3301 return ret;
3302}
3303
3304int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3305 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3306 struct qseecom_dev_handle *data, int i) {
3307
3308 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3309 (req->ifd_data[i].fd > 0)) {
3310 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3311 (req->ifd_data[i].cmd_buf_offset >
3312 req->cmd_req_len - sizeof(uint32_t))) {
3313 pr_err("Invalid offset (req len) 0x%x\n",
3314 req->ifd_data[i].cmd_buf_offset);
3315 return -EINVAL;
3316 }
3317 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3318 (lstnr_resp->ifd_data[i].fd > 0)) {
3319 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3320 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3321 lstnr_resp->resp_len - sizeof(uint32_t))) {
3322 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3323 lstnr_resp->ifd_data[i].cmd_buf_offset);
3324 return -EINVAL;
3325 }
3326 }
3327 return 0;
3328}
3329
/*
 * __qseecom_update_cmd_buf() - patch ION buffer physical addresses into a
 * modfd command (32-bit sg-entry format).
 *
 * For every valid fd in the request/listener-response, imports the ION
 * buffer, writes either a single 32-bit phys addr (one sg entry) or an
 * array of qseecom_sg_entry at cmd_buf_offset, performs the matching
 * cache maintenance, and records sglist info for whitelist support.
 * When @cleanup is true the previously patched addresses are zeroed and
 * the buffers are invalidated instead.
 *
 * NOTE(review): every failure after import funnels through 'err' and is
 * reported as -ENOMEM, even for invalid-offset cases — confirm callers
 * don't need a distinct -EINVAL here.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
					struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* msg is either a modfd cmd req (app) or a listener response. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch one 32-bit phys addr. */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: patch a qseecom_sg_entry array. */
			struct qseecom_sg_entry *update;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
						(req->cmd_req_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
				(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			/* Post-command: pull TZ's writes back into caches. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Pre-command: flush patched data out for TZ. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3546
3547static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3548 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3549{
3550 struct scatterlist *sg = sg_ptr->sgl;
3551 struct qseecom_sg_entry_64bit *sg_entry;
3552 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3553 void *buf;
3554 uint i;
3555 size_t size;
3556 dma_addr_t coh_pmem;
3557
3558 if (fd_idx >= MAX_ION_FD) {
3559 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3560 return -ENOMEM;
3561 }
3562 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3563 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3564 /* Allocate a contiguous kernel buffer */
3565 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3566 size = (size + PAGE_SIZE) & PAGE_MASK;
3567 buf = dma_alloc_coherent(qseecom.pdev,
3568 size, &coh_pmem, GFP_KERNEL);
3569 if (buf == NULL) {
3570 pr_err("failed to alloc memory for sg buf\n");
3571 return -ENOMEM;
3572 }
3573 /* update qseecom_sg_list_buf_hdr_64bit */
3574 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3575 buf_hdr->new_buf_phys_addr = coh_pmem;
3576 buf_hdr->nents_total = sg_ptr->nents;
3577 /* save the left sg entries into new allocated buf */
3578 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3579 for (i = 0; i < sg_ptr->nents; i++) {
3580 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3581 sg_entry->len = sg->length;
3582 sg_entry++;
3583 sg = sg_next(sg);
3584 }
3585
3586 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3587 data->client.sec_buf_fd[fd_idx].vbase = buf;
3588 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3589 data->client.sec_buf_fd[fd_idx].size = size;
3590
3591 return 0;
3592}
3593
/*
 * __qseecom_update_cmd_buf_64() - patch ION buffer physical addresses into
 * a modfd command (64-bit sg-entry format).
 *
 * 64-bit counterpart of __qseecom_update_cmd_buf(): writes 64-bit phys
 * addresses / qseecom_sg_entry_64bit arrays at cmd_buf_offset.  Buffers
 * with more than QSEECOM_MAX_SG_ENTRY segments are spilled into a
 * separate coherent buffer via __qseecom_allocate_sg_list_buffer(); the
 * cleanup pass frees those spill buffers.  Cache maintenance and sglist
 * info bookkeeping mirror the 32-bit path (index flag 1 = 64-bit).
 *
 * Return: 0 on success, negative errno on failure (errors funnel to
 * -ENOMEM via 'err', which also frees any spill buffers).
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
					struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* msg is either a modfd cmd req (app) or a listener response. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			/* Too many entries: use the V2 spill-buffer format. */
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
				(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
						(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			/* Post-command: pull TZ's writes back into caches. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Pre-command: flush patched data out for TZ. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* Free any spill buffers allocated on earlier iterations. */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3792
3793static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3794 void __user *argp,
3795 bool is_64bit_addr)
3796{
3797 int ret = 0;
3798 int i;
3799 struct qseecom_send_modfd_cmd_req req;
3800 struct qseecom_send_cmd_req send_cmd_req;
3801
3802 ret = copy_from_user(&req, argp, sizeof(req));
3803 if (ret) {
3804 pr_err("copy_from_user failed\n");
3805 return ret;
3806 }
3807
3808 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3809 send_cmd_req.cmd_req_len = req.cmd_req_len;
3810 send_cmd_req.resp_buf = req.resp_buf;
3811 send_cmd_req.resp_len = req.resp_len;
3812
3813 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3814 return -EINVAL;
3815
3816 /* validate offsets */
3817 for (i = 0; i < MAX_ION_FD; i++) {
3818 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3819 pr_err("Invalid offset %d = 0x%x\n",
3820 i, req.ifd_data[i].cmd_buf_offset);
3821 return -EINVAL;
3822 }
3823 }
3824 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3825 (uintptr_t)req.cmd_req_buf);
3826 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3827 (uintptr_t)req.resp_buf);
3828
3829 if (!is_64bit_addr) {
3830 ret = __qseecom_update_cmd_buf(&req, false, data);
3831 if (ret)
3832 return ret;
3833 ret = __qseecom_send_cmd(data, &send_cmd_req);
3834 if (ret)
3835 return ret;
3836 ret = __qseecom_update_cmd_buf(&req, true, data);
3837 if (ret)
3838 return ret;
3839 } else {
3840 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3841 if (ret)
3842 return ret;
3843 ret = __qseecom_send_cmd(data, &send_cmd_req);
3844 if (ret)
3845 return ret;
3846 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3847 if (ret)
3848 return ret;
3849 }
3850
3851 return ret;
3852}
3853
3854static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3855 void __user *argp)
3856{
3857 return __qseecom_send_modfd_cmd(data, argp, false);
3858}
3859
3860static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
3861 void __user *argp)
3862{
3863 return __qseecom_send_modfd_cmd(data, argp, true);
3864}
3865
3866
3867
3868static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3869 struct qseecom_registered_listener_list *svc)
3870{
3871 int ret;
3872
3873 ret = (svc->rcv_req_flag != 0);
3874 return ret || data->abort;
3875}
3876
/*
 * qseecom_receive_req() - block the listener thread until TZ has queued a
 * request for it (rcv_req_flag set) or the client is aborted.
 *
 * Return: 0 when a request is pending, -ENODATA for an unknown listener
 * id, -ERESTARTSYS if the freezable wait was interrupted by a signal,
 * -ENODEV when the listener service is being aborted.
 */
static int qseecom_receive_req(struct qseecom_dev_handle *data)
{
	int ret = 0;
	struct qseecom_registered_listener_list *this_lstnr;

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (!this_lstnr) {
		pr_err("Invalid listener ID\n");
		return -ENODATA;
	}

	while (1) {
		/* Freezable wait: doesn't block system suspend. */
		if (wait_event_freezable(this_lstnr->rcv_req_wq,
				__qseecom_listener_has_rcvd_req(data,
				this_lstnr))) {
			pr_debug("Interrupted: exiting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			/* woken up for different reason */
			return -ERESTARTSYS;
		}

		/* abort is checked before consuming the request flag. */
		if (data->abort) {
			pr_err("Aborting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			return -ENODEV;
		}
		this_lstnr->rcv_req_flag = 0;
		break;
	}
	return ret;
}
3908
3909static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3910{
3911 unsigned char app_arch = 0;
3912 struct elf32_hdr *ehdr;
3913 struct elf64_hdr *ehdr64;
3914
3915 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3916
3917 switch (app_arch) {
3918 case ELFCLASS32: {
3919 ehdr = (struct elf32_hdr *)fw_entry->data;
3920 if (fw_entry->size < sizeof(*ehdr)) {
3921 pr_err("%s: Not big enough to be an elf32 header\n",
3922 qseecom.pdev->init_name);
3923 return false;
3924 }
3925 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3926 pr_err("%s: Not an elf32 header\n",
3927 qseecom.pdev->init_name);
3928 return false;
3929 }
3930 if (ehdr->e_phnum == 0) {
3931 pr_err("%s: No loadable segments\n",
3932 qseecom.pdev->init_name);
3933 return false;
3934 }
3935 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3936 sizeof(struct elf32_hdr) > fw_entry->size) {
3937 pr_err("%s: Program headers not within mdt\n",
3938 qseecom.pdev->init_name);
3939 return false;
3940 }
3941 break;
3942 }
3943 case ELFCLASS64: {
3944 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3945 if (fw_entry->size < sizeof(*ehdr64)) {
3946 pr_err("%s: Not big enough to be an elf64 header\n",
3947 qseecom.pdev->init_name);
3948 return false;
3949 }
3950 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3951 pr_err("%s: Not an elf64 header\n",
3952 qseecom.pdev->init_name);
3953 return false;
3954 }
3955 if (ehdr64->e_phnum == 0) {
3956 pr_err("%s: No loadable segments\n",
3957 qseecom.pdev->init_name);
3958 return false;
3959 }
3960 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3961 sizeof(struct elf64_hdr) > fw_entry->size) {
3962 pr_err("%s: Program headers not within mdt\n",
3963 qseecom.pdev->init_name);
3964 return false;
3965 }
3966 break;
3967 }
3968 default: {
3969 pr_err("QSEE app arch %u is not supported\n", app_arch);
3970 return false;
3971 }
3972 }
3973 return true;
3974}
3975
3976static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
3977 uint32_t *app_arch)
3978{
3979 int ret = -1;
3980 int i = 0, rc = 0;
3981 const struct firmware *fw_entry = NULL;
3982 char fw_name[MAX_APP_NAME_SIZE];
3983 struct elf32_hdr *ehdr;
3984 struct elf64_hdr *ehdr64;
3985 int num_images = 0;
3986
3987 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
3988 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
3989 if (rc) {
3990 pr_err("error with request_firmware\n");
3991 ret = -EIO;
3992 goto err;
3993 }
3994 if (!__qseecom_is_fw_image_valid(fw_entry)) {
3995 ret = -EIO;
3996 goto err;
3997 }
3998 *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3999 *fw_size = fw_entry->size;
4000 if (*app_arch == ELFCLASS32) {
4001 ehdr = (struct elf32_hdr *)fw_entry->data;
4002 num_images = ehdr->e_phnum;
4003 } else if (*app_arch == ELFCLASS64) {
4004 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4005 num_images = ehdr64->e_phnum;
4006 } else {
4007 pr_err("QSEE %s app, arch %u is not supported\n",
4008 appname, *app_arch);
4009 ret = -EIO;
4010 goto err;
4011 }
4012 pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
4013 release_firmware(fw_entry);
4014 fw_entry = NULL;
4015 for (i = 0; i < num_images; i++) {
4016 memset(fw_name, 0, sizeof(fw_name));
4017 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4018 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4019 if (ret)
4020 goto err;
4021 if (*fw_size > U32_MAX - fw_entry->size) {
4022 pr_err("QSEE %s app file size overflow\n", appname);
4023 ret = -EINVAL;
4024 goto err;
4025 }
4026 *fw_size += fw_entry->size;
4027 release_firmware(fw_entry);
4028 fw_entry = NULL;
4029 }
4030
4031 return ret;
4032err:
4033 if (fw_entry)
4034 release_firmware(fw_entry);
4035 *fw_size = 0;
4036 return ret;
4037}
4038
4039static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
4040 uint32_t fw_size,
4041 struct qseecom_load_app_ireq *load_req)
4042{
4043 int ret = -1;
4044 int i = 0, rc = 0;
4045 const struct firmware *fw_entry = NULL;
4046 char fw_name[MAX_APP_NAME_SIZE];
4047 u8 *img_data_ptr = img_data;
4048 struct elf32_hdr *ehdr;
4049 struct elf64_hdr *ehdr64;
4050 int num_images = 0;
4051 unsigned char app_arch = 0;
4052
4053 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4054 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4055 if (rc) {
4056 ret = -EIO;
4057 goto err;
4058 }
4059
4060 load_req->img_len = fw_entry->size;
4061 if (load_req->img_len > fw_size) {
4062 pr_err("app %s size %zu is larger than buf size %u\n",
4063 appname, fw_entry->size, fw_size);
4064 ret = -EINVAL;
4065 goto err;
4066 }
4067 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4068 img_data_ptr = img_data_ptr + fw_entry->size;
4069 load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
4070
4071 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4072 if (app_arch == ELFCLASS32) {
4073 ehdr = (struct elf32_hdr *)fw_entry->data;
4074 num_images = ehdr->e_phnum;
4075 } else if (app_arch == ELFCLASS64) {
4076 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4077 num_images = ehdr64->e_phnum;
4078 } else {
4079 pr_err("QSEE %s app, arch %u is not supported\n",
4080 appname, app_arch);
4081 ret = -EIO;
4082 goto err;
4083 }
4084 release_firmware(fw_entry);
4085 fw_entry = NULL;
4086 for (i = 0; i < num_images; i++) {
4087 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4088 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4089 if (ret) {
4090 pr_err("Failed to locate blob %s\n", fw_name);
4091 goto err;
4092 }
4093 if ((fw_entry->size > U32_MAX - load_req->img_len) ||
4094 (fw_entry->size + load_req->img_len > fw_size)) {
4095 pr_err("Invalid file size for %s\n", fw_name);
4096 ret = -EINVAL;
4097 goto err;
4098 }
4099 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4100 img_data_ptr = img_data_ptr + fw_entry->size;
4101 load_req->img_len += fw_entry->size;
4102 release_firmware(fw_entry);
4103 fw_entry = NULL;
4104 }
4105 return ret;
4106err:
4107 release_firmware(fw_entry);
4108 return ret;
4109}
4110
/*
 * __qseecom_allocate_img_data() - allocate an ION buffer for a TA image.
 *
 * Allocates @fw_size bytes from the QSECOM TA ION heap (retrying with a
 * delay, since the carved-out heap may be transiently exhausted), maps it
 * into kernel space, and looks up its physical address.
 *
 * On success returns 0 and fills in @pihandle (ION handle), @data (kernel
 * virtual address), and @paddr (physical address); ownership of the handle
 * and mapping passes to the caller (freed via __qseecom_free_img_data()).
 * On failure everything acquired here is released and a negative errno is
 * returned.
 */
static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
{
	size_t len = 0;
	int ret = 0;
	ion_phys_addr_t pa;
	struct ion_handle *ihandle = NULL;
	u8 *img_data = NULL;
	int retry = 0;

	/* First pass allocates immediately; later passes sleep first. */
	do {
		if (retry++)
			msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
		ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
			SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), 0);
	} while (IS_ERR_OR_NULL(ihandle) &&
		(retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));

	if (IS_ERR_OR_NULL(ihandle)) {
		pr_err("ION alloc failed\n");
		return -ENOMEM;
	}
	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
					ihandle);

	if (IS_ERR_OR_NULL(img_data)) {
		pr_err("ION memory mapping for image loading failed\n");
		ret = -ENOMEM;
		goto exit_ion_free;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
	if (ret) {
		pr_err("physical memory retrieval failure\n");
		ret = -EIO;
		goto exit_ion_unmap_kernel;
	}

	/* Success: hand all three resources to the caller. */
	*pihandle = ihandle;
	*data = img_data;
	*paddr = pa;
	return ret;

exit_ion_unmap_kernel:
	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
exit_ion_free:
	ion_free(qseecom.ion_clnt, ihandle);
	ihandle = NULL;
	return ret;
}
4161
4162static void __qseecom_free_img_data(struct ion_handle **ihandle)
4163{
4164 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4165 ion_free(qseecom.ion_clnt, *ihandle);
4166 *ihandle = NULL;
4167}
4168
4169static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4170 uint32_t *app_id)
4171{
4172 int ret = -1;
4173 uint32_t fw_size = 0;
4174 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4175 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4176 struct qseecom_command_scm_resp resp;
4177 u8 *img_data = NULL;
4178 ion_phys_addr_t pa = 0;
4179 struct ion_handle *ihandle = NULL;
4180 void *cmd_buf = NULL;
4181 size_t cmd_len;
4182 uint32_t app_arch = 0;
4183
4184 if (!data || !appname || !app_id) {
4185 pr_err("Null pointer to data or appname or appid\n");
4186 return -EINVAL;
4187 }
4188 *app_id = 0;
4189 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4190 return -EIO;
4191 data->client.app_arch = app_arch;
4192
4193 /* Check and load cmnlib */
4194 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4195 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4196 ret = qseecom_load_commonlib_image(data, "cmnlib");
4197 if (ret) {
4198 pr_err("failed to load cmnlib\n");
4199 return -EIO;
4200 }
4201 qseecom.commonlib_loaded = true;
4202 pr_debug("cmnlib is loaded\n");
4203 }
4204
4205 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4206 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4207 if (ret) {
4208 pr_err("failed to load cmnlib64\n");
4209 return -EIO;
4210 }
4211 qseecom.commonlib64_loaded = true;
4212 pr_debug("cmnlib64 is loaded\n");
4213 }
4214 }
4215
4216 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4217 if (ret)
4218 return ret;
4219
4220 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4221 if (ret) {
4222 ret = -EIO;
4223 goto exit_free_img_data;
4224 }
4225
4226 /* Populate the load_req parameters */
4227 if (qseecom.qsee_version < QSEE_VERSION_40) {
4228 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4229 load_req.mdt_len = load_req.mdt_len;
4230 load_req.img_len = load_req.img_len;
4231 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4232 load_req.phy_addr = (uint32_t)pa;
4233 cmd_buf = (void *)&load_req;
4234 cmd_len = sizeof(struct qseecom_load_app_ireq);
4235 } else {
4236 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4237 load_req_64bit.mdt_len = load_req.mdt_len;
4238 load_req_64bit.img_len = load_req.img_len;
4239 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4240 load_req_64bit.phy_addr = (uint64_t)pa;
4241 cmd_buf = (void *)&load_req_64bit;
4242 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4243 }
4244
4245 if (qseecom.support_bus_scaling) {
4246 mutex_lock(&qsee_bw_mutex);
4247 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4248 mutex_unlock(&qsee_bw_mutex);
4249 if (ret) {
4250 ret = -EIO;
4251 goto exit_free_img_data;
4252 }
4253 }
4254
4255 ret = __qseecom_enable_clk_scale_up(data);
4256 if (ret) {
4257 ret = -EIO;
4258 goto exit_unregister_bus_bw_need;
4259 }
4260
4261 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4262 img_data, fw_size,
4263 ION_IOC_CLEAN_INV_CACHES);
4264 if (ret) {
4265 pr_err("cache operation failed %d\n", ret);
4266 goto exit_disable_clk_vote;
4267 }
4268
4269 /* SCM_CALL to load the image */
4270 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4271 &resp, sizeof(resp));
4272 if (ret) {
4273 pr_err("scm_call to load failed : ret %d\n", ret);
4274 ret = -EIO;
4275 goto exit_disable_clk_vote;
4276 }
4277
4278 switch (resp.result) {
4279 case QSEOS_RESULT_SUCCESS:
4280 *app_id = resp.data;
4281 break;
4282 case QSEOS_RESULT_INCOMPLETE:
4283 ret = __qseecom_process_incomplete_cmd(data, &resp);
4284 if (ret)
4285 pr_err("process_incomplete_cmd FAILED\n");
4286 else
4287 *app_id = resp.data;
4288 break;
4289 case QSEOS_RESULT_FAILURE:
4290 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4291 break;
4292 default:
4293 pr_err("scm call return unknown response %d\n", resp.result);
4294 ret = -EINVAL;
4295 break;
4296 }
4297
4298exit_disable_clk_vote:
4299 __qseecom_disable_clk_scale_down(data);
4300
4301exit_unregister_bus_bw_need:
4302 if (qseecom.support_bus_scaling) {
4303 mutex_lock(&qsee_bw_mutex);
4304 qseecom_unregister_bus_bandwidth_needs(data);
4305 mutex_unlock(&qsee_bw_mutex);
4306 }
4307
4308exit_free_img_data:
4309 __qseecom_free_img_data(&ihandle);
4310 return ret;
4311}
4312
/*
 * qseecom_load_commonlib_image() - load a QSEE common library (cmnlib or
 * cmnlib64) into the secure environment.
 *
 * Mirrors the app-load path: measure the library firmware, copy it into a
 * dedicated ION buffer, vote bus bandwidth and crypto clocks, flush the
 * buffer, and issue the LOAD_SERV_IMAGE SCM command.  The ION buffer is
 * always released before returning - TZ keeps its own copy of the library.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;
	struct ion_handle *cmnlib_ion_handle = NULL;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	/* Pick 32- vs 64-bit request layout based on the QSEE version */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* Flush the image so TZ reads coherent data from memory */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener services before the load can finish */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&cmnlib_ion_handle);
	return ret;
}
4433
4434static int qseecom_unload_commonlib_image(void)
4435{
4436 int ret = -EINVAL;
4437 struct qseecom_unload_lib_image_ireq unload_req = {0};
4438 struct qseecom_command_scm_resp resp;
4439
4440 /* Populate the remaining parameters */
4441 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4442
4443 /* SCM_CALL to load the image */
4444 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4445 sizeof(struct qseecom_unload_lib_image_ireq),
4446 &resp, sizeof(resp));
4447 if (ret) {
4448 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4449 ret = -EIO;
4450 } else {
4451 switch (resp.result) {
4452 case QSEOS_RESULT_SUCCESS:
4453 break;
4454 case QSEOS_RESULT_FAILURE:
4455 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4456 break;
4457 default:
4458 pr_err("scm call return unknown response %d\n",
4459 resp.result);
4460 ret = -EINVAL;
4461 break;
4462 }
4463 }
4464
4465 return ret;
4466}
4467
4468int qseecom_start_app(struct qseecom_handle **handle,
4469 char *app_name, uint32_t size)
4470{
4471 int32_t ret = 0;
4472 unsigned long flags = 0;
4473 struct qseecom_dev_handle *data = NULL;
4474 struct qseecom_check_app_ireq app_ireq;
4475 struct qseecom_registered_app_list *entry = NULL;
4476 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4477 bool found_app = false;
4478 size_t len;
4479 ion_phys_addr_t pa;
4480 uint32_t fw_size, app_arch;
4481 uint32_t app_id = 0;
4482
4483 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4484 pr_err("Not allowed to be called in %d state\n",
4485 atomic_read(&qseecom.qseecom_state));
4486 return -EPERM;
4487 }
4488 if (!app_name) {
4489 pr_err("failed to get the app name\n");
4490 return -EINVAL;
4491 }
4492
Zhen Kong64a6d7282017-06-16 11:55:07 -07004493 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004494 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004495 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004496 return -EINVAL;
4497 }
4498
4499 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4500 if (!(*handle))
4501 return -ENOMEM;
4502
4503 data = kzalloc(sizeof(*data), GFP_KERNEL);
4504 if (!data) {
4505 if (ret == 0) {
4506 kfree(*handle);
4507 *handle = NULL;
4508 }
4509 return -ENOMEM;
4510 }
4511 data->abort = 0;
4512 data->type = QSEECOM_CLIENT_APP;
4513 data->released = false;
4514 data->client.sb_length = size;
4515 data->client.user_virt_sb_base = 0;
4516 data->client.ihandle = NULL;
4517
4518 init_waitqueue_head(&data->abort_wq);
4519
4520 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4521 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4522 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4523 pr_err("Ion client could not retrieve the handle\n");
4524 kfree(data);
4525 kfree(*handle);
4526 *handle = NULL;
4527 return -EINVAL;
4528 }
4529 mutex_lock(&app_access_lock);
4530
4531 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4532 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4533 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4534 if (ret)
4535 goto err;
4536
4537 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4538 if (app_id) {
4539 pr_warn("App id %d for [%s] app exists\n", app_id,
4540 (char *)app_ireq.app_name);
4541 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4542 list_for_each_entry(entry,
4543 &qseecom.registered_app_list_head, list){
4544 if (entry->app_id == app_id) {
4545 entry->ref_cnt++;
4546 found_app = true;
4547 break;
4548 }
4549 }
4550 spin_unlock_irqrestore(
4551 &qseecom.registered_app_list_lock, flags);
4552 if (!found_app)
4553 pr_warn("App_id %d [%s] was loaded but not registered\n",
4554 ret, (char *)app_ireq.app_name);
4555 } else {
4556 /* load the app and get the app_id */
4557 pr_debug("%s: Loading app for the first time'\n",
4558 qseecom.pdev->init_name);
4559 ret = __qseecom_load_fw(data, app_name, &app_id);
4560 if (ret < 0)
4561 goto err;
4562 }
4563 data->client.app_id = app_id;
4564 if (!found_app) {
4565 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4566 if (!entry) {
4567 pr_err("kmalloc for app entry failed\n");
4568 ret = -ENOMEM;
4569 goto err;
4570 }
4571 entry->app_id = app_id;
4572 entry->ref_cnt = 1;
4573 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4574 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4575 ret = -EIO;
4576 kfree(entry);
4577 goto err;
4578 }
4579 entry->app_arch = app_arch;
4580 entry->app_blocked = false;
4581 entry->blocked_on_listener_id = 0;
4582 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4583 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4584 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4585 flags);
4586 }
4587
4588 /* Get the physical address of the ION BUF */
4589 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4590 if (ret) {
4591 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4592 ret);
4593 goto err;
4594 }
4595
4596 /* Populate the structure for sending scm call to load image */
4597 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4598 data->client.ihandle);
4599 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4600 pr_err("ION memory mapping for client shared buf failed\n");
4601 ret = -ENOMEM;
4602 goto err;
4603 }
4604 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4605 data->client.sb_phys = (phys_addr_t)pa;
4606 (*handle)->dev = (void *)data;
4607 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4608 (*handle)->sbuf_len = data->client.sb_length;
4609
4610 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4611 if (!kclient_entry) {
4612 ret = -ENOMEM;
4613 goto err;
4614 }
4615 kclient_entry->handle = *handle;
4616
4617 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4618 list_add_tail(&kclient_entry->list,
4619 &qseecom.registered_kclient_list_head);
4620 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4621
4622 mutex_unlock(&app_access_lock);
4623 return 0;
4624
4625err:
4626 kfree(data);
4627 kfree(*handle);
4628 *handle = NULL;
4629 mutex_unlock(&app_access_lock);
4630 return ret;
4631}
4632EXPORT_SYMBOL(qseecom_start_app);
4633
4634int qseecom_shutdown_app(struct qseecom_handle **handle)
4635{
4636 int ret = -EINVAL;
4637 struct qseecom_dev_handle *data;
4638
4639 struct qseecom_registered_kclient_list *kclient = NULL;
4640 unsigned long flags = 0;
4641 bool found_handle = false;
4642
4643 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4644 pr_err("Not allowed to be called in %d state\n",
4645 atomic_read(&qseecom.qseecom_state));
4646 return -EPERM;
4647 }
4648
4649 if ((handle == NULL) || (*handle == NULL)) {
4650 pr_err("Handle is not initialized\n");
4651 return -EINVAL;
4652 }
4653 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4654 mutex_lock(&app_access_lock);
4655
4656 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4657 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4658 list) {
4659 if (kclient->handle == (*handle)) {
4660 list_del(&kclient->list);
4661 found_handle = true;
4662 break;
4663 }
4664 }
4665 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4666 if (!found_handle)
4667 pr_err("Unable to find the handle, exiting\n");
4668 else
4669 ret = qseecom_unload_app(data, false);
4670
4671 mutex_unlock(&app_access_lock);
4672 if (ret == 0) {
4673 kzfree(data);
4674 kzfree(*handle);
4675 kzfree(kclient);
4676 *handle = NULL;
4677 }
4678
4679 return ret;
4680}
4681EXPORT_SYMBOL(qseecom_shutdown_app);
4682
4683int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
4684 uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
4685{
4686 int ret = 0;
4687 struct qseecom_send_cmd_req req = {0, 0, 0, 0};
4688 struct qseecom_dev_handle *data;
4689 bool perf_enabled = false;
4690
4691 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4692 pr_err("Not allowed to be called in %d state\n",
4693 atomic_read(&qseecom.qseecom_state));
4694 return -EPERM;
4695 }
4696
4697 if (handle == NULL) {
4698 pr_err("Handle is not initialized\n");
4699 return -EINVAL;
4700 }
4701 data = handle->dev;
4702
4703 req.cmd_req_len = sbuf_len;
4704 req.resp_len = rbuf_len;
4705 req.cmd_req_buf = send_buf;
4706 req.resp_buf = resp_buf;
4707
4708 if (__validate_send_cmd_inputs(data, &req))
4709 return -EINVAL;
4710
4711 mutex_lock(&app_access_lock);
4712 if (qseecom.support_bus_scaling) {
4713 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
4714 if (ret) {
4715 pr_err("Failed to set bw.\n");
4716 mutex_unlock(&app_access_lock);
4717 return ret;
4718 }
4719 }
4720 /*
4721 * On targets where crypto clock is handled by HLOS,
4722 * if clk_access_cnt is zero and perf_enabled is false,
4723 * then the crypto clock was not enabled before sending cmd
4724 * to tz, qseecom will enable the clock to avoid service failure.
4725 */
4726 if (!qseecom.no_clock_support &&
4727 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
4728 pr_debug("ce clock is not enabled!\n");
4729 ret = qseecom_perf_enable(data);
4730 if (ret) {
4731 pr_err("Failed to vote for clock with err %d\n",
4732 ret);
4733 mutex_unlock(&app_access_lock);
4734 return -EINVAL;
4735 }
4736 perf_enabled = true;
4737 }
4738 if (!strcmp(data->client.app_name, "securemm"))
4739 data->use_legacy_cmd = true;
4740
4741 ret = __qseecom_send_cmd(data, &req);
4742 data->use_legacy_cmd = false;
4743 if (qseecom.support_bus_scaling)
4744 __qseecom_add_bw_scale_down_timer(
4745 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
4746
4747 if (perf_enabled) {
4748 qsee_disable_clock_vote(data, CLK_DFAB);
4749 qsee_disable_clock_vote(data, CLK_SFPB);
4750 }
4751
4752 mutex_unlock(&app_access_lock);
4753
4754 if (ret)
4755 return ret;
4756
4757 pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
4758 req.resp_len, req.resp_buf);
4759 return ret;
4760}
4761EXPORT_SYMBOL(qseecom_send_command);
4762
4763int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4764{
4765 int ret = 0;
4766
4767 if ((handle == NULL) || (handle->dev == NULL)) {
4768 pr_err("No valid kernel client\n");
4769 return -EINVAL;
4770 }
4771 if (high) {
4772 if (qseecom.support_bus_scaling) {
4773 mutex_lock(&qsee_bw_mutex);
4774 __qseecom_register_bus_bandwidth_needs(handle->dev,
4775 HIGH);
4776 mutex_unlock(&qsee_bw_mutex);
4777 } else {
4778 ret = qseecom_perf_enable(handle->dev);
4779 if (ret)
4780 pr_err("Failed to vote for clock with err %d\n",
4781 ret);
4782 }
4783 } else {
4784 if (!qseecom.support_bus_scaling) {
4785 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4786 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4787 } else {
4788 mutex_lock(&qsee_bw_mutex);
4789 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4790 mutex_unlock(&qsee_bw_mutex);
4791 }
4792 }
4793 return ret;
4794}
4795EXPORT_SYMBOL(qseecom_set_bandwidth);
4796
/*
 * qseecom_process_listener_from_smcinvoke() - service a listener request
 * that arrived via the smcinvoke driver rather than qseecom's own SCM path.
 *
 * Repackages the SCM return words into a qseecom response (desc->ret[0] is
 * the TZ result/command, ret[1] the resp_type/session id, ret[2] the
 * listener id), runs the normal incomplete/reentrancy processing under
 * app_access_lock with dummy app/client records, then writes the final
 * response back into desc->ret[] for smcinvoke to return to TZ.
 */
int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
{
	struct qseecom_registered_app_list dummy_app_entry = { {0} };
	struct qseecom_dev_handle dummy_private_data = {0};
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	if (!desc) {
		pr_err("desc is NULL\n");
		return -EINVAL;
	}

	resp.result = desc->ret[0];	/*req_cmd*/
	resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
	resp.data = desc->ret[2];	/*listener_id*/

	/* ret[1] doubles as the app/session id for the dummy records */
	dummy_private_data.client.app_id = desc->ret[1];
	dummy_app_entry.app_id = desc->ret[1];

	mutex_lock(&app_access_lock);
	if (qseecom.qsee_reentrancy_support)
		ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
					&dummy_private_data);
	else
		ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
					&resp);
	mutex_unlock(&app_access_lock);
	if (ret)
		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
			(int)desc->ret[0], (int)desc->ret[2],
			(int)desc->ret[1], ret);
	/* Propagate the processed response back through the SCM descriptor */
	desc->ret[0] = resp.result;
	desc->ret[1] = resp.resp_type;
	desc->ret[2] = resp.data;
	return ret;
}
EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4833EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4834
/*
 * qseecom_send_resp() - mark a listener response as ready and wake any
 * thread waiting on the global response queue.  Always returns 0.
 */
static int qseecom_send_resp(void)
{
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4841
4842static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4843{
4844 struct qseecom_registered_listener_list *this_lstnr = NULL;
4845
4846 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4847 this_lstnr = __qseecom_find_svc(data->listener.id);
4848 if (this_lstnr == NULL)
4849 return -EINVAL;
4850 qseecom.send_resp_flag = 1;
4851 this_lstnr->send_resp_flag = 1;
4852 wake_up_interruptible(&qseecom.send_resp_wq);
4853 return 0;
4854}
4855
/*
 * __validate_send_modfd_resp_inputs() - sanity-check a userspace listener
 * response before its buffer pointer is translated and used.
 *
 * Verifies that the response buffer is non-NULL, its length is within the
 * listener's shared buffer, the pointer arithmetic cannot overflow, the
 * buffer lies wholly inside the listener's shared-memory window, and every
 * ion-fd command-buffer offset falls inside the response.  The check order
 * matters: the overflow guards must precede the range comparisons that
 * rely on them.  Returns 0 if valid, -EINVAL otherwise.
 */
static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
			struct qseecom_send_modfd_listener_resp *resp,
			struct qseecom_registered_listener_list *this_lstnr)
{
	int i;

	if (!data || !resp || !this_lstnr) {
		pr_err("listener handle or resp msg is null\n");
		return -EINVAL;
	}

	if (resp->resp_buf_ptr == NULL) {
		pr_err("resp buffer is null\n");
		return -EINVAL;
	}
	/* validate resp buf length */
	if ((resp->resp_len == 0) ||
			(resp->resp_len > this_lstnr->sb_length)) {
		pr_err("resp buf length %d not valid\n", resp->resp_len);
		return -EINVAL;
	}

	if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)this_lstnr->user_virt_sb_base >
					(ULONG_MAX - this_lstnr->sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* validate resp buf */
	if (((uintptr_t)resp->resp_buf_ptr <
		(uintptr_t)this_lstnr->user_virt_sb_base) ||
		((uintptr_t)resp->resp_buf_ptr >=
		((uintptr_t)this_lstnr->user_virt_sb_base +
				this_lstnr->sb_length)) ||
		(((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
		((uintptr_t)this_lstnr->user_virt_sb_base +
						this_lstnr->sb_length))) {
		pr_err("resp buf is out of shared buffer region\n");
		return -EINVAL;
	}

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
			pr_err("Invalid offset %d = 0x%x\n",
				i, resp->ifd_data[i].cmd_buf_offset);
			return -EINVAL;
		}
	}

	return 0;
}
4911
4912static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4913 void __user *argp, bool is_64bit_addr)
4914{
4915 struct qseecom_send_modfd_listener_resp resp;
4916 struct qseecom_registered_listener_list *this_lstnr = NULL;
4917
4918 if (copy_from_user(&resp, argp, sizeof(resp))) {
4919 pr_err("copy_from_user failed");
4920 return -EINVAL;
4921 }
4922
4923 this_lstnr = __qseecom_find_svc(data->listener.id);
4924 if (this_lstnr == NULL)
4925 return -EINVAL;
4926
4927 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
4928 return -EINVAL;
4929
4930 resp.resp_buf_ptr = this_lstnr->sb_virt +
4931 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
4932
4933 if (!is_64bit_addr)
4934 __qseecom_update_cmd_buf(&resp, false, data);
4935 else
4936 __qseecom_update_cmd_buf_64(&resp, false, data);
4937 qseecom.send_resp_flag = 1;
4938 this_lstnr->send_resp_flag = 1;
4939 wake_up_interruptible(&qseecom.send_resp_wq);
4940 return 0;
4941}
4942
/* 32-bit-address wrapper for the modfd listener response ioctl path. */
static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, false);
}
4948
/* 64-bit-address wrapper for the modfd listener response ioctl path. */
static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, true);
}
4954
4955static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
4956 void __user *argp)
4957{
4958 struct qseecom_qseos_version_req req;
4959
4960 if (copy_from_user(&req, argp, sizeof(req))) {
4961 pr_err("copy_from_user failed");
4962 return -EINVAL;
4963 }
4964 req.qseos_version = qseecom.qseos_version;
4965 if (copy_to_user(argp, &req, sizeof(req))) {
4966 pr_err("copy_to_user failed");
4967 return -EINVAL;
4968 }
4969 return 0;
4970}
4971
/*
 * __qseecom_enable_clk() - take a reference on the crypto-engine clocks for
 * the given CE instance (CLK_QSEE or CLK_CE_DRV).
 *
 * Under clk_access_lock: if the clocks are already on, just bump the
 * refcount; otherwise enable core, interface, and bus clocks in order,
 * unwinding in reverse on any failure.  Returns 0 on success, -EINVAL for
 * an unknown CE instance, -EIO on enable failure or refcount saturation.
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	/* Already enabled: just take another reference. */
	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

/* Unwind in reverse order of enabling. */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
5039
/*
 * Drop a reference on the crypto-engine clocks for @ce.  The last
 * reference disables and unprepares the interface, core and bus clocks.
 * An unbalanced call (refcount already zero) is silently ignored.
 */
static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
{
	struct qseecom_clk *qclk;

	/* Targets without SW-controlled CE clocks: nothing to do. */
	if (qseecom.no_clock_support)
		return;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	else
		qclk = &qseecom.ce_drv;

	mutex_lock(&clk_access_lock);

	/* Unbalanced disable: nothing to drop. */
	if (qclk->clk_access_cnt == 0) {
		mutex_unlock(&clk_access_lock);
		return;
	}

	/* Last reference: actually turn the clocks off. */
	if (qclk->clk_access_cnt == 1) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
		if (qclk->ce_core_clk != NULL)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
	}
	qclk->clk_access_cnt--;
	mutex_unlock(&clk_access_lock);
}
5070
/*
 * Take a bandwidth vote of kind @clk_type on behalf of client handle @data.
 *
 * CLK_DFAB votes bump qsee_bw_count and set data->perf_enabled;
 * CLK_SFPB votes bump qsee_sfpb_bw_count and set data->fast_load_enabled.
 * The first voter of either kind programs the bus-scale level:
 * level 3 when both vote kinds are active, 1 for DFAB only, 2 for SFPB
 * only.  When the CE core source clock is SW-controlled, the QSEE clocks
 * are enabled before the bus request (and released again on failure).
 *
 * Returns 0 on success or the msm_bus/clk error code.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			/* First DFAB voter: pick level 3 if SFPB also active. */
			if (qseecom.qsee_sfpb_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					/* Undo clock vote if bus request failed. */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			/* Already voted: just add a reference. */
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			/* First SFPB voter: pick level 3 if DFAB also active. */
			if (qseecom.qsee_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					/* Undo clock vote if bus request failed. */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}

			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			/* Already voted: just add a reference. */
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
5154
5155static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5156 int32_t clk_type)
5157{
5158 int32_t ret = 0;
5159 struct qseecom_clk *qclk;
5160
5161 qclk = &qseecom.qsee;
5162
5163 if (qseecom.no_clock_support)
5164 return;
5165 if (!qseecom.qsee_perf_client)
5166 return;
5167
5168 switch (clk_type) {
5169 case CLK_DFAB:
5170 mutex_lock(&qsee_bw_mutex);
5171 if (qseecom.qsee_bw_count == 0) {
5172 pr_err("Client error.Extra call to disable DFAB clk\n");
5173 mutex_unlock(&qsee_bw_mutex);
5174 return;
5175 }
5176
5177 if (qseecom.qsee_bw_count == 1) {
5178 if (qseecom.qsee_sfpb_bw_count > 0)
5179 ret = msm_bus_scale_client_update_request(
5180 qseecom.qsee_perf_client, 2);
5181 else {
5182 ret = msm_bus_scale_client_update_request(
5183 qseecom.qsee_perf_client, 0);
5184 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5185 __qseecom_disable_clk(CLK_QSEE);
5186 }
5187 if (ret)
5188 pr_err("SFPB Bandwidth req fail (%d)\n",
5189 ret);
5190 else {
5191 qseecom.qsee_bw_count--;
5192 data->perf_enabled = false;
5193 }
5194 } else {
5195 qseecom.qsee_bw_count--;
5196 data->perf_enabled = false;
5197 }
5198 mutex_unlock(&qsee_bw_mutex);
5199 break;
5200 case CLK_SFPB:
5201 mutex_lock(&qsee_bw_mutex);
5202 if (qseecom.qsee_sfpb_bw_count == 0) {
5203 pr_err("Client error.Extra call to disable SFPB clk\n");
5204 mutex_unlock(&qsee_bw_mutex);
5205 return;
5206 }
5207 if (qseecom.qsee_sfpb_bw_count == 1) {
5208 if (qseecom.qsee_bw_count > 0)
5209 ret = msm_bus_scale_client_update_request(
5210 qseecom.qsee_perf_client, 1);
5211 else {
5212 ret = msm_bus_scale_client_update_request(
5213 qseecom.qsee_perf_client, 0);
5214 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5215 __qseecom_disable_clk(CLK_QSEE);
5216 }
5217 if (ret)
5218 pr_err("SFPB Bandwidth req fail (%d)\n",
5219 ret);
5220 else {
5221 qseecom.qsee_sfpb_bw_count--;
5222 data->fast_load_enabled = false;
5223 }
5224 } else {
5225 qseecom.qsee_sfpb_bw_count--;
5226 data->fast_load_enabled = false;
5227 }
5228 mutex_unlock(&qsee_bw_mutex);
5229 break;
5230 default:
5231 pr_err("Clock type not defined\n");
5232 break;
5233 }
5234
5235}
5236
5237static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5238 void __user *argp)
5239{
5240 struct ion_handle *ihandle; /* Ion handle */
5241 struct qseecom_load_img_req load_img_req;
5242 int uret = 0;
5243 int ret;
5244 ion_phys_addr_t pa = 0;
5245 size_t len;
5246 struct qseecom_load_app_ireq load_req;
5247 struct qseecom_load_app_64bit_ireq load_req_64bit;
5248 struct qseecom_command_scm_resp resp;
5249 void *cmd_buf = NULL;
5250 size_t cmd_len;
5251 /* Copy the relevant information needed for loading the image */
5252 if (copy_from_user(&load_img_req,
5253 (void __user *)argp,
5254 sizeof(struct qseecom_load_img_req))) {
5255 pr_err("copy_from_user failed\n");
5256 return -EFAULT;
5257 }
5258
5259 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005260 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005261 load_img_req.ifd_data_fd);
5262 if (IS_ERR_OR_NULL(ihandle)) {
5263 pr_err("Ion client could not retrieve the handle\n");
5264 return -ENOMEM;
5265 }
5266
5267 /* Get the physical address of the ION BUF */
5268 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5269 if (ret) {
5270 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5271 ret);
5272 return ret;
5273 }
5274 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5275 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5276 len, load_img_req.mdt_len,
5277 load_img_req.img_len);
5278 return ret;
5279 }
5280 /* Populate the structure for sending scm call to load image */
5281 if (qseecom.qsee_version < QSEE_VERSION_40) {
5282 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5283 load_req.mdt_len = load_img_req.mdt_len;
5284 load_req.img_len = load_img_req.img_len;
5285 load_req.phy_addr = (uint32_t)pa;
5286 cmd_buf = (void *)&load_req;
5287 cmd_len = sizeof(struct qseecom_load_app_ireq);
5288 } else {
5289 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5290 load_req_64bit.mdt_len = load_img_req.mdt_len;
5291 load_req_64bit.img_len = load_img_req.img_len;
5292 load_req_64bit.phy_addr = (uint64_t)pa;
5293 cmd_buf = (void *)&load_req_64bit;
5294 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5295 }
5296
5297 if (qseecom.support_bus_scaling) {
5298 mutex_lock(&qsee_bw_mutex);
5299 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5300 mutex_unlock(&qsee_bw_mutex);
5301 if (ret) {
5302 ret = -EIO;
5303 goto exit_cpu_restore;
5304 }
5305 }
5306
5307 /* Vote for the SFPB clock */
5308 ret = __qseecom_enable_clk_scale_up(data);
5309 if (ret) {
5310 ret = -EIO;
5311 goto exit_register_bus_bandwidth_needs;
5312 }
5313 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5314 ION_IOC_CLEAN_INV_CACHES);
5315 if (ret) {
5316 pr_err("cache operation failed %d\n", ret);
5317 goto exit_disable_clock;
5318 }
5319 /* SCM_CALL to load the external elf */
5320 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5321 &resp, sizeof(resp));
5322 if (ret) {
5323 pr_err("scm_call to load failed : ret %d\n",
5324 ret);
5325 ret = -EFAULT;
5326 goto exit_disable_clock;
5327 }
5328
5329 switch (resp.result) {
5330 case QSEOS_RESULT_SUCCESS:
5331 break;
5332 case QSEOS_RESULT_INCOMPLETE:
5333 pr_err("%s: qseos result incomplete\n", __func__);
5334 ret = __qseecom_process_incomplete_cmd(data, &resp);
5335 if (ret)
5336 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5337 break;
5338 case QSEOS_RESULT_FAILURE:
5339 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5340 ret = -EFAULT;
5341 break;
5342 default:
5343 pr_err("scm_call response result %d not supported\n",
5344 resp.result);
5345 ret = -EFAULT;
5346 break;
5347 }
5348
5349exit_disable_clock:
5350 __qseecom_disable_clk_scale_down(data);
5351
5352exit_register_bus_bandwidth_needs:
5353 if (qseecom.support_bus_scaling) {
5354 mutex_lock(&qsee_bw_mutex);
5355 uret = qseecom_unregister_bus_bandwidth_needs(data);
5356 mutex_unlock(&qsee_bw_mutex);
5357 if (uret)
5358 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5359 uret, ret);
5360 }
5361
5362exit_cpu_restore:
5363 /* Deallocate the handle */
5364 if (!IS_ERR_OR_NULL(ihandle))
5365 ion_free(qseecom.ion_clnt, ihandle);
5366 return ret;
5367}
5368
5369static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5370{
5371 int ret = 0;
5372 struct qseecom_command_scm_resp resp;
5373 struct qseecom_unload_app_ireq req;
5374
5375 /* unavailable client app */
5376 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5377
5378 /* Populate the structure for sending scm call to unload image */
5379 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5380
5381 /* SCM_CALL to unload the external elf */
5382 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5383 sizeof(struct qseecom_unload_app_ireq),
5384 &resp, sizeof(resp));
5385 if (ret) {
5386 pr_err("scm_call to unload failed : ret %d\n",
5387 ret);
5388 ret = -EFAULT;
5389 goto qseecom_unload_external_elf_scm_err;
5390 }
5391 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5392 ret = __qseecom_process_incomplete_cmd(data, &resp);
5393 if (ret)
5394 pr_err("process_incomplete_cmd fail err: %d\n",
5395 ret);
5396 } else {
5397 if (resp.result != QSEOS_RESULT_SUCCESS) {
5398 pr_err("scm_call to unload image failed resp.result =%d\n",
5399 resp.result);
5400 ret = -EFAULT;
5401 }
5402 }
5403
5404qseecom_unload_external_elf_scm_err:
5405
5406 return ret;
5407}
5408
5409static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5410 void __user *argp)
5411{
5412
5413 int32_t ret;
5414 struct qseecom_qseos_app_load_query query_req;
5415 struct qseecom_check_app_ireq req;
5416 struct qseecom_registered_app_list *entry = NULL;
5417 unsigned long flags = 0;
5418 uint32_t app_arch = 0, app_id = 0;
5419 bool found_app = false;
5420
5421 /* Copy the relevant information needed for loading the image */
5422 if (copy_from_user(&query_req,
5423 (void __user *)argp,
5424 sizeof(struct qseecom_qseos_app_load_query))) {
5425 pr_err("copy_from_user failed\n");
5426 return -EFAULT;
5427 }
5428
5429 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5430 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5431 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5432
5433 ret = __qseecom_check_app_exists(req, &app_id);
5434 if (ret) {
5435 pr_err(" scm call to check if app is loaded failed");
5436 return ret; /* scm call failed */
5437 }
5438 if (app_id) {
5439 pr_debug("App id %d (%s) already exists\n", app_id,
5440 (char *)(req.app_name));
5441 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5442 list_for_each_entry(entry,
5443 &qseecom.registered_app_list_head, list){
5444 if (entry->app_id == app_id) {
5445 app_arch = entry->app_arch;
5446 entry->ref_cnt++;
5447 found_app = true;
5448 break;
5449 }
5450 }
5451 spin_unlock_irqrestore(
5452 &qseecom.registered_app_list_lock, flags);
5453 data->client.app_id = app_id;
5454 query_req.app_id = app_id;
5455 if (app_arch) {
5456 data->client.app_arch = app_arch;
5457 query_req.app_arch = app_arch;
5458 } else {
5459 data->client.app_arch = 0;
5460 query_req.app_arch = 0;
5461 }
5462 strlcpy(data->client.app_name, query_req.app_name,
5463 MAX_APP_NAME_SIZE);
5464 /*
5465 * If app was loaded by appsbl before and was not registered,
5466 * regiser this app now.
5467 */
5468 if (!found_app) {
5469 pr_debug("Register app %d [%s] which was loaded before\n",
5470 ret, (char *)query_req.app_name);
5471 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5472 if (!entry) {
5473 pr_err("kmalloc for app entry failed\n");
5474 return -ENOMEM;
5475 }
5476 entry->app_id = app_id;
5477 entry->ref_cnt = 1;
5478 entry->app_arch = data->client.app_arch;
5479 strlcpy(entry->app_name, data->client.app_name,
5480 MAX_APP_NAME_SIZE);
5481 entry->app_blocked = false;
5482 entry->blocked_on_listener_id = 0;
5483 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5484 flags);
5485 list_add_tail(&entry->list,
5486 &qseecom.registered_app_list_head);
5487 spin_unlock_irqrestore(
5488 &qseecom.registered_app_list_lock, flags);
5489 }
5490 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5491 pr_err("copy_to_user failed\n");
5492 return -EFAULT;
5493 }
5494 return -EEXIST; /* app already loaded */
5495 } else {
5496 return 0; /* app not loaded */
5497 }
5498}
5499
5500static int __qseecom_get_ce_pipe_info(
5501 enum qseecom_key_management_usage_type usage,
5502 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5503{
5504 int ret = -EINVAL;
5505 int i, j;
5506 struct qseecom_ce_info_use *p = NULL;
5507 int total = 0;
5508 struct qseecom_ce_pipe_entry *pcepipe;
5509
5510 switch (usage) {
5511 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5512 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5513 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5514 if (qseecom.support_fde) {
5515 p = qseecom.ce_info.fde;
5516 total = qseecom.ce_info.num_fde;
5517 } else {
5518 pr_err("system does not support fde\n");
5519 return -EINVAL;
5520 }
5521 break;
5522 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5523 if (qseecom.support_pfe) {
5524 p = qseecom.ce_info.pfe;
5525 total = qseecom.ce_info.num_pfe;
5526 } else {
5527 pr_err("system does not support pfe\n");
5528 return -EINVAL;
5529 }
5530 break;
5531 default:
5532 pr_err("unsupported usage %d\n", usage);
5533 return -EINVAL;
5534 }
5535
5536 for (j = 0; j < total; j++) {
5537 if (p->unit_num == unit) {
5538 pcepipe = p->ce_pipe_entry;
5539 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5540 (*ce_hw)[i] = pcepipe->ce_num;
5541 *pipe = pcepipe->ce_pipe_pair;
5542 pcepipe++;
5543 }
5544 ret = 0;
5545 break;
5546 }
5547 p++;
5548 }
5549 return ret;
5550}
5551
/*
 * Ask the secure world to generate and persist a key for @usage by
 * issuing QSEOS_GENERATE_KEY with the QSEE clocks enabled.  A
 * "key id already exists" response is treated as success.
 *
 * Returns 0 on success, -EFAULT for usage/scm errors, -EINVAL when the
 * secure world reports failure.
 */
static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_generate_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_generate_ireq),
		&resp, sizeof(resp));
	if (ret) {
		/* An existing key id is not an error for this operation. */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
			pr_debug("Key ID exists.\n");
			ret = 0;
		} else {
			pr_err("scm call to generate key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto generate_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
		pr_debug("Key ID exists.\n");
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* Listener interaction pending: drive it to completion. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
				pr_debug("Key ID exists.\n");
				ret = 0;
			} else {
				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			}
		}
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("gen key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
generate_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5611
/*
 * Ask the secure world to delete the stored key for @usage by issuing
 * QSEOS_DELETE_KEY with the QSEE clocks enabled.
 *
 * Returns 0 on success, -ERANGE when the secure world reports the
 * maximum password attempts were reached, -EFAULT for usage/scm errors,
 * -EINVAL when the secure world reports failure.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* Map the max-attempts response to a distinct errno. */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* Listener interaction pending: drive it to completion. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5672
5673static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5674 enum qseecom_key_management_usage_type usage,
5675 struct qseecom_key_select_ireq *ireq)
5676{
5677 struct qseecom_command_scm_resp resp;
5678 int ret;
5679
5680 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5681 usage >= QSEOS_KM_USAGE_MAX) {
5682 pr_err("Error:: unsupported usage %d\n", usage);
5683 return -EFAULT;
5684 }
5685 ret = __qseecom_enable_clk(CLK_QSEE);
5686 if (ret)
5687 return ret;
5688
5689 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5690 ret = __qseecom_enable_clk(CLK_CE_DRV);
5691 if (ret)
5692 return ret;
5693 }
5694
5695 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5696 ireq, sizeof(struct qseecom_key_select_ireq),
5697 &resp, sizeof(struct qseecom_command_scm_resp));
5698 if (ret) {
5699 if (ret == -EINVAL &&
5700 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5701 pr_debug("Max attempts to input password reached.\n");
5702 ret = -ERANGE;
5703 } else if (ret == -EINVAL &&
5704 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5705 pr_debug("Set Key operation under processing...\n");
5706 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5707 } else {
5708 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5709 ret);
5710 ret = -EFAULT;
5711 }
5712 goto set_key_exit;
5713 }
5714
5715 switch (resp.result) {
5716 case QSEOS_RESULT_SUCCESS:
5717 break;
5718 case QSEOS_RESULT_INCOMPLETE:
5719 ret = __qseecom_process_incomplete_cmd(data, &resp);
5720 if (ret) {
5721 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5722 resp.result);
5723 if (resp.result ==
5724 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5725 pr_debug("Set Key operation under processing...\n");
5726 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5727 }
5728 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5729 pr_debug("Max attempts to input password reached.\n");
5730 ret = -ERANGE;
5731 }
5732 }
5733 break;
5734 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5735 pr_debug("Max attempts to input password reached.\n");
5736 ret = -ERANGE;
5737 break;
5738 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5739 pr_debug("Set Key operation under processing...\n");
5740 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5741 break;
5742 case QSEOS_RESULT_FAILURE:
5743 default:
5744 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5745 ret = -EINVAL;
5746 break;
5747 }
5748set_key_exit:
5749 __qseecom_disable_clk(CLK_QSEE);
5750 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5751 __qseecom_disable_clk(CLK_CE_DRV);
5752 return ret;
5753}
5754
/*
 * Update the user-info (password hash) bound to the current key by
 * issuing QSEOS_UPDATE_KEY_USERINFO with the QSEE clocks enabled.
 *
 * Returns 0 on success, QSEOS_RESULT_FAIL_PENDING_OPERATION (positive)
 * when the secure world asks the caller to retry, -EFAULT for
 * usage/scm errors, -EINVAL when the secure world reports failure.
 */
static int __qseecom_update_current_key_user_info(
		struct qseecom_dev_handle *data,
		enum qseecom_key_management_usage_type usage,
		struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
					ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
		/*
		 * NOTE(review): on the PENDING path execution falls through
		 * to the switch below with resp.result still set to the
		 * pending code — presumably intentional so the retry loop
		 * in callers sees the pending status; confirm.
		 */
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		/*
		 * NOTE(review): resp.result is checked before ret here, so a
		 * pending status overrides the incomplete-cmd return value —
		 * looks deliberate for the caller's retry loop; verify.
		 */
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5816
5817
5818static int qseecom_enable_ice_setup(int usage)
5819{
5820 int ret = 0;
5821
5822 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5823 ret = qcom_ice_setup_ice_hw("ufs", true);
5824 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5825 ret = qcom_ice_setup_ice_hw("sdcc", true);
5826
5827 return ret;
5828}
5829
5830static int qseecom_disable_ice_setup(int usage)
5831{
5832 int ret = 0;
5833
5834 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5835 ret = qcom_ice_setup_ice_hw("ufs", false);
5836 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5837 ret = qcom_ice_setup_ice_hw("sdcc", false);
5838
5839 return ret;
5840}
5841
5842static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5843{
5844 struct qseecom_ce_info_use *pce_info_use, *p;
5845 int total = 0;
5846 int i;
5847
5848 switch (usage) {
5849 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5850 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5851 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5852 p = qseecom.ce_info.fde;
5853 total = qseecom.ce_info.num_fde;
5854 break;
5855 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5856 p = qseecom.ce_info.pfe;
5857 total = qseecom.ce_info.num_pfe;
5858 break;
5859 default:
5860 pr_err("unsupported usage %d\n", usage);
5861 return -EINVAL;
5862 }
5863
5864 pce_info_use = NULL;
5865
5866 for (i = 0; i < total; i++) {
5867 if (p->unit_num == unit) {
5868 pce_info_use = p;
5869 break;
5870 }
5871 p++;
5872 }
5873 if (!pce_info_use) {
5874 pr_err("can not find %d\n", unit);
5875 return -EINVAL;
5876 }
5877 return pce_info_use->num_ce_pipe_entries;
5878}
5879
5880static int qseecom_create_key(struct qseecom_dev_handle *data,
5881 void __user *argp)
5882{
5883 int i;
5884 uint32_t *ce_hw = NULL;
5885 uint32_t pipe = 0;
5886 int ret = 0;
5887 uint32_t flags = 0;
5888 struct qseecom_create_key_req create_key_req;
5889 struct qseecom_key_generate_ireq generate_key_ireq;
5890 struct qseecom_key_select_ireq set_key_ireq;
5891 uint32_t entries = 0;
5892
5893 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5894 if (ret) {
5895 pr_err("copy_from_user failed\n");
5896 return ret;
5897 }
5898
5899 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5900 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5901 pr_err("unsupported usage %d\n", create_key_req.usage);
5902 ret = -EFAULT;
5903 return ret;
5904 }
5905 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5906 create_key_req.usage);
5907 if (entries <= 0) {
5908 pr_err("no ce instance for usage %d instance %d\n",
5909 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5910 ret = -EINVAL;
5911 return ret;
5912 }
5913
5914 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5915 if (!ce_hw) {
5916 ret = -ENOMEM;
5917 return ret;
5918 }
5919 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5920 DEFAULT_CE_INFO_UNIT);
5921 if (ret) {
5922 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5923 ret = -EINVAL;
5924 goto free_buf;
5925 }
5926
5927 if (qseecom.fde_key_size)
5928 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5929 else
5930 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5931
5932 generate_key_ireq.flags = flags;
5933 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5934 memset((void *)generate_key_ireq.key_id,
5935 0, QSEECOM_KEY_ID_SIZE);
5936 memset((void *)generate_key_ireq.hash32,
5937 0, QSEECOM_HASH_SIZE);
5938 memcpy((void *)generate_key_ireq.key_id,
5939 (void *)key_id_array[create_key_req.usage].desc,
5940 QSEECOM_KEY_ID_SIZE);
5941 memcpy((void *)generate_key_ireq.hash32,
5942 (void *)create_key_req.hash32,
5943 QSEECOM_HASH_SIZE);
5944
5945 ret = __qseecom_generate_and_save_key(data,
5946 create_key_req.usage, &generate_key_ireq);
5947 if (ret) {
5948 pr_err("Failed to generate key on storage: %d\n", ret);
5949 goto free_buf;
5950 }
5951
5952 for (i = 0; i < entries; i++) {
5953 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5954 if (create_key_req.usage ==
5955 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5956 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5957 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5958
5959 } else if (create_key_req.usage ==
5960 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5961 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5962 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5963
5964 } else {
5965 set_key_ireq.ce = ce_hw[i];
5966 set_key_ireq.pipe = pipe;
5967 }
5968 set_key_ireq.flags = flags;
5969
5970 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5971 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5972 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5973 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5974 memcpy((void *)set_key_ireq.key_id,
5975 (void *)key_id_array[create_key_req.usage].desc,
5976 QSEECOM_KEY_ID_SIZE);
5977 memcpy((void *)set_key_ireq.hash32,
5978 (void *)create_key_req.hash32,
5979 QSEECOM_HASH_SIZE);
5980 /*
5981 * It will return false if it is GPCE based crypto instance or
5982 * ICE is setup properly
5983 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07005984 ret = qseecom_enable_ice_setup(create_key_req.usage);
5985 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005986 goto free_buf;
5987
5988 do {
5989 ret = __qseecom_set_clear_ce_key(data,
5990 create_key_req.usage,
5991 &set_key_ireq);
5992 /*
5993 * wait a little before calling scm again to let other
5994 * processes run
5995 */
5996 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
5997 msleep(50);
5998
5999 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6000
6001 qseecom_disable_ice_setup(create_key_req.usage);
6002
6003 if (ret) {
6004 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6005 pipe, ce_hw[i], ret);
6006 goto free_buf;
6007 } else {
6008 pr_err("Set the key successfully\n");
6009 if ((create_key_req.usage ==
6010 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6011 (create_key_req.usage ==
6012 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6013 goto free_buf;
6014 }
6015 }
6016
6017free_buf:
6018 kzfree(ce_hw);
6019 return ret;
6020}
6021
6022static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6023 void __user *argp)
6024{
6025 uint32_t *ce_hw = NULL;
6026 uint32_t pipe = 0;
6027 int ret = 0;
6028 uint32_t flags = 0;
6029 int i, j;
6030 struct qseecom_wipe_key_req wipe_key_req;
6031 struct qseecom_key_delete_ireq delete_key_ireq;
6032 struct qseecom_key_select_ireq clear_key_ireq;
6033 uint32_t entries = 0;
6034
6035 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6036 if (ret) {
6037 pr_err("copy_from_user failed\n");
6038 return ret;
6039 }
6040
6041 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6042 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6043 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6044 ret = -EFAULT;
6045 return ret;
6046 }
6047
6048 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6049 wipe_key_req.usage);
6050 if (entries <= 0) {
6051 pr_err("no ce instance for usage %d instance %d\n",
6052 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6053 ret = -EINVAL;
6054 return ret;
6055 }
6056
6057 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6058 if (!ce_hw) {
6059 ret = -ENOMEM;
6060 return ret;
6061 }
6062
6063 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6064 DEFAULT_CE_INFO_UNIT);
6065 if (ret) {
6066 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6067 ret = -EINVAL;
6068 goto free_buf;
6069 }
6070
6071 if (wipe_key_req.wipe_key_flag) {
6072 delete_key_ireq.flags = flags;
6073 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6074 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6075 memcpy((void *)delete_key_ireq.key_id,
6076 (void *)key_id_array[wipe_key_req.usage].desc,
6077 QSEECOM_KEY_ID_SIZE);
6078 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6079
6080 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6081 &delete_key_ireq);
6082 if (ret) {
6083 pr_err("Failed to delete key from ssd storage: %d\n",
6084 ret);
6085 ret = -EFAULT;
6086 goto free_buf;
6087 }
6088 }
6089
6090 for (j = 0; j < entries; j++) {
6091 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6092 if (wipe_key_req.usage ==
6093 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6094 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6095 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6096 } else if (wipe_key_req.usage ==
6097 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6098 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6099 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6100 } else {
6101 clear_key_ireq.ce = ce_hw[j];
6102 clear_key_ireq.pipe = pipe;
6103 }
6104 clear_key_ireq.flags = flags;
6105 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6106 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6107 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6108 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6109
6110 /*
6111 * It will return false if it is GPCE based crypto instance or
6112 * ICE is setup properly
6113 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006114 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6115 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006116 goto free_buf;
6117
6118 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6119 &clear_key_ireq);
6120
6121 qseecom_disable_ice_setup(wipe_key_req.usage);
6122
6123 if (ret) {
6124 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6125 pipe, ce_hw[j], ret);
6126 ret = -EFAULT;
6127 goto free_buf;
6128 }
6129 }
6130
6131free_buf:
6132 kzfree(ce_hw);
6133 return ret;
6134}
6135
6136static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6137 void __user *argp)
6138{
6139 int ret = 0;
6140 uint32_t flags = 0;
6141 struct qseecom_update_key_userinfo_req update_key_req;
6142 struct qseecom_key_userinfo_update_ireq ireq;
6143
6144 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6145 if (ret) {
6146 pr_err("copy_from_user failed\n");
6147 return ret;
6148 }
6149
6150 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6151 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6152 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6153 return -EFAULT;
6154 }
6155
6156 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6157
6158 if (qseecom.fde_key_size)
6159 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6160 else
6161 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6162
6163 ireq.flags = flags;
6164 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6165 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6166 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6167 memcpy((void *)ireq.key_id,
6168 (void *)key_id_array[update_key_req.usage].desc,
6169 QSEECOM_KEY_ID_SIZE);
6170 memcpy((void *)ireq.current_hash32,
6171 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6172 memcpy((void *)ireq.new_hash32,
6173 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6174
6175 do {
6176 ret = __qseecom_update_current_key_user_info(data,
6177 update_key_req.usage,
6178 &ireq);
6179 /*
6180 * wait a little before calling scm again to let other
6181 * processes run
6182 */
6183 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6184 msleep(50);
6185
6186 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6187 if (ret) {
6188 pr_err("Failed to update key info: %d\n", ret);
6189 return ret;
6190 }
6191 return ret;
6192
6193}
6194static int qseecom_is_es_activated(void __user *argp)
6195{
6196 struct qseecom_is_es_activated_req req;
6197 struct qseecom_command_scm_resp resp;
6198 int ret;
6199
6200 if (qseecom.qsee_version < QSEE_VERSION_04) {
6201 pr_err("invalid qsee version\n");
6202 return -ENODEV;
6203 }
6204
6205 if (argp == NULL) {
6206 pr_err("arg is null\n");
6207 return -EINVAL;
6208 }
6209
6210 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6211 &req, sizeof(req), &resp, sizeof(resp));
6212 if (ret) {
6213 pr_err("scm_call failed\n");
6214 return ret;
6215 }
6216
6217 req.is_activated = resp.result;
6218 ret = copy_to_user(argp, &req, sizeof(req));
6219 if (ret) {
6220 pr_err("copy_to_user failed\n");
6221 return ret;
6222 }
6223
6224 return 0;
6225}
6226
6227static int qseecom_save_partition_hash(void __user *argp)
6228{
6229 struct qseecom_save_partition_hash_req req;
6230 struct qseecom_command_scm_resp resp;
6231 int ret;
6232
6233 memset(&resp, 0x00, sizeof(resp));
6234
6235 if (qseecom.qsee_version < QSEE_VERSION_04) {
6236 pr_err("invalid qsee version\n");
6237 return -ENODEV;
6238 }
6239
6240 if (argp == NULL) {
6241 pr_err("arg is null\n");
6242 return -EINVAL;
6243 }
6244
6245 ret = copy_from_user(&req, argp, sizeof(req));
6246 if (ret) {
6247 pr_err("copy_from_user failed\n");
6248 return ret;
6249 }
6250
6251 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6252 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6253 if (ret) {
6254 pr_err("qseecom_scm_call failed\n");
6255 return ret;
6256 }
6257
6258 return 0;
6259}
6260
/*
 * Cipher (encrypt or decrypt, per req.direction) a Device Integrity
 * Partition (DIP) buffer on behalf of MDTP by bouncing the user data
 * through page-aligned kernel buffers and issuing an SCM call to TZ.
 *
 * The do { } while (0) block is a single-exit idiom: any failure
 * `break`s to the shared kzfree() cleanup at the bottom, so both
 * bounce buffers are always released (kzfree(NULL) is a no-op).
 */
static int qseecom_mdtp_cipher_dip(void __user *argp)
{
	struct qseecom_mdtp_cipher_dip_req req;
	u32 tzbuflenin, tzbuflenout;
	char *tzbufin = NULL, *tzbufout = NULL;
	struct scm_desc desc = {0};
	int ret;

	do {
		/* Copy the parameters from userspace */
		if (argp == NULL) {
			pr_err("arg is null\n");
			ret = -EINVAL;
			break;
		}

		ret = copy_from_user(&req, argp, sizeof(req));
		if (ret) {
			/*
			 * NOTE(review): copy_from_user() returns bytes not
			 * copied, not an errno; that positive count is
			 * propagated as-is here -- consider -EFAULT.
			 */
			pr_err("copy_from_user failed, ret= %d\n", ret);
			break;
		}

		/* Bound-check both buffers against MAX_DIP before use. */
		if (req.in_buf == NULL || req.out_buf == NULL ||
			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
			req.direction > 1) {
			pr_err("invalid parameters\n");
			ret = -EINVAL;
			break;
		}

		/* Copy the input buffer from userspace to kernel space */
		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
		if (!tzbufin) {
			pr_err("error allocating in buffer\n");
			ret = -ENOMEM;
			break;
		}

		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
		if (ret) {
			pr_err("copy_from_user failed, ret=%d\n", ret);
			break;
		}

		/* Flush to DDR so TZ sees the data via its physical view. */
		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);

		/* Prepare the output buffer in kernel space */
		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
		if (!tzbufout) {
			pr_err("error allocating out buffer\n");
			ret = -ENOMEM;
			break;
		}

		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);

		/* Send the command to TZ */
		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
		desc.args[0] = virt_to_phys(tzbufin);
		desc.args[1] = req.in_buf_size;
		desc.args[2] = virt_to_phys(tzbufout);
		desc.args[3] = req.out_buf_size;
		desc.args[4] = req.direction;

		/* Crypto clock must be voted on around the SCM call. */
		ret = __qseecom_enable_clk(CLK_QSEE);
		if (ret)
			break;

		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);

		__qseecom_disable_clk(CLK_QSEE);

		if (ret) {
			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
				ret);
			break;
		}

		/* Copy the output buffer from kernel space to userspace */
		/* Invalidate first: TZ wrote the buffer behind the cache. */
		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
		if (ret) {
			pr_err("copy_to_user failed, ret=%d\n", ret);
			break;
		}
	} while (0);

	/* kzfree() zeroizes before freeing -- buffers may hold DIP data. */
	kzfree(tzbufin);
	kzfree(tzbufout);

	return ret;
}
6356
6357static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6358 struct qseecom_qteec_req *req)
6359{
6360 if (!data || !data->client.ihandle) {
6361 pr_err("Client or client handle is not initialized\n");
6362 return -EINVAL;
6363 }
6364
6365 if (data->type != QSEECOM_CLIENT_APP)
6366 return -EFAULT;
6367
6368 if (req->req_len > UINT_MAX - req->resp_len) {
6369 pr_err("Integer overflow detected in req_len & rsp_len\n");
6370 return -EINVAL;
6371 }
6372
6373 if (req->req_len + req->resp_len > data->client.sb_length) {
6374 pr_debug("Not enough memory to fit cmd_buf.\n");
6375 pr_debug("resp_buf. Required: %u, Available: %zu\n",
6376 (req->req_len + req->resp_len), data->client.sb_length);
6377 return -ENOMEM;
6378 }
6379
6380 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6381 pr_err("cmd buffer or response buffer is null\n");
6382 return -EINVAL;
6383 }
6384 if (((uintptr_t)req->req_ptr <
6385 data->client.user_virt_sb_base) ||
6386 ((uintptr_t)req->req_ptr >=
6387 (data->client.user_virt_sb_base + data->client.sb_length))) {
6388 pr_err("cmd buffer address not within shared bufffer\n");
6389 return -EINVAL;
6390 }
6391
6392 if (((uintptr_t)req->resp_ptr <
6393 data->client.user_virt_sb_base) ||
6394 ((uintptr_t)req->resp_ptr >=
6395 (data->client.user_virt_sb_base + data->client.sb_length))) {
6396 pr_err("response buffer address not within shared bufffer\n");
6397 return -EINVAL;
6398 }
6399
6400 if ((req->req_len == 0) || (req->resp_len == 0)) {
6401 pr_err("cmd buf lengtgh/response buf length not valid\n");
6402 return -EINVAL;
6403 }
6404
6405 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6406 pr_err("Integer overflow in req_len & req_ptr\n");
6407 return -EINVAL;
6408 }
6409
6410 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6411 pr_err("Integer overflow in resp_len & resp_ptr\n");
6412 return -EINVAL;
6413 }
6414
6415 if (data->client.user_virt_sb_base >
6416 (ULONG_MAX - data->client.sb_length)) {
6417 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6418 return -EINVAL;
6419 }
6420 if ((((uintptr_t)req->req_ptr + req->req_len) >
6421 ((uintptr_t)data->client.user_virt_sb_base +
6422 data->client.sb_length)) ||
6423 (((uintptr_t)req->resp_ptr + req->resp_len) >
6424 ((uintptr_t)data->client.user_virt_sb_base +
6425 data->client.sb_length))) {
6426 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6427 return -EINVAL;
6428 }
6429 return 0;
6430}
6431
6432static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6433 uint32_t fd_idx, struct sg_table *sg_ptr)
6434{
6435 struct scatterlist *sg = sg_ptr->sgl;
6436 struct qseecom_sg_entry *sg_entry;
6437 void *buf;
6438 uint i;
6439 size_t size;
6440 dma_addr_t coh_pmem;
6441
6442 if (fd_idx >= MAX_ION_FD) {
6443 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6444 return -ENOMEM;
6445 }
6446 /*
6447 * Allocate a buffer, populate it with number of entry plus
6448 * each sg entry's phy addr and length; then return the
6449 * phy_addr of the buffer.
6450 */
6451 size = sizeof(uint32_t) +
6452 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6453 size = (size + PAGE_SIZE) & PAGE_MASK;
6454 buf = dma_alloc_coherent(qseecom.pdev,
6455 size, &coh_pmem, GFP_KERNEL);
6456 if (buf == NULL) {
6457 pr_err("failed to alloc memory for sg buf\n");
6458 return -ENOMEM;
6459 }
6460 *(uint32_t *)buf = sg_ptr->nents;
6461 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6462 for (i = 0; i < sg_ptr->nents; i++) {
6463 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6464 sg_entry->len = sg->length;
6465 sg_entry++;
6466 sg = sg_next(sg);
6467 }
6468 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6469 data->client.sec_buf_fd[fd_idx].vbase = buf;
6470 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6471 data->client.sec_buf_fd[fd_idx].size = size;
6472 return 0;
6473}
6474
6475static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6476 struct qseecom_dev_handle *data, bool cleanup)
6477{
6478 struct ion_handle *ihandle;
6479 int ret = 0;
6480 int i = 0;
6481 uint32_t *update;
6482 struct sg_table *sg_ptr = NULL;
6483 struct scatterlist *sg;
6484 struct qseecom_param_memref *memref;
6485
6486 if (req == NULL) {
6487 pr_err("Invalid address\n");
6488 return -EINVAL;
6489 }
6490 for (i = 0; i < MAX_ION_FD; i++) {
6491 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006492 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006493 req->ifd_data[i].fd);
6494 if (IS_ERR_OR_NULL(ihandle)) {
6495 pr_err("Ion client can't retrieve the handle\n");
6496 return -ENOMEM;
6497 }
6498 if ((req->req_len < sizeof(uint32_t)) ||
6499 (req->ifd_data[i].cmd_buf_offset >
6500 req->req_len - sizeof(uint32_t))) {
6501 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6502 req->req_len,
6503 req->ifd_data[i].cmd_buf_offset);
6504 return -EINVAL;
6505 }
6506 update = (uint32_t *)((char *) req->req_ptr +
6507 req->ifd_data[i].cmd_buf_offset);
6508 if (!update) {
6509 pr_err("update pointer is NULL\n");
6510 return -EINVAL;
6511 }
6512 } else {
6513 continue;
6514 }
6515 /* Populate the cmd data structure with the phys_addr */
6516 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6517 if (IS_ERR_OR_NULL(sg_ptr)) {
6518 pr_err("IOn client could not retrieve sg table\n");
6519 goto err;
6520 }
6521 sg = sg_ptr->sgl;
6522 if (sg == NULL) {
6523 pr_err("sg is NULL\n");
6524 goto err;
6525 }
6526 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6527 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6528 sg_ptr->nents, sg->length);
6529 goto err;
6530 }
6531 /* clean up buf for pre-allocated fd */
6532 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6533 (*update)) {
6534 if (data->client.sec_buf_fd[i].vbase)
6535 dma_free_coherent(qseecom.pdev,
6536 data->client.sec_buf_fd[i].size,
6537 data->client.sec_buf_fd[i].vbase,
6538 data->client.sec_buf_fd[i].pbase);
6539 memset((void *)update, 0,
6540 sizeof(struct qseecom_param_memref));
6541 memset(&(data->client.sec_buf_fd[i]), 0,
6542 sizeof(struct qseecom_sec_buf_fd_info));
6543 goto clean;
6544 }
6545
6546 if (*update == 0) {
6547 /* update buf for pre-allocated fd from secure heap*/
6548 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6549 sg_ptr);
6550 if (ret) {
6551 pr_err("Failed to handle buf for fd[%d]\n", i);
6552 goto err;
6553 }
6554 memref = (struct qseecom_param_memref *)update;
6555 memref->buffer =
6556 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6557 memref->size =
6558 (uint32_t)(data->client.sec_buf_fd[i].size);
6559 } else {
6560 /* update buf for fd from non-secure qseecom heap */
6561 if (sg_ptr->nents != 1) {
6562 pr_err("Num of scat entr (%d) invalid\n",
6563 sg_ptr->nents);
6564 goto err;
6565 }
6566 if (cleanup)
6567 *update = 0;
6568 else
6569 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6570 }
6571clean:
6572 if (cleanup) {
6573 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6574 ihandle, NULL, sg->length,
6575 ION_IOC_INV_CACHES);
6576 if (ret) {
6577 pr_err("cache operation failed %d\n", ret);
6578 goto err;
6579 }
6580 } else {
6581 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6582 ihandle, NULL, sg->length,
6583 ION_IOC_CLEAN_INV_CACHES);
6584 if (ret) {
6585 pr_err("cache operation failed %d\n", ret);
6586 goto err;
6587 }
6588 data->sglistinfo_ptr[i].indexAndFlags =
6589 SGLISTINFO_SET_INDEX_FLAG(
6590 (sg_ptr->nents == 1), 0,
6591 req->ifd_data[i].cmd_buf_offset);
6592 data->sglistinfo_ptr[i].sizeOrCount =
6593 (sg_ptr->nents == 1) ?
6594 sg->length : sg_ptr->nents;
6595 data->sglist_cnt = i + 1;
6596 }
6597 /* Deallocate the handle */
6598 if (!IS_ERR_OR_NULL(ihandle))
6599 ion_free(qseecom.ion_clnt, ihandle);
6600 }
6601 return ret;
6602err:
6603 if (!IS_ERR_OR_NULL(ihandle))
6604 ion_free(qseecom.ion_clnt, ihandle);
6605 return -ENOMEM;
6606}
6607
/*
 * Core QTEEC command dispatcher: validates the request, translates the
 * user-virtual cmd/resp pointers, patches in any ion-fd buffers (for
 * OPEN_SESSION / REQUEST_CANCELLATION), builds the 32- or 64-bit ireq
 * depending on QSEE version, performs the shared-buffer cache
 * maintenance, and issues the SCM call.  All error paths after the SCM
 * call funnel through `exit:` so the cache invalidate and ion-fd buffer
 * cleanup always run (ret2 keeps the cleanup status separate from the
 * command status).
 */
static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req, uint32_t cmd_id)
{
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int ret2 = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = __qseecom_qteec_validate_msg(data, req);
	if (ret)
		return ret;

	/*
	 * Keep the original user-virtual pointers: req->req_ptr/resp_ptr
	 * are rewritten to kernel-virtual below, but the physical-address
	 * translation for the ireq needs the user-virtual values.
	 */
	req_ptr = req->req_ptr;
	resp_ptr = req->resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* Switch to kernel-virtual so the fd patch-up can dereference. */
	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->req_ptr);
	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->resp_ptr);

	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, false);
		if (ret)
			return ret;
	}

	/* QSEE < 4.0 takes 32-bit physical addresses; newer takes 64-bit. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req->req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req->resp_len;
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req->req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req->resp_len;
		/* A 32-bit TA cannot address buffers at/above 4G. */
		if ((data->client.app_arch == ELFCLASS32) &&
			((ireq_64bit.req_ptr >=
				PHY_ADDR_4G - ireq_64bit.req_len) ||
			(ireq_64bit.resp_ptr >=
				PHY_ADDR_4G - ireq_64bit.resp_len))){
			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
				data->client.app_name, data->client.app_id);
			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
				ireq_64bit.req_ptr, ireq_64bit.req_len,
				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
			return -EFAULT;
		}
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
	}
	/* First u32 of the ireq is the command id (whitelist variant when
	 * supported for OPEN_SESSION).
	 */
	if (qseecom.whitelist_support == true
		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
	else
		*(uint32_t *)cmd_buf = cmd_id;

	reqd_len_sb_in = req->req_len + req->resp_len;
	/* Clean+invalidate shared buffer before TZ reads it. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/* Always invalidate so the CPU sees TZ's writes to the buffer. */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
			data->client.sb_virt, data->client.sb_length,
			ION_IOC_INV_CACHES);
	if (ret2) {
		/* NOTE(review): message prints ret, not ret2 */
		pr_err("cache operation failed %d\n", ret);
		return ret2;
	}

	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret2 = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, true);
		if (ret2)
			return ret2;
	}
	return ret;
}
6770
6771static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6772 void __user *argp)
6773{
6774 struct qseecom_qteec_modfd_req req;
6775 int ret = 0;
6776
6777 ret = copy_from_user(&req, argp,
6778 sizeof(struct qseecom_qteec_modfd_req));
6779 if (ret) {
6780 pr_err("copy_from_user failed\n");
6781 return ret;
6782 }
6783 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6784 QSEOS_TEE_OPEN_SESSION);
6785
6786 return ret;
6787}
6788
6789static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6790 void __user *argp)
6791{
6792 struct qseecom_qteec_req req;
6793 int ret = 0;
6794
6795 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6796 if (ret) {
6797 pr_err("copy_from_user failed\n");
6798 return ret;
6799 }
6800 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6801 return ret;
6802}
6803
6804static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6805 void __user *argp)
6806{
6807 struct qseecom_qteec_modfd_req req;
6808 struct qseecom_command_scm_resp resp;
6809 struct qseecom_qteec_ireq ireq;
6810 struct qseecom_qteec_64bit_ireq ireq_64bit;
6811 struct qseecom_registered_app_list *ptr_app;
6812 bool found_app = false;
6813 unsigned long flags;
6814 int ret = 0;
6815 int i = 0;
6816 uint32_t reqd_len_sb_in = 0;
6817 void *cmd_buf = NULL;
6818 size_t cmd_len;
6819 struct sglist_info *table = data->sglistinfo_ptr;
6820 void *req_ptr = NULL;
6821 void *resp_ptr = NULL;
6822
6823 ret = copy_from_user(&req, argp,
6824 sizeof(struct qseecom_qteec_modfd_req));
6825 if (ret) {
6826 pr_err("copy_from_user failed\n");
6827 return ret;
6828 }
6829 ret = __qseecom_qteec_validate_msg(data,
6830 (struct qseecom_qteec_req *)(&req));
6831 if (ret)
6832 return ret;
6833 req_ptr = req.req_ptr;
6834 resp_ptr = req.resp_ptr;
6835
6836 /* find app_id & img_name from list */
6837 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6838 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6839 list) {
6840 if ((ptr_app->app_id == data->client.app_id) &&
6841 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6842 found_app = true;
6843 break;
6844 }
6845 }
6846 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6847 if (!found_app) {
6848 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6849 (char *)data->client.app_name);
6850 return -ENOENT;
6851 }
6852
6853 /* validate offsets */
6854 for (i = 0; i < MAX_ION_FD; i++) {
6855 if (req.ifd_data[i].fd) {
6856 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
6857 return -EINVAL;
6858 }
6859 }
6860 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6861 (uintptr_t)req.req_ptr);
6862 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6863 (uintptr_t)req.resp_ptr);
6864 ret = __qseecom_update_qteec_req_buf(&req, data, false);
6865 if (ret)
6866 return ret;
6867
6868 if (qseecom.qsee_version < QSEE_VERSION_40) {
6869 ireq.app_id = data->client.app_id;
6870 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6871 (uintptr_t)req_ptr);
6872 ireq.req_len = req.req_len;
6873 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6874 (uintptr_t)resp_ptr);
6875 ireq.resp_len = req.resp_len;
6876 cmd_buf = (void *)&ireq;
6877 cmd_len = sizeof(struct qseecom_qteec_ireq);
6878 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6879 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6880 dmac_flush_range((void *)table,
6881 (void *)table + SGLISTINFO_TABLE_SIZE);
6882 } else {
6883 ireq_64bit.app_id = data->client.app_id;
6884 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6885 (uintptr_t)req_ptr);
6886 ireq_64bit.req_len = req.req_len;
6887 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6888 (uintptr_t)resp_ptr);
6889 ireq_64bit.resp_len = req.resp_len;
6890 cmd_buf = (void *)&ireq_64bit;
6891 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6892 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6893 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6894 dmac_flush_range((void *)table,
6895 (void *)table + SGLISTINFO_TABLE_SIZE);
6896 }
6897 reqd_len_sb_in = req.req_len + req.resp_len;
6898 if (qseecom.whitelist_support == true)
6899 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
6900 else
6901 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
6902
6903 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6904 data->client.sb_virt,
6905 reqd_len_sb_in,
6906 ION_IOC_CLEAN_INV_CACHES);
6907 if (ret) {
6908 pr_err("cache operation failed %d\n", ret);
6909 return ret;
6910 }
6911
6912 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6913
6914 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6915 cmd_buf, cmd_len,
6916 &resp, sizeof(resp));
6917 if (ret) {
6918 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6919 ret, data->client.app_id);
6920 return ret;
6921 }
6922
6923 if (qseecom.qsee_reentrancy_support) {
6924 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
6925 } else {
6926 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6927 ret = __qseecom_process_incomplete_cmd(data, &resp);
6928 if (ret) {
6929 pr_err("process_incomplete_cmd failed err: %d\n",
6930 ret);
6931 return ret;
6932 }
6933 } else {
6934 if (resp.result != QSEOS_RESULT_SUCCESS) {
6935 pr_err("Response result %d not supported\n",
6936 resp.result);
6937 ret = -EINVAL;
6938 }
6939 }
6940 }
6941 ret = __qseecom_update_qteec_req_buf(&req, data, true);
6942 if (ret)
6943 return ret;
6944
6945 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6946 data->client.sb_virt, data->client.sb_length,
6947 ION_IOC_INV_CACHES);
6948 if (ret) {
6949 pr_err("cache operation failed %d\n", ret);
6950 return ret;
6951 }
6952 return 0;
6953}
6954
6955static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6956 void __user *argp)
6957{
6958 struct qseecom_qteec_modfd_req req;
6959 int ret = 0;
6960
6961 ret = copy_from_user(&req, argp,
6962 sizeof(struct qseecom_qteec_modfd_req));
6963 if (ret) {
6964 pr_err("copy_from_user failed\n");
6965 return ret;
6966 }
6967 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6968 QSEOS_TEE_REQUEST_CANCELLATION);
6969
6970 return ret;
6971}
6972
6973static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6974{
6975 if (data->sglist_cnt) {
6976 memset(data->sglistinfo_ptr, 0,
6977 SGLISTINFO_TABLE_SIZE);
6978 data->sglist_cnt = 0;
6979 }
6980}
6981
6982static inline long qseecom_ioctl(struct file *file,
6983 unsigned int cmd, unsigned long arg)
6984{
6985 int ret = 0;
6986 struct qseecom_dev_handle *data = file->private_data;
6987 void __user *argp = (void __user *) arg;
6988 bool perf_enabled = false;
6989
6990 if (!data) {
6991 pr_err("Invalid/uninitialized device handle\n");
6992 return -EINVAL;
6993 }
6994
6995 if (data->abort) {
6996 pr_err("Aborting qseecom driver\n");
6997 return -ENODEV;
6998 }
6999
7000 switch (cmd) {
7001 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
7002 if (data->type != QSEECOM_GENERIC) {
7003 pr_err("reg lstnr req: invalid handle (%d)\n",
7004 data->type);
7005 ret = -EINVAL;
7006 break;
7007 }
7008 pr_debug("ioctl register_listener_req()\n");
7009 mutex_lock(&app_access_lock);
7010 atomic_inc(&data->ioctl_count);
7011 data->type = QSEECOM_LISTENER_SERVICE;
7012 ret = qseecom_register_listener(data, argp);
7013 atomic_dec(&data->ioctl_count);
7014 wake_up_all(&data->abort_wq);
7015 mutex_unlock(&app_access_lock);
7016 if (ret)
7017 pr_err("failed qseecom_register_listener: %d\n", ret);
7018 break;
7019 }
7020 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7021 if ((data->listener.id == 0) ||
7022 (data->type != QSEECOM_LISTENER_SERVICE)) {
7023 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7024 data->type, data->listener.id);
7025 ret = -EINVAL;
7026 break;
7027 }
7028 pr_debug("ioctl unregister_listener_req()\n");
7029 mutex_lock(&app_access_lock);
7030 atomic_inc(&data->ioctl_count);
7031 ret = qseecom_unregister_listener(data);
7032 atomic_dec(&data->ioctl_count);
7033 wake_up_all(&data->abort_wq);
7034 mutex_unlock(&app_access_lock);
7035 if (ret)
7036 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7037 break;
7038 }
7039 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7040 if ((data->client.app_id == 0) ||
7041 (data->type != QSEECOM_CLIENT_APP)) {
7042 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7043 data->type, data->client.app_id);
7044 ret = -EINVAL;
7045 break;
7046 }
7047 /* Only one client allowed here at a time */
7048 mutex_lock(&app_access_lock);
7049 if (qseecom.support_bus_scaling) {
7050 /* register bus bw in case the client doesn't do it */
7051 if (!data->mode) {
7052 mutex_lock(&qsee_bw_mutex);
7053 __qseecom_register_bus_bandwidth_needs(
7054 data, HIGH);
7055 mutex_unlock(&qsee_bw_mutex);
7056 }
7057 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7058 if (ret) {
7059 pr_err("Failed to set bw.\n");
7060 ret = -EINVAL;
7061 mutex_unlock(&app_access_lock);
7062 break;
7063 }
7064 }
7065 /*
7066 * On targets where crypto clock is handled by HLOS,
7067 * if clk_access_cnt is zero and perf_enabled is false,
7068 * then the crypto clock was not enabled before sending cmd to
7069 * tz, qseecom will enable the clock to avoid service failure.
7070 */
7071 if (!qseecom.no_clock_support &&
7072 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7073 pr_debug("ce clock is not enabled!\n");
7074 ret = qseecom_perf_enable(data);
7075 if (ret) {
7076 pr_err("Failed to vote for clock with err %d\n",
7077 ret);
7078 mutex_unlock(&app_access_lock);
7079 ret = -EINVAL;
7080 break;
7081 }
7082 perf_enabled = true;
7083 }
7084 atomic_inc(&data->ioctl_count);
7085 ret = qseecom_send_cmd(data, argp);
7086 if (qseecom.support_bus_scaling)
7087 __qseecom_add_bw_scale_down_timer(
7088 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7089 if (perf_enabled) {
7090 qsee_disable_clock_vote(data, CLK_DFAB);
7091 qsee_disable_clock_vote(data, CLK_SFPB);
7092 }
7093 atomic_dec(&data->ioctl_count);
7094 wake_up_all(&data->abort_wq);
7095 mutex_unlock(&app_access_lock);
7096 if (ret)
7097 pr_err("failed qseecom_send_cmd: %d\n", ret);
7098 break;
7099 }
7100 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7101 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7102 if ((data->client.app_id == 0) ||
7103 (data->type != QSEECOM_CLIENT_APP)) {
7104 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7105 data->type, data->client.app_id);
7106 ret = -EINVAL;
7107 break;
7108 }
7109 /* Only one client allowed here at a time */
7110 mutex_lock(&app_access_lock);
7111 if (qseecom.support_bus_scaling) {
7112 if (!data->mode) {
7113 mutex_lock(&qsee_bw_mutex);
7114 __qseecom_register_bus_bandwidth_needs(
7115 data, HIGH);
7116 mutex_unlock(&qsee_bw_mutex);
7117 }
7118 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7119 if (ret) {
7120 pr_err("Failed to set bw.\n");
7121 mutex_unlock(&app_access_lock);
7122 ret = -EINVAL;
7123 break;
7124 }
7125 }
7126 /*
7127 * On targets where crypto clock is handled by HLOS,
7128 * if clk_access_cnt is zero and perf_enabled is false,
7129 * then the crypto clock was not enabled before sending cmd to
7130 * tz, qseecom will enable the clock to avoid service failure.
7131 */
7132 if (!qseecom.no_clock_support &&
7133 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7134 pr_debug("ce clock is not enabled!\n");
7135 ret = qseecom_perf_enable(data);
7136 if (ret) {
7137 pr_err("Failed to vote for clock with err %d\n",
7138 ret);
7139 mutex_unlock(&app_access_lock);
7140 ret = -EINVAL;
7141 break;
7142 }
7143 perf_enabled = true;
7144 }
7145 atomic_inc(&data->ioctl_count);
7146 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7147 ret = qseecom_send_modfd_cmd(data, argp);
7148 else
7149 ret = qseecom_send_modfd_cmd_64(data, argp);
7150 if (qseecom.support_bus_scaling)
7151 __qseecom_add_bw_scale_down_timer(
7152 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7153 if (perf_enabled) {
7154 qsee_disable_clock_vote(data, CLK_DFAB);
7155 qsee_disable_clock_vote(data, CLK_SFPB);
7156 }
7157 atomic_dec(&data->ioctl_count);
7158 wake_up_all(&data->abort_wq);
7159 mutex_unlock(&app_access_lock);
7160 if (ret)
7161 pr_err("failed qseecom_send_cmd: %d\n", ret);
7162 __qseecom_clean_data_sglistinfo(data);
7163 break;
7164 }
7165 case QSEECOM_IOCTL_RECEIVE_REQ: {
7166 if ((data->listener.id == 0) ||
7167 (data->type != QSEECOM_LISTENER_SERVICE)) {
7168 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7169 data->type, data->listener.id);
7170 ret = -EINVAL;
7171 break;
7172 }
7173 atomic_inc(&data->ioctl_count);
7174 ret = qseecom_receive_req(data);
7175 atomic_dec(&data->ioctl_count);
7176 wake_up_all(&data->abort_wq);
7177 if (ret && (ret != -ERESTARTSYS))
7178 pr_err("failed qseecom_receive_req: %d\n", ret);
7179 break;
7180 }
7181 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7182 if ((data->listener.id == 0) ||
7183 (data->type != QSEECOM_LISTENER_SERVICE)) {
7184 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7185 data->type, data->listener.id);
7186 ret = -EINVAL;
7187 break;
7188 }
7189 atomic_inc(&data->ioctl_count);
7190 if (!qseecom.qsee_reentrancy_support)
7191 ret = qseecom_send_resp();
7192 else
7193 ret = qseecom_reentrancy_send_resp(data);
7194 atomic_dec(&data->ioctl_count);
7195 wake_up_all(&data->abort_wq);
7196 if (ret)
7197 pr_err("failed qseecom_send_resp: %d\n", ret);
7198 break;
7199 }
7200 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7201 if ((data->type != QSEECOM_CLIENT_APP) &&
7202 (data->type != QSEECOM_GENERIC) &&
7203 (data->type != QSEECOM_SECURE_SERVICE)) {
7204 pr_err("set mem param req: invalid handle (%d)\n",
7205 data->type);
7206 ret = -EINVAL;
7207 break;
7208 }
7209 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7210 mutex_lock(&app_access_lock);
7211 atomic_inc(&data->ioctl_count);
7212 ret = qseecom_set_client_mem_param(data, argp);
7213 atomic_dec(&data->ioctl_count);
7214 mutex_unlock(&app_access_lock);
7215 if (ret)
7216 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7217 ret);
7218 break;
7219 }
7220 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7221 if ((data->type != QSEECOM_GENERIC) &&
7222 (data->type != QSEECOM_CLIENT_APP)) {
7223 pr_err("load app req: invalid handle (%d)\n",
7224 data->type);
7225 ret = -EINVAL;
7226 break;
7227 }
7228 data->type = QSEECOM_CLIENT_APP;
7229 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7230 mutex_lock(&app_access_lock);
7231 atomic_inc(&data->ioctl_count);
7232 ret = qseecom_load_app(data, argp);
7233 atomic_dec(&data->ioctl_count);
7234 mutex_unlock(&app_access_lock);
7235 if (ret)
7236 pr_err("failed load_app request: %d\n", ret);
7237 break;
7238 }
7239 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7240 if ((data->client.app_id == 0) ||
7241 (data->type != QSEECOM_CLIENT_APP)) {
7242 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7243 data->type, data->client.app_id);
7244 ret = -EINVAL;
7245 break;
7246 }
7247 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7248 mutex_lock(&app_access_lock);
7249 atomic_inc(&data->ioctl_count);
7250 ret = qseecom_unload_app(data, false);
7251 atomic_dec(&data->ioctl_count);
7252 mutex_unlock(&app_access_lock);
7253 if (ret)
7254 pr_err("failed unload_app request: %d\n", ret);
7255 break;
7256 }
7257 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7258 atomic_inc(&data->ioctl_count);
7259 ret = qseecom_get_qseos_version(data, argp);
7260 if (ret)
7261 pr_err("qseecom_get_qseos_version: %d\n", ret);
7262 atomic_dec(&data->ioctl_count);
7263 break;
7264 }
7265 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7266 if ((data->type != QSEECOM_GENERIC) &&
7267 (data->type != QSEECOM_CLIENT_APP)) {
7268 pr_err("perf enable req: invalid handle (%d)\n",
7269 data->type);
7270 ret = -EINVAL;
7271 break;
7272 }
7273 if ((data->type == QSEECOM_CLIENT_APP) &&
7274 (data->client.app_id == 0)) {
7275 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7276 data->type, data->client.app_id);
7277 ret = -EINVAL;
7278 break;
7279 }
7280 atomic_inc(&data->ioctl_count);
7281 if (qseecom.support_bus_scaling) {
7282 mutex_lock(&qsee_bw_mutex);
7283 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7284 mutex_unlock(&qsee_bw_mutex);
7285 } else {
7286 ret = qseecom_perf_enable(data);
7287 if (ret)
7288 pr_err("Fail to vote for clocks %d\n", ret);
7289 }
7290 atomic_dec(&data->ioctl_count);
7291 break;
7292 }
7293 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7294 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7295 (data->type != QSEECOM_CLIENT_APP)) {
7296 pr_err("perf disable req: invalid handle (%d)\n",
7297 data->type);
7298 ret = -EINVAL;
7299 break;
7300 }
7301 if ((data->type == QSEECOM_CLIENT_APP) &&
7302 (data->client.app_id == 0)) {
7303 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7304 data->type, data->client.app_id);
7305 ret = -EINVAL;
7306 break;
7307 }
7308 atomic_inc(&data->ioctl_count);
7309 if (!qseecom.support_bus_scaling) {
7310 qsee_disable_clock_vote(data, CLK_DFAB);
7311 qsee_disable_clock_vote(data, CLK_SFPB);
7312 } else {
7313 mutex_lock(&qsee_bw_mutex);
7314 qseecom_unregister_bus_bandwidth_needs(data);
7315 mutex_unlock(&qsee_bw_mutex);
7316 }
7317 atomic_dec(&data->ioctl_count);
7318 break;
7319 }
7320
7321 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7322 /* If crypto clock is not handled by HLOS, return directly. */
7323 if (qseecom.no_clock_support) {
7324 pr_debug("crypto clock is not handled by HLOS\n");
7325 break;
7326 }
7327 if ((data->client.app_id == 0) ||
7328 (data->type != QSEECOM_CLIENT_APP)) {
7329 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7330 data->type, data->client.app_id);
7331 ret = -EINVAL;
7332 break;
7333 }
7334 atomic_inc(&data->ioctl_count);
7335 ret = qseecom_scale_bus_bandwidth(data, argp);
7336 atomic_dec(&data->ioctl_count);
7337 break;
7338 }
7339 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7340 if (data->type != QSEECOM_GENERIC) {
7341 pr_err("load ext elf req: invalid client handle (%d)\n",
7342 data->type);
7343 ret = -EINVAL;
7344 break;
7345 }
7346 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7347 data->released = true;
7348 mutex_lock(&app_access_lock);
7349 atomic_inc(&data->ioctl_count);
7350 ret = qseecom_load_external_elf(data, argp);
7351 atomic_dec(&data->ioctl_count);
7352 mutex_unlock(&app_access_lock);
7353 if (ret)
7354 pr_err("failed load_external_elf request: %d\n", ret);
7355 break;
7356 }
7357 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7358 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7359 pr_err("unload ext elf req: invalid handle (%d)\n",
7360 data->type);
7361 ret = -EINVAL;
7362 break;
7363 }
7364 data->released = true;
7365 mutex_lock(&app_access_lock);
7366 atomic_inc(&data->ioctl_count);
7367 ret = qseecom_unload_external_elf(data);
7368 atomic_dec(&data->ioctl_count);
7369 mutex_unlock(&app_access_lock);
7370 if (ret)
7371 pr_err("failed unload_app request: %d\n", ret);
7372 break;
7373 }
7374 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7375 data->type = QSEECOM_CLIENT_APP;
7376 mutex_lock(&app_access_lock);
7377 atomic_inc(&data->ioctl_count);
7378 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7379 ret = qseecom_query_app_loaded(data, argp);
7380 atomic_dec(&data->ioctl_count);
7381 mutex_unlock(&app_access_lock);
7382 break;
7383 }
7384 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7385 if (data->type != QSEECOM_GENERIC) {
7386 pr_err("send cmd svc req: invalid handle (%d)\n",
7387 data->type);
7388 ret = -EINVAL;
7389 break;
7390 }
7391 data->type = QSEECOM_SECURE_SERVICE;
7392 if (qseecom.qsee_version < QSEE_VERSION_03) {
7393 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7394 qseecom.qsee_version);
7395 return -EINVAL;
7396 }
7397 mutex_lock(&app_access_lock);
7398 atomic_inc(&data->ioctl_count);
7399 ret = qseecom_send_service_cmd(data, argp);
7400 atomic_dec(&data->ioctl_count);
7401 mutex_unlock(&app_access_lock);
7402 break;
7403 }
7404 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7405 if (!(qseecom.support_pfe || qseecom.support_fde))
7406 pr_err("Features requiring key init not supported\n");
7407 if (data->type != QSEECOM_GENERIC) {
7408 pr_err("create key req: invalid handle (%d)\n",
7409 data->type);
7410 ret = -EINVAL;
7411 break;
7412 }
7413 if (qseecom.qsee_version < QSEE_VERSION_05) {
7414 pr_err("Create Key feature unsupported: qsee ver %u\n",
7415 qseecom.qsee_version);
7416 return -EINVAL;
7417 }
7418 data->released = true;
7419 mutex_lock(&app_access_lock);
7420 atomic_inc(&data->ioctl_count);
7421 ret = qseecom_create_key(data, argp);
7422 if (ret)
7423 pr_err("failed to create encryption key: %d\n", ret);
7424
7425 atomic_dec(&data->ioctl_count);
7426 mutex_unlock(&app_access_lock);
7427 break;
7428 }
7429 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7430 if (!(qseecom.support_pfe || qseecom.support_fde))
7431 pr_err("Features requiring key init not supported\n");
7432 if (data->type != QSEECOM_GENERIC) {
7433 pr_err("wipe key req: invalid handle (%d)\n",
7434 data->type);
7435 ret = -EINVAL;
7436 break;
7437 }
7438 if (qseecom.qsee_version < QSEE_VERSION_05) {
7439 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7440 qseecom.qsee_version);
7441 return -EINVAL;
7442 }
7443 data->released = true;
7444 mutex_lock(&app_access_lock);
7445 atomic_inc(&data->ioctl_count);
7446 ret = qseecom_wipe_key(data, argp);
7447 if (ret)
7448 pr_err("failed to wipe encryption key: %d\n", ret);
7449 atomic_dec(&data->ioctl_count);
7450 mutex_unlock(&app_access_lock);
7451 break;
7452 }
7453 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7454 if (!(qseecom.support_pfe || qseecom.support_fde))
7455 pr_err("Features requiring key init not supported\n");
7456 if (data->type != QSEECOM_GENERIC) {
7457 pr_err("update key req: invalid handle (%d)\n",
7458 data->type);
7459 ret = -EINVAL;
7460 break;
7461 }
7462 if (qseecom.qsee_version < QSEE_VERSION_05) {
7463 pr_err("Update Key feature unsupported in qsee ver %u\n",
7464 qseecom.qsee_version);
7465 return -EINVAL;
7466 }
7467 data->released = true;
7468 mutex_lock(&app_access_lock);
7469 atomic_inc(&data->ioctl_count);
7470 ret = qseecom_update_key_user_info(data, argp);
7471 if (ret)
7472 pr_err("failed to update key user info: %d\n", ret);
7473 atomic_dec(&data->ioctl_count);
7474 mutex_unlock(&app_access_lock);
7475 break;
7476 }
7477 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7478 if (data->type != QSEECOM_GENERIC) {
7479 pr_err("save part hash req: invalid handle (%d)\n",
7480 data->type);
7481 ret = -EINVAL;
7482 break;
7483 }
7484 data->released = true;
7485 mutex_lock(&app_access_lock);
7486 atomic_inc(&data->ioctl_count);
7487 ret = qseecom_save_partition_hash(argp);
7488 atomic_dec(&data->ioctl_count);
7489 mutex_unlock(&app_access_lock);
7490 break;
7491 }
7492 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7493 if (data->type != QSEECOM_GENERIC) {
7494 pr_err("ES activated req: invalid handle (%d)\n",
7495 data->type);
7496 ret = -EINVAL;
7497 break;
7498 }
7499 data->released = true;
7500 mutex_lock(&app_access_lock);
7501 atomic_inc(&data->ioctl_count);
7502 ret = qseecom_is_es_activated(argp);
7503 atomic_dec(&data->ioctl_count);
7504 mutex_unlock(&app_access_lock);
7505 break;
7506 }
7507 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7508 if (data->type != QSEECOM_GENERIC) {
7509 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7510 data->type);
7511 ret = -EINVAL;
7512 break;
7513 }
7514 data->released = true;
7515 mutex_lock(&app_access_lock);
7516 atomic_inc(&data->ioctl_count);
7517 ret = qseecom_mdtp_cipher_dip(argp);
7518 atomic_dec(&data->ioctl_count);
7519 mutex_unlock(&app_access_lock);
7520 break;
7521 }
7522 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7523 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7524 if ((data->listener.id == 0) ||
7525 (data->type != QSEECOM_LISTENER_SERVICE)) {
7526 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7527 data->type, data->listener.id);
7528 ret = -EINVAL;
7529 break;
7530 }
7531 atomic_inc(&data->ioctl_count);
7532 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7533 ret = qseecom_send_modfd_resp(data, argp);
7534 else
7535 ret = qseecom_send_modfd_resp_64(data, argp);
7536 atomic_dec(&data->ioctl_count);
7537 wake_up_all(&data->abort_wq);
7538 if (ret)
7539 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7540 __qseecom_clean_data_sglistinfo(data);
7541 break;
7542 }
7543 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7544 if ((data->client.app_id == 0) ||
7545 (data->type != QSEECOM_CLIENT_APP)) {
7546 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7547 data->type, data->client.app_id);
7548 ret = -EINVAL;
7549 break;
7550 }
7551 if (qseecom.qsee_version < QSEE_VERSION_40) {
7552 pr_err("GP feature unsupported: qsee ver %u\n",
7553 qseecom.qsee_version);
7554 return -EINVAL;
7555 }
7556 /* Only one client allowed here at a time */
7557 mutex_lock(&app_access_lock);
7558 atomic_inc(&data->ioctl_count);
7559 ret = qseecom_qteec_open_session(data, argp);
7560 atomic_dec(&data->ioctl_count);
7561 wake_up_all(&data->abort_wq);
7562 mutex_unlock(&app_access_lock);
7563 if (ret)
7564 pr_err("failed open_session_cmd: %d\n", ret);
7565 __qseecom_clean_data_sglistinfo(data);
7566 break;
7567 }
7568 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7569 if ((data->client.app_id == 0) ||
7570 (data->type != QSEECOM_CLIENT_APP)) {
7571 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7572 data->type, data->client.app_id);
7573 ret = -EINVAL;
7574 break;
7575 }
7576 if (qseecom.qsee_version < QSEE_VERSION_40) {
7577 pr_err("GP feature unsupported: qsee ver %u\n",
7578 qseecom.qsee_version);
7579 return -EINVAL;
7580 }
7581 /* Only one client allowed here at a time */
7582 mutex_lock(&app_access_lock);
7583 atomic_inc(&data->ioctl_count);
7584 ret = qseecom_qteec_close_session(data, argp);
7585 atomic_dec(&data->ioctl_count);
7586 wake_up_all(&data->abort_wq);
7587 mutex_unlock(&app_access_lock);
7588 if (ret)
7589 pr_err("failed close_session_cmd: %d\n", ret);
7590 break;
7591 }
7592 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7593 if ((data->client.app_id == 0) ||
7594 (data->type != QSEECOM_CLIENT_APP)) {
7595 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7596 data->type, data->client.app_id);
7597 ret = -EINVAL;
7598 break;
7599 }
7600 if (qseecom.qsee_version < QSEE_VERSION_40) {
7601 pr_err("GP feature unsupported: qsee ver %u\n",
7602 qseecom.qsee_version);
7603 return -EINVAL;
7604 }
7605 /* Only one client allowed here at a time */
7606 mutex_lock(&app_access_lock);
7607 atomic_inc(&data->ioctl_count);
7608 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7609 atomic_dec(&data->ioctl_count);
7610 wake_up_all(&data->abort_wq);
7611 mutex_unlock(&app_access_lock);
7612 if (ret)
7613 pr_err("failed Invoke cmd: %d\n", ret);
7614 __qseecom_clean_data_sglistinfo(data);
7615 break;
7616 }
7617 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7618 if ((data->client.app_id == 0) ||
7619 (data->type != QSEECOM_CLIENT_APP)) {
7620 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7621 data->type, data->client.app_id);
7622 ret = -EINVAL;
7623 break;
7624 }
7625 if (qseecom.qsee_version < QSEE_VERSION_40) {
7626 pr_err("GP feature unsupported: qsee ver %u\n",
7627 qseecom.qsee_version);
7628 return -EINVAL;
7629 }
7630 /* Only one client allowed here at a time */
7631 mutex_lock(&app_access_lock);
7632 atomic_inc(&data->ioctl_count);
7633 ret = qseecom_qteec_request_cancellation(data, argp);
7634 atomic_dec(&data->ioctl_count);
7635 wake_up_all(&data->abort_wq);
7636 mutex_unlock(&app_access_lock);
7637 if (ret)
7638 pr_err("failed request_cancellation: %d\n", ret);
7639 break;
7640 }
7641 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7642 atomic_inc(&data->ioctl_count);
7643 ret = qseecom_get_ce_info(data, argp);
7644 if (ret)
7645 pr_err("failed get fde ce pipe info: %d\n", ret);
7646 atomic_dec(&data->ioctl_count);
7647 break;
7648 }
7649 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7650 atomic_inc(&data->ioctl_count);
7651 ret = qseecom_free_ce_info(data, argp);
7652 if (ret)
7653 pr_err("failed get fde ce pipe info: %d\n", ret);
7654 atomic_dec(&data->ioctl_count);
7655 break;
7656 }
7657 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7658 atomic_inc(&data->ioctl_count);
7659 ret = qseecom_query_ce_info(data, argp);
7660 if (ret)
7661 pr_err("failed get fde ce pipe info: %d\n", ret);
7662 atomic_dec(&data->ioctl_count);
7663 break;
7664 }
7665 default:
7666 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7667 return -EINVAL;
7668 }
7669 return ret;
7670}
7671
7672static int qseecom_open(struct inode *inode, struct file *file)
7673{
7674 int ret = 0;
7675 struct qseecom_dev_handle *data;
7676
7677 data = kzalloc(sizeof(*data), GFP_KERNEL);
7678 if (!data)
7679 return -ENOMEM;
7680 file->private_data = data;
7681 data->abort = 0;
7682 data->type = QSEECOM_GENERIC;
7683 data->released = false;
7684 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7685 data->mode = INACTIVE;
7686 init_waitqueue_head(&data->abort_wq);
7687 atomic_set(&data->ioctl_count, 0);
7688 return ret;
7689}
7690
/*
 * qseecom_release() - release callback for /dev/qseecom.
 *
 * If userspace never performed its own teardown (data->released is still
 * false), run the cleanup appropriate to the handle's type first. Then
 * drop any bus-bandwidth registration or clock votes still held by this
 * handle, and free it.
 *
 * Returns the result of the last cleanup step that produced one; errors
 * are logged but do not stop the remaining teardown.
 */
static int qseecom_release(struct inode *inode, struct file *file)
{
	struct qseecom_dev_handle *data = file->private_data;
	int ret = 0;

	if (data->released == false) {
		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
			data->type, data->mode, data);
		switch (data->type) {
		case QSEECOM_LISTENER_SERVICE:
			/* Deregister the listener under the global app lock. */
			mutex_lock(&app_access_lock);
			ret = qseecom_unregister_listener(data);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_CLIENT_APP:
			/* Force-unload the TA this client left loaded. */
			mutex_lock(&app_access_lock);
			ret = qseecom_unload_app(data, true);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_SECURE_SERVICE:
		case QSEECOM_GENERIC:
			/* Only shared ion memory may still be mapped. */
			ret = qseecom_unmap_ion_allocated_memory(data);
			if (ret)
				pr_err("Ion Unmap failed\n");
			break;
		case QSEECOM_UNAVAILABLE_CLIENT_APP:
			/* External ELF path: nothing left to clean up. */
			break;
		default:
			pr_err("Unsupported clnt_handle_type %d",
				data->type);
			break;
		}
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		if (data->mode != INACTIVE) {
			qseecom_unregister_bus_bandwidth_needs(data);
			/*
			 * If no other client keeps a vote, scale the bus
			 * request back down to INACTIVE.
			 */
			if (qseecom.cumulative_mode == INACTIVE) {
				ret = __qseecom_set_msm_bus_request(INACTIVE);
				if (ret)
					pr_err("Fail to scale down bus\n");
			}
		}
		mutex_unlock(&qsee_bw_mutex);
	} else {
		/* No bus scaling: release any direct clock votes instead. */
		if (data->fast_load_enabled == true)
			qsee_disable_clock_vote(data, CLK_SFPB);
		if (data->perf_enabled == true)
			qsee_disable_clock_vote(data, CLK_DFAB);
	}
	kfree(data);

	return ret;
}
7746
7747#ifdef CONFIG_COMPAT
7748#include "compat_qseecom.c"
7749#else
7750#define compat_qseecom_ioctl NULL
7751#endif
7752
/*
 * Character-device entry points for /dev/qseecom. compat_ioctl is NULL
 * unless CONFIG_COMPAT is enabled (see the #ifdef block above).
 */
static const struct file_operations qseecom_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qseecom_ioctl,
	.compat_ioctl = compat_qseecom_ioctl,
	.open = qseecom_open,
	.release = qseecom_release
};
7760
7761static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7762{
7763 int rc = 0;
7764 struct device *pdev;
7765 struct qseecom_clk *qclk;
7766 char *core_clk_src = NULL;
7767 char *core_clk = NULL;
7768 char *iface_clk = NULL;
7769 char *bus_clk = NULL;
7770
7771 switch (ce) {
7772 case CLK_QSEE: {
7773 core_clk_src = "core_clk_src";
7774 core_clk = "core_clk";
7775 iface_clk = "iface_clk";
7776 bus_clk = "bus_clk";
7777 qclk = &qseecom.qsee;
7778 qclk->instance = CLK_QSEE;
7779 break;
7780 };
7781 case CLK_CE_DRV: {
7782 core_clk_src = "ce_drv_core_clk_src";
7783 core_clk = "ce_drv_core_clk";
7784 iface_clk = "ce_drv_iface_clk";
7785 bus_clk = "ce_drv_bus_clk";
7786 qclk = &qseecom.ce_drv;
7787 qclk->instance = CLK_CE_DRV;
7788 break;
7789 };
7790 default:
7791 pr_err("Invalid ce hw instance: %d!\n", ce);
7792 return -EIO;
7793 }
7794
7795 if (qseecom.no_clock_support) {
7796 qclk->ce_core_clk = NULL;
7797 qclk->ce_clk = NULL;
7798 qclk->ce_bus_clk = NULL;
7799 qclk->ce_core_src_clk = NULL;
7800 return 0;
7801 }
7802
7803 pdev = qseecom.pdev;
7804
7805 /* Get CE3 src core clk. */
7806 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7807 if (!IS_ERR(qclk->ce_core_src_clk)) {
7808 rc = clk_set_rate(qclk->ce_core_src_clk,
7809 qseecom.ce_opp_freq_hz);
7810 if (rc) {
7811 clk_put(qclk->ce_core_src_clk);
7812 qclk->ce_core_src_clk = NULL;
7813 pr_err("Unable to set the core src clk @%uMhz.\n",
7814 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7815 return -EIO;
7816 }
7817 } else {
7818 pr_warn("Unable to get CE core src clk, set to NULL\n");
7819 qclk->ce_core_src_clk = NULL;
7820 }
7821
7822 /* Get CE core clk */
7823 qclk->ce_core_clk = clk_get(pdev, core_clk);
7824 if (IS_ERR(qclk->ce_core_clk)) {
7825 rc = PTR_ERR(qclk->ce_core_clk);
7826 pr_err("Unable to get CE core clk\n");
7827 if (qclk->ce_core_src_clk != NULL)
7828 clk_put(qclk->ce_core_src_clk);
7829 return -EIO;
7830 }
7831
7832 /* Get CE Interface clk */
7833 qclk->ce_clk = clk_get(pdev, iface_clk);
7834 if (IS_ERR(qclk->ce_clk)) {
7835 rc = PTR_ERR(qclk->ce_clk);
7836 pr_err("Unable to get CE interface clk\n");
7837 if (qclk->ce_core_src_clk != NULL)
7838 clk_put(qclk->ce_core_src_clk);
7839 clk_put(qclk->ce_core_clk);
7840 return -EIO;
7841 }
7842
7843 /* Get CE AXI clk */
7844 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7845 if (IS_ERR(qclk->ce_bus_clk)) {
7846 rc = PTR_ERR(qclk->ce_bus_clk);
7847 pr_err("Unable to get CE BUS interface clk\n");
7848 if (qclk->ce_core_src_clk != NULL)
7849 clk_put(qclk->ce_core_src_clk);
7850 clk_put(qclk->ce_core_clk);
7851 clk_put(qclk->ce_clk);
7852 return -EIO;
7853 }
7854
7855 return rc;
7856}
7857
7858static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7859{
7860 struct qseecom_clk *qclk;
7861
7862 if (ce == CLK_QSEE)
7863 qclk = &qseecom.qsee;
7864 else
7865 qclk = &qseecom.ce_drv;
7866
7867 if (qclk->ce_clk != NULL) {
7868 clk_put(qclk->ce_clk);
7869 qclk->ce_clk = NULL;
7870 }
7871 if (qclk->ce_core_clk != NULL) {
7872 clk_put(qclk->ce_core_clk);
7873 qclk->ce_core_clk = NULL;
7874 }
7875 if (qclk->ce_bus_clk != NULL) {
7876 clk_put(qclk->ce_bus_clk);
7877 qclk->ce_bus_clk = NULL;
7878 }
7879 if (qclk->ce_core_src_clk != NULL) {
7880 clk_put(qclk->ce_core_src_clk);
7881 qclk->ce_core_src_clk = NULL;
7882 }
7883 qclk->instance = CLK_INVALID;
7884}
7885
7886static int qseecom_retrieve_ce_data(struct platform_device *pdev)
7887{
7888 int rc = 0;
7889 uint32_t hlos_num_ce_hw_instances;
7890 uint32_t disk_encrypt_pipe;
7891 uint32_t file_encrypt_pipe;
Zhen Kongffec45c2017-10-18 14:05:53 -07007892 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007893 int i;
7894 const int *tbl;
7895 int size;
7896 int entry;
7897 struct qseecom_crypto_info *pfde_tbl = NULL;
7898 struct qseecom_crypto_info *p;
7899 int tbl_size;
7900 int j;
7901 bool old_db = true;
7902 struct qseecom_ce_info_use *pce_info_use;
7903 uint32_t *unit_tbl = NULL;
7904 int total_units = 0;
7905 struct qseecom_ce_pipe_entry *pce_entry;
7906
7907 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
7908 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
7909
7910 if (of_property_read_u32((&pdev->dev)->of_node,
7911 "qcom,qsee-ce-hw-instance",
7912 &qseecom.ce_info.qsee_ce_hw_instance)) {
7913 pr_err("Fail to get qsee ce hw instance information.\n");
7914 rc = -EINVAL;
7915 goto out;
7916 } else {
7917 pr_debug("qsee-ce-hw-instance=0x%x\n",
7918 qseecom.ce_info.qsee_ce_hw_instance);
7919 }
7920
7921 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
7922 "qcom,support-fde");
7923 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
7924 "qcom,support-pfe");
7925
7926 if (!qseecom.support_pfe && !qseecom.support_fde) {
7927 pr_warn("Device does not support PFE/FDE");
7928 goto out;
7929 }
7930
7931 if (qseecom.support_fde)
7932 tbl = of_get_property((&pdev->dev)->of_node,
7933 "qcom,full-disk-encrypt-info", &size);
7934 else
7935 tbl = NULL;
7936 if (tbl) {
7937 old_db = false;
7938 if (size % sizeof(struct qseecom_crypto_info)) {
7939 pr_err("full-disk-encrypt-info tbl size(%d)\n",
7940 size);
7941 rc = -EINVAL;
7942 goto out;
7943 }
7944 tbl_size = size / sizeof
7945 (struct qseecom_crypto_info);
7946
7947 pfde_tbl = kzalloc(size, GFP_KERNEL);
7948 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
7949 total_units = 0;
7950
7951 if (!pfde_tbl || !unit_tbl) {
7952 pr_err("failed to alloc memory\n");
7953 rc = -ENOMEM;
7954 goto out;
7955 }
7956 if (of_property_read_u32_array((&pdev->dev)->of_node,
7957 "qcom,full-disk-encrypt-info",
7958 (u32 *)pfde_tbl, size/sizeof(u32))) {
7959 pr_err("failed to read full-disk-encrypt-info tbl\n");
7960 rc = -EINVAL;
7961 goto out;
7962 }
7963
7964 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7965 for (j = 0; j < total_units; j++) {
7966 if (p->unit_num == *(unit_tbl + j))
7967 break;
7968 }
7969 if (j == total_units) {
7970 *(unit_tbl + total_units) = p->unit_num;
7971 total_units++;
7972 }
7973 }
7974
7975 qseecom.ce_info.num_fde = total_units;
7976 pce_info_use = qseecom.ce_info.fde = kcalloc(
7977 total_units, sizeof(struct qseecom_ce_info_use),
7978 GFP_KERNEL);
7979 if (!pce_info_use) {
7980 pr_err("failed to alloc memory\n");
7981 rc = -ENOMEM;
7982 goto out;
7983 }
7984
7985 for (j = 0; j < total_units; j++, pce_info_use++) {
7986 pce_info_use->unit_num = *(unit_tbl + j);
7987 pce_info_use->alloc = false;
7988 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
7989 pce_info_use->num_ce_pipe_entries = 0;
7990 pce_info_use->ce_pipe_entry = NULL;
7991 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7992 if (p->unit_num == pce_info_use->unit_num)
7993 pce_info_use->num_ce_pipe_entries++;
7994 }
7995
7996 entry = pce_info_use->num_ce_pipe_entries;
7997 pce_entry = pce_info_use->ce_pipe_entry =
7998 kcalloc(entry,
7999 sizeof(struct qseecom_ce_pipe_entry),
8000 GFP_KERNEL);
8001 if (pce_entry == NULL) {
8002 pr_err("failed to alloc memory\n");
8003 rc = -ENOMEM;
8004 goto out;
8005 }
8006
8007 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8008 if (p->unit_num == pce_info_use->unit_num) {
8009 pce_entry->ce_num = p->ce;
8010 pce_entry->ce_pipe_pair =
8011 p->pipe_pair;
8012 pce_entry->valid = true;
8013 pce_entry++;
8014 }
8015 }
8016 }
8017 kfree(unit_tbl);
8018 unit_tbl = NULL;
8019 kfree(pfde_tbl);
8020 pfde_tbl = NULL;
8021 }
8022
8023 if (qseecom.support_pfe)
8024 tbl = of_get_property((&pdev->dev)->of_node,
8025 "qcom,per-file-encrypt-info", &size);
8026 else
8027 tbl = NULL;
8028 if (tbl) {
8029 old_db = false;
8030 if (size % sizeof(struct qseecom_crypto_info)) {
8031 pr_err("per-file-encrypt-info tbl size(%d)\n",
8032 size);
8033 rc = -EINVAL;
8034 goto out;
8035 }
8036 tbl_size = size / sizeof
8037 (struct qseecom_crypto_info);
8038
8039 pfde_tbl = kzalloc(size, GFP_KERNEL);
8040 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8041 total_units = 0;
8042 if (!pfde_tbl || !unit_tbl) {
8043 pr_err("failed to alloc memory\n");
8044 rc = -ENOMEM;
8045 goto out;
8046 }
8047 if (of_property_read_u32_array((&pdev->dev)->of_node,
8048 "qcom,per-file-encrypt-info",
8049 (u32 *)pfde_tbl, size/sizeof(u32))) {
8050 pr_err("failed to read per-file-encrypt-info tbl\n");
8051 rc = -EINVAL;
8052 goto out;
8053 }
8054
8055 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8056 for (j = 0; j < total_units; j++) {
8057 if (p->unit_num == *(unit_tbl + j))
8058 break;
8059 }
8060 if (j == total_units) {
8061 *(unit_tbl + total_units) = p->unit_num;
8062 total_units++;
8063 }
8064 }
8065
8066 qseecom.ce_info.num_pfe = total_units;
8067 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8068 total_units, sizeof(struct qseecom_ce_info_use),
8069 GFP_KERNEL);
8070 if (!pce_info_use) {
8071 pr_err("failed to alloc memory\n");
8072 rc = -ENOMEM;
8073 goto out;
8074 }
8075
8076 for (j = 0; j < total_units; j++, pce_info_use++) {
8077 pce_info_use->unit_num = *(unit_tbl + j);
8078 pce_info_use->alloc = false;
8079 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8080 pce_info_use->num_ce_pipe_entries = 0;
8081 pce_info_use->ce_pipe_entry = NULL;
8082 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8083 if (p->unit_num == pce_info_use->unit_num)
8084 pce_info_use->num_ce_pipe_entries++;
8085 }
8086
8087 entry = pce_info_use->num_ce_pipe_entries;
8088 pce_entry = pce_info_use->ce_pipe_entry =
8089 kcalloc(entry,
8090 sizeof(struct qseecom_ce_pipe_entry),
8091 GFP_KERNEL);
8092 if (pce_entry == NULL) {
8093 pr_err("failed to alloc memory\n");
8094 rc = -ENOMEM;
8095 goto out;
8096 }
8097
8098 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8099 if (p->unit_num == pce_info_use->unit_num) {
8100 pce_entry->ce_num = p->ce;
8101 pce_entry->ce_pipe_pair =
8102 p->pipe_pair;
8103 pce_entry->valid = true;
8104 pce_entry++;
8105 }
8106 }
8107 }
8108 kfree(unit_tbl);
8109 unit_tbl = NULL;
8110 kfree(pfde_tbl);
8111 pfde_tbl = NULL;
8112 }
8113
8114 if (!old_db)
8115 goto out1;
8116
8117 if (of_property_read_bool((&pdev->dev)->of_node,
8118 "qcom,support-multiple-ce-hw-instance")) {
8119 if (of_property_read_u32((&pdev->dev)->of_node,
8120 "qcom,hlos-num-ce-hw-instances",
8121 &hlos_num_ce_hw_instances)) {
8122 pr_err("Fail: get hlos number of ce hw instance\n");
8123 rc = -EINVAL;
8124 goto out;
8125 }
8126 } else {
8127 hlos_num_ce_hw_instances = 1;
8128 }
8129
8130 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8131 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8132 MAX_CE_PIPE_PAIR_PER_UNIT);
8133 rc = -EINVAL;
8134 goto out;
8135 }
8136
8137 if (of_property_read_u32_array((&pdev->dev)->of_node,
8138 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8139 hlos_num_ce_hw_instances)) {
8140 pr_err("Fail: get hlos ce hw instance info\n");
8141 rc = -EINVAL;
8142 goto out;
8143 }
8144
8145 if (qseecom.support_fde) {
8146 pce_info_use = qseecom.ce_info.fde =
8147 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8148 if (!pce_info_use) {
8149 pr_err("failed to alloc memory\n");
8150 rc = -ENOMEM;
8151 goto out;
8152 }
8153 /* by default for old db */
8154 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8155 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8156 pce_info_use->alloc = false;
8157 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8158 pce_info_use->ce_pipe_entry = NULL;
8159 if (of_property_read_u32((&pdev->dev)->of_node,
8160 "qcom,disk-encrypt-pipe-pair",
8161 &disk_encrypt_pipe)) {
8162 pr_err("Fail to get FDE pipe information.\n");
8163 rc = -EINVAL;
8164 goto out;
8165 } else {
8166 pr_debug("disk-encrypt-pipe-pair=0x%x",
8167 disk_encrypt_pipe);
8168 }
8169 entry = pce_info_use->num_ce_pipe_entries =
8170 hlos_num_ce_hw_instances;
8171 pce_entry = pce_info_use->ce_pipe_entry =
8172 kcalloc(entry,
8173 sizeof(struct qseecom_ce_pipe_entry),
8174 GFP_KERNEL);
8175 if (pce_entry == NULL) {
8176 pr_err("failed to alloc memory\n");
8177 rc = -ENOMEM;
8178 goto out;
8179 }
8180 for (i = 0; i < entry; i++) {
8181 pce_entry->ce_num = hlos_ce_hw_instance[i];
8182 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8183 pce_entry->valid = 1;
8184 pce_entry++;
8185 }
8186 } else {
8187 pr_warn("Device does not support FDE");
8188 disk_encrypt_pipe = 0xff;
8189 }
8190 if (qseecom.support_pfe) {
8191 pce_info_use = qseecom.ce_info.pfe =
8192 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8193 if (!pce_info_use) {
8194 pr_err("failed to alloc memory\n");
8195 rc = -ENOMEM;
8196 goto out;
8197 }
8198 /* by default for old db */
8199 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8200 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8201 pce_info_use->alloc = false;
8202 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8203 pce_info_use->ce_pipe_entry = NULL;
8204
8205 if (of_property_read_u32((&pdev->dev)->of_node,
8206 "qcom,file-encrypt-pipe-pair",
8207 &file_encrypt_pipe)) {
8208 pr_err("Fail to get PFE pipe information.\n");
8209 rc = -EINVAL;
8210 goto out;
8211 } else {
8212 pr_debug("file-encrypt-pipe-pair=0x%x",
8213 file_encrypt_pipe);
8214 }
8215 entry = pce_info_use->num_ce_pipe_entries =
8216 hlos_num_ce_hw_instances;
8217 pce_entry = pce_info_use->ce_pipe_entry =
8218 kcalloc(entry,
8219 sizeof(struct qseecom_ce_pipe_entry),
8220 GFP_KERNEL);
8221 if (pce_entry == NULL) {
8222 pr_err("failed to alloc memory\n");
8223 rc = -ENOMEM;
8224 goto out;
8225 }
8226 for (i = 0; i < entry; i++) {
8227 pce_entry->ce_num = hlos_ce_hw_instance[i];
8228 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8229 pce_entry->valid = 1;
8230 pce_entry++;
8231 }
8232 } else {
8233 pr_warn("Device does not support PFE");
8234 file_encrypt_pipe = 0xff;
8235 }
8236
8237out1:
8238 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8239 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8240out:
8241 if (rc) {
8242 if (qseecom.ce_info.fde) {
8243 pce_info_use = qseecom.ce_info.fde;
8244 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8245 pce_entry = pce_info_use->ce_pipe_entry;
8246 kfree(pce_entry);
8247 pce_info_use++;
8248 }
8249 }
8250 kfree(qseecom.ce_info.fde);
8251 qseecom.ce_info.fde = NULL;
8252 if (qseecom.ce_info.pfe) {
8253 pce_info_use = qseecom.ce_info.pfe;
8254 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8255 pce_entry = pce_info_use->ce_pipe_entry;
8256 kfree(pce_entry);
8257 pce_info_use++;
8258 }
8259 }
8260 kfree(qseecom.ce_info.pfe);
8261 qseecom.ce_info.pfe = NULL;
8262 }
8263 kfree(unit_tbl);
8264 kfree(pfde_tbl);
8265 return rc;
8266}
8267
8268static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8269 void __user *argp)
8270{
8271 struct qseecom_ce_info_req req;
8272 struct qseecom_ce_info_req *pinfo = &req;
8273 int ret = 0;
8274 int i;
8275 unsigned int entries;
8276 struct qseecom_ce_info_use *pce_info_use, *p;
8277 int total = 0;
8278 bool found = false;
8279 struct qseecom_ce_pipe_entry *pce_entry;
8280
8281 ret = copy_from_user(pinfo, argp,
8282 sizeof(struct qseecom_ce_info_req));
8283 if (ret) {
8284 pr_err("copy_from_user failed\n");
8285 return ret;
8286 }
8287
8288 switch (pinfo->usage) {
8289 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8290 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8291 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8292 if (qseecom.support_fde) {
8293 p = qseecom.ce_info.fde;
8294 total = qseecom.ce_info.num_fde;
8295 } else {
8296 pr_err("system does not support fde\n");
8297 return -EINVAL;
8298 }
8299 break;
8300 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8301 if (qseecom.support_pfe) {
8302 p = qseecom.ce_info.pfe;
8303 total = qseecom.ce_info.num_pfe;
8304 } else {
8305 pr_err("system does not support pfe\n");
8306 return -EINVAL;
8307 }
8308 break;
8309 default:
8310 pr_err("unsupported usage %d\n", pinfo->usage);
8311 return -EINVAL;
8312 }
8313
8314 pce_info_use = NULL;
8315 for (i = 0; i < total; i++) {
8316 if (!p->alloc)
8317 pce_info_use = p;
8318 else if (!memcmp(p->handle, pinfo->handle,
8319 MAX_CE_INFO_HANDLE_SIZE)) {
8320 pce_info_use = p;
8321 found = true;
8322 break;
8323 }
8324 p++;
8325 }
8326
8327 if (pce_info_use == NULL)
8328 return -EBUSY;
8329
8330 pinfo->unit_num = pce_info_use->unit_num;
8331 if (!pce_info_use->alloc) {
8332 pce_info_use->alloc = true;
8333 memcpy(pce_info_use->handle,
8334 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8335 }
8336 if (pce_info_use->num_ce_pipe_entries >
8337 MAX_CE_PIPE_PAIR_PER_UNIT)
8338 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8339 else
8340 entries = pce_info_use->num_ce_pipe_entries;
8341 pinfo->num_ce_pipe_entries = entries;
8342 pce_entry = pce_info_use->ce_pipe_entry;
8343 for (i = 0; i < entries; i++, pce_entry++)
8344 pinfo->ce_pipe_entry[i] = *pce_entry;
8345 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8346 pinfo->ce_pipe_entry[i].valid = 0;
8347
8348 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8349 pr_err("copy_to_user failed\n");
8350 ret = -EFAULT;
8351 }
8352 return ret;
8353}
8354
8355static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8356 void __user *argp)
8357{
8358 struct qseecom_ce_info_req req;
8359 struct qseecom_ce_info_req *pinfo = &req;
8360 int ret = 0;
8361 struct qseecom_ce_info_use *p;
8362 int total = 0;
8363 int i;
8364 bool found = false;
8365
8366 ret = copy_from_user(pinfo, argp,
8367 sizeof(struct qseecom_ce_info_req));
8368 if (ret)
8369 return ret;
8370
8371 switch (pinfo->usage) {
8372 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8373 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8374 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8375 if (qseecom.support_fde) {
8376 p = qseecom.ce_info.fde;
8377 total = qseecom.ce_info.num_fde;
8378 } else {
8379 pr_err("system does not support fde\n");
8380 return -EINVAL;
8381 }
8382 break;
8383 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8384 if (qseecom.support_pfe) {
8385 p = qseecom.ce_info.pfe;
8386 total = qseecom.ce_info.num_pfe;
8387 } else {
8388 pr_err("system does not support pfe\n");
8389 return -EINVAL;
8390 }
8391 break;
8392 default:
8393 pr_err("unsupported usage %d\n", pinfo->usage);
8394 return -EINVAL;
8395 }
8396
8397 for (i = 0; i < total; i++) {
8398 if (p->alloc &&
8399 !memcmp(p->handle, pinfo->handle,
8400 MAX_CE_INFO_HANDLE_SIZE)) {
8401 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8402 p->alloc = false;
8403 found = true;
8404 break;
8405 }
8406 p++;
8407 }
8408 return ret;
8409}
8410
8411static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8412 void __user *argp)
8413{
8414 struct qseecom_ce_info_req req;
8415 struct qseecom_ce_info_req *pinfo = &req;
8416 int ret = 0;
8417 int i;
8418 unsigned int entries;
8419 struct qseecom_ce_info_use *pce_info_use, *p;
8420 int total = 0;
8421 bool found = false;
8422 struct qseecom_ce_pipe_entry *pce_entry;
8423
8424 ret = copy_from_user(pinfo, argp,
8425 sizeof(struct qseecom_ce_info_req));
8426 if (ret)
8427 return ret;
8428
8429 switch (pinfo->usage) {
8430 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8431 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8432 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8433 if (qseecom.support_fde) {
8434 p = qseecom.ce_info.fde;
8435 total = qseecom.ce_info.num_fde;
8436 } else {
8437 pr_err("system does not support fde\n");
8438 return -EINVAL;
8439 }
8440 break;
8441 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8442 if (qseecom.support_pfe) {
8443 p = qseecom.ce_info.pfe;
8444 total = qseecom.ce_info.num_pfe;
8445 } else {
8446 pr_err("system does not support pfe\n");
8447 return -EINVAL;
8448 }
8449 break;
8450 default:
8451 pr_err("unsupported usage %d\n", pinfo->usage);
8452 return -EINVAL;
8453 }
8454
8455 pce_info_use = NULL;
8456 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8457 pinfo->num_ce_pipe_entries = 0;
8458 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8459 pinfo->ce_pipe_entry[i].valid = 0;
8460
8461 for (i = 0; i < total; i++) {
8462
8463 if (p->alloc && !memcmp(p->handle,
8464 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8465 pce_info_use = p;
8466 found = true;
8467 break;
8468 }
8469 p++;
8470 }
8471 if (!pce_info_use)
8472 goto out;
8473 pinfo->unit_num = pce_info_use->unit_num;
8474 if (pce_info_use->num_ce_pipe_entries >
8475 MAX_CE_PIPE_PAIR_PER_UNIT)
8476 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8477 else
8478 entries = pce_info_use->num_ce_pipe_entries;
8479 pinfo->num_ce_pipe_entries = entries;
8480 pce_entry = pce_info_use->ce_pipe_entry;
8481 for (i = 0; i < entries; i++, pce_entry++)
8482 pinfo->ce_pipe_entry[i] = *pce_entry;
8483 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8484 pinfo->ce_pipe_entry[i].valid = 0;
8485out:
8486 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8487 pr_err("copy_to_user failed\n");
8488 ret = -EFAULT;
8489 }
8490 return ret;
8491}
8492
8493/*
8494 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8495 * then whitelist feature is not supported.
8496 */
8497static int qseecom_check_whitelist_feature(void)
8498{
8499 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8500
8501 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8502}
8503
8504static int qseecom_probe(struct platform_device *pdev)
8505{
8506 int rc;
8507 int i;
8508 uint32_t feature = 10;
8509 struct device *class_dev;
8510 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8511 struct qseecom_command_scm_resp resp;
8512 struct qseecom_ce_info_use *pce_info_use = NULL;
8513
8514 qseecom.qsee_bw_count = 0;
8515 qseecom.qsee_perf_client = 0;
8516 qseecom.qsee_sfpb_bw_count = 0;
8517
8518 qseecom.qsee.ce_core_clk = NULL;
8519 qseecom.qsee.ce_clk = NULL;
8520 qseecom.qsee.ce_core_src_clk = NULL;
8521 qseecom.qsee.ce_bus_clk = NULL;
8522
8523 qseecom.cumulative_mode = 0;
8524 qseecom.current_mode = INACTIVE;
8525 qseecom.support_bus_scaling = false;
8526 qseecom.support_fde = false;
8527 qseecom.support_pfe = false;
8528
8529 qseecom.ce_drv.ce_core_clk = NULL;
8530 qseecom.ce_drv.ce_clk = NULL;
8531 qseecom.ce_drv.ce_core_src_clk = NULL;
8532 qseecom.ce_drv.ce_bus_clk = NULL;
8533 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8534
8535 qseecom.app_block_ref_cnt = 0;
8536 init_waitqueue_head(&qseecom.app_block_wq);
8537 qseecom.whitelist_support = true;
8538
8539 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8540 if (rc < 0) {
8541 pr_err("alloc_chrdev_region failed %d\n", rc);
8542 return rc;
8543 }
8544
8545 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8546 if (IS_ERR(driver_class)) {
8547 rc = -ENOMEM;
8548 pr_err("class_create failed %d\n", rc);
8549 goto exit_unreg_chrdev_region;
8550 }
8551
8552 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8553 QSEECOM_DEV);
8554 if (IS_ERR(class_dev)) {
8555 pr_err("class_device_create failed %d\n", rc);
8556 rc = -ENOMEM;
8557 goto exit_destroy_class;
8558 }
8559
8560 cdev_init(&qseecom.cdev, &qseecom_fops);
8561 qseecom.cdev.owner = THIS_MODULE;
8562
8563 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8564 if (rc < 0) {
8565 pr_err("cdev_add failed %d\n", rc);
8566 goto exit_destroy_device;
8567 }
8568
8569 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
8570 spin_lock_init(&qseecom.registered_listener_list_lock);
8571 INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8572 spin_lock_init(&qseecom.registered_app_list_lock);
8573 INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8574 spin_lock_init(&qseecom.registered_kclient_list_lock);
8575 init_waitqueue_head(&qseecom.send_resp_wq);
8576 qseecom.send_resp_flag = 0;
8577
8578 qseecom.qsee_version = QSEEE_VERSION_00;
8579 rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8580 &resp, sizeof(resp));
8581 pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8582 if (rc) {
8583 pr_err("Failed to get QSEE version info %d\n", rc);
8584 goto exit_del_cdev;
8585 }
8586 qseecom.qsee_version = resp.result;
8587 qseecom.qseos_version = QSEOS_VERSION_14;
8588 qseecom.commonlib_loaded = false;
8589 qseecom.commonlib64_loaded = false;
8590 qseecom.pdev = class_dev;
8591 /* Create ION msm client */
8592 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8593 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8594 pr_err("Ion client cannot be created\n");
8595 rc = -ENOMEM;
8596 goto exit_del_cdev;
8597 }
8598
8599 /* register client for bus scaling */
8600 if (pdev->dev.of_node) {
8601 qseecom.pdev->of_node = pdev->dev.of_node;
8602 qseecom.support_bus_scaling =
8603 of_property_read_bool((&pdev->dev)->of_node,
8604 "qcom,support-bus-scaling");
8605 rc = qseecom_retrieve_ce_data(pdev);
8606 if (rc)
8607 goto exit_destroy_ion_client;
8608 qseecom.appsbl_qseecom_support =
8609 of_property_read_bool((&pdev->dev)->of_node,
8610 "qcom,appsbl-qseecom-support");
8611 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
8612 qseecom.appsbl_qseecom_support);
8613
8614 qseecom.commonlib64_loaded =
8615 of_property_read_bool((&pdev->dev)->of_node,
8616 "qcom,commonlib64-loaded-by-uefi");
8617 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
8618 qseecom.commonlib64_loaded);
8619 qseecom.fde_key_size =
8620 of_property_read_bool((&pdev->dev)->of_node,
8621 "qcom,fde-key-size");
8622 qseecom.no_clock_support =
8623 of_property_read_bool((&pdev->dev)->of_node,
8624 "qcom,no-clock-support");
8625 if (!qseecom.no_clock_support) {
8626 pr_info("qseecom clocks handled by other subsystem\n");
8627 } else {
8628 pr_info("no-clock-support=0x%x",
8629 qseecom.no_clock_support);
8630 }
8631
8632 if (of_property_read_u32((&pdev->dev)->of_node,
8633 "qcom,qsee-reentrancy-support",
8634 &qseecom.qsee_reentrancy_support)) {
8635 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
8636 qseecom.qsee_reentrancy_support = 0;
8637 } else {
8638 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
8639 qseecom.qsee_reentrancy_support);
8640 }
8641
8642 /*
8643 * The qseecom bus scaling flag can not be enabled when
8644 * crypto clock is not handled by HLOS.
8645 */
8646 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
8647 pr_err("support_bus_scaling flag can not be enabled.\n");
8648 rc = -EINVAL;
8649 goto exit_destroy_ion_client;
8650 }
8651
8652 if (of_property_read_u32((&pdev->dev)->of_node,
8653 "qcom,ce-opp-freq",
8654 &qseecom.ce_opp_freq_hz)) {
8655 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
8656 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
8657 }
8658 rc = __qseecom_init_clk(CLK_QSEE);
8659 if (rc)
8660 goto exit_destroy_ion_client;
8661
8662 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8663 (qseecom.support_pfe || qseecom.support_fde)) {
8664 rc = __qseecom_init_clk(CLK_CE_DRV);
8665 if (rc) {
8666 __qseecom_deinit_clk(CLK_QSEE);
8667 goto exit_destroy_ion_client;
8668 }
8669 } else {
8670 struct qseecom_clk *qclk;
8671
8672 qclk = &qseecom.qsee;
8673 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
8674 qseecom.ce_drv.ce_clk = qclk->ce_clk;
8675 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
8676 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
8677 }
8678
8679 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8680 msm_bus_cl_get_pdata(pdev);
8681 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
8682 (!qseecom.is_apps_region_protected &&
8683 !qseecom.appsbl_qseecom_support)) {
8684 struct resource *resource = NULL;
8685 struct qsee_apps_region_info_ireq req;
8686 struct qsee_apps_region_info_64bit_ireq req_64bit;
8687 struct qseecom_command_scm_resp resp;
8688 void *cmd_buf = NULL;
8689 size_t cmd_len;
8690
8691 resource = platform_get_resource_byname(pdev,
8692 IORESOURCE_MEM, "secapp-region");
8693 if (resource) {
8694 if (qseecom.qsee_version < QSEE_VERSION_40) {
8695 req.qsee_cmd_id =
8696 QSEOS_APP_REGION_NOTIFICATION;
8697 req.addr = (uint32_t)resource->start;
8698 req.size = resource_size(resource);
8699 cmd_buf = (void *)&req;
8700 cmd_len = sizeof(struct
8701 qsee_apps_region_info_ireq);
8702 pr_warn("secure app region addr=0x%x size=0x%x",
8703 req.addr, req.size);
8704 } else {
8705 req_64bit.qsee_cmd_id =
8706 QSEOS_APP_REGION_NOTIFICATION;
8707 req_64bit.addr = resource->start;
8708 req_64bit.size = resource_size(
8709 resource);
8710 cmd_buf = (void *)&req_64bit;
8711 cmd_len = sizeof(struct
8712 qsee_apps_region_info_64bit_ireq);
8713 pr_warn("secure app region addr=0x%llx size=0x%x",
8714 req_64bit.addr, req_64bit.size);
8715 }
8716 } else {
8717 pr_err("Fail to get secure app region info\n");
8718 rc = -EINVAL;
8719 goto exit_deinit_clock;
8720 }
8721 rc = __qseecom_enable_clk(CLK_QSEE);
8722 if (rc) {
8723 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
8724 rc = -EIO;
8725 goto exit_deinit_clock;
8726 }
8727 rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
8728 cmd_buf, cmd_len,
8729 &resp, sizeof(resp));
8730 __qseecom_disable_clk(CLK_QSEE);
8731 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
8732 pr_err("send secapp reg fail %d resp.res %d\n",
8733 rc, resp.result);
8734 rc = -EINVAL;
8735 goto exit_deinit_clock;
8736 }
8737 }
8738 /*
8739 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
8740 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
8741 * Pls add "qseecom.commonlib64_loaded = true" here too.
8742 */
8743 if (qseecom.is_apps_region_protected ||
8744 qseecom.appsbl_qseecom_support)
8745 qseecom.commonlib_loaded = true;
8746 } else {
8747 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8748 pdev->dev.platform_data;
8749 }
8750 if (qseecom.support_bus_scaling) {
8751 init_timer(&(qseecom.bw_scale_down_timer));
8752 INIT_WORK(&qseecom.bw_inactive_req_ws,
8753 qseecom_bw_inactive_req_work);
8754 qseecom.bw_scale_down_timer.function =
8755 qseecom_scale_bus_bandwidth_timer_callback;
8756 }
8757 qseecom.timer_running = false;
8758 qseecom.qsee_perf_client = msm_bus_scale_register_client(
8759 qseecom_platform_support);
8760
8761 qseecom.whitelist_support = qseecom_check_whitelist_feature();
8762 pr_warn("qseecom.whitelist_support = %d\n",
8763 qseecom.whitelist_support);
8764
8765 if (!qseecom.qsee_perf_client)
8766 pr_err("Unable to register bus client\n");
8767
8768 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8769 return 0;
8770
8771exit_deinit_clock:
8772 __qseecom_deinit_clk(CLK_QSEE);
8773 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8774 (qseecom.support_pfe || qseecom.support_fde))
8775 __qseecom_deinit_clk(CLK_CE_DRV);
8776exit_destroy_ion_client:
8777 if (qseecom.ce_info.fde) {
8778 pce_info_use = qseecom.ce_info.fde;
8779 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8780 kzfree(pce_info_use->ce_pipe_entry);
8781 pce_info_use++;
8782 }
8783 kfree(qseecom.ce_info.fde);
8784 }
8785 if (qseecom.ce_info.pfe) {
8786 pce_info_use = qseecom.ce_info.pfe;
8787 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8788 kzfree(pce_info_use->ce_pipe_entry);
8789 pce_info_use++;
8790 }
8791 kfree(qseecom.ce_info.pfe);
8792 }
8793 ion_client_destroy(qseecom.ion_clnt);
8794exit_del_cdev:
8795 cdev_del(&qseecom.cdev);
8796exit_destroy_device:
8797 device_destroy(driver_class, qseecom_device_no);
8798exit_destroy_class:
8799 class_destroy(driver_class);
8800exit_unreg_chrdev_region:
8801 unregister_chrdev_region(qseecom_device_no, 1);
8802 return rc;
8803}
8804
8805static int qseecom_remove(struct platform_device *pdev)
8806{
8807 struct qseecom_registered_kclient_list *kclient = NULL;
8808 unsigned long flags = 0;
8809 int ret = 0;
8810 int i;
8811 struct qseecom_ce_pipe_entry *pce_entry;
8812 struct qseecom_ce_info_use *pce_info_use;
8813
8814 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8815 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
8816
8817 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
8818 list) {
8819 if (!kclient)
8820 goto exit_irqrestore;
8821
8822 /* Break the loop if client handle is NULL */
8823 if (!kclient->handle)
8824 goto exit_free_kclient;
8825
8826 if (list_empty(&kclient->list))
8827 goto exit_free_kc_handle;
8828
8829 list_del(&kclient->list);
8830 mutex_lock(&app_access_lock);
8831 ret = qseecom_unload_app(kclient->handle->dev, false);
8832 mutex_unlock(&app_access_lock);
8833 if (!ret) {
8834 kzfree(kclient->handle->dev);
8835 kzfree(kclient->handle);
8836 kzfree(kclient);
8837 }
8838 }
8839
8840exit_free_kc_handle:
8841 kzfree(kclient->handle);
8842exit_free_kclient:
8843 kzfree(kclient);
8844exit_irqrestore:
8845 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
8846
8847 if (qseecom.qseos_version > QSEEE_VERSION_00)
8848 qseecom_unload_commonlib_image();
8849
8850 if (qseecom.qsee_perf_client)
8851 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
8852 0);
8853 if (pdev->dev.platform_data != NULL)
8854 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
8855
8856 if (qseecom.support_bus_scaling) {
8857 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8858 del_timer_sync(&qseecom.bw_scale_down_timer);
8859 }
8860
8861 if (qseecom.ce_info.fde) {
8862 pce_info_use = qseecom.ce_info.fde;
8863 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8864 pce_entry = pce_info_use->ce_pipe_entry;
8865 kfree(pce_entry);
8866 pce_info_use++;
8867 }
8868 }
8869 kfree(qseecom.ce_info.fde);
8870 if (qseecom.ce_info.pfe) {
8871 pce_info_use = qseecom.ce_info.pfe;
8872 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8873 pce_entry = pce_info_use->ce_pipe_entry;
8874 kfree(pce_entry);
8875 pce_info_use++;
8876 }
8877 }
8878 kfree(qseecom.ce_info.pfe);
8879
8880 /* register client for bus scaling */
8881 if (pdev->dev.of_node) {
8882 __qseecom_deinit_clk(CLK_QSEE);
8883 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8884 (qseecom.support_pfe || qseecom.support_fde))
8885 __qseecom_deinit_clk(CLK_CE_DRV);
8886 }
8887
8888 ion_client_destroy(qseecom.ion_clnt);
8889
8890 cdev_del(&qseecom.cdev);
8891
8892 device_destroy(driver_class, qseecom_device_no);
8893
8894 class_destroy(driver_class);
8895
8896 unregister_chrdev_region(qseecom_device_no, 1);
8897
8898 return ret;
8899}
8900
/*
 * qseecom_suspend() - legacy platform PM suspend hook.
 *
 * Marks the driver SUSPENDed, votes the bus down to INACTIVE, gates any
 * enabled CE clocks, and stops the bandwidth scale-down timer and its
 * worker.  Always returns 0 (suspend is never vetoed here).
 */
static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
	/* Nothing to quiesce when clocks/bus are owned by another entity. */
	if (qseecom.no_clock_support)
		return 0;

	/* Lock order (bw then clk) mirrors the rest of the driver. */
	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);

	if (qseecom.current_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, INACTIVE);
		if (ret)
			/* Log only; suspend proceeds regardless. */
			pr_err("Fail to scale down bus\n");
		else
			qseecom.current_mode = INACTIVE;
	}

	/* clk_access_cnt != 0 means the CE clocks are currently enabled. */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
		if (qclk->ce_core_clk != NULL)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
	}

	/* Stop the scale-down timer; resume re-arms it if needed. */
	del_timer_sync(&(qseecom.bw_scale_down_timer));
	qseecom.timer_running = false;

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	/* Flush the inactive-request worker after dropping the locks. */
	cancel_work_sync(&qseecom.bw_inactive_req_ws);

	return 0;
}
8941
/*
 * qseecom_resume() - legacy platform PM resume hook.
 *
 * Restores the pre-suspend bus vote (capped at HIGH), re-enables any CE
 * clocks that were active, re-arms the bandwidth scale-down timer, and
 * marks the driver READY again.  On clock failure the already-enabled
 * clocks are rolled back in reverse order and -EIO is returned; the state
 * is still set to READY.
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qseecom.no_clock_support)
		goto exit;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	/* Restore the cumulative vote, capped at HIGH. */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	if (qseecom.cumulative_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			/* Log only; clock restoration still proceeds. */
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	/*
	 * Re-enable clocks in core -> iface -> bus order; the goto chain
	 * below unwinds in exact reverse order on failure.
	 */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}

	/* Re-arm the scale-down timer that suspend stopped. */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;

ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	/* READY is set even on partial failure, matching prior behavior. */
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
9021
/* Device-tree match table: binds this driver to "qcom,qseecom" nodes. */
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}	/* sentinel */
};
9028
/* Platform driver hookup: probe/remove plus legacy suspend/resume PM ops. */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.owner = THIS_MODULE,
		.of_match_table = qseecom_match,
	},
};
9040
9041static int qseecom_init(void)
9042{
9043 return platform_driver_register(&qseecom_plat_driver);
9044}
9045
/* Module exit point: unregister the qseecom platform driver. */
static void qseecom_exit(void)
{
	platform_driver_unregister(&qseecom_plat_driver);
}
9050
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");

/* Standard module entry/exit hooks. */
module_init(qseecom_init);
module_exit(qseecom_exit);