AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
Mohamed Sunfeer4887bbc2020-01-16 13:26:38 +05304 * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
Zhen Kongc4c162a2019-01-23 12:07:12 -080053#include <linux/kthread.h>
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070054
55#define QSEECOM_DEV "qseecom"
56#define QSEOS_VERSION_14 0x14
57#define QSEEE_VERSION_00 0x400000
58#define QSEE_VERSION_01 0x401000
59#define QSEE_VERSION_02 0x402000
60#define QSEE_VERSION_03 0x403000
61#define QSEE_VERSION_04 0x404000
62#define QSEE_VERSION_05 0x405000
63#define QSEE_VERSION_20 0x800000
64#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
65
66#define QSEE_CE_CLK_100MHZ 100000000
67#define CE_CLK_DIV 1000000
68
Mohamed Sunfeer105a07b2018-08-29 13:52:40 +053069#define QSEECOM_MAX_SG_ENTRY 4096
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070070#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
71 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
72
73#define QSEECOM_INVALID_KEY_ID 0xff
74
75/* Save partition image hash for authentication check */
76#define SCM_SAVE_PARTITION_HASH_ID 0x01
77
 78/* Check if enterprise security is activated */
79#define SCM_IS_ACTIVATED_ID 0x02
80
81/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
82#define SCM_MDTP_CIPHER_DIP 0x01
83
84/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
85#define MAX_DIP 0x20000
86
87#define RPMB_SERVICE 0x2000
88#define SSD_SERVICE 0x3000
89
90#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
91#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
92#define TWO 2
93#define QSEECOM_UFS_ICE_CE_NUM 10
94#define QSEECOM_SDCC_ICE_CE_NUM 20
95#define QSEECOM_ICE_FDE_KEY_INDEX 0
96
97#define PHY_ADDR_4G (1ULL<<32)
98
99#define QSEECOM_STATE_NOT_READY 0
100#define QSEECOM_STATE_SUSPEND 1
101#define QSEECOM_STATE_READY 2
102#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
103
104/*
 105 * Default the ce info unit to 0 for
 106 * services which
 107 * support only a single instance.
 108 * Most services are in this category.
109 */
110#define DEFAULT_CE_INFO_UNIT 0
111#define DEFAULT_NUM_CE_INFO_UNIT 1
112
Jiten Patela7bb1d52018-05-11 12:34:26 +0530113#define FDE_FLAG_POS 4
114#define ENABLE_KEY_WRAP_IN_KS (1 << FDE_FLAG_POS)
115
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700116enum qseecom_clk_definitions {
117 CLK_DFAB = 0,
118 CLK_SFPB,
119};
120
121enum qseecom_ice_key_size_type {
122 QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
123 (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
124 QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
125 (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
126 QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
127 (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
128};
129
130enum qseecom_client_handle_type {
131 QSEECOM_CLIENT_APP = 1,
132 QSEECOM_LISTENER_SERVICE,
133 QSEECOM_SECURE_SERVICE,
134 QSEECOM_GENERIC,
135 QSEECOM_UNAVAILABLE_CLIENT_APP,
136};
137
138enum qseecom_ce_hw_instance {
139 CLK_QSEE = 0,
140 CLK_CE_DRV,
141 CLK_INVALID,
142};
143
Zhen Kongc4c162a2019-01-23 12:07:12 -0800144enum qseecom_listener_unregister_kthread_state {
145 LSNR_UNREG_KT_SLEEP = 0,
146 LSNR_UNREG_KT_WAKEUP,
147};
148
Zhen Kong03b2eae2019-09-17 16:58:46 -0700149enum qseecom_unload_app_kthread_state {
150 UNLOAD_APP_KT_SLEEP = 0,
151 UNLOAD_APP_KT_WAKEUP,
152};
153
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700154static struct class *driver_class;
155static dev_t qseecom_device_no;
156
157static DEFINE_MUTEX(qsee_bw_mutex);
158static DEFINE_MUTEX(app_access_lock);
159static DEFINE_MUTEX(clk_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -0800160static DEFINE_MUTEX(listener_access_lock);
Zhen Kong03b2eae2019-09-17 16:58:46 -0700161static DEFINE_MUTEX(unload_app_pending_list_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -0800162
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700163
164struct sglist_info {
165 uint32_t indexAndFlags;
166 uint32_t sizeOrCount;
167};
168
169/*
 170 * The 31st bit indicates whether one or multiple physical addresses are inside
171 * the request buffer. If it is set, the index locates a single physical addr
172 * inside the request buffer, and `sizeOrCount` is the size of the memory being
173 * shared at that physical address.
174 * Otherwise, the index locates an array of {start, len} pairs (a
175 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
176 * that array.
177 *
 178 * The 30th bit indicates a 64- or 32-bit address; when it is set, physical addr
179 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
180 *
181 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
182 */
183#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
184 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
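/*
 * Illustrative encodings (hypothetical offset values): a 64-bit
 * scatter/gather table at offset 0x100 of the request buffer would be
 * SGLISTINFO_SET_INDEX_FLAG(0, 1, 0x100) == 0x40000100, while a single
 * 64-bit physical address at the same offset would be
 * SGLISTINFO_SET_INDEX_FLAG(1, 1, 0x100) == 0xC0000100.
 */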
185
186#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
187
188#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
189
190#define MAKE_WHITELIST_VERSION(major, minor, patch) \
191 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
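/*
 * Illustrative example (hypothetical version numbers):
 * MAKE_WHITELIST_VERSION(1, 2, 3) packs to
 * (1 << 22) | (2 << 12) | 3 == 0x402003.
 */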
192
193struct qseecom_registered_listener_list {
194 struct list_head list;
195 struct qseecom_register_listener_req svc;
196 void *user_virt_sb_base;
197 u8 *sb_virt;
198 phys_addr_t sb_phys;
199 size_t sb_length;
200 struct ion_handle *ihandle; /* Retrieve phy addr */
201 wait_queue_head_t rcv_req_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800202 /* rcv_req_flag: 0: ready and empty; 1: received req */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700203 int rcv_req_flag;
204 int send_resp_flag;
205 bool listener_in_use;
206 /* wq for thread blocked on this listener*/
207 wait_queue_head_t listener_block_app_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800208 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
209 uint32_t sglist_cnt;
210 int abort;
211 bool unregister_pending;
212};
213
214struct qseecom_unregister_pending_list {
215 struct list_head list;
216 struct qseecom_dev_handle *data;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700217};
218
219struct qseecom_registered_app_list {
220 struct list_head list;
221 u32 app_id;
222 u32 ref_cnt;
223 char app_name[MAX_APP_NAME_SIZE];
224 u32 app_arch;
225 bool app_blocked;
Zhen Kongdea10592018-07-30 17:50:10 -0700226 u32 check_block;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700227 u32 blocked_on_listener_id;
228};
229
230struct qseecom_registered_kclient_list {
231 struct list_head list;
232 struct qseecom_handle *handle;
233};
234
235struct qseecom_ce_info_use {
236 unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
237 unsigned int unit_num;
238 unsigned int num_ce_pipe_entries;
239 struct qseecom_ce_pipe_entry *ce_pipe_entry;
240 bool alloc;
241 uint32_t type;
242};
243
244struct ce_hw_usage_info {
245 uint32_t qsee_ce_hw_instance;
246 uint32_t num_fde;
247 struct qseecom_ce_info_use *fde;
248 uint32_t num_pfe;
249 struct qseecom_ce_info_use *pfe;
250};
251
252struct qseecom_clk {
253 enum qseecom_ce_hw_instance instance;
254 struct clk *ce_core_clk;
255 struct clk *ce_clk;
256 struct clk *ce_core_src_clk;
257 struct clk *ce_bus_clk;
258 uint32_t clk_access_cnt;
259};
260
261struct qseecom_control {
262 struct ion_client *ion_clnt; /* Ion client */
263 struct list_head registered_listener_list_head;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700264
265 struct list_head registered_app_list_head;
266 spinlock_t registered_app_list_lock;
267
268 struct list_head registered_kclient_list_head;
269 spinlock_t registered_kclient_list_lock;
270
271 wait_queue_head_t send_resp_wq;
272 int send_resp_flag;
273
274 uint32_t qseos_version;
275 uint32_t qsee_version;
276 struct device *pdev;
277 bool whitelist_support;
278 bool commonlib_loaded;
279 bool commonlib64_loaded;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700280 struct ce_hw_usage_info ce_info;
281
282 int qsee_bw_count;
283 int qsee_sfpb_bw_count;
284
285 uint32_t qsee_perf_client;
286 struct qseecom_clk qsee;
287 struct qseecom_clk ce_drv;
288
289 bool support_bus_scaling;
290 bool support_fde;
291 bool support_pfe;
292 bool fde_key_size;
293 uint32_t cumulative_mode;
294 enum qseecom_bandwidth_request_mode current_mode;
295 struct timer_list bw_scale_down_timer;
296 struct work_struct bw_inactive_req_ws;
297 struct cdev cdev;
298 bool timer_running;
299 bool no_clock_support;
300 unsigned int ce_opp_freq_hz;
301 bool appsbl_qseecom_support;
302 uint32_t qsee_reentrancy_support;
Jiten Patela7bb1d52018-05-11 12:34:26 +0530303 bool enable_key_wrap_in_ks;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700304
305 uint32_t app_block_ref_cnt;
306 wait_queue_head_t app_block_wq;
307 atomic_t qseecom_state;
308 int is_apps_region_protected;
Zhen Kong2f60f492017-06-29 15:22:14 -0700309 bool smcinvoke_support;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800310
311 struct list_head unregister_lsnr_pending_list_head;
312 wait_queue_head_t register_lsnr_pending_wq;
Zhen Kongc4c162a2019-01-23 12:07:12 -0800313 struct task_struct *unregister_lsnr_kthread_task;
314 wait_queue_head_t unregister_lsnr_kthread_wq;
315 atomic_t unregister_lsnr_kthread_state;
Zhen Kong03b2eae2019-09-17 16:58:46 -0700316
317 struct list_head unload_app_pending_list_head;
318 struct task_struct *unload_app_kthread_task;
319 wait_queue_head_t unload_app_kthread_wq;
320 atomic_t unload_app_kthread_state;
321};
322
323struct qseecom_unload_app_pending_list {
324 struct list_head list;
325 struct qseecom_dev_handle *data;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700326};
327
328struct qseecom_sec_buf_fd_info {
329 bool is_sec_buf_fd;
330 size_t size;
331 void *vbase;
332 dma_addr_t pbase;
333};
334
335struct qseecom_param_memref {
336 uint32_t buffer;
337 uint32_t size;
338};
339
340struct qseecom_client_handle {
341 u32 app_id;
342 u8 *sb_virt;
343 phys_addr_t sb_phys;
344 unsigned long user_virt_sb_base;
345 size_t sb_length;
346 struct ion_handle *ihandle; /* Retrieve phy addr */
347 char app_name[MAX_APP_NAME_SIZE];
348 u32 app_arch;
349 struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
Zhen Kong0ea975d2019-03-12 14:40:24 -0700350 bool from_smcinvoke;
Zhen Kong03b2eae2019-09-17 16:58:46 -0700351 bool unload_pending;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700352};
353
354struct qseecom_listener_handle {
355 u32 id;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800356 bool unregister_pending;
Zhen Kong87dcf0e2019-01-04 12:34:50 -0800357 bool release_called;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700358};
359
360static struct qseecom_control qseecom;
361
362struct qseecom_dev_handle {
363 enum qseecom_client_handle_type type;
364 union {
365 struct qseecom_client_handle client;
366 struct qseecom_listener_handle listener;
367 };
368 bool released;
369 int abort;
370 wait_queue_head_t abort_wq;
371 atomic_t ioctl_count;
372 bool perf_enabled;
373 bool fast_load_enabled;
374 enum qseecom_bandwidth_request_mode mode;
375 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
376 uint32_t sglist_cnt;
377 bool use_legacy_cmd;
378};
379
380struct qseecom_key_id_usage_desc {
381 uint8_t desc[QSEECOM_KEY_ID_SIZE];
382};
383
384struct qseecom_crypto_info {
385 unsigned int unit_num;
386 unsigned int ce;
387 unsigned int pipe_pair;
388};
389
390static struct qseecom_key_id_usage_desc key_id_array[] = {
391 {
392 .desc = "Undefined Usage Index",
393 },
394
395 {
396 .desc = "Full Disk Encryption",
397 },
398
399 {
400 .desc = "Per File Encryption",
401 },
402
403 {
404 .desc = "UFS ICE Full Disk Encryption",
405 },
406
407 {
408 .desc = "SDCC ICE Full Disk Encryption",
409 },
410};
411
412/* Function proto types */
413static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
414static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
415static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
416static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
417static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
418static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
419 char *cmnlib_name);
420static int qseecom_enable_ice_setup(int usage);
421static int qseecom_disable_ice_setup(int usage);
422static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
423static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
424 void __user *argp);
425static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
426 void __user *argp);
427static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
428 void __user *argp);
Zhen Kong03b2eae2019-09-17 16:58:46 -0700429static int __qseecom_unload_app(struct qseecom_dev_handle *data,
430 uint32_t app_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700431
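/*
 * Parse the "androidboot.keymaster=" boot parameter into
 * qseecom.is_apps_region_protected.
 */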
432static int get_qseecom_keymaster_status(char *str)
433{
434 get_option(&str, &qseecom.is_apps_region_protected);
435 return 1;
436}
437__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
438
Zhen Kong03f220d2019-02-01 17:12:34 -0800439
440#define QSEECOM_SCM_EBUSY_WAIT_MS 30
441#define QSEECOM_SCM_EBUSY_MAX_RETRY 67
442
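/*
 * scm_call2_noretry() wrapper that handles -EBUSY itself: app_access_lock
 * is dropped across a 30 ms sleep (QSEECOM_SCM_EBUSY_WAIT_MS) and the call
 * is retried up to QSEECOM_SCM_EBUSY_MAX_RETRY (67) times, roughly two
 * seconds in total, with a warning after about one second. When bus
 * scaling is supported, the local patch revert below falls back to a
 * plain scm_call2().
 */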
443static int __qseecom_scm_call2_locked(uint32_t smc_id, struct scm_desc *desc)
444{
445 int ret = 0;
446 int retry_count = 0;
shermanwei1248c7e2020-06-15 10:20:27 +0800447///<<20200615 revert Qualcomm patch:CR# 2402609 and 2478458
448 if (qseecom.support_bus_scaling)
449 return scm_call2(smc_id, desc);
450///>>20200615 revert Qualcomm patch
Zhen Kong03f220d2019-02-01 17:12:34 -0800451
452 do {
453 ret = scm_call2_noretry(smc_id, desc);
454 if (ret == -EBUSY) {
455 mutex_unlock(&app_access_lock);
456 msleep(QSEECOM_SCM_EBUSY_WAIT_MS);
457 mutex_lock(&app_access_lock);
458 }
459 if (retry_count == 33)
460 pr_warn("secure world has been busy for 1 second!\n");
461 } while (ret == -EBUSY &&
462 (retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY));
463 return ret;
464}
465
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700466static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
467 const void *req_buf, void *resp_buf)
468{
469 int ret = 0;
470 uint32_t smc_id = 0;
471 uint32_t qseos_cmd_id = 0;
472 struct scm_desc desc = {0};
473 struct qseecom_command_scm_resp *scm_resp = NULL;
474
475 if (!req_buf || !resp_buf) {
476 pr_err("Invalid buffer pointer\n");
477 return -EINVAL;
478 }
479 qseos_cmd_id = *(uint32_t *)req_buf;
480 scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
481
482 switch (svc_id) {
483 case 6: {
484 if (tz_cmd_id == 3) {
485 smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
486 desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
487 desc.args[0] = *(uint32_t *)req_buf;
488 } else {
489 pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
490 svc_id, tz_cmd_id);
491 return -EINVAL;
492 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800493 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700494 break;
495 }
496 case SCM_SVC_ES: {
497 switch (tz_cmd_id) {
498 case SCM_SAVE_PARTITION_HASH_ID: {
499 u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
500 struct qseecom_save_partition_hash_req *p_hash_req =
501 (struct qseecom_save_partition_hash_req *)
502 req_buf;
503 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
504
505 if (!tzbuf)
506 return -ENOMEM;
507 memset(tzbuf, 0, tzbuflen);
508 memcpy(tzbuf, p_hash_req->digest,
509 SHA256_DIGEST_LENGTH);
510 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
511 smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
512 desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
513 desc.args[0] = p_hash_req->partition_id;
514 desc.args[1] = virt_to_phys(tzbuf);
515 desc.args[2] = SHA256_DIGEST_LENGTH;
Zhen Kong03f220d2019-02-01 17:12:34 -0800516 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700517 kzfree(tzbuf);
518 break;
519 }
520 default: {
521 pr_err("tz_cmd_id %d is not supported by scm_call2\n",
522 tz_cmd_id);
523 ret = -EINVAL;
524 break;
525 }
526 } /* end of switch (tz_cmd_id) */
527 break;
528 } /* end of case SCM_SVC_ES */
529 case SCM_SVC_TZSCHEDULER: {
530 switch (qseos_cmd_id) {
531 case QSEOS_APP_START_COMMAND: {
532 struct qseecom_load_app_ireq *req;
533 struct qseecom_load_app_64bit_ireq *req_64bit;
534
535 smc_id = TZ_OS_APP_START_ID;
536 desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
537 if (qseecom.qsee_version < QSEE_VERSION_40) {
538 req = (struct qseecom_load_app_ireq *)req_buf;
539 desc.args[0] = req->mdt_len;
540 desc.args[1] = req->img_len;
541 desc.args[2] = req->phy_addr;
542 } else {
543 req_64bit =
544 (struct qseecom_load_app_64bit_ireq *)
545 req_buf;
546 desc.args[0] = req_64bit->mdt_len;
547 desc.args[1] = req_64bit->img_len;
548 desc.args[2] = req_64bit->phy_addr;
549 }
550 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800551 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700552 break;
553 }
554 case QSEOS_APP_SHUTDOWN_COMMAND: {
555 struct qseecom_unload_app_ireq *req;
556
557 req = (struct qseecom_unload_app_ireq *)req_buf;
558 smc_id = TZ_OS_APP_SHUTDOWN_ID;
559 desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
560 desc.args[0] = req->app_id;
Zhen Kongaf127672019-06-10 13:06:41 -0700561 ret = scm_call2(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700562 break;
563 }
564 case QSEOS_APP_LOOKUP_COMMAND: {
565 struct qseecom_check_app_ireq *req;
566 u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
567 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
568
569 if (!tzbuf)
570 return -ENOMEM;
571 req = (struct qseecom_check_app_ireq *)req_buf;
572 pr_debug("Lookup app_name = %s\n", req->app_name);
573 strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
574 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
575 smc_id = TZ_OS_APP_LOOKUP_ID;
576 desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
577 desc.args[0] = virt_to_phys(tzbuf);
578 desc.args[1] = strlen(req->app_name);
579 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800580 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700581 kzfree(tzbuf);
582 break;
583 }
584 case QSEOS_APP_REGION_NOTIFICATION: {
585 struct qsee_apps_region_info_ireq *req;
586 struct qsee_apps_region_info_64bit_ireq *req_64bit;
587
588 smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
589 desc.arginfo =
590 TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
591 if (qseecom.qsee_version < QSEE_VERSION_40) {
592 req = (struct qsee_apps_region_info_ireq *)
593 req_buf;
594 desc.args[0] = req->addr;
595 desc.args[1] = req->size;
596 } else {
597 req_64bit =
598 (struct qsee_apps_region_info_64bit_ireq *)
599 req_buf;
600 desc.args[0] = req_64bit->addr;
601 desc.args[1] = req_64bit->size;
602 }
603 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800604 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700605 break;
606 }
607 case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
608 struct qseecom_load_lib_image_ireq *req;
609 struct qseecom_load_lib_image_64bit_ireq *req_64bit;
610
611 smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
612 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
613 if (qseecom.qsee_version < QSEE_VERSION_40) {
614 req = (struct qseecom_load_lib_image_ireq *)
615 req_buf;
616 desc.args[0] = req->mdt_len;
617 desc.args[1] = req->img_len;
618 desc.args[2] = req->phy_addr;
619 } else {
620 req_64bit =
621 (struct qseecom_load_lib_image_64bit_ireq *)
622 req_buf;
623 desc.args[0] = req_64bit->mdt_len;
624 desc.args[1] = req_64bit->img_len;
625 desc.args[2] = req_64bit->phy_addr;
626 }
627 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800628 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700629 break;
630 }
631 case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
632 smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
633 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
634 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800635 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700636 break;
637 }
638 case QSEOS_REGISTER_LISTENER: {
639 struct qseecom_register_listener_ireq *req;
640 struct qseecom_register_listener_64bit_ireq *req_64bit;
641
642 desc.arginfo =
643 TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
644 if (qseecom.qsee_version < QSEE_VERSION_40) {
645 req = (struct qseecom_register_listener_ireq *)
646 req_buf;
647 desc.args[0] = req->listener_id;
648 desc.args[1] = req->sb_ptr;
649 desc.args[2] = req->sb_len;
650 } else {
651 req_64bit =
652 (struct qseecom_register_listener_64bit_ireq *)
653 req_buf;
654 desc.args[0] = req_64bit->listener_id;
655 desc.args[1] = req_64bit->sb_ptr;
656 desc.args[2] = req_64bit->sb_len;
657 }
Zhen Kong2f60f492017-06-29 15:22:14 -0700658 qseecom.smcinvoke_support = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700659 smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
Zhen Kong03f220d2019-02-01 17:12:34 -0800660 ret = __qseecom_scm_call2_locked(smc_id, &desc);
Zhen Kong50a15202019-01-29 14:16:00 -0800661 if (ret == -EIO) {
662 /* smcinvoke is not supported */
Zhen Kong2f60f492017-06-29 15:22:14 -0700663 qseecom.smcinvoke_support = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700664 smc_id = TZ_OS_REGISTER_LISTENER_ID;
Zhen Kong03f220d2019-02-01 17:12:34 -0800665 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700666 }
667 break;
668 }
669 case QSEOS_DEREGISTER_LISTENER: {
670 struct qseecom_unregister_listener_ireq *req;
671
672 req = (struct qseecom_unregister_listener_ireq *)
673 req_buf;
674 smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
675 desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
676 desc.args[0] = req->listener_id;
Zhen Kong03f220d2019-02-01 17:12:34 -0800677 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700678 break;
679 }
680 case QSEOS_LISTENER_DATA_RSP_COMMAND: {
681 struct qseecom_client_listener_data_irsp *req;
682
683 req = (struct qseecom_client_listener_data_irsp *)
684 req_buf;
685 smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
686 desc.arginfo =
687 TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
688 desc.args[0] = req->listener_id;
689 desc.args[1] = req->status;
Zhen Kong03f220d2019-02-01 17:12:34 -0800690 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700691 break;
692 }
693 case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
694 struct qseecom_client_listener_data_irsp *req;
695 struct qseecom_client_listener_data_64bit_irsp *req_64;
696
697 smc_id =
698 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
699 desc.arginfo =
700 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
701 if (qseecom.qsee_version < QSEE_VERSION_40) {
702 req =
703 (struct qseecom_client_listener_data_irsp *)
704 req_buf;
705 desc.args[0] = req->listener_id;
706 desc.args[1] = req->status;
707 desc.args[2] = req->sglistinfo_ptr;
708 desc.args[3] = req->sglistinfo_len;
709 } else {
710 req_64 =
711 (struct qseecom_client_listener_data_64bit_irsp *)
712 req_buf;
713 desc.args[0] = req_64->listener_id;
714 desc.args[1] = req_64->status;
715 desc.args[2] = req_64->sglistinfo_ptr;
716 desc.args[3] = req_64->sglistinfo_len;
717 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800718 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700719 break;
720 }
721 case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
722 struct qseecom_load_app_ireq *req;
723 struct qseecom_load_app_64bit_ireq *req_64bit;
724
725 smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
726 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
727 if (qseecom.qsee_version < QSEE_VERSION_40) {
728 req = (struct qseecom_load_app_ireq *)req_buf;
729 desc.args[0] = req->mdt_len;
730 desc.args[1] = req->img_len;
731 desc.args[2] = req->phy_addr;
732 } else {
733 req_64bit =
734 (struct qseecom_load_app_64bit_ireq *)req_buf;
735 desc.args[0] = req_64bit->mdt_len;
736 desc.args[1] = req_64bit->img_len;
737 desc.args[2] = req_64bit->phy_addr;
738 }
739 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800740 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700741 break;
742 }
743 case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
744 smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
745 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
746 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800747 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700748 break;
749 }
750
751 case QSEOS_CLIENT_SEND_DATA_COMMAND: {
752 struct qseecom_client_send_data_ireq *req;
753 struct qseecom_client_send_data_64bit_ireq *req_64bit;
754
755 smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
756 desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
757 if (qseecom.qsee_version < QSEE_VERSION_40) {
758 req = (struct qseecom_client_send_data_ireq *)
759 req_buf;
760 desc.args[0] = req->app_id;
761 desc.args[1] = req->req_ptr;
762 desc.args[2] = req->req_len;
763 desc.args[3] = req->rsp_ptr;
764 desc.args[4] = req->rsp_len;
765 } else {
766 req_64bit =
767 (struct qseecom_client_send_data_64bit_ireq *)
768 req_buf;
769 desc.args[0] = req_64bit->app_id;
770 desc.args[1] = req_64bit->req_ptr;
771 desc.args[2] = req_64bit->req_len;
772 desc.args[3] = req_64bit->rsp_ptr;
773 desc.args[4] = req_64bit->rsp_len;
774 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800775 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700776 break;
777 }
778 case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
779 struct qseecom_client_send_data_ireq *req;
780 struct qseecom_client_send_data_64bit_ireq *req_64bit;
781
782 smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
783 desc.arginfo =
784 TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
785 if (qseecom.qsee_version < QSEE_VERSION_40) {
786 req = (struct qseecom_client_send_data_ireq *)
787 req_buf;
788 desc.args[0] = req->app_id;
789 desc.args[1] = req->req_ptr;
790 desc.args[2] = req->req_len;
791 desc.args[3] = req->rsp_ptr;
792 desc.args[4] = req->rsp_len;
793 desc.args[5] = req->sglistinfo_ptr;
794 desc.args[6] = req->sglistinfo_len;
795 } else {
796 req_64bit =
797 (struct qseecom_client_send_data_64bit_ireq *)
798 req_buf;
799 desc.args[0] = req_64bit->app_id;
800 desc.args[1] = req_64bit->req_ptr;
801 desc.args[2] = req_64bit->req_len;
802 desc.args[3] = req_64bit->rsp_ptr;
803 desc.args[4] = req_64bit->rsp_len;
804 desc.args[5] = req_64bit->sglistinfo_ptr;
805 desc.args[6] = req_64bit->sglistinfo_len;
806 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800807 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700808 break;
809 }
810 case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
811 struct qseecom_client_send_service_ireq *req;
812
813 req = (struct qseecom_client_send_service_ireq *)
814 req_buf;
815 smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
816 desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
817 desc.args[0] = req->key_type;
818 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800819 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700820 break;
821 }
822 case QSEOS_RPMB_ERASE_COMMAND: {
823 smc_id = TZ_OS_RPMB_ERASE_ID;
824 desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
825 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800826 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700827 break;
828 }
829 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
830 smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
831 desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
832 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800833 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700834 break;
835 }
836 case QSEOS_GENERATE_KEY: {
837 u32 tzbuflen = PAGE_ALIGN(sizeof
838 (struct qseecom_key_generate_ireq) -
839 sizeof(uint32_t));
840 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
841
842 if (!tzbuf)
843 return -ENOMEM;
844 memset(tzbuf, 0, tzbuflen);
845 memcpy(tzbuf, req_buf + sizeof(uint32_t),
846 (sizeof(struct qseecom_key_generate_ireq) -
847 sizeof(uint32_t)));
848 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
849 smc_id = TZ_OS_KS_GEN_KEY_ID;
850 desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
851 desc.args[0] = virt_to_phys(tzbuf);
852 desc.args[1] = tzbuflen;
853 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800854 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700855 kzfree(tzbuf);
856 break;
857 }
858 case QSEOS_DELETE_KEY: {
859 u32 tzbuflen = PAGE_ALIGN(sizeof
860 (struct qseecom_key_delete_ireq) -
861 sizeof(uint32_t));
862 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
863
864 if (!tzbuf)
865 return -ENOMEM;
866 memset(tzbuf, 0, tzbuflen);
867 memcpy(tzbuf, req_buf + sizeof(uint32_t),
868 (sizeof(struct qseecom_key_delete_ireq) -
869 sizeof(uint32_t)));
870 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
871 smc_id = TZ_OS_KS_DEL_KEY_ID;
872 desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
873 desc.args[0] = virt_to_phys(tzbuf);
874 desc.args[1] = tzbuflen;
875 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800876 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700877 kzfree(tzbuf);
878 break;
879 }
880 case QSEOS_SET_KEY: {
881 u32 tzbuflen = PAGE_ALIGN(sizeof
882 (struct qseecom_key_select_ireq) -
883 sizeof(uint32_t));
884 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
885
886 if (!tzbuf)
887 return -ENOMEM;
888 memset(tzbuf, 0, tzbuflen);
889 memcpy(tzbuf, req_buf + sizeof(uint32_t),
890 (sizeof(struct qseecom_key_select_ireq) -
891 sizeof(uint32_t)));
892 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
893 smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
894 desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
895 desc.args[0] = virt_to_phys(tzbuf);
896 desc.args[1] = tzbuflen;
897 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800898 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700899 kzfree(tzbuf);
900 break;
901 }
902 case QSEOS_UPDATE_KEY_USERINFO: {
903 u32 tzbuflen = PAGE_ALIGN(sizeof
904 (struct qseecom_key_userinfo_update_ireq) -
905 sizeof(uint32_t));
906 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
907
908 if (!tzbuf)
909 return -ENOMEM;
910 memset(tzbuf, 0, tzbuflen);
911 memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
912 (struct qseecom_key_userinfo_update_ireq) -
913 sizeof(uint32_t)));
914 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
915 smc_id = TZ_OS_KS_UPDATE_KEY_ID;
916 desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
917 desc.args[0] = virt_to_phys(tzbuf);
918 desc.args[1] = tzbuflen;
919 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800920 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700921 kzfree(tzbuf);
922 break;
923 }
924 case QSEOS_TEE_OPEN_SESSION: {
925 struct qseecom_qteec_ireq *req;
926 struct qseecom_qteec_64bit_ireq *req_64bit;
927
928 smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
929 desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
930 if (qseecom.qsee_version < QSEE_VERSION_40) {
931 req = (struct qseecom_qteec_ireq *)req_buf;
932 desc.args[0] = req->app_id;
933 desc.args[1] = req->req_ptr;
934 desc.args[2] = req->req_len;
935 desc.args[3] = req->resp_ptr;
936 desc.args[4] = req->resp_len;
937 } else {
938 req_64bit = (struct qseecom_qteec_64bit_ireq *)
939 req_buf;
940 desc.args[0] = req_64bit->app_id;
941 desc.args[1] = req_64bit->req_ptr;
942 desc.args[2] = req_64bit->req_len;
943 desc.args[3] = req_64bit->resp_ptr;
944 desc.args[4] = req_64bit->resp_len;
945 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800946 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700947 break;
948 }
949 case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
950 struct qseecom_qteec_ireq *req;
951 struct qseecom_qteec_64bit_ireq *req_64bit;
952
953 smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
954 desc.arginfo =
955 TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
956 if (qseecom.qsee_version < QSEE_VERSION_40) {
957 req = (struct qseecom_qteec_ireq *)req_buf;
958 desc.args[0] = req->app_id;
959 desc.args[1] = req->req_ptr;
960 desc.args[2] = req->req_len;
961 desc.args[3] = req->resp_ptr;
962 desc.args[4] = req->resp_len;
963 desc.args[5] = req->sglistinfo_ptr;
964 desc.args[6] = req->sglistinfo_len;
965 } else {
966 req_64bit = (struct qseecom_qteec_64bit_ireq *)
967 req_buf;
968 desc.args[0] = req_64bit->app_id;
969 desc.args[1] = req_64bit->req_ptr;
970 desc.args[2] = req_64bit->req_len;
971 desc.args[3] = req_64bit->resp_ptr;
972 desc.args[4] = req_64bit->resp_len;
973 desc.args[5] = req_64bit->sglistinfo_ptr;
974 desc.args[6] = req_64bit->sglistinfo_len;
975 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800976 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700977 break;
978 }
979 case QSEOS_TEE_INVOKE_COMMAND: {
980 struct qseecom_qteec_ireq *req;
981 struct qseecom_qteec_64bit_ireq *req_64bit;
982
983 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
984 desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
985 if (qseecom.qsee_version < QSEE_VERSION_40) {
986 req = (struct qseecom_qteec_ireq *)req_buf;
987 desc.args[0] = req->app_id;
988 desc.args[1] = req->req_ptr;
989 desc.args[2] = req->req_len;
990 desc.args[3] = req->resp_ptr;
991 desc.args[4] = req->resp_len;
992 } else {
993 req_64bit = (struct qseecom_qteec_64bit_ireq *)
994 req_buf;
995 desc.args[0] = req_64bit->app_id;
996 desc.args[1] = req_64bit->req_ptr;
997 desc.args[2] = req_64bit->req_len;
998 desc.args[3] = req_64bit->resp_ptr;
999 desc.args[4] = req_64bit->resp_len;
1000 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001001 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001002 break;
1003 }
1004 case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
1005 struct qseecom_qteec_ireq *req;
1006 struct qseecom_qteec_64bit_ireq *req_64bit;
1007
1008 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
1009 desc.arginfo =
1010 TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
1011 if (qseecom.qsee_version < QSEE_VERSION_40) {
1012 req = (struct qseecom_qteec_ireq *)req_buf;
1013 desc.args[0] = req->app_id;
1014 desc.args[1] = req->req_ptr;
1015 desc.args[2] = req->req_len;
1016 desc.args[3] = req->resp_ptr;
1017 desc.args[4] = req->resp_len;
1018 desc.args[5] = req->sglistinfo_ptr;
1019 desc.args[6] = req->sglistinfo_len;
1020 } else {
1021 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1022 req_buf;
1023 desc.args[0] = req_64bit->app_id;
1024 desc.args[1] = req_64bit->req_ptr;
1025 desc.args[2] = req_64bit->req_len;
1026 desc.args[3] = req_64bit->resp_ptr;
1027 desc.args[4] = req_64bit->resp_len;
1028 desc.args[5] = req_64bit->sglistinfo_ptr;
1029 desc.args[6] = req_64bit->sglistinfo_len;
1030 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001031 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001032 break;
1033 }
1034 case QSEOS_TEE_CLOSE_SESSION: {
1035 struct qseecom_qteec_ireq *req;
1036 struct qseecom_qteec_64bit_ireq *req_64bit;
1037
1038 smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
1039 desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
1040 if (qseecom.qsee_version < QSEE_VERSION_40) {
1041 req = (struct qseecom_qteec_ireq *)req_buf;
1042 desc.args[0] = req->app_id;
1043 desc.args[1] = req->req_ptr;
1044 desc.args[2] = req->req_len;
1045 desc.args[3] = req->resp_ptr;
1046 desc.args[4] = req->resp_len;
1047 } else {
1048 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1049 req_buf;
1050 desc.args[0] = req_64bit->app_id;
1051 desc.args[1] = req_64bit->req_ptr;
1052 desc.args[2] = req_64bit->req_len;
1053 desc.args[3] = req_64bit->resp_ptr;
1054 desc.args[4] = req_64bit->resp_len;
1055 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001056 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001057 break;
1058 }
1059 case QSEOS_TEE_REQUEST_CANCELLATION: {
1060 struct qseecom_qteec_ireq *req;
1061 struct qseecom_qteec_64bit_ireq *req_64bit;
1062
1063 smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
1064 desc.arginfo =
1065 TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
1066 if (qseecom.qsee_version < QSEE_VERSION_40) {
1067 req = (struct qseecom_qteec_ireq *)req_buf;
1068 desc.args[0] = req->app_id;
1069 desc.args[1] = req->req_ptr;
1070 desc.args[2] = req->req_len;
1071 desc.args[3] = req->resp_ptr;
1072 desc.args[4] = req->resp_len;
1073 } else {
1074 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1075 req_buf;
1076 desc.args[0] = req_64bit->app_id;
1077 desc.args[1] = req_64bit->req_ptr;
1078 desc.args[2] = req_64bit->req_len;
1079 desc.args[3] = req_64bit->resp_ptr;
1080 desc.args[4] = req_64bit->resp_len;
1081 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001082 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001083 break;
1084 }
1085 case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
1086 struct qseecom_continue_blocked_request_ireq *req =
1087 (struct qseecom_continue_blocked_request_ireq *)
1088 req_buf;
Zhen Kong2f60f492017-06-29 15:22:14 -07001089 if (qseecom.smcinvoke_support)
1090 smc_id =
1091 TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
1092 else
1093 smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001094 desc.arginfo =
1095 TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
Zhen Kong2f60f492017-06-29 15:22:14 -07001096 desc.args[0] = req->app_or_session_id;
Zhen Kong03f220d2019-02-01 17:12:34 -08001097 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001098 break;
1099 }
1100 default: {
1101 pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
1102 qseos_cmd_id);
1103 ret = -EINVAL;
1104 break;
1105 }
1106 } /*end of switch (qsee_cmd_id) */
1107 break;
1108 } /*end of case SCM_SVC_TZSCHEDULER*/
1109 default: {
1110 pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
1111 svc_id);
1112 ret = -EINVAL;
1113 break;
1114 }
1115 } /*end of switch svc_id */
1116 scm_resp->result = desc.ret[0];
1117 scm_resp->resp_type = desc.ret[1];
1118 scm_resp->data = desc.ret[2];
1119 pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
1120 svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
1121 pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
1122 scm_resp->result, scm_resp->resp_type, scm_resp->data);
1123 return ret;
1124}
1125
1126
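/*
 * Dispatch helper: legacy (non-ARMv8) targets use the two-buffer
 * scm_call() interface, while ARMv8 targets are translated into SMC
 * calls by qseecom_scm_call2() above.
 */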
1127static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1128 size_t cmd_len, void *resp_buf, size_t resp_len)
1129{
1130 if (!is_scm_armv8())
1131 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1132 resp_buf, resp_len);
1133 else
1134 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1135}
1136
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001137static struct qseecom_registered_listener_list *__qseecom_find_svc(
1138 int32_t listener_id)
1139{
1140 struct qseecom_registered_listener_list *entry = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001141
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001142 list_for_each_entry(entry,
1143 &qseecom.registered_listener_list_head, list) {
1144 if (entry->svc.listener_id == listener_id)
1145 break;
1146 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001147 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001148 pr_debug("Service id: %u is not found\n", listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001149 return NULL;
1150 }
1151
1152 return entry;
1153}
1154
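/*
 * Set up the listener's shared buffer: import the ION fd, resolve its
 * physical address, map it into the kernel, then register it with the
 * secure OS via QSEOS_REGISTER_LISTENER. listener_access_lock is dropped
 * and app_access_lock taken around the SCM call.
 */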
1155static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1156 struct qseecom_dev_handle *handle,
1157 struct qseecom_register_listener_req *listener)
1158{
1159 int ret = 0;
1160 struct qseecom_register_listener_ireq req;
1161 struct qseecom_register_listener_64bit_ireq req_64bit;
1162 struct qseecom_command_scm_resp resp;
1163 ion_phys_addr_t pa;
1164 void *cmd_buf = NULL;
1165 size_t cmd_len;
1166
1167 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001168 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001169 listener->ifd_data_fd);
1170 if (IS_ERR_OR_NULL(svc->ihandle)) {
1171 pr_err("Ion client could not retrieve the handle\n");
1172 return -ENOMEM;
1173 }
1174
1175 /* Get the physical address of the ION BUF */
1176 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1177 if (ret) {
1178 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1179 ret);
1180 return ret;
1181 }
1182 /* Populate the structure for sending scm call to load image */
1183 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1184 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1185 pr_err("ION memory mapping for listener shared buffer failed\n");
1186 return -ENOMEM;
1187 }
1188 svc->sb_phys = (phys_addr_t)pa;
1189
1190 if (qseecom.qsee_version < QSEE_VERSION_40) {
1191 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1192 req.listener_id = svc->svc.listener_id;
1193 req.sb_len = svc->sb_length;
1194 req.sb_ptr = (uint32_t)svc->sb_phys;
1195 cmd_buf = (void *)&req;
1196 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1197 } else {
1198 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1199 req_64bit.listener_id = svc->svc.listener_id;
1200 req_64bit.sb_len = svc->sb_length;
1201 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1202 cmd_buf = (void *)&req_64bit;
1203 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1204 }
1205
1206 resp.result = QSEOS_RESULT_INCOMPLETE;
1207
Zhen Kongc4c162a2019-01-23 12:07:12 -08001208 mutex_unlock(&listener_access_lock);
1209 mutex_lock(&app_access_lock);
1210 __qseecom_reentrancy_check_if_no_app_blocked(
1211 TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001212 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1213 &resp, sizeof(resp));
Zhen Kongc4c162a2019-01-23 12:07:12 -08001214 mutex_unlock(&app_access_lock);
1215 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001216 if (ret) {
1217 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1218 return -EINVAL;
1219 }
1220
1221 if (resp.result != QSEOS_RESULT_SUCCESS) {
1222 pr_err("Error SB registration req: resp.result = %d\n",
1223 resp.result);
1224 return -EPERM;
1225 }
1226 return 0;
1227}
1228
1229static int qseecom_register_listener(struct qseecom_dev_handle *data,
1230 void __user *argp)
1231{
1232 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001233 struct qseecom_register_listener_req rcvd_lstnr;
1234 struct qseecom_registered_listener_list *new_entry;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001235 struct qseecom_registered_listener_list *ptr_svc;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001236
1237 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1238 if (ret) {
1239 pr_err("copy_from_user failed\n");
1240 return ret;
1241 }
1242 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1243 rcvd_lstnr.sb_size))
1244 return -EFAULT;
1245
Zhen Kongbcdeda22018-11-16 13:50:51 -08001246 ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
1247 if (ptr_svc) {
1248 if (ptr_svc->unregister_pending == false) {
1249 pr_err("Service %d is not unique\n",
Zhen Kong3c674612018-09-06 22:51:27 -07001250 rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001251 data->released = true;
1252 return -EBUSY;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001253 } else {
1254 /*wait until listener is unregistered*/
1255 pr_debug("register %d has to wait\n",
1256 rcvd_lstnr.listener_id);
1257 mutex_unlock(&listener_access_lock);
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301258 ret = wait_event_interruptible(
Zhen Kongbcdeda22018-11-16 13:50:51 -08001259 qseecom.register_lsnr_pending_wq,
1260 list_empty(
1261 &qseecom.unregister_lsnr_pending_list_head));
1262 if (ret) {
1263 pr_err("interrupted register_pending_wq %d\n",
1264 rcvd_lstnr.listener_id);
1265 mutex_lock(&listener_access_lock);
1266 return -ERESTARTSYS;
1267 }
1268 mutex_lock(&listener_access_lock);
1269 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001270 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001271 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1272 if (!new_entry)
1273 return -ENOMEM;
1274 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
Zhen Kongbcdeda22018-11-16 13:50:51 -08001275 new_entry->rcv_req_flag = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001276
1277 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1278 new_entry->sb_length = rcvd_lstnr.sb_size;
1279 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1280 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001281 pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
1282 rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001283 kzfree(new_entry);
1284 return -ENOMEM;
1285 }
1286
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001287 init_waitqueue_head(&new_entry->rcv_req_wq);
1288 init_waitqueue_head(&new_entry->listener_block_app_wq);
1289 new_entry->send_resp_flag = 0;
1290 new_entry->listener_in_use = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001291 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001292
Zhen Konge6ac4132019-09-20 13:49:41 -07001293 data->listener.id = rcvd_lstnr.listener_id;
Zhen Kong52ce9062018-09-24 14:33:27 -07001294 pr_debug("Service %d is registered\n", rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001295 return ret;
1296}
1297
Zhen Kongbcdeda22018-11-16 13:50:51 -08001298static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
1299 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001300{
1301 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001302 struct qseecom_register_listener_ireq req;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001303 struct qseecom_command_scm_resp resp;
1304 struct ion_handle *ihandle = NULL; /* Retrieve phy addr */
1305
1306 req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
1307 req.listener_id = data->listener.id;
1308 resp.result = QSEOS_RESULT_INCOMPLETE;
1309
Zhen Kongc4c162a2019-01-23 12:07:12 -08001310 mutex_unlock(&listener_access_lock);
1311 mutex_lock(&app_access_lock);
1312 __qseecom_reentrancy_check_if_no_app_blocked(
1313 TZ_OS_DEREGISTER_LISTENER_ID);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001314 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
1315 sizeof(req), &resp, sizeof(resp));
Zhen Kongc4c162a2019-01-23 12:07:12 -08001316 mutex_unlock(&app_access_lock);
1317 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001318 if (ret) {
1319 pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
1320 ret, data->listener.id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001321 if (ret == -EBUSY)
1322 return ret;
Zhen Kong3c674612018-09-06 22:51:27 -07001323 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001324 }
1325
1326 if (resp.result != QSEOS_RESULT_SUCCESS) {
1327 pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
1328 resp.result, data->listener.id);
Zhen Kong3c674612018-09-06 22:51:27 -07001329 ret = -EPERM;
1330 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001331 }
1332
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001333 while (atomic_read(&data->ioctl_count) > 1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301334 if (wait_event_interruptible(data->abort_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001335 atomic_read(&data->ioctl_count) <= 1)) {
1336 pr_err("Interrupted from abort\n");
1337 ret = -ERESTARTSYS;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001338 }
1339 }
1340
Zhen Kong3c674612018-09-06 22:51:27 -07001341exit:
1342 if (ptr_svc->sb_virt) {
1343 ihandle = ptr_svc->ihandle;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001344 if (!IS_ERR_OR_NULL(ihandle)) {
1345 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
1346 ion_free(qseecom.ion_clnt, ihandle);
1347 }
1348 }
Zhen Kong3c674612018-09-06 22:51:27 -07001349 list_del(&ptr_svc->list);
1350 kzfree(ptr_svc);
1351
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001352 data->released = true;
Zhen Kong52ce9062018-09-24 14:33:27 -07001353 pr_debug("Service %d is unregistered\n", data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001354 return ret;
1355}
1356
Zhen Kongbcdeda22018-11-16 13:50:51 -08001357static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
1358{
1359 struct qseecom_registered_listener_list *ptr_svc = NULL;
1360 struct qseecom_unregister_pending_list *entry = NULL;
1361
Zhen Konge6ac4132019-09-20 13:49:41 -07001362 if (data->released) {
1363 pr_err("Don't unregister lsnr %d\n", data->listener.id);
1364 return -EINVAL;
1365 }
1366
Zhen Kongbcdeda22018-11-16 13:50:51 -08001367 ptr_svc = __qseecom_find_svc(data->listener.id);
1368 if (!ptr_svc) {
 1369 pr_err("Unregister invalid listener ID %d\n", data->listener.id);
1370 return -ENODATA;
1371 }
1372 /* stop CA thread waiting for listener response */
1373 ptr_svc->abort = 1;
1374 wake_up_interruptible_all(&qseecom.send_resp_wq);
1375
Zhen Kongc4c162a2019-01-23 12:07:12 -08001376 /* stop listener thread waiting for listener request */
1377 data->abort = 1;
1378 wake_up_all(&ptr_svc->rcv_req_wq);
1379
Zhen Kongbcdeda22018-11-16 13:50:51 -08001380 /* return directly if pending*/
1381 if (ptr_svc->unregister_pending)
1382 return 0;
1383
1384 /*add unregistration into pending list*/
1385 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1386 if (!entry)
1387 return -ENOMEM;
1388 entry->data = data;
1389 list_add_tail(&entry->list,
1390 &qseecom.unregister_lsnr_pending_list_head);
1391 ptr_svc->unregister_pending = true;
1392 pr_debug("unregister %d pending\n", data->listener.id);
1393 return 0;
1394}
1395
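/*
 * Listener unregistration is deferred: qseecom_unregister_listener() only
 * queues an entry on unregister_lsnr_pending_list_head; the helper below
 * (driven by the unregister kthread) drains the list once the listener's
 * release() has run, and bails out to retry later if TZ returns -EBUSY.
 */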
1396static void __qseecom_processing_pending_lsnr_unregister(void)
1397{
1398 struct qseecom_unregister_pending_list *entry = NULL;
1399 struct qseecom_registered_listener_list *ptr_svc = NULL;
1400 struct list_head *pos;
1401 int ret = 0;
1402
1403 mutex_lock(&listener_access_lock);
1404 while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
1405 pos = qseecom.unregister_lsnr_pending_list_head.next;
1406 entry = list_entry(pos,
1407 struct qseecom_unregister_pending_list, list);
1408 if (entry && entry->data) {
1409 pr_debug("process pending unregister %d\n",
1410 entry->data->listener.id);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08001411 /* don't process if qseecom_release is not called*/
1412 if (!entry->data->listener.release_called)
1413 break;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001414 ptr_svc = __qseecom_find_svc(
1415 entry->data->listener.id);
1416 if (ptr_svc) {
1417 ret = __qseecom_unregister_listener(
1418 entry->data, ptr_svc);
1419 if (ret == -EBUSY) {
1420 pr_debug("unregister %d pending again\n",
1421 entry->data->listener.id);
1422 mutex_unlock(&listener_access_lock);
1423 return;
1424 }
1425 } else
1426 pr_err("invalid listener %d\n",
1427 entry->data->listener.id);
1428 kzfree(entry->data);
1429 }
1430 list_del(pos);
1431 kzfree(entry);
1432 }
1433 mutex_unlock(&listener_access_lock);
1434 wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
1435}
1436
Zhen Kongc4c162a2019-01-23 12:07:12 -08001437static void __wakeup_unregister_listener_kthread(void)
1438{
1439 atomic_set(&qseecom.unregister_lsnr_kthread_state,
1440 LSNR_UNREG_KT_WAKEUP);
1441 wake_up_interruptible(&qseecom.unregister_lsnr_kthread_wq);
1442}
1443
1444static int __qseecom_unregister_listener_kthread_func(void *data)
1445{
1446 while (!kthread_should_stop()) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301447 wait_event_interruptible(
Zhen Kongc4c162a2019-01-23 12:07:12 -08001448 qseecom.unregister_lsnr_kthread_wq,
1449 atomic_read(&qseecom.unregister_lsnr_kthread_state)
1450 == LSNR_UNREG_KT_WAKEUP);
1451 pr_debug("kthread to unregister listener is called %d\n",
1452 atomic_read(&qseecom.unregister_lsnr_kthread_state));
1453 __qseecom_processing_pending_lsnr_unregister();
1454 atomic_set(&qseecom.unregister_lsnr_kthread_state,
1455 LSNR_UNREG_KT_SLEEP);
1456 }
1457 pr_warn("kthread to unregister listener stopped\n");
1458 return 0;
1459}
1460
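/*
 * Apply a bus bandwidth mode: when a ce_core_src_clk exists, the QSEE
 * clocks are enabled or disabled to match, and the msm_bus client request
 * is updated whenever the requested mode differs from the current one.
 */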
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001461static int __qseecom_set_msm_bus_request(uint32_t mode)
1462{
1463 int ret = 0;
1464 struct qseecom_clk *qclk;
1465
1466 qclk = &qseecom.qsee;
1467 if (qclk->ce_core_src_clk != NULL) {
1468 if (mode == INACTIVE) {
1469 __qseecom_disable_clk(CLK_QSEE);
1470 } else {
1471 ret = __qseecom_enable_clk(CLK_QSEE);
1472 if (ret)
1473 pr_err("CLK enabling failed (%d) MODE (%d)\n",
1474 ret, mode);
1475 }
1476 }
1477
1478 if ((!ret) && (qseecom.current_mode != mode)) {
1479 ret = msm_bus_scale_client_update_request(
1480 qseecom.qsee_perf_client, mode);
1481 if (ret) {
1482 pr_err("Bandwidth req failed(%d) MODE (%d)\n",
1483 ret, mode);
1484 if (qclk->ce_core_src_clk != NULL) {
1485 if (mode == INACTIVE) {
1486 ret = __qseecom_enable_clk(CLK_QSEE);
1487 if (ret)
1488 pr_err("CLK enable failed\n");
1489 } else
1490 __qseecom_disable_clk(CLK_QSEE);
1491 }
1492 }
1493 qseecom.current_mode = mode;
1494 }
1495 return ret;
1496}
1497
1498static void qseecom_bw_inactive_req_work(struct work_struct *work)
1499{
1500 mutex_lock(&app_access_lock);
1501 mutex_lock(&qsee_bw_mutex);
1502 if (qseecom.timer_running)
1503 __qseecom_set_msm_bus_request(INACTIVE);
1504 pr_debug("current_mode = %d, cumulative_mode = %d\n",
1505 qseecom.current_mode, qseecom.cumulative_mode);
1506 qseecom.timer_running = false;
1507 mutex_unlock(&qsee_bw_mutex);
1508 mutex_unlock(&app_access_lock);
1509}
1510
1511static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
1512{
1513 schedule_work(&qseecom.bw_inactive_req_ws);
1514}
1515
1516static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1517{
1518 struct qseecom_clk *qclk;
1519 int ret = 0;
1520
1521 mutex_lock(&clk_access_lock);
1522 if (ce == CLK_QSEE)
1523 qclk = &qseecom.qsee;
1524 else
1525 qclk = &qseecom.ce_drv;
shermanwei1248c7e2020-06-15 10:20:27 +08001526///<<20200615 revert Qualcomm patch:CR# 2402609 and 2478458
1527/// if (qclk->clk_access_cnt > 0) {
1528/// qclk->clk_access_cnt--;
1529/// } else {
1530 if (qclk->clk_access_cnt > 2) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001531 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1532 ret = -EINVAL;
shermanwei1248c7e2020-06-15 10:20:27 +08001533 goto err_dec_ref_cnt;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001534 }
shermanwei1248c7e2020-06-15 10:20:27 +08001535 if (qclk->clk_access_cnt == 2)
1536 qclk->clk_access_cnt--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001537
shermanwei1248c7e2020-06-15 10:20:27 +08001538err_dec_ref_cnt:
1539///>>20200615 revert Qualcomm patch
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001540 mutex_unlock(&clk_access_lock);
1541 return ret;
1542}
1543
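/*
 * Apply a bandwidth vote now and cancel any pending scale-down timer.
 * A mode of 0 re-applies the cumulative vote of all clients (collapsed
 * to HIGH when the sum exceeds MEDIUM).
 */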
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001544static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1545{
1546 int32_t ret = 0;
1547 int32_t request_mode = INACTIVE;
1548
1549 mutex_lock(&qsee_bw_mutex);
1550 if (mode == 0) {
1551 if (qseecom.cumulative_mode > MEDIUM)
1552 request_mode = HIGH;
1553 else
1554 request_mode = qseecom.cumulative_mode;
1555 } else {
1556 request_mode = mode;
1557 }
1558
1559 ret = __qseecom_set_msm_bus_request(request_mode);
1560 if (ret) {
1561 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1562 ret, request_mode);
1563 goto err_scale_timer;
1564 }
1565
1566 if (qseecom.timer_running) {
1567 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1568 if (ret) {
1569 pr_err("Failed to decrease clk ref count.\n");
1570 goto err_scale_timer;
1571 }
1572 del_timer_sync(&(qseecom.bw_scale_down_timer));
1573 qseecom.timer_running = false;
1574 }
1575err_scale_timer:
1576 mutex_unlock(&qsee_bw_mutex);
1577 return ret;
1578}
1579
1580
1581static int qseecom_unregister_bus_bandwidth_needs(
1582 struct qseecom_dev_handle *data)
1583{
1584 int32_t ret = 0;
1585
1586 qseecom.cumulative_mode -= data->mode;
1587 data->mode = INACTIVE;
1588
1589 return ret;
1590}
1591
1592static int __qseecom_register_bus_bandwidth_needs(
1593 struct qseecom_dev_handle *data, uint32_t request_mode)
1594{
1595 int32_t ret = 0;
1596
1597 if (data->mode == INACTIVE) {
1598 qseecom.cumulative_mode += request_mode;
1599 data->mode = request_mode;
1600 } else {
1601 if (data->mode != request_mode) {
1602 qseecom.cumulative_mode -= data->mode;
1603 qseecom.cumulative_mode += request_mode;
1604 data->mode = request_mode;
1605 }
1606 }
1607 return ret;
1608}
1609
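/* Vote for both DFAB and SFPB clocks; undo the DFAB vote if SFPB fails. */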
1610static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1611{
1612 int ret = 0;
1613
1614 ret = qsee_vote_for_clock(data, CLK_DFAB);
1615 if (ret) {
1616 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1617 goto perf_enable_exit;
1618 }
1619 ret = qsee_vote_for_clock(data, CLK_SFPB);
1620 if (ret) {
1621 qsee_disable_clock_vote(data, CLK_DFAB);
1622 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1623 goto perf_enable_exit;
1624 }
1625
1626perf_enable_exit:
1627 return ret;
1628}
1629
1630static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1631 void __user *argp)
1632{
1633 int32_t ret = 0;
1634 int32_t req_mode;
1635
1636 if (qseecom.no_clock_support)
1637 return 0;
1638
1639 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1640 if (ret) {
1641 pr_err("copy_from_user failed\n");
1642 return ret;
1643 }
1644 if (req_mode > HIGH) {
1645 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1646 return -EINVAL;
1647 }
1648
1649 /*
1650 * Register bus bandwidth needs if bus scaling feature is enabled;
1651 * otherwise, qseecom enable/disable clocks for the client directly.
1652 */
1653 if (qseecom.support_bus_scaling) {
1654 mutex_lock(&qsee_bw_mutex);
1655 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1656 mutex_unlock(&qsee_bw_mutex);
1657 } else {
1658 pr_debug("Bus scaling feature is NOT enabled\n");
1659 pr_debug("request bandwidth mode %d for the client\n",
1660 req_mode);
1661 if (req_mode != INACTIVE) {
1662 ret = qseecom_perf_enable(data);
1663 if (ret)
1664 pr_err("Failed to vote for clock with err %d\n",
1665 ret);
1666 } else {
1667 qsee_disable_clock_vote(data, CLK_DFAB);
1668 qsee_disable_clock_vote(data, CLK_SFPB);
1669 }
1670 }
1671 return ret;
1672}
1673
1674static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1675{
1676 if (qseecom.no_clock_support)
1677 return;
1678
1679 mutex_lock(&qsee_bw_mutex);
1680 qseecom.bw_scale_down_timer.expires = jiffies +
1681 msecs_to_jiffies(duration);
1682 mod_timer(&(qseecom.bw_scale_down_timer),
1683 qseecom.bw_scale_down_timer.expires);
1684 qseecom.timer_running = true;
1685 mutex_unlock(&qsee_bw_mutex);
1686}
1687
1688static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1689{
1690 if (!qseecom.support_bus_scaling)
1691 qsee_disable_clock_vote(data, CLK_SFPB);
1692 else
1693 __qseecom_add_bw_scale_down_timer(
1694 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1695}
1696
1697static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1698{
1699 int ret = 0;
1700
1701 if (qseecom.support_bus_scaling) {
1702 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1703 if (ret)
1704 pr_err("Failed to set bw MEDIUM.\n");
1705 } else {
1706 ret = qsee_vote_for_clock(data, CLK_SFPB);
1707 if (ret)
1708 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1709 }
1710 return ret;
1711}
1712
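/*
 * Register the client's shared buffer: import the ION fd, validate the
 * requested length against the allocation, map the buffer into the
 * kernel, and record its physical/virtual addresses on the client handle.
 */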
1713static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1714 void __user *argp)
1715{
1716 ion_phys_addr_t pa;
1717 int32_t ret;
1718 struct qseecom_set_sb_mem_param_req req;
1719 size_t len;
1720
1721	/* Copy the shared buffer parameters provided by the client */
1722 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1723 return -EFAULT;
1724
1725 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1726 (req.sb_len == 0)) {
1727 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1728 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1729 return -EFAULT;
1730 }
1731 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1732 req.sb_len))
1733 return -EFAULT;
1734
1735 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001736 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001737 req.ifd_data_fd);
1738 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1739 pr_err("Ion client could not retrieve the handle\n");
1740 return -ENOMEM;
1741 }
1742 /* Get the physical address of the ION BUF */
1743 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1744 if (ret) {
1745
1746 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1747 ret);
1748 return ret;
1749 }
1750
1751 if (len < req.sb_len) {
1752 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1753 req.sb_len, len);
1754 return -EINVAL;
1755 }
1756	/* Map the shared buffer into the kernel and record its addresses */
1757 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1758 data->client.ihandle);
1759 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1760 pr_err("ION memory mapping for client shared buf failed\n");
1761 return -ENOMEM;
1762 }
1763 data->client.sb_phys = (phys_addr_t)pa;
1764 data->client.sb_length = req.sb_len;
1765 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1766 return 0;
1767}
1768
Zhen Kong26e62742018-05-04 17:19:06 -07001769static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1770 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001771{
1772 int ret;
1773
1774 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001775 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001776}
1777
1778static int __qseecom_reentrancy_listener_has_sent_rsp(
1779 struct qseecom_dev_handle *data,
1780 struct qseecom_registered_listener_list *ptr_svc)
1781{
1782 int ret;
1783
1784 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001785 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001786}
1787
1788static void __qseecom_clean_listener_sglistinfo(
1789 struct qseecom_registered_listener_list *ptr_svc)
1790{
1791 if (ptr_svc->sglist_cnt) {
1792 memset(ptr_svc->sglistinfo_ptr, 0,
1793 SGLISTINFO_TABLE_SIZE);
1794 ptr_svc->sglist_cnt = 0;
1795 }
1796}
1797
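/*
 * Handle QSEOS_RESULT_INCOMPLETE: TZ is requesting a listener service.
 * Wake the listener identified by resp->data, wait (with signals blocked)
 * for its response, then return that response to TZ via a listener data
 * response scm call; repeat until TZ stops returning INCOMPLETE.
 */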
1798static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1799 struct qseecom_command_scm_resp *resp)
1800{
1801 int ret = 0;
1802 int rc = 0;
1803 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07001804 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
1805 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
1806 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001807 struct qseecom_registered_listener_list *ptr_svc = NULL;
1808 sigset_t new_sigset;
1809 sigset_t old_sigset;
1810 uint32_t status;
1811 void *cmd_buf = NULL;
1812 size_t cmd_len;
1813 struct sglist_info *table = NULL;
1814
Zhen Kongbcdeda22018-11-16 13:50:51 -08001815 qseecom.app_block_ref_cnt++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001816 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1817 lstnr = resp->data;
1818 /*
1819		 * Wake up the blocking listener service with the lstnr id
1820 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08001821 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001822 list_for_each_entry(ptr_svc,
1823 &qseecom.registered_listener_list_head, list) {
1824 if (ptr_svc->svc.listener_id == lstnr) {
1825 ptr_svc->listener_in_use = true;
1826 ptr_svc->rcv_req_flag = 1;
AnilKumar Chimata20f86662019-12-11 11:51:03 +05301827 rc = msm_ion_do_cache_op(qseecom.ion_clnt,
1828 ptr_svc->ihandle,
1829 ptr_svc->sb_virt,
1830 ptr_svc->sb_length,
1831 ION_IOC_INV_CACHES);
1832 if (rc) {
1833 pr_err("cache opp failed %d\n", rc);
1834 status = QSEOS_RESULT_FAILURE;
1835 goto err_resp;
1836 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001837 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1838 break;
1839 }
1840 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001841
1842 if (ptr_svc == NULL) {
1843 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07001844 rc = -EINVAL;
1845 status = QSEOS_RESULT_FAILURE;
1846 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001847 }
1848
1849 if (!ptr_svc->ihandle) {
1850 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07001851 rc = -EINVAL;
1852 status = QSEOS_RESULT_FAILURE;
1853 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001854 }
1855
1856 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07001857 pr_err("Service %d does not exist\n",
1858 lstnr);
1859 rc = -ERESTARTSYS;
1860 ptr_svc = NULL;
1861 status = QSEOS_RESULT_FAILURE;
1862 goto err_resp;
1863 }
1864
1865 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001866 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07001867 lstnr, ptr_svc->abort);
1868 rc = -ENODEV;
1869 status = QSEOS_RESULT_FAILURE;
1870 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001871 }
Zhen Kong25731112018-09-20 13:10:03 -07001872
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001873 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1874
1875 /* initialize the new signal mask with all signals*/
1876 sigfillset(&new_sigset);
1877 /* block all signals */
1878 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1879
Zhen Kongbcdeda22018-11-16 13:50:51 -08001880 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001881 do {
1882 /*
1883 * When reentrancy is not supported, check global
1884 * send_resp_flag; otherwise, check this listener's
1885 * send_resp_flag.
1886 */
1887 if (!qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301888 !wait_event_interruptible(qseecom.send_resp_wq,
Zhen Kong26e62742018-05-04 17:19:06 -07001889 __qseecom_listener_has_sent_rsp(
1890 data, ptr_svc))) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001891 break;
1892 }
1893
1894 if (qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301895 !wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001896 __qseecom_reentrancy_listener_has_sent_rsp(
1897 data, ptr_svc))) {
1898 break;
1899 }
1900 } while (1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001901 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001902 /* restore signal mask */
1903 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07001904 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001905 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
1906 data->client.app_id, lstnr, ret);
1907 rc = -ENODEV;
1908 status = QSEOS_RESULT_FAILURE;
1909 } else {
1910 status = QSEOS_RESULT_SUCCESS;
1911 }
Zhen Kong26e62742018-05-04 17:19:06 -07001912err_resp:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001913 qseecom.send_resp_flag = 0;
Zhen Kong7d500032018-08-06 16:58:31 -07001914 if (ptr_svc) {
1915 ptr_svc->send_resp_flag = 0;
1916 table = ptr_svc->sglistinfo_ptr;
1917 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001918 if (qseecom.qsee_version < QSEE_VERSION_40) {
1919 send_data_rsp.listener_id = lstnr;
1920 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001921 if (table) {
1922 send_data_rsp.sglistinfo_ptr =
1923 (uint32_t)virt_to_phys(table);
1924 send_data_rsp.sglistinfo_len =
1925 SGLISTINFO_TABLE_SIZE;
1926 dmac_flush_range((void *)table,
1927 (void *)table + SGLISTINFO_TABLE_SIZE);
1928 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001929 cmd_buf = (void *)&send_data_rsp;
1930 cmd_len = sizeof(send_data_rsp);
1931 } else {
1932 send_data_rsp_64bit.listener_id = lstnr;
1933 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001934 if (table) {
1935 send_data_rsp_64bit.sglistinfo_ptr =
1936 virt_to_phys(table);
1937 send_data_rsp_64bit.sglistinfo_len =
1938 SGLISTINFO_TABLE_SIZE;
1939 dmac_flush_range((void *)table,
1940 (void *)table + SGLISTINFO_TABLE_SIZE);
1941 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001942 cmd_buf = (void *)&send_data_rsp_64bit;
1943 cmd_len = sizeof(send_data_rsp_64bit);
1944 }
Zhen Kong7d500032018-08-06 16:58:31 -07001945 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001946 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1947 else
1948 *(uint32_t *)cmd_buf =
1949 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07001950 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001951 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1952 ptr_svc->ihandle,
1953 ptr_svc->sb_virt, ptr_svc->sb_length,
1954 ION_IOC_CLEAN_INV_CACHES);
1955 if (ret) {
1956 pr_err("cache operation failed %d\n", ret);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001957 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001958 }
1959 }
1960
1961 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1962 ret = __qseecom_enable_clk(CLK_QSEE);
1963 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08001964 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001965 }
1966
1967 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1968 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07001969 if (ptr_svc) {
1970 ptr_svc->listener_in_use = false;
1971 __qseecom_clean_listener_sglistinfo(ptr_svc);
1972 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001973 if (ret) {
1974 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1975 ret, data->client.app_id);
1976 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1977 __qseecom_disable_clk(CLK_QSEE);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001978 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001979 }
Zhen Kong26e62742018-05-04 17:19:06 -07001980 pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
1981 status, resp->result, data->client.app_id, lstnr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001982 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1983 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1984 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1985 resp->result, data->client.app_id, lstnr);
1986 ret = -EINVAL;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001987 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001988 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001989exit:
1990 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001991 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1992 __qseecom_disable_clk(CLK_QSEE);
1993
1994 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001995 qseecom.app_block_ref_cnt--;
Zhen Kongcc580932019-04-23 22:16:56 -07001996 wake_up_interruptible_all(&qseecom.app_block_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001997 if (rc)
1998 return rc;
1999
2000 return ret;
2001}
2002
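/*
 * The TA session is blocked because the listener it needs is busy.
 * Sleep (dropping app_access_lock) until the listener is free, then tell
 * TZ to resume the blocked app/session with
 * QSEOS_CONTINUE_BLOCKED_REQ_COMMAND, falling back to the legacy per-app
 * form if the session-based request is rejected.
 */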
Zhen Konga91aaf02018-02-02 17:21:04 -08002003static int __qseecom_process_reentrancy_blocked_on_listener(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002004 struct qseecom_command_scm_resp *resp,
2005 struct qseecom_registered_app_list *ptr_app,
2006 struct qseecom_dev_handle *data)
2007{
2008 struct qseecom_registered_listener_list *list_ptr;
2009 int ret = 0;
2010 struct qseecom_continue_blocked_request_ireq ireq;
2011 struct qseecom_command_scm_resp continue_resp;
Zhen Konga91aaf02018-02-02 17:21:04 -08002012 unsigned int session_id;
Zhen Kong3d1d92f2018-02-02 17:21:04 -08002013 sigset_t new_sigset;
2014 sigset_t old_sigset;
Zhen Konga91aaf02018-02-02 17:21:04 -08002015 unsigned long flags;
2016 bool found_app = false;
Zhen Kong0ea975d2019-03-12 14:40:24 -07002017 struct qseecom_registered_app_list dummy_app_entry = { {NULL} };
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002018
2019 if (!resp || !data) {
2020 pr_err("invalid resp or data pointer\n");
2021 ret = -EINVAL;
2022 goto exit;
2023 }
2024
2025 /* find app_id & img_name from list */
Zhen Kong0ea975d2019-03-12 14:40:24 -07002026 if (!ptr_app) {
2027 if (data->client.from_smcinvoke) {
2028 pr_debug("This request is from smcinvoke\n");
2029 ptr_app = &dummy_app_entry;
2030 ptr_app->app_id = data->client.app_id;
2031 } else {
2032 spin_lock_irqsave(&qseecom.registered_app_list_lock,
2033 flags);
2034 list_for_each_entry(ptr_app,
2035 &qseecom.registered_app_list_head, list) {
2036 if ((ptr_app->app_id == data->client.app_id) &&
2037 (!strcmp(ptr_app->app_name,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002038 data->client.app_name))) {
Zhen Kong0ea975d2019-03-12 14:40:24 -07002039 found_app = true;
2040 break;
2041 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002042 }
Zhen Kong0ea975d2019-03-12 14:40:24 -07002043 spin_unlock_irqrestore(
2044 &qseecom.registered_app_list_lock, flags);
2045 if (!found_app) {
2046 pr_err("app_id %d (%s) is not found\n",
2047 data->client.app_id,
2048 (char *)data->client.app_name);
2049 ret = -ENOENT;
2050 goto exit;
2051 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002052 }
2053 }
2054
Zhen Kongd8cc0052017-11-13 15:13:31 -08002055 do {
Zhen Konga91aaf02018-02-02 17:21:04 -08002056 session_id = resp->resp_type;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002057 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002058 list_ptr = __qseecom_find_svc(resp->data);
2059 if (!list_ptr) {
2060 pr_err("Invalid listener ID %d\n", resp->data);
2061 ret = -ENODATA;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002062 mutex_unlock(&listener_access_lock);
Zhen Konge7f525f2017-12-01 18:26:25 -08002063 goto exit;
2064 }
Zhen Konga91aaf02018-02-02 17:21:04 -08002065 ptr_app->blocked_on_listener_id = resp->data;
2066
2067 pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
2068 resp->data, list_ptr->listener_in_use,
2069 session_id, data->client.app_id);
2070
2071 /* sleep until listener is available */
2072 sigfillset(&new_sigset);
2073 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2074
2075 do {
2076 qseecom.app_block_ref_cnt++;
2077 ptr_app->app_blocked = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002078 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002079 mutex_unlock(&app_access_lock);
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302080 wait_event_interruptible(
Zhen Konga91aaf02018-02-02 17:21:04 -08002081 list_ptr->listener_block_app_wq,
2082 !list_ptr->listener_in_use);
2083 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002084 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002085 ptr_app->app_blocked = false;
2086 qseecom.app_block_ref_cnt--;
2087 } while (list_ptr->listener_in_use);
2088
2089 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2090
2091 ptr_app->blocked_on_listener_id = 0;
2092 pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
2093 resp->data, session_id, data->client.app_id);
2094
2095 /* notify TZ that listener is available */
2096 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
2097
2098 if (qseecom.smcinvoke_support)
2099 ireq.app_or_session_id = session_id;
2100 else
2101 ireq.app_or_session_id = data->client.app_id;
2102
2103 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2104 &ireq, sizeof(ireq),
2105 &continue_resp, sizeof(continue_resp));
2106 if (ret && qseecom.smcinvoke_support) {
2107 /* retry with legacy cmd */
2108 qseecom.smcinvoke_support = false;
2109 ireq.app_or_session_id = data->client.app_id;
2110 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2111 &ireq, sizeof(ireq),
2112 &continue_resp, sizeof(continue_resp));
2113 qseecom.smcinvoke_support = true;
2114 if (ret) {
2115 pr_err("unblock app %d or session %d fail\n",
2116 data->client.app_id, session_id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002117 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002118 goto exit;
2119 }
2120 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08002121 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002122 resp->result = continue_resp.result;
2123 resp->resp_type = continue_resp.resp_type;
2124 resp->data = continue_resp.data;
2125 pr_debug("unblock resp = %d\n", resp->result);
2126 } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
2127
2128 if (resp->result != QSEOS_RESULT_INCOMPLETE) {
2129 pr_err("Unexpected unblock resp %d\n", resp->result);
2130 ret = -EINVAL;
Zhen Kong2f60f492017-06-29 15:22:14 -07002131 }
Zhen Kong2f60f492017-06-29 15:22:14 -07002132exit:
2133 return ret;
2134}
2135
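/*
 * Reentrancy-aware variant of __qseecom_process_incomplete_cmd(): the
 * global app_access_lock is released while waiting for the listener
 * response so that other TAs can keep running.
 */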
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002136static int __qseecom_reentrancy_process_incomplete_cmd(
2137 struct qseecom_dev_handle *data,
2138 struct qseecom_command_scm_resp *resp)
2139{
2140 int ret = 0;
2141 int rc = 0;
2142 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07002143 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
2144 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
2145 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002146 struct qseecom_registered_listener_list *ptr_svc = NULL;
2147 sigset_t new_sigset;
2148 sigset_t old_sigset;
2149 uint32_t status;
2150 void *cmd_buf = NULL;
2151 size_t cmd_len;
2152 struct sglist_info *table = NULL;
2153
Zhen Kong26e62742018-05-04 17:19:06 -07002154 while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002155 lstnr = resp->data;
2156 /*
2157 * Wake up blocking lsitener service with the lstnr id
2158 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002159 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002160 list_for_each_entry(ptr_svc,
2161 &qseecom.registered_listener_list_head, list) {
2162 if (ptr_svc->svc.listener_id == lstnr) {
2163 ptr_svc->listener_in_use = true;
2164 ptr_svc->rcv_req_flag = 1;
AnilKumar Chimata20f86662019-12-11 11:51:03 +05302165 rc = msm_ion_do_cache_op(qseecom.ion_clnt,
2166 ptr_svc->ihandle,
2167 ptr_svc->sb_virt,
2168 ptr_svc->sb_length,
2169 ION_IOC_INV_CACHES);
2170 if (rc) {
2171 pr_err("cache opp failed %d\n", rc);
2172 status = QSEOS_RESULT_FAILURE;
2173 goto err_resp;
2174 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002175 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2176 break;
2177 }
2178 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002179
2180 if (ptr_svc == NULL) {
2181 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07002182 rc = -EINVAL;
2183 status = QSEOS_RESULT_FAILURE;
2184 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002185 }
2186
2187 if (!ptr_svc->ihandle) {
2188 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07002189 rc = -EINVAL;
2190 status = QSEOS_RESULT_FAILURE;
2191 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002192 }
2193
2194 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07002195 pr_err("Service %d does not exist\n",
2196 lstnr);
2197 rc = -ERESTARTSYS;
2198 ptr_svc = NULL;
2199 status = QSEOS_RESULT_FAILURE;
2200 goto err_resp;
2201 }
2202
2203 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08002204 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07002205 lstnr, ptr_svc->abort);
2206 rc = -ENODEV;
2207 status = QSEOS_RESULT_FAILURE;
2208 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002209 }
Zhen Kong25731112018-09-20 13:10:03 -07002210
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002211 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2212
2213 /* initialize the new signal mask with all signals*/
2214 sigfillset(&new_sigset);
2215
2216 /* block all signals */
2217 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2218
2219		/* unlock mutexes between waking the listener and sleep-wait */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002220 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002221 mutex_unlock(&app_access_lock);
2222 do {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302223 if (!wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002224 __qseecom_reentrancy_listener_has_sent_rsp(
2225 data, ptr_svc))) {
2226 break;
2227 }
2228 } while (1);
2229 /* lock mutex again after resp sent */
2230 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002231 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002232 ptr_svc->send_resp_flag = 0;
2233 qseecom.send_resp_flag = 0;
2234
2235 /* restore signal mask */
2236 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07002237 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002238 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
2239 data->client.app_id, lstnr, ret);
2240 rc = -ENODEV;
2241 status = QSEOS_RESULT_FAILURE;
2242 } else {
2243 status = QSEOS_RESULT_SUCCESS;
2244 }
Zhen Kong26e62742018-05-04 17:19:06 -07002245err_resp:
Zhen Kong7d500032018-08-06 16:58:31 -07002246 if (ptr_svc)
2247 table = ptr_svc->sglistinfo_ptr;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002248 if (qseecom.qsee_version < QSEE_VERSION_40) {
2249 send_data_rsp.listener_id = lstnr;
2250 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002251 if (table) {
2252 send_data_rsp.sglistinfo_ptr =
2253 (uint32_t)virt_to_phys(table);
2254 send_data_rsp.sglistinfo_len =
2255 SGLISTINFO_TABLE_SIZE;
2256 dmac_flush_range((void *)table,
2257 (void *)table + SGLISTINFO_TABLE_SIZE);
2258 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002259 cmd_buf = (void *)&send_data_rsp;
2260 cmd_len = sizeof(send_data_rsp);
2261 } else {
2262 send_data_rsp_64bit.listener_id = lstnr;
2263 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002264 if (table) {
2265 send_data_rsp_64bit.sglistinfo_ptr =
2266 virt_to_phys(table);
2267 send_data_rsp_64bit.sglistinfo_len =
2268 SGLISTINFO_TABLE_SIZE;
2269 dmac_flush_range((void *)table,
2270 (void *)table + SGLISTINFO_TABLE_SIZE);
2271 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002272 cmd_buf = (void *)&send_data_rsp_64bit;
2273 cmd_len = sizeof(send_data_rsp_64bit);
2274 }
Zhen Kong7d500032018-08-06 16:58:31 -07002275 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002276 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2277 else
2278 *(uint32_t *)cmd_buf =
2279 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07002280 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002281 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2282 ptr_svc->ihandle,
2283 ptr_svc->sb_virt, ptr_svc->sb_length,
2284 ION_IOC_CLEAN_INV_CACHES);
2285 if (ret) {
2286 pr_err("cache operation failed %d\n", ret);
2287 return ret;
2288 }
2289 }
2290 if (lstnr == RPMB_SERVICE) {
2291 ret = __qseecom_enable_clk(CLK_QSEE);
2292 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08002293 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002294 }
2295
2296 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2297 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07002298 if (ptr_svc) {
2299 ptr_svc->listener_in_use = false;
2300 __qseecom_clean_listener_sglistinfo(ptr_svc);
2301 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2302 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002303
2304 if (ret) {
2305 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2306 ret, data->client.app_id);
2307 goto exit;
2308 }
2309
2310 switch (resp->result) {
2311 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2312 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2313 lstnr, data->client.app_id, resp->data);
2314 if (lstnr == resp->data) {
2315 pr_err("lstnr %d should not be blocked!\n",
2316 lstnr);
2317 ret = -EINVAL;
2318 goto exit;
2319 }
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002320 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002321 ret = __qseecom_process_reentrancy_blocked_on_listener(
2322 resp, NULL, data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002323 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002324 if (ret) {
2325 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2326 data->client.app_id,
2327 data->client.app_name, resp->data);
2328 goto exit;
2329 }
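			/* fall through: nothing more to do on success/incomplete */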
2330 case QSEOS_RESULT_SUCCESS:
2331 case QSEOS_RESULT_INCOMPLETE:
2332 break;
2333 default:
2334 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2335 resp->result, data->client.app_id, lstnr);
2336 ret = -EINVAL;
2337 goto exit;
2338 }
2339exit:
Zhen Kongbcdeda22018-11-16 13:50:51 -08002340 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002341 if (lstnr == RPMB_SERVICE)
2342 __qseecom_disable_clk(CLK_QSEE);
2343
2344 }
2345 if (rc)
2346 return rc;
2347
2348 return ret;
2349}
2350
2351/*
2352 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2353 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2354 * So, before sending an OS level scm call, first check whether any app is
2355 * blocked, and wait until all apps are unblocked.
2356 */
2357static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2358{
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002359 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2360 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2361 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2362 /* thread sleep until this app unblocked */
2363 while (qseecom.app_block_ref_cnt > 0) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002364 mutex_unlock(&app_access_lock);
Zhen Kongcc580932019-04-23 22:16:56 -07002365 wait_event_interruptible(qseecom.app_block_wq,
2366 (!qseecom.app_block_ref_cnt));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002367 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002368 }
2369 }
2370}
2371
2372/*
2373 * scm_call of send data will fail if this TA is blocked or there is more
2374 * than one TA requesting listener services; so, first check whether we need
2375 * to wait.
2376 */
2377static void __qseecom_reentrancy_check_if_this_app_blocked(
2378 struct qseecom_registered_app_list *ptr_app)
2379{
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002380 if (qseecom.qsee_reentrancy_support) {
Zhen Kongdea10592018-07-30 17:50:10 -07002381 ptr_app->check_block++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002382 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2383 /* thread sleep until this app unblocked */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002384 mutex_unlock(&app_access_lock);
Zhen Kongcc580932019-04-23 22:16:56 -07002385 wait_event_interruptible(qseecom.app_block_wq,
2386 (!ptr_app->app_blocked &&
2387 qseecom.app_block_ref_cnt <= 1));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002388 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002389 }
Zhen Kongdea10592018-07-30 17:50:10 -07002390 ptr_app->check_block--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002391 }
2392}
2393
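/*
 * Look the app up in the local registered-app list first; if it is not
 * there, query TZ with an app-lookup scm call. *app_id stays 0 when the
 * app is not loaded.
 */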
2394static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2395 uint32_t *app_id)
2396{
2397 int32_t ret;
2398 struct qseecom_command_scm_resp resp;
2399 bool found_app = false;
2400 struct qseecom_registered_app_list *entry = NULL;
2401 unsigned long flags = 0;
2402
2403 if (!app_id) {
2404 pr_err("Null pointer to app_id\n");
2405 return -EINVAL;
2406 }
2407 *app_id = 0;
2408
2409 /* check if app exists and has been registered locally */
2410 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2411 list_for_each_entry(entry,
2412 &qseecom.registered_app_list_head, list) {
2413 if (!strcmp(entry->app_name, req.app_name)) {
2414 found_app = true;
2415 break;
2416 }
2417 }
2418 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2419 if (found_app) {
2420 pr_debug("Found app with id %d\n", entry->app_id);
2421 *app_id = entry->app_id;
2422 return 0;
2423 }
2424
2425 memset((void *)&resp, 0, sizeof(resp));
2426
2427 /* SCM_CALL to check if app_id for the mentioned app exists */
2428 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2429 sizeof(struct qseecom_check_app_ireq),
2430 &resp, sizeof(resp));
2431 if (ret) {
2432 pr_err("scm_call to check if app is already loaded failed\n");
2433 return -EINVAL;
2434 }
2435
2436 if (resp.result == QSEOS_RESULT_FAILURE)
2437 return 0;
2438
2439 switch (resp.resp_type) {
2440 /*qsee returned listener type response */
2441 case QSEOS_LISTENER_ID:
2442 pr_err("resp type is of listener type instead of app");
2443 return -EINVAL;
2444 case QSEOS_APP_ID:
2445 *app_id = resp.data;
2446 return 0;
2447 default:
2448 pr_err("invalid resp type (%d) from qsee",
2449 resp.resp_type);
2450 return -ENODEV;
2451 }
2452}
2453
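/*
 * Load a TA on behalf of a client: make sure the matching cmnlib/cmnlib64
 * is loaded, vote for bus/clocks, then either bump the ref count of an
 * already-loaded app or send QSEOS_APP_START_COMMAND with the image held
 * in the client's ION buffer and register the new app entry.
 */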
2454static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2455{
2456 struct qseecom_registered_app_list *entry = NULL;
2457 unsigned long flags = 0;
2458 u32 app_id = 0;
2459 struct ion_handle *ihandle; /* Ion handle */
2460 struct qseecom_load_img_req load_img_req;
2461 int32_t ret = 0;
2462 ion_phys_addr_t pa = 0;
2463 size_t len;
2464 struct qseecom_command_scm_resp resp;
2465 struct qseecom_check_app_ireq req;
2466 struct qseecom_load_app_ireq load_req;
2467 struct qseecom_load_app_64bit_ireq load_req_64bit;
2468 void *cmd_buf = NULL;
2469 size_t cmd_len;
2470 bool first_time = false;
2471
2472 /* Copy the relevant information needed for loading the image */
2473 if (copy_from_user(&load_img_req,
2474 (void __user *)argp,
2475 sizeof(struct qseecom_load_img_req))) {
2476 pr_err("copy_from_user failed\n");
2477 return -EFAULT;
2478 }
2479
2480 /* Check and load cmnlib */
2481 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2482 if (!qseecom.commonlib_loaded &&
2483 load_img_req.app_arch == ELFCLASS32) {
2484 ret = qseecom_load_commonlib_image(data, "cmnlib");
2485 if (ret) {
2486 pr_err("failed to load cmnlib\n");
2487 return -EIO;
2488 }
2489 qseecom.commonlib_loaded = true;
2490 pr_debug("cmnlib is loaded\n");
2491 }
2492
2493 if (!qseecom.commonlib64_loaded &&
2494 load_img_req.app_arch == ELFCLASS64) {
2495 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2496 if (ret) {
2497 pr_err("failed to load cmnlib64\n");
2498 return -EIO;
2499 }
2500 qseecom.commonlib64_loaded = true;
2501 pr_debug("cmnlib64 is loaded\n");
2502 }
2503 }
2504
2505 if (qseecom.support_bus_scaling) {
2506 mutex_lock(&qsee_bw_mutex);
2507 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2508 mutex_unlock(&qsee_bw_mutex);
2509 if (ret)
2510 return ret;
2511 }
2512
2513 /* Vote for the SFPB clock */
2514 ret = __qseecom_enable_clk_scale_up(data);
2515 if (ret)
2516 goto enable_clk_err;
2517
2518 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2519 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2520 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2521
2522 ret = __qseecom_check_app_exists(req, &app_id);
2523 if (ret < 0)
2524 goto loadapp_err;
2525
2526 if (app_id) {
2527 pr_debug("App id %d (%s) already exists\n", app_id,
2528 (char *)(req.app_name));
2529 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2530 list_for_each_entry(entry,
2531 &qseecom.registered_app_list_head, list){
2532 if (entry->app_id == app_id) {
2533 entry->ref_cnt++;
2534 break;
2535 }
2536 }
2537 spin_unlock_irqrestore(
2538 &qseecom.registered_app_list_lock, flags);
2539 ret = 0;
2540 } else {
2541 first_time = true;
2542 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2543 (char *)(load_img_req.img_name));
2544 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002545 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002546 load_img_req.ifd_data_fd);
2547 if (IS_ERR_OR_NULL(ihandle)) {
2548 pr_err("Ion client could not retrieve the handle\n");
2549 ret = -ENOMEM;
2550 goto loadapp_err;
2551 }
2552
2553 /* Get the physical address of the ION BUF */
2554 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2555 if (ret) {
2556 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2557 ret);
2558 goto loadapp_err;
2559 }
2560 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2561 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2562 len, load_img_req.mdt_len,
2563 load_img_req.img_len);
2564 ret = -EINVAL;
2565 goto loadapp_err;
2566 }
2567 /* Populate the structure for sending scm call to load image */
2568 if (qseecom.qsee_version < QSEE_VERSION_40) {
2569 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2570 load_req.mdt_len = load_img_req.mdt_len;
2571 load_req.img_len = load_img_req.img_len;
2572 strlcpy(load_req.app_name, load_img_req.img_name,
2573 MAX_APP_NAME_SIZE);
2574 load_req.phy_addr = (uint32_t)pa;
2575 cmd_buf = (void *)&load_req;
2576 cmd_len = sizeof(struct qseecom_load_app_ireq);
2577 } else {
2578 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2579 load_req_64bit.mdt_len = load_img_req.mdt_len;
2580 load_req_64bit.img_len = load_img_req.img_len;
2581 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2582 MAX_APP_NAME_SIZE);
2583 load_req_64bit.phy_addr = (uint64_t)pa;
2584 cmd_buf = (void *)&load_req_64bit;
2585 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2586 }
2587
2588 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2589 ION_IOC_CLEAN_INV_CACHES);
2590 if (ret) {
2591 pr_err("cache operation failed %d\n", ret);
2592 goto loadapp_err;
2593 }
2594
2595 /* SCM_CALL to load the app and get the app_id back */
2596 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2597 cmd_len, &resp, sizeof(resp));
2598 if (ret) {
2599 pr_err("scm_call to load app failed\n");
2600 if (!IS_ERR_OR_NULL(ihandle))
2601 ion_free(qseecom.ion_clnt, ihandle);
2602 ret = -EINVAL;
2603 goto loadapp_err;
2604 }
2605
2606 if (resp.result == QSEOS_RESULT_FAILURE) {
2607 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2608 if (!IS_ERR_OR_NULL(ihandle))
2609 ion_free(qseecom.ion_clnt, ihandle);
2610 ret = -EFAULT;
2611 goto loadapp_err;
2612 }
2613
2614 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2615 ret = __qseecom_process_incomplete_cmd(data, &resp);
2616 if (ret) {
Zhen Kong03b2eae2019-09-17 16:58:46 -07002617 /* TZ has created app_id, need to unload it */
2618 pr_err("incomp_cmd err %d, %d, unload %d %s\n",
2619 ret, resp.result, resp.data,
2620 load_img_req.img_name);
2621 __qseecom_unload_app(data, resp.data);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002622 if (!IS_ERR_OR_NULL(ihandle))
2623 ion_free(qseecom.ion_clnt, ihandle);
2624 ret = -EFAULT;
2625 goto loadapp_err;
2626 }
2627 }
2628
2629 if (resp.result != QSEOS_RESULT_SUCCESS) {
2630 pr_err("scm_call failed resp.result unknown, %d\n",
2631 resp.result);
2632 if (!IS_ERR_OR_NULL(ihandle))
2633 ion_free(qseecom.ion_clnt, ihandle);
2634 ret = -EFAULT;
2635 goto loadapp_err;
2636 }
2637
2638 app_id = resp.data;
2639
2640 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2641 if (!entry) {
2642 ret = -ENOMEM;
2643 goto loadapp_err;
2644 }
2645 entry->app_id = app_id;
2646 entry->ref_cnt = 1;
2647 entry->app_arch = load_img_req.app_arch;
2648 /*
2649 * keymaster app may be first loaded as "keymaste" by qseecomd,
2650 * and then used as "keymaster" on some targets. To avoid app
2651 * name checking error, register "keymaster" into app_list and
2652 * thread private data.
2653 */
2654 if (!strcmp(load_img_req.img_name, "keymaste"))
2655 strlcpy(entry->app_name, "keymaster",
2656 MAX_APP_NAME_SIZE);
2657 else
2658 strlcpy(entry->app_name, load_img_req.img_name,
2659 MAX_APP_NAME_SIZE);
2660 entry->app_blocked = false;
2661 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002662 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002663
2664 /* Deallocate the handle */
2665 if (!IS_ERR_OR_NULL(ihandle))
2666 ion_free(qseecom.ion_clnt, ihandle);
2667
2668 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2669 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2670 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2671 flags);
2672
2673 pr_warn("App with id %u (%s) now loaded\n", app_id,
2674 (char *)(load_img_req.img_name));
2675 }
2676 data->client.app_id = app_id;
2677 data->client.app_arch = load_img_req.app_arch;
2678 if (!strcmp(load_img_req.img_name, "keymaste"))
2679 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2680 else
2681 strlcpy(data->client.app_name, load_img_req.img_name,
2682 MAX_APP_NAME_SIZE);
2683 load_img_req.app_id = app_id;
2684 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2685 pr_err("copy_to_user failed\n");
2686 ret = -EFAULT;
2687 if (first_time == true) {
2688 spin_lock_irqsave(
2689 &qseecom.registered_app_list_lock, flags);
2690 list_del(&entry->list);
2691 spin_unlock_irqrestore(
2692 &qseecom.registered_app_list_lock, flags);
2693 kzfree(entry);
2694 }
2695 }
2696
2697loadapp_err:
2698 __qseecom_disable_clk_scale_down(data);
2699enable_clk_err:
2700 if (qseecom.support_bus_scaling) {
2701 mutex_lock(&qsee_bw_mutex);
2702 qseecom_unregister_bus_bandwidth_needs(data);
2703 mutex_unlock(&qsee_bw_mutex);
2704 }
2705 return ret;
2706}
2707
2708static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2709{
2710 int ret = 1; /* Set unload app */
2711
2712 wake_up_all(&qseecom.send_resp_wq);
2713 if (qseecom.qsee_reentrancy_support)
2714 mutex_unlock(&app_access_lock);
2715 while (atomic_read(&data->ioctl_count) > 1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302716 if (wait_event_interruptible(data->abort_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002717 atomic_read(&data->ioctl_count) <= 1)) {
2718 pr_err("Interrupted from abort\n");
2719 ret = -ERESTARTSYS;
2720 break;
2721 }
2722 }
2723 if (qseecom.qsee_reentrancy_support)
2724 mutex_lock(&app_access_lock);
2725 return ret;
2726}
2727
2728static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2729{
2730 int ret = 0;
2731
2732 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2733 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2734 ion_free(qseecom.ion_clnt, data->client.ihandle);
jitendrathakarec7ff9e42019-09-12 19:46:48 +05302735 memset((void *)&data->client,
2736 0, sizeof(struct qseecom_client_handle));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002737 }
2738 return ret;
2739}
2740
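/*
 * Ask TZ to shut down the app with QSEOS_APP_SHUTDOWN_COMMAND, handling
 * an INCOMPLETE response (listener traffic) before reporting the result.
 */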
Zhen Kong03b2eae2019-09-17 16:58:46 -07002741static int __qseecom_unload_app(struct qseecom_dev_handle *data,
2742 uint32_t app_id)
2743{
2744 struct qseecom_unload_app_ireq req;
2745 struct qseecom_command_scm_resp resp;
2746 int ret = 0;
2747
2748	/* Populate the structure for sending scm call to unload the app */
2749 req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
2750 req.app_id = app_id;
2751
2752 /* SCM_CALL to unload the app */
2753 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2754 sizeof(struct qseecom_unload_app_ireq),
2755 &resp, sizeof(resp));
2756 if (ret) {
2757 pr_err("scm_call to unload app (id = %d) failed\n", app_id);
2758 return -EFAULT;
2759 }
2760 switch (resp.result) {
2761 case QSEOS_RESULT_SUCCESS:
2762 pr_warn("App (%d) is unloaded\n", app_id);
2763 break;
2764 case QSEOS_RESULT_INCOMPLETE:
2765 ret = __qseecom_process_incomplete_cmd(data, &resp);
2766 if (ret)
2767 pr_err("unload app %d fail proc incom cmd: %d,%d,%d\n",
2768 app_id, ret, resp.result, resp.data);
2769 else
2770 pr_warn("App (%d) is unloaded\n", app_id);
2771 break;
2772 case QSEOS_RESULT_FAILURE:
2773 pr_err("app (%d) unload_failed!!\n", app_id);
2774 ret = -EFAULT;
2775 break;
2776 default:
2777 pr_err("unload app %d get unknown resp.result %d\n",
2778 app_id, resp.result);
2779 ret = -EFAULT;
2780 break;
2781 }
2782 return ret;
2783}
2784
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002785static int qseecom_unload_app(struct qseecom_dev_handle *data,
2786 bool app_crash)
2787{
2788 unsigned long flags;
2789 unsigned long flags1;
2790 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002791 struct qseecom_registered_app_list *ptr_app = NULL;
2792 bool unload = false;
2793 bool found_app = false;
2794 bool found_dead_app = false;
Zhen Kong03b2eae2019-09-17 16:58:46 -07002795 bool doublecheck = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002796
2797 if (!data) {
2798 pr_err("Invalid/uninitialized device handle\n");
2799 return -EINVAL;
2800 }
2801
2802 if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
2803 pr_debug("Do not unload keymaster app from tz\n");
2804 goto unload_exit;
2805 }
2806
2807 __qseecom_cleanup_app(data);
2808 __qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
2809
2810 if (data->client.app_id > 0) {
2811 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2812 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
2813 list) {
2814 if (ptr_app->app_id == data->client.app_id) {
2815 if (!strcmp((void *)ptr_app->app_name,
2816 (void *)data->client.app_name)) {
2817 found_app = true;
Zhen Kong024798b2018-07-13 18:14:26 -07002818 if (ptr_app->app_blocked ||
2819 ptr_app->check_block)
Zhen Kongaf93d7a2017-10-13 14:01:48 -07002820 app_crash = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002821 if (app_crash || ptr_app->ref_cnt == 1)
2822 unload = true;
2823 break;
2824 }
2825 found_dead_app = true;
2826 break;
2827 }
2828 }
2829 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2830 flags);
2831 if (found_app == false && found_dead_app == false) {
2832 pr_err("Cannot find app with id = %d (%s)\n",
2833 data->client.app_id,
2834 (char *)data->client.app_name);
2835 ret = -EINVAL;
2836 goto unload_exit;
2837 }
2838 }
2839
2840 if (found_dead_app)
2841 pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
2842 (char *)data->client.app_name);
2843
2844 if (unload) {
Zhen Kong03b2eae2019-09-17 16:58:46 -07002845 ret = __qseecom_unload_app(data, data->client.app_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002846
Zhen Kongf818f152019-03-13 12:31:32 -07002847 /* double check if this app_entry still exists */
Zhen Kongf818f152019-03-13 12:31:32 -07002848 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2849 list_for_each_entry(ptr_app,
2850 &qseecom.registered_app_list_head, list) {
2851 if ((ptr_app->app_id == data->client.app_id) &&
2852 (!strcmp((void *)ptr_app->app_name,
2853 (void *)data->client.app_name))) {
2854 doublecheck = true;
2855 break;
2856 }
2857 }
2858 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2859 flags1);
2860 if (!doublecheck) {
2861 pr_warn("app %d(%s) entry is already removed\n",
2862 data->client.app_id,
2863 (char *)data->client.app_name);
2864 found_app = false;
2865 }
2866 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07002867
Zhen Kong7d500032018-08-06 16:58:31 -07002868unload_exit:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002869 if (found_app) {
2870 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2871 if (app_crash) {
2872 ptr_app->ref_cnt = 0;
2873 pr_debug("app_crash: ref_count = 0\n");
2874 } else {
2875 if (ptr_app->ref_cnt == 1) {
2876 ptr_app->ref_cnt = 0;
2877 pr_debug("ref_count set to 0\n");
2878 } else {
2879 ptr_app->ref_cnt--;
2880 pr_debug("Can't unload app(%d) inuse\n",
2881 ptr_app->app_id);
2882 }
2883 }
2884 if (unload) {
2885 list_del(&ptr_app->list);
2886 kzfree(ptr_app);
2887 }
2888 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2889 flags1);
2890 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002891 qseecom_unmap_ion_allocated_memory(data);
2892 data->released = true;
2893 return ret;
2894}
2895
Zhen Kong03b2eae2019-09-17 16:58:46 -07002896
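/*
 * Unloading may not be safe in the caller's context; queue the handle on
 * the pending-unload list so the unload kthread performs the actual
 * qseecom_unload_app() later.
 */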
2897static int qseecom_prepare_unload_app(struct qseecom_dev_handle *data)
2898{
2899 struct qseecom_unload_app_pending_list *entry = NULL;
2900
2901 pr_debug("prepare to unload app(%d)(%s), pending %d\n",
2902 data->client.app_id, data->client.app_name,
2903 data->client.unload_pending);
2904 if (data->client.unload_pending)
2905 return 0;
2906 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2907 if (!entry)
2908 return -ENOMEM;
2909 entry->data = data;
Zhen Kong03b2eae2019-09-17 16:58:46 -07002910 list_add_tail(&entry->list,
2911 &qseecom.unload_app_pending_list_head);
Zhen Kong03b2eae2019-09-17 16:58:46 -07002912 data->client.unload_pending = true;
2913 pr_debug("unload ta %d pending\n", data->client.app_id);
2914 return 0;
2915}
2916
2917static void __wakeup_unload_app_kthread(void)
2918{
2919 atomic_set(&qseecom.unload_app_kthread_state,
2920 UNLOAD_APP_KT_WAKEUP);
2921 wake_up_interruptible(&qseecom.unload_app_kthread_wq);
2922}
2923
2924static bool __qseecom_find_pending_unload_app(uint32_t app_id, char *app_name)
2925{
2926 struct qseecom_unload_app_pending_list *entry = NULL;
2927 bool found = false;
2928
2929 mutex_lock(&unload_app_pending_list_lock);
2930 list_for_each_entry(entry, &qseecom.unload_app_pending_list_head,
2931 list) {
2932 if ((entry->data->client.app_id == app_id) &&
2933 (!strcmp(entry->data->client.app_name, app_name))) {
2934 found = true;
2935 break;
2936 }
2937 }
2938 mutex_unlock(&unload_app_pending_list_lock);
2939 return found;
2940}
2941
2942static void __qseecom_processing_pending_unload_app(void)
2943{
2944 struct qseecom_unload_app_pending_list *entry = NULL;
2945 struct list_head *pos;
2946 int ret = 0;
2947
2948 mutex_lock(&unload_app_pending_list_lock);
2949 while (!list_empty(&qseecom.unload_app_pending_list_head)) {
2950 pos = qseecom.unload_app_pending_list_head.next;
2951 entry = list_entry(pos,
2952 struct qseecom_unload_app_pending_list, list);
2953 if (entry && entry->data) {
2954 pr_debug("process pending unload app %d (%s)\n",
2955 entry->data->client.app_id,
2956 entry->data->client.app_name);
2957 mutex_unlock(&unload_app_pending_list_lock);
2958 mutex_lock(&app_access_lock);
2959 ret = qseecom_unload_app(entry->data, true);
2960 if (ret)
2961 pr_err("unload app %d pending failed %d\n",
2962 entry->data->client.app_id, ret);
2963 mutex_unlock(&app_access_lock);
2964 mutex_lock(&unload_app_pending_list_lock);
2965 kzfree(entry->data);
2966 }
2967 list_del(pos);
2968 kzfree(entry);
2969 }
2970 mutex_unlock(&unload_app_pending_list_lock);
2971}
2972
2973static int __qseecom_unload_app_kthread_func(void *data)
2974{
2975 while (!kthread_should_stop()) {
2976 wait_event_interruptible(
2977 qseecom.unload_app_kthread_wq,
2978 atomic_read(&qseecom.unload_app_kthread_state)
2979 == UNLOAD_APP_KT_WAKEUP);
2980 pr_debug("kthread to unload app is called, state %d\n",
2981 atomic_read(&qseecom.unload_app_kthread_state));
2982 __qseecom_processing_pending_unload_app();
2983 atomic_set(&qseecom.unload_app_kthread_state,
2984 UNLOAD_APP_KT_SLEEP);
2985 }
2986 pr_warn("kthread to unload app stopped\n");
2987 return 0;
2988}
2989
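/*
 * Translate a client user-space address inside the registered shared
 * buffer into the matching physical (or, below, kernel virtual) address.
 */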
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002990static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2991 unsigned long virt)
2992{
2993 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2994}
2995
2996static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2997 unsigned long virt)
2998{
2999 return (uintptr_t)data->client.sb_virt +
3000 (virt - data->client.user_virt_sb_base);
3001}
3002
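/*
 * Build the RPMB service request for TZ: the client's command buffer must
 * sit at the base of the shared buffer, and the key type is read from it.
 */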
3003int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
3004 struct qseecom_send_svc_cmd_req *req_ptr,
3005 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
3006{
3007 int ret = 0;
3008 void *req_buf = NULL;
3009
3010 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
3011 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
3012 req_ptr, send_svc_ireq_ptr);
3013 return -EINVAL;
3014 }
3015
3016 /* Clients need to ensure req_buf is at base offset of shared buffer */
3017 if ((uintptr_t)req_ptr->cmd_req_buf !=
3018 data_ptr->client.user_virt_sb_base) {
3019 pr_err("cmd buf not pointing to base offset of shared buffer\n");
3020 return -EINVAL;
3021 }
3022
3023 if (data_ptr->client.sb_length <
3024 sizeof(struct qseecom_rpmb_provision_key)) {
3025 pr_err("shared buffer is too small to hold key type\n");
3026 return -EINVAL;
3027 }
3028 req_buf = data_ptr->client.sb_virt;
3029
3030 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
3031 send_svc_ireq_ptr->key_type =
3032 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
3033 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
3034 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3035 data_ptr, (uintptr_t)req_ptr->resp_buf));
3036 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
3037
3038 return ret;
3039}
3040
3041int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
3042 struct qseecom_send_svc_cmd_req *req_ptr,
3043 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
3044{
3045 int ret = 0;
3046 uint32_t reqd_len_sb_in = 0;
3047
3048 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
3049 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
3050 req_ptr, send_svc_ireq_ptr);
3051 return -EINVAL;
3052 }
3053
3054 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
3055 if (reqd_len_sb_in > data_ptr->client.sb_length) {
3056 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
3057 pr_err("Required: %u, Available: %zu\n",
3058 reqd_len_sb_in, data_ptr->client.sb_length);
3059 return -ENOMEM;
3060 }
3061
3062 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
3063 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
3064 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3065 data_ptr, (uintptr_t)req_ptr->resp_buf));
3066 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
3067
3068 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3069 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
3070
3071
3072 return ret;
3073}
3074
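/*
 * Sanity-check a send-service request: both cmd and resp buffers must lie
 * entirely inside the registered shared buffer, with lengths that cannot
 * overflow.
 */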
3075static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
3076 struct qseecom_send_svc_cmd_req *req)
3077{
3078 if (!req || !req->resp_buf || !req->cmd_req_buf) {
3079 pr_err("req or cmd buffer or response buffer is null\n");
3080 return -EINVAL;
3081 }
3082
3083 if (!data || !data->client.ihandle) {
3084 pr_err("Client or client handle is not initialized\n");
3085 return -EINVAL;
3086 }
3087
3088 if (data->client.sb_virt == NULL) {
3089 pr_err("sb_virt null\n");
3090 return -EINVAL;
3091 }
3092
3093 if (data->client.user_virt_sb_base == 0) {
3094 pr_err("user_virt_sb_base is null\n");
3095 return -EINVAL;
3096 }
3097
3098 if (data->client.sb_length == 0) {
3099 pr_err("sb_length is 0\n");
3100 return -EINVAL;
3101 }
3102
3103 if (((uintptr_t)req->cmd_req_buf <
3104 data->client.user_virt_sb_base) ||
3105 ((uintptr_t)req->cmd_req_buf >=
3106 (data->client.user_virt_sb_base + data->client.sb_length))) {
3107 pr_err("cmd buffer address not within shared buffer\n");
3108 return -EINVAL;
3109 }
3110 if (((uintptr_t)req->resp_buf <
3111 data->client.user_virt_sb_base) ||
3112 ((uintptr_t)req->resp_buf >=
3113 (data->client.user_virt_sb_base + data->client.sb_length))) {
3114 pr_err("response buffer address not within shared buffer\n");
3115 return -EINVAL;
3116 }
3117 if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
3118 (req->cmd_req_len > data->client.sb_length) ||
3119 (req->resp_len > data->client.sb_length)) {
3120 pr_err("cmd buf length or response buf length not valid\n");
3121 return -EINVAL;
3122 }
3123 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3124 pr_err("Integer overflow detected in req_len & rsp_len\n");
3125 return -EINVAL;
3126 }
3127
3128 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3129 pr_debug("Not enough memory to fit cmd_buf and resp_buf\n");
3130 pr_debug("Required: %u, Available: %zu\n",
3131 (req->cmd_req_len + req->resp_len),
3132 data->client.sb_length);
3133 return -ENOMEM;
3134 }
3135 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3136 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3137 return -EINVAL;
3138 }
3139 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3140 pr_err("Integer overflow in resp_len & resp_buf\n");
3141 return -EINVAL;
3142 }
3143 if (data->client.user_virt_sb_base >
3144 (ULONG_MAX - data->client.sb_length)) {
3145 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3146 return -EINVAL;
3147 }
3148 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3149 ((uintptr_t)data->client.user_virt_sb_base +
3150 data->client.sb_length)) ||
3151 (((uintptr_t)req->resp_buf + req->resp_len) >
3152 ((uintptr_t)data->client.user_virt_sb_base +
3153 data->client.sb_length))) {
3154 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3155 return -EINVAL;
3156 }
3157 return 0;
3158}
3159
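/*
 * Handle the send-service ioctl: copy the request from userspace, build
 * the RPMB or FSM key ireq, vote for bus bandwidth/clocks, do the cache
 * maintenance on the shared buffer, and issue the SCM call to QSEE.
 */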
3160static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
3161 void __user *argp)
3162{
3163 int ret = 0;
3164 struct qseecom_client_send_service_ireq send_svc_ireq;
3165 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
3166 struct qseecom_command_scm_resp resp;
3167 struct qseecom_send_svc_cmd_req req;
3168 void *send_req_ptr;
3169 size_t req_buf_size;
3170
3173 if (copy_from_user(&req,
3174 (void __user *)argp,
3175 sizeof(req))) {
3176 pr_err("copy_from_user failed\n");
3177 return -EFAULT;
3178 }
3179
3180 if (__validate_send_service_cmd_inputs(data, &req))
3181 return -EINVAL;
3182
3183 data->type = QSEECOM_SECURE_SERVICE;
3184
3185 switch (req.cmd_id) {
3186 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
3187 case QSEOS_RPMB_ERASE_COMMAND:
3188 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
3189 send_req_ptr = &send_svc_ireq;
3190 req_buf_size = sizeof(send_svc_ireq);
3191 if (__qseecom_process_rpmb_svc_cmd(data, &req,
3192 send_req_ptr))
3193 return -EINVAL;
3194 break;
3195 case QSEOS_FSM_LTEOTA_REQ_CMD:
3196 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
3197 case QSEOS_FSM_IKE_REQ_CMD:
3198 case QSEOS_FSM_IKE_REQ_RSP_CMD:
3199 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
3200 case QSEOS_FSM_OEM_FUSE_READ_ROW:
3201 case QSEOS_FSM_ENCFS_REQ_CMD:
3202 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
3203 send_req_ptr = &send_fsm_key_svc_ireq;
3204 req_buf_size = sizeof(send_fsm_key_svc_ireq);
3205 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
3206 send_req_ptr))
3207 return -EINVAL;
3208 break;
3209 default:
3210 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
3211 return -EINVAL;
3212 }
3213
3214 if (qseecom.support_bus_scaling) {
3215 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
3216 if (ret) {
3217 pr_err("Failed to set bw HIGH\n");
3218 return ret;
3219 }
3220 } else {
3221 ret = qseecom_perf_enable(data);
3222 if (ret) {
3223 pr_err("Failed to vote for clocks with err %d\n", ret);
3224 goto exit;
3225 }
3226 }
3227
3228 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3229 data->client.sb_virt, data->client.sb_length,
3230 ION_IOC_CLEAN_INV_CACHES);
3231 if (ret) {
3232 pr_err("cache operation failed %d\n", ret);
3233 goto exit;
3234 }
3235 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3236 (const void *)send_req_ptr,
3237 req_buf_size, &resp, sizeof(resp));
3238 if (ret) {
3239 pr_err("qseecom_scm_call failed with err: %d\n", ret);
3240 if (!qseecom.support_bus_scaling) {
3241 qsee_disable_clock_vote(data, CLK_DFAB);
3242 qsee_disable_clock_vote(data, CLK_SFPB);
3243 } else {
3244 __qseecom_add_bw_scale_down_timer(
3245 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3246 }
3247 goto exit;
3248 }
3249 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3250 data->client.sb_virt, data->client.sb_length,
3251 ION_IOC_INV_CACHES);
3252 if (ret) {
3253 pr_err("cache operation failed %d\n", ret);
3254 goto exit;
3255 }
3256 switch (resp.result) {
3257 case QSEOS_RESULT_SUCCESS:
3258 break;
3259 case QSEOS_RESULT_INCOMPLETE:
3260 pr_debug("qseos_result_incomplete\n");
3261 ret = __qseecom_process_incomplete_cmd(data, &resp);
3262 if (ret) {
3263 pr_err("process_incomplete_cmd fail with result: %d\n",
3264 resp.result);
3265 }
3266 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
3267 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05303268 if (put_user(resp.result,
3269 (uint32_t __user *)req.resp_buf)) {
3270 ret = -EINVAL;
3271 goto exit;
3272 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003273 ret = 0;
3274 }
3275 break;
3276 case QSEOS_RESULT_FAILURE:
3277 pr_err("scm call failed with resp.result: %d\n", resp.result);
3278 ret = -EINVAL;
3279 break;
3280 default:
3281 pr_err("Response result %d not supported\n",
3282 resp.result);
3283 ret = -EINVAL;
3284 break;
3285 }
3286 if (!qseecom.support_bus_scaling) {
3287 qsee_disable_clock_vote(data, CLK_DFAB);
3288 qsee_disable_clock_vote(data, CLK_SFPB);
3289 } else {
3290 __qseecom_add_bw_scale_down_timer(
3291 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3292 }
3293
3294exit:
3295 return ret;
3296}
3297
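/*
 * Validate a client send-command request against the registered shared
 * buffer: NULL checks, bounds, lengths, and integer-overflow checks.
 */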
3298static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
3299 struct qseecom_send_cmd_req *req)
3300
3301{
3302 if (!data || !data->client.ihandle) {
3303 pr_err("Client or client handle is not initialized\n");
3304 return -EINVAL;
3305 }
3306 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
3307 (req->cmd_req_buf == NULL)) {
3308 pr_err("cmd buffer or response buffer is null\n");
3309 return -EINVAL;
3310 }
3311 if (((uintptr_t)req->cmd_req_buf <
3312 data->client.user_virt_sb_base) ||
3313 ((uintptr_t)req->cmd_req_buf >=
3314 (data->client.user_virt_sb_base + data->client.sb_length))) {
3315 pr_err("cmd buffer address not within shared buffer\n");
3316 return -EINVAL;
3317 }
3318 if (((uintptr_t)req->resp_buf <
3319 data->client.user_virt_sb_base) ||
3320 ((uintptr_t)req->resp_buf >=
3321 (data->client.user_virt_sb_base + data->client.sb_length))) {
3322 pr_err("response buffer address not within shared buffer\n");
3323 return -EINVAL;
3324 }
3325 if ((req->cmd_req_len == 0) ||
3326 (req->cmd_req_len > data->client.sb_length) ||
3327 (req->resp_len > data->client.sb_length)) {
3328 pr_err("cmd buf length or response buf length not valid\n");
3329 return -EINVAL;
3330 }
3331 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3332 pr_err("Integer overflow detected in req_len & rsp_len\n");
3333 return -EINVAL;
3334 }
3335
3336 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3337 pr_debug("Not enough memory to fit cmd_buf and resp_buf\n");
3338 pr_debug("Required: %u, Available: %zu\n",
3339 (req->cmd_req_len + req->resp_len),
3340 data->client.sb_length);
3341 return -ENOMEM;
3342 }
3343 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3344 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3345 return -EINVAL;
3346 }
3347 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3348 pr_err("Integer overflow in resp_len & resp_buf\n");
3349 return -EINVAL;
3350 }
3351 if (data->client.user_virt_sb_base >
3352 (ULONG_MAX - data->client.sb_length)) {
3353 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3354 return -EINVAL;
3355 }
3356 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3357 ((uintptr_t)data->client.user_virt_sb_base +
3358 data->client.sb_length)) ||
3359 (((uintptr_t)req->resp_buf + req->resp_len) >
3360 ((uintptr_t)data->client.user_virt_sb_base +
3361 data->client.sb_length))) {
3362 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3363 return -EINVAL;
3364 }
3365 return 0;
3366}
3367
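/*
 * Post-process an SCM response when QSEE reentrancy is supported. A
 * request blocked on a listener is first unblocked and then resumed as
 * an incomplete command (the case falls through intentionally).
 */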
3368int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
3369 struct qseecom_registered_app_list *ptr_app,
3370 struct qseecom_dev_handle *data)
3371{
3372 int ret = 0;
3373
3374 switch (resp->result) {
3375 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
3376 pr_warn("App(%d) %s is blocked on listener %d\n",
3377 data->client.app_id, data->client.app_name,
3378 resp->data);
3379 ret = __qseecom_process_reentrancy_blocked_on_listener(
3380 resp, ptr_app, data);
3381 if (ret) {
3382 pr_err("failed to process blocked App(%d) %s on listener %d\n",
3383 data->client.app_id, data->client.app_name, resp->data);
3384 return ret;
3385 }
3386
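	/* fall through: once unblocked, resume as an incomplete command */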
3387 case QSEOS_RESULT_INCOMPLETE:
3388 qseecom.app_block_ref_cnt++;
3389 ptr_app->app_blocked = true;
3390 ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
3391 ptr_app->app_blocked = false;
3392 qseecom.app_block_ref_cnt--;
Zhen Kongcc580932019-04-23 22:16:56 -07003393 wake_up_interruptible_all(&qseecom.app_block_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003394 if (ret)
3395 pr_err("process_incomplete_cmd failed err: %d\n",
3396 ret);
3397 return ret;
3398 case QSEOS_RESULT_SUCCESS:
3399 return ret;
3400 default:
3401 pr_err("Response result %d not supported\n",
3402 resp->result);
3403 return -EINVAL;
3404 }
3405}
3406
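/*
 * Core send path for a client command: look up the registered app, build
 * the 32-bit or 64-bit send_data ireq along with the sglist info table,
 * clean the shared buffer cache, issue the SCM call, handle incomplete or
 * blocked results, and invalidate the cache afterwards.
 */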
3407static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
3408 struct qseecom_send_cmd_req *req)
3409{
3410 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07003411 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003412 u32 reqd_len_sb_in = 0;
3413 struct qseecom_client_send_data_ireq send_data_req = {0};
3414 struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
3415 struct qseecom_command_scm_resp resp;
3416 unsigned long flags;
3417 struct qseecom_registered_app_list *ptr_app;
3418 bool found_app = false;
3419 void *cmd_buf = NULL;
3420 size_t cmd_len;
3421 struct sglist_info *table = data->sglistinfo_ptr;
3422
3423 reqd_len_sb_in = req->cmd_req_len + req->resp_len;
3424 /* find app_id & img_name from list */
3425 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
3426 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
3427 list) {
3428 if ((ptr_app->app_id == data->client.app_id) &&
3429 (!strcmp(ptr_app->app_name, data->client.app_name))) {
3430 found_app = true;
3431 break;
3432 }
3433 }
3434 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
3435
3436 if (!found_app) {
3437 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
3438 (char *)data->client.app_name);
3439 return -ENOENT;
3440 }
3441
Zhen Kong03b2eae2019-09-17 16:58:46 -07003442 if (__qseecom_find_pending_unload_app(data->client.app_id,
3443 data->client.app_name)) {
3444 pr_err("app %d (%s) unload is pending\n",
3445 data->client.app_id, data->client.app_name);
3446 return -ENOENT;
3447 }
3448
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003449 if (qseecom.qsee_version < QSEE_VERSION_40) {
3450 send_data_req.app_id = data->client.app_id;
3451 send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3452 data, (uintptr_t)req->cmd_req_buf));
3453 send_data_req.req_len = req->cmd_req_len;
3454 send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3455 data, (uintptr_t)req->resp_buf));
3456 send_data_req.rsp_len = req->resp_len;
3457 send_data_req.sglistinfo_ptr =
3458 (uint32_t)virt_to_phys(table);
3459 send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3460 dmac_flush_range((void *)table,
3461 (void *)table + SGLISTINFO_TABLE_SIZE);
3462 cmd_buf = (void *)&send_data_req;
3463 cmd_len = sizeof(struct qseecom_client_send_data_ireq);
3464 } else {
3465 send_data_req_64bit.app_id = data->client.app_id;
3466 send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
3467 (uintptr_t)req->cmd_req_buf);
3468 send_data_req_64bit.req_len = req->cmd_req_len;
3469 send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
3470 (uintptr_t)req->resp_buf);
3471 send_data_req_64bit.rsp_len = req->resp_len;
3472 /* check if 32bit app's phys_addr region is under 4GB.*/
3473 if ((data->client.app_arch == ELFCLASS32) &&
3474 ((send_data_req_64bit.req_ptr >=
3475 PHY_ADDR_4G - send_data_req_64bit.req_len) ||
3476 (send_data_req_64bit.rsp_ptr >=
3477 PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
3478 pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
3479 data->client.app_name,
3480 send_data_req_64bit.req_ptr,
3481 send_data_req_64bit.req_len,
3482 send_data_req_64bit.rsp_ptr,
3483 send_data_req_64bit.rsp_len);
3484 return -EFAULT;
3485 }
3486 send_data_req_64bit.sglistinfo_ptr =
3487 (uint64_t)virt_to_phys(table);
3488 send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3489 dmac_flush_range((void *)table,
3490 (void *)table + SGLISTINFO_TABLE_SIZE);
3491 cmd_buf = (void *)&send_data_req_64bit;
3492 cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
3493 }
3494
3495 if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
3496 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
3497 else
3498 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
3499
3500 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3501 data->client.sb_virt,
3502 reqd_len_sb_in,
3503 ION_IOC_CLEAN_INV_CACHES);
3504 if (ret) {
3505 pr_err("cache operation failed %d\n", ret);
3506 return ret;
3507 }
3508
3509 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
3510
3511 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3512 cmd_buf, cmd_len,
3513 &resp, sizeof(resp));
3514 if (ret) {
3515 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
3516 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07003517 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003518 }
3519
3520 if (qseecom.qsee_reentrancy_support) {
3521 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07003522 if (ret)
3523 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003524 } else {
3525 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
3526 ret = __qseecom_process_incomplete_cmd(data, &resp);
3527 if (ret) {
3528 pr_err("process_incomplete_cmd failed err: %d\n",
3529 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07003530 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003531 }
3532 } else {
3533 if (resp.result != QSEOS_RESULT_SUCCESS) {
3534 pr_err("Response result %d not supported\n",
3535 resp.result);
3536 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07003537 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003538 }
3539 }
3540 }
Zhen Kong4af480e2017-09-19 14:34:16 -07003541exit:
3542 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003543 data->client.sb_virt, data->client.sb_length,
3544 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07003545 if (ret2) {
3546 pr_err("cache operation failed %d\n", ret2);
3547 return ret2;
3548 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003549 return ret;
3550}
3551
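/* Handle the send-cmd ioctl: copy and validate the request, then send it. */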
3552static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3553{
3554 int ret = 0;
3555 struct qseecom_send_cmd_req req;
3556
3557 ret = copy_from_user(&req, argp, sizeof(req));
3558 if (ret) {
3559 pr_err("copy_from_user failed\n");
3560 return -EFAULT;
3561 }
3562
3563 if (__validate_send_cmd_inputs(data, &req))
3564 return -EINVAL;
3565
3566 ret = __qseecom_send_cmd(data, &req);
3567
3571 return ret;
3572}
3573
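/*
 * Check that a modfd cmd_buf_offset leaves room for a 32-bit address to
 * be patched into the command (or listener response) buffer.
 */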
3574int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3575 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3576 struct qseecom_dev_handle *data, int i)
3577{
3578 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3579 (req->ifd_data[i].fd > 0)) {
3580 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3581 (req->ifd_data[i].cmd_buf_offset >
3582 req->cmd_req_len - sizeof(uint32_t))) {
3583 pr_err("Invalid offset (req len) 0x%x\n",
3584 req->ifd_data[i].cmd_buf_offset);
3585 return -EINVAL;
3586 }
3587 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3588 (lstnr_resp->ifd_data[i].fd > 0)) {
3589 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3590 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3591 lstnr_resp->resp_len - sizeof(uint32_t))) {
3592 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3593 lstnr_resp->ifd_data[i].cmd_buf_offset);
3594 return -EINVAL;
3595 }
3596 }
3597 return 0;
3598}
3599
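/* Same as __boundary_checks_offset() but requires room for a 64-bit address. */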
Zhen Kongd097c6e02019-08-01 16:10:20 -07003600static int __boundary_checks_offset_64(struct qseecom_send_modfd_cmd_req *req,
3601 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3602 struct qseecom_dev_handle *data, int i)
3603{
3604
3605 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3606 (req->ifd_data[i].fd > 0)) {
3607 if ((req->cmd_req_len < sizeof(uint64_t)) ||
3608 (req->ifd_data[i].cmd_buf_offset >
3609 req->cmd_req_len - sizeof(uint64_t))) {
3610 pr_err("Invalid offset (req len) 0x%x\n",
3611 req->ifd_data[i].cmd_buf_offset);
3612 return -EINVAL;
3613 }
3614 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3615 (lstnr_resp->ifd_data[i].fd > 0)) {
3616 if ((lstnr_resp->resp_len < sizeof(uint64_t)) ||
3617 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3618 lstnr_resp->resp_len - sizeof(uint64_t))) {
3619 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3620 lstnr_resp->ifd_data[i].cmd_buf_offset);
3621 return -EINVAL;
3622 }
3623 }
3624 return 0;
3625}
3626
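/*
 * Patch the physical address (or scatter-gather entry list) of each
 * referenced ion buffer into the command (or listener response) buffer at
 * the requested offset, with the required cache maintenance; on cleanup
 * the patched fields are zeroed and the caches invalidated.
 */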
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003627static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
3628 struct qseecom_dev_handle *data)
3629{
3630 struct ion_handle *ihandle;
3631 char *field;
3632 int ret = 0;
3633 int i = 0;
3634 uint32_t len = 0;
3635 struct scatterlist *sg;
3636 struct qseecom_send_modfd_cmd_req *req = NULL;
3637 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3638 struct qseecom_registered_listener_list *this_lstnr = NULL;
3639 uint32_t offset;
3640 struct sg_table *sg_ptr;
3641
3642 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3643 (data->type != QSEECOM_CLIENT_APP))
3644 return -EFAULT;
3645
3646 if (msg == NULL) {
3647 pr_err("Invalid address\n");
3648 return -EINVAL;
3649 }
3650 if (data->type == QSEECOM_LISTENER_SERVICE) {
3651 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3652 this_lstnr = __qseecom_find_svc(data->listener.id);
3653 if (IS_ERR_OR_NULL(this_lstnr)) {
3654 pr_err("Invalid listener ID\n");
3655 return -ENOMEM;
3656 }
3657 } else {
3658 req = (struct qseecom_send_modfd_cmd_req *)msg;
3659 }
3660
3661 for (i = 0; i < MAX_ION_FD; i++) {
3662 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3663 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003664 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003665 req->ifd_data[i].fd);
3666 if (IS_ERR_OR_NULL(ihandle)) {
3667 pr_err("Ion client can't retrieve the handle\n");
3668 return -ENOMEM;
3669 }
3670 field = (char *) req->cmd_req_buf +
3671 req->ifd_data[i].cmd_buf_offset;
3672 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3673 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003674 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003675 lstnr_resp->ifd_data[i].fd);
3676 if (IS_ERR_OR_NULL(ihandle)) {
3677 pr_err("Ion client can't retrieve the handle\n");
3678 return -ENOMEM;
3679 }
3680 field = lstnr_resp->resp_buf_ptr +
3681 lstnr_resp->ifd_data[i].cmd_buf_offset;
3682 } else {
3683 continue;
3684 }
3685 /* Populate the cmd data structure with the phys_addr */
3686 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3687 if (IS_ERR_OR_NULL(sg_ptr)) {
3688 pr_err("Ion client could not retrieve sg table\n");
3689 goto err;
3690 }
3691 if (sg_ptr->nents == 0) {
3692 pr_err("Num of scattered entries is 0\n");
3693 goto err;
3694 }
3695 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3696 pr_err("Num of scattered entries (%d) is greater than max supported %d\n",
3698 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3699 goto err;
3700 }
3701 sg = sg_ptr->sgl;
3702 if (sg_ptr->nents == 1) {
3703 uint32_t *update;
3704
3705 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3706 goto err;
3707 if ((data->type == QSEECOM_CLIENT_APP &&
3708 (data->client.app_arch == ELFCLASS32 ||
3709 data->client.app_arch == ELFCLASS64)) ||
3710 (data->type == QSEECOM_LISTENER_SERVICE)) {
3711 /*
3712 * Check if sg list phy add region is under 4GB
3713 */
3714 if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
3715 (!cleanup) &&
3716 ((uint64_t)sg_dma_address(sg_ptr->sgl)
3717 >= PHY_ADDR_4G - sg->length)) {
3718 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3719 data->client.app_name,
3720 &(sg_dma_address(sg_ptr->sgl)),
3721 sg->length);
3722 goto err;
3723 }
3724 update = (uint32_t *) field;
3725 *update = cleanup ? 0 :
3726 (uint32_t)sg_dma_address(sg_ptr->sgl);
3727 } else {
3728 pr_err("QSEE app arch %u is not supported\n",
3729 data->client.app_arch);
3730 goto err;
3731 }
3732 len += (uint32_t)sg->length;
3733 } else {
3734 struct qseecom_sg_entry *update;
3735 int j = 0;
3736
3737 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3738 (req->ifd_data[i].fd > 0)) {
3739
3740 if ((req->cmd_req_len <
3741 SG_ENTRY_SZ * sg_ptr->nents) ||
3742 (req->ifd_data[i].cmd_buf_offset >
3743 (req->cmd_req_len -
3744 SG_ENTRY_SZ * sg_ptr->nents))) {
3745 pr_err("Invalid offset = 0x%x\n",
3746 req->ifd_data[i].cmd_buf_offset);
3747 goto err;
3748 }
3749
3750 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3751 (lstnr_resp->ifd_data[i].fd > 0)) {
3752
3753 if ((lstnr_resp->resp_len <
3754 SG_ENTRY_SZ * sg_ptr->nents) ||
3755 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3756 (lstnr_resp->resp_len -
3757 SG_ENTRY_SZ * sg_ptr->nents))) {
3758 goto err;
3759 }
3760 }
3761 if ((data->type == QSEECOM_CLIENT_APP &&
3762 (data->client.app_arch == ELFCLASS32 ||
3763 data->client.app_arch == ELFCLASS64)) ||
3764 (data->type == QSEECOM_LISTENER_SERVICE)) {
3765 update = (struct qseecom_sg_entry *)field;
3766 for (j = 0; j < sg_ptr->nents; j++) {
3767 /*
3768 * Check if sg list PA is under 4GB
3769 */
3770 if ((qseecom.qsee_version >=
3771 QSEE_VERSION_40) &&
3772 (!cleanup) &&
3773 ((uint64_t)(sg_dma_address(sg))
3774 >= PHY_ADDR_4G - sg->length)) {
3775 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3776 data->client.app_name,
3777 &(sg_dma_address(sg)),
3778 sg->length);
3779 goto err;
3780 }
3781 update->phys_addr = cleanup ? 0 :
3782 (uint32_t)sg_dma_address(sg);
3783 update->len = cleanup ? 0 : sg->length;
3784 update++;
3785 len += sg->length;
3786 sg = sg_next(sg);
3787 }
3788 } else {
3789 pr_err("QSEE app arch %u is not supported\n",
3790 data->client.app_arch);
3791 goto err;
3792 }
3793 }
3794
3795 if (cleanup) {
3796 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3797 ihandle, NULL, len,
3798 ION_IOC_INV_CACHES);
3799 if (ret) {
3800 pr_err("cache operation failed %d\n", ret);
3801 goto err;
3802 }
3803 } else {
3804 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3805 ihandle, NULL, len,
3806 ION_IOC_CLEAN_INV_CACHES);
3807 if (ret) {
3808 pr_err("cache operation failed %d\n", ret);
3809 goto err;
3810 }
3811 if (data->type == QSEECOM_CLIENT_APP) {
3812 offset = req->ifd_data[i].cmd_buf_offset;
3813 data->sglistinfo_ptr[i].indexAndFlags =
3814 SGLISTINFO_SET_INDEX_FLAG(
3815 (sg_ptr->nents == 1), 0, offset);
3816 data->sglistinfo_ptr[i].sizeOrCount =
3817 (sg_ptr->nents == 1) ?
3818 sg->length : sg_ptr->nents;
3819 data->sglist_cnt = i + 1;
3820 } else {
3821 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3822 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3823 (uintptr_t)this_lstnr->sb_virt);
3824 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3825 SGLISTINFO_SET_INDEX_FLAG(
3826 (sg_ptr->nents == 1), 0, offset);
3827 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3828 (sg_ptr->nents == 1) ?
3829 sg->length : sg_ptr->nents;
3830 this_lstnr->sglist_cnt = i + 1;
3831 }
3832 }
3833 /* Deallocate the handle */
3834 if (!IS_ERR_OR_NULL(ihandle))
3835 ion_free(qseecom.ion_clnt, ihandle);
3836 }
3837 return ret;
3838err:
3839 if (!IS_ERR_OR_NULL(ihandle))
3840 ion_free(qseecom.ion_clnt, ihandle);
3841 return -ENOMEM;
3842}
3843
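/*
 * When an ion buffer carries more than QSEECOM_MAX_SG_ENTRY scatter-gather
 * entries, spill them into a DMA-coherent buffer and describe it with a
 * version-2 qseecom_sg_list_buf_hdr_64bit written at the patch offset.
 */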
3844static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3845 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3846{
3847 struct scatterlist *sg = sg_ptr->sgl;
3848 struct qseecom_sg_entry_64bit *sg_entry;
3849 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3850 void *buf;
3851 uint i;
3852 size_t size;
3853 dma_addr_t coh_pmem;
3854
3855 if (fd_idx >= MAX_ION_FD) {
3856 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3857 return -ENOMEM;
3858 }
3859 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3860 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3861 /* Allocate a contiguous kernel buffer */
3862 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3863 size = (size + PAGE_SIZE) & PAGE_MASK;
3864 buf = dma_alloc_coherent(qseecom.pdev,
3865 size, &coh_pmem, GFP_KERNEL);
3866 if (buf == NULL) {
3867 pr_err("failed to alloc memory for sg buf\n");
3868 return -ENOMEM;
3869 }
3870 /* update qseecom_sg_list_buf_hdr_64bit */
3871 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3872 buf_hdr->new_buf_phys_addr = coh_pmem;
3873 buf_hdr->nents_total = sg_ptr->nents;
3874 /* save the left sg entries into new allocated buf */
3875 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3876 for (i = 0; i < sg_ptr->nents; i++) {
3877 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3878 sg_entry->len = sg->length;
3879 sg_entry++;
3880 sg = sg_next(sg);
3881 }
3882
3883 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3884 data->client.sec_buf_fd[fd_idx].vbase = buf;
3885 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3886 data->client.sec_buf_fd[fd_idx].size = size;
3887
3888 return 0;
3889}
3890
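/*
 * 64-bit variant of __qseecom_update_cmd_buf(): patches 64-bit physical
 * addresses and falls back to an external sg-list buffer when the entry
 * count exceeds QSEECOM_MAX_SG_ENTRY.
 */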
3891static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
3892 struct qseecom_dev_handle *data)
3893{
3894 struct ion_handle *ihandle;
3895 char *field;
3896 int ret = 0;
3897 int i = 0;
3898 uint32_t len = 0;
3899 struct scatterlist *sg;
3900 struct qseecom_send_modfd_cmd_req *req = NULL;
3901 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3902 struct qseecom_registered_listener_list *this_lstnr = NULL;
3903 uint32_t offset;
3904 struct sg_table *sg_ptr;
3905
3906 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3907 (data->type != QSEECOM_CLIENT_APP))
3908 return -EFAULT;
3909
3910 if (msg == NULL) {
3911 pr_err("Invalid address\n");
3912 return -EINVAL;
3913 }
3914 if (data->type == QSEECOM_LISTENER_SERVICE) {
3915 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3916 this_lstnr = __qseecom_find_svc(data->listener.id);
3917 if (IS_ERR_OR_NULL(this_lstnr)) {
3918 pr_err("Invalid listener ID\n");
3919 return -ENOMEM;
3920 }
3921 } else {
3922 req = (struct qseecom_send_modfd_cmd_req *)msg;
3923 }
3924
3925 for (i = 0; i < MAX_ION_FD; i++) {
3926 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3927 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003928 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003929 req->ifd_data[i].fd);
3930 if (IS_ERR_OR_NULL(ihandle)) {
3931 pr_err("Ion client can't retrieve the handle\n");
3932 return -ENOMEM;
3933 }
3934 field = (char *) req->cmd_req_buf +
3935 req->ifd_data[i].cmd_buf_offset;
3936 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3937 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003938 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003939 lstnr_resp->ifd_data[i].fd);
3940 if (IS_ERR_OR_NULL(ihandle)) {
3941 pr_err("Ion client can't retrieve the handle\n");
3942 return -ENOMEM;
3943 }
3944 field = lstnr_resp->resp_buf_ptr +
3945 lstnr_resp->ifd_data[i].cmd_buf_offset;
3946 } else {
3947 continue;
3948 }
3949 /* Populate the cmd data structure with the phys_addr */
3950 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3951 if (IS_ERR_OR_NULL(sg_ptr)) {
3952 pr_err("Ion client could not retrieve sg table\n");
3953 goto err;
3954 }
3955 if (sg_ptr->nents == 0) {
3956 pr_err("Num of scattered entries is 0\n");
3957 goto err;
3958 }
3959 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3960 pr_warn("Num of scattered entries (%d) is greater than %d\n",
3962 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3963 if (cleanup) {
3964 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3965 data->client.sec_buf_fd[i].vbase)
3966 dma_free_coherent(qseecom.pdev,
3967 data->client.sec_buf_fd[i].size,
3968 data->client.sec_buf_fd[i].vbase,
3969 data->client.sec_buf_fd[i].pbase);
3970 } else {
3971 ret = __qseecom_allocate_sg_list_buffer(data,
3972 field, i, sg_ptr);
3973 if (ret) {
3974 pr_err("Failed to allocate sg list buffer\n");
3975 goto err;
3976 }
3977 }
3978 len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
3979 sg = sg_ptr->sgl;
3980 goto cleanup;
3981 }
3982 sg = sg_ptr->sgl;
3983 if (sg_ptr->nents == 1) {
3984 uint64_t *update_64bit;
3985
Zhen Kongd097c6e02019-08-01 16:10:20 -07003986 if (__boundary_checks_offset_64(req, lstnr_resp,
3987 data, i))
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003988 goto err;
3989 /* 64bit app uses 64bit address */
3990 update_64bit = (uint64_t *) field;
3991 *update_64bit = cleanup ? 0 :
3992 (uint64_t)sg_dma_address(sg_ptr->sgl);
3993 len += (uint32_t)sg->length;
3994 } else {
3995 struct qseecom_sg_entry_64bit *update_64bit;
3996 int j = 0;
3997
3998 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3999 (req->ifd_data[i].fd > 0)) {
4000
4001 if ((req->cmd_req_len <
4002 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
4003 (req->ifd_data[i].cmd_buf_offset >
4004 (req->cmd_req_len -
4005 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
4006 pr_err("Invalid offset = 0x%x\n",
4007 req->ifd_data[i].cmd_buf_offset);
4008 goto err;
4009 }
4010
4011 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
4012 (lstnr_resp->ifd_data[i].fd > 0)) {
4013
4014 if ((lstnr_resp->resp_len <
4015 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
4016 (lstnr_resp->ifd_data[i].cmd_buf_offset >
4017 (lstnr_resp->resp_len -
4018 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
4019 goto err;
4020 }
4021 }
4022 /* 64bit app uses 64bit address */
4023 update_64bit = (struct qseecom_sg_entry_64bit *)field;
4024 for (j = 0; j < sg_ptr->nents; j++) {
4025 update_64bit->phys_addr = cleanup ? 0 :
4026 (uint64_t)sg_dma_address(sg);
4027 update_64bit->len = cleanup ? 0 :
4028 (uint32_t)sg->length;
4029 update_64bit++;
4030 len += sg->length;
4031 sg = sg_next(sg);
4032 }
4033 }
4034cleanup:
4035 if (cleanup) {
4036 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
4037 ihandle, NULL, len,
4038 ION_IOC_INV_CACHES);
4039 if (ret) {
4040 pr_err("cache operation failed %d\n", ret);
4041 goto err;
4042 }
4043 } else {
4044 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
4045 ihandle, NULL, len,
4046 ION_IOC_CLEAN_INV_CACHES);
4047 if (ret) {
4048 pr_err("cache operation failed %d\n", ret);
4049 goto err;
4050 }
4051 if (data->type == QSEECOM_CLIENT_APP) {
4052 offset = req->ifd_data[i].cmd_buf_offset;
4053 data->sglistinfo_ptr[i].indexAndFlags =
4054 SGLISTINFO_SET_INDEX_FLAG(
4055 (sg_ptr->nents == 1), 1, offset);
4056 data->sglistinfo_ptr[i].sizeOrCount =
4057 (sg_ptr->nents == 1) ?
4058 sg->length : sg_ptr->nents;
4059 data->sglist_cnt = i + 1;
4060 } else {
4061 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
4062 + (uintptr_t)lstnr_resp->resp_buf_ptr -
4063 (uintptr_t)this_lstnr->sb_virt);
4064 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
4065 SGLISTINFO_SET_INDEX_FLAG(
4066 (sg_ptr->nents == 1), 1, offset);
4067 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
4068 (sg_ptr->nents == 1) ?
4069 sg->length : sg_ptr->nents;
4070 this_lstnr->sglist_cnt = i + 1;
4071 }
4072 }
4073 /* Deallocate the handle */
4074 if (!IS_ERR_OR_NULL(ihandle))
4075 ion_free(qseecom.ion_clnt, ihandle);
4076 }
4077 return ret;
4078err:
4079 for (i = 0; i < MAX_ION_FD; i++)
4080 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
4081 data->client.sec_buf_fd[i].vbase)
4082 dma_free_coherent(qseecom.pdev,
4083 data->client.sec_buf_fd[i].size,
4084 data->client.sec_buf_fd[i].vbase,
4085 data->client.sec_buf_fd[i].pbase);
4086 if (!IS_ERR_OR_NULL(ihandle))
4087 ion_free(qseecom.ion_clnt, ihandle);
4088 return -ENOMEM;
4089}
4090
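/*
 * Send a command with modified ion fds: validate the request, rebase the
 * buffers to kernel addresses, patch in the fd physical addresses, send
 * the command, then clean the patched fields up again.
 */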
4091static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
4092 void __user *argp,
4093 bool is_64bit_addr)
4094{
4095 int ret = 0;
4096 int i;
4097 struct qseecom_send_modfd_cmd_req req;
4098 struct qseecom_send_cmd_req send_cmd_req;
4099
4100 ret = copy_from_user(&req, argp, sizeof(req));
4101 if (ret) {
4102 pr_err("copy_from_user failed\n");
4103 return -EFAULT;
4104 }
4105
4106 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
4107 send_cmd_req.cmd_req_len = req.cmd_req_len;
4108 send_cmd_req.resp_buf = req.resp_buf;
4109 send_cmd_req.resp_len = req.resp_len;
4110
4111 if (__validate_send_cmd_inputs(data, &send_cmd_req))
4112 return -EINVAL;
4113
4114 /* validate offsets */
4115 for (i = 0; i < MAX_ION_FD; i++) {
4116 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
4117 pr_err("Invalid offset %d = 0x%x\n",
4118 i, req.ifd_data[i].cmd_buf_offset);
4119 return -EINVAL;
4120 }
4121 }
4122 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
4123 (uintptr_t)req.cmd_req_buf);
4124 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
4125 (uintptr_t)req.resp_buf);
4126
4127 if (!is_64bit_addr) {
4128 ret = __qseecom_update_cmd_buf(&req, false, data);
4129 if (ret)
4130 return ret;
4131 ret = __qseecom_send_cmd(data, &send_cmd_req);
4132 if (ret)
4133 return ret;
4134 ret = __qseecom_update_cmd_buf(&req, true, data);
4135 if (ret)
4136 return ret;
4137 } else {
4138 ret = __qseecom_update_cmd_buf_64(&req, false, data);
4139 if (ret)
4140 return ret;
4141 ret = __qseecom_send_cmd(data, &send_cmd_req);
4142 if (ret)
4143 return ret;
4144 ret = __qseecom_update_cmd_buf_64(&req, true, data);
4145 if (ret)
4146 return ret;
4147 }
4148
4149 return ret;
4150}
4151
4152static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
4153 void __user *argp)
4154{
4155 return __qseecom_send_modfd_cmd(data, argp, false);
4156}
4157
4158static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
4159 void __user *argp)
4160{
4161 return __qseecom_send_modfd_cmd(data, argp, true);
4162}
4163
4164
4165
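/* Wake-up condition for a listener: a request is pending or an abort was issued. */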
4166static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
4167 struct qseecom_registered_listener_list *svc)
4168{
4169 int ret;
4170
Zhen Kongf5087172018-10-11 17:22:05 -07004171 ret = (svc->rcv_req_flag == 1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08004172 return ret || data->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004173}
4174
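/*
 * Block the calling listener until QSEE posts a request for it (or the
 * listener is aborted), then clear the pending-request flag.
 */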
4175static int qseecom_receive_req(struct qseecom_dev_handle *data)
4176{
4177 int ret = 0;
4178 struct qseecom_registered_listener_list *this_lstnr;
4179
Zhen Kongbcdeda22018-11-16 13:50:51 -08004180 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004181 this_lstnr = __qseecom_find_svc(data->listener.id);
4182 if (!this_lstnr) {
4183 pr_err("Invalid listener ID\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08004184 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004185 return -ENODATA;
4186 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004187 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004188
4189 while (1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05304190 if (wait_event_interruptible(this_lstnr->rcv_req_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004191 __qseecom_listener_has_rcvd_req(data,
4192 this_lstnr))) {
Zhen Kong52ce9062018-09-24 14:33:27 -07004193 pr_debug("Interrupted: exiting Listener Service = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004194 (uint32_t)data->listener.id);
4195 /* woken up for different reason */
4196 return -ERESTARTSYS;
4197 }
4198
Zhen Kongbcdeda22018-11-16 13:50:51 -08004199 if (data->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004200 pr_err("Aborting Listener Service = %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07004201 (uint32_t)data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004202 return -ENODEV;
4203 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004204 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004205 this_lstnr->rcv_req_flag = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08004206 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004207 break;
4208 }
4209 return ret;
4210}
4211
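/*
 * Basic sanity checks on a TA .mdt image: ELF class and magic, at least
 * one program header, and all program headers contained within the file.
 */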
4212static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
4213{
4214 unsigned char app_arch = 0;
4215 struct elf32_hdr *ehdr;
4216 struct elf64_hdr *ehdr64;
4217
4218 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4219
4220 switch (app_arch) {
4221 case ELFCLASS32: {
4222 ehdr = (struct elf32_hdr *)fw_entry->data;
4223 if (fw_entry->size < sizeof(*ehdr)) {
4224 pr_err("%s: Not big enough to be an elf32 header\n",
4225 qseecom.pdev->init_name);
4226 return false;
4227 }
4228 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
4229 pr_err("%s: Not an elf32 header\n",
4230 qseecom.pdev->init_name);
4231 return false;
4232 }
4233 if (ehdr->e_phnum == 0) {
4234 pr_err("%s: No loadable segments\n",
4235 qseecom.pdev->init_name);
4236 return false;
4237 }
4238 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
4239 sizeof(struct elf32_hdr) > fw_entry->size) {
4240 pr_err("%s: Program headers not within mdt\n",
4241 qseecom.pdev->init_name);
4242 return false;
4243 }
4244 break;
4245 }
4246 case ELFCLASS64: {
4247 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4248 if (fw_entry->size < sizeof(*ehdr64)) {
4249 pr_err("%s: Not big enough to be an elf64 header\n",
4250 qseecom.pdev->init_name);
4251 return false;
4252 }
4253 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
4254 pr_err("%s: Not an elf64 header\n",
4255 qseecom.pdev->init_name);
4256 return false;
4257 }
4258 if (ehdr64->e_phnum == 0) {
4259 pr_err("%s: No loadable segments\n",
4260 qseecom.pdev->init_name);
4261 return false;
4262 }
4263 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
4264 sizeof(struct elf64_hdr) > fw_entry->size) {
4265 pr_err("%s: Program headers not within mdt\n",
4266 qseecom.pdev->init_name);
4267 return false;
4268 }
4269 break;
4270 }
4271 default: {
4272 pr_err("QSEE app arch %u is not supported\n", app_arch);
4273 return false;
4274 }
4275 }
4276 return true;
4277}
4278
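/*
 * Compute the total size of a split TA image (<appname>.mdt plus each
 * <appname>.bNN blob) and report the ELF class of the application.
 */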
4279static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
4280 uint32_t *app_arch)
4281{
4282 int ret = -1;
4283 int i = 0, rc = 0;
4284 const struct firmware *fw_entry = NULL;
4285 char fw_name[MAX_APP_NAME_SIZE];
4286 struct elf32_hdr *ehdr;
4287 struct elf64_hdr *ehdr64;
4288 int num_images = 0;
4289
4290 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4291 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4292 if (rc) {
4293 pr_err("error with request_firmware\n");
4294 ret = -EIO;
4295 goto err;
4296 }
4297 if (!__qseecom_is_fw_image_valid(fw_entry)) {
4298 ret = -EIO;
4299 goto err;
4300 }
4301 *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4302 *fw_size = fw_entry->size;
4303 if (*app_arch == ELFCLASS32) {
4304 ehdr = (struct elf32_hdr *)fw_entry->data;
4305 num_images = ehdr->e_phnum;
4306 } else if (*app_arch == ELFCLASS64) {
4307 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4308 num_images = ehdr64->e_phnum;
4309 } else {
4310 pr_err("QSEE %s app, arch %u is not supported\n",
4311 appname, *app_arch);
4312 ret = -EIO;
4313 goto err;
4314 }
4315 pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
4316 release_firmware(fw_entry);
4317 fw_entry = NULL;
4318 for (i = 0; i < num_images; i++) {
4319 memset(fw_name, 0, sizeof(fw_name));
4320 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4321 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4322 if (ret)
4323 goto err;
4324 if (*fw_size > U32_MAX - fw_entry->size) {
4325 pr_err("QSEE %s app file size overflow\n", appname);
4326 ret = -EINVAL;
4327 goto err;
4328 }
4329 *fw_size += fw_entry->size;
4330 release_firmware(fw_entry);
4331 fw_entry = NULL;
4332 }
4333
4334 return ret;
4335err:
4336 if (fw_entry)
4337 release_firmware(fw_entry);
4338 *fw_size = 0;
4339 return ret;
4340}
4341
4342static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
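/*
 * Concatenate the .mdt header and every .bNN blob of a TA into the
 * preallocated image buffer, filling in mdt_len and img_len of the load
 * request.
 */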
4343 uint32_t fw_size,
4344 struct qseecom_load_app_ireq *load_req)
4345{
4346 int ret = -1;
4347 int i = 0, rc = 0;
4348 const struct firmware *fw_entry = NULL;
4349 char fw_name[MAX_APP_NAME_SIZE];
4350 u8 *img_data_ptr = img_data;
4351 struct elf32_hdr *ehdr;
4352 struct elf64_hdr *ehdr64;
4353 int num_images = 0;
4354 unsigned char app_arch = 0;
4355
4356 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4357 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4358 if (rc) {
4359 ret = -EIO;
4360 goto err;
4361 }
4362
4363 load_req->img_len = fw_entry->size;
4364 if (load_req->img_len > fw_size) {
4365 pr_err("app %s size %zu is larger than buf size %u\n",
4366 appname, fw_entry->size, fw_size);
4367 ret = -EINVAL;
4368 goto err;
4369 }
4370 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4371 img_data_ptr = img_data_ptr + fw_entry->size;
4372 load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
4373
4374 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4375 if (app_arch == ELFCLASS32) {
4376 ehdr = (struct elf32_hdr *)fw_entry->data;
4377 num_images = ehdr->e_phnum;
4378 } else if (app_arch == ELFCLASS64) {
4379 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4380 num_images = ehdr64->e_phnum;
4381 } else {
4382 pr_err("QSEE %s app, arch %u is not supported\n",
4383 appname, app_arch);
4384 ret = -EIO;
4385 goto err;
4386 }
4387 release_firmware(fw_entry);
4388 fw_entry = NULL;
4389 for (i = 0; i < num_images; i++) {
4390 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4391 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4392 if (ret) {
4393 pr_err("Failed to locate blob %s\n", fw_name);
4394 goto err;
4395 }
4396 if ((fw_entry->size > U32_MAX - load_req->img_len) ||
4397 (fw_entry->size + load_req->img_len > fw_size)) {
4398 pr_err("Invalid file size for %s\n", fw_name);
4399 ret = -EINVAL;
4400 goto err;
4401 }
4402 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4403 img_data_ptr = img_data_ptr + fw_entry->size;
4404 load_req->img_len += fw_entry->size;
4405 release_firmware(fw_entry);
4406 fw_entry = NULL;
4407 }
4408 return ret;
4409err:
4410 release_firmware(fw_entry);
4411 return ret;
4412}
4413
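/*
 * Allocate an ion buffer for a TA image, retrying a few times on
 * allocation failure, and return its kernel mapping and physical address.
 */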
4414static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4415 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4416{
4417 size_t len = 0;
4418 int ret = 0;
4419 ion_phys_addr_t pa;
4420 struct ion_handle *ihandle = NULL;
4421 u8 *img_data = NULL;
Zhen Kong3dd92792017-12-08 09:47:15 -08004422 int retry = 0;
Zhen Konge30e1342019-01-22 08:57:02 -08004423 int ion_flag = ION_FLAG_CACHED;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004424
Zhen Kong3dd92792017-12-08 09:47:15 -08004425 do {
Zhen Kong5d02be92018-05-29 16:17:29 -07004426 if (retry++) {
4427 mutex_unlock(&app_access_lock);
Zhen Kong3dd92792017-12-08 09:47:15 -08004428 msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
Zhen Kong5d02be92018-05-29 16:17:29 -07004429 mutex_lock(&app_access_lock);
4430 }
Zhen Kong3dd92792017-12-08 09:47:15 -08004431 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
Zhen Konge30e1342019-01-22 08:57:02 -08004432 SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), ion_flag);
Zhen Kong3dd92792017-12-08 09:47:15 -08004433 } while (IS_ERR_OR_NULL(ihandle) &&
4434 (retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004435
4436 if (IS_ERR_OR_NULL(ihandle)) {
4437 pr_err("ION alloc failed\n");
4438 return -ENOMEM;
4439 }
4440 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4441 ihandle);
4442
4443 if (IS_ERR_OR_NULL(img_data)) {
4444 pr_err("ION memory mapping for image loading failed\n");
4445 ret = -ENOMEM;
4446 goto exit_ion_free;
4447 }
4448 /* Get the physical address of the ION BUF */
4449 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4450 if (ret) {
4451 pr_err("physical memory retrieval failure\n");
4452 ret = -EIO;
4453 goto exit_ion_unmap_kernel;
4454 }
4455
4456 *pihandle = ihandle;
4457 *data = img_data;
4458 *paddr = pa;
4459 return ret;
4460
4461exit_ion_unmap_kernel:
4462 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4463exit_ion_free:
4464 ion_free(qseecom.ion_clnt, ihandle);
4465 ihandle = NULL;
4466 return ret;
4467}
4468
4469static void __qseecom_free_img_data(struct ion_handle **ihandle)
4470{
4471 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4472 ion_free(qseecom.ion_clnt, *ihandle);
4473 *ihandle = NULL;
4474}
4475
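/*
 * Load a TA from the firmware partition: make sure the matching cmnlib is
 * loaded, copy the split image into an ion buffer, and issue the
 * QSEOS_APP_START_COMMAND SCM call; returns the app_id assigned by QSEE.
 */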
4476static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4477 uint32_t *app_id)
4478{
4479 int ret = -1;
4480 uint32_t fw_size = 0;
4481 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4482 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4483 struct qseecom_command_scm_resp resp;
4484 u8 *img_data = NULL;
4485 ion_phys_addr_t pa = 0;
4486 struct ion_handle *ihandle = NULL;
4487 void *cmd_buf = NULL;
4488 size_t cmd_len;
4489 uint32_t app_arch = 0;
4490
4491 if (!data || !appname || !app_id) {
4492 pr_err("Null pointer to data or appname or appid\n");
4493 return -EINVAL;
4494 }
4495 *app_id = 0;
4496 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4497 return -EIO;
4498 data->client.app_arch = app_arch;
4499
4500 /* Check and load cmnlib */
4501 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4502 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4503 ret = qseecom_load_commonlib_image(data, "cmnlib");
4504 if (ret) {
4505 pr_err("failed to load cmnlib\n");
4506 return -EIO;
4507 }
4508 qseecom.commonlib_loaded = true;
4509 pr_debug("cmnlib is loaded\n");
4510 }
4511
4512 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4513 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4514 if (ret) {
4515 pr_err("failed to load cmnlib64\n");
4516 return -EIO;
4517 }
4518 qseecom.commonlib64_loaded = true;
4519 pr_debug("cmnlib64 is loaded\n");
4520 }
4521 }
4522
4523 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4524 if (ret)
4525 return ret;
4526
4527 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4528 if (ret) {
4529 ret = -EIO;
4530 goto exit_free_img_data;
4531 }
4532
4533 /* Populate the load_req parameters */
4534 if (qseecom.qsee_version < QSEE_VERSION_40) {
4535 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4536 /* mdt_len and img_len already populated by __qseecom_get_fw_data() */
4538 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4539 load_req.phy_addr = (uint32_t)pa;
4540 cmd_buf = (void *)&load_req;
4541 cmd_len = sizeof(struct qseecom_load_app_ireq);
4542 } else {
4543 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4544 load_req_64bit.mdt_len = load_req.mdt_len;
4545 load_req_64bit.img_len = load_req.img_len;
4546 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4547 load_req_64bit.phy_addr = (uint64_t)pa;
4548 cmd_buf = (void *)&load_req_64bit;
4549 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4550 }
4551
4552 if (qseecom.support_bus_scaling) {
4553 mutex_lock(&qsee_bw_mutex);
4554 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4555 mutex_unlock(&qsee_bw_mutex);
4556 if (ret) {
4557 ret = -EIO;
4558 goto exit_free_img_data;
4559 }
4560 }
4561
4562 ret = __qseecom_enable_clk_scale_up(data);
4563 if (ret) {
4564 ret = -EIO;
4565 goto exit_unregister_bus_bw_need;
4566 }
4567
4568 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4569 img_data, fw_size,
4570 ION_IOC_CLEAN_INV_CACHES);
4571 if (ret) {
4572 pr_err("cache operation failed %d\n", ret);
4573 goto exit_disable_clk_vote;
4574 }
4575
4576 /* SCM_CALL to load the image */
4577 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4578 &resp, sizeof(resp));
4579 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004580 pr_err("scm_call to load failed : ret %d, result %x\n",
4581 ret, resp.result);
4582 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4583 ret = -EEXIST;
4584 else
4585 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004586 goto exit_disable_clk_vote;
4587 }
4588
4589 switch (resp.result) {
4590 case QSEOS_RESULT_SUCCESS:
4591 *app_id = resp.data;
4592 break;
4593 case QSEOS_RESULT_INCOMPLETE:
4594 ret = __qseecom_process_incomplete_cmd(data, &resp);
Zhen Kong03b2eae2019-09-17 16:58:46 -07004595 if (ret) {
4596 pr_err("incomp_cmd err %d, %d, unload %d %s\n",
4597 ret, resp.result, resp.data, appname);
4598 __qseecom_unload_app(data, resp.data);
4599 ret = -EFAULT;
4600 } else {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004601 *app_id = resp.data;
Zhen Kong03b2eae2019-09-17 16:58:46 -07004602 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004603 break;
4604 case QSEOS_RESULT_FAILURE:
4605 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4606 break;
4607 default:
4608 pr_err("scm call return unknown response %d\n", resp.result);
4609 ret = -EINVAL;
4610 break;
4611 }
4612
4613exit_disable_clk_vote:
4614 __qseecom_disable_clk_scale_down(data);
4615
4616exit_unregister_bus_bw_need:
4617 if (qseecom.support_bus_scaling) {
4618 mutex_lock(&qsee_bw_mutex);
4619 qseecom_unregister_bus_bandwidth_needs(data);
4620 mutex_unlock(&qsee_bw_mutex);
4621 }
4622
4623exit_free_img_data:
4624 __qseecom_free_img_data(&ihandle);
4625 return ret;
4626}
4627
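/*
 * Load a common library image (cmnlib or cmnlib64) into QSEE via the
 * QSEOS_LOAD_SERV_IMAGE_COMMAND SCM call.
 */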
4628static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
4629 char *cmnlib_name)
4630{
4631 int ret = 0;
4632 uint32_t fw_size = 0;
4633 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4634 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4635 struct qseecom_command_scm_resp resp;
4636 u8 *img_data = NULL;
4637 ion_phys_addr_t pa = 0;
4638 void *cmd_buf = NULL;
4639 size_t cmd_len;
4640 uint32_t app_arch = 0;
Zhen Kong3bafb312017-10-18 10:27:20 -07004641 struct ion_handle *cmnlib_ion_handle = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004642
4643 if (!cmnlib_name) {
4644 pr_err("cmnlib_name is NULL\n");
4645 return -EINVAL;
4646 }
4647 if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
4648 pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
4649 cmnlib_name, strlen(cmnlib_name));
4650 return -EINVAL;
4651 }
4652
4653 if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
4654 return -EIO;
4655
Zhen Kong3bafb312017-10-18 10:27:20 -07004656 ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004657 &img_data, fw_size, &pa);
4658 if (ret)
4659 return -EIO;
4660
4661 ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
4662 if (ret) {
4663 ret = -EIO;
4664 goto exit_free_img_data;
4665 }
4666 if (qseecom.qsee_version < QSEE_VERSION_40) {
4667 load_req.phy_addr = (uint32_t)pa;
4668 load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4669 cmd_buf = (void *)&load_req;
4670 cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
4671 } else {
4672 load_req_64bit.phy_addr = (uint64_t)pa;
4673 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4674 load_req_64bit.img_len = load_req.img_len;
4675 load_req_64bit.mdt_len = load_req.mdt_len;
4676 cmd_buf = (void *)&load_req_64bit;
4677 cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
4678 }
4679
4680 if (qseecom.support_bus_scaling) {
4681 mutex_lock(&qsee_bw_mutex);
4682 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4683 mutex_unlock(&qsee_bw_mutex);
4684 if (ret) {
4685 ret = -EIO;
4686 goto exit_free_img_data;
4687 }
4688 }
4689
4690 /* Vote for the SFPB clock */
4691 ret = __qseecom_enable_clk_scale_up(data);
4692 if (ret) {
4693 ret = -EIO;
4694 goto exit_unregister_bus_bw_need;
4695 }
4696
Zhen Kong3bafb312017-10-18 10:27:20 -07004697 ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004698 img_data, fw_size,
4699 ION_IOC_CLEAN_INV_CACHES);
4700 if (ret) {
4701 pr_err("cache operation failed %d\n", ret);
4702 goto exit_disable_clk_vote;
4703 }
4704
4705 /* SCM_CALL to load the image */
4706 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4707 &resp, sizeof(resp));
4708 if (ret) {
4709 pr_err("scm_call to load failed : ret %d\n", ret);
4710 ret = -EIO;
4711 goto exit_disable_clk_vote;
4712 }
4713
4714 switch (resp.result) {
4715 case QSEOS_RESULT_SUCCESS:
4716 break;
4717 case QSEOS_RESULT_FAILURE:
4718 pr_err("scm call failed w/response result%d\n", resp.result);
4719 ret = -EINVAL;
4720 goto exit_disable_clk_vote;
4721 case QSEOS_RESULT_INCOMPLETE:
4722 ret = __qseecom_process_incomplete_cmd(data, &resp);
4723 if (ret) {
4724 pr_err("process_incomplete_cmd failed err: %d\n", ret);
4725 goto exit_disable_clk_vote;
4726 }
4727 break;
4728 default:
4729 pr_err("scm call return unknown response %d\n", resp.result);
4730 ret = -EINVAL;
4731 goto exit_disable_clk_vote;
4732 }
4733
4734exit_disable_clk_vote:
4735 __qseecom_disable_clk_scale_down(data);
4736
4737exit_unregister_bus_bw_need:
4738 if (qseecom.support_bus_scaling) {
4739 mutex_lock(&qsee_bw_mutex);
4740 qseecom_unregister_bus_bandwidth_needs(data);
4741 mutex_unlock(&qsee_bw_mutex);
4742 }
4743
4744exit_free_img_data:
Zhen Kong3bafb312017-10-18 10:27:20 -07004745 __qseecom_free_img_data(&cmnlib_ion_handle);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004746 return ret;
4747}
4748
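/* Ask QSEE to unload the previously loaded common library image. */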
4749static int qseecom_unload_commonlib_image(void)
4750{
4751 int ret = -EINVAL;
4752 struct qseecom_unload_lib_image_ireq unload_req = {0};
4753 struct qseecom_command_scm_resp resp;
4754
4755 /* Populate the remaining parameters */
4756 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4757
4758 /* SCM_CALL to load the image */
4759 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4760 sizeof(struct qseecom_unload_lib_image_ireq),
4761 &resp, sizeof(resp));
4762 if (ret) {
4763 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4764 ret = -EIO;
4765 } else {
4766 switch (resp.result) {
4767 case QSEOS_RESULT_SUCCESS:
4768 break;
4769 case QSEOS_RESULT_FAILURE:
4770 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4771 break;
4772 default:
4773 pr_err("scm call return unknown response %d\n",
4774 resp.result);
4775 ret = -EINVAL;
4776 break;
4777 }
4778 }
4779
4780 return ret;
4781}
4782
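/*
 * Kernel-client API: allocate a qseecom handle and its shared buffer,
 * load the named TA if it is not already running (otherwise bump its
 * reference count), and register it in the app list.
 */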
4783int qseecom_start_app(struct qseecom_handle **handle,
4784 char *app_name, uint32_t size)
4785{
4786 int32_t ret = 0;
4787 unsigned long flags = 0;
4788 struct qseecom_dev_handle *data = NULL;
4789 struct qseecom_check_app_ireq app_ireq;
4790 struct qseecom_registered_app_list *entry = NULL;
4791 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4792 bool found_app = false;
4793 size_t len;
4794 ion_phys_addr_t pa;
4795 uint32_t fw_size, app_arch;
4796 uint32_t app_id = 0;
4797
Zhen Kongc4c162a2019-01-23 12:07:12 -08004798 __wakeup_unregister_listener_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07004799 __wakeup_unload_app_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004800
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004801 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4802 pr_err("Not allowed to be called in %d state\n",
4803 atomic_read(&qseecom.qseecom_state));
4804 return -EPERM;
4805 }
4806 if (!app_name) {
4807 pr_err("failed to get the app name\n");
4808 return -EINVAL;
4809 }
4810
Zhen Kong64a6d7282017-06-16 11:55:07 -07004811 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004812 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004813 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004814 return -EINVAL;
4815 }
4816
4817 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4818 if (!(*handle))
4819 return -ENOMEM;
4820
4821 data = kzalloc(sizeof(*data), GFP_KERNEL);
4822 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304823 ret = -ENOMEM;
4824 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004825 }
4826 data->abort = 0;
4827 data->type = QSEECOM_CLIENT_APP;
4828 data->released = false;
4829 data->client.sb_length = size;
4830 data->client.user_virt_sb_base = 0;
4831 data->client.ihandle = NULL;
4832
4833 init_waitqueue_head(&data->abort_wq);
4834
4835 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4836 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4837 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4838 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304839 ret = -ENOMEM;
4840 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004841 }
4842 mutex_lock(&app_access_lock);
4843
Zhen Kong5d02be92018-05-29 16:17:29 -07004844recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004845 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4846 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4847 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4848 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304849 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004850
4851 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4852 if (app_id) {
4853 pr_warn("App id %d for [%s] app exists\n", app_id,
4854 (char *)app_ireq.app_name);
4855 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4856 list_for_each_entry(entry,
4857 &qseecom.registered_app_list_head, list){
4858 if (entry->app_id == app_id) {
4859 entry->ref_cnt++;
4860 found_app = true;
4861 break;
4862 }
4863 }
4864 spin_unlock_irqrestore(
4865 &qseecom.registered_app_list_lock, flags);
4866 if (!found_app)
4867 pr_warn("App_id %d [%s] was loaded but not registered\n",
4868 app_id, (char *)app_ireq.app_name);
4869 } else {
4870 /* load the app and get the app_id */
4871 pr_debug("%s: Loading app for the first time'\n",
4872 qseecom.pdev->init_name);
4873 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004874 if (ret == -EEXIST) {
4875 pr_err("recheck if TA %s is loaded\n", app_name);
4876 goto recheck;
4877 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304878 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004879 }
4880 data->client.app_id = app_id;
4881 if (!found_app) {
4882 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4883 if (!entry) {
4884 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304885 ret = -ENOMEM;
4886 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004887 }
4888 entry->app_id = app_id;
4889 entry->ref_cnt = 1;
4890 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4891 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4892 ret = -EIO;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304893 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004894 }
4895 entry->app_arch = app_arch;
4896 entry->app_blocked = false;
4897 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004898 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004899 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4900 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4901 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4902 flags);
4903 }
4904
4905 /* Get the physical address of the ION BUF */
4906 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4907 if (ret) {
4908 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4909 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304910 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004911 }
4912
4913 /* Map the ION buffer to get the shared buffer kernel virtual address */
4914 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4915 data->client.ihandle);
4916 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4917 pr_err("ION memory mapping for client shared buf failed\n");
4918 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304919 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004920 }
4921 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4922 data->client.sb_phys = (phys_addr_t)pa;
4923 (*handle)->dev = (void *)data;
4924 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4925 (*handle)->sbuf_len = data->client.sb_length;
4926
4927 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4928 if (!kclient_entry) {
4929 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304930 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004931 }
4932 kclient_entry->handle = *handle;
4933
4934 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4935 list_add_tail(&kclient_entry->list,
4936 &qseecom.registered_kclient_list_head);
4937 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4938
4939 mutex_unlock(&app_access_lock);
Zhen Kong03b2eae2019-09-17 16:58:46 -07004940 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004941 return 0;
4942
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304943exit_ion_unmap_kernel:
4944 if (!IS_ERR_OR_NULL(data->client.ihandle))
4945 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4946exit_entry_free:
4947 kfree(entry);
4948exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004949 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304950 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4951 ion_free(qseecom.ion_clnt, data->client.ihandle);
4952 data->client.ihandle = NULL;
4953 }
4954exit_data_free:
4955 kfree(data);
4956exit_handle_free:
4957 if (*handle) {
4958 kfree(*handle);
4959 *handle = NULL;
4960 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07004961 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004962 return ret;
4963}
4964EXPORT_SYMBOL(qseecom_start_app);
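/*
 * Illustrative sketch only (not part of the original driver): how an
 * in-kernel client would bring up a trusted app with qseecom_start_app().
 * The app name "sampleapp", the 4096-byte shared buffer size, and the
 * qseecom_example_* names are assumptions made for this example.
 */
static int __maybe_unused qseecom_example_start(struct qseecom_handle **hdl)
{
	int ret;

	/* Loads the TA (if not already loaded) and allocates the shared buffer */
	ret = qseecom_start_app(hdl, "sampleapp", 4096);
	if (ret) {
		pr_err("example: qseecom_start_app failed, ret %d\n", ret);
		return ret;
	}
	pr_debug("example: sbuf %pK, sbuf_len %u\n",
			(*hdl)->sbuf, (*hdl)->sbuf_len);
	return 0;
}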
4965
4966int qseecom_shutdown_app(struct qseecom_handle **handle)
4967{
4968 int ret = -EINVAL;
4969 struct qseecom_dev_handle *data;
4970
4971 struct qseecom_registered_kclient_list *kclient = NULL;
4972 unsigned long flags = 0;
4973 bool found_handle = false;
4974
Zhen Kongc4c162a2019-01-23 12:07:12 -08004975 __wakeup_unregister_listener_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07004976 __wakeup_unload_app_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004977
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004978 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4979 pr_err("Not allowed to be called in %d state\n",
4980 atomic_read(&qseecom.qseecom_state));
4981 return -EPERM;
4982 }
4983
4984 if ((handle == NULL) || (*handle == NULL)) {
4985 pr_err("Handle is not initialized\n");
4986 return -EINVAL;
4987 }
4988 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4989 mutex_lock(&app_access_lock);
4990
4991 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4992 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4993 list) {
4994 if (kclient->handle == (*handle)) {
4995 list_del(&kclient->list);
4996 found_handle = true;
4997 break;
4998 }
4999 }
5000 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
5001 if (!found_handle)
5002 pr_err("Unable to find the handle, exiting\n");
5003 else
5004 ret = qseecom_unload_app(data, false);
5005
5006 mutex_unlock(&app_access_lock);
5007 if (ret == 0) {
5008 kzfree(data);
5009 kzfree(*handle);
5010 kzfree(kclient);
5011 *handle = NULL;
5012 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07005013 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005014 return ret;
5015}
5016EXPORT_SYMBOL(qseecom_shutdown_app);
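/*
 * Illustrative teardown sketch (assumption, mirrors the start example
 * above): on success qseecom_shutdown_app() frees the handle and sets
 * *handle to NULL, so the caller must not use it afterwards.
 */
static void __maybe_unused qseecom_example_stop(struct qseecom_handle **hdl)
{
	int ret = qseecom_shutdown_app(hdl);

	if (ret)
		pr_err("example: qseecom_shutdown_app failed, ret %d\n", ret);
	/* On success *hdl has been freed and cleared by the driver */
}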
5017
5018int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
5019 uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
5020{
5021 int ret = 0;
5022 struct qseecom_send_cmd_req req = {0, 0, 0, 0};
5023 struct qseecom_dev_handle *data;
5024 bool perf_enabled = false;
5025
Zhen Kongc4c162a2019-01-23 12:07:12 -08005026 __wakeup_unregister_listener_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07005027 __wakeup_unload_app_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08005028
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005029 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
5030 pr_err("Not allowed to be called in %d state\n",
5031 atomic_read(&qseecom.qseecom_state));
5032 return -EPERM;
5033 }
5034
5035 if (handle == NULL) {
5036 pr_err("Handle is not initialized\n");
5037 return -EINVAL;
5038 }
5039 data = handle->dev;
5040
5041 req.cmd_req_len = sbuf_len;
5042 req.resp_len = rbuf_len;
5043 req.cmd_req_buf = send_buf;
5044 req.resp_buf = resp_buf;
5045
5046 if (__validate_send_cmd_inputs(data, &req))
5047 return -EINVAL;
5048
5049 mutex_lock(&app_access_lock);
5050 if (qseecom.support_bus_scaling) {
5051 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
5052 if (ret) {
5053 pr_err("Failed to set bw.\n");
5054 mutex_unlock(&app_access_lock);
5055 return ret;
5056 }
5057 }
5058 /*
5059 * On targets where crypto clock is handled by HLOS,
5060 * if clk_access_cnt is zero and perf_enabled is false,
5061 * then the crypto clock was not enabled before sending cmd
5062 * to tz, so qseecom enables the clock here to avoid service failure.
5063 */
5064 if (!qseecom.no_clock_support &&
5065 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
5066 pr_debug("ce clock is not enabled!\n");
5067 ret = qseecom_perf_enable(data);
5068 if (ret) {
5069 pr_err("Failed to vote for clock with err %d\n",
5070 ret);
5071 mutex_unlock(&app_access_lock);
5072 return -EINVAL;
5073 }
5074 perf_enabled = true;
5075 }
Mohamed Sunfeer4887bbc2020-01-16 13:26:38 +05305076 if (!strcmp(data->client.app_name, "securemm") ||
5077 !strcmp(data->client.app_name, "bgapp")) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005078 data->use_legacy_cmd = true;
Mohamed Sunfeer4887bbc2020-01-16 13:26:38 +05305079 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005080
5081 ret = __qseecom_send_cmd(data, &req);
5082 data->use_legacy_cmd = false;
5083 if (qseecom.support_bus_scaling)
5084 __qseecom_add_bw_scale_down_timer(
5085 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
5086
5087 if (perf_enabled) {
5088 qsee_disable_clock_vote(data, CLK_DFAB);
5089 qsee_disable_clock_vote(data, CLK_SFPB);
5090 }
5091
5092 mutex_unlock(&app_access_lock);
5093
5094 if (ret)
5095 return ret;
5096
5097 pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
5098 req.resp_len, req.resp_buf);
5099 return ret;
5100}
5101EXPORT_SYMBOL(qseecom_send_command);
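/*
 * Illustrative sketch (assumption, not driver code): one command round
 * trip to a loaded TA. The request/response structs, the command id and
 * the 64-byte offset used for the response are hypothetical; a real
 * client follows its TA's command ABI. Both buffers must sit inside
 * handle->sbuf, as checked by __validate_send_cmd_inputs().
 */
struct qseecom_example_req {
	uint32_t cmd_id;	/* hypothetical command id */
};

struct qseecom_example_rsp {
	int32_t status;		/* hypothetical TA status code */
};

static int __maybe_unused qseecom_example_send(struct qseecom_handle *hdl)
{
	struct qseecom_example_req *req;
	struct qseecom_example_rsp *rsp;
	int ret;

	if (!hdl || hdl->sbuf_len < 64 + sizeof(*rsp))
		return -EINVAL;

	/* Carve request and response regions out of the shared buffer */
	req = (struct qseecom_example_req *)hdl->sbuf;
	rsp = (struct qseecom_example_rsp *)(hdl->sbuf + 64);
	req->cmd_id = 0x1;

	ret = qseecom_send_command(hdl, req, sizeof(*req),
				rsp, sizeof(*rsp));
	if (ret)
		pr_err("example: qseecom_send_command failed, ret %d\n", ret);
	else
		pr_debug("example: TA status %d\n", rsp->status);
	return ret;
}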
5102
5103int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
5104{
5105 int ret = 0;
5106
5107 if ((handle == NULL) || (handle->dev == NULL)) {
5108 pr_err("No valid kernel client\n");
5109 return -EINVAL;
5110 }
5111 if (high) {
5112 if (qseecom.support_bus_scaling) {
5113 mutex_lock(&qsee_bw_mutex);
5114 __qseecom_register_bus_bandwidth_needs(handle->dev,
5115 HIGH);
5116 mutex_unlock(&qsee_bw_mutex);
5117 } else {
5118 ret = qseecom_perf_enable(handle->dev);
5119 if (ret)
5120 pr_err("Failed to vote for clock with err %d\n",
5121 ret);
5122 }
5123 } else {
5124 if (!qseecom.support_bus_scaling) {
5125 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
5126 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
5127 } else {
5128 mutex_lock(&qsee_bw_mutex);
5129 qseecom_unregister_bus_bandwidth_needs(handle->dev);
5130 mutex_unlock(&qsee_bw_mutex);
5131 }
5132 }
5133 return ret;
5134}
5135EXPORT_SYMBOL(qseecom_set_bandwidth);
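/*
 * Illustrative sketch (assumption): a client expecting a crypto-heavy
 * burst votes HIGH before the burst and releases the vote afterwards,
 * exercising both branches of qseecom_set_bandwidth() above.
 */
static int __maybe_unused qseecom_example_burst(struct qseecom_handle *hdl,
			void *req, uint32_t req_len,
			void *rsp, uint32_t rsp_len)
{
	int ret;

	ret = qseecom_set_bandwidth(hdl, true);
	if (ret)
		return ret;

	ret = qseecom_send_command(hdl, req, req_len, rsp, rsp_len);

	/* Always drop the vote, even if the command failed */
	qseecom_set_bandwidth(hdl, false);
	return ret;
}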
5136
5137int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
5138{
5139 struct qseecom_registered_app_list dummy_app_entry = { {0} };
5140 struct qseecom_dev_handle dummy_private_data = {0};
5141 struct qseecom_command_scm_resp resp;
5142 int ret = 0;
5143
5144 if (!desc) {
5145 pr_err("desc is NULL\n");
5146 return -EINVAL;
5147 }
5148
5149 resp.result = desc->ret[0]; /*req_cmd*/
Zhen Kong2f60f492017-06-29 15:22:14 -07005150 resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005151 resp.data = desc->ret[2]; /*listener_id*/
5152
Zhen Konge7f525f2017-12-01 18:26:25 -08005153 dummy_private_data.client.app_id = desc->ret[1];
Zhen Kong0ea975d2019-03-12 14:40:24 -07005154 dummy_private_data.client.from_smcinvoke = true;
Zhen Konge7f525f2017-12-01 18:26:25 -08005155 dummy_app_entry.app_id = desc->ret[1];
5156
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005157 mutex_lock(&app_access_lock);
Zhen Kong7458c2e2017-10-19 12:32:07 -07005158 if (qseecom.qsee_reentrancy_support)
5159 ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005160 &dummy_private_data);
Zhen Kong7458c2e2017-10-19 12:32:07 -07005161 else
5162 ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
5163 &resp);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005164 mutex_unlock(&app_access_lock);
5165 if (ret)
Zhen Kong2f60f492017-06-29 15:22:14 -07005166 pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005167 (int)desc->ret[0], (int)desc->ret[2],
5168 (int)desc->ret[1], ret);
5169 desc->ret[0] = resp.result;
5170 desc->ret[1] = resp.resp_type;
5171 desc->ret[2] = resp.data;
5172 return ret;
5173}
5174EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
5175
5176static int qseecom_send_resp(void)
5177{
5178 qseecom.send_resp_flag = 1;
5179 wake_up_interruptible(&qseecom.send_resp_wq);
5180 return 0;
5181}
5182
5183static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
5184{
5185 struct qseecom_registered_listener_list *this_lstnr = NULL;
5186
5187 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
5188 this_lstnr = __qseecom_find_svc(data->listener.id);
5189 if (this_lstnr == NULL)
5190 return -EINVAL;
5191 qseecom.send_resp_flag = 1;
5192 this_lstnr->send_resp_flag = 1;
5193 wake_up_interruptible(&qseecom.send_resp_wq);
5194 return 0;
5195}
5196
5197static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
5198 struct qseecom_send_modfd_listener_resp *resp,
5199 struct qseecom_registered_listener_list *this_lstnr)
5200{
5201 int i;
5202
5203 if (!data || !resp || !this_lstnr) {
5204 pr_err("listener handle or resp msg is null\n");
5205 return -EINVAL;
5206 }
5207
5208 if (resp->resp_buf_ptr == NULL) {
5209 pr_err("resp buffer is null\n");
5210 return -EINVAL;
5211 }
5212 /* validate resp buf length */
5213 if ((resp->resp_len == 0) ||
5214 (resp->resp_len > this_lstnr->sb_length)) {
5215 pr_err("resp buf length %d not valid\n", resp->resp_len);
5216 return -EINVAL;
5217 }
5218
5219 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
5220 pr_err("Integer overflow in resp_len & resp_buf\n");
5221 return -EINVAL;
5222 }
5223 if ((uintptr_t)this_lstnr->user_virt_sb_base >
5224 (ULONG_MAX - this_lstnr->sb_length)) {
5225 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
5226 return -EINVAL;
5227 }
5228 /* validate resp buf */
5229 if (((uintptr_t)resp->resp_buf_ptr <
5230 (uintptr_t)this_lstnr->user_virt_sb_base) ||
5231 ((uintptr_t)resp->resp_buf_ptr >=
5232 ((uintptr_t)this_lstnr->user_virt_sb_base +
5233 this_lstnr->sb_length)) ||
5234 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
5235 ((uintptr_t)this_lstnr->user_virt_sb_base +
5236 this_lstnr->sb_length))) {
5237 pr_err("resp buf is out of shared buffer region\n");
5238 return -EINVAL;
5239 }
5240
5241 /* validate offsets */
5242 for (i = 0; i < MAX_ION_FD; i++) {
5243 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
5244 pr_err("Invalid offset %d = 0x%x\n",
5245 i, resp->ifd_data[i].cmd_buf_offset);
5246 return -EINVAL;
5247 }
5248 }
5249
5250 return 0;
5251}
5252
5253static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5254 void __user *argp, bool is_64bit_addr)
5255{
5256 struct qseecom_send_modfd_listener_resp resp;
5257 struct qseecom_registered_listener_list *this_lstnr = NULL;
5258
5259 if (copy_from_user(&resp, argp, sizeof(resp))) {
5260 pr_err("copy_from_user failed");
5261 return -EINVAL;
5262 }
5263
5264 this_lstnr = __qseecom_find_svc(data->listener.id);
5265 if (this_lstnr == NULL)
5266 return -EINVAL;
5267
5268 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
5269 return -EINVAL;
5270
5271 resp.resp_buf_ptr = this_lstnr->sb_virt +
5272 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
5273
5274 if (!is_64bit_addr)
5275 __qseecom_update_cmd_buf(&resp, false, data);
5276 else
5277 __qseecom_update_cmd_buf_64(&resp, false, data);
5278 qseecom.send_resp_flag = 1;
5279 this_lstnr->send_resp_flag = 1;
5280 wake_up_interruptible(&qseecom.send_resp_wq);
5281 return 0;
5282}
5283
5284static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5285 void __user *argp)
5286{
5287 return __qseecom_send_modfd_resp(data, argp, false);
5288}
5289
5290static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
5291 void __user *argp)
5292{
5293 return __qseecom_send_modfd_resp(data, argp, true);
5294}
5295
5296static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
5297 void __user *argp)
5298{
5299 struct qseecom_qseos_version_req req;
5300
5301 if (copy_from_user(&req, argp, sizeof(req))) {
5302 pr_err("copy_from_user failed");
5303 return -EINVAL;
5304 }
5305 req.qseos_version = qseecom.qseos_version;
5306 if (copy_to_user(argp, &req, sizeof(req))) {
5307 pr_err("copy_to_user failed");
5308 return -EINVAL;
5309 }
5310 return 0;
5311}
5312
5313static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
5314{
5315 int rc = 0;
5316 struct qseecom_clk *qclk = NULL;
5317
5318 if (qseecom.no_clock_support)
5319 return 0;
5320
5321 if (ce == CLK_QSEE)
5322 qclk = &qseecom.qsee;
5323 if (ce == CLK_CE_DRV)
5324 qclk = &qseecom.ce_drv;
5325
5326 if (qclk == NULL) {
5327 pr_err("CLK type not supported\n");
5328 return -EINVAL;
5329 }
5330 mutex_lock(&clk_access_lock);
5331
5332 if (qclk->clk_access_cnt == ULONG_MAX) {
5333 pr_err("clk_access_cnt beyond limitation\n");
5334 goto err;
5335 }
5336 if (qclk->clk_access_cnt > 0) {
5337 qclk->clk_access_cnt++;
5338 mutex_unlock(&clk_access_lock);
5339 return rc;
5340 }
5341
5342 /* Enable CE core clk */
5343 if (qclk->ce_core_clk != NULL) {
5344 rc = clk_prepare_enable(qclk->ce_core_clk);
5345 if (rc) {
5346 pr_err("Unable to enable/prepare CE core clk\n");
5347 goto err;
5348 }
5349 }
5350 /* Enable CE clk */
5351 if (qclk->ce_clk != NULL) {
5352 rc = clk_prepare_enable(qclk->ce_clk);
5353 if (rc) {
5354 pr_err("Unable to enable/prepare CE iface clk\n");
5355 goto ce_clk_err;
5356 }
5357 }
5358 /* Enable AXI clk */
5359 if (qclk->ce_bus_clk != NULL) {
5360 rc = clk_prepare_enable(qclk->ce_bus_clk);
5361 if (rc) {
5362 pr_err("Unable to enable/prepare CE bus clk\n");
5363 goto ce_bus_clk_err;
5364 }
5365 }
5366 qclk->clk_access_cnt++;
5367 mutex_unlock(&clk_access_lock);
5368 return 0;
5369
5370ce_bus_clk_err:
5371 if (qclk->ce_clk != NULL)
5372 clk_disable_unprepare(qclk->ce_clk);
5373ce_clk_err:
5374 if (qclk->ce_core_clk != NULL)
5375 clk_disable_unprepare(qclk->ce_core_clk);
5376err:
5377 mutex_unlock(&clk_access_lock);
5378 return -EIO;
5379}
5380
5381static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5382{
5383 struct qseecom_clk *qclk;
5384
5385 if (qseecom.no_clock_support)
5386 return;
5387
5388 if (ce == CLK_QSEE)
5389 qclk = &qseecom.qsee;
5390 else
5391 qclk = &qseecom.ce_drv;
5392
5393 mutex_lock(&clk_access_lock);
5394
5395 if (qclk->clk_access_cnt == 0) {
5396 mutex_unlock(&clk_access_lock);
5397 return;
5398 }
5399
5400 if (qclk->clk_access_cnt == 1) {
5401 if (qclk->ce_clk != NULL)
5402 clk_disable_unprepare(qclk->ce_clk);
5403 if (qclk->ce_core_clk != NULL)
5404 clk_disable_unprepare(qclk->ce_core_clk);
5405 if (qclk->ce_bus_clk != NULL)
5406 clk_disable_unprepare(qclk->ce_bus_clk);
5407 }
5408 qclk->clk_access_cnt--;
5409 mutex_unlock(&clk_access_lock);
5410}
5411
5412static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
5413 int32_t clk_type)
5414{
5415 int ret = 0;
5416 struct qseecom_clk *qclk;
5417
5418 if (qseecom.no_clock_support)
5419 return 0;
5420
5421 qclk = &qseecom.qsee;
5422 if (!qseecom.qsee_perf_client)
5423 return ret;
5424
5425 switch (clk_type) {
5426 case CLK_DFAB:
5427 mutex_lock(&qsee_bw_mutex);
5428 if (!qseecom.qsee_bw_count) {
5429 if (qseecom.qsee_sfpb_bw_count > 0)
5430 ret = msm_bus_scale_client_update_request(
5431 qseecom.qsee_perf_client, 3);
5432 else {
5433 if (qclk->ce_core_src_clk != NULL)
5434 ret = __qseecom_enable_clk(CLK_QSEE);
5435 if (!ret) {
5436 ret =
5437 msm_bus_scale_client_update_request(
5438 qseecom.qsee_perf_client, 1);
5439 if ((ret) &&
5440 (qclk->ce_core_src_clk != NULL))
5441 __qseecom_disable_clk(CLK_QSEE);
5442 }
5443 }
5444 if (ret)
5445 pr_err("DFAB Bandwidth req failed (%d)\n",
5446 ret);
5447 else {
5448 qseecom.qsee_bw_count++;
5449 data->perf_enabled = true;
5450 }
5451 } else {
5452 qseecom.qsee_bw_count++;
5453 data->perf_enabled = true;
5454 }
5455 mutex_unlock(&qsee_bw_mutex);
5456 break;
5457 case CLK_SFPB:
5458 mutex_lock(&qsee_bw_mutex);
5459 if (!qseecom.qsee_sfpb_bw_count) {
5460 if (qseecom.qsee_bw_count > 0)
5461 ret = msm_bus_scale_client_update_request(
5462 qseecom.qsee_perf_client, 3);
5463 else {
5464 if (qclk->ce_core_src_clk != NULL)
5465 ret = __qseecom_enable_clk(CLK_QSEE);
5466 if (!ret) {
5467 ret =
5468 msm_bus_scale_client_update_request(
5469 qseecom.qsee_perf_client, 2);
5470 if ((ret) &&
5471 (qclk->ce_core_src_clk != NULL))
5472 __qseecom_disable_clk(CLK_QSEE);
5473 }
5474 }
5475
5476 if (ret)
5477 pr_err("SFPB Bandwidth req failed (%d)\n",
5478 ret);
5479 else {
5480 qseecom.qsee_sfpb_bw_count++;
5481 data->fast_load_enabled = true;
5482 }
5483 } else {
5484 qseecom.qsee_sfpb_bw_count++;
5485 data->fast_load_enabled = true;
5486 }
5487 mutex_unlock(&qsee_bw_mutex);
5488 break;
5489 default:
5490 pr_err("Clock type not defined\n");
5491 break;
5492 }
5493 return ret;
5494}
5495
5496static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5497 int32_t clk_type)
5498{
5499 int32_t ret = 0;
5500 struct qseecom_clk *qclk;
5501
5502 qclk = &qseecom.qsee;
5503
5504 if (qseecom.no_clock_support)
5505 return;
5506 if (!qseecom.qsee_perf_client)
5507 return;
5508
5509 switch (clk_type) {
5510 case CLK_DFAB:
5511 mutex_lock(&qsee_bw_mutex);
5512 if (qseecom.qsee_bw_count == 0) {
5513 pr_err("Client error.Extra call to disable DFAB clk\n");
5514 mutex_unlock(&qsee_bw_mutex);
5515 return;
5516 }
5517
5518 if (qseecom.qsee_bw_count == 1) {
5519 if (qseecom.qsee_sfpb_bw_count > 0)
5520 ret = msm_bus_scale_client_update_request(
5521 qseecom.qsee_perf_client, 2);
5522 else {
5523 ret = msm_bus_scale_client_update_request(
5524 qseecom.qsee_perf_client, 0);
5525 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5526 __qseecom_disable_clk(CLK_QSEE);
5527 }
5528 if (ret)
5529 pr_err("SFPB Bandwidth req fail (%d)\n",
5530 ret);
5531 else {
5532 qseecom.qsee_bw_count--;
5533 data->perf_enabled = false;
5534 }
5535 } else {
5536 qseecom.qsee_bw_count--;
5537 data->perf_enabled = false;
5538 }
5539 mutex_unlock(&qsee_bw_mutex);
5540 break;
5541 case CLK_SFPB:
5542 mutex_lock(&qsee_bw_mutex);
5543 if (qseecom.qsee_sfpb_bw_count == 0) {
5544 pr_err("Client error.Extra call to disable SFPB clk\n");
5545 mutex_unlock(&qsee_bw_mutex);
5546 return;
5547 }
5548 if (qseecom.qsee_sfpb_bw_count == 1) {
5549 if (qseecom.qsee_bw_count > 0)
5550 ret = msm_bus_scale_client_update_request(
5551 qseecom.qsee_perf_client, 1);
5552 else {
5553 ret = msm_bus_scale_client_update_request(
5554 qseecom.qsee_perf_client, 0);
5555 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5556 __qseecom_disable_clk(CLK_QSEE);
5557 }
5558 if (ret)
5559 pr_err("SFPB Bandwidth req fail (%d)\n",
5560 ret);
5561 else {
5562 qseecom.qsee_sfpb_bw_count--;
5563 data->fast_load_enabled = false;
5564 }
5565 } else {
5566 qseecom.qsee_sfpb_bw_count--;
5567 data->fast_load_enabled = false;
5568 }
5569 mutex_unlock(&qsee_bw_mutex);
5570 break;
5571 default:
5572 pr_err("Clock type not defined\n");
5573 break;
5574 }
5575
5576}
5577
5578static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5579 void __user *argp)
5580{
5581 struct ion_handle *ihandle; /* Ion handle */
5582 struct qseecom_load_img_req load_img_req;
5583 int uret = 0;
5584 int ret;
5585 ion_phys_addr_t pa = 0;
5586 size_t len;
5587 struct qseecom_load_app_ireq load_req;
5588 struct qseecom_load_app_64bit_ireq load_req_64bit;
5589 struct qseecom_command_scm_resp resp;
5590 void *cmd_buf = NULL;
5591 size_t cmd_len;
5592 /* Copy the relevant information needed for loading the image */
5593 if (copy_from_user(&load_img_req,
5594 (void __user *)argp,
5595 sizeof(struct qseecom_load_img_req))) {
5596 pr_err("copy_from_user failed\n");
5597 return -EFAULT;
5598 }
5599
5600 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005601 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005602 load_img_req.ifd_data_fd);
5603 if (IS_ERR_OR_NULL(ihandle)) {
5604 pr_err("Ion client could not retrieve the handle\n");
5605 return -ENOMEM;
5606 }
5607
5608 /* Get the physical address of the ION BUF */
5609 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5610 if (ret) {
5611 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5612 ret);
5613 return ret;
5614 }
5615 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5616 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5617 len, load_img_req.mdt_len,
5618 load_img_req.img_len);
5619 return -EINVAL;
5620 }
5621 /* Populate the structure for sending scm call to load image */
5622 if (qseecom.qsee_version < QSEE_VERSION_40) {
5623 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5624 load_req.mdt_len = load_img_req.mdt_len;
5625 load_req.img_len = load_img_req.img_len;
5626 load_req.phy_addr = (uint32_t)pa;
5627 cmd_buf = (void *)&load_req;
5628 cmd_len = sizeof(struct qseecom_load_app_ireq);
5629 } else {
5630 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5631 load_req_64bit.mdt_len = load_img_req.mdt_len;
5632 load_req_64bit.img_len = load_img_req.img_len;
5633 load_req_64bit.phy_addr = (uint64_t)pa;
5634 cmd_buf = (void *)&load_req_64bit;
5635 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5636 }
5637
5638 if (qseecom.support_bus_scaling) {
5639 mutex_lock(&qsee_bw_mutex);
5640 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5641 mutex_unlock(&qsee_bw_mutex);
5642 if (ret) {
5643 ret = -EIO;
5644 goto exit_cpu_restore;
5645 }
5646 }
5647
5648 /* Vote for the SFPB clock */
5649 ret = __qseecom_enable_clk_scale_up(data);
5650 if (ret) {
5651 ret = -EIO;
5652 goto exit_register_bus_bandwidth_needs;
5653 }
5654 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5655 ION_IOC_CLEAN_INV_CACHES);
5656 if (ret) {
5657 pr_err("cache operation failed %d\n", ret);
5658 goto exit_disable_clock;
5659 }
5660 /* SCM_CALL to load the external elf */
5661 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5662 &resp, sizeof(resp));
5663 if (ret) {
5664 pr_err("scm_call to load failed : ret %d\n",
5665 ret);
5666 ret = -EFAULT;
5667 goto exit_disable_clock;
5668 }
5669
5670 switch (resp.result) {
5671 case QSEOS_RESULT_SUCCESS:
5672 break;
5673 case QSEOS_RESULT_INCOMPLETE:
5674 pr_err("%s: qseos result incomplete\n", __func__);
5675 ret = __qseecom_process_incomplete_cmd(data, &resp);
5676 if (ret)
5677 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5678 break;
5679 case QSEOS_RESULT_FAILURE:
5680 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5681 ret = -EFAULT;
5682 break;
5683 default:
5684 pr_err("scm_call response result %d not supported\n",
5685 resp.result);
5686 ret = -EFAULT;
5687 break;
5688 }
5689
5690exit_disable_clock:
5691 __qseecom_disable_clk_scale_down(data);
5692
5693exit_register_bus_bandwidth_needs:
5694 if (qseecom.support_bus_scaling) {
5695 mutex_lock(&qsee_bw_mutex);
5696 uret = qseecom_unregister_bus_bandwidth_needs(data);
5697 mutex_unlock(&qsee_bw_mutex);
5698 if (uret)
5699 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5700 uret, ret);
5701 }
5702
5703exit_cpu_restore:
5704 /* Deallocate the handle */
5705 if (!IS_ERR_OR_NULL(ihandle))
5706 ion_free(qseecom.ion_clnt, ihandle);
5707 return ret;
5708}
5709
5710static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5711{
5712 int ret = 0;
5713 struct qseecom_command_scm_resp resp;
5714 struct qseecom_unload_app_ireq req;
5715
5716 /* unavailable client app */
5717 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5718
5719 /* Populate the structure for sending scm call to unload image */
5720 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5721
5722 /* SCM_CALL to unload the external elf */
5723 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5724 sizeof(struct qseecom_unload_app_ireq),
5725 &resp, sizeof(resp));
5726 if (ret) {
5727 pr_err("scm_call to unload failed : ret %d\n",
5728 ret);
5729 ret = -EFAULT;
5730 goto qseecom_unload_external_elf_scm_err;
5731 }
5732 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5733 ret = __qseecom_process_incomplete_cmd(data, &resp);
5734 if (ret)
5735 pr_err("process_incomplete_cmd fail err: %d\n",
5736 ret);
5737 } else {
5738 if (resp.result != QSEOS_RESULT_SUCCESS) {
5739 pr_err("scm_call to unload image failed resp.result =%d\n",
5740 resp.result);
5741 ret = -EFAULT;
5742 }
5743 }
5744
5745qseecom_unload_external_elf_scm_err:
5746
5747 return ret;
5748}
5749
5750static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5751 void __user *argp)
5752{
5753
5754 int32_t ret;
5755 struct qseecom_qseos_app_load_query query_req;
5756 struct qseecom_check_app_ireq req;
5757 struct qseecom_registered_app_list *entry = NULL;
5758 unsigned long flags = 0;
5759 uint32_t app_arch = 0, app_id = 0;
5760 bool found_app = false;
5761
5762 /* Copy the relevant information needed for loading the image */
5763 if (copy_from_user(&query_req,
5764 (void __user *)argp,
5765 sizeof(struct qseecom_qseos_app_load_query))) {
5766 pr_err("copy_from_user failed\n");
5767 return -EFAULT;
5768 }
5769
5770 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5771 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5772 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5773
5774 ret = __qseecom_check_app_exists(req, &app_id);
5775 if (ret) {
5776 pr_err(" scm call to check if app is loaded failed");
5777 return ret; /* scm call failed */
5778 }
5779 if (app_id) {
5780 pr_debug("App id %d (%s) already exists\n", app_id,
5781 (char *)(req.app_name));
5782 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5783 list_for_each_entry(entry,
5784 &qseecom.registered_app_list_head, list){
5785 if (entry->app_id == app_id) {
5786 app_arch = entry->app_arch;
5787 entry->ref_cnt++;
5788 found_app = true;
5789 break;
5790 }
5791 }
5792 spin_unlock_irqrestore(
5793 &qseecom.registered_app_list_lock, flags);
5794 data->client.app_id = app_id;
5795 query_req.app_id = app_id;
5796 if (app_arch) {
5797 data->client.app_arch = app_arch;
5798 query_req.app_arch = app_arch;
5799 } else {
5800 data->client.app_arch = 0;
5801 query_req.app_arch = 0;
5802 }
5803 strlcpy(data->client.app_name, query_req.app_name,
5804 MAX_APP_NAME_SIZE);
5805 /*
5806 * If app was loaded by appsbl before and was not registered,
5807 * register this app now.
5808 */
5809 if (!found_app) {
5810 pr_debug("Register app %d [%s] which was loaded before\n",
5811 app_id, (char *)query_req.app_name);
5812 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5813 if (!entry) {
5814 pr_err("kmalloc for app entry failed\n");
5815 return -ENOMEM;
5816 }
5817 entry->app_id = app_id;
5818 entry->ref_cnt = 1;
5819 entry->app_arch = data->client.app_arch;
5820 strlcpy(entry->app_name, data->client.app_name,
5821 MAX_APP_NAME_SIZE);
5822 entry->app_blocked = false;
5823 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005824 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005825 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5826 flags);
5827 list_add_tail(&entry->list,
5828 &qseecom.registered_app_list_head);
5829 spin_unlock_irqrestore(
5830 &qseecom.registered_app_list_lock, flags);
5831 }
5832 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5833 pr_err("copy_to_user failed\n");
5834 return -EFAULT;
5835 }
5836 return -EEXIST; /* app already loaded */
5837 } else {
5838 return 0; /* app not loaded */
5839 }
5840}
5841
5842static int __qseecom_get_ce_pipe_info(
5843 enum qseecom_key_management_usage_type usage,
5844 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5845{
5846 int ret = -EINVAL;
5847 int i, j;
5848 struct qseecom_ce_info_use *p = NULL;
5849 int total = 0;
5850 struct qseecom_ce_pipe_entry *pcepipe;
5851
5852 switch (usage) {
5853 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5854 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5855 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5856 if (qseecom.support_fde) {
5857 p = qseecom.ce_info.fde;
5858 total = qseecom.ce_info.num_fde;
5859 } else {
5860 pr_err("system does not support fde\n");
5861 return -EINVAL;
5862 }
5863 break;
5864 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5865 if (qseecom.support_pfe) {
5866 p = qseecom.ce_info.pfe;
5867 total = qseecom.ce_info.num_pfe;
5868 } else {
5869 pr_err("system does not support pfe\n");
5870 return -EINVAL;
5871 }
5872 break;
5873 default:
5874 pr_err("unsupported usage %d\n", usage);
5875 return -EINVAL;
5876 }
5877
5878 for (j = 0; j < total; j++) {
5879 if (p->unit_num == unit) {
5880 pcepipe = p->ce_pipe_entry;
5881 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5882 (*ce_hw)[i] = pcepipe->ce_num;
5883 *pipe = pcepipe->ce_pipe_pair;
5884 pcepipe++;
5885 }
5886 ret = 0;
5887 break;
5888 }
5889 p++;
5890 }
5891 return ret;
5892}
5893
5894static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
5895 enum qseecom_key_management_usage_type usage,
5896 struct qseecom_key_generate_ireq *ireq)
5897{
5898 struct qseecom_command_scm_resp resp;
5899 int ret;
5900
5901 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5902 usage >= QSEOS_KM_USAGE_MAX) {
5903 pr_err("Error:: unsupported usage %d\n", usage);
5904 return -EFAULT;
5905 }
5906 ret = __qseecom_enable_clk(CLK_QSEE);
5907 if (ret)
5908 return ret;
5909
5910 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5911 ireq, sizeof(struct qseecom_key_generate_ireq),
5912 &resp, sizeof(resp));
5913 if (ret) {
5914 if (ret == -EINVAL &&
5915 resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5916 pr_debug("Key ID exists.\n");
5917 ret = 0;
5918 } else {
5919 pr_err("scm call to generate key failed : %d\n", ret);
5920 ret = -EFAULT;
5921 }
5922 goto generate_key_exit;
5923 }
5924
5925 switch (resp.result) {
5926 case QSEOS_RESULT_SUCCESS:
5927 break;
5928 case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
5929 pr_debug("Key ID exists.\n");
5930 break;
5931 case QSEOS_RESULT_INCOMPLETE:
5932 ret = __qseecom_process_incomplete_cmd(data, &resp);
5933 if (ret) {
5934 if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5935 pr_debug("Key ID exists.\n");
5936 ret = 0;
5937 } else {
5938 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5939 resp.result);
5940 }
5941 }
5942 break;
5943 case QSEOS_RESULT_FAILURE:
5944 default:
5945 pr_err("gen key scm call failed resp.result %d\n", resp.result);
5946 ret = -EINVAL;
5947 break;
5948 }
5949generate_key_exit:
5950 __qseecom_disable_clk(CLK_QSEE);
5951 return ret;
5952}
5953
5954static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
5955 enum qseecom_key_management_usage_type usage,
5956 struct qseecom_key_delete_ireq *ireq)
5957{
5958 struct qseecom_command_scm_resp resp;
5959 int ret;
5960
5961 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5962 usage >= QSEOS_KM_USAGE_MAX) {
5963 pr_err("Error:: unsupported usage %d\n", usage);
5964 return -EFAULT;
5965 }
5966 ret = __qseecom_enable_clk(CLK_QSEE);
5967 if (ret)
5968 return ret;
5969
5970 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5971 ireq, sizeof(struct qseecom_key_delete_ireq),
5972 &resp, sizeof(struct qseecom_command_scm_resp));
5973 if (ret) {
5974 if (ret == -EINVAL &&
5975 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5976 pr_debug("Max attempts to input password reached.\n");
5977 ret = -ERANGE;
5978 } else {
5979 pr_err("scm call to delete key failed : %d\n", ret);
5980 ret = -EFAULT;
5981 }
5982 goto del_key_exit;
5983 }
5984
5985 switch (resp.result) {
5986 case QSEOS_RESULT_SUCCESS:
5987 break;
5988 case QSEOS_RESULT_INCOMPLETE:
5989 ret = __qseecom_process_incomplete_cmd(data, &resp);
5990 if (ret) {
5991 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5992 resp.result);
5993 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5994 pr_debug("Max attempts to input password reached.\n");
5995 ret = -ERANGE;
5996 }
5997 }
5998 break;
5999 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
6000 pr_debug("Max attempts to input password reached.\n");
6001 ret = -ERANGE;
6002 break;
6003 case QSEOS_RESULT_FAILURE:
6004 default:
6005 pr_err("Delete key scm call failed resp.result %d\n",
6006 resp.result);
6007 ret = -EINVAL;
6008 break;
6009 }
6010del_key_exit:
6011 __qseecom_disable_clk(CLK_QSEE);
6012 return ret;
6013}
6014
6015static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
6016 enum qseecom_key_management_usage_type usage,
6017 struct qseecom_key_select_ireq *ireq)
6018{
6019 struct qseecom_command_scm_resp resp;
6020 int ret;
6021
6022 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6023 usage >= QSEOS_KM_USAGE_MAX) {
6024 pr_err("Error:: unsupported usage %d\n", usage);
6025 return -EFAULT;
6026 }
6027 ret = __qseecom_enable_clk(CLK_QSEE);
6028 if (ret)
6029 return ret;
6030
6031 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
6032 ret = __qseecom_enable_clk(CLK_CE_DRV);
6033 if (ret)
6034 return ret;
6035 }
6036
6037 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6038 ireq, sizeof(struct qseecom_key_select_ireq),
6039 &resp, sizeof(struct qseecom_command_scm_resp));
6040 if (ret) {
6041 if (ret == -EINVAL &&
6042 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
6043 pr_debug("Max attempts to input password reached.\n");
6044 ret = -ERANGE;
6045 } else if (ret == -EINVAL &&
6046 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
6047 pr_debug("Set Key operation under processing...\n");
6048 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6049 } else {
6050 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
6051 ret);
6052 ret = -EFAULT;
6053 }
6054 goto set_key_exit;
6055 }
6056
6057 switch (resp.result) {
6058 case QSEOS_RESULT_SUCCESS:
6059 break;
6060 case QSEOS_RESULT_INCOMPLETE:
6061 ret = __qseecom_process_incomplete_cmd(data, &resp);
6062 if (ret) {
6063 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
6064 resp.result);
6065 if (resp.result ==
6066 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
6067 pr_debug("Set Key operation under processing...\n");
6068 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6069 }
6070 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
6071 pr_debug("Max attempts to input password reached.\n");
6072 ret = -ERANGE;
6073 }
6074 }
6075 break;
6076 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
6077 pr_debug("Max attempts to input password reached.\n");
6078 ret = -ERANGE;
6079 break;
6080 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
6081 pr_debug("Set Key operation under processing...\n");
6082 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6083 break;
6084 case QSEOS_RESULT_FAILURE:
6085 default:
6086 pr_err("Set key scm call failed resp.result %d\n", resp.result);
6087 ret = -EINVAL;
6088 break;
6089 }
6090set_key_exit:
6091 __qseecom_disable_clk(CLK_QSEE);
6092 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
6093 __qseecom_disable_clk(CLK_CE_DRV);
6094 return ret;
6095}
6096
6097static int __qseecom_update_current_key_user_info(
6098 struct qseecom_dev_handle *data,
6099 enum qseecom_key_management_usage_type usage,
6100 struct qseecom_key_userinfo_update_ireq *ireq)
6101{
6102 struct qseecom_command_scm_resp resp;
6103 int ret;
6104
6105 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6106 usage >= QSEOS_KM_USAGE_MAX) {
6107 pr_err("Error:: unsupported usage %d\n", usage);
6108 return -EFAULT;
6109 }
6110 ret = __qseecom_enable_clk(CLK_QSEE);
6111 if (ret)
6112 return ret;
6113
6114 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6115 ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
6116 &resp, sizeof(struct qseecom_command_scm_resp));
6117 if (ret) {
6118 if (ret == -EINVAL &&
6119 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
6120 pr_debug("Set Key operation under processing...\n");
6121 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6122 } else {
6123 pr_err("scm call to update key userinfo failed: %d\n",
6124 ret);
6125 __qseecom_disable_clk(CLK_QSEE);
6126 return -EFAULT;
6127 }
6128 }
6129
6130 switch (resp.result) {
6131 case QSEOS_RESULT_SUCCESS:
6132 break;
6133 case QSEOS_RESULT_INCOMPLETE:
6134 ret = __qseecom_process_incomplete_cmd(data, &resp);
6135 if (resp.result ==
6136 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
6137 pr_debug("Set Key operation under processing...\n");
6138 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6139 }
6140 if (ret)
6141 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
6142 resp.result);
6143 break;
6144 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
6145 pr_debug("Update Key operation under processing...\n");
6146 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6147 break;
6148 case QSEOS_RESULT_FAILURE:
6149 default:
6150 pr_err("Set key scm call failed resp.result %d\n", resp.result);
6151 ret = -EINVAL;
6152 break;
6153 }
6154
6155 __qseecom_disable_clk(CLK_QSEE);
6156 return ret;
6157}
6158
6159
6160static int qseecom_enable_ice_setup(int usage)
6161{
6162 int ret = 0;
6163
6164 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
6165 ret = qcom_ice_setup_ice_hw("ufs", true);
6166 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
6167 ret = qcom_ice_setup_ice_hw("sdcc", true);
6168
6169 return ret;
6170}
6171
6172static int qseecom_disable_ice_setup(int usage)
6173{
6174 int ret = 0;
6175
6176 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
6177 ret = qcom_ice_setup_ice_hw("ufs", false);
6178 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
6179 ret = qcom_ice_setup_ice_hw("sdcc", false);
6180
6181 return ret;
6182}
6183
6184static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
6185{
6186 struct qseecom_ce_info_use *pce_info_use, *p;
6187 int total = 0;
6188 int i;
6189
6190 switch (usage) {
6191 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
6192 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
6193 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
6194 p = qseecom.ce_info.fde;
6195 total = qseecom.ce_info.num_fde;
6196 break;
6197 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
6198 p = qseecom.ce_info.pfe;
6199 total = qseecom.ce_info.num_pfe;
6200 break;
6201 default:
6202 pr_err("unsupported usage %d\n", usage);
6203 return -EINVAL;
6204 }
6205
6206 pce_info_use = NULL;
6207
6208 for (i = 0; i < total; i++) {
6209 if (p->unit_num == unit) {
6210 pce_info_use = p;
6211 break;
6212 }
6213 p++;
6214 }
6215 if (!pce_info_use) {
6216 pr_err("can not find %d\n", unit);
6217 return -EINVAL;
6218 }
6219 return pce_info_use->num_ce_pipe_entries;
6220}
6221
6222static int qseecom_create_key(struct qseecom_dev_handle *data,
6223 void __user *argp)
6224{
6225 int i;
6226 uint32_t *ce_hw = NULL;
6227 uint32_t pipe = 0;
6228 int ret = 0;
6229 uint32_t flags = 0;
6230 struct qseecom_create_key_req create_key_req;
6231 struct qseecom_key_generate_ireq generate_key_ireq;
6232 struct qseecom_key_select_ireq set_key_ireq;
6233 uint32_t entries = 0;
6234
6235 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
6236 if (ret) {
6237 pr_err("copy_from_user failed\n");
6238 return ret;
6239 }
6240
6241 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6242 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6243 pr_err("unsupported usage %d\n", create_key_req.usage);
6244 ret = -EFAULT;
6245 return ret;
6246 }
6247 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6248 create_key_req.usage);
6249 if (entries <= 0) {
6250 pr_err("no ce instance for usage %d instance %d\n",
6251 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
6252 ret = -EINVAL;
6253 return ret;
6254 }
6255
6256 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6257 if (!ce_hw) {
6258 ret = -ENOMEM;
6259 return ret;
6260 }
6261 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
6262 DEFAULT_CE_INFO_UNIT);
6263 if (ret) {
6264 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6265 ret = -EINVAL;
6266 goto free_buf;
6267 }
6268
6269 if (qseecom.fde_key_size)
6270 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6271 else
6272 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6273
Jiten Patela7bb1d52018-05-11 12:34:26 +05306274 if (qseecom.enable_key_wrap_in_ks == true)
6275 flags |= ENABLE_KEY_WRAP_IN_KS;
6276
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006277 generate_key_ireq.flags = flags;
6278 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
6279 memset((void *)generate_key_ireq.key_id,
6280 0, QSEECOM_KEY_ID_SIZE);
6281 memset((void *)generate_key_ireq.hash32,
6282 0, QSEECOM_HASH_SIZE);
6283 memcpy((void *)generate_key_ireq.key_id,
6284 (void *)key_id_array[create_key_req.usage].desc,
6285 QSEECOM_KEY_ID_SIZE);
6286 memcpy((void *)generate_key_ireq.hash32,
6287 (void *)create_key_req.hash32,
6288 QSEECOM_HASH_SIZE);
6289
6290 ret = __qseecom_generate_and_save_key(data,
6291 create_key_req.usage, &generate_key_ireq);
6292 if (ret) {
6293 pr_err("Failed to generate key on storage: %d\n", ret);
6294 goto free_buf;
6295 }
6296
6297 for (i = 0; i < entries; i++) {
6298 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6299 if (create_key_req.usage ==
6300 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6301 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6302 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6303
6304 } else if (create_key_req.usage ==
6305 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6306 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6307 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6308
6309 } else {
6310 set_key_ireq.ce = ce_hw[i];
6311 set_key_ireq.pipe = pipe;
6312 }
6313 set_key_ireq.flags = flags;
6314
6315 /* set both PIPE_ENC and PIPE_ENC_XTS*/
6316 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6317 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6318 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6319 memcpy((void *)set_key_ireq.key_id,
6320 (void *)key_id_array[create_key_req.usage].desc,
6321 QSEECOM_KEY_ID_SIZE);
6322 memcpy((void *)set_key_ireq.hash32,
6323 (void *)create_key_req.hash32,
6324 QSEECOM_HASH_SIZE);
6325 /*
6326 * This call returns 0 if it is a GPCE based crypto instance or
6327 * ICE is set up properly.
6328 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006329 ret = qseecom_enable_ice_setup(create_key_req.usage);
6330 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006331 goto free_buf;
6332
6333 do {
6334 ret = __qseecom_set_clear_ce_key(data,
6335 create_key_req.usage,
6336 &set_key_ireq);
6337 /*
6338 * wait a little before calling scm again to let other
6339 * processes run
6340 */
6341 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6342 msleep(50);
6343
6344 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6345
6346 qseecom_disable_ice_setup(create_key_req.usage);
6347
6348 if (ret) {
6349 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6350 pipe, ce_hw[i], ret);
6351 goto free_buf;
6352 } else {
6353 pr_err("Set the key successfully\n");
6354 if ((create_key_req.usage ==
6355 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6356 (create_key_req.usage ==
6357 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6358 goto free_buf;
6359 }
6360 }
6361
6362free_buf:
6363 kzfree(ce_hw);
6364 return ret;
6365}
6366
6367static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6368 void __user *argp)
6369{
6370 uint32_t *ce_hw = NULL;
6371 uint32_t pipe = 0;
6372 int ret = 0;
6373 uint32_t flags = 0;
6374 int i, j;
6375 struct qseecom_wipe_key_req wipe_key_req;
6376 struct qseecom_key_delete_ireq delete_key_ireq;
6377 struct qseecom_key_select_ireq clear_key_ireq;
6378 uint32_t entries = 0;
6379
6380 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6381 if (ret) {
6382 pr_err("copy_from_user failed\n");
6383 return ret;
6384 }
6385
6386 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6387 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6388 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6389 ret = -EFAULT;
6390 return ret;
6391 }
6392
6393 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6394 wipe_key_req.usage);
6395 if (entries <= 0) {
6396 pr_err("no ce instance for usage %d instance %d\n",
6397 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6398 ret = -EINVAL;
6399 return ret;
6400 }
6401
6402 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6403 if (!ce_hw) {
6404 ret = -ENOMEM;
6405 return ret;
6406 }
6407
6408 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6409 DEFAULT_CE_INFO_UNIT);
6410 if (ret) {
6411 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6412 ret = -EINVAL;
6413 goto free_buf;
6414 }
6415
6416 if (wipe_key_req.wipe_key_flag) {
6417 delete_key_ireq.flags = flags;
6418 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6419 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6420 memcpy((void *)delete_key_ireq.key_id,
6421 (void *)key_id_array[wipe_key_req.usage].desc,
6422 QSEECOM_KEY_ID_SIZE);
6423 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6424
6425 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6426 &delete_key_ireq);
6427 if (ret) {
6428 pr_err("Failed to delete key from ssd storage: %d\n",
6429 ret);
6430 ret = -EFAULT;
6431 goto free_buf;
6432 }
6433 }
6434
6435 for (j = 0; j < entries; j++) {
6436 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6437 if (wipe_key_req.usage ==
6438 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6439 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6440 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6441 } else if (wipe_key_req.usage ==
6442 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6443 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6444 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6445 } else {
6446 clear_key_ireq.ce = ce_hw[j];
6447 clear_key_ireq.pipe = pipe;
6448 }
6449 clear_key_ireq.flags = flags;
6450 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6451 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6452 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6453 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6454
6455 /*
6456 * This call returns 0 if it is a GPCE based crypto instance or
6457 * ICE is set up properly.
6458 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006459 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6460 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006461 goto free_buf;
6462
6463 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6464 &clear_key_ireq);
6465
6466 qseecom_disable_ice_setup(wipe_key_req.usage);
6467
6468 if (ret) {
6469 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6470 pipe, ce_hw[j], ret);
6471 ret = -EFAULT;
6472 goto free_buf;
6473 }
6474 }
6475
6476free_buf:
6477 kzfree(ce_hw);
6478 return ret;
6479}
6480
6481static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6482 void __user *argp)
6483{
6484 int ret = 0;
6485 uint32_t flags = 0;
6486 struct qseecom_update_key_userinfo_req update_key_req;
6487 struct qseecom_key_userinfo_update_ireq ireq;
6488
6489 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6490 if (ret) {
6491 pr_err("copy_from_user failed\n");
6492 return ret;
6493 }
6494
6495 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6496 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6497 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6498 return -EFAULT;
6499 }
6500
6501 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6502
6503 if (qseecom.fde_key_size)
6504 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6505 else
6506 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6507
6508 ireq.flags = flags;
6509 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6510 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6511 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6512 memcpy((void *)ireq.key_id,
6513 (void *)key_id_array[update_key_req.usage].desc,
6514 QSEECOM_KEY_ID_SIZE);
6515 memcpy((void *)ireq.current_hash32,
6516 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6517 memcpy((void *)ireq.new_hash32,
6518 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6519
6520 do {
6521 ret = __qseecom_update_current_key_user_info(data,
6522 update_key_req.usage,
6523 &ireq);
6524 /*
6525 * wait a little before calling scm again to let other
6526 * processes run
6527 */
6528 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6529 msleep(50);
6530
6531 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6532 if (ret) {
6533 pr_err("Failed to update key info: %d\n", ret);
6534 return ret;
6535 }
6536 return ret;
6537
6538}
6539static int qseecom_is_es_activated(void __user *argp)
6540{
Zhen Kong26e62742018-05-04 17:19:06 -07006541 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006542 struct qseecom_command_scm_resp resp;
6543 int ret;
6544
6545 if (qseecom.qsee_version < QSEE_VERSION_04) {
6546 pr_err("invalid qsee version\n");
6547 return -ENODEV;
6548 }
6549
6550 if (argp == NULL) {
6551 pr_err("arg is null\n");
6552 return -EINVAL;
6553 }
6554
6555 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6556 &req, sizeof(req), &resp, sizeof(resp));
6557 if (ret) {
6558 pr_err("scm_call failed\n");
6559 return ret;
6560 }
6561
6562 req.is_activated = resp.result;
6563 ret = copy_to_user(argp, &req, sizeof(req));
6564 if (ret) {
6565 pr_err("copy_to_user failed\n");
6566 return ret;
6567 }
6568
6569 return 0;
6570}
6571
6572static int qseecom_save_partition_hash(void __user *argp)
6573{
6574 struct qseecom_save_partition_hash_req req;
6575 struct qseecom_command_scm_resp resp;
6576 int ret;
6577
6578 memset(&resp, 0x00, sizeof(resp));
6579
6580 if (qseecom.qsee_version < QSEE_VERSION_04) {
6581 pr_err("invalid qsee version\n");
6582 return -ENODEV;
6583 }
6584
6585 if (argp == NULL) {
6586 pr_err("arg is null\n");
6587 return -EINVAL;
6588 }
6589
6590 ret = copy_from_user(&req, argp, sizeof(req));
6591 if (ret) {
6592 pr_err("copy_from_user failed\n");
6593 return ret;
6594 }
6595
6596 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6597 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6598 if (ret) {
6599 pr_err("qseecom_scm_call failed\n");
6600 return ret;
6601 }
6602
6603 return 0;
6604}
6605
6606static int qseecom_mdtp_cipher_dip(void __user *argp)
6607{
6608 struct qseecom_mdtp_cipher_dip_req req;
6609 u32 tzbuflenin, tzbuflenout;
6610 char *tzbufin = NULL, *tzbufout = NULL;
6611 struct scm_desc desc = {0};
6612 int ret;
6613
6614 do {
6615 /* Copy the parameters from userspace */
6616 if (argp == NULL) {
6617 pr_err("arg is null\n");
6618 ret = -EINVAL;
6619 break;
6620 }
6621
6622 ret = copy_from_user(&req, argp, sizeof(req));
6623 if (ret) {
6624 pr_err("copy_from_user failed, ret= %d\n", ret);
6625 break;
6626 }
6627
6628 if (req.in_buf == NULL || req.out_buf == NULL ||
6629 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6630 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6631 req.direction > 1) {
6632 pr_err("invalid parameters\n");
6633 ret = -EINVAL;
6634 break;
6635 }
6636
6637 /* Copy the input buffer from userspace to kernel space */
6638 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6639 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6640 if (!tzbufin) {
6641 pr_err("error allocating in buffer\n");
6642 ret = -ENOMEM;
6643 break;
6644 }
6645
6646 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6647 if (ret) {
6648 pr_err("copy_from_user failed, ret=%d\n", ret);
6649 break;
6650 }
6651
6652 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6653
6654 /* Prepare the output buffer in kernel space */
6655 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6656 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6657 if (!tzbufout) {
6658 pr_err("error allocating out buffer\n");
6659 ret = -ENOMEM;
6660 break;
6661 }
6662
6663 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6664
6665 /* Send the command to TZ */
6666 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6667 desc.args[0] = virt_to_phys(tzbufin);
6668 desc.args[1] = req.in_buf_size;
6669 desc.args[2] = virt_to_phys(tzbufout);
6670 desc.args[3] = req.out_buf_size;
6671 desc.args[4] = req.direction;
6672
6673 ret = __qseecom_enable_clk(CLK_QSEE);
6674 if (ret)
6675 break;
6676
6677		ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc);
6678
6679 __qseecom_disable_clk(CLK_QSEE);
6680
6681 if (ret) {
6682 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6683 ret);
6684 break;
6685 }
6686
6687 /* Copy the output buffer from kernel space to userspace */
6688 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6689 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6690 if (ret) {
6691 pr_err("copy_to_user failed, ret=%d\n", ret);
6692 break;
6693 }
6694 } while (0);
6695
6696 kzfree(tzbufin);
6697 kzfree(tzbufout);
6698
6699 return ret;
6700}
6701
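/*
 * Sanity-check a GP (QTEEC) request: the command and response buffers
 * must be non-NULL, of non-zero length, free of pointer/length integer
 * overflows, and fully contained in the client's shared buffer.
 */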
6702static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6703 struct qseecom_qteec_req *req)
6704{
6705 if (!data || !data->client.ihandle) {
6706 pr_err("Client or client handle is not initialized\n");
6707 return -EINVAL;
6708 }
6709
6710 if (data->type != QSEECOM_CLIENT_APP)
6711 return -EFAULT;
6712
6713 if (req->req_len > UINT_MAX - req->resp_len) {
6714 pr_err("Integer overflow detected in req_len & rsp_len\n");
6715 return -EINVAL;
6716 }
6717
6718 if (req->req_len + req->resp_len > data->client.sb_length) {
6719 pr_debug("Not enough memory to fit cmd_buf.\n");
6720 pr_debug("resp_buf. Required: %u, Available: %zu\n",
6721 (req->req_len + req->resp_len), data->client.sb_length);
6722 return -ENOMEM;
6723 }
6724
6725 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6726 pr_err("cmd buffer or response buffer is null\n");
6727 return -EINVAL;
6728 }
6729 if (((uintptr_t)req->req_ptr <
6730 data->client.user_virt_sb_base) ||
6731 ((uintptr_t)req->req_ptr >=
6732 (data->client.user_virt_sb_base + data->client.sb_length))) {
6733		pr_err("cmd buffer address not within shared buffer\n");
6734 return -EINVAL;
6735 }
6736
6737 if (((uintptr_t)req->resp_ptr <
6738 data->client.user_virt_sb_base) ||
6739 ((uintptr_t)req->resp_ptr >=
6740 (data->client.user_virt_sb_base + data->client.sb_length))) {
6741		pr_err("response buffer address not within shared buffer\n");
6742 return -EINVAL;
6743 }
6744
6745 if ((req->req_len == 0) || (req->resp_len == 0)) {
6746		pr_err("cmd buf length/response buf length not valid\n");
6747 return -EINVAL;
6748 }
6749
6750 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6751 pr_err("Integer overflow in req_len & req_ptr\n");
6752 return -EINVAL;
6753 }
6754
6755 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6756 pr_err("Integer overflow in resp_len & resp_ptr\n");
6757 return -EINVAL;
6758 }
6759
6760 if (data->client.user_virt_sb_base >
6761 (ULONG_MAX - data->client.sb_length)) {
6762 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6763 return -EINVAL;
6764 }
6765 if ((((uintptr_t)req->req_ptr + req->req_len) >
6766 ((uintptr_t)data->client.user_virt_sb_base +
6767 data->client.sb_length)) ||
6768 (((uintptr_t)req->resp_ptr + req->resp_len) >
6769 ((uintptr_t)data->client.user_virt_sb_base +
6770 data->client.sb_length))) {
6771 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6772 return -EINVAL;
6773 }
6774 return 0;
6775}
6776
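/*
 * For a pre-allocated (secure heap) ion fd, build a coherent buffer
 * holding the number of sg entries followed by each entry's physical
 * address and length, and record it in sec_buf_fd[fd_idx] so its
 * physical address can be handed to TZ.
 */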
6777static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6778 uint32_t fd_idx, struct sg_table *sg_ptr)
6779{
6780 struct scatterlist *sg = sg_ptr->sgl;
6781 struct qseecom_sg_entry *sg_entry;
6782 void *buf;
6783 uint i;
6784 size_t size;
6785 dma_addr_t coh_pmem;
6786
6787 if (fd_idx >= MAX_ION_FD) {
6788 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6789 return -ENOMEM;
6790 }
6791 /*
6792 * Allocate a buffer, populate it with number of entry plus
6793 * each sg entry's phy addr and length; then return the
6794 * phy_addr of the buffer.
6795 */
6796 size = sizeof(uint32_t) +
6797 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6798 size = (size + PAGE_SIZE) & PAGE_MASK;
6799 buf = dma_alloc_coherent(qseecom.pdev,
6800 size, &coh_pmem, GFP_KERNEL);
6801 if (buf == NULL) {
6802 pr_err("failed to alloc memory for sg buf\n");
6803 return -ENOMEM;
6804 }
6805 *(uint32_t *)buf = sg_ptr->nents;
6806 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6807 for (i = 0; i < sg_ptr->nents; i++) {
6808 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6809 sg_entry->len = sg->length;
6810 sg_entry++;
6811 sg = sg_next(sg);
6812 }
6813 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6814 data->client.sec_buf_fd[fd_idx].vbase = buf;
6815 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6816 data->client.sec_buf_fd[fd_idx].size = size;
6817 return 0;
6818}
6819
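/*
 * Walk the ion fds attached to a modfd GP request and patch the memref
 * at each cmd_buf_offset with the buffer's physical address (or with a
 * generated sg-list buffer for secure-heap fds). With cleanup set, the
 * patched fields are cleared and any generated buffers are freed.
 */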
6820static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6821 struct qseecom_dev_handle *data, bool cleanup)
6822{
6823 struct ion_handle *ihandle;
6824 int ret = 0;
6825 int i = 0;
6826 uint32_t *update;
6827 struct sg_table *sg_ptr = NULL;
6828 struct scatterlist *sg;
6829 struct qseecom_param_memref *memref;
6830
6831 if (req == NULL) {
6832 pr_err("Invalid address\n");
6833 return -EINVAL;
6834 }
6835 for (i = 0; i < MAX_ION_FD; i++) {
6836 if (req->ifd_data[i].fd > 0) {
6837			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
6838					req->ifd_data[i].fd);
6839 if (IS_ERR_OR_NULL(ihandle)) {
6840 pr_err("Ion client can't retrieve the handle\n");
6841 return -ENOMEM;
6842 }
6843 if ((req->req_len < sizeof(uint32_t)) ||
6844 (req->ifd_data[i].cmd_buf_offset >
6845 req->req_len - sizeof(uint32_t))) {
6846 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6847 req->req_len,
6848 req->ifd_data[i].cmd_buf_offset);
6849 return -EINVAL;
6850 }
6851 update = (uint32_t *)((char *) req->req_ptr +
6852 req->ifd_data[i].cmd_buf_offset);
6853 if (!update) {
6854 pr_err("update pointer is NULL\n");
6855 return -EINVAL;
6856 }
6857 } else {
6858 continue;
6859 }
6860 /* Populate the cmd data structure with the phys_addr */
6861 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6862 if (IS_ERR_OR_NULL(sg_ptr)) {
6863			pr_err("Ion client could not retrieve sg table\n");
6864 goto err;
6865 }
6866 sg = sg_ptr->sgl;
6867 if (sg == NULL) {
6868 pr_err("sg is NULL\n");
6869 goto err;
6870 }
6871 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6872			pr_err("Num of scat entr (%d) or length (%d) invalid\n",
6873 sg_ptr->nents, sg->length);
6874 goto err;
6875 }
6876 /* clean up buf for pre-allocated fd */
6877 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6878 (*update)) {
6879 if (data->client.sec_buf_fd[i].vbase)
6880 dma_free_coherent(qseecom.pdev,
6881 data->client.sec_buf_fd[i].size,
6882 data->client.sec_buf_fd[i].vbase,
6883 data->client.sec_buf_fd[i].pbase);
6884 memset((void *)update, 0,
6885 sizeof(struct qseecom_param_memref));
6886 memset(&(data->client.sec_buf_fd[i]), 0,
6887 sizeof(struct qseecom_sec_buf_fd_info));
6888 goto clean;
6889 }
6890
6891 if (*update == 0) {
6892 /* update buf for pre-allocated fd from secure heap*/
6893 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6894 sg_ptr);
6895 if (ret) {
6896 pr_err("Failed to handle buf for fd[%d]\n", i);
6897 goto err;
6898 }
6899 memref = (struct qseecom_param_memref *)update;
6900 memref->buffer =
6901 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6902 memref->size =
6903 (uint32_t)(data->client.sec_buf_fd[i].size);
6904 } else {
6905 /* update buf for fd from non-secure qseecom heap */
6906 if (sg_ptr->nents != 1) {
6907 pr_err("Num of scat entr (%d) invalid\n",
6908 sg_ptr->nents);
6909 goto err;
6910 }
6911 if (cleanup)
6912 *update = 0;
6913 else
6914 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6915 }
6916clean:
6917 if (cleanup) {
6918 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6919 ihandle, NULL, sg->length,
6920 ION_IOC_INV_CACHES);
6921 if (ret) {
6922 pr_err("cache operation failed %d\n", ret);
6923 goto err;
6924 }
6925 } else {
6926 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6927 ihandle, NULL, sg->length,
6928 ION_IOC_CLEAN_INV_CACHES);
6929 if (ret) {
6930 pr_err("cache operation failed %d\n", ret);
6931 goto err;
6932 }
6933 data->sglistinfo_ptr[i].indexAndFlags =
6934 SGLISTINFO_SET_INDEX_FLAG(
6935 (sg_ptr->nents == 1), 0,
6936 req->ifd_data[i].cmd_buf_offset);
6937 data->sglistinfo_ptr[i].sizeOrCount =
6938 (sg_ptr->nents == 1) ?
6939 sg->length : sg_ptr->nents;
6940 data->sglist_cnt = i + 1;
6941 }
6942 /* Deallocate the handle */
6943 if (!IS_ERR_OR_NULL(ihandle))
6944 ion_free(qseecom.ion_clnt, ihandle);
6945 }
6946 return ret;
6947err:
6948 if (!IS_ERR_OR_NULL(ihandle))
6949 ion_free(qseecom.ion_clnt, ihandle);
6950 return -ENOMEM;
6951}
6952
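/*
 * Common GP command path: validate the request, translate the user
 * buffers to kernel/physical addresses, build the 32-bit or 64-bit ireq,
 * clean the caches, issue the SCM call, handle incomplete/reentrancy
 * results, and invalidate the shared buffer afterwards.
 */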
6953static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
6954 struct qseecom_qteec_req *req, uint32_t cmd_id)
6955{
6956 struct qseecom_command_scm_resp resp;
6957 struct qseecom_qteec_ireq ireq;
6958 struct qseecom_qteec_64bit_ireq ireq_64bit;
6959 struct qseecom_registered_app_list *ptr_app;
6960 bool found_app = false;
6961 unsigned long flags;
6962 int ret = 0;
6963	int ret2 = 0;
6964	uint32_t reqd_len_sb_in = 0;
6965 void *cmd_buf = NULL;
6966 size_t cmd_len;
6967 struct sglist_info *table = data->sglistinfo_ptr;
6968	void *req_ptr = NULL;
6969 void *resp_ptr = NULL;
6970
6971 ret = __qseecom_qteec_validate_msg(data, req);
6972 if (ret)
6973 return ret;
6974
6975	req_ptr = req->req_ptr;
6976 resp_ptr = req->resp_ptr;
6977
6978	/* find app_id & img_name from list */
6979 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6980 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6981 list) {
6982 if ((ptr_app->app_id == data->client.app_id) &&
6983 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6984 found_app = true;
6985 break;
6986 }
6987 }
6988 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6989 if (!found_app) {
6990 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6991 (char *)data->client.app_name);
6992 return -ENOENT;
6993 }
6994	if (__qseecom_find_pending_unload_app(data->client.app_id,
6995 data->client.app_name)) {
6996 pr_err("app %d (%s) unload is pending\n",
6997 data->client.app_id, data->client.app_name);
6998 return -ENOENT;
6999 }
7000
7001	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7002 (uintptr_t)req->req_ptr);
7003 req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7004 (uintptr_t)req->resp_ptr);
7005
7006	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
7007 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
7008 ret = __qseecom_update_qteec_req_buf(
7009 (struct qseecom_qteec_modfd_req *)req, data, false);
7010 if (ret)
7011 return ret;
7012 }
7013
7014 if (qseecom.qsee_version < QSEE_VERSION_40) {
7015 ireq.app_id = data->client.app_id;
7016 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7017						(uintptr_t)req_ptr);
7018		ireq.req_len = req->req_len;
7019 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7020						(uintptr_t)resp_ptr);
7021		ireq.resp_len = req->resp_len;
7022 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
7023 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7024 dmac_flush_range((void *)table,
7025 (void *)table + SGLISTINFO_TABLE_SIZE);
7026 cmd_buf = (void *)&ireq;
7027 cmd_len = sizeof(struct qseecom_qteec_ireq);
7028 } else {
7029 ireq_64bit.app_id = data->client.app_id;
7030 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7031						(uintptr_t)req_ptr);
7032		ireq_64bit.req_len = req->req_len;
7033 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7034						(uintptr_t)resp_ptr);
7035		ireq_64bit.resp_len = req->resp_len;
7036 if ((data->client.app_arch == ELFCLASS32) &&
7037 ((ireq_64bit.req_ptr >=
7038 PHY_ADDR_4G - ireq_64bit.req_len) ||
7039 (ireq_64bit.resp_ptr >=
7040 PHY_ADDR_4G - ireq_64bit.resp_len))){
7041 pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
7042 data->client.app_name, data->client.app_id);
7043 pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
7044 ireq_64bit.req_ptr, ireq_64bit.req_len,
7045 ireq_64bit.resp_ptr, ireq_64bit.resp_len);
7046 return -EFAULT;
7047 }
7048 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
7049 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7050 dmac_flush_range((void *)table,
7051 (void *)table + SGLISTINFO_TABLE_SIZE);
7052 cmd_buf = (void *)&ireq_64bit;
7053 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
7054 }
7055 if (qseecom.whitelist_support == true
7056 && cmd_id == QSEOS_TEE_OPEN_SESSION)
7057 *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
7058 else
7059 *(uint32_t *)cmd_buf = cmd_id;
7060
7061 reqd_len_sb_in = req->req_len + req->resp_len;
7062 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7063 data->client.sb_virt,
7064 reqd_len_sb_in,
7065 ION_IOC_CLEAN_INV_CACHES);
7066 if (ret) {
7067 pr_err("cache operation failed %d\n", ret);
7068 return ret;
7069 }
7070
7071 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
7072
7073 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
7074 cmd_buf, cmd_len,
7075 &resp, sizeof(resp));
7076 if (ret) {
7077 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
7078 ret, data->client.app_id);
7079		goto exit;
7080	}
7081
7082 if (qseecom.qsee_reentrancy_support) {
7083 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
7084		if (ret)
7085 goto exit;
7086	} else {
7087 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
7088 ret = __qseecom_process_incomplete_cmd(data, &resp);
7089 if (ret) {
7090 pr_err("process_incomplete_cmd failed err: %d\n",
7091 ret);
7092				goto exit;
7093			}
7094 } else {
7095 if (resp.result != QSEOS_RESULT_SUCCESS) {
7096 pr_err("Response result %d not supported\n",
7097 resp.result);
7098 ret = -EINVAL;
7099				goto exit;
7100			}
7101 }
7102 }
7103exit:
7104 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7105				data->client.sb_virt, data->client.sb_length,
7106 ION_IOC_INV_CACHES);
7107	if (ret2) {
7108		pr_err("cache operation failed %d\n", ret2);
7109		return ret2;
7110	}
7111
7112 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
7113 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
7114		ret2 = __qseecom_update_qteec_req_buf(
7115			(struct qseecom_qteec_modfd_req *)req, data, true);
7116		if (ret2)
7117 return ret2;
7118	}
7119	return ret;
7120}
7121
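/* Copy the modfd request from userspace and issue QSEOS_TEE_OPEN_SESSION. */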
7122static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
7123 void __user *argp)
7124{
7125 struct qseecom_qteec_modfd_req req;
7126 int ret = 0;
7127
7128 ret = copy_from_user(&req, argp,
7129 sizeof(struct qseecom_qteec_modfd_req));
7130 if (ret) {
7131 pr_err("copy_from_user failed\n");
7132 return ret;
7133 }
7134 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7135 QSEOS_TEE_OPEN_SESSION);
7136
7137 return ret;
7138}
7139
7140static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
7141 void __user *argp)
7142{
7143 struct qseecom_qteec_req req;
7144 int ret = 0;
7145
7146 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
7147 if (ret) {
7148 pr_err("copy_from_user failed\n");
7149 return ret;
7150 }
7151 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
7152 return ret;
7153}
7154
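/*
 * GP invoke-command path with ion fd support: validate the request,
 * patch fd-backed memrefs, build the 32/64-bit ireq, and issue
 * QSEOS_TEE_INVOKE_COMMAND (whitelist variant when supported).
 */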
7155static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
7156 void __user *argp)
7157{
7158 struct qseecom_qteec_modfd_req req;
7159 struct qseecom_command_scm_resp resp;
7160 struct qseecom_qteec_ireq ireq;
7161 struct qseecom_qteec_64bit_ireq ireq_64bit;
7162 struct qseecom_registered_app_list *ptr_app;
7163 bool found_app = false;
7164 unsigned long flags;
7165 int ret = 0;
7166 int i = 0;
7167 uint32_t reqd_len_sb_in = 0;
7168 void *cmd_buf = NULL;
7169 size_t cmd_len;
7170 struct sglist_info *table = data->sglistinfo_ptr;
7171 void *req_ptr = NULL;
7172 void *resp_ptr = NULL;
7173
7174 ret = copy_from_user(&req, argp,
7175 sizeof(struct qseecom_qteec_modfd_req));
7176 if (ret) {
7177 pr_err("copy_from_user failed\n");
7178 return ret;
7179 }
7180 ret = __qseecom_qteec_validate_msg(data,
7181 (struct qseecom_qteec_req *)(&req));
7182 if (ret)
7183 return ret;
7184 req_ptr = req.req_ptr;
7185 resp_ptr = req.resp_ptr;
7186
7187 /* find app_id & img_name from list */
7188 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
7189 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
7190 list) {
7191 if ((ptr_app->app_id == data->client.app_id) &&
7192 (!strcmp(ptr_app->app_name, data->client.app_name))) {
7193 found_app = true;
7194 break;
7195 }
7196 }
7197 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
7198 if (!found_app) {
7199 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
7200 (char *)data->client.app_name);
7201 return -ENOENT;
7202 }
7203	if (__qseecom_find_pending_unload_app(data->client.app_id,
7204 data->client.app_name)) {
7205 pr_err("app %d (%s) unload is pending\n",
7206 data->client.app_id, data->client.app_name);
7207 return -ENOENT;
7208 }
7209
7210 /* validate offsets */
7211 for (i = 0; i < MAX_ION_FD; i++) {
7212 if (req.ifd_data[i].fd) {
7213 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
7214 return -EINVAL;
7215 }
7216 }
7217 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7218 (uintptr_t)req.req_ptr);
7219 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7220 (uintptr_t)req.resp_ptr);
7221 ret = __qseecom_update_qteec_req_buf(&req, data, false);
7222 if (ret)
7223 return ret;
7224
7225 if (qseecom.qsee_version < QSEE_VERSION_40) {
7226 ireq.app_id = data->client.app_id;
7227 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7228 (uintptr_t)req_ptr);
7229 ireq.req_len = req.req_len;
7230 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7231 (uintptr_t)resp_ptr);
7232 ireq.resp_len = req.resp_len;
7233 cmd_buf = (void *)&ireq;
7234 cmd_len = sizeof(struct qseecom_qteec_ireq);
7235 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
7236 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7237 dmac_flush_range((void *)table,
7238 (void *)table + SGLISTINFO_TABLE_SIZE);
7239 } else {
7240 ireq_64bit.app_id = data->client.app_id;
7241 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7242 (uintptr_t)req_ptr);
7243 ireq_64bit.req_len = req.req_len;
7244 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7245 (uintptr_t)resp_ptr);
7246 ireq_64bit.resp_len = req.resp_len;
7247 cmd_buf = (void *)&ireq_64bit;
7248 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
7249 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
7250 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7251 dmac_flush_range((void *)table,
7252 (void *)table + SGLISTINFO_TABLE_SIZE);
7253 }
7254 reqd_len_sb_in = req.req_len + req.resp_len;
7255 if (qseecom.whitelist_support == true)
7256 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
7257 else
7258 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
7259
7260 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7261 data->client.sb_virt,
7262 reqd_len_sb_in,
7263 ION_IOC_CLEAN_INV_CACHES);
7264 if (ret) {
7265 pr_err("cache operation failed %d\n", ret);
7266 return ret;
7267 }
7268
7269 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
7270
7271 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
7272 cmd_buf, cmd_len,
7273 &resp, sizeof(resp));
7274 if (ret) {
7275 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
7276 ret, data->client.app_id);
7277 return ret;
7278 }
7279
7280 if (qseecom.qsee_reentrancy_support) {
7281 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
7282 } else {
7283 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
7284 ret = __qseecom_process_incomplete_cmd(data, &resp);
7285 if (ret) {
7286 pr_err("process_incomplete_cmd failed err: %d\n",
7287 ret);
7288 return ret;
7289 }
7290 } else {
7291 if (resp.result != QSEOS_RESULT_SUCCESS) {
7292 pr_err("Response result %d not supported\n",
7293 resp.result);
7294 ret = -EINVAL;
7295 }
7296 }
7297 }
7298 ret = __qseecom_update_qteec_req_buf(&req, data, true);
7299 if (ret)
7300 return ret;
7301
7302 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7303 data->client.sb_virt, data->client.sb_length,
7304 ION_IOC_INV_CACHES);
7305 if (ret) {
7306 pr_err("cache operation failed %d\n", ret);
7307 return ret;
7308 }
7309 return 0;
7310}
7311
7312static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
7313 void __user *argp)
7314{
7315 struct qseecom_qteec_modfd_req req;
7316 int ret = 0;
7317
7318 ret = copy_from_user(&req, argp,
7319 sizeof(struct qseecom_qteec_modfd_req));
7320 if (ret) {
7321 pr_err("copy_from_user failed\n");
7322 return ret;
7323 }
7324 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7325 QSEOS_TEE_REQUEST_CANCELLATION);
7326
7327 return ret;
7328}
7329
7330static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
7331{
7332 if (data->sglist_cnt) {
7333 memset(data->sglistinfo_ptr, 0,
7334 SGLISTINFO_TABLE_SIZE);
7335 data->sglist_cnt = 0;
7336 }
7337}
7338
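/*
 * Main ioctl dispatcher. Client app commands are serialized with
 * app_access_lock, listener operations with listener_access_lock, and
 * every request bumps ioctl_count so abort/release paths can wait for
 * in-flight ioctls to drain.
 */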
7339static long qseecom_ioctl(struct file *file,
7340			unsigned int cmd, unsigned long arg)
7341{
7342 int ret = 0;
7343 struct qseecom_dev_handle *data = file->private_data;
7344 void __user *argp = (void __user *) arg;
7345 bool perf_enabled = false;
7346
7347 if (!data) {
7348 pr_err("Invalid/uninitialized device handle\n");
7349 return -EINVAL;
7350 }
7351
7352 if (data->abort) {
7353 pr_err("Aborting qseecom driver\n");
7354 return -ENODEV;
7355 }
7356	if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
7357 cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
7358 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
7359 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
7360		__wakeup_unregister_listener_kthread();
7361	__wakeup_unload_app_kthread();
7362
7363 switch (cmd) {
7364 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
7365 if (data->type != QSEECOM_GENERIC) {
7366 pr_err("reg lstnr req: invalid handle (%d)\n",
7367 data->type);
7368 ret = -EINVAL;
7369 break;
7370 }
7371 pr_debug("ioctl register_listener_req()\n");
7372		mutex_lock(&listener_access_lock);
7373		atomic_inc(&data->ioctl_count);
7374 data->type = QSEECOM_LISTENER_SERVICE;
7375 ret = qseecom_register_listener(data, argp);
7376 atomic_dec(&data->ioctl_count);
7377 wake_up_all(&data->abort_wq);
7378		mutex_unlock(&listener_access_lock);
7379		if (ret)
7380 pr_err("failed qseecom_register_listener: %d\n", ret);
7381 break;
7382 }
7383	case QSEECOM_IOCTL_SET_ICE_INFO: {
7384 struct qseecom_ice_data_t ice_data;
7385
7386 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
7387 if (ret) {
7388 pr_err("copy_from_user failed\n");
7389 return -EFAULT;
7390 }
7391 qcom_ice_set_fde_flag(ice_data.flag);
7392 break;
7393 }
7394
7395	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7396 if ((data->listener.id == 0) ||
7397 (data->type != QSEECOM_LISTENER_SERVICE)) {
7398 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7399 data->type, data->listener.id);
7400 ret = -EINVAL;
7401 break;
7402 }
7403 pr_debug("ioctl unregister_listener_req()\n");
7404		mutex_lock(&listener_access_lock);
7405		atomic_inc(&data->ioctl_count);
7406 ret = qseecom_unregister_listener(data);
7407 atomic_dec(&data->ioctl_count);
7408 wake_up_all(&data->abort_wq);
7409		mutex_unlock(&listener_access_lock);
7410		if (ret)
7411 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7412 break;
7413 }
7414 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7415 if ((data->client.app_id == 0) ||
7416 (data->type != QSEECOM_CLIENT_APP)) {
7417 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7418 data->type, data->client.app_id);
7419 ret = -EINVAL;
7420 break;
7421 }
7422 /* Only one client allowed here at a time */
7423 mutex_lock(&app_access_lock);
7424 if (qseecom.support_bus_scaling) {
7425 /* register bus bw in case the client doesn't do it */
7426 if (!data->mode) {
7427 mutex_lock(&qsee_bw_mutex);
7428 __qseecom_register_bus_bandwidth_needs(
7429 data, HIGH);
7430 mutex_unlock(&qsee_bw_mutex);
7431 }
7432 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7433 if (ret) {
7434 pr_err("Failed to set bw.\n");
7435 ret = -EINVAL;
7436 mutex_unlock(&app_access_lock);
7437 break;
7438 }
7439 }
7440 /*
7441 * On targets where crypto clock is handled by HLOS,
7442 * if clk_access_cnt is zero and perf_enabled is false,
7443 * then the crypto clock was not enabled before sending cmd to
7444 * tz, qseecom will enable the clock to avoid service failure.
7445 */
7446 if (!qseecom.no_clock_support &&
7447 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7448 pr_debug("ce clock is not enabled!\n");
7449 ret = qseecom_perf_enable(data);
7450 if (ret) {
7451 pr_err("Failed to vote for clock with err %d\n",
7452 ret);
7453 mutex_unlock(&app_access_lock);
7454 ret = -EINVAL;
7455 break;
7456 }
7457 perf_enabled = true;
7458 }
7459 atomic_inc(&data->ioctl_count);
7460 ret = qseecom_send_cmd(data, argp);
7461 if (qseecom.support_bus_scaling)
7462 __qseecom_add_bw_scale_down_timer(
7463 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7464 if (perf_enabled) {
7465 qsee_disable_clock_vote(data, CLK_DFAB);
7466 qsee_disable_clock_vote(data, CLK_SFPB);
7467 }
7468 atomic_dec(&data->ioctl_count);
7469 wake_up_all(&data->abort_wq);
7470 mutex_unlock(&app_access_lock);
7471 if (ret)
7472 pr_err("failed qseecom_send_cmd: %d\n", ret);
7473 break;
7474 }
7475 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7476 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7477 if ((data->client.app_id == 0) ||
7478 (data->type != QSEECOM_CLIENT_APP)) {
7479 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7480 data->type, data->client.app_id);
7481 ret = -EINVAL;
7482 break;
7483 }
7484 /* Only one client allowed here at a time */
7485 mutex_lock(&app_access_lock);
7486 if (qseecom.support_bus_scaling) {
7487 if (!data->mode) {
7488 mutex_lock(&qsee_bw_mutex);
7489 __qseecom_register_bus_bandwidth_needs(
7490 data, HIGH);
7491 mutex_unlock(&qsee_bw_mutex);
7492 }
7493 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7494 if (ret) {
7495 pr_err("Failed to set bw.\n");
7496 mutex_unlock(&app_access_lock);
7497 ret = -EINVAL;
7498 break;
7499 }
7500 }
7501 /*
7502 * On targets where crypto clock is handled by HLOS,
7503 * if clk_access_cnt is zero and perf_enabled is false,
7504 * then the crypto clock was not enabled before sending cmd to
7505 * tz, qseecom will enable the clock to avoid service failure.
7506 */
7507 if (!qseecom.no_clock_support &&
7508 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7509 pr_debug("ce clock is not enabled!\n");
7510 ret = qseecom_perf_enable(data);
7511 if (ret) {
7512 pr_err("Failed to vote for clock with err %d\n",
7513 ret);
7514 mutex_unlock(&app_access_lock);
7515 ret = -EINVAL;
7516 break;
7517 }
7518 perf_enabled = true;
7519 }
7520 atomic_inc(&data->ioctl_count);
7521 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7522 ret = qseecom_send_modfd_cmd(data, argp);
7523 else
7524 ret = qseecom_send_modfd_cmd_64(data, argp);
7525 if (qseecom.support_bus_scaling)
7526 __qseecom_add_bw_scale_down_timer(
7527 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7528 if (perf_enabled) {
7529 qsee_disable_clock_vote(data, CLK_DFAB);
7530 qsee_disable_clock_vote(data, CLK_SFPB);
7531 }
7532 atomic_dec(&data->ioctl_count);
7533 wake_up_all(&data->abort_wq);
7534 mutex_unlock(&app_access_lock);
7535 if (ret)
7536 pr_err("failed qseecom_send_cmd: %d\n", ret);
7537 __qseecom_clean_data_sglistinfo(data);
7538 break;
7539 }
7540 case QSEECOM_IOCTL_RECEIVE_REQ: {
7541 if ((data->listener.id == 0) ||
7542 (data->type != QSEECOM_LISTENER_SERVICE)) {
7543 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7544 data->type, data->listener.id);
7545 ret = -EINVAL;
7546 break;
7547 }
7548 atomic_inc(&data->ioctl_count);
7549 ret = qseecom_receive_req(data);
7550 atomic_dec(&data->ioctl_count);
7551 wake_up_all(&data->abort_wq);
7552 if (ret && (ret != -ERESTARTSYS))
7553 pr_err("failed qseecom_receive_req: %d\n", ret);
7554 break;
7555 }
7556 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7557 if ((data->listener.id == 0) ||
7558 (data->type != QSEECOM_LISTENER_SERVICE)) {
7559 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7560 data->type, data->listener.id);
7561 ret = -EINVAL;
7562 break;
7563 }
7564		mutex_lock(&listener_access_lock);
7565		atomic_inc(&data->ioctl_count);
7566 if (!qseecom.qsee_reentrancy_support)
7567 ret = qseecom_send_resp();
7568 else
7569 ret = qseecom_reentrancy_send_resp(data);
7570 atomic_dec(&data->ioctl_count);
7571 wake_up_all(&data->abort_wq);
7572		mutex_unlock(&listener_access_lock);
7573		if (ret)
7574 pr_err("failed qseecom_send_resp: %d\n", ret);
7575 break;
7576 }
7577 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7578 if ((data->type != QSEECOM_CLIENT_APP) &&
7579 (data->type != QSEECOM_GENERIC) &&
7580 (data->type != QSEECOM_SECURE_SERVICE)) {
7581 pr_err("set mem param req: invalid handle (%d)\n",
7582 data->type);
7583 ret = -EINVAL;
7584 break;
7585 }
7586 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7587 mutex_lock(&app_access_lock);
7588 atomic_inc(&data->ioctl_count);
7589 ret = qseecom_set_client_mem_param(data, argp);
7590 atomic_dec(&data->ioctl_count);
7591 mutex_unlock(&app_access_lock);
7592 if (ret)
7593			pr_err("failed qseecom_set_mem_param request: %d\n",
7594 ret);
7595 break;
7596 }
7597 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7598 if ((data->type != QSEECOM_GENERIC) &&
7599 (data->type != QSEECOM_CLIENT_APP)) {
7600 pr_err("load app req: invalid handle (%d)\n",
7601 data->type);
7602 ret = -EINVAL;
7603 break;
7604 }
7605 data->type = QSEECOM_CLIENT_APP;
7606 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7607 mutex_lock(&app_access_lock);
7608 atomic_inc(&data->ioctl_count);
7609 ret = qseecom_load_app(data, argp);
7610 atomic_dec(&data->ioctl_count);
7611 mutex_unlock(&app_access_lock);
7612 if (ret)
7613 pr_err("failed load_app request: %d\n", ret);
7614		__wakeup_unload_app_kthread();
7615		break;
7616 }
7617 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7618 if ((data->client.app_id == 0) ||
7619 (data->type != QSEECOM_CLIENT_APP)) {
7620 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7621 data->type, data->client.app_id);
7622 ret = -EINVAL;
7623 break;
7624 }
7625 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7626 mutex_lock(&app_access_lock);
7627 atomic_inc(&data->ioctl_count);
7628 ret = qseecom_unload_app(data, false);
7629 atomic_dec(&data->ioctl_count);
7630 mutex_unlock(&app_access_lock);
7631 if (ret)
7632 pr_err("failed unload_app request: %d\n", ret);
7633		__wakeup_unload_app_kthread();
7634		break;
7635 }
7636 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7637 atomic_inc(&data->ioctl_count);
7638 ret = qseecom_get_qseos_version(data, argp);
7639 if (ret)
7640 pr_err("qseecom_get_qseos_version: %d\n", ret);
7641 atomic_dec(&data->ioctl_count);
7642 break;
7643 }
7644 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7645 if ((data->type != QSEECOM_GENERIC) &&
7646 (data->type != QSEECOM_CLIENT_APP)) {
7647 pr_err("perf enable req: invalid handle (%d)\n",
7648 data->type);
7649 ret = -EINVAL;
7650 break;
7651 }
7652 if ((data->type == QSEECOM_CLIENT_APP) &&
7653 (data->client.app_id == 0)) {
7654 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7655 data->type, data->client.app_id);
7656 ret = -EINVAL;
7657 break;
7658 }
7659 atomic_inc(&data->ioctl_count);
7660 if (qseecom.support_bus_scaling) {
7661 mutex_lock(&qsee_bw_mutex);
7662 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7663 mutex_unlock(&qsee_bw_mutex);
7664 } else {
7665 ret = qseecom_perf_enable(data);
7666 if (ret)
7667 pr_err("Fail to vote for clocks %d\n", ret);
7668 }
7669 atomic_dec(&data->ioctl_count);
7670 break;
7671 }
7672 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7673 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7674 (data->type != QSEECOM_CLIENT_APP)) {
7675 pr_err("perf disable req: invalid handle (%d)\n",
7676 data->type);
7677 ret = -EINVAL;
7678 break;
7679 }
7680 if ((data->type == QSEECOM_CLIENT_APP) &&
7681 (data->client.app_id == 0)) {
7682 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7683 data->type, data->client.app_id);
7684 ret = -EINVAL;
7685 break;
7686 }
7687 atomic_inc(&data->ioctl_count);
7688 if (!qseecom.support_bus_scaling) {
7689 qsee_disable_clock_vote(data, CLK_DFAB);
7690 qsee_disable_clock_vote(data, CLK_SFPB);
7691 } else {
7692 mutex_lock(&qsee_bw_mutex);
7693 qseecom_unregister_bus_bandwidth_needs(data);
7694 mutex_unlock(&qsee_bw_mutex);
7695 }
7696 atomic_dec(&data->ioctl_count);
7697 break;
7698 }
7699
7700 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7701 /* If crypto clock is not handled by HLOS, return directly. */
7702 if (qseecom.no_clock_support) {
7703 pr_debug("crypto clock is not handled by HLOS\n");
7704 break;
7705 }
7706 if ((data->client.app_id == 0) ||
7707 (data->type != QSEECOM_CLIENT_APP)) {
7708 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7709 data->type, data->client.app_id);
7710 ret = -EINVAL;
7711 break;
7712 }
7713 atomic_inc(&data->ioctl_count);
7714 ret = qseecom_scale_bus_bandwidth(data, argp);
7715 atomic_dec(&data->ioctl_count);
7716 break;
7717 }
7718 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7719 if (data->type != QSEECOM_GENERIC) {
7720 pr_err("load ext elf req: invalid client handle (%d)\n",
7721 data->type);
7722 ret = -EINVAL;
7723 break;
7724 }
7725 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7726 data->released = true;
7727 mutex_lock(&app_access_lock);
7728 atomic_inc(&data->ioctl_count);
7729 ret = qseecom_load_external_elf(data, argp);
7730 atomic_dec(&data->ioctl_count);
7731 mutex_unlock(&app_access_lock);
7732 if (ret)
7733 pr_err("failed load_external_elf request: %d\n", ret);
7734 break;
7735 }
7736 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7737 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7738 pr_err("unload ext elf req: invalid handle (%d)\n",
7739 data->type);
7740 ret = -EINVAL;
7741 break;
7742 }
7743 data->released = true;
7744 mutex_lock(&app_access_lock);
7745 atomic_inc(&data->ioctl_count);
7746 ret = qseecom_unload_external_elf(data);
7747 atomic_dec(&data->ioctl_count);
7748 mutex_unlock(&app_access_lock);
7749 if (ret)
7750 pr_err("failed unload_app request: %d\n", ret);
7751 break;
7752 }
7753 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7754		if ((data->type != QSEECOM_GENERIC) &&
7755 (data->type != QSEECOM_CLIENT_APP)) {
7756 pr_err("app loaded query req: invalid handle (%d)\n",
7757 data->type);
7758 ret = -EINVAL;
7759 break;
7760 }
7761		data->type = QSEECOM_CLIENT_APP;
7762 mutex_lock(&app_access_lock);
7763 atomic_inc(&data->ioctl_count);
7764 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7765 ret = qseecom_query_app_loaded(data, argp);
7766 atomic_dec(&data->ioctl_count);
7767 mutex_unlock(&app_access_lock);
7768 break;
7769 }
7770 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7771 if (data->type != QSEECOM_GENERIC) {
7772 pr_err("send cmd svc req: invalid handle (%d)\n",
7773 data->type);
7774 ret = -EINVAL;
7775 break;
7776 }
7777 data->type = QSEECOM_SECURE_SERVICE;
7778 if (qseecom.qsee_version < QSEE_VERSION_03) {
7779 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7780 qseecom.qsee_version);
7781 return -EINVAL;
7782 }
7783 mutex_lock(&app_access_lock);
7784 atomic_inc(&data->ioctl_count);
7785 ret = qseecom_send_service_cmd(data, argp);
7786 atomic_dec(&data->ioctl_count);
7787 mutex_unlock(&app_access_lock);
7788 break;
7789 }
7790 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7791 if (!(qseecom.support_pfe || qseecom.support_fde))
7792 pr_err("Features requiring key init not supported\n");
7793 if (data->type != QSEECOM_GENERIC) {
7794 pr_err("create key req: invalid handle (%d)\n",
7795 data->type);
7796 ret = -EINVAL;
7797 break;
7798 }
7799 if (qseecom.qsee_version < QSEE_VERSION_05) {
7800 pr_err("Create Key feature unsupported: qsee ver %u\n",
7801 qseecom.qsee_version);
7802 return -EINVAL;
7803 }
7804 data->released = true;
7805 mutex_lock(&app_access_lock);
7806 atomic_inc(&data->ioctl_count);
7807 ret = qseecom_create_key(data, argp);
7808 if (ret)
7809 pr_err("failed to create encryption key: %d\n", ret);
7810
7811 atomic_dec(&data->ioctl_count);
7812 mutex_unlock(&app_access_lock);
7813 break;
7814 }
7815 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7816 if (!(qseecom.support_pfe || qseecom.support_fde))
7817 pr_err("Features requiring key init not supported\n");
7818 if (data->type != QSEECOM_GENERIC) {
7819 pr_err("wipe key req: invalid handle (%d)\n",
7820 data->type);
7821 ret = -EINVAL;
7822 break;
7823 }
7824 if (qseecom.qsee_version < QSEE_VERSION_05) {
7825 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7826 qseecom.qsee_version);
7827 return -EINVAL;
7828 }
7829 data->released = true;
7830 mutex_lock(&app_access_lock);
7831 atomic_inc(&data->ioctl_count);
7832 ret = qseecom_wipe_key(data, argp);
7833 if (ret)
7834 pr_err("failed to wipe encryption key: %d\n", ret);
7835 atomic_dec(&data->ioctl_count);
7836 mutex_unlock(&app_access_lock);
7837 break;
7838 }
7839 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7840 if (!(qseecom.support_pfe || qseecom.support_fde))
7841 pr_err("Features requiring key init not supported\n");
7842 if (data->type != QSEECOM_GENERIC) {
7843 pr_err("update key req: invalid handle (%d)\n",
7844 data->type);
7845 ret = -EINVAL;
7846 break;
7847 }
7848 if (qseecom.qsee_version < QSEE_VERSION_05) {
7849 pr_err("Update Key feature unsupported in qsee ver %u\n",
7850 qseecom.qsee_version);
7851 return -EINVAL;
7852 }
7853 data->released = true;
7854 mutex_lock(&app_access_lock);
7855 atomic_inc(&data->ioctl_count);
7856 ret = qseecom_update_key_user_info(data, argp);
7857 if (ret)
7858 pr_err("failed to update key user info: %d\n", ret);
7859 atomic_dec(&data->ioctl_count);
7860 mutex_unlock(&app_access_lock);
7861 break;
7862 }
7863 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7864 if (data->type != QSEECOM_GENERIC) {
7865 pr_err("save part hash req: invalid handle (%d)\n",
7866 data->type);
7867 ret = -EINVAL;
7868 break;
7869 }
7870 data->released = true;
7871 mutex_lock(&app_access_lock);
7872 atomic_inc(&data->ioctl_count);
7873 ret = qseecom_save_partition_hash(argp);
7874 atomic_dec(&data->ioctl_count);
7875 mutex_unlock(&app_access_lock);
7876 break;
7877 }
7878 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7879 if (data->type != QSEECOM_GENERIC) {
7880 pr_err("ES activated req: invalid handle (%d)\n",
7881 data->type);
7882 ret = -EINVAL;
7883 break;
7884 }
7885 data->released = true;
7886 mutex_lock(&app_access_lock);
7887 atomic_inc(&data->ioctl_count);
7888 ret = qseecom_is_es_activated(argp);
7889 atomic_dec(&data->ioctl_count);
7890 mutex_unlock(&app_access_lock);
7891 break;
7892 }
7893 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7894 if (data->type != QSEECOM_GENERIC) {
7895 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7896 data->type);
7897 ret = -EINVAL;
7898 break;
7899 }
7900 data->released = true;
7901 mutex_lock(&app_access_lock);
7902 atomic_inc(&data->ioctl_count);
7903 ret = qseecom_mdtp_cipher_dip(argp);
7904 atomic_dec(&data->ioctl_count);
7905 mutex_unlock(&app_access_lock);
7906 break;
7907 }
7908 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7909 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7910 if ((data->listener.id == 0) ||
7911 (data->type != QSEECOM_LISTENER_SERVICE)) {
7912 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7913 data->type, data->listener.id);
7914 ret = -EINVAL;
7915 break;
7916 }
7917		mutex_lock(&listener_access_lock);
7918		atomic_inc(&data->ioctl_count);
7919 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7920 ret = qseecom_send_modfd_resp(data, argp);
7921 else
7922 ret = qseecom_send_modfd_resp_64(data, argp);
7923 atomic_dec(&data->ioctl_count);
7924 wake_up_all(&data->abort_wq);
7925		mutex_unlock(&listener_access_lock);
7926		if (ret)
7927 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7928 __qseecom_clean_data_sglistinfo(data);
7929 break;
7930 }
7931 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7932 if ((data->client.app_id == 0) ||
7933 (data->type != QSEECOM_CLIENT_APP)) {
7934 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7935 data->type, data->client.app_id);
7936 ret = -EINVAL;
7937 break;
7938 }
7939 if (qseecom.qsee_version < QSEE_VERSION_40) {
7940 pr_err("GP feature unsupported: qsee ver %u\n",
7941 qseecom.qsee_version);
7942 return -EINVAL;
7943 }
7944 /* Only one client allowed here at a time */
7945 mutex_lock(&app_access_lock);
7946 atomic_inc(&data->ioctl_count);
7947 ret = qseecom_qteec_open_session(data, argp);
7948 atomic_dec(&data->ioctl_count);
7949 wake_up_all(&data->abort_wq);
7950 mutex_unlock(&app_access_lock);
7951 if (ret)
7952 pr_err("failed open_session_cmd: %d\n", ret);
7953 __qseecom_clean_data_sglistinfo(data);
7954 break;
7955 }
7956 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7957 if ((data->client.app_id == 0) ||
7958 (data->type != QSEECOM_CLIENT_APP)) {
7959 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7960 data->type, data->client.app_id);
7961 ret = -EINVAL;
7962 break;
7963 }
7964 if (qseecom.qsee_version < QSEE_VERSION_40) {
7965 pr_err("GP feature unsupported: qsee ver %u\n",
7966 qseecom.qsee_version);
7967 return -EINVAL;
7968 }
7969 /* Only one client allowed here at a time */
7970 mutex_lock(&app_access_lock);
7971 atomic_inc(&data->ioctl_count);
7972 ret = qseecom_qteec_close_session(data, argp);
7973 atomic_dec(&data->ioctl_count);
7974 wake_up_all(&data->abort_wq);
7975 mutex_unlock(&app_access_lock);
7976 if (ret)
7977 pr_err("failed close_session_cmd: %d\n", ret);
7978 break;
7979 }
7980 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7981 if ((data->client.app_id == 0) ||
7982 (data->type != QSEECOM_CLIENT_APP)) {
7983 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7984 data->type, data->client.app_id);
7985 ret = -EINVAL;
7986 break;
7987 }
7988 if (qseecom.qsee_version < QSEE_VERSION_40) {
7989 pr_err("GP feature unsupported: qsee ver %u\n",
7990 qseecom.qsee_version);
7991 return -EINVAL;
7992 }
7993 /* Only one client allowed here at a time */
7994 mutex_lock(&app_access_lock);
7995 atomic_inc(&data->ioctl_count);
7996 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7997 atomic_dec(&data->ioctl_count);
7998 wake_up_all(&data->abort_wq);
7999 mutex_unlock(&app_access_lock);
8000 if (ret)
8001 pr_err("failed Invoke cmd: %d\n", ret);
8002 __qseecom_clean_data_sglistinfo(data);
8003 break;
8004 }
8005 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
8006 if ((data->client.app_id == 0) ||
8007 (data->type != QSEECOM_CLIENT_APP)) {
8008 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
8009 data->type, data->client.app_id);
8010 ret = -EINVAL;
8011 break;
8012 }
8013 if (qseecom.qsee_version < QSEE_VERSION_40) {
8014 pr_err("GP feature unsupported: qsee ver %u\n",
8015 qseecom.qsee_version);
8016 return -EINVAL;
8017 }
8018 /* Only one client allowed here at a time */
8019 mutex_lock(&app_access_lock);
8020 atomic_inc(&data->ioctl_count);
8021 ret = qseecom_qteec_request_cancellation(data, argp);
8022 atomic_dec(&data->ioctl_count);
8023 wake_up_all(&data->abort_wq);
8024 mutex_unlock(&app_access_lock);
8025 if (ret)
8026 pr_err("failed request_cancellation: %d\n", ret);
8027 break;
8028 }
8029 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
8030 atomic_inc(&data->ioctl_count);
8031 ret = qseecom_get_ce_info(data, argp);
8032 if (ret)
8033 pr_err("failed get fde ce pipe info: %d\n", ret);
8034 atomic_dec(&data->ioctl_count);
8035 break;
8036 }
8037 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
8038 atomic_inc(&data->ioctl_count);
8039 ret = qseecom_free_ce_info(data, argp);
8040 if (ret)
8041 pr_err("failed get fde ce pipe info: %d\n", ret);
8042 atomic_dec(&data->ioctl_count);
8043 break;
8044 }
8045 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
8046 atomic_inc(&data->ioctl_count);
8047 ret = qseecom_query_ce_info(data, argp);
8048 if (ret)
8049 pr_err("failed get fde ce pipe info: %d\n", ret);
8050 atomic_dec(&data->ioctl_count);
8051 break;
8052 }
8053 default:
8054 pr_err("Invalid IOCTL: 0x%x\n", cmd);
8055 return -EINVAL;
8056 }
8057 return ret;
8058}
8059
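/* Allocate a per-fd handle; its type is refined by the first ioctl used. */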
8060static int qseecom_open(struct inode *inode, struct file *file)
8061{
8062 int ret = 0;
8063 struct qseecom_dev_handle *data;
8064
8065 data = kzalloc(sizeof(*data), GFP_KERNEL);
8066 if (!data)
8067 return -ENOMEM;
8068 file->private_data = data;
8069 data->abort = 0;
8070 data->type = QSEECOM_GENERIC;
8071 data->released = false;
8072 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
8073 data->mode = INACTIVE;
8074 init_waitqueue_head(&data->abort_wq);
8075 atomic_set(&data->ioctl_count, 0);
8076 return ret;
8077}
8078
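/* Drop any bus-bandwidth and clock votes still held by this handle. */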
8079static void __qseecom_release_disable_clk(struct qseecom_dev_handle *data)
8080{
8081 if (qseecom.no_clock_support)
8082 return;
8083 if (qseecom.support_bus_scaling) {
8084 mutex_lock(&qsee_bw_mutex);
8085 if (data->mode != INACTIVE) {
8086 qseecom_unregister_bus_bandwidth_needs(data);
8087 if (qseecom.cumulative_mode == INACTIVE)
8088 __qseecom_set_msm_bus_request(INACTIVE);
8089 }
8090 mutex_unlock(&qsee_bw_mutex);
8091 } else {
8092 if (data->fast_load_enabled)
8093 qsee_disable_clock_vote(data, CLK_SFPB);
8094 if (data->perf_enabled)
8095 qsee_disable_clock_vote(data, CLK_DFAB);
8096 }
8097}
8098
8099static int qseecom_release(struct inode *inode, struct file *file)
8100{
8101 struct qseecom_dev_handle *data = file->private_data;
8102 int ret = 0;
8103	bool free_private_data = true;
8104
8105	__qseecom_release_disable_clk(data);
8106 if (!data->released) {
8107		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
8108 data->type, data->mode, data);
8109 switch (data->type) {
8110 case QSEECOM_LISTENER_SERVICE:
8111			pr_debug("release lsnr svc %d\n", data->listener.id);
8112			mutex_lock(&listener_access_lock);
8113			ret = qseecom_unregister_listener(data);
8114			if (!ret)
8115 free_private_data = false;
8116			data->listener.release_called = true;
8117			mutex_unlock(&listener_access_lock);
8118			__wakeup_unregister_listener_kthread();
8119			break;
8120 case QSEECOM_CLIENT_APP:
8121			pr_debug("release app %d (%s)\n",
8122 data->client.app_id, data->client.app_name);
8123 if (data->client.app_id) {
8124 free_private_data = false;
8125				mutex_lock(&unload_app_pending_list_lock);
8126				ret = qseecom_prepare_unload_app(data);
8127				mutex_unlock(&unload_app_pending_list_lock);
8128 __wakeup_unload_app_kthread();
8129			}
8130			break;
8131 case QSEECOM_SECURE_SERVICE:
8132 case QSEECOM_GENERIC:
8133 ret = qseecom_unmap_ion_allocated_memory(data);
8134 if (ret)
8135 pr_err("Ion Unmap failed\n");
8136 break;
8137 case QSEECOM_UNAVAILABLE_CLIENT_APP:
8138 break;
8139 default:
8140			pr_err("Unsupported clnt_handle_type %d\n",
8141 data->type);
8142 break;
8143 }
8144 }
8145
8146	if (free_private_data)
8147 kfree(data);
8148	return ret;
8149}
8150
8151#ifdef CONFIG_COMPAT
8152#include "compat_qseecom.c"
8153#else
8154#define compat_qseecom_ioctl NULL
8155#endif
8156
8157static const struct file_operations qseecom_fops = {
8158 .owner = THIS_MODULE,
8159 .unlocked_ioctl = qseecom_ioctl,
8160 .compat_ioctl = compat_qseecom_ioctl,
8161 .open = qseecom_open,
8162 .release = qseecom_release
8163};
8164
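/*
 * Look up the core-src/core/iface/bus clocks for the given CE instance
 * and set the core source clock to the configured operating frequency.
 */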
8165static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
8166{
8167 int rc = 0;
8168 struct device *pdev;
8169 struct qseecom_clk *qclk;
8170 char *core_clk_src = NULL;
8171 char *core_clk = NULL;
8172 char *iface_clk = NULL;
8173 char *bus_clk = NULL;
8174
8175 switch (ce) {
8176 case CLK_QSEE: {
8177 core_clk_src = "core_clk_src";
8178 core_clk = "core_clk";
8179 iface_clk = "iface_clk";
8180 bus_clk = "bus_clk";
8181 qclk = &qseecom.qsee;
8182 qclk->instance = CLK_QSEE;
8183 break;
8184 };
8185 case CLK_CE_DRV: {
8186 core_clk_src = "ce_drv_core_clk_src";
8187 core_clk = "ce_drv_core_clk";
8188 iface_clk = "ce_drv_iface_clk";
8189 bus_clk = "ce_drv_bus_clk";
8190 qclk = &qseecom.ce_drv;
8191 qclk->instance = CLK_CE_DRV;
8192 break;
8193 };
8194 default:
8195 pr_err("Invalid ce hw instance: %d!\n", ce);
8196 return -EIO;
8197 }
8198
8199 if (qseecom.no_clock_support) {
8200 qclk->ce_core_clk = NULL;
8201 qclk->ce_clk = NULL;
8202 qclk->ce_bus_clk = NULL;
8203 qclk->ce_core_src_clk = NULL;
8204 return 0;
8205 }
8206
8207 pdev = qseecom.pdev;
8208
8209 /* Get CE3 src core clk. */
8210 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
8211 if (!IS_ERR(qclk->ce_core_src_clk)) {
8212 rc = clk_set_rate(qclk->ce_core_src_clk,
8213 qseecom.ce_opp_freq_hz);
8214 if (rc) {
8215 clk_put(qclk->ce_core_src_clk);
8216 qclk->ce_core_src_clk = NULL;
8217 pr_err("Unable to set the core src clk @%uMhz.\n",
8218 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
8219 return -EIO;
8220 }
8221 } else {
8222 pr_warn("Unable to get CE core src clk, set to NULL\n");
8223 qclk->ce_core_src_clk = NULL;
8224 }
8225
8226 /* Get CE core clk */
8227 qclk->ce_core_clk = clk_get(pdev, core_clk);
8228 if (IS_ERR(qclk->ce_core_clk)) {
8229 rc = PTR_ERR(qclk->ce_core_clk);
8230 pr_err("Unable to get CE core clk\n");
8231 if (qclk->ce_core_src_clk != NULL)
8232 clk_put(qclk->ce_core_src_clk);
8233 return -EIO;
8234 }
8235
8236 /* Get CE Interface clk */
8237 qclk->ce_clk = clk_get(pdev, iface_clk);
8238 if (IS_ERR(qclk->ce_clk)) {
8239 rc = PTR_ERR(qclk->ce_clk);
8240 pr_err("Unable to get CE interface clk\n");
8241 if (qclk->ce_core_src_clk != NULL)
8242 clk_put(qclk->ce_core_src_clk);
8243 clk_put(qclk->ce_core_clk);
8244 return -EIO;
8245 }
8246
8247 /* Get CE AXI clk */
8248 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
8249 if (IS_ERR(qclk->ce_bus_clk)) {
8250 rc = PTR_ERR(qclk->ce_bus_clk);
8251 pr_err("Unable to get CE BUS interface clk\n");
8252 if (qclk->ce_core_src_clk != NULL)
8253 clk_put(qclk->ce_core_src_clk);
8254 clk_put(qclk->ce_core_clk);
8255 clk_put(qclk->ce_clk);
8256 return -EIO;
8257 }
8258
8259 return rc;
8260}
8261
8262static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
8263{
8264 struct qseecom_clk *qclk;
8265
8266 if (ce == CLK_QSEE)
8267 qclk = &qseecom.qsee;
8268 else
8269 qclk = &qseecom.ce_drv;
8270
8271 if (qclk->ce_clk != NULL) {
8272 clk_put(qclk->ce_clk);
8273 qclk->ce_clk = NULL;
8274 }
8275 if (qclk->ce_core_clk != NULL) {
8276 clk_put(qclk->ce_core_clk);
8277 qclk->ce_core_clk = NULL;
8278 }
8279 if (qclk->ce_bus_clk != NULL) {
8280 clk_put(qclk->ce_bus_clk);
8281 qclk->ce_bus_clk = NULL;
8282 }
8283 if (qclk->ce_core_src_clk != NULL) {
8284 clk_put(qclk->ce_core_src_clk);
8285 qclk->ce_core_src_clk = NULL;
8286 }
8287 qclk->instance = CLK_INVALID;
8288}
8289
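/*
 * Parse the device-tree crypto-engine configuration: the QSEE CE HW
 * instance plus, when FDE/PFE are supported, the per-unit CE/pipe-pair
 * tables used to build qseecom.ce_info.
 */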
8290static int qseecom_retrieve_ce_data(struct platform_device *pdev)
8291{
8292 int rc = 0;
8293 uint32_t hlos_num_ce_hw_instances;
8294 uint32_t disk_encrypt_pipe;
8295 uint32_t file_encrypt_pipe;
8296	uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
8297	int i;
8298 const int *tbl;
8299 int size;
8300 int entry;
8301 struct qseecom_crypto_info *pfde_tbl = NULL;
8302 struct qseecom_crypto_info *p;
8303 int tbl_size;
8304 int j;
8305 bool old_db = true;
8306 struct qseecom_ce_info_use *pce_info_use;
8307 uint32_t *unit_tbl = NULL;
8308 int total_units = 0;
8309 struct qseecom_ce_pipe_entry *pce_entry;
8310
8311 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
8312 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
8313
8314 if (of_property_read_u32((&pdev->dev)->of_node,
8315 "qcom,qsee-ce-hw-instance",
8316 &qseecom.ce_info.qsee_ce_hw_instance)) {
8317 pr_err("Fail to get qsee ce hw instance information.\n");
8318 rc = -EINVAL;
8319 goto out;
8320 } else {
8321 pr_debug("qsee-ce-hw-instance=0x%x\n",
8322 qseecom.ce_info.qsee_ce_hw_instance);
8323 }
8324
8325 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
8326 "qcom,support-fde");
8327 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
8328 "qcom,support-pfe");
8329
8330 if (!qseecom.support_pfe && !qseecom.support_fde) {
8331 pr_warn("Device does not support PFE/FDE");
8332 goto out;
8333 }
8334
8335 if (qseecom.support_fde)
8336 tbl = of_get_property((&pdev->dev)->of_node,
8337 "qcom,full-disk-encrypt-info", &size);
8338 else
8339 tbl = NULL;
8340 if (tbl) {
8341 old_db = false;
8342 if (size % sizeof(struct qseecom_crypto_info)) {
8343 pr_err("full-disk-encrypt-info tbl size(%d)\n",
8344 size);
8345 rc = -EINVAL;
8346 goto out;
8347 }
8348 tbl_size = size / sizeof
8349 (struct qseecom_crypto_info);
8350
8351 pfde_tbl = kzalloc(size, GFP_KERNEL);
8352 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8353 total_units = 0;
8354
8355 if (!pfde_tbl || !unit_tbl) {
8356 pr_err("failed to alloc memory\n");
8357 rc = -ENOMEM;
8358 goto out;
8359 }
8360 if (of_property_read_u32_array((&pdev->dev)->of_node,
8361 "qcom,full-disk-encrypt-info",
8362 (u32 *)pfde_tbl, size/sizeof(u32))) {
8363 pr_err("failed to read full-disk-encrypt-info tbl\n");
8364 rc = -EINVAL;
8365 goto out;
8366 }
8367
8368 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8369 for (j = 0; j < total_units; j++) {
8370 if (p->unit_num == *(unit_tbl + j))
8371 break;
8372 }
8373 if (j == total_units) {
8374 *(unit_tbl + total_units) = p->unit_num;
8375 total_units++;
8376 }
8377 }
8378
8379 qseecom.ce_info.num_fde = total_units;
8380 pce_info_use = qseecom.ce_info.fde = kcalloc(
8381 total_units, sizeof(struct qseecom_ce_info_use),
8382 GFP_KERNEL);
8383 if (!pce_info_use) {
8384 pr_err("failed to alloc memory\n");
8385 rc = -ENOMEM;
8386 goto out;
8387 }
8388
8389 for (j = 0; j < total_units; j++, pce_info_use++) {
8390 pce_info_use->unit_num = *(unit_tbl + j);
8391 pce_info_use->alloc = false;
8392 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8393 pce_info_use->num_ce_pipe_entries = 0;
8394 pce_info_use->ce_pipe_entry = NULL;
8395 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8396 if (p->unit_num == pce_info_use->unit_num)
8397 pce_info_use->num_ce_pipe_entries++;
8398 }
8399
8400 entry = pce_info_use->num_ce_pipe_entries;
8401 pce_entry = pce_info_use->ce_pipe_entry =
8402 kcalloc(entry,
8403 sizeof(struct qseecom_ce_pipe_entry),
8404 GFP_KERNEL);
8405 if (pce_entry == NULL) {
8406 pr_err("failed to alloc memory\n");
8407 rc = -ENOMEM;
8408 goto out;
8409 }
8410
8411 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8412 if (p->unit_num == pce_info_use->unit_num) {
8413 pce_entry->ce_num = p->ce;
8414 pce_entry->ce_pipe_pair =
8415 p->pipe_pair;
8416 pce_entry->valid = true;
8417 pce_entry++;
8418 }
8419 }
8420 }
8421 kfree(unit_tbl);
8422 unit_tbl = NULL;
8423 kfree(pfde_tbl);
8424 pfde_tbl = NULL;
8425 }
8426
8427 if (qseecom.support_pfe)
8428 tbl = of_get_property((&pdev->dev)->of_node,
8429 "qcom,per-file-encrypt-info", &size);
8430 else
8431 tbl = NULL;
8432 if (tbl) {
8433 old_db = false;
8434 if (size % sizeof(struct qseecom_crypto_info)) {
8435 pr_err("invalid per-file-encrypt-info tbl size(%d)\n",
8436 size);
8437 rc = -EINVAL;
8438 goto out;
8439 }
8440 tbl_size = size / sizeof
8441 (struct qseecom_crypto_info);
8442
8443 pfde_tbl = kzalloc(size, GFP_KERNEL);
8444 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8445 total_units = 0;
8446 if (!pfde_tbl || !unit_tbl) {
8447 pr_err("failed to alloc memory\n");
8448 rc = -ENOMEM;
8449 goto out;
8450 }
8451 if (of_property_read_u32_array((&pdev->dev)->of_node,
8452 "qcom,per-file-encrypt-info",
8453 (u32 *)pfde_tbl, size/sizeof(u32))) {
8454 pr_err("failed to read per-file-encrypt-info tbl\n");
8455 rc = -EINVAL;
8456 goto out;
8457 }
8458
8459 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8460 for (j = 0; j < total_units; j++) {
8461 if (p->unit_num == *(unit_tbl + j))
8462 break;
8463 }
8464 if (j == total_units) {
8465 *(unit_tbl + total_units) = p->unit_num;
8466 total_units++;
8467 }
8468 }
8469
8470 qseecom.ce_info.num_pfe = total_units;
8471 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8472 total_units, sizeof(struct qseecom_ce_info_use),
8473 GFP_KERNEL);
8474 if (!pce_info_use) {
8475 pr_err("failed to alloc memory\n");
8476 rc = -ENOMEM;
8477 goto out;
8478 }
8479
8480 for (j = 0; j < total_units; j++, pce_info_use++) {
8481 pce_info_use->unit_num = *(unit_tbl + j);
8482 pce_info_use->alloc = false;
8483 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8484 pce_info_use->num_ce_pipe_entries = 0;
8485 pce_info_use->ce_pipe_entry = NULL;
8486 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8487 if (p->unit_num == pce_info_use->unit_num)
8488 pce_info_use->num_ce_pipe_entries++;
8489 }
8490
8491 entry = pce_info_use->num_ce_pipe_entries;
8492 pce_entry = pce_info_use->ce_pipe_entry =
8493 kcalloc(entry,
8494 sizeof(struct qseecom_ce_pipe_entry),
8495 GFP_KERNEL);
8496 if (pce_entry == NULL) {
8497 pr_err("failed to alloc memory\n");
8498 rc = -ENOMEM;
8499 goto out;
8500 }
8501
8502 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8503 if (p->unit_num == pce_info_use->unit_num) {
8504 pce_entry->ce_num = p->ce;
8505 pce_entry->ce_pipe_pair =
8506 p->pipe_pair;
8507 pce_entry->valid = true;
8508 pce_entry++;
8509 }
8510 }
8511 }
8512 kfree(unit_tbl);
8513 unit_tbl = NULL;
8514 kfree(pfde_tbl);
8515 pfde_tbl = NULL;
8516 }
8517
8518 if (!old_db)
8519 goto out1;
8520
8521 if (of_property_read_bool((&pdev->dev)->of_node,
8522 "qcom,support-multiple-ce-hw-instance")) {
8523 if (of_property_read_u32((&pdev->dev)->of_node,
8524 "qcom,hlos-num-ce-hw-instances",
8525 &hlos_num_ce_hw_instances)) {
8526 pr_err("Fail: get hlos number of ce hw instance\n");
8527 rc = -EINVAL;
8528 goto out;
8529 }
8530 } else {
8531 hlos_num_ce_hw_instances = 1;
8532 }
8533
8534 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8535 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8536 MAX_CE_PIPE_PAIR_PER_UNIT);
8537 rc = -EINVAL;
8538 goto out;
8539 }
8540
8541 if (of_property_read_u32_array((&pdev->dev)->of_node,
8542 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8543 hlos_num_ce_hw_instances)) {
8544 pr_err("Fail: get hlos ce hw instance info\n");
8545 rc = -EINVAL;
8546 goto out;
8547 }
8548
8549 if (qseecom.support_fde) {
8550 pce_info_use = qseecom.ce_info.fde =
8551 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8552 if (!pce_info_use) {
8553 pr_err("failed to alloc memory\n");
8554 rc = -ENOMEM;
8555 goto out;
8556 }
8557 /* by default for old db */
8558 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8559 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8560 pce_info_use->alloc = false;
8561 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8562 pce_info_use->ce_pipe_entry = NULL;
8563 if (of_property_read_u32((&pdev->dev)->of_node,
8564 "qcom,disk-encrypt-pipe-pair",
8565 &disk_encrypt_pipe)) {
8566 pr_err("Fail to get FDE pipe information.\n");
8567 rc = -EINVAL;
8568 goto out;
8569 } else {
8570 pr_debug("disk-encrypt-pipe-pair=0x%x\n",
8571 disk_encrypt_pipe);
8572 }
8573 entry = pce_info_use->num_ce_pipe_entries =
8574 hlos_num_ce_hw_instances;
8575 pce_entry = pce_info_use->ce_pipe_entry =
8576 kcalloc(entry,
8577 sizeof(struct qseecom_ce_pipe_entry),
8578 GFP_KERNEL);
8579 if (pce_entry == NULL) {
8580 pr_err("failed to alloc memory\n");
8581 rc = -ENOMEM;
8582 goto out;
8583 }
8584 for (i = 0; i < entry; i++) {
8585 pce_entry->ce_num = hlos_ce_hw_instance[i];
8586 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8587 pce_entry->valid = 1;
8588 pce_entry++;
8589 }
8590 } else {
8591 pr_warn("Device does not support FDE\n");
8592 disk_encrypt_pipe = 0xff;
8593 }
8594 if (qseecom.support_pfe) {
8595 pce_info_use = qseecom.ce_info.pfe =
8596 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8597 if (!pce_info_use) {
8598 pr_err("failed to alloc memory\n");
8599 rc = -ENOMEM;
8600 goto out;
8601 }
8602 /* by default for old db */
8603 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8604 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8605 pce_info_use->alloc = false;
8606 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8607 pce_info_use->ce_pipe_entry = NULL;
8608
8609 if (of_property_read_u32((&pdev->dev)->of_node,
8610 "qcom,file-encrypt-pipe-pair",
8611 &file_encrypt_pipe)) {
8612 pr_err("Fail to get PFE pipe information.\n");
8613 rc = -EINVAL;
8614 goto out;
8615 } else {
8616 pr_debug("file-encrypt-pipe-pair=0x%x\n",
8617 file_encrypt_pipe);
8618 }
8619 entry = pce_info_use->num_ce_pipe_entries =
8620 hlos_num_ce_hw_instances;
8621 pce_entry = pce_info_use->ce_pipe_entry =
8622 kcalloc(entry,
8623 sizeof(struct qseecom_ce_pipe_entry),
8624 GFP_KERNEL);
8625 if (pce_entry == NULL) {
8626 pr_err("failed to alloc memory\n");
8627 rc = -ENOMEM;
8628 goto out;
8629 }
8630 for (i = 0; i < entry; i++) {
8631 pce_entry->ce_num = hlos_ce_hw_instance[i];
8632 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8633 pce_entry->valid = 1;
8634 pce_entry++;
8635 }
8636 } else {
8637 pr_warn("Device does not support PFE\n");
8638 file_encrypt_pipe = 0xff;
8639 }
8640
8641out1:
8642 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8643 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8644out:
8645 if (rc) {
8646 if (qseecom.ce_info.fde) {
8647 pce_info_use = qseecom.ce_info.fde;
8648 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8649 pce_entry = pce_info_use->ce_pipe_entry;
8650 kfree(pce_entry);
8651 pce_info_use++;
8652 }
8653 }
8654 kfree(qseecom.ce_info.fde);
8655 qseecom.ce_info.fde = NULL;
8656 if (qseecom.ce_info.pfe) {
8657 pce_info_use = qseecom.ce_info.pfe;
8658 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8659 pce_entry = pce_info_use->ce_pipe_entry;
8660 kfree(pce_entry);
8661 pce_info_use++;
8662 }
8663 }
8664 kfree(qseecom.ce_info.pfe);
8665 qseecom.ce_info.pfe = NULL;
8666 }
8667 kfree(unit_tbl);
8668 kfree(pfde_tbl);
8669 return rc;
8670}
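/*
 * Illustrative devicetree fragment (a sketch with made-up values, not taken
 * from any real platform dtsi): it shows only the CE-related properties that
 * qseecom_retrieve_ce_data() above reads, using the legacy ("old db") form.
 * Newer platforms instead describe each unit through the
 * qcom,full-disk-encrypt-info / qcom,per-file-encrypt-info tables, whose
 * cells follow struct qseecom_crypto_info, and multi-instance platforms add
 * qcom,support-multiple-ce-hw-instance plus qcom,hlos-num-ce-hw-instances.
 *
 *	qseecom {
 *		qcom,qsee-ce-hw-instance = <0>;
 *		qcom,support-fde;
 *		qcom,support-pfe;
 *		qcom,hlos-ce-hw-instance = <0>;
 *		qcom,disk-encrypt-pipe-pair = <2>;
 *		qcom,file-encrypt-pipe-pair = <0>;
 *	};
 */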
8671
8672static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8673 void __user *argp)
8674{
8675 struct qseecom_ce_info_req req;
8676 struct qseecom_ce_info_req *pinfo = &req;
8677 int ret = 0;
8678 int i;
8679 unsigned int entries;
8680 struct qseecom_ce_info_use *pce_info_use, *p;
8681 int total = 0;
8682 bool found = false;
8683 struct qseecom_ce_pipe_entry *pce_entry;
8684
8685 ret = copy_from_user(pinfo, argp,
8686 sizeof(struct qseecom_ce_info_req));
8687 if (ret) {
8688 pr_err("copy_from_user failed\n");
8689 return -EFAULT;
8690 }
8691
8692 switch (pinfo->usage) {
8693 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8694 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8695 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8696 if (qseecom.support_fde) {
8697 p = qseecom.ce_info.fde;
8698 total = qseecom.ce_info.num_fde;
8699 } else {
8700 pr_err("system does not support fde\n");
8701 return -EINVAL;
8702 }
8703 break;
8704 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8705 if (qseecom.support_pfe) {
8706 p = qseecom.ce_info.pfe;
8707 total = qseecom.ce_info.num_pfe;
8708 } else {
8709 pr_err("system does not support pfe\n");
8710 return -EINVAL;
8711 }
8712 break;
8713 default:
8714 pr_err("unsupported usage %d\n", pinfo->usage);
8715 return -EINVAL;
8716 }
8717
8718 pce_info_use = NULL;
8719 for (i = 0; i < total; i++) {
8720 if (!p->alloc)
8721 pce_info_use = p;
8722 else if (!memcmp(p->handle, pinfo->handle,
8723 MAX_CE_INFO_HANDLE_SIZE)) {
8724 pce_info_use = p;
8725 found = true;
8726 break;
8727 }
8728 p++;
8729 }
8730
8731 if (pce_info_use == NULL)
8732 return -EBUSY;
8733
8734 pinfo->unit_num = pce_info_use->unit_num;
8735 if (!pce_info_use->alloc) {
8736 pce_info_use->alloc = true;
8737 memcpy(pce_info_use->handle,
8738 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8739 }
8740 if (pce_info_use->num_ce_pipe_entries >
8741 MAX_CE_PIPE_PAIR_PER_UNIT)
8742 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8743 else
8744 entries = pce_info_use->num_ce_pipe_entries;
8745 pinfo->num_ce_pipe_entries = entries;
8746 pce_entry = pce_info_use->ce_pipe_entry;
8747 for (i = 0; i < entries; i++, pce_entry++)
8748 pinfo->ce_pipe_entry[i] = *pce_entry;
8749 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8750 pinfo->ce_pipe_entry[i].valid = 0;
8751
8752 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8753 pr_err("copy_to_user failed\n");
8754 ret = -EFAULT;
8755 }
8756 return ret;
8757}
8758
8759static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8760 void __user *argp)
8761{
8762 struct qseecom_ce_info_req req;
8763 struct qseecom_ce_info_req *pinfo = &req;
8764 int ret = 0;
8765 struct qseecom_ce_info_use *p;
8766 int total = 0;
8767 int i;
8768 bool found = false;
8769
8770 ret = copy_from_user(pinfo, argp,
8771 sizeof(struct qseecom_ce_info_req));
8772 if (ret)
8773 return -EFAULT;
8774
8775 switch (pinfo->usage) {
8776 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8777 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8778 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8779 if (qseecom.support_fde) {
8780 p = qseecom.ce_info.fde;
8781 total = qseecom.ce_info.num_fde;
8782 } else {
8783 pr_err("system does not support fde\n");
8784 return -EINVAL;
8785 }
8786 break;
8787 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8788 if (qseecom.support_pfe) {
8789 p = qseecom.ce_info.pfe;
8790 total = qseecom.ce_info.num_pfe;
8791 } else {
8792 pr_err("system does not support pfe\n");
8793 return -EINVAL;
8794 }
8795 break;
8796 default:
8797 pr_err("unsupported usage %d\n", pinfo->usage);
8798 return -EINVAL;
8799 }
8800
8801 for (i = 0; i < total; i++) {
8802 if (p->alloc &&
8803 !memcmp(p->handle, pinfo->handle,
8804 MAX_CE_INFO_HANDLE_SIZE)) {
8805 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8806 p->alloc = false;
8807 found = true;
8808 break;
8809 }
8810 p++;
8811 }
8812 return ret;
8813}
8814
8815static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8816 void __user *argp)
8817{
8818 struct qseecom_ce_info_req req;
8819 struct qseecom_ce_info_req *pinfo = &req;
8820 int ret = 0;
8821 int i;
8822 unsigned int entries;
8823 struct qseecom_ce_info_use *pce_info_use, *p;
8824 int total = 0;
8825 bool found = false;
8826 struct qseecom_ce_pipe_entry *pce_entry;
8827
8828 ret = copy_from_user(pinfo, argp,
8829 sizeof(struct qseecom_ce_info_req));
8830 if (ret)
8831 return -EFAULT;
8832
8833 switch (pinfo->usage) {
8834 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8835 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8836 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8837 if (qseecom.support_fde) {
8838 p = qseecom.ce_info.fde;
8839 total = qseecom.ce_info.num_fde;
8840 } else {
8841 pr_err("system does not support fde\n");
8842 return -EINVAL;
8843 }
8844 break;
8845 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8846 if (qseecom.support_pfe) {
8847 p = qseecom.ce_info.pfe;
8848 total = qseecom.ce_info.num_pfe;
8849 } else {
8850 pr_err("system does not support pfe\n");
8851 return -EINVAL;
8852 }
8853 break;
8854 default:
8855 pr_err("unsupported usage %d\n", pinfo->usage);
8856 return -EINVAL;
8857 }
8858
8859 pce_info_use = NULL;
8860 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8861 pinfo->num_ce_pipe_entries = 0;
8862 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8863 pinfo->ce_pipe_entry[i].valid = 0;
8864
8865 for (i = 0; i < total; i++) {
8866
8867 if (p->alloc && !memcmp(p->handle,
8868 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8869 pce_info_use = p;
8870 found = true;
8871 break;
8872 }
8873 p++;
8874 }
8875 if (!pce_info_use)
8876 goto out;
8877 pinfo->unit_num = pce_info_use->unit_num;
8878 if (pce_info_use->num_ce_pipe_entries >
8879 MAX_CE_PIPE_PAIR_PER_UNIT)
8880 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8881 else
8882 entries = pce_info_use->num_ce_pipe_entries;
8883 pinfo->num_ce_pipe_entries = entries;
8884 pce_entry = pce_info_use->ce_pipe_entry;
8885 for (i = 0; i < entries; i++, pce_entry++)
8886 pinfo->ce_pipe_entry[i] = *pce_entry;
8887 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8888 pinfo->ce_pipe_entry[i].valid = 0;
8889out:
8890 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8891 pr_err("copy_to_user failed\n");
8892 ret = -EFAULT;
8893 }
8894 return ret;
8895}
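/*
 * Usage sketch for the three CE-info handlers above (get, free, query).
 * This is an illustrative, disabled userspace-style snippet, not driver
 * code: the ioctl request names are assumptions that would come from the
 * qseecom UAPI header, and only the struct qseecom_ce_info_req fields used
 * by the handlers above are relied upon. Built as a standalone tool it would
 * also need <fcntl.h>, <stdio.h>, <string.h>, <sys/ioctl.h> and that header.
 * A client identifies itself with a handle, receives a unit number plus CE
 * pipe-pair entries, and frees the reservation when done.
 */
#if 0
	int fd = open("/dev/qseecom", O_RDWR);
	unsigned int i;
	struct qseecom_ce_info_req ce_req = {0};

	memcpy(ce_req.handle, "example-handle", sizeof("example-handle"));
	ce_req.usage = QSEOS_KM_USAGE_DISK_ENCRYPTION;

	/* reserve (or look up) a CE unit for this handle */
	if (fd >= 0 &&
	    !ioctl(fd, QSEECOM_IOCTL_GET_CE_PIPE_INFO_REQ, &ce_req)) {
		for (i = 0; i < ce_req.num_ce_pipe_entries; i++)
			printf("unit %u: ce %u pipe-pair %u\n",
			       ce_req.unit_num,
			       ce_req.ce_pipe_entry[i].ce_num,
			       ce_req.ce_pipe_entry[i].ce_pipe_pair);

		/* release the reservation taken by the GET call above */
		ioctl(fd, QSEECOM_IOCTL_FREE_CE_PIPE_INFO_REQ, &ce_req);
	}
#endif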
8896
8897/*
8898 * Check the TZ whitelist feature: if the TZ feature version is below
8899 * 1.0.0, the whitelist feature is not supported.
8900 */
8901static int qseecom_check_whitelist_feature(void)
8902{
8903 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8904
8905 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8906}
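/*
 * Worked example (a sketch, assuming MAKE_WHITELIST_VERSION(major, minor,
 * patch) packs its arguments in descending significance): a TZ image that
 * reports 0.9.9 for FEATURE_ID_WHITELIST compares below
 * MAKE_WHITELIST_VERSION(1, 0, 0), so this function returns 0; an image
 * reporting 1.0.0 or newer returns 1, which qseecom_probe() stores in
 * qseecom.whitelist_support.
 */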
8907
8908static int qseecom_probe(struct platform_device *pdev)
8909{
8910 int rc;
8911 int i;
8912 uint32_t feature = 10;
8913 struct device *class_dev;
8914 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8915 struct qseecom_command_scm_resp resp;
8916 struct qseecom_ce_info_use *pce_info_use = NULL;
8917
8918 qseecom.qsee_bw_count = 0;
8919 qseecom.qsee_perf_client = 0;
8920 qseecom.qsee_sfpb_bw_count = 0;
8921
8922 qseecom.qsee.ce_core_clk = NULL;
8923 qseecom.qsee.ce_clk = NULL;
8924 qseecom.qsee.ce_core_src_clk = NULL;
8925 qseecom.qsee.ce_bus_clk = NULL;
8926
8927 qseecom.cumulative_mode = 0;
8928 qseecom.current_mode = INACTIVE;
8929 qseecom.support_bus_scaling = false;
8930 qseecom.support_fde = false;
8931 qseecom.support_pfe = false;
8932
8933 qseecom.ce_drv.ce_core_clk = NULL;
8934 qseecom.ce_drv.ce_clk = NULL;
8935 qseecom.ce_drv.ce_core_src_clk = NULL;
8936 qseecom.ce_drv.ce_bus_clk = NULL;
8937 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8938
8939 qseecom.app_block_ref_cnt = 0;
8940 init_waitqueue_head(&qseecom.app_block_wq);
8941 qseecom.whitelist_support = true;
8942
8943 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8944 if (rc < 0) {
8945 pr_err("alloc_chrdev_region failed %d\n", rc);
8946 return rc;
8947 }
8948
8949 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8950 if (IS_ERR(driver_class)) {
8951 rc = -ENOMEM;
8952 pr_err("class_create failed %d\n", rc);
8953 goto exit_unreg_chrdev_region;
8954 }
8955
8956 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8957 QSEECOM_DEV);
8958 if (IS_ERR(class_dev)) {
8959 rc = -ENOMEM;
8960 pr_err("class_device_create failed %d\n", rc);
8961 goto exit_destroy_class;
8962 }
8963
8964 cdev_init(&qseecom.cdev, &qseecom_fops);
8965 qseecom.cdev.owner = THIS_MODULE;
8966
8967 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8968 if (rc < 0) {
8969 pr_err("cdev_add failed %d\n", rc);
8970 goto exit_destroy_device;
8971 }
8972
8973 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
8974 INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8975 spin_lock_init(&qseecom.registered_app_list_lock);
8976 INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
8977 INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8978 spin_lock_init(&qseecom.registered_kclient_list_lock);
8979 init_waitqueue_head(&qseecom.send_resp_wq);
8980 init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
8981 init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq);
8982 INIT_LIST_HEAD(&qseecom.unload_app_pending_list_head);
8983 init_waitqueue_head(&qseecom.unload_app_kthread_wq);
8984 qseecom.send_resp_flag = 0;
8985
8986 qseecom.qsee_version = QSEEE_VERSION_00;
8987 mutex_lock(&app_access_lock);
8988 rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8989 &resp, sizeof(resp));
8990 mutex_unlock(&app_access_lock);
8991 pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8992 if (rc) {
8993 pr_err("Failed to get QSEE version info %d\n", rc);
8994 goto exit_del_cdev;
8995 }
8996 qseecom.qsee_version = resp.result;
8997 qseecom.qseos_version = QSEOS_VERSION_14;
8998 qseecom.commonlib_loaded = false;
8999 qseecom.commonlib64_loaded = false;
9000 qseecom.pdev = class_dev;
9001 /* Create ION msm client */
9002 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
9003 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
9004 pr_err("Ion client cannot be created\n");
9005 rc = -ENOMEM;
9006 goto exit_del_cdev;
9007 }
9008
9009 /* parse DT properties, set up CE clocks, and register for bus scaling */
9010 if (pdev->dev.of_node) {
9011 qseecom.pdev->of_node = pdev->dev.of_node;
9012 qseecom.support_bus_scaling =
9013 of_property_read_bool((&pdev->dev)->of_node,
9014 "qcom,support-bus-scaling");
9015 rc = qseecom_retrieve_ce_data(pdev);
9016 if (rc)
9017 goto exit_destroy_ion_client;
9018 qseecom.appsbl_qseecom_support =
9019 of_property_read_bool((&pdev->dev)->of_node,
9020 "qcom,appsbl-qseecom-support");
9021 pr_debug("qseecom.appsbl_qseecom_support = 0x%x\n",
9022 qseecom.appsbl_qseecom_support);
9023
9024 qseecom.commonlib64_loaded =
9025 of_property_read_bool((&pdev->dev)->of_node,
9026 "qcom,commonlib64-loaded-by-uefi");
9027 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x\n",
9028 qseecom.commonlib64_loaded);
9029 qseecom.fde_key_size =
9030 of_property_read_bool((&pdev->dev)->of_node,
9031 "qcom,fde-key-size");
9032 qseecom.no_clock_support =
9033 of_property_read_bool((&pdev->dev)->of_node,
9034 "qcom,no-clock-support");
9035 if (qseecom.no_clock_support) {
9036 pr_info("qseecom clocks handled by other subsystem\n");
9037 } else {
9038 pr_info("no-clock-support=0x%x\n",
9039 qseecom.no_clock_support);
9040 }
9041
9042 if (of_property_read_u32((&pdev->dev)->of_node,
9043 "qcom,qsee-reentrancy-support",
9044 &qseecom.qsee_reentrancy_support)) {
9045 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
9046 qseecom.qsee_reentrancy_support = 0;
9047 } else {
9048 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
9049 qseecom.qsee_reentrancy_support);
9050 }
9051
9052 qseecom.enable_key_wrap_in_ks =
9053 of_property_read_bool((&pdev->dev)->of_node,
9054 "qcom,enable-key-wrap-in-ks");
9055 if (qseecom.enable_key_wrap_in_ks) {
9056 pr_warn("qseecom.enable_key_wrap_in_ks = %d\n",
9057 qseecom.enable_key_wrap_in_ks);
9058 }
9059
9060 /*
9061 * The qseecom bus scaling flag cannot be enabled when the
9062 * crypto clock is not handled by HLOS.
9063 */
9064 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
9065 pr_err("support_bus_scaling flag can not be enabled.\n");
9066 rc = -EINVAL;
9067 goto exit_destroy_ion_client;
9068 }
9069
9070 if (of_property_read_u32((&pdev->dev)->of_node,
9071 "qcom,ce-opp-freq",
9072 &qseecom.ce_opp_freq_hz)) {
9073 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
9074 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
9075 }
9076 rc = __qseecom_init_clk(CLK_QSEE);
9077 if (rc)
9078 goto exit_destroy_ion_client;
9079
9080 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9081 (qseecom.support_pfe || qseecom.support_fde)) {
9082 rc = __qseecom_init_clk(CLK_CE_DRV);
9083 if (rc) {
9084 __qseecom_deinit_clk(CLK_QSEE);
9085 goto exit_destroy_ion_client;
9086 }
9087 } else {
9088 struct qseecom_clk *qclk;
9089
9090 qclk = &qseecom.qsee;
9091 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
9092 qseecom.ce_drv.ce_clk = qclk->ce_clk;
9093 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
9094 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
9095 }
9096
9097 qseecom_platform_support = (struct msm_bus_scale_pdata *)
9098 msm_bus_cl_get_pdata(pdev);
9099 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
9100 (!qseecom.is_apps_region_protected &&
9101 !qseecom.appsbl_qseecom_support)) {
9102 struct resource *resource = NULL;
9103 struct qsee_apps_region_info_ireq req;
9104 struct qsee_apps_region_info_64bit_ireq req_64bit;
9105 struct qseecom_command_scm_resp resp;
9106 void *cmd_buf = NULL;
9107 size_t cmd_len;
9108
9109 resource = platform_get_resource_byname(pdev,
9110 IORESOURCE_MEM, "secapp-region");
9111 if (resource) {
9112 if (qseecom.qsee_version < QSEE_VERSION_40) {
9113 req.qsee_cmd_id =
9114 QSEOS_APP_REGION_NOTIFICATION;
9115 req.addr = (uint32_t)resource->start;
9116 req.size = resource_size(resource);
9117 cmd_buf = (void *)&req;
9118 cmd_len = sizeof(struct
9119 qsee_apps_region_info_ireq);
9120 pr_warn("secure app region addr=0x%x size=0x%x\n",
9121 req.addr, req.size);
9122 } else {
9123 req_64bit.qsee_cmd_id =
9124 QSEOS_APP_REGION_NOTIFICATION;
9125 req_64bit.addr = resource->start;
9126 req_64bit.size = resource_size(
9127 resource);
9128 cmd_buf = (void *)&req_64bit;
9129 cmd_len = sizeof(struct
9130 qsee_apps_region_info_64bit_ireq);
9131 pr_warn("secure app region addr=0x%llx size=0x%x\n",
9132 req_64bit.addr, req_64bit.size);
9133 }
9134 } else {
9135 pr_err("Fail to get secure app region info\n");
9136 rc = -EINVAL;
9137 goto exit_deinit_clock;
9138 }
9139 rc = __qseecom_enable_clk(CLK_QSEE);
9140 if (rc) {
9141 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
9142 rc = -EIO;
9143 goto exit_deinit_clock;
9144 }
9145 mutex_lock(&app_access_lock);
9146 rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
9147 cmd_buf, cmd_len,
9148 &resp, sizeof(resp));
9149 mutex_unlock(&app_access_lock);
9150 __qseecom_disable_clk(CLK_QSEE);
9151 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
9152 pr_err("send secapp reg fail %d resp.res %d\n",
9153 rc, resp.result);
9154 rc = -EINVAL;
9155 goto exit_deinit_clock;
9156 }
9157 }
9158 /*
9159 * By default, appsbl only loads cmnlib. If an OEM changes appsbl to also
9160 * load cmnlib64 while the cmnlib64 image is not present in non_hlos.bin,
9161 * please also set "qseecom.commonlib64_loaded = true" here.
9162 */
9163 if (qseecom.is_apps_region_protected ||
9164 qseecom.appsbl_qseecom_support)
9165 qseecom.commonlib_loaded = true;
9166 } else {
9167 qseecom_platform_support = (struct msm_bus_scale_pdata *)
9168 pdev->dev.platform_data;
9169 }
9170 if (qseecom.support_bus_scaling) {
9171 init_timer(&(qseecom.bw_scale_down_timer));
9172 INIT_WORK(&qseecom.bw_inactive_req_ws,
9173 qseecom_bw_inactive_req_work);
9174 qseecom.bw_scale_down_timer.function =
9175 qseecom_scale_bus_bandwidth_timer_callback;
9176 }
9177 qseecom.timer_running = false;
9178 qseecom.qsee_perf_client = msm_bus_scale_register_client(
9179 qseecom_platform_support);
9180
9181 qseecom.whitelist_support = qseecom_check_whitelist_feature();
9182 pr_warn("qseecom.whitelist_support = %d\n",
9183 qseecom.whitelist_support);
9184
9185 if (!qseecom.qsee_perf_client)
9186 pr_err("Unable to register bus client\n");
9187
9188 /* create a kthread to process pending listener unregister task */
9189 qseecom.unregister_lsnr_kthread_task = kthread_run(
9190 __qseecom_unregister_listener_kthread_func,
9191 NULL, "qseecom-unreg-lsnr");
9192 if (IS_ERR(qseecom.unregister_lsnr_kthread_task)) {
9193 pr_err("failed to create kthread to unregister listener\n");
9194 rc = -EINVAL;
9195 goto exit_deinit_clock;
9196 }
9197 atomic_set(&qseecom.unregister_lsnr_kthread_state,
9198 LSNR_UNREG_KT_SLEEP);
9199
9200 /* create a kthread to process pending ta unloading task */
9201 qseecom.unload_app_kthread_task = kthread_run(
9202 __qseecom_unload_app_kthread_func,
9203 NULL, "qseecom-unload-ta");
9204 if (IS_ERR(qseecom.unload_app_kthread_task)) {
9205 pr_err("failed to create kthread to unload ta\n");
9206 rc = -EINVAL;
9207 goto exit_kill_unreg_lsnr_kthread;
9208 }
9209 atomic_set(&qseecom.unload_app_kthread_state,
9210 UNLOAD_APP_KT_SLEEP);
9211
9212 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
9213 return 0;
9214
9215exit_kill_unreg_lsnr_kthread:
9216 kthread_stop(qseecom.unregister_lsnr_kthread_task);
9217
9218exit_deinit_clock:
9219 __qseecom_deinit_clk(CLK_QSEE);
9220 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9221 (qseecom.support_pfe || qseecom.support_fde))
9222 __qseecom_deinit_clk(CLK_CE_DRV);
9223exit_destroy_ion_client:
9224 if (qseecom.ce_info.fde) {
9225 pce_info_use = qseecom.ce_info.fde;
9226 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
9227 kzfree(pce_info_use->ce_pipe_entry);
9228 pce_info_use++;
9229 }
9230 kfree(qseecom.ce_info.fde);
9231 }
9232 if (qseecom.ce_info.pfe) {
9233 pce_info_use = qseecom.ce_info.pfe;
9234 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
9235 kzfree(pce_info_use->ce_pipe_entry);
9236 pce_info_use++;
9237 }
9238 kfree(qseecom.ce_info.pfe);
9239 }
9240 ion_client_destroy(qseecom.ion_clnt);
9241exit_del_cdev:
9242 cdev_del(&qseecom.cdev);
9243exit_destroy_device:
9244 device_destroy(driver_class, qseecom_device_no);
9245exit_destroy_class:
9246 class_destroy(driver_class);
9247exit_unreg_chrdev_region:
9248 unregister_chrdev_region(qseecom_device_no, 1);
9249 return rc;
9250}
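/*
 * Illustrative devicetree fragment for the probe-time options read above
 * (a sketch with made-up values; consult the platform dtsi for real ones).
 * The "secapp-region" resource is looked up by name via
 * platform_get_resource_byname(), so it is expressed through reg/reg-names,
 * while the boolean qcom,* knobs only need to be present to take effect.
 * Note that qcom,no-clock-support and qcom,support-bus-scaling are mutually
 * exclusive, as enforced by the check in qseecom_probe().
 *
 *	qseecom@86600000 {
 *		compatible = "qcom,qseecom";
 *		reg = <0x86600000 0x2200000>;
 *		reg-names = "secapp-region";
 *		qcom,support-bus-scaling;
 *		qcom,appsbl-qseecom-support;
 *		qcom,commonlib64-loaded-by-uefi;
 *		qcom,enable-key-wrap-in-ks;
 *		qcom,ce-opp-freq = <171430000>;
 *		qcom,qsee-reentrancy-support = <2>;
 *	};
 */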
9251
9252static int qseecom_remove(struct platform_device *pdev)
9253{
9254 struct qseecom_registered_kclient_list *kclient = NULL;
9255 struct qseecom_registered_kclient_list *kclient_tmp = NULL;
9256 unsigned long flags = 0;
9257 int ret = 0;
9258 int i;
9259 struct qseecom_ce_pipe_entry *pce_entry;
9260 struct qseecom_ce_info_use *pce_info_use;
9261
9262 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
9263 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
9264
9265 list_for_each_entry_safe(kclient, kclient_tmp,
9266 &qseecom.registered_kclient_list_head, list) {
9267
9268 /* Break the loop if client handle is NULL */
9269 if (!kclient->handle) {
9270 list_del(&kclient->list);
9271 kzfree(kclient);
9272 break;
9273 }
9274
9275 list_del(&kclient->list);
9276 mutex_lock(&app_access_lock);
9277 ret = qseecom_unload_app(kclient->handle->dev, false);
9278 mutex_unlock(&app_access_lock);
9279 if (!ret) {
9280 kzfree(kclient->handle->dev);
9281 kzfree(kclient->handle);
9282 kzfree(kclient);
9283 }
9284 }
9285
9286 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
9287
9288 if (qseecom.qseos_version > QSEEE_VERSION_00)
9289 qseecom_unload_commonlib_image();
9290
9291 if (qseecom.qsee_perf_client)
9292 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
9293 0);
9294 if (pdev->dev.platform_data != NULL)
9295 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
9296
9297 if (qseecom.support_bus_scaling) {
9298 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9299 del_timer_sync(&qseecom.bw_scale_down_timer);
9300 }
9301
9302 if (qseecom.ce_info.fde) {
9303 pce_info_use = qseecom.ce_info.fde;
9304 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
9305 pce_entry = pce_info_use->ce_pipe_entry;
9306 kfree(pce_entry);
9307 pce_info_use++;
9308 }
9309 }
9310 kfree(qseecom.ce_info.fde);
9311 if (qseecom.ce_info.pfe) {
9312 pce_info_use = qseecom.ce_info.pfe;
9313 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
9314 pce_entry = pce_info_use->ce_pipe_entry;
9315 kfree(pce_entry);
9316 pce_info_use++;
9317 }
9318 }
9319 kfree(qseecom.ce_info.pfe);
9320
9321 /* de-initialize the CE clocks acquired at probe */
9322 if (pdev->dev.of_node) {
9323 __qseecom_deinit_clk(CLK_QSEE);
9324 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9325 (qseecom.support_pfe || qseecom.support_fde))
9326 __qseecom_deinit_clk(CLK_CE_DRV);
9327 }
9328
9329 ion_client_destroy(qseecom.ion_clnt);
9330
9331 kthread_stop(qseecom.unload_app_kthread_task);
9332
9333 kthread_stop(qseecom.unregister_lsnr_kthread_task);
9334
9335 cdev_del(&qseecom.cdev);
9336
9337 device_destroy(driver_class, qseecom_device_no);
9338
9339 class_destroy(driver_class);
9340
9341 unregister_chrdev_region(qseecom_device_no, 1);
9342
9343 return ret;
9344}
9345
9346static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
9347{
9348 int ret = 0;
9349 struct qseecom_clk *qclk;
9350
9351 qclk = &qseecom.qsee;
9352 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
9353 if (qseecom.no_clock_support)
9354 return 0;
9355
9356 mutex_lock(&qsee_bw_mutex);
9357 mutex_lock(&clk_access_lock);
9358
9359 if (qseecom.current_mode != INACTIVE) {
9360 ret = msm_bus_scale_client_update_request(
9361 qseecom.qsee_perf_client, INACTIVE);
9362 if (ret)
9363 pr_err("Fail to scale down bus\n");
9364 else
9365 qseecom.current_mode = INACTIVE;
9366 }
9367
9368 if (qclk->clk_access_cnt) {
9369 if (qclk->ce_clk != NULL)
9370 clk_disable_unprepare(qclk->ce_clk);
9371 if (qclk->ce_core_clk != NULL)
9372 clk_disable_unprepare(qclk->ce_core_clk);
9373 if (qclk->ce_bus_clk != NULL)
9374 clk_disable_unprepare(qclk->ce_bus_clk);
9375 }
9376
9377 del_timer_sync(&(qseecom.bw_scale_down_timer));
9378 qseecom.timer_running = false;
9379
9380 mutex_unlock(&clk_access_lock);
9381 mutex_unlock(&qsee_bw_mutex);
9382 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9383
9384 return 0;
9385}
9386
9387static int qseecom_resume(struct platform_device *pdev)
9388{
9389 int mode = 0;
9390 int ret = 0;
9391 struct qseecom_clk *qclk;
9392
9393 qclk = &qseecom.qsee;
9394 if (qseecom.no_clock_support)
9395 goto exit;
9396
9397 mutex_lock(&qsee_bw_mutex);
9398 mutex_lock(&clk_access_lock);
9399 if (qseecom.cumulative_mode >= HIGH)
9400 mode = HIGH;
9401 else
9402 mode = qseecom.cumulative_mode;
9403
9404 if (qseecom.cumulative_mode != INACTIVE) {
9405 ret = msm_bus_scale_client_update_request(
9406 qseecom.qsee_perf_client, mode);
9407 if (ret)
9408 pr_err("Fail to scale up bus to %d\n", mode);
9409 else
9410 qseecom.current_mode = mode;
9411 }
9412
9413 if (qclk->clk_access_cnt) {
9414 if (qclk->ce_core_clk != NULL) {
9415 ret = clk_prepare_enable(qclk->ce_core_clk);
9416 if (ret) {
9417 pr_err("Unable to enable/prep CE core clk\n");
9418 qclk->clk_access_cnt = 0;
9419 goto err;
9420 }
9421 }
9422 if (qclk->ce_clk != NULL) {
9423 ret = clk_prepare_enable(qclk->ce_clk);
9424 if (ret) {
9425 pr_err("Unable to enable/prep CE iface clk\n");
9426 qclk->clk_access_cnt = 0;
9427 goto ce_clk_err;
9428 }
9429 }
9430 if (qclk->ce_bus_clk != NULL) {
9431 ret = clk_prepare_enable(qclk->ce_bus_clk);
9432 if (ret) {
9433 pr_err("Unable to enable/prep CE bus clk\n");
9434 qclk->clk_access_cnt = 0;
9435 goto ce_bus_clk_err;
9436 }
9437 }
9438 }
9439
9440 if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
9441 qseecom.bw_scale_down_timer.expires = jiffies +
9442 msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
9443 mod_timer(&(qseecom.bw_scale_down_timer),
9444 qseecom.bw_scale_down_timer.expires);
9445 qseecom.timer_running = true;
9446 }
9447
9448 mutex_unlock(&clk_access_lock);
9449 mutex_unlock(&qsee_bw_mutex);
9450 goto exit;
9451
9452ce_bus_clk_err:
9453 if (qclk->ce_clk)
9454 clk_disable_unprepare(qclk->ce_clk);
9455ce_clk_err:
9456 if (qclk->ce_core_clk)
9457 clk_disable_unprepare(qclk->ce_core_clk);
9458err:
9459 mutex_unlock(&clk_access_lock);
9460 mutex_unlock(&qsee_bw_mutex);
9461 ret = -EIO;
9462exit:
9463 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
9464 return ret;
9465}
9466
9467static const struct of_device_id qseecom_match[] = {
9468 {
9469 .compatible = "qcom,qseecom",
9470 },
9471 {}
9472};
9473
9474static struct platform_driver qseecom_plat_driver = {
9475 .probe = qseecom_probe,
9476 .remove = qseecom_remove,
9477 .suspend = qseecom_suspend,
9478 .resume = qseecom_resume,
9479 .driver = {
9480 .name = "qseecom",
9481 .owner = THIS_MODULE,
9482 .of_match_table = qseecom_match,
9483 },
9484};
9485
9486static int qseecom_init(void)
9487{
9488 return platform_driver_register(&qseecom_plat_driver);
9489}
9490
9491static void qseecom_exit(void)
9492{
9493 platform_driver_unregister(&qseecom_plat_driver);
9494}
9495
9496MODULE_LICENSE("GPL v2");
9497MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9498
9499module_init(qseecom_init);
9500module_exit(qseecom_exit);