blob: 67d8e33246fab7cb1ba9507eb33a9e0cd4c3db09 [file] [log] [blame]
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
Zhen Kong87dcf0e2019-01-04 12:34:50 -08004 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
Zhen Kongc4c162a2019-01-23 12:07:12 -080053#include <linux/kthread.h>
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070054
55#define QSEECOM_DEV "qseecom"
56#define QSEOS_VERSION_14 0x14
57#define QSEEE_VERSION_00 0x400000
58#define QSEE_VERSION_01 0x401000
59#define QSEE_VERSION_02 0x402000
60#define QSEE_VERSION_03 0x403000
61#define QSEE_VERSION_04 0x404000
62#define QSEE_VERSION_05 0x405000
63#define QSEE_VERSION_20 0x800000
64#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
65
66#define QSEE_CE_CLK_100MHZ 100000000
67#define CE_CLK_DIV 1000000
68
Mohamed Sunfeer105a07b2018-08-29 13:52:40 +053069#define QSEECOM_MAX_SG_ENTRY 4096
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070070#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
71 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
72
73#define QSEECOM_INVALID_KEY_ID 0xff
74
75/* Save partition image hash for authentication check */
76#define SCM_SAVE_PARTITION_HASH_ID 0x01
77
78/* Check if enterprise security is activate */
79#define SCM_IS_ACTIVATED_ID 0x02
80
81/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
82#define SCM_MDTP_CIPHER_DIP 0x01
83
84/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
85#define MAX_DIP 0x20000
86
87#define RPMB_SERVICE 0x2000
88#define SSD_SERVICE 0x3000
89
90#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
91#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
92#define TWO 2
93#define QSEECOM_UFS_ICE_CE_NUM 10
94#define QSEECOM_SDCC_ICE_CE_NUM 20
95#define QSEECOM_ICE_FDE_KEY_INDEX 0
96
97#define PHY_ADDR_4G (1ULL<<32)
98
99#define QSEECOM_STATE_NOT_READY 0
100#define QSEECOM_STATE_SUSPEND 1
101#define QSEECOM_STATE_READY 2
102#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
103
104/*
105 * default ce info unit to 0 for
106 * services which
107 * support only single instance.
108 * Most of services are in this category.
109 */
110#define DEFAULT_CE_INFO_UNIT 0
111#define DEFAULT_NUM_CE_INFO_UNIT 1
112
Jiten Patela7bb1d52018-05-11 12:34:26 +0530113#define FDE_FLAG_POS 4
114#define ENABLE_KEY_WRAP_IN_KS (1 << FDE_FLAG_POS)
115
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700116enum qseecom_clk_definitions {
117 CLK_DFAB = 0,
118 CLK_SFPB,
119};
120
121enum qseecom_ice_key_size_type {
122 QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
123 (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
124 QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
125 (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
126 QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
127 (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
128};
129
130enum qseecom_client_handle_type {
131 QSEECOM_CLIENT_APP = 1,
132 QSEECOM_LISTENER_SERVICE,
133 QSEECOM_SECURE_SERVICE,
134 QSEECOM_GENERIC,
135 QSEECOM_UNAVAILABLE_CLIENT_APP,
136};
137
138enum qseecom_ce_hw_instance {
139 CLK_QSEE = 0,
140 CLK_CE_DRV,
141 CLK_INVALID,
142};
143
Zhen Kongc4c162a2019-01-23 12:07:12 -0800144enum qseecom_listener_unregister_kthread_state {
145 LSNR_UNREG_KT_SLEEP = 0,
146 LSNR_UNREG_KT_WAKEUP,
147};
148
Zhen Kong03b2eae2019-09-17 16:58:46 -0700149enum qseecom_unload_app_kthread_state {
150 UNLOAD_APP_KT_SLEEP = 0,
151 UNLOAD_APP_KT_WAKEUP,
152};
153
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700154static struct class *driver_class;
155static dev_t qseecom_device_no;
156
157static DEFINE_MUTEX(qsee_bw_mutex);
158static DEFINE_MUTEX(app_access_lock);
159static DEFINE_MUTEX(clk_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -0800160static DEFINE_MUTEX(listener_access_lock);
Zhen Kong03b2eae2019-09-17 16:58:46 -0700161static DEFINE_MUTEX(unload_app_pending_list_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -0800162
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700163
164struct sglist_info {
165 uint32_t indexAndFlags;
166 uint32_t sizeOrCount;
167};
168
169/*
170 * The 31th bit indicates only one or multiple physical address inside
171 * the request buffer. If it is set, the index locates a single physical addr
172 * inside the request buffer, and `sizeOrCount` is the size of the memory being
173 * shared at that physical address.
174 * Otherwise, the index locates an array of {start, len} pairs (a
175 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
176 * that array.
177 *
178 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
179 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
180 *
181 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
182 */
183#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
184 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
185
186#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
187
188#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
189
190#define MAKE_WHITELIST_VERSION(major, minor, patch) \
191 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
192
193struct qseecom_registered_listener_list {
194 struct list_head list;
195 struct qseecom_register_listener_req svc;
196 void *user_virt_sb_base;
197 u8 *sb_virt;
198 phys_addr_t sb_phys;
199 size_t sb_length;
200 struct ion_handle *ihandle; /* Retrieve phy addr */
201 wait_queue_head_t rcv_req_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800202 /* rcv_req_flag: 0: ready and empty; 1: received req */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700203 int rcv_req_flag;
204 int send_resp_flag;
205 bool listener_in_use;
206 /* wq for thread blocked on this listener*/
207 wait_queue_head_t listener_block_app_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800208 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
209 uint32_t sglist_cnt;
210 int abort;
211 bool unregister_pending;
212};
213
214struct qseecom_unregister_pending_list {
215 struct list_head list;
216 struct qseecom_dev_handle *data;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700217};
218
219struct qseecom_registered_app_list {
220 struct list_head list;
221 u32 app_id;
222 u32 ref_cnt;
223 char app_name[MAX_APP_NAME_SIZE];
224 u32 app_arch;
225 bool app_blocked;
Zhen Kongdea10592018-07-30 17:50:10 -0700226 u32 check_block;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700227 u32 blocked_on_listener_id;
228};
229
230struct qseecom_registered_kclient_list {
231 struct list_head list;
232 struct qseecom_handle *handle;
233};
234
235struct qseecom_ce_info_use {
236 unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
237 unsigned int unit_num;
238 unsigned int num_ce_pipe_entries;
239 struct qseecom_ce_pipe_entry *ce_pipe_entry;
240 bool alloc;
241 uint32_t type;
242};
243
244struct ce_hw_usage_info {
245 uint32_t qsee_ce_hw_instance;
246 uint32_t num_fde;
247 struct qseecom_ce_info_use *fde;
248 uint32_t num_pfe;
249 struct qseecom_ce_info_use *pfe;
250};
251
252struct qseecom_clk {
253 enum qseecom_ce_hw_instance instance;
254 struct clk *ce_core_clk;
255 struct clk *ce_clk;
256 struct clk *ce_core_src_clk;
257 struct clk *ce_bus_clk;
258 uint32_t clk_access_cnt;
259};
260
261struct qseecom_control {
262 struct ion_client *ion_clnt; /* Ion client */
263 struct list_head registered_listener_list_head;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700264
265 struct list_head registered_app_list_head;
266 spinlock_t registered_app_list_lock;
267
268 struct list_head registered_kclient_list_head;
269 spinlock_t registered_kclient_list_lock;
270
271 wait_queue_head_t send_resp_wq;
272 int send_resp_flag;
273
274 uint32_t qseos_version;
275 uint32_t qsee_version;
276 struct device *pdev;
277 bool whitelist_support;
278 bool commonlib_loaded;
279 bool commonlib64_loaded;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700280 struct ce_hw_usage_info ce_info;
281
282 int qsee_bw_count;
283 int qsee_sfpb_bw_count;
284
285 uint32_t qsee_perf_client;
286 struct qseecom_clk qsee;
287 struct qseecom_clk ce_drv;
288
289 bool support_bus_scaling;
290 bool support_fde;
291 bool support_pfe;
292 bool fde_key_size;
293 uint32_t cumulative_mode;
294 enum qseecom_bandwidth_request_mode current_mode;
295 struct timer_list bw_scale_down_timer;
296 struct work_struct bw_inactive_req_ws;
297 struct cdev cdev;
298 bool timer_running;
299 bool no_clock_support;
300 unsigned int ce_opp_freq_hz;
301 bool appsbl_qseecom_support;
302 uint32_t qsee_reentrancy_support;
Jiten Patela7bb1d52018-05-11 12:34:26 +0530303 bool enable_key_wrap_in_ks;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700304
305 uint32_t app_block_ref_cnt;
306 wait_queue_head_t app_block_wq;
307 atomic_t qseecom_state;
308 int is_apps_region_protected;
Zhen Kong2f60f492017-06-29 15:22:14 -0700309 bool smcinvoke_support;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800310
311 struct list_head unregister_lsnr_pending_list_head;
312 wait_queue_head_t register_lsnr_pending_wq;
Zhen Kongc4c162a2019-01-23 12:07:12 -0800313 struct task_struct *unregister_lsnr_kthread_task;
314 wait_queue_head_t unregister_lsnr_kthread_wq;
315 atomic_t unregister_lsnr_kthread_state;
Zhen Kong03b2eae2019-09-17 16:58:46 -0700316
317 struct list_head unload_app_pending_list_head;
318 struct task_struct *unload_app_kthread_task;
319 wait_queue_head_t unload_app_kthread_wq;
320 atomic_t unload_app_kthread_state;
321};
322
323struct qseecom_unload_app_pending_list {
324 struct list_head list;
325 struct qseecom_dev_handle *data;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700326};
327
328struct qseecom_sec_buf_fd_info {
329 bool is_sec_buf_fd;
330 size_t size;
331 void *vbase;
332 dma_addr_t pbase;
333};
334
335struct qseecom_param_memref {
336 uint32_t buffer;
337 uint32_t size;
338};
339
340struct qseecom_client_handle {
341 u32 app_id;
342 u8 *sb_virt;
343 phys_addr_t sb_phys;
344 unsigned long user_virt_sb_base;
345 size_t sb_length;
346 struct ion_handle *ihandle; /* Retrieve phy addr */
347 char app_name[MAX_APP_NAME_SIZE];
348 u32 app_arch;
349 struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
Zhen Kong0ea975d2019-03-12 14:40:24 -0700350 bool from_smcinvoke;
Zhen Kong03b2eae2019-09-17 16:58:46 -0700351 bool unload_pending;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700352};
353
354struct qseecom_listener_handle {
355 u32 id;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800356 bool unregister_pending;
Zhen Kong87dcf0e2019-01-04 12:34:50 -0800357 bool release_called;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700358};
359
360static struct qseecom_control qseecom;
361
362struct qseecom_dev_handle {
363 enum qseecom_client_handle_type type;
364 union {
365 struct qseecom_client_handle client;
366 struct qseecom_listener_handle listener;
367 };
368 bool released;
369 int abort;
370 wait_queue_head_t abort_wq;
371 atomic_t ioctl_count;
372 bool perf_enabled;
373 bool fast_load_enabled;
374 enum qseecom_bandwidth_request_mode mode;
375 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
376 uint32_t sglist_cnt;
377 bool use_legacy_cmd;
378};
379
380struct qseecom_key_id_usage_desc {
381 uint8_t desc[QSEECOM_KEY_ID_SIZE];
382};
383
384struct qseecom_crypto_info {
385 unsigned int unit_num;
386 unsigned int ce;
387 unsigned int pipe_pair;
388};
389
390static struct qseecom_key_id_usage_desc key_id_array[] = {
391 {
392 .desc = "Undefined Usage Index",
393 },
394
395 {
396 .desc = "Full Disk Encryption",
397 },
398
399 {
400 .desc = "Per File Encryption",
401 },
402
403 {
404 .desc = "UFS ICE Full Disk Encryption",
405 },
406
407 {
408 .desc = "SDCC ICE Full Disk Encryption",
409 },
410};
411
412/* Function proto types */
413static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
414static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
415static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
416static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
417static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
418static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
419 char *cmnlib_name);
420static int qseecom_enable_ice_setup(int usage);
421static int qseecom_disable_ice_setup(int usage);
422static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
423static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
424 void __user *argp);
425static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
426 void __user *argp);
427static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
428 void __user *argp);
Zhen Kong03b2eae2019-09-17 16:58:46 -0700429static int __qseecom_unload_app(struct qseecom_dev_handle *data,
430 uint32_t app_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700431
432static int get_qseecom_keymaster_status(char *str)
433{
434 get_option(&str, &qseecom.is_apps_region_protected);
435 return 1;
436}
437__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
438
Zhen Kong03f220d2019-02-01 17:12:34 -0800439
440#define QSEECOM_SCM_EBUSY_WAIT_MS 30
441#define QSEECOM_SCM_EBUSY_MAX_RETRY 67
442
443static int __qseecom_scm_call2_locked(uint32_t smc_id, struct scm_desc *desc)
444{
445 int ret = 0;
446 int retry_count = 0;
447
448 do {
449 ret = scm_call2_noretry(smc_id, desc);
450 if (ret == -EBUSY) {
451 mutex_unlock(&app_access_lock);
452 msleep(QSEECOM_SCM_EBUSY_WAIT_MS);
453 mutex_lock(&app_access_lock);
454 }
455 if (retry_count == 33)
456 pr_warn("secure world has been busy for 1 second!\n");
457 } while (ret == -EBUSY &&
458 (retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY));
459 return ret;
460}
461
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700462static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
463 const void *req_buf, void *resp_buf)
464{
465 int ret = 0;
466 uint32_t smc_id = 0;
467 uint32_t qseos_cmd_id = 0;
468 struct scm_desc desc = {0};
469 struct qseecom_command_scm_resp *scm_resp = NULL;
470
471 if (!req_buf || !resp_buf) {
472 pr_err("Invalid buffer pointer\n");
473 return -EINVAL;
474 }
475 qseos_cmd_id = *(uint32_t *)req_buf;
476 scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
477
478 switch (svc_id) {
479 case 6: {
480 if (tz_cmd_id == 3) {
481 smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
482 desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
483 desc.args[0] = *(uint32_t *)req_buf;
484 } else {
485 pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
486 svc_id, tz_cmd_id);
487 return -EINVAL;
488 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800489 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700490 break;
491 }
492 case SCM_SVC_ES: {
493 switch (tz_cmd_id) {
494 case SCM_SAVE_PARTITION_HASH_ID: {
495 u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
496 struct qseecom_save_partition_hash_req *p_hash_req =
497 (struct qseecom_save_partition_hash_req *)
498 req_buf;
499 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
500
501 if (!tzbuf)
502 return -ENOMEM;
503 memset(tzbuf, 0, tzbuflen);
504 memcpy(tzbuf, p_hash_req->digest,
505 SHA256_DIGEST_LENGTH);
506 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
507 smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
508 desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
509 desc.args[0] = p_hash_req->partition_id;
510 desc.args[1] = virt_to_phys(tzbuf);
511 desc.args[2] = SHA256_DIGEST_LENGTH;
Zhen Kong03f220d2019-02-01 17:12:34 -0800512 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700513 kzfree(tzbuf);
514 break;
515 }
516 default: {
517 pr_err("tz_cmd_id %d is not supported by scm_call2\n",
518 tz_cmd_id);
519 ret = -EINVAL;
520 break;
521 }
522 } /* end of switch (tz_cmd_id) */
523 break;
524 } /* end of case SCM_SVC_ES */
525 case SCM_SVC_TZSCHEDULER: {
526 switch (qseos_cmd_id) {
527 case QSEOS_APP_START_COMMAND: {
528 struct qseecom_load_app_ireq *req;
529 struct qseecom_load_app_64bit_ireq *req_64bit;
530
531 smc_id = TZ_OS_APP_START_ID;
532 desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
533 if (qseecom.qsee_version < QSEE_VERSION_40) {
534 req = (struct qseecom_load_app_ireq *)req_buf;
535 desc.args[0] = req->mdt_len;
536 desc.args[1] = req->img_len;
537 desc.args[2] = req->phy_addr;
538 } else {
539 req_64bit =
540 (struct qseecom_load_app_64bit_ireq *)
541 req_buf;
542 desc.args[0] = req_64bit->mdt_len;
543 desc.args[1] = req_64bit->img_len;
544 desc.args[2] = req_64bit->phy_addr;
545 }
546 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800547 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700548 break;
549 }
550 case QSEOS_APP_SHUTDOWN_COMMAND: {
551 struct qseecom_unload_app_ireq *req;
552
553 req = (struct qseecom_unload_app_ireq *)req_buf;
554 smc_id = TZ_OS_APP_SHUTDOWN_ID;
555 desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
556 desc.args[0] = req->app_id;
Zhen Kongaf127672019-06-10 13:06:41 -0700557 ret = scm_call2(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700558 break;
559 }
560 case QSEOS_APP_LOOKUP_COMMAND: {
561 struct qseecom_check_app_ireq *req;
562 u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
563 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
564
565 if (!tzbuf)
566 return -ENOMEM;
567 req = (struct qseecom_check_app_ireq *)req_buf;
568 pr_debug("Lookup app_name = %s\n", req->app_name);
569 strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
570 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
571 smc_id = TZ_OS_APP_LOOKUP_ID;
572 desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
573 desc.args[0] = virt_to_phys(tzbuf);
574 desc.args[1] = strlen(req->app_name);
575 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800576 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700577 kzfree(tzbuf);
578 break;
579 }
580 case QSEOS_APP_REGION_NOTIFICATION: {
581 struct qsee_apps_region_info_ireq *req;
582 struct qsee_apps_region_info_64bit_ireq *req_64bit;
583
584 smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
585 desc.arginfo =
586 TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
587 if (qseecom.qsee_version < QSEE_VERSION_40) {
588 req = (struct qsee_apps_region_info_ireq *)
589 req_buf;
590 desc.args[0] = req->addr;
591 desc.args[1] = req->size;
592 } else {
593 req_64bit =
594 (struct qsee_apps_region_info_64bit_ireq *)
595 req_buf;
596 desc.args[0] = req_64bit->addr;
597 desc.args[1] = req_64bit->size;
598 }
599 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800600 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700601 break;
602 }
603 case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
604 struct qseecom_load_lib_image_ireq *req;
605 struct qseecom_load_lib_image_64bit_ireq *req_64bit;
606
607 smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
608 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
609 if (qseecom.qsee_version < QSEE_VERSION_40) {
610 req = (struct qseecom_load_lib_image_ireq *)
611 req_buf;
612 desc.args[0] = req->mdt_len;
613 desc.args[1] = req->img_len;
614 desc.args[2] = req->phy_addr;
615 } else {
616 req_64bit =
617 (struct qseecom_load_lib_image_64bit_ireq *)
618 req_buf;
619 desc.args[0] = req_64bit->mdt_len;
620 desc.args[1] = req_64bit->img_len;
621 desc.args[2] = req_64bit->phy_addr;
622 }
623 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800624 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700625 break;
626 }
627 case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
628 smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
629 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
630 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800631 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700632 break;
633 }
634 case QSEOS_REGISTER_LISTENER: {
635 struct qseecom_register_listener_ireq *req;
636 struct qseecom_register_listener_64bit_ireq *req_64bit;
637
638 desc.arginfo =
639 TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
640 if (qseecom.qsee_version < QSEE_VERSION_40) {
641 req = (struct qseecom_register_listener_ireq *)
642 req_buf;
643 desc.args[0] = req->listener_id;
644 desc.args[1] = req->sb_ptr;
645 desc.args[2] = req->sb_len;
646 } else {
647 req_64bit =
648 (struct qseecom_register_listener_64bit_ireq *)
649 req_buf;
650 desc.args[0] = req_64bit->listener_id;
651 desc.args[1] = req_64bit->sb_ptr;
652 desc.args[2] = req_64bit->sb_len;
653 }
Zhen Kong2f60f492017-06-29 15:22:14 -0700654 qseecom.smcinvoke_support = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700655 smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
Zhen Kong03f220d2019-02-01 17:12:34 -0800656 ret = __qseecom_scm_call2_locked(smc_id, &desc);
Zhen Kong50a15202019-01-29 14:16:00 -0800657 if (ret == -EIO) {
658 /* smcinvoke is not supported */
Zhen Kong2f60f492017-06-29 15:22:14 -0700659 qseecom.smcinvoke_support = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700660 smc_id = TZ_OS_REGISTER_LISTENER_ID;
Zhen Kong03f220d2019-02-01 17:12:34 -0800661 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700662 }
663 break;
664 }
665 case QSEOS_DEREGISTER_LISTENER: {
666 struct qseecom_unregister_listener_ireq *req;
667
668 req = (struct qseecom_unregister_listener_ireq *)
669 req_buf;
670 smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
671 desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
672 desc.args[0] = req->listener_id;
Zhen Kong03f220d2019-02-01 17:12:34 -0800673 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700674 break;
675 }
676 case QSEOS_LISTENER_DATA_RSP_COMMAND: {
677 struct qseecom_client_listener_data_irsp *req;
678
679 req = (struct qseecom_client_listener_data_irsp *)
680 req_buf;
681 smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
682 desc.arginfo =
683 TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
684 desc.args[0] = req->listener_id;
685 desc.args[1] = req->status;
Zhen Kong03f220d2019-02-01 17:12:34 -0800686 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700687 break;
688 }
689 case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
690 struct qseecom_client_listener_data_irsp *req;
691 struct qseecom_client_listener_data_64bit_irsp *req_64;
692
693 smc_id =
694 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
695 desc.arginfo =
696 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
697 if (qseecom.qsee_version < QSEE_VERSION_40) {
698 req =
699 (struct qseecom_client_listener_data_irsp *)
700 req_buf;
701 desc.args[0] = req->listener_id;
702 desc.args[1] = req->status;
703 desc.args[2] = req->sglistinfo_ptr;
704 desc.args[3] = req->sglistinfo_len;
705 } else {
706 req_64 =
707 (struct qseecom_client_listener_data_64bit_irsp *)
708 req_buf;
709 desc.args[0] = req_64->listener_id;
710 desc.args[1] = req_64->status;
711 desc.args[2] = req_64->sglistinfo_ptr;
712 desc.args[3] = req_64->sglistinfo_len;
713 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800714 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700715 break;
716 }
717 case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
718 struct qseecom_load_app_ireq *req;
719 struct qseecom_load_app_64bit_ireq *req_64bit;
720
721 smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
722 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
723 if (qseecom.qsee_version < QSEE_VERSION_40) {
724 req = (struct qseecom_load_app_ireq *)req_buf;
725 desc.args[0] = req->mdt_len;
726 desc.args[1] = req->img_len;
727 desc.args[2] = req->phy_addr;
728 } else {
729 req_64bit =
730 (struct qseecom_load_app_64bit_ireq *)req_buf;
731 desc.args[0] = req_64bit->mdt_len;
732 desc.args[1] = req_64bit->img_len;
733 desc.args[2] = req_64bit->phy_addr;
734 }
735 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800736 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700737 break;
738 }
739 case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
740 smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
741 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
742 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800743 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700744 break;
745 }
746
747 case QSEOS_CLIENT_SEND_DATA_COMMAND: {
748 struct qseecom_client_send_data_ireq *req;
749 struct qseecom_client_send_data_64bit_ireq *req_64bit;
750
751 smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
752 desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
753 if (qseecom.qsee_version < QSEE_VERSION_40) {
754 req = (struct qseecom_client_send_data_ireq *)
755 req_buf;
756 desc.args[0] = req->app_id;
757 desc.args[1] = req->req_ptr;
758 desc.args[2] = req->req_len;
759 desc.args[3] = req->rsp_ptr;
760 desc.args[4] = req->rsp_len;
761 } else {
762 req_64bit =
763 (struct qseecom_client_send_data_64bit_ireq *)
764 req_buf;
765 desc.args[0] = req_64bit->app_id;
766 desc.args[1] = req_64bit->req_ptr;
767 desc.args[2] = req_64bit->req_len;
768 desc.args[3] = req_64bit->rsp_ptr;
769 desc.args[4] = req_64bit->rsp_len;
770 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800771 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700772 break;
773 }
774 case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
775 struct qseecom_client_send_data_ireq *req;
776 struct qseecom_client_send_data_64bit_ireq *req_64bit;
777
778 smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
779 desc.arginfo =
780 TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
781 if (qseecom.qsee_version < QSEE_VERSION_40) {
782 req = (struct qseecom_client_send_data_ireq *)
783 req_buf;
784 desc.args[0] = req->app_id;
785 desc.args[1] = req->req_ptr;
786 desc.args[2] = req->req_len;
787 desc.args[3] = req->rsp_ptr;
788 desc.args[4] = req->rsp_len;
789 desc.args[5] = req->sglistinfo_ptr;
790 desc.args[6] = req->sglistinfo_len;
791 } else {
792 req_64bit =
793 (struct qseecom_client_send_data_64bit_ireq *)
794 req_buf;
795 desc.args[0] = req_64bit->app_id;
796 desc.args[1] = req_64bit->req_ptr;
797 desc.args[2] = req_64bit->req_len;
798 desc.args[3] = req_64bit->rsp_ptr;
799 desc.args[4] = req_64bit->rsp_len;
800 desc.args[5] = req_64bit->sglistinfo_ptr;
801 desc.args[6] = req_64bit->sglistinfo_len;
802 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800803 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700804 break;
805 }
806 case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
807 struct qseecom_client_send_service_ireq *req;
808
809 req = (struct qseecom_client_send_service_ireq *)
810 req_buf;
811 smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
812 desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
813 desc.args[0] = req->key_type;
814 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800815 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700816 break;
817 }
818 case QSEOS_RPMB_ERASE_COMMAND: {
819 smc_id = TZ_OS_RPMB_ERASE_ID;
820 desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
821 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800822 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700823 break;
824 }
825 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
826 smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
827 desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
828 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800829 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700830 break;
831 }
832 case QSEOS_GENERATE_KEY: {
833 u32 tzbuflen = PAGE_ALIGN(sizeof
834 (struct qseecom_key_generate_ireq) -
835 sizeof(uint32_t));
836 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
837
838 if (!tzbuf)
839 return -ENOMEM;
840 memset(tzbuf, 0, tzbuflen);
841 memcpy(tzbuf, req_buf + sizeof(uint32_t),
842 (sizeof(struct qseecom_key_generate_ireq) -
843 sizeof(uint32_t)));
844 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
845 smc_id = TZ_OS_KS_GEN_KEY_ID;
846 desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
847 desc.args[0] = virt_to_phys(tzbuf);
848 desc.args[1] = tzbuflen;
849 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800850 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700851 kzfree(tzbuf);
852 break;
853 }
854 case QSEOS_DELETE_KEY: {
855 u32 tzbuflen = PAGE_ALIGN(sizeof
856 (struct qseecom_key_delete_ireq) -
857 sizeof(uint32_t));
858 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
859
860 if (!tzbuf)
861 return -ENOMEM;
862 memset(tzbuf, 0, tzbuflen);
863 memcpy(tzbuf, req_buf + sizeof(uint32_t),
864 (sizeof(struct qseecom_key_delete_ireq) -
865 sizeof(uint32_t)));
866 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
867 smc_id = TZ_OS_KS_DEL_KEY_ID;
868 desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
869 desc.args[0] = virt_to_phys(tzbuf);
870 desc.args[1] = tzbuflen;
871 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800872 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700873 kzfree(tzbuf);
874 break;
875 }
876 case QSEOS_SET_KEY: {
877 u32 tzbuflen = PAGE_ALIGN(sizeof
878 (struct qseecom_key_select_ireq) -
879 sizeof(uint32_t));
880 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
881
882 if (!tzbuf)
883 return -ENOMEM;
884 memset(tzbuf, 0, tzbuflen);
885 memcpy(tzbuf, req_buf + sizeof(uint32_t),
886 (sizeof(struct qseecom_key_select_ireq) -
887 sizeof(uint32_t)));
888 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
889 smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
890 desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
891 desc.args[0] = virt_to_phys(tzbuf);
892 desc.args[1] = tzbuflen;
893 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800894 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700895 kzfree(tzbuf);
896 break;
897 }
898 case QSEOS_UPDATE_KEY_USERINFO: {
899 u32 tzbuflen = PAGE_ALIGN(sizeof
900 (struct qseecom_key_userinfo_update_ireq) -
901 sizeof(uint32_t));
902 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
903
904 if (!tzbuf)
905 return -ENOMEM;
906 memset(tzbuf, 0, tzbuflen);
907 memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
908 (struct qseecom_key_userinfo_update_ireq) -
909 sizeof(uint32_t)));
910 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
911 smc_id = TZ_OS_KS_UPDATE_KEY_ID;
912 desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
913 desc.args[0] = virt_to_phys(tzbuf);
914 desc.args[1] = tzbuflen;
915 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800916 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700917 kzfree(tzbuf);
918 break;
919 }
920 case QSEOS_TEE_OPEN_SESSION: {
921 struct qseecom_qteec_ireq *req;
922 struct qseecom_qteec_64bit_ireq *req_64bit;
923
924 smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
925 desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
926 if (qseecom.qsee_version < QSEE_VERSION_40) {
927 req = (struct qseecom_qteec_ireq *)req_buf;
928 desc.args[0] = req->app_id;
929 desc.args[1] = req->req_ptr;
930 desc.args[2] = req->req_len;
931 desc.args[3] = req->resp_ptr;
932 desc.args[4] = req->resp_len;
933 } else {
934 req_64bit = (struct qseecom_qteec_64bit_ireq *)
935 req_buf;
936 desc.args[0] = req_64bit->app_id;
937 desc.args[1] = req_64bit->req_ptr;
938 desc.args[2] = req_64bit->req_len;
939 desc.args[3] = req_64bit->resp_ptr;
940 desc.args[4] = req_64bit->resp_len;
941 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800942 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700943 break;
944 }
945 case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
946 struct qseecom_qteec_ireq *req;
947 struct qseecom_qteec_64bit_ireq *req_64bit;
948
949 smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
950 desc.arginfo =
951 TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
952 if (qseecom.qsee_version < QSEE_VERSION_40) {
953 req = (struct qseecom_qteec_ireq *)req_buf;
954 desc.args[0] = req->app_id;
955 desc.args[1] = req->req_ptr;
956 desc.args[2] = req->req_len;
957 desc.args[3] = req->resp_ptr;
958 desc.args[4] = req->resp_len;
959 desc.args[5] = req->sglistinfo_ptr;
960 desc.args[6] = req->sglistinfo_len;
961 } else {
962 req_64bit = (struct qseecom_qteec_64bit_ireq *)
963 req_buf;
964 desc.args[0] = req_64bit->app_id;
965 desc.args[1] = req_64bit->req_ptr;
966 desc.args[2] = req_64bit->req_len;
967 desc.args[3] = req_64bit->resp_ptr;
968 desc.args[4] = req_64bit->resp_len;
969 desc.args[5] = req_64bit->sglistinfo_ptr;
970 desc.args[6] = req_64bit->sglistinfo_len;
971 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800972 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700973 break;
974 }
975 case QSEOS_TEE_INVOKE_COMMAND: {
976 struct qseecom_qteec_ireq *req;
977 struct qseecom_qteec_64bit_ireq *req_64bit;
978
979 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
980 desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
981 if (qseecom.qsee_version < QSEE_VERSION_40) {
982 req = (struct qseecom_qteec_ireq *)req_buf;
983 desc.args[0] = req->app_id;
984 desc.args[1] = req->req_ptr;
985 desc.args[2] = req->req_len;
986 desc.args[3] = req->resp_ptr;
987 desc.args[4] = req->resp_len;
988 } else {
989 req_64bit = (struct qseecom_qteec_64bit_ireq *)
990 req_buf;
991 desc.args[0] = req_64bit->app_id;
992 desc.args[1] = req_64bit->req_ptr;
993 desc.args[2] = req_64bit->req_len;
994 desc.args[3] = req_64bit->resp_ptr;
995 desc.args[4] = req_64bit->resp_len;
996 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800997 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700998 break;
999 }
1000 case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
1001 struct qseecom_qteec_ireq *req;
1002 struct qseecom_qteec_64bit_ireq *req_64bit;
1003
1004 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
1005 desc.arginfo =
1006 TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
1007 if (qseecom.qsee_version < QSEE_VERSION_40) {
1008 req = (struct qseecom_qteec_ireq *)req_buf;
1009 desc.args[0] = req->app_id;
1010 desc.args[1] = req->req_ptr;
1011 desc.args[2] = req->req_len;
1012 desc.args[3] = req->resp_ptr;
1013 desc.args[4] = req->resp_len;
1014 desc.args[5] = req->sglistinfo_ptr;
1015 desc.args[6] = req->sglistinfo_len;
1016 } else {
1017 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1018 req_buf;
1019 desc.args[0] = req_64bit->app_id;
1020 desc.args[1] = req_64bit->req_ptr;
1021 desc.args[2] = req_64bit->req_len;
1022 desc.args[3] = req_64bit->resp_ptr;
1023 desc.args[4] = req_64bit->resp_len;
1024 desc.args[5] = req_64bit->sglistinfo_ptr;
1025 desc.args[6] = req_64bit->sglistinfo_len;
1026 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001027 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001028 break;
1029 }
1030 case QSEOS_TEE_CLOSE_SESSION: {
1031 struct qseecom_qteec_ireq *req;
1032 struct qseecom_qteec_64bit_ireq *req_64bit;
1033
1034 smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
1035 desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
1036 if (qseecom.qsee_version < QSEE_VERSION_40) {
1037 req = (struct qseecom_qteec_ireq *)req_buf;
1038 desc.args[0] = req->app_id;
1039 desc.args[1] = req->req_ptr;
1040 desc.args[2] = req->req_len;
1041 desc.args[3] = req->resp_ptr;
1042 desc.args[4] = req->resp_len;
1043 } else {
1044 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1045 req_buf;
1046 desc.args[0] = req_64bit->app_id;
1047 desc.args[1] = req_64bit->req_ptr;
1048 desc.args[2] = req_64bit->req_len;
1049 desc.args[3] = req_64bit->resp_ptr;
1050 desc.args[4] = req_64bit->resp_len;
1051 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001052 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001053 break;
1054 }
1055 case QSEOS_TEE_REQUEST_CANCELLATION: {
1056 struct qseecom_qteec_ireq *req;
1057 struct qseecom_qteec_64bit_ireq *req_64bit;
1058
1059 smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
1060 desc.arginfo =
1061 TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
1062 if (qseecom.qsee_version < QSEE_VERSION_40) {
1063 req = (struct qseecom_qteec_ireq *)req_buf;
1064 desc.args[0] = req->app_id;
1065 desc.args[1] = req->req_ptr;
1066 desc.args[2] = req->req_len;
1067 desc.args[3] = req->resp_ptr;
1068 desc.args[4] = req->resp_len;
1069 } else {
1070 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1071 req_buf;
1072 desc.args[0] = req_64bit->app_id;
1073 desc.args[1] = req_64bit->req_ptr;
1074 desc.args[2] = req_64bit->req_len;
1075 desc.args[3] = req_64bit->resp_ptr;
1076 desc.args[4] = req_64bit->resp_len;
1077 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001078 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001079 break;
1080 }
1081 case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
1082 struct qseecom_continue_blocked_request_ireq *req =
1083 (struct qseecom_continue_blocked_request_ireq *)
1084 req_buf;
Zhen Kong2f60f492017-06-29 15:22:14 -07001085 if (qseecom.smcinvoke_support)
1086 smc_id =
1087 TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
1088 else
1089 smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001090 desc.arginfo =
1091 TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
Zhen Kong2f60f492017-06-29 15:22:14 -07001092 desc.args[0] = req->app_or_session_id;
Zhen Kong03f220d2019-02-01 17:12:34 -08001093 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001094 break;
1095 }
1096 default: {
1097 pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
1098 qseos_cmd_id);
1099 ret = -EINVAL;
1100 break;
1101 }
1102 } /*end of switch (qsee_cmd_id) */
1103 break;
1104 } /*end of case SCM_SVC_TZSCHEDULER*/
1105 default: {
1106 pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
1107 svc_id);
1108 ret = -EINVAL;
1109 break;
1110 }
1111 } /*end of switch svc_id */
1112 scm_resp->result = desc.ret[0];
1113 scm_resp->resp_type = desc.ret[1];
1114 scm_resp->data = desc.ret[2];
1115 pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
1116 svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
1117 pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
1118 scm_resp->result, scm_resp->resp_type, scm_resp->data);
1119 return ret;
1120}
1121
1122
1123static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1124 size_t cmd_len, void *resp_buf, size_t resp_len)
1125{
1126 if (!is_scm_armv8())
1127 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1128 resp_buf, resp_len);
1129 else
1130 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1131}
1132
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001133static struct qseecom_registered_listener_list *__qseecom_find_svc(
1134 int32_t listener_id)
1135{
1136 struct qseecom_registered_listener_list *entry = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001137
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001138 list_for_each_entry(entry,
1139 &qseecom.registered_listener_list_head, list) {
1140 if (entry->svc.listener_id == listener_id)
1141 break;
1142 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001143 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001144 pr_debug("Service id: %u is not found\n", listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001145 return NULL;
1146 }
1147
1148 return entry;
1149}
1150
1151static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1152 struct qseecom_dev_handle *handle,
1153 struct qseecom_register_listener_req *listener)
1154{
1155 int ret = 0;
1156 struct qseecom_register_listener_ireq req;
1157 struct qseecom_register_listener_64bit_ireq req_64bit;
1158 struct qseecom_command_scm_resp resp;
1159 ion_phys_addr_t pa;
1160 void *cmd_buf = NULL;
1161 size_t cmd_len;
1162
1163 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001164 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001165 listener->ifd_data_fd);
1166 if (IS_ERR_OR_NULL(svc->ihandle)) {
1167 pr_err("Ion client could not retrieve the handle\n");
1168 return -ENOMEM;
1169 }
1170
1171 /* Get the physical address of the ION BUF */
1172 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1173 if (ret) {
1174 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1175 ret);
1176 return ret;
1177 }
1178 /* Populate the structure for sending scm call to load image */
1179 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1180 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1181 pr_err("ION memory mapping for listener shared buffer failed\n");
1182 return -ENOMEM;
1183 }
1184 svc->sb_phys = (phys_addr_t)pa;
1185
1186 if (qseecom.qsee_version < QSEE_VERSION_40) {
1187 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1188 req.listener_id = svc->svc.listener_id;
1189 req.sb_len = svc->sb_length;
1190 req.sb_ptr = (uint32_t)svc->sb_phys;
1191 cmd_buf = (void *)&req;
1192 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1193 } else {
1194 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1195 req_64bit.listener_id = svc->svc.listener_id;
1196 req_64bit.sb_len = svc->sb_length;
1197 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1198 cmd_buf = (void *)&req_64bit;
1199 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1200 }
1201
1202 resp.result = QSEOS_RESULT_INCOMPLETE;
1203
Zhen Kongc4c162a2019-01-23 12:07:12 -08001204 mutex_unlock(&listener_access_lock);
1205 mutex_lock(&app_access_lock);
1206 __qseecom_reentrancy_check_if_no_app_blocked(
1207 TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001208 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1209 &resp, sizeof(resp));
Zhen Kongc4c162a2019-01-23 12:07:12 -08001210 mutex_unlock(&app_access_lock);
1211 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001212 if (ret) {
1213 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1214 return -EINVAL;
1215 }
1216
1217 if (resp.result != QSEOS_RESULT_SUCCESS) {
1218 pr_err("Error SB registration req: resp.result = %d\n",
1219 resp.result);
1220 return -EPERM;
1221 }
1222 return 0;
1223}
1224
1225static int qseecom_register_listener(struct qseecom_dev_handle *data,
1226 void __user *argp)
1227{
1228 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001229 struct qseecom_register_listener_req rcvd_lstnr;
1230 struct qseecom_registered_listener_list *new_entry;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001231 struct qseecom_registered_listener_list *ptr_svc;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001232
1233 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1234 if (ret) {
1235 pr_err("copy_from_user failed\n");
1236 return ret;
1237 }
1238 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1239 rcvd_lstnr.sb_size))
1240 return -EFAULT;
1241
Zhen Kongbcdeda22018-11-16 13:50:51 -08001242 ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
1243 if (ptr_svc) {
1244 if (ptr_svc->unregister_pending == false) {
1245 pr_err("Service %d is not unique\n",
Zhen Kong3c674612018-09-06 22:51:27 -07001246 rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001247 data->released = true;
1248 return -EBUSY;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001249 } else {
1250 /*wait until listener is unregistered*/
1251 pr_debug("register %d has to wait\n",
1252 rcvd_lstnr.listener_id);
1253 mutex_unlock(&listener_access_lock);
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301254 ret = wait_event_interruptible(
Zhen Kongbcdeda22018-11-16 13:50:51 -08001255 qseecom.register_lsnr_pending_wq,
1256 list_empty(
1257 &qseecom.unregister_lsnr_pending_list_head));
1258 if (ret) {
1259 pr_err("interrupted register_pending_wq %d\n",
1260 rcvd_lstnr.listener_id);
1261 mutex_lock(&listener_access_lock);
1262 return -ERESTARTSYS;
1263 }
1264 mutex_lock(&listener_access_lock);
1265 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001266 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001267 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1268 if (!new_entry)
1269 return -ENOMEM;
1270 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
Zhen Kongbcdeda22018-11-16 13:50:51 -08001271 new_entry->rcv_req_flag = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001272
1273 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1274 new_entry->sb_length = rcvd_lstnr.sb_size;
1275 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1276 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001277 pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
1278 rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001279 kzfree(new_entry);
1280 return -ENOMEM;
1281 }
1282
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001283 init_waitqueue_head(&new_entry->rcv_req_wq);
1284 init_waitqueue_head(&new_entry->listener_block_app_wq);
1285 new_entry->send_resp_flag = 0;
1286 new_entry->listener_in_use = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001287 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001288
Zhen Konge6ac4132019-09-20 13:49:41 -07001289 data->listener.id = rcvd_lstnr.listener_id;
Zhen Kong52ce9062018-09-24 14:33:27 -07001290 pr_debug("Service %d is registered\n", rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001291 return ret;
1292}
1293
Zhen Kongbcdeda22018-11-16 13:50:51 -08001294static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
1295 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001296{
1297 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001298 struct qseecom_register_listener_ireq req;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001299 struct qseecom_command_scm_resp resp;
1300 struct ion_handle *ihandle = NULL; /* Retrieve phy addr */
1301
1302 req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
1303 req.listener_id = data->listener.id;
1304 resp.result = QSEOS_RESULT_INCOMPLETE;
1305
Zhen Kongc4c162a2019-01-23 12:07:12 -08001306 mutex_unlock(&listener_access_lock);
1307 mutex_lock(&app_access_lock);
1308 __qseecom_reentrancy_check_if_no_app_blocked(
1309 TZ_OS_DEREGISTER_LISTENER_ID);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001310 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
1311 sizeof(req), &resp, sizeof(resp));
Zhen Kongc4c162a2019-01-23 12:07:12 -08001312 mutex_unlock(&app_access_lock);
1313 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001314 if (ret) {
1315 pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
1316 ret, data->listener.id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001317 if (ret == -EBUSY)
1318 return ret;
Zhen Kong3c674612018-09-06 22:51:27 -07001319 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001320 }
1321
1322 if (resp.result != QSEOS_RESULT_SUCCESS) {
1323 pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
1324 resp.result, data->listener.id);
Zhen Kong3c674612018-09-06 22:51:27 -07001325 ret = -EPERM;
1326 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001327 }
1328
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001329 while (atomic_read(&data->ioctl_count) > 1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301330 if (wait_event_interruptible(data->abort_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001331 atomic_read(&data->ioctl_count) <= 1)) {
1332 pr_err("Interrupted from abort\n");
1333 ret = -ERESTARTSYS;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001334 }
1335 }
1336
Zhen Kong3c674612018-09-06 22:51:27 -07001337exit:
1338 if (ptr_svc->sb_virt) {
1339 ihandle = ptr_svc->ihandle;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001340 if (!IS_ERR_OR_NULL(ihandle)) {
1341 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
1342 ion_free(qseecom.ion_clnt, ihandle);
1343 }
1344 }
Zhen Kong3c674612018-09-06 22:51:27 -07001345 list_del(&ptr_svc->list);
1346 kzfree(ptr_svc);
1347
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001348 data->released = true;
Zhen Kong52ce9062018-09-24 14:33:27 -07001349 pr_debug("Service %d is unregistered\n", data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001350 return ret;
1351}
1352
Zhen Kongbcdeda22018-11-16 13:50:51 -08001353static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
1354{
1355 struct qseecom_registered_listener_list *ptr_svc = NULL;
1356 struct qseecom_unregister_pending_list *entry = NULL;
1357
Zhen Konge6ac4132019-09-20 13:49:41 -07001358 if (data->released) {
1359 pr_err("Don't unregister lsnr %d\n", data->listener.id);
1360 return -EINVAL;
1361 }
1362
Zhen Kongbcdeda22018-11-16 13:50:51 -08001363 ptr_svc = __qseecom_find_svc(data->listener.id);
1364 if (!ptr_svc) {
1365 pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
1366 return -ENODATA;
1367 }
1368 /* stop CA thread waiting for listener response */
1369 ptr_svc->abort = 1;
1370 wake_up_interruptible_all(&qseecom.send_resp_wq);
1371
Zhen Kongc4c162a2019-01-23 12:07:12 -08001372 /* stop listener thread waiting for listener request */
1373 data->abort = 1;
1374 wake_up_all(&ptr_svc->rcv_req_wq);
1375
Zhen Kongbcdeda22018-11-16 13:50:51 -08001376 /* return directly if pending*/
1377 if (ptr_svc->unregister_pending)
1378 return 0;
1379
1380 /*add unregistration into pending list*/
1381 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1382 if (!entry)
1383 return -ENOMEM;
1384 entry->data = data;
1385 list_add_tail(&entry->list,
1386 &qseecom.unregister_lsnr_pending_list_head);
1387 ptr_svc->unregister_pending = true;
1388 pr_debug("unregister %d pending\n", data->listener.id);
1389 return 0;
1390}
1391
1392static void __qseecom_processing_pending_lsnr_unregister(void)
1393{
1394 struct qseecom_unregister_pending_list *entry = NULL;
1395 struct qseecom_registered_listener_list *ptr_svc = NULL;
1396 struct list_head *pos;
1397 int ret = 0;
1398
1399 mutex_lock(&listener_access_lock);
1400 while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
1401 pos = qseecom.unregister_lsnr_pending_list_head.next;
1402 entry = list_entry(pos,
1403 struct qseecom_unregister_pending_list, list);
1404 if (entry && entry->data) {
1405 pr_debug("process pending unregister %d\n",
1406 entry->data->listener.id);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08001407 /* don't process if qseecom_release is not called*/
1408 if (!entry->data->listener.release_called)
1409 break;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001410 ptr_svc = __qseecom_find_svc(
1411 entry->data->listener.id);
1412 if (ptr_svc) {
1413 ret = __qseecom_unregister_listener(
1414 entry->data, ptr_svc);
1415 if (ret == -EBUSY) {
1416 pr_debug("unregister %d pending again\n",
1417 entry->data->listener.id);
1418 mutex_unlock(&listener_access_lock);
1419 return;
1420 }
1421 } else
1422 pr_err("invalid listener %d\n",
1423 entry->data->listener.id);
1424 kzfree(entry->data);
1425 }
1426 list_del(pos);
1427 kzfree(entry);
1428 }
1429 mutex_unlock(&listener_access_lock);
1430 wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
1431}
1432
Zhen Kongc4c162a2019-01-23 12:07:12 -08001433static void __wakeup_unregister_listener_kthread(void)
1434{
1435 atomic_set(&qseecom.unregister_lsnr_kthread_state,
1436 LSNR_UNREG_KT_WAKEUP);
1437 wake_up_interruptible(&qseecom.unregister_lsnr_kthread_wq);
1438}
1439
1440static int __qseecom_unregister_listener_kthread_func(void *data)
1441{
1442 while (!kthread_should_stop()) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301443 wait_event_interruptible(
Zhen Kongc4c162a2019-01-23 12:07:12 -08001444 qseecom.unregister_lsnr_kthread_wq,
1445 atomic_read(&qseecom.unregister_lsnr_kthread_state)
1446 == LSNR_UNREG_KT_WAKEUP);
1447 pr_debug("kthread to unregister listener is called %d\n",
1448 atomic_read(&qseecom.unregister_lsnr_kthread_state));
1449 __qseecom_processing_pending_lsnr_unregister();
1450 atomic_set(&qseecom.unregister_lsnr_kthread_state,
1451 LSNR_UNREG_KT_SLEEP);
1452 }
1453 pr_warn("kthread to unregister listener stopped\n");
1454 return 0;
1455}
1456
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001457static int __qseecom_set_msm_bus_request(uint32_t mode)
1458{
1459 int ret = 0;
1460 struct qseecom_clk *qclk;
1461
1462 qclk = &qseecom.qsee;
1463 if (qclk->ce_core_src_clk != NULL) {
1464 if (mode == INACTIVE) {
1465 __qseecom_disable_clk(CLK_QSEE);
1466 } else {
1467 ret = __qseecom_enable_clk(CLK_QSEE);
1468 if (ret)
1469 pr_err("CLK enabling failed (%d) MODE (%d)\n",
1470 ret, mode);
1471 }
1472 }
1473
1474 if ((!ret) && (qseecom.current_mode != mode)) {
1475 ret = msm_bus_scale_client_update_request(
1476 qseecom.qsee_perf_client, mode);
1477 if (ret) {
1478 pr_err("Bandwidth req failed(%d) MODE (%d)\n",
1479 ret, mode);
1480 if (qclk->ce_core_src_clk != NULL) {
1481 if (mode == INACTIVE) {
1482 ret = __qseecom_enable_clk(CLK_QSEE);
1483 if (ret)
1484 pr_err("CLK enable failed\n");
1485 } else
1486 __qseecom_disable_clk(CLK_QSEE);
1487 }
1488 }
1489 qseecom.current_mode = mode;
1490 }
1491 return ret;
1492}
1493
1494static void qseecom_bw_inactive_req_work(struct work_struct *work)
1495{
1496 mutex_lock(&app_access_lock);
1497 mutex_lock(&qsee_bw_mutex);
1498 if (qseecom.timer_running)
1499 __qseecom_set_msm_bus_request(INACTIVE);
1500 pr_debug("current_mode = %d, cumulative_mode = %d\n",
1501 qseecom.current_mode, qseecom.cumulative_mode);
1502 qseecom.timer_running = false;
1503 mutex_unlock(&qsee_bw_mutex);
1504 mutex_unlock(&app_access_lock);
1505}
1506
1507static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
1508{
1509 schedule_work(&qseecom.bw_inactive_req_ws);
1510}
1511
1512static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1513{
1514 struct qseecom_clk *qclk;
1515 int ret = 0;
1516
1517 mutex_lock(&clk_access_lock);
1518 if (ce == CLK_QSEE)
1519 qclk = &qseecom.qsee;
1520 else
1521 qclk = &qseecom.ce_drv;
1522
Zhen Kongf99808af2019-07-09 13:28:24 -07001523 if (qclk->clk_access_cnt > 0) {
1524 qclk->clk_access_cnt--;
1525 } else {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001526 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1527 ret = -EINVAL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001528 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001529
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001530 mutex_unlock(&clk_access_lock);
1531 return ret;
1532}
1533
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001534static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1535{
1536 int32_t ret = 0;
1537 int32_t request_mode = INACTIVE;
1538
1539 mutex_lock(&qsee_bw_mutex);
1540 if (mode == 0) {
1541 if (qseecom.cumulative_mode > MEDIUM)
1542 request_mode = HIGH;
1543 else
1544 request_mode = qseecom.cumulative_mode;
1545 } else {
1546 request_mode = mode;
1547 }
1548
1549 ret = __qseecom_set_msm_bus_request(request_mode);
1550 if (ret) {
1551 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1552 ret, request_mode);
1553 goto err_scale_timer;
1554 }
1555
1556 if (qseecom.timer_running) {
1557 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1558 if (ret) {
1559 pr_err("Failed to decrease clk ref count.\n");
1560 goto err_scale_timer;
1561 }
1562 del_timer_sync(&(qseecom.bw_scale_down_timer));
1563 qseecom.timer_running = false;
1564 }
1565err_scale_timer:
1566 mutex_unlock(&qsee_bw_mutex);
1567 return ret;
1568}
1569
1570
1571static int qseecom_unregister_bus_bandwidth_needs(
1572 struct qseecom_dev_handle *data)
1573{
1574 int32_t ret = 0;
1575
1576 qseecom.cumulative_mode -= data->mode;
1577 data->mode = INACTIVE;
1578
1579 return ret;
1580}
1581
1582static int __qseecom_register_bus_bandwidth_needs(
1583 struct qseecom_dev_handle *data, uint32_t request_mode)
1584{
1585 int32_t ret = 0;
1586
1587 if (data->mode == INACTIVE) {
1588 qseecom.cumulative_mode += request_mode;
1589 data->mode = request_mode;
1590 } else {
1591 if (data->mode != request_mode) {
1592 qseecom.cumulative_mode -= data->mode;
1593 qseecom.cumulative_mode += request_mode;
1594 data->mode = request_mode;
1595 }
1596 }
1597 return ret;
1598}
1599
1600static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1601{
1602 int ret = 0;
1603
1604 ret = qsee_vote_for_clock(data, CLK_DFAB);
1605 if (ret) {
1606 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1607 goto perf_enable_exit;
1608 }
1609 ret = qsee_vote_for_clock(data, CLK_SFPB);
1610 if (ret) {
1611 qsee_disable_clock_vote(data, CLK_DFAB);
1612 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1613 goto perf_enable_exit;
1614 }
1615
1616perf_enable_exit:
1617 return ret;
1618}
1619
1620static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1621 void __user *argp)
1622{
1623 int32_t ret = 0;
1624 int32_t req_mode;
1625
1626 if (qseecom.no_clock_support)
1627 return 0;
1628
1629 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1630 if (ret) {
1631 pr_err("copy_from_user failed\n");
1632 return ret;
1633 }
1634 if (req_mode > HIGH) {
1635 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1636 return -EINVAL;
1637 }
1638
1639 /*
1640 * Register bus bandwidth needs if bus scaling feature is enabled;
1641 * otherwise, qseecom enable/disable clocks for the client directly.
1642 */
1643 if (qseecom.support_bus_scaling) {
1644 mutex_lock(&qsee_bw_mutex);
1645 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1646 mutex_unlock(&qsee_bw_mutex);
1647 } else {
1648 pr_debug("Bus scaling feature is NOT enabled\n");
1649 pr_debug("request bandwidth mode %d for the client\n",
1650 req_mode);
1651 if (req_mode != INACTIVE) {
1652 ret = qseecom_perf_enable(data);
1653 if (ret)
1654 pr_err("Failed to vote for clock with err %d\n",
1655 ret);
1656 } else {
1657 qsee_disable_clock_vote(data, CLK_DFAB);
1658 qsee_disable_clock_vote(data, CLK_SFPB);
1659 }
1660 }
1661 return ret;
1662}
1663
1664static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1665{
1666 if (qseecom.no_clock_support)
1667 return;
1668
1669 mutex_lock(&qsee_bw_mutex);
1670 qseecom.bw_scale_down_timer.expires = jiffies +
1671 msecs_to_jiffies(duration);
1672 mod_timer(&(qseecom.bw_scale_down_timer),
1673 qseecom.bw_scale_down_timer.expires);
1674 qseecom.timer_running = true;
1675 mutex_unlock(&qsee_bw_mutex);
1676}
1677
1678static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1679{
1680 if (!qseecom.support_bus_scaling)
1681 qsee_disable_clock_vote(data, CLK_SFPB);
1682 else
1683 __qseecom_add_bw_scale_down_timer(
1684 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1685}
1686
1687static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1688{
1689 int ret = 0;
1690
1691 if (qseecom.support_bus_scaling) {
1692 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1693 if (ret)
1694 pr_err("Failed to set bw MEDIUM.\n");
1695 } else {
1696 ret = qsee_vote_for_clock(data, CLK_SFPB);
1697 if (ret)
1698 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1699 }
1700 return ret;
1701}
1702
1703static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1704 void __user *argp)
1705{
1706 ion_phys_addr_t pa;
1707 int32_t ret;
1708 struct qseecom_set_sb_mem_param_req req;
1709 size_t len;
1710
1711 /* Copy the relevant information needed for loading the image */
1712 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1713 return -EFAULT;
1714
1715 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1716 (req.sb_len == 0)) {
1717 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1718 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1719 return -EFAULT;
1720 }
1721 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1722 req.sb_len))
1723 return -EFAULT;
1724
1725 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001726 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001727 req.ifd_data_fd);
1728 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1729 pr_err("Ion client could not retrieve the handle\n");
1730 return -ENOMEM;
1731 }
1732 /* Get the physical address of the ION BUF */
1733 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1734 if (ret) {
1735
1736 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1737 ret);
1738 return ret;
1739 }
1740
1741 if (len < req.sb_len) {
1742 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1743 req.sb_len, len);
1744 return -EINVAL;
1745 }
1746 /* Populate the structure for sending scm call to load image */
1747 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1748 data->client.ihandle);
1749 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1750 pr_err("ION memory mapping for client shared buf failed\n");
1751 return -ENOMEM;
1752 }
1753 data->client.sb_phys = (phys_addr_t)pa;
1754 data->client.sb_length = req.sb_len;
1755 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1756 return 0;
1757}
1758
Zhen Kong26e62742018-05-04 17:19:06 -07001759static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1760 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001761{
1762 int ret;
1763
1764 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001765 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001766}
1767
1768static int __qseecom_reentrancy_listener_has_sent_rsp(
1769 struct qseecom_dev_handle *data,
1770 struct qseecom_registered_listener_list *ptr_svc)
1771{
1772 int ret;
1773
1774 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001775 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001776}
1777
1778static void __qseecom_clean_listener_sglistinfo(
1779 struct qseecom_registered_listener_list *ptr_svc)
1780{
1781 if (ptr_svc->sglist_cnt) {
1782 memset(ptr_svc->sglistinfo_ptr, 0,
1783 SGLISTINFO_TABLE_SIZE);
1784 ptr_svc->sglist_cnt = 0;
1785 }
1786}
1787
1788static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1789 struct qseecom_command_scm_resp *resp)
1790{
1791 int ret = 0;
1792 int rc = 0;
1793 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07001794 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
1795 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
1796 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001797 struct qseecom_registered_listener_list *ptr_svc = NULL;
1798 sigset_t new_sigset;
1799 sigset_t old_sigset;
1800 uint32_t status;
1801 void *cmd_buf = NULL;
1802 size_t cmd_len;
1803 struct sglist_info *table = NULL;
1804
Zhen Kongbcdeda22018-11-16 13:50:51 -08001805 qseecom.app_block_ref_cnt++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001806 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1807 lstnr = resp->data;
1808 /*
1809 * Wake up blocking lsitener service with the lstnr id
1810 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08001811 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001812 list_for_each_entry(ptr_svc,
1813 &qseecom.registered_listener_list_head, list) {
1814 if (ptr_svc->svc.listener_id == lstnr) {
1815 ptr_svc->listener_in_use = true;
1816 ptr_svc->rcv_req_flag = 1;
1817 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1818 break;
1819 }
1820 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001821
1822 if (ptr_svc == NULL) {
1823 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07001824 rc = -EINVAL;
1825 status = QSEOS_RESULT_FAILURE;
1826 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001827 }
1828
1829 if (!ptr_svc->ihandle) {
1830 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07001831 rc = -EINVAL;
1832 status = QSEOS_RESULT_FAILURE;
1833 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001834 }
1835
1836 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07001837 pr_err("Service %d does not exist\n",
1838 lstnr);
1839 rc = -ERESTARTSYS;
1840 ptr_svc = NULL;
1841 status = QSEOS_RESULT_FAILURE;
1842 goto err_resp;
1843 }
1844
1845 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001846 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07001847 lstnr, ptr_svc->abort);
1848 rc = -ENODEV;
1849 status = QSEOS_RESULT_FAILURE;
1850 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001851 }
Zhen Kong25731112018-09-20 13:10:03 -07001852
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001853 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1854
1855 /* initialize the new signal mask with all signals*/
1856 sigfillset(&new_sigset);
1857 /* block all signals */
1858 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1859
Zhen Kongbcdeda22018-11-16 13:50:51 -08001860 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001861 do {
1862 /*
1863 * When reentrancy is not supported, check global
1864 * send_resp_flag; otherwise, check this listener's
1865 * send_resp_flag.
1866 */
1867 if (!qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301868 !wait_event_interruptible(qseecom.send_resp_wq,
Zhen Kong26e62742018-05-04 17:19:06 -07001869 __qseecom_listener_has_sent_rsp(
1870 data, ptr_svc))) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001871 break;
1872 }
1873
1874 if (qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301875 !wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001876 __qseecom_reentrancy_listener_has_sent_rsp(
1877 data, ptr_svc))) {
1878 break;
1879 }
1880 } while (1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001881 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001882 /* restore signal mask */
1883 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07001884 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001885 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
1886 data->client.app_id, lstnr, ret);
1887 rc = -ENODEV;
1888 status = QSEOS_RESULT_FAILURE;
1889 } else {
1890 status = QSEOS_RESULT_SUCCESS;
1891 }
Zhen Kong26e62742018-05-04 17:19:06 -07001892err_resp:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001893 qseecom.send_resp_flag = 0;
Zhen Kong7d500032018-08-06 16:58:31 -07001894 if (ptr_svc) {
1895 ptr_svc->send_resp_flag = 0;
1896 table = ptr_svc->sglistinfo_ptr;
1897 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001898 if (qseecom.qsee_version < QSEE_VERSION_40) {
1899 send_data_rsp.listener_id = lstnr;
1900 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001901 if (table) {
1902 send_data_rsp.sglistinfo_ptr =
1903 (uint32_t)virt_to_phys(table);
1904 send_data_rsp.sglistinfo_len =
1905 SGLISTINFO_TABLE_SIZE;
1906 dmac_flush_range((void *)table,
1907 (void *)table + SGLISTINFO_TABLE_SIZE);
1908 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001909 cmd_buf = (void *)&send_data_rsp;
1910 cmd_len = sizeof(send_data_rsp);
1911 } else {
1912 send_data_rsp_64bit.listener_id = lstnr;
1913 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001914 if (table) {
1915 send_data_rsp_64bit.sglistinfo_ptr =
1916 virt_to_phys(table);
1917 send_data_rsp_64bit.sglistinfo_len =
1918 SGLISTINFO_TABLE_SIZE;
1919 dmac_flush_range((void *)table,
1920 (void *)table + SGLISTINFO_TABLE_SIZE);
1921 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001922 cmd_buf = (void *)&send_data_rsp_64bit;
1923 cmd_len = sizeof(send_data_rsp_64bit);
1924 }
Zhen Kong7d500032018-08-06 16:58:31 -07001925 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001926 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1927 else
1928 *(uint32_t *)cmd_buf =
1929 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07001930 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001931 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1932 ptr_svc->ihandle,
1933 ptr_svc->sb_virt, ptr_svc->sb_length,
1934 ION_IOC_CLEAN_INV_CACHES);
1935 if (ret) {
1936 pr_err("cache operation failed %d\n", ret);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001937 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001938 }
1939 }
1940
1941 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1942 ret = __qseecom_enable_clk(CLK_QSEE);
1943 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08001944 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001945 }
1946
1947 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1948 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07001949 if (ptr_svc) {
1950 ptr_svc->listener_in_use = false;
1951 __qseecom_clean_listener_sglistinfo(ptr_svc);
1952 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001953 if (ret) {
1954 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1955 ret, data->client.app_id);
1956 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1957 __qseecom_disable_clk(CLK_QSEE);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001958 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001959 }
Zhen Kong26e62742018-05-04 17:19:06 -07001960 pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
1961 status, resp->result, data->client.app_id, lstnr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001962 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1963 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1964 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1965 resp->result, data->client.app_id, lstnr);
1966 ret = -EINVAL;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001967 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001968 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001969exit:
1970 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001971 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1972 __qseecom_disable_clk(CLK_QSEE);
1973
1974 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001975 qseecom.app_block_ref_cnt--;
Zhen Kongcc580932019-04-23 22:16:56 -07001976 wake_up_interruptible_all(&qseecom.app_block_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001977 if (rc)
1978 return rc;
1979
1980 return ret;
1981}
1982
Zhen Konga91aaf02018-02-02 17:21:04 -08001983static int __qseecom_process_reentrancy_blocked_on_listener(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001984 struct qseecom_command_scm_resp *resp,
1985 struct qseecom_registered_app_list *ptr_app,
1986 struct qseecom_dev_handle *data)
1987{
1988 struct qseecom_registered_listener_list *list_ptr;
1989 int ret = 0;
1990 struct qseecom_continue_blocked_request_ireq ireq;
1991 struct qseecom_command_scm_resp continue_resp;
Zhen Konga91aaf02018-02-02 17:21:04 -08001992 unsigned int session_id;
Zhen Kong3d1d92f2018-02-02 17:21:04 -08001993 sigset_t new_sigset;
1994 sigset_t old_sigset;
Zhen Konga91aaf02018-02-02 17:21:04 -08001995 unsigned long flags;
1996 bool found_app = false;
Zhen Kong0ea975d2019-03-12 14:40:24 -07001997 struct qseecom_registered_app_list dummy_app_entry = { {NULL} };
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001998
1999 if (!resp || !data) {
2000 pr_err("invalid resp or data pointer\n");
2001 ret = -EINVAL;
2002 goto exit;
2003 }
2004
2005 /* find app_id & img_name from list */
Zhen Kong0ea975d2019-03-12 14:40:24 -07002006 if (!ptr_app) {
2007 if (data->client.from_smcinvoke) {
2008 pr_debug("This request is from smcinvoke\n");
2009 ptr_app = &dummy_app_entry;
2010 ptr_app->app_id = data->client.app_id;
2011 } else {
2012 spin_lock_irqsave(&qseecom.registered_app_list_lock,
2013 flags);
2014 list_for_each_entry(ptr_app,
2015 &qseecom.registered_app_list_head, list) {
2016 if ((ptr_app->app_id == data->client.app_id) &&
2017 (!strcmp(ptr_app->app_name,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002018 data->client.app_name))) {
Zhen Kong0ea975d2019-03-12 14:40:24 -07002019 found_app = true;
2020 break;
2021 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002022 }
Zhen Kong0ea975d2019-03-12 14:40:24 -07002023 spin_unlock_irqrestore(
2024 &qseecom.registered_app_list_lock, flags);
2025 if (!found_app) {
2026 pr_err("app_id %d (%s) is not found\n",
2027 data->client.app_id,
2028 (char *)data->client.app_name);
2029 ret = -ENOENT;
2030 goto exit;
2031 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002032 }
2033 }
2034
Zhen Kongd8cc0052017-11-13 15:13:31 -08002035 do {
Zhen Konga91aaf02018-02-02 17:21:04 -08002036 session_id = resp->resp_type;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002037 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002038 list_ptr = __qseecom_find_svc(resp->data);
2039 if (!list_ptr) {
2040 pr_err("Invalid listener ID %d\n", resp->data);
2041 ret = -ENODATA;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002042 mutex_unlock(&listener_access_lock);
Zhen Konge7f525f2017-12-01 18:26:25 -08002043 goto exit;
2044 }
Zhen Konga91aaf02018-02-02 17:21:04 -08002045 ptr_app->blocked_on_listener_id = resp->data;
2046
2047 pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
2048 resp->data, list_ptr->listener_in_use,
2049 session_id, data->client.app_id);
2050
2051 /* sleep until listener is available */
2052 sigfillset(&new_sigset);
2053 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2054
2055 do {
2056 qseecom.app_block_ref_cnt++;
2057 ptr_app->app_blocked = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002058 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002059 mutex_unlock(&app_access_lock);
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302060 wait_event_interruptible(
Zhen Konga91aaf02018-02-02 17:21:04 -08002061 list_ptr->listener_block_app_wq,
2062 !list_ptr->listener_in_use);
2063 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002064 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002065 ptr_app->app_blocked = false;
2066 qseecom.app_block_ref_cnt--;
2067 } while (list_ptr->listener_in_use);
2068
2069 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2070
2071 ptr_app->blocked_on_listener_id = 0;
2072 pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
2073 resp->data, session_id, data->client.app_id);
2074
2075 /* notify TZ that listener is available */
2076 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
2077
2078 if (qseecom.smcinvoke_support)
2079 ireq.app_or_session_id = session_id;
2080 else
2081 ireq.app_or_session_id = data->client.app_id;
2082
2083 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2084 &ireq, sizeof(ireq),
2085 &continue_resp, sizeof(continue_resp));
2086 if (ret && qseecom.smcinvoke_support) {
2087 /* retry with legacy cmd */
2088 qseecom.smcinvoke_support = false;
2089 ireq.app_or_session_id = data->client.app_id;
2090 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2091 &ireq, sizeof(ireq),
2092 &continue_resp, sizeof(continue_resp));
2093 qseecom.smcinvoke_support = true;
2094 if (ret) {
2095 pr_err("unblock app %d or session %d fail\n",
2096 data->client.app_id, session_id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002097 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002098 goto exit;
2099 }
2100 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08002101 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002102 resp->result = continue_resp.result;
2103 resp->resp_type = continue_resp.resp_type;
2104 resp->data = continue_resp.data;
2105 pr_debug("unblock resp = %d\n", resp->result);
2106 } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
2107
2108 if (resp->result != QSEOS_RESULT_INCOMPLETE) {
2109 pr_err("Unexpected unblock resp %d\n", resp->result);
2110 ret = -EINVAL;
Zhen Kong2f60f492017-06-29 15:22:14 -07002111 }
Zhen Kong2f60f492017-06-29 15:22:14 -07002112exit:
2113 return ret;
2114}
2115
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002116static int __qseecom_reentrancy_process_incomplete_cmd(
2117 struct qseecom_dev_handle *data,
2118 struct qseecom_command_scm_resp *resp)
2119{
2120 int ret = 0;
2121 int rc = 0;
2122 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07002123 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
2124 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
2125 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002126 struct qseecom_registered_listener_list *ptr_svc = NULL;
2127 sigset_t new_sigset;
2128 sigset_t old_sigset;
2129 uint32_t status;
2130 void *cmd_buf = NULL;
2131 size_t cmd_len;
2132 struct sglist_info *table = NULL;
2133
Zhen Kong26e62742018-05-04 17:19:06 -07002134 while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002135 lstnr = resp->data;
2136 /*
2137 * Wake up blocking lsitener service with the lstnr id
2138 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002139 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002140 list_for_each_entry(ptr_svc,
2141 &qseecom.registered_listener_list_head, list) {
2142 if (ptr_svc->svc.listener_id == lstnr) {
2143 ptr_svc->listener_in_use = true;
2144 ptr_svc->rcv_req_flag = 1;
2145 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2146 break;
2147 }
2148 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002149
2150 if (ptr_svc == NULL) {
2151 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07002152 rc = -EINVAL;
2153 status = QSEOS_RESULT_FAILURE;
2154 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002155 }
2156
2157 if (!ptr_svc->ihandle) {
2158 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07002159 rc = -EINVAL;
2160 status = QSEOS_RESULT_FAILURE;
2161 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002162 }
2163
2164 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07002165 pr_err("Service %d does not exist\n",
2166 lstnr);
2167 rc = -ERESTARTSYS;
2168 ptr_svc = NULL;
2169 status = QSEOS_RESULT_FAILURE;
2170 goto err_resp;
2171 }
2172
2173 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08002174 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07002175 lstnr, ptr_svc->abort);
2176 rc = -ENODEV;
2177 status = QSEOS_RESULT_FAILURE;
2178 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002179 }
Zhen Kong25731112018-09-20 13:10:03 -07002180
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002181 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2182
2183 /* initialize the new signal mask with all signals*/
2184 sigfillset(&new_sigset);
2185
2186 /* block all signals */
2187 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2188
2189 /* unlock mutex btw waking listener and sleep-wait */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002190 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002191 mutex_unlock(&app_access_lock);
2192 do {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302193 if (!wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002194 __qseecom_reentrancy_listener_has_sent_rsp(
2195 data, ptr_svc))) {
2196 break;
2197 }
2198 } while (1);
2199 /* lock mutex again after resp sent */
2200 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002201 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002202 ptr_svc->send_resp_flag = 0;
2203 qseecom.send_resp_flag = 0;
2204
2205 /* restore signal mask */
2206 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07002207 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002208 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
2209 data->client.app_id, lstnr, ret);
2210 rc = -ENODEV;
2211 status = QSEOS_RESULT_FAILURE;
2212 } else {
2213 status = QSEOS_RESULT_SUCCESS;
2214 }
Zhen Kong26e62742018-05-04 17:19:06 -07002215err_resp:
Zhen Kong7d500032018-08-06 16:58:31 -07002216 if (ptr_svc)
2217 table = ptr_svc->sglistinfo_ptr;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002218 if (qseecom.qsee_version < QSEE_VERSION_40) {
2219 send_data_rsp.listener_id = lstnr;
2220 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002221 if (table) {
2222 send_data_rsp.sglistinfo_ptr =
2223 (uint32_t)virt_to_phys(table);
2224 send_data_rsp.sglistinfo_len =
2225 SGLISTINFO_TABLE_SIZE;
2226 dmac_flush_range((void *)table,
2227 (void *)table + SGLISTINFO_TABLE_SIZE);
2228 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002229 cmd_buf = (void *)&send_data_rsp;
2230 cmd_len = sizeof(send_data_rsp);
2231 } else {
2232 send_data_rsp_64bit.listener_id = lstnr;
2233 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002234 if (table) {
2235 send_data_rsp_64bit.sglistinfo_ptr =
2236 virt_to_phys(table);
2237 send_data_rsp_64bit.sglistinfo_len =
2238 SGLISTINFO_TABLE_SIZE;
2239 dmac_flush_range((void *)table,
2240 (void *)table + SGLISTINFO_TABLE_SIZE);
2241 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002242 cmd_buf = (void *)&send_data_rsp_64bit;
2243 cmd_len = sizeof(send_data_rsp_64bit);
2244 }
Zhen Kong7d500032018-08-06 16:58:31 -07002245 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002246 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2247 else
2248 *(uint32_t *)cmd_buf =
2249 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07002250 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002251 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2252 ptr_svc->ihandle,
2253 ptr_svc->sb_virt, ptr_svc->sb_length,
2254 ION_IOC_CLEAN_INV_CACHES);
2255 if (ret) {
2256 pr_err("cache operation failed %d\n", ret);
2257 return ret;
2258 }
2259 }
2260 if (lstnr == RPMB_SERVICE) {
2261 ret = __qseecom_enable_clk(CLK_QSEE);
2262 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08002263 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002264 }
2265
2266 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2267 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07002268 if (ptr_svc) {
2269 ptr_svc->listener_in_use = false;
2270 __qseecom_clean_listener_sglistinfo(ptr_svc);
2271 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2272 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002273
2274 if (ret) {
2275 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2276 ret, data->client.app_id);
2277 goto exit;
2278 }
2279
2280 switch (resp->result) {
2281 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2282 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2283 lstnr, data->client.app_id, resp->data);
2284 if (lstnr == resp->data) {
2285 pr_err("lstnr %d should not be blocked!\n",
2286 lstnr);
2287 ret = -EINVAL;
2288 goto exit;
2289 }
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002290 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002291 ret = __qseecom_process_reentrancy_blocked_on_listener(
2292 resp, NULL, data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002293 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002294 if (ret) {
2295 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2296 data->client.app_id,
2297 data->client.app_name, resp->data);
2298 goto exit;
2299 }
2300 case QSEOS_RESULT_SUCCESS:
2301 case QSEOS_RESULT_INCOMPLETE:
2302 break;
2303 default:
2304 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2305 resp->result, data->client.app_id, lstnr);
2306 ret = -EINVAL;
2307 goto exit;
2308 }
2309exit:
Zhen Kongbcdeda22018-11-16 13:50:51 -08002310 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002311 if (lstnr == RPMB_SERVICE)
2312 __qseecom_disable_clk(CLK_QSEE);
2313
2314 }
2315 if (rc)
2316 return rc;
2317
2318 return ret;
2319}
2320
2321/*
2322 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2323 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2324 * So, needs to first check if no app blocked before sending OS level scm call,
2325 * then wait until all apps are unblocked.
2326 */
2327static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2328{
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002329 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2330 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2331 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2332 /* thread sleep until this app unblocked */
2333 while (qseecom.app_block_ref_cnt > 0) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002334 mutex_unlock(&app_access_lock);
Zhen Kongcc580932019-04-23 22:16:56 -07002335 wait_event_interruptible(qseecom.app_block_wq,
2336 (!qseecom.app_block_ref_cnt));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002337 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002338 }
2339 }
2340}
2341
2342/*
2343 * scm_call of send data will fail if this TA is blocked or there are more
2344 * than one TA requesting listener services; So, first check to see if need
2345 * to wait.
2346 */
2347static void __qseecom_reentrancy_check_if_this_app_blocked(
2348 struct qseecom_registered_app_list *ptr_app)
2349{
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002350 if (qseecom.qsee_reentrancy_support) {
Zhen Kongdea10592018-07-30 17:50:10 -07002351 ptr_app->check_block++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002352 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2353 /* thread sleep until this app unblocked */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002354 mutex_unlock(&app_access_lock);
Zhen Kongcc580932019-04-23 22:16:56 -07002355 wait_event_interruptible(qseecom.app_block_wq,
2356 (!ptr_app->app_blocked &&
2357 qseecom.app_block_ref_cnt <= 1));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002358 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002359 }
Zhen Kongdea10592018-07-30 17:50:10 -07002360 ptr_app->check_block--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002361 }
2362}
2363
2364static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2365 uint32_t *app_id)
2366{
2367 int32_t ret;
2368 struct qseecom_command_scm_resp resp;
2369 bool found_app = false;
2370 struct qseecom_registered_app_list *entry = NULL;
2371 unsigned long flags = 0;
2372
2373 if (!app_id) {
2374 pr_err("Null pointer to app_id\n");
2375 return -EINVAL;
2376 }
2377 *app_id = 0;
2378
2379 /* check if app exists and has been registered locally */
2380 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2381 list_for_each_entry(entry,
2382 &qseecom.registered_app_list_head, list) {
2383 if (!strcmp(entry->app_name, req.app_name)) {
2384 found_app = true;
2385 break;
2386 }
2387 }
2388 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2389 if (found_app) {
2390 pr_debug("Found app with id %d\n", entry->app_id);
2391 *app_id = entry->app_id;
2392 return 0;
2393 }
2394
2395 memset((void *)&resp, 0, sizeof(resp));
2396
2397 /* SCM_CALL to check if app_id for the mentioned app exists */
2398 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2399 sizeof(struct qseecom_check_app_ireq),
2400 &resp, sizeof(resp));
2401 if (ret) {
2402 pr_err("scm_call to check if app is already loaded failed\n");
2403 return -EINVAL;
2404 }
2405
2406 if (resp.result == QSEOS_RESULT_FAILURE)
2407 return 0;
2408
2409 switch (resp.resp_type) {
2410 /*qsee returned listener type response */
2411 case QSEOS_LISTENER_ID:
2412 pr_err("resp type is of listener type instead of app");
2413 return -EINVAL;
2414 case QSEOS_APP_ID:
2415 *app_id = resp.data;
2416 return 0;
2417 default:
2418 pr_err("invalid resp type (%d) from qsee",
2419 resp.resp_type);
2420 return -ENODEV;
2421 }
2422}
2423
2424static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2425{
2426 struct qseecom_registered_app_list *entry = NULL;
2427 unsigned long flags = 0;
2428 u32 app_id = 0;
2429 struct ion_handle *ihandle; /* Ion handle */
2430 struct qseecom_load_img_req load_img_req;
2431 int32_t ret = 0;
2432 ion_phys_addr_t pa = 0;
2433 size_t len;
2434 struct qseecom_command_scm_resp resp;
2435 struct qseecom_check_app_ireq req;
2436 struct qseecom_load_app_ireq load_req;
2437 struct qseecom_load_app_64bit_ireq load_req_64bit;
2438 void *cmd_buf = NULL;
2439 size_t cmd_len;
2440 bool first_time = false;
2441
2442 /* Copy the relevant information needed for loading the image */
2443 if (copy_from_user(&load_img_req,
2444 (void __user *)argp,
2445 sizeof(struct qseecom_load_img_req))) {
2446 pr_err("copy_from_user failed\n");
2447 return -EFAULT;
2448 }
2449
2450 /* Check and load cmnlib */
2451 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2452 if (!qseecom.commonlib_loaded &&
2453 load_img_req.app_arch == ELFCLASS32) {
2454 ret = qseecom_load_commonlib_image(data, "cmnlib");
2455 if (ret) {
2456 pr_err("failed to load cmnlib\n");
2457 return -EIO;
2458 }
2459 qseecom.commonlib_loaded = true;
2460 pr_debug("cmnlib is loaded\n");
2461 }
2462
2463 if (!qseecom.commonlib64_loaded &&
2464 load_img_req.app_arch == ELFCLASS64) {
2465 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2466 if (ret) {
2467 pr_err("failed to load cmnlib64\n");
2468 return -EIO;
2469 }
2470 qseecom.commonlib64_loaded = true;
2471 pr_debug("cmnlib64 is loaded\n");
2472 }
2473 }
2474
2475 if (qseecom.support_bus_scaling) {
2476 mutex_lock(&qsee_bw_mutex);
2477 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2478 mutex_unlock(&qsee_bw_mutex);
2479 if (ret)
2480 return ret;
2481 }
2482
2483 /* Vote for the SFPB clock */
2484 ret = __qseecom_enable_clk_scale_up(data);
2485 if (ret)
2486 goto enable_clk_err;
2487
2488 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2489 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2490 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2491
2492 ret = __qseecom_check_app_exists(req, &app_id);
2493 if (ret < 0)
2494 goto loadapp_err;
2495
2496 if (app_id) {
2497 pr_debug("App id %d (%s) already exists\n", app_id,
2498 (char *)(req.app_name));
2499 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2500 list_for_each_entry(entry,
2501 &qseecom.registered_app_list_head, list){
2502 if (entry->app_id == app_id) {
2503 entry->ref_cnt++;
2504 break;
2505 }
2506 }
2507 spin_unlock_irqrestore(
2508 &qseecom.registered_app_list_lock, flags);
2509 ret = 0;
2510 } else {
2511 first_time = true;
2512 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2513 (char *)(load_img_req.img_name));
2514 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002515 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002516 load_img_req.ifd_data_fd);
2517 if (IS_ERR_OR_NULL(ihandle)) {
2518 pr_err("Ion client could not retrieve the handle\n");
2519 ret = -ENOMEM;
2520 goto loadapp_err;
2521 }
2522
2523 /* Get the physical address of the ION BUF */
2524 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2525 if (ret) {
2526 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2527 ret);
2528 goto loadapp_err;
2529 }
2530 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2531 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2532 len, load_img_req.mdt_len,
2533 load_img_req.img_len);
2534 ret = -EINVAL;
2535 goto loadapp_err;
2536 }
2537 /* Populate the structure for sending scm call to load image */
2538 if (qseecom.qsee_version < QSEE_VERSION_40) {
2539 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2540 load_req.mdt_len = load_img_req.mdt_len;
2541 load_req.img_len = load_img_req.img_len;
2542 strlcpy(load_req.app_name, load_img_req.img_name,
2543 MAX_APP_NAME_SIZE);
2544 load_req.phy_addr = (uint32_t)pa;
2545 cmd_buf = (void *)&load_req;
2546 cmd_len = sizeof(struct qseecom_load_app_ireq);
2547 } else {
2548 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2549 load_req_64bit.mdt_len = load_img_req.mdt_len;
2550 load_req_64bit.img_len = load_img_req.img_len;
2551 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2552 MAX_APP_NAME_SIZE);
2553 load_req_64bit.phy_addr = (uint64_t)pa;
2554 cmd_buf = (void *)&load_req_64bit;
2555 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2556 }
2557
2558 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2559 ION_IOC_CLEAN_INV_CACHES);
2560 if (ret) {
2561 pr_err("cache operation failed %d\n", ret);
2562 goto loadapp_err;
2563 }
2564
2565 /* SCM_CALL to load the app and get the app_id back */
2566 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2567 cmd_len, &resp, sizeof(resp));
2568 if (ret) {
2569 pr_err("scm_call to load app failed\n");
2570 if (!IS_ERR_OR_NULL(ihandle))
2571 ion_free(qseecom.ion_clnt, ihandle);
2572 ret = -EINVAL;
2573 goto loadapp_err;
2574 }
2575
2576 if (resp.result == QSEOS_RESULT_FAILURE) {
2577 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2578 if (!IS_ERR_OR_NULL(ihandle))
2579 ion_free(qseecom.ion_clnt, ihandle);
2580 ret = -EFAULT;
2581 goto loadapp_err;
2582 }
2583
2584 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2585 ret = __qseecom_process_incomplete_cmd(data, &resp);
2586 if (ret) {
Zhen Kong03b2eae2019-09-17 16:58:46 -07002587 /* TZ has created app_id, need to unload it */
2588 pr_err("incomp_cmd err %d, %d, unload %d %s\n",
2589 ret, resp.result, resp.data,
2590 load_img_req.img_name);
2591 __qseecom_unload_app(data, resp.data);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002592 if (!IS_ERR_OR_NULL(ihandle))
2593 ion_free(qseecom.ion_clnt, ihandle);
2594 ret = -EFAULT;
2595 goto loadapp_err;
2596 }
2597 }
2598
2599 if (resp.result != QSEOS_RESULT_SUCCESS) {
2600 pr_err("scm_call failed resp.result unknown, %d\n",
2601 resp.result);
2602 if (!IS_ERR_OR_NULL(ihandle))
2603 ion_free(qseecom.ion_clnt, ihandle);
2604 ret = -EFAULT;
2605 goto loadapp_err;
2606 }
2607
2608 app_id = resp.data;
2609
2610 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2611 if (!entry) {
2612 ret = -ENOMEM;
2613 goto loadapp_err;
2614 }
2615 entry->app_id = app_id;
2616 entry->ref_cnt = 1;
2617 entry->app_arch = load_img_req.app_arch;
2618 /*
2619 * keymaster app may be first loaded as "keymaste" by qseecomd,
2620 * and then used as "keymaster" on some targets. To avoid app
2621 * name checking error, register "keymaster" into app_list and
2622 * thread private data.
2623 */
2624 if (!strcmp(load_img_req.img_name, "keymaste"))
2625 strlcpy(entry->app_name, "keymaster",
2626 MAX_APP_NAME_SIZE);
2627 else
2628 strlcpy(entry->app_name, load_img_req.img_name,
2629 MAX_APP_NAME_SIZE);
2630 entry->app_blocked = false;
2631 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002632 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002633
2634 /* Deallocate the handle */
2635 if (!IS_ERR_OR_NULL(ihandle))
2636 ion_free(qseecom.ion_clnt, ihandle);
2637
2638 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2639 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2640 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2641 flags);
2642
2643 pr_warn("App with id %u (%s) now loaded\n", app_id,
2644 (char *)(load_img_req.img_name));
2645 }
2646 data->client.app_id = app_id;
2647 data->client.app_arch = load_img_req.app_arch;
2648 if (!strcmp(load_img_req.img_name, "keymaste"))
2649 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2650 else
2651 strlcpy(data->client.app_name, load_img_req.img_name,
2652 MAX_APP_NAME_SIZE);
2653 load_img_req.app_id = app_id;
2654 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2655 pr_err("copy_to_user failed\n");
2656 ret = -EFAULT;
2657 if (first_time == true) {
2658 spin_lock_irqsave(
2659 &qseecom.registered_app_list_lock, flags);
2660 list_del(&entry->list);
2661 spin_unlock_irqrestore(
2662 &qseecom.registered_app_list_lock, flags);
2663 kzfree(entry);
2664 }
2665 }
2666
2667loadapp_err:
2668 __qseecom_disable_clk_scale_down(data);
2669enable_clk_err:
2670 if (qseecom.support_bus_scaling) {
2671 mutex_lock(&qsee_bw_mutex);
2672 qseecom_unregister_bus_bandwidth_needs(data);
2673 mutex_unlock(&qsee_bw_mutex);
2674 }
2675 return ret;
2676}
2677
2678static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2679{
2680 int ret = 1; /* Set unload app */
2681
2682 wake_up_all(&qseecom.send_resp_wq);
2683 if (qseecom.qsee_reentrancy_support)
2684 mutex_unlock(&app_access_lock);
2685 while (atomic_read(&data->ioctl_count) > 1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302686 if (wait_event_interruptible(data->abort_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002687 atomic_read(&data->ioctl_count) <= 1)) {
2688 pr_err("Interrupted from abort\n");
2689 ret = -ERESTARTSYS;
2690 break;
2691 }
2692 }
2693 if (qseecom.qsee_reentrancy_support)
2694 mutex_lock(&app_access_lock);
2695 return ret;
2696}
2697
2698static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2699{
2700 int ret = 0;
2701
2702 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2703 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2704 ion_free(qseecom.ion_clnt, data->client.ihandle);
jitendrathakarec7ff9e42019-09-12 19:46:48 +05302705 memset((void *)&data->client,
2706 0, sizeof(struct qseecom_client_handle));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002707 }
2708 return ret;
2709}
2710
Zhen Kong03b2eae2019-09-17 16:58:46 -07002711static int __qseecom_unload_app(struct qseecom_dev_handle *data,
2712 uint32_t app_id)
2713{
2714 struct qseecom_unload_app_ireq req;
2715 struct qseecom_command_scm_resp resp;
2716 int ret = 0;
2717
2718 /* Populate the structure for sending scm call to load image */
2719 req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
2720 req.app_id = app_id;
2721
2722 /* SCM_CALL to unload the app */
2723 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2724 sizeof(struct qseecom_unload_app_ireq),
2725 &resp, sizeof(resp));
2726 if (ret) {
2727 pr_err("scm_call to unload app (id = %d) failed\n", app_id);
2728 return -EFAULT;
2729 }
2730 switch (resp.result) {
2731 case QSEOS_RESULT_SUCCESS:
2732 pr_warn("App (%d) is unloaded\n", app_id);
2733 break;
2734 case QSEOS_RESULT_INCOMPLETE:
2735 ret = __qseecom_process_incomplete_cmd(data, &resp);
2736 if (ret)
2737 pr_err("unload app %d fail proc incom cmd: %d,%d,%d\n",
2738 app_id, ret, resp.result, resp.data);
2739 else
2740 pr_warn("App (%d) is unloaded\n", app_id);
2741 break;
2742 case QSEOS_RESULT_FAILURE:
2743 pr_err("app (%d) unload_failed!!\n", app_id);
2744 ret = -EFAULT;
2745 break;
2746 default:
2747 pr_err("unload app %d get unknown resp.result %d\n",
2748 app_id, resp.result);
2749 ret = -EFAULT;
2750 break;
2751 }
2752 return ret;
2753}
2754
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002755static int qseecom_unload_app(struct qseecom_dev_handle *data,
2756 bool app_crash)
2757{
2758 unsigned long flags;
2759 unsigned long flags1;
2760 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002761 struct qseecom_registered_app_list *ptr_app = NULL;
2762 bool unload = false;
2763 bool found_app = false;
2764 bool found_dead_app = false;
Zhen Kong03b2eae2019-09-17 16:58:46 -07002765 bool doublecheck = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002766
2767 if (!data) {
2768 pr_err("Invalid/uninitialized device handle\n");
2769 return -EINVAL;
2770 }
2771
2772 if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
2773 pr_debug("Do not unload keymaster app from tz\n");
2774 goto unload_exit;
2775 }
2776
2777 __qseecom_cleanup_app(data);
2778 __qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
2779
2780 if (data->client.app_id > 0) {
2781 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2782 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
2783 list) {
2784 if (ptr_app->app_id == data->client.app_id) {
2785 if (!strcmp((void *)ptr_app->app_name,
2786 (void *)data->client.app_name)) {
2787 found_app = true;
Zhen Kong024798b2018-07-13 18:14:26 -07002788 if (ptr_app->app_blocked ||
2789 ptr_app->check_block)
Zhen Kongaf93d7a2017-10-13 14:01:48 -07002790 app_crash = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002791 if (app_crash || ptr_app->ref_cnt == 1)
2792 unload = true;
2793 break;
2794 }
2795 found_dead_app = true;
2796 break;
2797 }
2798 }
2799 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2800 flags);
2801 if (found_app == false && found_dead_app == false) {
2802 pr_err("Cannot find app with id = %d (%s)\n",
2803 data->client.app_id,
2804 (char *)data->client.app_name);
2805 ret = -EINVAL;
2806 goto unload_exit;
2807 }
2808 }
2809
2810 if (found_dead_app)
2811 pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
2812 (char *)data->client.app_name);
2813
2814 if (unload) {
Zhen Kong03b2eae2019-09-17 16:58:46 -07002815 ret = __qseecom_unload_app(data, data->client.app_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002816
Zhen Kongf818f152019-03-13 12:31:32 -07002817 /* double check if this app_entry still exists */
Zhen Kongf818f152019-03-13 12:31:32 -07002818 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2819 list_for_each_entry(ptr_app,
2820 &qseecom.registered_app_list_head, list) {
2821 if ((ptr_app->app_id == data->client.app_id) &&
2822 (!strcmp((void *)ptr_app->app_name,
2823 (void *)data->client.app_name))) {
2824 doublecheck = true;
2825 break;
2826 }
2827 }
2828 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2829 flags1);
2830 if (!doublecheck) {
2831 pr_warn("app %d(%s) entry is already removed\n",
2832 data->client.app_id,
2833 (char *)data->client.app_name);
2834 found_app = false;
2835 }
2836 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07002837
Zhen Kong7d500032018-08-06 16:58:31 -07002838unload_exit:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002839 if (found_app) {
2840 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2841 if (app_crash) {
2842 ptr_app->ref_cnt = 0;
2843 pr_debug("app_crash: ref_count = 0\n");
2844 } else {
2845 if (ptr_app->ref_cnt == 1) {
2846 ptr_app->ref_cnt = 0;
2847 pr_debug("ref_count set to 0\n");
2848 } else {
2849 ptr_app->ref_cnt--;
2850 pr_debug("Can't unload app(%d) inuse\n",
2851 ptr_app->app_id);
2852 }
2853 }
2854 if (unload) {
2855 list_del(&ptr_app->list);
2856 kzfree(ptr_app);
2857 }
2858 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2859 flags1);
2860 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002861 qseecom_unmap_ion_allocated_memory(data);
2862 data->released = true;
2863 return ret;
2864}
2865
Zhen Kong03b2eae2019-09-17 16:58:46 -07002866
2867static int qseecom_prepare_unload_app(struct qseecom_dev_handle *data)
2868{
2869 struct qseecom_unload_app_pending_list *entry = NULL;
2870
2871 pr_debug("prepare to unload app(%d)(%s), pending %d\n",
2872 data->client.app_id, data->client.app_name,
2873 data->client.unload_pending);
2874 if (data->client.unload_pending)
2875 return 0;
2876 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2877 if (!entry)
2878 return -ENOMEM;
2879 entry->data = data;
Zhen Kong03b2eae2019-09-17 16:58:46 -07002880 list_add_tail(&entry->list,
2881 &qseecom.unload_app_pending_list_head);
Zhen Kong03b2eae2019-09-17 16:58:46 -07002882 data->client.unload_pending = true;
2883 pr_debug("unload ta %d pending\n", data->client.app_id);
2884 return 0;
2885}
2886
2887static void __wakeup_unload_app_kthread(void)
2888{
2889 atomic_set(&qseecom.unload_app_kthread_state,
2890 UNLOAD_APP_KT_WAKEUP);
2891 wake_up_interruptible(&qseecom.unload_app_kthread_wq);
2892}
2893
2894static bool __qseecom_find_pending_unload_app(uint32_t app_id, char *app_name)
2895{
2896 struct qseecom_unload_app_pending_list *entry = NULL;
2897 bool found = false;
2898
2899 mutex_lock(&unload_app_pending_list_lock);
2900 list_for_each_entry(entry, &qseecom.unload_app_pending_list_head,
2901 list) {
2902 if ((entry->data->client.app_id == app_id) &&
2903 (!strcmp(entry->data->client.app_name, app_name))) {
2904 found = true;
2905 break;
2906 }
2907 }
2908 mutex_unlock(&unload_app_pending_list_lock);
2909 return found;
2910}
2911
2912static void __qseecom_processing_pending_unload_app(void)
2913{
2914 struct qseecom_unload_app_pending_list *entry = NULL;
2915 struct list_head *pos;
2916 int ret = 0;
2917
2918 mutex_lock(&unload_app_pending_list_lock);
2919 while (!list_empty(&qseecom.unload_app_pending_list_head)) {
2920 pos = qseecom.unload_app_pending_list_head.next;
2921 entry = list_entry(pos,
2922 struct qseecom_unload_app_pending_list, list);
2923 if (entry && entry->data) {
2924 pr_debug("process pending unload app %d (%s)\n",
2925 entry->data->client.app_id,
2926 entry->data->client.app_name);
2927 mutex_unlock(&unload_app_pending_list_lock);
2928 mutex_lock(&app_access_lock);
2929 ret = qseecom_unload_app(entry->data, true);
2930 if (ret)
2931 pr_err("unload app %d pending failed %d\n",
2932 entry->data->client.app_id, ret);
2933 mutex_unlock(&app_access_lock);
2934 mutex_lock(&unload_app_pending_list_lock);
2935 kzfree(entry->data);
2936 }
2937 list_del(pos);
2938 kzfree(entry);
2939 }
2940 mutex_unlock(&unload_app_pending_list_lock);
2941}
2942
2943static int __qseecom_unload_app_kthread_func(void *data)
2944{
2945 while (!kthread_should_stop()) {
2946 wait_event_interruptible(
2947 qseecom.unload_app_kthread_wq,
2948 atomic_read(&qseecom.unload_app_kthread_state)
2949 == UNLOAD_APP_KT_WAKEUP);
2950 pr_debug("kthread to unload app is called, state %d\n",
2951 atomic_read(&qseecom.unload_app_kthread_state));
2952 __qseecom_processing_pending_unload_app();
2953 atomic_set(&qseecom.unload_app_kthread_state,
2954 UNLOAD_APP_KT_SLEEP);
2955 }
2956 pr_warn("kthread to unload app stopped\n");
2957 return 0;
2958}
2959
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002960static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2961 unsigned long virt)
2962{
2963 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2964}
2965
2966static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2967 unsigned long virt)
2968{
2969 return (uintptr_t)data->client.sb_virt +
2970 (virt - data->client.user_virt_sb_base);
2971}
2972
2973int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2974 struct qseecom_send_svc_cmd_req *req_ptr,
2975 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2976{
2977 int ret = 0;
2978 void *req_buf = NULL;
2979
2980 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2981 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2982 req_ptr, send_svc_ireq_ptr);
2983 return -EINVAL;
2984 }
2985
2986 /* Clients need to ensure req_buf is at base offset of shared buffer */
2987 if ((uintptr_t)req_ptr->cmd_req_buf !=
2988 data_ptr->client.user_virt_sb_base) {
2989 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2990 return -EINVAL;
2991 }
2992
2993 if (data_ptr->client.sb_length <
2994 sizeof(struct qseecom_rpmb_provision_key)) {
2995 pr_err("shared buffer is too small to hold key type\n");
2996 return -EINVAL;
2997 }
2998 req_buf = data_ptr->client.sb_virt;
2999
3000 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
3001 send_svc_ireq_ptr->key_type =
3002 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
3003 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
3004 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3005 data_ptr, (uintptr_t)req_ptr->resp_buf));
3006 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
3007
3008 return ret;
3009}
3010
3011int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
3012 struct qseecom_send_svc_cmd_req *req_ptr,
3013 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
3014{
3015 int ret = 0;
3016 uint32_t reqd_len_sb_in = 0;
3017
3018 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
3019 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
3020 req_ptr, send_svc_ireq_ptr);
3021 return -EINVAL;
3022 }
3023
3024 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
3025 if (reqd_len_sb_in > data_ptr->client.sb_length) {
3026 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
3027 pr_err("Required: %u, Available: %zu\n",
3028 reqd_len_sb_in, data_ptr->client.sb_length);
3029 return -ENOMEM;
3030 }
3031
3032 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
3033 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
3034 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3035 data_ptr, (uintptr_t)req_ptr->resp_buf));
3036 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
3037
3038 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3039 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
3040
3041
3042 return ret;
3043}
3044
3045static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
3046 struct qseecom_send_svc_cmd_req *req)
3047{
3048 if (!req || !req->resp_buf || !req->cmd_req_buf) {
3049 pr_err("req or cmd buffer or response buffer is null\n");
3050 return -EINVAL;
3051 }
3052
3053 if (!data || !data->client.ihandle) {
3054 pr_err("Client or client handle is not initialized\n");
3055 return -EINVAL;
3056 }
3057
3058 if (data->client.sb_virt == NULL) {
3059 pr_err("sb_virt null\n");
3060 return -EINVAL;
3061 }
3062
3063 if (data->client.user_virt_sb_base == 0) {
3064 pr_err("user_virt_sb_base is null\n");
3065 return -EINVAL;
3066 }
3067
3068 if (data->client.sb_length == 0) {
3069 pr_err("sb_length is 0\n");
3070 return -EINVAL;
3071 }
3072
3073 if (((uintptr_t)req->cmd_req_buf <
3074 data->client.user_virt_sb_base) ||
3075 ((uintptr_t)req->cmd_req_buf >=
3076 (data->client.user_virt_sb_base + data->client.sb_length))) {
3077 pr_err("cmd buffer address not within shared bufffer\n");
3078 return -EINVAL;
3079 }
3080 if (((uintptr_t)req->resp_buf <
3081 data->client.user_virt_sb_base) ||
3082 ((uintptr_t)req->resp_buf >=
3083 (data->client.user_virt_sb_base + data->client.sb_length))) {
3084 pr_err("response buffer address not within shared bufffer\n");
3085 return -EINVAL;
3086 }
3087 if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
3088 (req->cmd_req_len > data->client.sb_length) ||
3089 (req->resp_len > data->client.sb_length)) {
3090 pr_err("cmd buf length or response buf length not valid\n");
3091 return -EINVAL;
3092 }
3093 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3094 pr_err("Integer overflow detected in req_len & rsp_len\n");
3095 return -EINVAL;
3096 }
3097
3098 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3099 pr_debug("Not enough memory to fit cmd_buf.\n");
3100 pr_debug("resp_buf. Required: %u, Available: %zu\n",
3101 (req->cmd_req_len + req->resp_len),
3102 data->client.sb_length);
3103 return -ENOMEM;
3104 }
3105 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3106 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3107 return -EINVAL;
3108 }
3109 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3110 pr_err("Integer overflow in resp_len & resp_buf\n");
3111 return -EINVAL;
3112 }
3113 if (data->client.user_virt_sb_base >
3114 (ULONG_MAX - data->client.sb_length)) {
3115 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3116 return -EINVAL;
3117 }
3118 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3119 ((uintptr_t)data->client.user_virt_sb_base +
3120 data->client.sb_length)) ||
3121 (((uintptr_t)req->resp_buf + req->resp_len) >
3122 ((uintptr_t)data->client.user_virt_sb_base +
3123 data->client.sb_length))) {
3124 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3125 return -EINVAL;
3126 }
3127 return 0;
3128}
3129
3130static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
3131 void __user *argp)
3132{
3133 int ret = 0;
3134 struct qseecom_client_send_service_ireq send_svc_ireq;
3135 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
3136 struct qseecom_command_scm_resp resp;
3137 struct qseecom_send_svc_cmd_req req;
3138 void *send_req_ptr;
3139 size_t req_buf_size;
3140
3141 /*struct qseecom_command_scm_resp resp;*/
3142
3143 if (copy_from_user(&req,
3144 (void __user *)argp,
3145 sizeof(req))) {
3146 pr_err("copy_from_user failed\n");
3147 return -EFAULT;
3148 }
3149
3150 if (__validate_send_service_cmd_inputs(data, &req))
3151 return -EINVAL;
3152
3153 data->type = QSEECOM_SECURE_SERVICE;
3154
3155 switch (req.cmd_id) {
3156 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
3157 case QSEOS_RPMB_ERASE_COMMAND:
3158 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
3159 send_req_ptr = &send_svc_ireq;
3160 req_buf_size = sizeof(send_svc_ireq);
3161 if (__qseecom_process_rpmb_svc_cmd(data, &req,
3162 send_req_ptr))
3163 return -EINVAL;
3164 break;
3165 case QSEOS_FSM_LTEOTA_REQ_CMD:
3166 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
3167 case QSEOS_FSM_IKE_REQ_CMD:
3168 case QSEOS_FSM_IKE_REQ_RSP_CMD:
3169 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
3170 case QSEOS_FSM_OEM_FUSE_READ_ROW:
3171 case QSEOS_FSM_ENCFS_REQ_CMD:
3172 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
3173 send_req_ptr = &send_fsm_key_svc_ireq;
3174 req_buf_size = sizeof(send_fsm_key_svc_ireq);
3175 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
3176 send_req_ptr))
3177 return -EINVAL;
3178 break;
3179 default:
3180 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
3181 return -EINVAL;
3182 }
3183
3184 if (qseecom.support_bus_scaling) {
3185 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
3186 if (ret) {
3187 pr_err("Fail to set bw HIGH\n");
3188 return ret;
3189 }
3190 } else {
3191 ret = qseecom_perf_enable(data);
3192 if (ret) {
3193 pr_err("Failed to vote for clocks with err %d\n", ret);
3194 goto exit;
3195 }
3196 }
3197
3198 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3199 data->client.sb_virt, data->client.sb_length,
3200 ION_IOC_CLEAN_INV_CACHES);
3201 if (ret) {
3202 pr_err("cache operation failed %d\n", ret);
3203 goto exit;
3204 }
3205 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3206 (const void *)send_req_ptr,
3207 req_buf_size, &resp, sizeof(resp));
3208 if (ret) {
3209 pr_err("qseecom_scm_call failed with err: %d\n", ret);
3210 if (!qseecom.support_bus_scaling) {
3211 qsee_disable_clock_vote(data, CLK_DFAB);
3212 qsee_disable_clock_vote(data, CLK_SFPB);
3213 } else {
3214 __qseecom_add_bw_scale_down_timer(
3215 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3216 }
3217 goto exit;
3218 }
3219 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3220 data->client.sb_virt, data->client.sb_length,
3221 ION_IOC_INV_CACHES);
3222 if (ret) {
3223 pr_err("cache operation failed %d\n", ret);
3224 goto exit;
3225 }
3226 switch (resp.result) {
3227 case QSEOS_RESULT_SUCCESS:
3228 break;
3229 case QSEOS_RESULT_INCOMPLETE:
3230 pr_debug("qseos_result_incomplete\n");
3231 ret = __qseecom_process_incomplete_cmd(data, &resp);
3232 if (ret) {
3233 pr_err("process_incomplete_cmd fail with result: %d\n",
3234 resp.result);
3235 }
3236 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
3237 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05303238 if (put_user(resp.result,
3239 (uint32_t __user *)req.resp_buf)) {
3240 ret = -EINVAL;
3241 goto exit;
3242 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003243 ret = 0;
3244 }
3245 break;
3246 case QSEOS_RESULT_FAILURE:
3247 pr_err("scm call failed with resp.result: %d\n", resp.result);
3248 ret = -EINVAL;
3249 break;
3250 default:
3251 pr_err("Response result %d not supported\n",
3252 resp.result);
3253 ret = -EINVAL;
3254 break;
3255 }
3256 if (!qseecom.support_bus_scaling) {
3257 qsee_disable_clock_vote(data, CLK_DFAB);
3258 qsee_disable_clock_vote(data, CLK_SFPB);
3259 } else {
3260 __qseecom_add_bw_scale_down_timer(
3261 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3262 }
3263
3264exit:
3265 return ret;
3266}
3267
3268static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
3269 struct qseecom_send_cmd_req *req)
3270
3271{
3272 if (!data || !data->client.ihandle) {
3273 pr_err("Client or client handle is not initialized\n");
3274 return -EINVAL;
3275 }
3276 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
3277 (req->cmd_req_buf == NULL)) {
3278 pr_err("cmd buffer or response buffer is null\n");
3279 return -EINVAL;
3280 }
3281 if (((uintptr_t)req->cmd_req_buf <
3282 data->client.user_virt_sb_base) ||
3283 ((uintptr_t)req->cmd_req_buf >=
3284 (data->client.user_virt_sb_base + data->client.sb_length))) {
3285 pr_err("cmd buffer address not within shared bufffer\n");
3286 return -EINVAL;
3287 }
3288 if (((uintptr_t)req->resp_buf <
3289 data->client.user_virt_sb_base) ||
3290 ((uintptr_t)req->resp_buf >=
3291 (data->client.user_virt_sb_base + data->client.sb_length))) {
3292 pr_err("response buffer address not within shared bufffer\n");
3293 return -EINVAL;
3294 }
3295 if ((req->cmd_req_len == 0) ||
3296 (req->cmd_req_len > data->client.sb_length) ||
3297 (req->resp_len > data->client.sb_length)) {
3298 pr_err("cmd buf length or response buf length not valid\n");
3299 return -EINVAL;
3300 }
3301 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3302 pr_err("Integer overflow detected in req_len & rsp_len\n");
3303 return -EINVAL;
3304 }
3305
3306 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3307 pr_debug("Not enough memory to fit cmd_buf.\n");
3308 pr_debug("resp_buf. Required: %u, Available: %zu\n",
3309 (req->cmd_req_len + req->resp_len),
3310 data->client.sb_length);
3311 return -ENOMEM;
3312 }
3313 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3314 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3315 return -EINVAL;
3316 }
3317 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3318 pr_err("Integer overflow in resp_len & resp_buf\n");
3319 return -EINVAL;
3320 }
3321 if (data->client.user_virt_sb_base >
3322 (ULONG_MAX - data->client.sb_length)) {
3323 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3324 return -EINVAL;
3325 }
3326 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3327 ((uintptr_t)data->client.user_virt_sb_base +
3328 data->client.sb_length)) ||
3329 (((uintptr_t)req->resp_buf + req->resp_len) >
3330 ((uintptr_t)data->client.user_virt_sb_base +
3331 data->client.sb_length))) {
3332 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3333 return -EINVAL;
3334 }
3335 return 0;
3336}
3337
3338int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
3339 struct qseecom_registered_app_list *ptr_app,
3340 struct qseecom_dev_handle *data)
3341{
3342 int ret = 0;
3343
3344 switch (resp->result) {
3345 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
3346 pr_warn("App(%d) %s is blocked on listener %d\n",
3347 data->client.app_id, data->client.app_name,
3348 resp->data);
3349 ret = __qseecom_process_reentrancy_blocked_on_listener(
3350 resp, ptr_app, data);
3351 if (ret) {
3352 pr_err("failed to process App(%d) %s is blocked on listener %d\n",
3353 data->client.app_id, data->client.app_name, resp->data);
3354 return ret;
3355 }
3356
3357 case QSEOS_RESULT_INCOMPLETE:
3358 qseecom.app_block_ref_cnt++;
3359 ptr_app->app_blocked = true;
3360 ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
3361 ptr_app->app_blocked = false;
3362 qseecom.app_block_ref_cnt--;
Zhen Kongcc580932019-04-23 22:16:56 -07003363 wake_up_interruptible_all(&qseecom.app_block_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003364 if (ret)
3365 pr_err("process_incomplete_cmd failed err: %d\n",
3366 ret);
3367 return ret;
3368 case QSEOS_RESULT_SUCCESS:
3369 return ret;
3370 default:
3371 pr_err("Response result %d not supported\n",
3372 resp->result);
3373 return -EINVAL;
3374 }
3375}
3376
3377static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
3378 struct qseecom_send_cmd_req *req)
3379{
3380 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07003381 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003382 u32 reqd_len_sb_in = 0;
3383 struct qseecom_client_send_data_ireq send_data_req = {0};
3384 struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
3385 struct qseecom_command_scm_resp resp;
3386 unsigned long flags;
3387 struct qseecom_registered_app_list *ptr_app;
3388 bool found_app = false;
3389 void *cmd_buf = NULL;
3390 size_t cmd_len;
3391 struct sglist_info *table = data->sglistinfo_ptr;
3392
3393 reqd_len_sb_in = req->cmd_req_len + req->resp_len;
3394 /* find app_id & img_name from list */
3395 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
3396 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
3397 list) {
3398 if ((ptr_app->app_id == data->client.app_id) &&
3399 (!strcmp(ptr_app->app_name, data->client.app_name))) {
3400 found_app = true;
3401 break;
3402 }
3403 }
3404 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
3405
3406 if (!found_app) {
3407 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
3408 (char *)data->client.app_name);
3409 return -ENOENT;
3410 }
3411
Zhen Kong03b2eae2019-09-17 16:58:46 -07003412 if (__qseecom_find_pending_unload_app(data->client.app_id,
3413 data->client.app_name)) {
3414 pr_err("app %d (%s) unload is pending\n",
3415 data->client.app_id, data->client.app_name);
3416 return -ENOENT;
3417 }
3418
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003419 if (qseecom.qsee_version < QSEE_VERSION_40) {
3420 send_data_req.app_id = data->client.app_id;
3421 send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3422 data, (uintptr_t)req->cmd_req_buf));
3423 send_data_req.req_len = req->cmd_req_len;
3424 send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3425 data, (uintptr_t)req->resp_buf));
3426 send_data_req.rsp_len = req->resp_len;
3427 send_data_req.sglistinfo_ptr =
3428 (uint32_t)virt_to_phys(table);
3429 send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3430 dmac_flush_range((void *)table,
3431 (void *)table + SGLISTINFO_TABLE_SIZE);
3432 cmd_buf = (void *)&send_data_req;
3433 cmd_len = sizeof(struct qseecom_client_send_data_ireq);
3434 } else {
3435 send_data_req_64bit.app_id = data->client.app_id;
3436 send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
3437 (uintptr_t)req->cmd_req_buf);
3438 send_data_req_64bit.req_len = req->cmd_req_len;
3439 send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
3440 (uintptr_t)req->resp_buf);
3441 send_data_req_64bit.rsp_len = req->resp_len;
3442 /* check if 32bit app's phys_addr region is under 4GB.*/
3443 if ((data->client.app_arch == ELFCLASS32) &&
3444 ((send_data_req_64bit.req_ptr >=
3445 PHY_ADDR_4G - send_data_req_64bit.req_len) ||
3446 (send_data_req_64bit.rsp_ptr >=
3447 PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
3448 pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
3449 data->client.app_name,
3450 send_data_req_64bit.req_ptr,
3451 send_data_req_64bit.req_len,
3452 send_data_req_64bit.rsp_ptr,
3453 send_data_req_64bit.rsp_len);
3454 return -EFAULT;
3455 }
3456 send_data_req_64bit.sglistinfo_ptr =
3457 (uint64_t)virt_to_phys(table);
3458 send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3459 dmac_flush_range((void *)table,
3460 (void *)table + SGLISTINFO_TABLE_SIZE);
3461 cmd_buf = (void *)&send_data_req_64bit;
3462 cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
3463 }
3464
3465 if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
3466 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
3467 else
3468 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
3469
3470 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3471 data->client.sb_virt,
3472 reqd_len_sb_in,
3473 ION_IOC_CLEAN_INV_CACHES);
3474 if (ret) {
3475 pr_err("cache operation failed %d\n", ret);
3476 return ret;
3477 }
3478
3479 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
3480
3481 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3482 cmd_buf, cmd_len,
3483 &resp, sizeof(resp));
3484 if (ret) {
3485 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
3486 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07003487 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003488 }
3489
3490 if (qseecom.qsee_reentrancy_support) {
3491 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07003492 if (ret)
3493 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003494 } else {
3495 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
3496 ret = __qseecom_process_incomplete_cmd(data, &resp);
3497 if (ret) {
3498 pr_err("process_incomplete_cmd failed err: %d\n",
3499 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07003500 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003501 }
3502 } else {
3503 if (resp.result != QSEOS_RESULT_SUCCESS) {
3504 pr_err("Response result %d not supported\n",
3505 resp.result);
3506 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07003507 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003508 }
3509 }
3510 }
Zhen Kong4af480e2017-09-19 14:34:16 -07003511exit:
3512 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003513 data->client.sb_virt, data->client.sb_length,
3514 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07003515 if (ret2) {
3516 pr_err("cache operation failed %d\n", ret2);
3517 return ret2;
3518 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003519 return ret;
3520}
3521
3522static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3523{
3524 int ret = 0;
3525 struct qseecom_send_cmd_req req;
3526
3527 ret = copy_from_user(&req, argp, sizeof(req));
3528 if (ret) {
3529 pr_err("copy_from_user failed\n");
3530 return ret;
3531 }
3532
3533 if (__validate_send_cmd_inputs(data, &req))
3534 return -EINVAL;
3535
3536 ret = __qseecom_send_cmd(data, &req);
3537
3538 if (ret)
3539 return ret;
3540
3541 return ret;
3542}
3543
3544int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3545 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3546 struct qseecom_dev_handle *data, int i) {
3547
3548 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3549 (req->ifd_data[i].fd > 0)) {
3550 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3551 (req->ifd_data[i].cmd_buf_offset >
3552 req->cmd_req_len - sizeof(uint32_t))) {
3553 pr_err("Invalid offset (req len) 0x%x\n",
3554 req->ifd_data[i].cmd_buf_offset);
3555 return -EINVAL;
3556 }
3557 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3558 (lstnr_resp->ifd_data[i].fd > 0)) {
3559 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3560 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3561 lstnr_resp->resp_len - sizeof(uint32_t))) {
3562 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3563 lstnr_resp->ifd_data[i].cmd_buf_offset);
3564 return -EINVAL;
3565 }
3566 }
3567 return 0;
3568}
3569
Zhen Kongd097c6e02019-08-01 16:10:20 -07003570static int __boundary_checks_offset_64(struct qseecom_send_modfd_cmd_req *req,
3571 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3572 struct qseecom_dev_handle *data, int i)
3573{
3574
3575 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3576 (req->ifd_data[i].fd > 0)) {
3577 if ((req->cmd_req_len < sizeof(uint64_t)) ||
3578 (req->ifd_data[i].cmd_buf_offset >
3579 req->cmd_req_len - sizeof(uint64_t))) {
3580 pr_err("Invalid offset (req len) 0x%x\n",
3581 req->ifd_data[i].cmd_buf_offset);
3582 return -EINVAL;
3583 }
3584 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3585 (lstnr_resp->ifd_data[i].fd > 0)) {
3586 if ((lstnr_resp->resp_len < sizeof(uint64_t)) ||
3587 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3588 lstnr_resp->resp_len - sizeof(uint64_t))) {
3589 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3590 lstnr_resp->ifd_data[i].cmd_buf_offset);
3591 return -EINVAL;
3592 }
3593 }
3594 return 0;
3595}
3596
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003597static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
3598 struct qseecom_dev_handle *data)
3599{
3600 struct ion_handle *ihandle;
3601 char *field;
3602 int ret = 0;
3603 int i = 0;
3604 uint32_t len = 0;
3605 struct scatterlist *sg;
3606 struct qseecom_send_modfd_cmd_req *req = NULL;
3607 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3608 struct qseecom_registered_listener_list *this_lstnr = NULL;
3609 uint32_t offset;
3610 struct sg_table *sg_ptr;
3611
3612 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3613 (data->type != QSEECOM_CLIENT_APP))
3614 return -EFAULT;
3615
3616 if (msg == NULL) {
3617 pr_err("Invalid address\n");
3618 return -EINVAL;
3619 }
3620 if (data->type == QSEECOM_LISTENER_SERVICE) {
3621 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3622 this_lstnr = __qseecom_find_svc(data->listener.id);
3623 if (IS_ERR_OR_NULL(this_lstnr)) {
3624 pr_err("Invalid listener ID\n");
3625 return -ENOMEM;
3626 }
3627 } else {
3628 req = (struct qseecom_send_modfd_cmd_req *)msg;
3629 }
3630
3631 for (i = 0; i < MAX_ION_FD; i++) {
3632 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3633 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003634 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003635 req->ifd_data[i].fd);
3636 if (IS_ERR_OR_NULL(ihandle)) {
3637 pr_err("Ion client can't retrieve the handle\n");
3638 return -ENOMEM;
3639 }
3640 field = (char *) req->cmd_req_buf +
3641 req->ifd_data[i].cmd_buf_offset;
3642 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3643 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003644 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003645 lstnr_resp->ifd_data[i].fd);
3646 if (IS_ERR_OR_NULL(ihandle)) {
3647 pr_err("Ion client can't retrieve the handle\n");
3648 return -ENOMEM;
3649 }
3650 field = lstnr_resp->resp_buf_ptr +
3651 lstnr_resp->ifd_data[i].cmd_buf_offset;
3652 } else {
3653 continue;
3654 }
3655 /* Populate the cmd data structure with the phys_addr */
3656 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3657 if (IS_ERR_OR_NULL(sg_ptr)) {
3658 pr_err("IOn client could not retrieve sg table\n");
3659 goto err;
3660 }
3661 if (sg_ptr->nents == 0) {
3662 pr_err("Num of scattered entries is 0\n");
3663 goto err;
3664 }
3665 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3666 pr_err("Num of scattered entries");
3667 pr_err(" (%d) is greater than max supported %d\n",
3668 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3669 goto err;
3670 }
3671 sg = sg_ptr->sgl;
3672 if (sg_ptr->nents == 1) {
3673 uint32_t *update;
3674
3675 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3676 goto err;
3677 if ((data->type == QSEECOM_CLIENT_APP &&
3678 (data->client.app_arch == ELFCLASS32 ||
3679 data->client.app_arch == ELFCLASS64)) ||
3680 (data->type == QSEECOM_LISTENER_SERVICE)) {
3681 /*
3682 * Check if sg list phy add region is under 4GB
3683 */
3684 if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
3685 (!cleanup) &&
3686 ((uint64_t)sg_dma_address(sg_ptr->sgl)
3687 >= PHY_ADDR_4G - sg->length)) {
3688 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3689 data->client.app_name,
3690 &(sg_dma_address(sg_ptr->sgl)),
3691 sg->length);
3692 goto err;
3693 }
3694 update = (uint32_t *) field;
3695 *update = cleanup ? 0 :
3696 (uint32_t)sg_dma_address(sg_ptr->sgl);
3697 } else {
3698 pr_err("QSEE app arch %u is not supported\n",
3699 data->client.app_arch);
3700 goto err;
3701 }
3702 len += (uint32_t)sg->length;
3703 } else {
3704 struct qseecom_sg_entry *update;
3705 int j = 0;
3706
3707 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3708 (req->ifd_data[i].fd > 0)) {
3709
3710 if ((req->cmd_req_len <
3711 SG_ENTRY_SZ * sg_ptr->nents) ||
3712 (req->ifd_data[i].cmd_buf_offset >
3713 (req->cmd_req_len -
3714 SG_ENTRY_SZ * sg_ptr->nents))) {
3715 pr_err("Invalid offset = 0x%x\n",
3716 req->ifd_data[i].cmd_buf_offset);
3717 goto err;
3718 }
3719
3720 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3721 (lstnr_resp->ifd_data[i].fd > 0)) {
3722
3723 if ((lstnr_resp->resp_len <
3724 SG_ENTRY_SZ * sg_ptr->nents) ||
3725 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3726 (lstnr_resp->resp_len -
3727 SG_ENTRY_SZ * sg_ptr->nents))) {
3728 goto err;
3729 }
3730 }
3731 if ((data->type == QSEECOM_CLIENT_APP &&
3732 (data->client.app_arch == ELFCLASS32 ||
3733 data->client.app_arch == ELFCLASS64)) ||
3734 (data->type == QSEECOM_LISTENER_SERVICE)) {
3735 update = (struct qseecom_sg_entry *)field;
3736 for (j = 0; j < sg_ptr->nents; j++) {
3737 /*
3738 * Check if sg list PA is under 4GB
3739 */
3740 if ((qseecom.qsee_version >=
3741 QSEE_VERSION_40) &&
3742 (!cleanup) &&
3743 ((uint64_t)(sg_dma_address(sg))
3744 >= PHY_ADDR_4G - sg->length)) {
3745 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3746 data->client.app_name,
3747 &(sg_dma_address(sg)),
3748 sg->length);
3749 goto err;
3750 }
3751 update->phys_addr = cleanup ? 0 :
3752 (uint32_t)sg_dma_address(sg);
3753 update->len = cleanup ? 0 : sg->length;
3754 update++;
3755 len += sg->length;
3756 sg = sg_next(sg);
3757 }
3758 } else {
3759 pr_err("QSEE app arch %u is not supported\n",
3760 data->client.app_arch);
3761 goto err;
3762 }
3763 }
3764
3765 if (cleanup) {
3766 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3767 ihandle, NULL, len,
3768 ION_IOC_INV_CACHES);
3769 if (ret) {
3770 pr_err("cache operation failed %d\n", ret);
3771 goto err;
3772 }
3773 } else {
3774 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3775 ihandle, NULL, len,
3776 ION_IOC_CLEAN_INV_CACHES);
3777 if (ret) {
3778 pr_err("cache operation failed %d\n", ret);
3779 goto err;
3780 }
3781 if (data->type == QSEECOM_CLIENT_APP) {
3782 offset = req->ifd_data[i].cmd_buf_offset;
3783 data->sglistinfo_ptr[i].indexAndFlags =
3784 SGLISTINFO_SET_INDEX_FLAG(
3785 (sg_ptr->nents == 1), 0, offset);
3786 data->sglistinfo_ptr[i].sizeOrCount =
3787 (sg_ptr->nents == 1) ?
3788 sg->length : sg_ptr->nents;
3789 data->sglist_cnt = i + 1;
3790 } else {
3791 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3792 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3793 (uintptr_t)this_lstnr->sb_virt);
3794 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3795 SGLISTINFO_SET_INDEX_FLAG(
3796 (sg_ptr->nents == 1), 0, offset);
3797 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3798 (sg_ptr->nents == 1) ?
3799 sg->length : sg_ptr->nents;
3800 this_lstnr->sglist_cnt = i + 1;
3801 }
3802 }
3803 /* Deallocate the handle */
3804 if (!IS_ERR_OR_NULL(ihandle))
3805 ion_free(qseecom.ion_clnt, ihandle);
3806 }
3807 return ret;
3808err:
3809 if (!IS_ERR_OR_NULL(ihandle))
3810 ion_free(qseecom.ion_clnt, ihandle);
3811 return -ENOMEM;
3812}
3813
3814static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3815 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3816{
3817 struct scatterlist *sg = sg_ptr->sgl;
3818 struct qseecom_sg_entry_64bit *sg_entry;
3819 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3820 void *buf;
3821 uint i;
3822 size_t size;
3823 dma_addr_t coh_pmem;
3824
3825 if (fd_idx >= MAX_ION_FD) {
3826 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3827 return -ENOMEM;
3828 }
3829 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3830 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3831 /* Allocate a contiguous kernel buffer */
3832 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3833 size = (size + PAGE_SIZE) & PAGE_MASK;
3834 buf = dma_alloc_coherent(qseecom.pdev,
3835 size, &coh_pmem, GFP_KERNEL);
3836 if (buf == NULL) {
3837 pr_err("failed to alloc memory for sg buf\n");
3838 return -ENOMEM;
3839 }
3840 /* update qseecom_sg_list_buf_hdr_64bit */
3841 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3842 buf_hdr->new_buf_phys_addr = coh_pmem;
3843 buf_hdr->nents_total = sg_ptr->nents;
3844 /* save the left sg entries into new allocated buf */
3845 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3846 for (i = 0; i < sg_ptr->nents; i++) {
3847 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3848 sg_entry->len = sg->length;
3849 sg_entry++;
3850 sg = sg_next(sg);
3851 }
3852
3853 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3854 data->client.sec_buf_fd[fd_idx].vbase = buf;
3855 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3856 data->client.sec_buf_fd[fd_idx].size = size;
3857
3858 return 0;
3859}
3860
3861static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
3862 struct qseecom_dev_handle *data)
3863{
3864 struct ion_handle *ihandle;
3865 char *field;
3866 int ret = 0;
3867 int i = 0;
3868 uint32_t len = 0;
3869 struct scatterlist *sg;
3870 struct qseecom_send_modfd_cmd_req *req = NULL;
3871 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3872 struct qseecom_registered_listener_list *this_lstnr = NULL;
3873 uint32_t offset;
3874 struct sg_table *sg_ptr;
3875
3876 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3877 (data->type != QSEECOM_CLIENT_APP))
3878 return -EFAULT;
3879
3880 if (msg == NULL) {
3881 pr_err("Invalid address\n");
3882 return -EINVAL;
3883 }
3884 if (data->type == QSEECOM_LISTENER_SERVICE) {
3885 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3886 this_lstnr = __qseecom_find_svc(data->listener.id);
3887 if (IS_ERR_OR_NULL(this_lstnr)) {
3888 pr_err("Invalid listener ID\n");
3889 return -ENOMEM;
3890 }
3891 } else {
3892 req = (struct qseecom_send_modfd_cmd_req *)msg;
3893 }
3894
3895 for (i = 0; i < MAX_ION_FD; i++) {
3896 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3897 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003898 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003899 req->ifd_data[i].fd);
3900 if (IS_ERR_OR_NULL(ihandle)) {
3901 pr_err("Ion client can't retrieve the handle\n");
3902 return -ENOMEM;
3903 }
3904 field = (char *) req->cmd_req_buf +
3905 req->ifd_data[i].cmd_buf_offset;
3906 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3907 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003908 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003909 lstnr_resp->ifd_data[i].fd);
3910 if (IS_ERR_OR_NULL(ihandle)) {
3911 pr_err("Ion client can't retrieve the handle\n");
3912 return -ENOMEM;
3913 }
3914 field = lstnr_resp->resp_buf_ptr +
3915 lstnr_resp->ifd_data[i].cmd_buf_offset;
3916 } else {
3917 continue;
3918 }
3919 /* Populate the cmd data structure with the phys_addr */
3920 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3921 if (IS_ERR_OR_NULL(sg_ptr)) {
3922 pr_err("IOn client could not retrieve sg table\n");
3923 goto err;
3924 }
3925 if (sg_ptr->nents == 0) {
3926 pr_err("Num of scattered entries is 0\n");
3927 goto err;
3928 }
3929 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3930 pr_warn("Num of scattered entries");
3931 pr_warn(" (%d) is greater than %d\n",
3932 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3933 if (cleanup) {
3934 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3935 data->client.sec_buf_fd[i].vbase)
3936 dma_free_coherent(qseecom.pdev,
3937 data->client.sec_buf_fd[i].size,
3938 data->client.sec_buf_fd[i].vbase,
3939 data->client.sec_buf_fd[i].pbase);
3940 } else {
3941 ret = __qseecom_allocate_sg_list_buffer(data,
3942 field, i, sg_ptr);
3943 if (ret) {
3944 pr_err("Failed to allocate sg list buffer\n");
3945 goto err;
3946 }
3947 }
3948 len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
3949 sg = sg_ptr->sgl;
3950 goto cleanup;
3951 }
3952 sg = sg_ptr->sgl;
3953 if (sg_ptr->nents == 1) {
3954 uint64_t *update_64bit;
3955
Zhen Kongd097c6e02019-08-01 16:10:20 -07003956 if (__boundary_checks_offset_64(req, lstnr_resp,
3957 data, i))
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003958 goto err;
3959 /* 64bit app uses 64bit address */
3960 update_64bit = (uint64_t *) field;
3961 *update_64bit = cleanup ? 0 :
3962 (uint64_t)sg_dma_address(sg_ptr->sgl);
3963 len += (uint32_t)sg->length;
3964 } else {
3965 struct qseecom_sg_entry_64bit *update_64bit;
3966 int j = 0;
3967
3968 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3969 (req->ifd_data[i].fd > 0)) {
3970
3971 if ((req->cmd_req_len <
3972 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3973 (req->ifd_data[i].cmd_buf_offset >
3974 (req->cmd_req_len -
3975 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3976 pr_err("Invalid offset = 0x%x\n",
3977 req->ifd_data[i].cmd_buf_offset);
3978 goto err;
3979 }
3980
3981 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3982 (lstnr_resp->ifd_data[i].fd > 0)) {
3983
3984 if ((lstnr_resp->resp_len <
3985 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3986 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3987 (lstnr_resp->resp_len -
3988 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3989 goto err;
3990 }
3991 }
3992 /* 64bit app uses 64bit address */
3993 update_64bit = (struct qseecom_sg_entry_64bit *)field;
3994 for (j = 0; j < sg_ptr->nents; j++) {
3995 update_64bit->phys_addr = cleanup ? 0 :
3996 (uint64_t)sg_dma_address(sg);
3997 update_64bit->len = cleanup ? 0 :
3998 (uint32_t)sg->length;
3999 update_64bit++;
4000 len += sg->length;
4001 sg = sg_next(sg);
4002 }
4003 }
4004cleanup:
4005 if (cleanup) {
4006 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
4007 ihandle, NULL, len,
4008 ION_IOC_INV_CACHES);
4009 if (ret) {
4010 pr_err("cache operation failed %d\n", ret);
4011 goto err;
4012 }
4013 } else {
4014 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
4015 ihandle, NULL, len,
4016 ION_IOC_CLEAN_INV_CACHES);
4017 if (ret) {
4018 pr_err("cache operation failed %d\n", ret);
4019 goto err;
4020 }
4021 if (data->type == QSEECOM_CLIENT_APP) {
4022 offset = req->ifd_data[i].cmd_buf_offset;
4023 data->sglistinfo_ptr[i].indexAndFlags =
4024 SGLISTINFO_SET_INDEX_FLAG(
4025 (sg_ptr->nents == 1), 1, offset);
4026 data->sglistinfo_ptr[i].sizeOrCount =
4027 (sg_ptr->nents == 1) ?
4028 sg->length : sg_ptr->nents;
4029 data->sglist_cnt = i + 1;
4030 } else {
4031 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
4032 + (uintptr_t)lstnr_resp->resp_buf_ptr -
4033 (uintptr_t)this_lstnr->sb_virt);
4034 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
4035 SGLISTINFO_SET_INDEX_FLAG(
4036 (sg_ptr->nents == 1), 1, offset);
4037 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
4038 (sg_ptr->nents == 1) ?
4039 sg->length : sg_ptr->nents;
4040 this_lstnr->sglist_cnt = i + 1;
4041 }
4042 }
4043 /* Deallocate the handle */
4044 if (!IS_ERR_OR_NULL(ihandle))
4045 ion_free(qseecom.ion_clnt, ihandle);
4046 }
4047 return ret;
4048err:
4049 for (i = 0; i < MAX_ION_FD; i++)
4050 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
4051 data->client.sec_buf_fd[i].vbase)
4052 dma_free_coherent(qseecom.pdev,
4053 data->client.sec_buf_fd[i].size,
4054 data->client.sec_buf_fd[i].vbase,
4055 data->client.sec_buf_fd[i].pbase);
4056 if (!IS_ERR_OR_NULL(ihandle))
4057 ion_free(qseecom.ion_clnt, ihandle);
4058 return -ENOMEM;
4059}
4060
4061static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
4062 void __user *argp,
4063 bool is_64bit_addr)
4064{
4065 int ret = 0;
4066 int i;
4067 struct qseecom_send_modfd_cmd_req req;
4068 struct qseecom_send_cmd_req send_cmd_req;
4069
4070 ret = copy_from_user(&req, argp, sizeof(req));
4071 if (ret) {
4072 pr_err("copy_from_user failed\n");
4073 return ret;
4074 }
4075
4076 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
4077 send_cmd_req.cmd_req_len = req.cmd_req_len;
4078 send_cmd_req.resp_buf = req.resp_buf;
4079 send_cmd_req.resp_len = req.resp_len;
4080
4081 if (__validate_send_cmd_inputs(data, &send_cmd_req))
4082 return -EINVAL;
4083
4084 /* validate offsets */
4085 for (i = 0; i < MAX_ION_FD; i++) {
4086 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
4087 pr_err("Invalid offset %d = 0x%x\n",
4088 i, req.ifd_data[i].cmd_buf_offset);
4089 return -EINVAL;
4090 }
4091 }
4092 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
4093 (uintptr_t)req.cmd_req_buf);
4094 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
4095 (uintptr_t)req.resp_buf);
4096
4097 if (!is_64bit_addr) {
4098 ret = __qseecom_update_cmd_buf(&req, false, data);
4099 if (ret)
4100 return ret;
4101 ret = __qseecom_send_cmd(data, &send_cmd_req);
4102 if (ret)
4103 return ret;
4104 ret = __qseecom_update_cmd_buf(&req, true, data);
4105 if (ret)
4106 return ret;
4107 } else {
4108 ret = __qseecom_update_cmd_buf_64(&req, false, data);
4109 if (ret)
4110 return ret;
4111 ret = __qseecom_send_cmd(data, &send_cmd_req);
4112 if (ret)
4113 return ret;
4114 ret = __qseecom_update_cmd_buf_64(&req, true, data);
4115 if (ret)
4116 return ret;
4117 }
4118
4119 return ret;
4120}
4121
4122static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
4123 void __user *argp)
4124{
4125 return __qseecom_send_modfd_cmd(data, argp, false);
4126}
4127
4128static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
4129 void __user *argp)
4130{
4131 return __qseecom_send_modfd_cmd(data, argp, true);
4132}
4133
4134
4135
4136static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
4137 struct qseecom_registered_listener_list *svc)
4138{
4139 int ret;
4140
Zhen Kongf5087172018-10-11 17:22:05 -07004141 ret = (svc->rcv_req_flag == 1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08004142 return ret || data->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004143}
4144
4145static int qseecom_receive_req(struct qseecom_dev_handle *data)
4146{
4147 int ret = 0;
4148 struct qseecom_registered_listener_list *this_lstnr;
4149
Zhen Kongbcdeda22018-11-16 13:50:51 -08004150 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004151 this_lstnr = __qseecom_find_svc(data->listener.id);
4152 if (!this_lstnr) {
4153 pr_err("Invalid listener ID\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08004154 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004155 return -ENODATA;
4156 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004157 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004158
4159 while (1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05304160 if (wait_event_interruptible(this_lstnr->rcv_req_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004161 __qseecom_listener_has_rcvd_req(data,
4162 this_lstnr))) {
Zhen Kong52ce9062018-09-24 14:33:27 -07004163 pr_debug("Interrupted: exiting Listener Service = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004164 (uint32_t)data->listener.id);
4165 /* woken up for different reason */
4166 return -ERESTARTSYS;
4167 }
4168
Zhen Kongbcdeda22018-11-16 13:50:51 -08004169 if (data->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004170 pr_err("Aborting Listener Service = %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07004171 (uint32_t)data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004172 return -ENODEV;
4173 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004174 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004175 this_lstnr->rcv_req_flag = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08004176 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004177 break;
4178 }
4179 return ret;
4180}
4181
4182static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
4183{
4184 unsigned char app_arch = 0;
4185 struct elf32_hdr *ehdr;
4186 struct elf64_hdr *ehdr64;
4187
4188 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4189
4190 switch (app_arch) {
4191 case ELFCLASS32: {
4192 ehdr = (struct elf32_hdr *)fw_entry->data;
4193 if (fw_entry->size < sizeof(*ehdr)) {
4194 pr_err("%s: Not big enough to be an elf32 header\n",
4195 qseecom.pdev->init_name);
4196 return false;
4197 }
4198 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
4199 pr_err("%s: Not an elf32 header\n",
4200 qseecom.pdev->init_name);
4201 return false;
4202 }
4203 if (ehdr->e_phnum == 0) {
4204 pr_err("%s: No loadable segments\n",
4205 qseecom.pdev->init_name);
4206 return false;
4207 }
4208 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
4209 sizeof(struct elf32_hdr) > fw_entry->size) {
4210 pr_err("%s: Program headers not within mdt\n",
4211 qseecom.pdev->init_name);
4212 return false;
4213 }
4214 break;
4215 }
4216 case ELFCLASS64: {
4217 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4218 if (fw_entry->size < sizeof(*ehdr64)) {
4219 pr_err("%s: Not big enough to be an elf64 header\n",
4220 qseecom.pdev->init_name);
4221 return false;
4222 }
4223 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
4224 pr_err("%s: Not an elf64 header\n",
4225 qseecom.pdev->init_name);
4226 return false;
4227 }
4228 if (ehdr64->e_phnum == 0) {
4229 pr_err("%s: No loadable segments\n",
4230 qseecom.pdev->init_name);
4231 return false;
4232 }
4233 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
4234 sizeof(struct elf64_hdr) > fw_entry->size) {
4235 pr_err("%s: Program headers not within mdt\n",
4236 qseecom.pdev->init_name);
4237 return false;
4238 }
4239 break;
4240 }
4241 default: {
4242 pr_err("QSEE app arch %u is not supported\n", app_arch);
4243 return false;
4244 }
4245 }
4246 return true;
4247}
4248
4249static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
4250 uint32_t *app_arch)
4251{
4252 int ret = -1;
4253 int i = 0, rc = 0;
4254 const struct firmware *fw_entry = NULL;
4255 char fw_name[MAX_APP_NAME_SIZE];
4256 struct elf32_hdr *ehdr;
4257 struct elf64_hdr *ehdr64;
4258 int num_images = 0;
4259
4260 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4261 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4262 if (rc) {
4263 pr_err("error with request_firmware\n");
4264 ret = -EIO;
4265 goto err;
4266 }
4267 if (!__qseecom_is_fw_image_valid(fw_entry)) {
4268 ret = -EIO;
4269 goto err;
4270 }
4271 *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4272 *fw_size = fw_entry->size;
4273 if (*app_arch == ELFCLASS32) {
4274 ehdr = (struct elf32_hdr *)fw_entry->data;
4275 num_images = ehdr->e_phnum;
4276 } else if (*app_arch == ELFCLASS64) {
4277 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4278 num_images = ehdr64->e_phnum;
4279 } else {
4280 pr_err("QSEE %s app, arch %u is not supported\n",
4281 appname, *app_arch);
4282 ret = -EIO;
4283 goto err;
4284 }
4285 pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
4286 release_firmware(fw_entry);
4287 fw_entry = NULL;
4288 for (i = 0; i < num_images; i++) {
4289 memset(fw_name, 0, sizeof(fw_name));
4290 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4291 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4292 if (ret)
4293 goto err;
4294 if (*fw_size > U32_MAX - fw_entry->size) {
4295 pr_err("QSEE %s app file size overflow\n", appname);
4296 ret = -EINVAL;
4297 goto err;
4298 }
4299 *fw_size += fw_entry->size;
4300 release_firmware(fw_entry);
4301 fw_entry = NULL;
4302 }
4303
4304 return ret;
4305err:
4306 if (fw_entry)
4307 release_firmware(fw_entry);
4308 *fw_size = 0;
4309 return ret;
4310}
4311
4312static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
4313 uint32_t fw_size,
4314 struct qseecom_load_app_ireq *load_req)
4315{
4316 int ret = -1;
4317 int i = 0, rc = 0;
4318 const struct firmware *fw_entry = NULL;
4319 char fw_name[MAX_APP_NAME_SIZE];
4320 u8 *img_data_ptr = img_data;
4321 struct elf32_hdr *ehdr;
4322 struct elf64_hdr *ehdr64;
4323 int num_images = 0;
4324 unsigned char app_arch = 0;
4325
4326 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4327 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4328 if (rc) {
4329 ret = -EIO;
4330 goto err;
4331 }
4332
4333 load_req->img_len = fw_entry->size;
4334 if (load_req->img_len > fw_size) {
4335 pr_err("app %s size %zu is larger than buf size %u\n",
4336 appname, fw_entry->size, fw_size);
4337 ret = -EINVAL;
4338 goto err;
4339 }
4340 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4341 img_data_ptr = img_data_ptr + fw_entry->size;
4342 load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
4343
4344 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4345 if (app_arch == ELFCLASS32) {
4346 ehdr = (struct elf32_hdr *)fw_entry->data;
4347 num_images = ehdr->e_phnum;
4348 } else if (app_arch == ELFCLASS64) {
4349 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4350 num_images = ehdr64->e_phnum;
4351 } else {
4352 pr_err("QSEE %s app, arch %u is not supported\n",
4353 appname, app_arch);
4354 ret = -EIO;
4355 goto err;
4356 }
4357 release_firmware(fw_entry);
4358 fw_entry = NULL;
4359 for (i = 0; i < num_images; i++) {
4360 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4361 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4362 if (ret) {
4363 pr_err("Failed to locate blob %s\n", fw_name);
4364 goto err;
4365 }
4366 if ((fw_entry->size > U32_MAX - load_req->img_len) ||
4367 (fw_entry->size + load_req->img_len > fw_size)) {
4368 pr_err("Invalid file size for %s\n", fw_name);
4369 ret = -EINVAL;
4370 goto err;
4371 }
4372 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4373 img_data_ptr = img_data_ptr + fw_entry->size;
4374 load_req->img_len += fw_entry->size;
4375 release_firmware(fw_entry);
4376 fw_entry = NULL;
4377 }
4378 return ret;
4379err:
4380 release_firmware(fw_entry);
4381 return ret;
4382}
4383
4384static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4385 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4386{
4387 size_t len = 0;
4388 int ret = 0;
4389 ion_phys_addr_t pa;
4390 struct ion_handle *ihandle = NULL;
4391 u8 *img_data = NULL;
Zhen Kong3dd92792017-12-08 09:47:15 -08004392 int retry = 0;
Zhen Konge30e1342019-01-22 08:57:02 -08004393 int ion_flag = ION_FLAG_CACHED;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004394
Zhen Kong3dd92792017-12-08 09:47:15 -08004395 do {
Zhen Kong5d02be92018-05-29 16:17:29 -07004396 if (retry++) {
4397 mutex_unlock(&app_access_lock);
Zhen Kong3dd92792017-12-08 09:47:15 -08004398 msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
Zhen Kong5d02be92018-05-29 16:17:29 -07004399 mutex_lock(&app_access_lock);
4400 }
Zhen Kong3dd92792017-12-08 09:47:15 -08004401 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
Zhen Konge30e1342019-01-22 08:57:02 -08004402 SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), ion_flag);
Zhen Kong3dd92792017-12-08 09:47:15 -08004403 } while (IS_ERR_OR_NULL(ihandle) &&
4404 (retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004405
4406 if (IS_ERR_OR_NULL(ihandle)) {
4407 pr_err("ION alloc failed\n");
4408 return -ENOMEM;
4409 }
4410 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4411 ihandle);
4412
4413 if (IS_ERR_OR_NULL(img_data)) {
4414 pr_err("ION memory mapping for image loading failed\n");
4415 ret = -ENOMEM;
4416 goto exit_ion_free;
4417 }
4418 /* Get the physical address of the ION BUF */
4419 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4420 if (ret) {
4421 pr_err("physical memory retrieval failure\n");
4422 ret = -EIO;
4423 goto exit_ion_unmap_kernel;
4424 }
4425
4426 *pihandle = ihandle;
4427 *data = img_data;
4428 *paddr = pa;
4429 return ret;
4430
4431exit_ion_unmap_kernel:
4432 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4433exit_ion_free:
4434 ion_free(qseecom.ion_clnt, ihandle);
4435 ihandle = NULL;
4436 return ret;
4437}
4438
4439static void __qseecom_free_img_data(struct ion_handle **ihandle)
4440{
4441 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4442 ion_free(qseecom.ion_clnt, *ihandle);
4443 *ihandle = NULL;
4444}
4445
4446static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4447 uint32_t *app_id)
4448{
4449 int ret = -1;
4450 uint32_t fw_size = 0;
4451 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4452 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4453 struct qseecom_command_scm_resp resp;
4454 u8 *img_data = NULL;
4455 ion_phys_addr_t pa = 0;
4456 struct ion_handle *ihandle = NULL;
4457 void *cmd_buf = NULL;
4458 size_t cmd_len;
4459 uint32_t app_arch = 0;
4460
4461 if (!data || !appname || !app_id) {
4462 pr_err("Null pointer to data or appname or appid\n");
4463 return -EINVAL;
4464 }
4465 *app_id = 0;
4466 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4467 return -EIO;
4468 data->client.app_arch = app_arch;
4469
4470 /* Check and load cmnlib */
4471 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4472 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4473 ret = qseecom_load_commonlib_image(data, "cmnlib");
4474 if (ret) {
4475 pr_err("failed to load cmnlib\n");
4476 return -EIO;
4477 }
4478 qseecom.commonlib_loaded = true;
4479 pr_debug("cmnlib is loaded\n");
4480 }
4481
4482 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4483 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4484 if (ret) {
4485 pr_err("failed to load cmnlib64\n");
4486 return -EIO;
4487 }
4488 qseecom.commonlib64_loaded = true;
4489 pr_debug("cmnlib64 is loaded\n");
4490 }
4491 }
4492
4493 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4494 if (ret)
4495 return ret;
4496
4497 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4498 if (ret) {
4499 ret = -EIO;
4500 goto exit_free_img_data;
4501 }
4502
4503 /* Populate the load_req parameters */
4504 if (qseecom.qsee_version < QSEE_VERSION_40) {
4505 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4506 load_req.mdt_len = load_req.mdt_len;
4507 load_req.img_len = load_req.img_len;
4508 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4509 load_req.phy_addr = (uint32_t)pa;
4510 cmd_buf = (void *)&load_req;
4511 cmd_len = sizeof(struct qseecom_load_app_ireq);
4512 } else {
4513 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4514 load_req_64bit.mdt_len = load_req.mdt_len;
4515 load_req_64bit.img_len = load_req.img_len;
4516 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4517 load_req_64bit.phy_addr = (uint64_t)pa;
4518 cmd_buf = (void *)&load_req_64bit;
4519 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4520 }
4521
4522 if (qseecom.support_bus_scaling) {
4523 mutex_lock(&qsee_bw_mutex);
4524 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4525 mutex_unlock(&qsee_bw_mutex);
4526 if (ret) {
4527 ret = -EIO;
4528 goto exit_free_img_data;
4529 }
4530 }
4531
4532 ret = __qseecom_enable_clk_scale_up(data);
4533 if (ret) {
4534 ret = -EIO;
4535 goto exit_unregister_bus_bw_need;
4536 }
4537
4538 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4539 img_data, fw_size,
4540 ION_IOC_CLEAN_INV_CACHES);
4541 if (ret) {
4542 pr_err("cache operation failed %d\n", ret);
4543 goto exit_disable_clk_vote;
4544 }
4545
4546 /* SCM_CALL to load the image */
4547 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4548 &resp, sizeof(resp));
4549 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004550 pr_err("scm_call to load failed : ret %d, result %x\n",
4551 ret, resp.result);
4552 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4553 ret = -EEXIST;
4554 else
4555 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004556 goto exit_disable_clk_vote;
4557 }
4558
4559 switch (resp.result) {
4560 case QSEOS_RESULT_SUCCESS:
4561 *app_id = resp.data;
4562 break;
4563 case QSEOS_RESULT_INCOMPLETE:
4564 ret = __qseecom_process_incomplete_cmd(data, &resp);
Zhen Kong03b2eae2019-09-17 16:58:46 -07004565 if (ret) {
4566 pr_err("incomp_cmd err %d, %d, unload %d %s\n",
4567 ret, resp.result, resp.data, appname);
4568 __qseecom_unload_app(data, resp.data);
4569 ret = -EFAULT;
4570 } else {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004571 *app_id = resp.data;
Zhen Kong03b2eae2019-09-17 16:58:46 -07004572 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004573 break;
4574 case QSEOS_RESULT_FAILURE:
4575 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4576 break;
4577 default:
4578 pr_err("scm call return unknown response %d\n", resp.result);
4579 ret = -EINVAL;
4580 break;
4581 }
4582
4583exit_disable_clk_vote:
4584 __qseecom_disable_clk_scale_down(data);
4585
4586exit_unregister_bus_bw_need:
4587 if (qseecom.support_bus_scaling) {
4588 mutex_lock(&qsee_bw_mutex);
4589 qseecom_unregister_bus_bandwidth_needs(data);
4590 mutex_unlock(&qsee_bw_mutex);
4591 }
4592
4593exit_free_img_data:
4594 __qseecom_free_img_data(&ihandle);
4595 return ret;
4596}
4597
4598static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
4599 char *cmnlib_name)
4600{
4601 int ret = 0;
4602 uint32_t fw_size = 0;
4603 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4604 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4605 struct qseecom_command_scm_resp resp;
4606 u8 *img_data = NULL;
4607 ion_phys_addr_t pa = 0;
4608 void *cmd_buf = NULL;
4609 size_t cmd_len;
4610 uint32_t app_arch = 0;
Zhen Kong3bafb312017-10-18 10:27:20 -07004611 struct ion_handle *cmnlib_ion_handle = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004612
4613 if (!cmnlib_name) {
4614 pr_err("cmnlib_name is NULL\n");
4615 return -EINVAL;
4616 }
4617 if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
4618 pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
4619 cmnlib_name, strlen(cmnlib_name));
4620 return -EINVAL;
4621 }
4622
4623 if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
4624 return -EIO;
4625
Zhen Kong3bafb312017-10-18 10:27:20 -07004626 ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004627 &img_data, fw_size, &pa);
4628 if (ret)
4629 return -EIO;
4630
4631 ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
4632 if (ret) {
4633 ret = -EIO;
4634 goto exit_free_img_data;
4635 }
4636 if (qseecom.qsee_version < QSEE_VERSION_40) {
4637 load_req.phy_addr = (uint32_t)pa;
4638 load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4639 cmd_buf = (void *)&load_req;
4640 cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
4641 } else {
4642 load_req_64bit.phy_addr = (uint64_t)pa;
4643 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4644 load_req_64bit.img_len = load_req.img_len;
4645 load_req_64bit.mdt_len = load_req.mdt_len;
4646 cmd_buf = (void *)&load_req_64bit;
4647 cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
4648 }
4649
4650 if (qseecom.support_bus_scaling) {
4651 mutex_lock(&qsee_bw_mutex);
4652 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4653 mutex_unlock(&qsee_bw_mutex);
4654 if (ret) {
4655 ret = -EIO;
4656 goto exit_free_img_data;
4657 }
4658 }
4659
4660 /* Vote for the SFPB clock */
4661 ret = __qseecom_enable_clk_scale_up(data);
4662 if (ret) {
4663 ret = -EIO;
4664 goto exit_unregister_bus_bw_need;
4665 }
4666
Zhen Kong3bafb312017-10-18 10:27:20 -07004667 ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004668 img_data, fw_size,
4669 ION_IOC_CLEAN_INV_CACHES);
4670 if (ret) {
4671 pr_err("cache operation failed %d\n", ret);
4672 goto exit_disable_clk_vote;
4673 }
4674
4675 /* SCM_CALL to load the image */
4676 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4677 &resp, sizeof(resp));
4678 if (ret) {
4679 pr_err("scm_call to load failed : ret %d\n", ret);
4680 ret = -EIO;
4681 goto exit_disable_clk_vote;
4682 }
4683
4684 switch (resp.result) {
4685 case QSEOS_RESULT_SUCCESS:
4686 break;
4687 case QSEOS_RESULT_FAILURE:
4688 pr_err("scm call failed w/response result%d\n", resp.result);
4689 ret = -EINVAL;
4690 goto exit_disable_clk_vote;
4691 case QSEOS_RESULT_INCOMPLETE:
4692 ret = __qseecom_process_incomplete_cmd(data, &resp);
4693 if (ret) {
4694 pr_err("process_incomplete_cmd failed err: %d\n", ret);
4695 goto exit_disable_clk_vote;
4696 }
4697 break;
4698 default:
4699 pr_err("scm call return unknown response %d\n", resp.result);
4700 ret = -EINVAL;
4701 goto exit_disable_clk_vote;
4702 }
4703
4704exit_disable_clk_vote:
4705 __qseecom_disable_clk_scale_down(data);
4706
4707exit_unregister_bus_bw_need:
4708 if (qseecom.support_bus_scaling) {
4709 mutex_lock(&qsee_bw_mutex);
4710 qseecom_unregister_bus_bandwidth_needs(data);
4711 mutex_unlock(&qsee_bw_mutex);
4712 }
4713
4714exit_free_img_data:
Zhen Kong3bafb312017-10-18 10:27:20 -07004715 __qseecom_free_img_data(&cmnlib_ion_handle);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004716 return ret;
4717}
4718
4719static int qseecom_unload_commonlib_image(void)
4720{
4721 int ret = -EINVAL;
4722 struct qseecom_unload_lib_image_ireq unload_req = {0};
4723 struct qseecom_command_scm_resp resp;
4724
4725 /* Populate the remaining parameters */
4726 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4727
4728 /* SCM_CALL to load the image */
4729 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4730 sizeof(struct qseecom_unload_lib_image_ireq),
4731 &resp, sizeof(resp));
4732 if (ret) {
4733 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4734 ret = -EIO;
4735 } else {
4736 switch (resp.result) {
4737 case QSEOS_RESULT_SUCCESS:
4738 break;
4739 case QSEOS_RESULT_FAILURE:
4740 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4741 break;
4742 default:
4743 pr_err("scm call return unknown response %d\n",
4744 resp.result);
4745 ret = -EINVAL;
4746 break;
4747 }
4748 }
4749
4750 return ret;
4751}
4752
4753int qseecom_start_app(struct qseecom_handle **handle,
4754 char *app_name, uint32_t size)
4755{
4756 int32_t ret = 0;
4757 unsigned long flags = 0;
4758 struct qseecom_dev_handle *data = NULL;
4759 struct qseecom_check_app_ireq app_ireq;
4760 struct qseecom_registered_app_list *entry = NULL;
4761 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4762 bool found_app = false;
4763 size_t len;
4764 ion_phys_addr_t pa;
4765 uint32_t fw_size, app_arch;
4766 uint32_t app_id = 0;
4767
Zhen Kongc4c162a2019-01-23 12:07:12 -08004768 __wakeup_unregister_listener_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07004769 __wakeup_unload_app_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004770
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004771 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4772 pr_err("Not allowed to be called in %d state\n",
4773 atomic_read(&qseecom.qseecom_state));
4774 return -EPERM;
4775 }
4776 if (!app_name) {
4777 pr_err("failed to get the app name\n");
4778 return -EINVAL;
4779 }
4780
Zhen Kong64a6d7282017-06-16 11:55:07 -07004781 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004782 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004783 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004784 return -EINVAL;
4785 }
4786
4787 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4788 if (!(*handle))
4789 return -ENOMEM;
4790
4791 data = kzalloc(sizeof(*data), GFP_KERNEL);
4792 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304793 ret = -ENOMEM;
4794 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004795 }
4796 data->abort = 0;
4797 data->type = QSEECOM_CLIENT_APP;
4798 data->released = false;
4799 data->client.sb_length = size;
4800 data->client.user_virt_sb_base = 0;
4801 data->client.ihandle = NULL;
4802
4803 init_waitqueue_head(&data->abort_wq);
4804
4805 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4806 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4807 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4808 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304809 ret = -ENOMEM;
4810 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004811 }
4812 mutex_lock(&app_access_lock);
4813
Zhen Kong5d02be92018-05-29 16:17:29 -07004814recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004815 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4816 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4817 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4818 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304819 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004820
4821 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4822 if (app_id) {
4823 pr_warn("App id %d for [%s] app exists\n", app_id,
4824 (char *)app_ireq.app_name);
4825 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4826 list_for_each_entry(entry,
4827 &qseecom.registered_app_list_head, list){
4828 if (entry->app_id == app_id) {
4829 entry->ref_cnt++;
4830 found_app = true;
4831 break;
4832 }
4833 }
4834 spin_unlock_irqrestore(
4835 &qseecom.registered_app_list_lock, flags);
4836 if (!found_app)
4837 pr_warn("App_id %d [%s] was loaded but not registered\n",
4838 ret, (char *)app_ireq.app_name);
4839 } else {
4840 /* load the app and get the app_id */
4841 pr_debug("%s: Loading app for the first time'\n",
4842 qseecom.pdev->init_name);
4843 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004844 if (ret == -EEXIST) {
4845 pr_err("recheck if TA %s is loaded\n", app_name);
4846 goto recheck;
4847 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304848 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004849 }
4850 data->client.app_id = app_id;
4851 if (!found_app) {
4852 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4853 if (!entry) {
4854 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304855 ret = -ENOMEM;
4856 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004857 }
4858 entry->app_id = app_id;
4859 entry->ref_cnt = 1;
4860 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4861 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4862 ret = -EIO;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304863 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004864 }
4865 entry->app_arch = app_arch;
4866 entry->app_blocked = false;
4867 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004868 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004869 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4870 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4871 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4872 flags);
4873 }
4874
4875 /* Get the physical address of the ION BUF */
4876 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4877 if (ret) {
4878 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4879 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304880 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004881 }
4882
4883 /* Populate the structure for sending scm call to load image */
4884 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4885 data->client.ihandle);
4886 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4887 pr_err("ION memory mapping for client shared buf failed\n");
4888 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304889 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004890 }
4891 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4892 data->client.sb_phys = (phys_addr_t)pa;
4893 (*handle)->dev = (void *)data;
4894 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4895 (*handle)->sbuf_len = data->client.sb_length;
4896
4897 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4898 if (!kclient_entry) {
4899 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304900 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004901 }
4902 kclient_entry->handle = *handle;
4903
4904 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4905 list_add_tail(&kclient_entry->list,
4906 &qseecom.registered_kclient_list_head);
4907 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4908
4909 mutex_unlock(&app_access_lock);
Zhen Kong03b2eae2019-09-17 16:58:46 -07004910 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004911 return 0;
4912
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304913exit_ion_unmap_kernel:
4914 if (!IS_ERR_OR_NULL(data->client.ihandle))
4915 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4916exit_entry_free:
4917 kfree(entry);
4918exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004919 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304920 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4921 ion_free(qseecom.ion_clnt, data->client.ihandle);
4922 data->client.ihandle = NULL;
4923 }
4924exit_data_free:
4925 kfree(data);
4926exit_handle_free:
4927 if (*handle) {
4928 kfree(*handle);
4929 *handle = NULL;
4930 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07004931 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004932 return ret;
4933}
4934EXPORT_SYMBOL(qseecom_start_app);
4935
4936int qseecom_shutdown_app(struct qseecom_handle **handle)
4937{
4938 int ret = -EINVAL;
4939 struct qseecom_dev_handle *data;
4940
4941 struct qseecom_registered_kclient_list *kclient = NULL;
4942 unsigned long flags = 0;
4943 bool found_handle = false;
4944
Zhen Kongc4c162a2019-01-23 12:07:12 -08004945 __wakeup_unregister_listener_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07004946 __wakeup_unload_app_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004947
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004948 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4949 pr_err("Not allowed to be called in %d state\n",
4950 atomic_read(&qseecom.qseecom_state));
4951 return -EPERM;
4952 }
4953
4954 if ((handle == NULL) || (*handle == NULL)) {
4955 pr_err("Handle is not initialized\n");
4956 return -EINVAL;
4957 }
4958 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4959 mutex_lock(&app_access_lock);
4960
4961 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4962 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4963 list) {
4964 if (kclient->handle == (*handle)) {
4965 list_del(&kclient->list);
4966 found_handle = true;
4967 break;
4968 }
4969 }
4970 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4971 if (!found_handle)
4972 pr_err("Unable to find the handle, exiting\n");
4973 else
4974 ret = qseecom_unload_app(data, false);
4975
4976 mutex_unlock(&app_access_lock);
4977 if (ret == 0) {
4978 kzfree(data);
4979 kzfree(*handle);
4980 kzfree(kclient);
4981 *handle = NULL;
4982 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07004983 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004984 return ret;
4985}
4986EXPORT_SYMBOL(qseecom_shutdown_app);
4987
4988int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
4989 uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
4990{
4991 int ret = 0;
4992 struct qseecom_send_cmd_req req = {0, 0, 0, 0};
4993 struct qseecom_dev_handle *data;
4994 bool perf_enabled = false;
4995
Zhen Kongc4c162a2019-01-23 12:07:12 -08004996 __wakeup_unregister_listener_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07004997 __wakeup_unload_app_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004998
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004999 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
5000 pr_err("Not allowed to be called in %d state\n",
5001 atomic_read(&qseecom.qseecom_state));
5002 return -EPERM;
5003 }
5004
5005 if (handle == NULL) {
5006 pr_err("Handle is not initialized\n");
5007 return -EINVAL;
5008 }
5009 data = handle->dev;
5010
5011 req.cmd_req_len = sbuf_len;
5012 req.resp_len = rbuf_len;
5013 req.cmd_req_buf = send_buf;
5014 req.resp_buf = resp_buf;
5015
5016 if (__validate_send_cmd_inputs(data, &req))
5017 return -EINVAL;
5018
5019 mutex_lock(&app_access_lock);
5020 if (qseecom.support_bus_scaling) {
5021 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
5022 if (ret) {
5023 pr_err("Failed to set bw.\n");
5024 mutex_unlock(&app_access_lock);
5025 return ret;
5026 }
5027 }
5028 /*
5029 * On targets where crypto clock is handled by HLOS,
5030 * if clk_access_cnt is zero and perf_enabled is false,
5031 * then the crypto clock was not enabled before sending cmd
5032 * to tz, qseecom will enable the clock to avoid service failure.
5033 */
5034 if (!qseecom.no_clock_support &&
5035 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
5036 pr_debug("ce clock is not enabled!\n");
5037 ret = qseecom_perf_enable(data);
5038 if (ret) {
5039 pr_err("Failed to vote for clock with err %d\n",
5040 ret);
5041 mutex_unlock(&app_access_lock);
5042 return -EINVAL;
5043 }
5044 perf_enabled = true;
5045 }
5046 if (!strcmp(data->client.app_name, "securemm"))
5047 data->use_legacy_cmd = true;
5048
5049 ret = __qseecom_send_cmd(data, &req);
5050 data->use_legacy_cmd = false;
5051 if (qseecom.support_bus_scaling)
5052 __qseecom_add_bw_scale_down_timer(
5053 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
5054
5055 if (perf_enabled) {
5056 qsee_disable_clock_vote(data, CLK_DFAB);
5057 qsee_disable_clock_vote(data, CLK_SFPB);
5058 }
5059
5060 mutex_unlock(&app_access_lock);
5061
5062 if (ret)
5063 return ret;
5064
5065 pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
5066 req.resp_len, req.resp_buf);
5067 return ret;
5068}
5069EXPORT_SYMBOL(qseecom_send_command);
5070
5071int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
5072{
5073 int ret = 0;
5074
5075 if ((handle == NULL) || (handle->dev == NULL)) {
5076 pr_err("No valid kernel client\n");
5077 return -EINVAL;
5078 }
5079 if (high) {
5080 if (qseecom.support_bus_scaling) {
5081 mutex_lock(&qsee_bw_mutex);
5082 __qseecom_register_bus_bandwidth_needs(handle->dev,
5083 HIGH);
5084 mutex_unlock(&qsee_bw_mutex);
5085 } else {
5086 ret = qseecom_perf_enable(handle->dev);
5087 if (ret)
5088 pr_err("Failed to vote for clock with err %d\n",
5089 ret);
5090 }
5091 } else {
5092 if (!qseecom.support_bus_scaling) {
5093 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
5094 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
5095 } else {
5096 mutex_lock(&qsee_bw_mutex);
5097 qseecom_unregister_bus_bandwidth_needs(handle->dev);
5098 mutex_unlock(&qsee_bw_mutex);
5099 }
5100 }
5101 return ret;
5102}
5103EXPORT_SYMBOL(qseecom_set_bandwidth);
5104
5105int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
5106{
5107 struct qseecom_registered_app_list dummy_app_entry = { {0} };
5108 struct qseecom_dev_handle dummy_private_data = {0};
5109 struct qseecom_command_scm_resp resp;
5110 int ret = 0;
5111
5112 if (!desc) {
5113 pr_err("desc is NULL\n");
5114 return -EINVAL;
5115 }
5116
5117 resp.result = desc->ret[0]; /*req_cmd*/
Zhen Kong2f60f492017-06-29 15:22:14 -07005118 resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005119 resp.data = desc->ret[2]; /*listener_id*/
5120
Zhen Konge7f525f2017-12-01 18:26:25 -08005121 dummy_private_data.client.app_id = desc->ret[1];
Zhen Kong0ea975d2019-03-12 14:40:24 -07005122 dummy_private_data.client.from_smcinvoke = true;
Zhen Konge7f525f2017-12-01 18:26:25 -08005123 dummy_app_entry.app_id = desc->ret[1];
5124
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005125 mutex_lock(&app_access_lock);
Zhen Kong7458c2e2017-10-19 12:32:07 -07005126 if (qseecom.qsee_reentrancy_support)
5127 ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005128 &dummy_private_data);
Zhen Kong7458c2e2017-10-19 12:32:07 -07005129 else
5130 ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
5131 &resp);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005132 mutex_unlock(&app_access_lock);
5133 if (ret)
Zhen Kong2f60f492017-06-29 15:22:14 -07005134 pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005135 (int)desc->ret[0], (int)desc->ret[2],
5136 (int)desc->ret[1], ret);
5137 desc->ret[0] = resp.result;
5138 desc->ret[1] = resp.resp_type;
5139 desc->ret[2] = resp.data;
5140 return ret;
5141}
5142EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
5143
5144static int qseecom_send_resp(void)
5145{
5146 qseecom.send_resp_flag = 1;
5147 wake_up_interruptible(&qseecom.send_resp_wq);
5148 return 0;
5149}
5150
5151static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
5152{
5153 struct qseecom_registered_listener_list *this_lstnr = NULL;
5154
5155 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
5156 this_lstnr = __qseecom_find_svc(data->listener.id);
5157 if (this_lstnr == NULL)
5158 return -EINVAL;
5159 qseecom.send_resp_flag = 1;
5160 this_lstnr->send_resp_flag = 1;
5161 wake_up_interruptible(&qseecom.send_resp_wq);
5162 return 0;
5163}
5164
5165static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
5166 struct qseecom_send_modfd_listener_resp *resp,
5167 struct qseecom_registered_listener_list *this_lstnr)
5168{
5169 int i;
5170
5171 if (!data || !resp || !this_lstnr) {
5172 pr_err("listener handle or resp msg is null\n");
5173 return -EINVAL;
5174 }
5175
5176 if (resp->resp_buf_ptr == NULL) {
5177 pr_err("resp buffer is null\n");
5178 return -EINVAL;
5179 }
5180 /* validate resp buf length */
5181 if ((resp->resp_len == 0) ||
5182 (resp->resp_len > this_lstnr->sb_length)) {
5183 pr_err("resp buf length %d not valid\n", resp->resp_len);
5184 return -EINVAL;
5185 }
5186
5187 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
5188 pr_err("Integer overflow in resp_len & resp_buf\n");
5189 return -EINVAL;
5190 }
5191 if ((uintptr_t)this_lstnr->user_virt_sb_base >
5192 (ULONG_MAX - this_lstnr->sb_length)) {
5193 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
5194 return -EINVAL;
5195 }
5196 /* validate resp buf */
5197 if (((uintptr_t)resp->resp_buf_ptr <
5198 (uintptr_t)this_lstnr->user_virt_sb_base) ||
5199 ((uintptr_t)resp->resp_buf_ptr >=
5200 ((uintptr_t)this_lstnr->user_virt_sb_base +
5201 this_lstnr->sb_length)) ||
5202 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
5203 ((uintptr_t)this_lstnr->user_virt_sb_base +
5204 this_lstnr->sb_length))) {
5205 pr_err("resp buf is out of shared buffer region\n");
5206 return -EINVAL;
5207 }
5208
5209 /* validate offsets */
5210 for (i = 0; i < MAX_ION_FD; i++) {
5211 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
5212 pr_err("Invalid offset %d = 0x%x\n",
5213 i, resp->ifd_data[i].cmd_buf_offset);
5214 return -EINVAL;
5215 }
5216 }
5217
5218 return 0;
5219}
5220
5221static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5222 void __user *argp, bool is_64bit_addr)
5223{
5224 struct qseecom_send_modfd_listener_resp resp;
5225 struct qseecom_registered_listener_list *this_lstnr = NULL;
5226
5227 if (copy_from_user(&resp, argp, sizeof(resp))) {
5228 pr_err("copy_from_user failed");
5229 return -EINVAL;
5230 }
5231
5232 this_lstnr = __qseecom_find_svc(data->listener.id);
5233 if (this_lstnr == NULL)
5234 return -EINVAL;
5235
5236 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
5237 return -EINVAL;
5238
5239 resp.resp_buf_ptr = this_lstnr->sb_virt +
5240 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
5241
5242 if (!is_64bit_addr)
5243 __qseecom_update_cmd_buf(&resp, false, data);
5244 else
5245 __qseecom_update_cmd_buf_64(&resp, false, data);
5246 qseecom.send_resp_flag = 1;
5247 this_lstnr->send_resp_flag = 1;
5248 wake_up_interruptible(&qseecom.send_resp_wq);
5249 return 0;
5250}
5251
5252static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5253 void __user *argp)
5254{
5255 return __qseecom_send_modfd_resp(data, argp, false);
5256}
5257
5258static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
5259 void __user *argp)
5260{
5261 return __qseecom_send_modfd_resp(data, argp, true);
5262}
5263
5264static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
5265 void __user *argp)
5266{
5267 struct qseecom_qseos_version_req req;
5268
5269 if (copy_from_user(&req, argp, sizeof(req))) {
5270 pr_err("copy_from_user failed");
5271 return -EINVAL;
5272 }
5273 req.qseos_version = qseecom.qseos_version;
5274 if (copy_to_user(argp, &req, sizeof(req))) {
5275 pr_err("copy_to_user failed");
5276 return -EINVAL;
5277 }
5278 return 0;
5279}
5280
5281static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
5282{
5283 int rc = 0;
5284 struct qseecom_clk *qclk = NULL;
5285
5286 if (qseecom.no_clock_support)
5287 return 0;
5288
5289 if (ce == CLK_QSEE)
5290 qclk = &qseecom.qsee;
5291 if (ce == CLK_CE_DRV)
5292 qclk = &qseecom.ce_drv;
5293
5294 if (qclk == NULL) {
5295 pr_err("CLK type not supported\n");
5296 return -EINVAL;
5297 }
5298 mutex_lock(&clk_access_lock);
5299
5300 if (qclk->clk_access_cnt == ULONG_MAX) {
5301 pr_err("clk_access_cnt beyond limitation\n");
5302 goto err;
5303 }
5304 if (qclk->clk_access_cnt > 0) {
5305 qclk->clk_access_cnt++;
5306 mutex_unlock(&clk_access_lock);
5307 return rc;
5308 }
5309
5310 /* Enable CE core clk */
5311 if (qclk->ce_core_clk != NULL) {
5312 rc = clk_prepare_enable(qclk->ce_core_clk);
5313 if (rc) {
5314 pr_err("Unable to enable/prepare CE core clk\n");
5315 goto err;
5316 }
5317 }
5318 /* Enable CE clk */
5319 if (qclk->ce_clk != NULL) {
5320 rc = clk_prepare_enable(qclk->ce_clk);
5321 if (rc) {
5322 pr_err("Unable to enable/prepare CE iface clk\n");
5323 goto ce_clk_err;
5324 }
5325 }
5326 /* Enable AXI clk */
5327 if (qclk->ce_bus_clk != NULL) {
5328 rc = clk_prepare_enable(qclk->ce_bus_clk);
5329 if (rc) {
5330 pr_err("Unable to enable/prepare CE bus clk\n");
5331 goto ce_bus_clk_err;
5332 }
5333 }
5334 qclk->clk_access_cnt++;
5335 mutex_unlock(&clk_access_lock);
5336 return 0;
5337
5338ce_bus_clk_err:
5339 if (qclk->ce_clk != NULL)
5340 clk_disable_unprepare(qclk->ce_clk);
5341ce_clk_err:
5342 if (qclk->ce_core_clk != NULL)
5343 clk_disable_unprepare(qclk->ce_core_clk);
5344err:
5345 mutex_unlock(&clk_access_lock);
5346 return -EIO;
5347}
5348
5349static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5350{
5351 struct qseecom_clk *qclk;
5352
5353 if (qseecom.no_clock_support)
5354 return;
5355
5356 if (ce == CLK_QSEE)
5357 qclk = &qseecom.qsee;
5358 else
5359 qclk = &qseecom.ce_drv;
5360
5361 mutex_lock(&clk_access_lock);
5362
5363 if (qclk->clk_access_cnt == 0) {
5364 mutex_unlock(&clk_access_lock);
5365 return;
5366 }
5367
5368 if (qclk->clk_access_cnt == 1) {
5369 if (qclk->ce_clk != NULL)
5370 clk_disable_unprepare(qclk->ce_clk);
5371 if (qclk->ce_core_clk != NULL)
5372 clk_disable_unprepare(qclk->ce_core_clk);
5373 if (qclk->ce_bus_clk != NULL)
5374 clk_disable_unprepare(qclk->ce_bus_clk);
5375 }
5376 qclk->clk_access_cnt--;
5377 mutex_unlock(&clk_access_lock);
5378}
5379
5380static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
5381 int32_t clk_type)
5382{
5383 int ret = 0;
5384 struct qseecom_clk *qclk;
5385
5386 if (qseecom.no_clock_support)
5387 return 0;
5388
5389 qclk = &qseecom.qsee;
5390 if (!qseecom.qsee_perf_client)
5391 return ret;
5392
5393 switch (clk_type) {
5394 case CLK_DFAB:
5395 mutex_lock(&qsee_bw_mutex);
5396 if (!qseecom.qsee_bw_count) {
5397 if (qseecom.qsee_sfpb_bw_count > 0)
5398 ret = msm_bus_scale_client_update_request(
5399 qseecom.qsee_perf_client, 3);
5400 else {
5401 if (qclk->ce_core_src_clk != NULL)
5402 ret = __qseecom_enable_clk(CLK_QSEE);
5403 if (!ret) {
5404 ret =
5405 msm_bus_scale_client_update_request(
5406 qseecom.qsee_perf_client, 1);
5407 if ((ret) &&
5408 (qclk->ce_core_src_clk != NULL))
5409 __qseecom_disable_clk(CLK_QSEE);
5410 }
5411 }
5412 if (ret)
5413 pr_err("DFAB Bandwidth req failed (%d)\n",
5414 ret);
5415 else {
5416 qseecom.qsee_bw_count++;
5417 data->perf_enabled = true;
5418 }
5419 } else {
5420 qseecom.qsee_bw_count++;
5421 data->perf_enabled = true;
5422 }
5423 mutex_unlock(&qsee_bw_mutex);
5424 break;
5425 case CLK_SFPB:
5426 mutex_lock(&qsee_bw_mutex);
5427 if (!qseecom.qsee_sfpb_bw_count) {
5428 if (qseecom.qsee_bw_count > 0)
5429 ret = msm_bus_scale_client_update_request(
5430 qseecom.qsee_perf_client, 3);
5431 else {
5432 if (qclk->ce_core_src_clk != NULL)
5433 ret = __qseecom_enable_clk(CLK_QSEE);
5434 if (!ret) {
5435 ret =
5436 msm_bus_scale_client_update_request(
5437 qseecom.qsee_perf_client, 2);
5438 if ((ret) &&
5439 (qclk->ce_core_src_clk != NULL))
5440 __qseecom_disable_clk(CLK_QSEE);
5441 }
5442 }
5443
5444 if (ret)
5445 pr_err("SFPB Bandwidth req failed (%d)\n",
5446 ret);
5447 else {
5448 qseecom.qsee_sfpb_bw_count++;
5449 data->fast_load_enabled = true;
5450 }
5451 } else {
5452 qseecom.qsee_sfpb_bw_count++;
5453 data->fast_load_enabled = true;
5454 }
5455 mutex_unlock(&qsee_bw_mutex);
5456 break;
5457 default:
5458 pr_err("Clock type not defined\n");
5459 break;
5460 }
5461 return ret;
5462}
5463
5464static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5465 int32_t clk_type)
5466{
5467 int32_t ret = 0;
5468 struct qseecom_clk *qclk;
5469
5470 qclk = &qseecom.qsee;
5471
5472 if (qseecom.no_clock_support)
5473 return;
5474 if (!qseecom.qsee_perf_client)
5475 return;
5476
5477 switch (clk_type) {
5478 case CLK_DFAB:
5479 mutex_lock(&qsee_bw_mutex);
5480 if (qseecom.qsee_bw_count == 0) {
5481 pr_err("Client error.Extra call to disable DFAB clk\n");
5482 mutex_unlock(&qsee_bw_mutex);
5483 return;
5484 }
5485
5486 if (qseecom.qsee_bw_count == 1) {
5487 if (qseecom.qsee_sfpb_bw_count > 0)
5488 ret = msm_bus_scale_client_update_request(
5489 qseecom.qsee_perf_client, 2);
5490 else {
5491 ret = msm_bus_scale_client_update_request(
5492 qseecom.qsee_perf_client, 0);
5493 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5494 __qseecom_disable_clk(CLK_QSEE);
5495 }
5496 if (ret)
5497 pr_err("SFPB Bandwidth req fail (%d)\n",
5498 ret);
5499 else {
5500 qseecom.qsee_bw_count--;
5501 data->perf_enabled = false;
5502 }
5503 } else {
5504 qseecom.qsee_bw_count--;
5505 data->perf_enabled = false;
5506 }
5507 mutex_unlock(&qsee_bw_mutex);
5508 break;
5509 case CLK_SFPB:
5510 mutex_lock(&qsee_bw_mutex);
5511 if (qseecom.qsee_sfpb_bw_count == 0) {
5512 pr_err("Client error.Extra call to disable SFPB clk\n");
5513 mutex_unlock(&qsee_bw_mutex);
5514 return;
5515 }
5516 if (qseecom.qsee_sfpb_bw_count == 1) {
5517 if (qseecom.qsee_bw_count > 0)
5518 ret = msm_bus_scale_client_update_request(
5519 qseecom.qsee_perf_client, 1);
5520 else {
5521 ret = msm_bus_scale_client_update_request(
5522 qseecom.qsee_perf_client, 0);
5523 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5524 __qseecom_disable_clk(CLK_QSEE);
5525 }
5526 if (ret)
5527 pr_err("SFPB Bandwidth req fail (%d)\n",
5528 ret);
5529 else {
5530 qseecom.qsee_sfpb_bw_count--;
5531 data->fast_load_enabled = false;
5532 }
5533 } else {
5534 qseecom.qsee_sfpb_bw_count--;
5535 data->fast_load_enabled = false;
5536 }
5537 mutex_unlock(&qsee_bw_mutex);
5538 break;
5539 default:
5540 pr_err("Clock type not defined\n");
5541 break;
5542 }
5543
5544}
5545
5546static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5547 void __user *argp)
5548{
5549 struct ion_handle *ihandle; /* Ion handle */
5550 struct qseecom_load_img_req load_img_req;
5551 int uret = 0;
5552 int ret;
5553 ion_phys_addr_t pa = 0;
5554 size_t len;
5555 struct qseecom_load_app_ireq load_req;
5556 struct qseecom_load_app_64bit_ireq load_req_64bit;
5557 struct qseecom_command_scm_resp resp;
5558 void *cmd_buf = NULL;
5559 size_t cmd_len;
5560 /* Copy the relevant information needed for loading the image */
5561 if (copy_from_user(&load_img_req,
5562 (void __user *)argp,
5563 sizeof(struct qseecom_load_img_req))) {
5564 pr_err("copy_from_user failed\n");
5565 return -EFAULT;
5566 }
5567
5568 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005569 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005570 load_img_req.ifd_data_fd);
5571 if (IS_ERR_OR_NULL(ihandle)) {
5572 pr_err("Ion client could not retrieve the handle\n");
5573 return -ENOMEM;
5574 }
5575
5576 /* Get the physical address of the ION BUF */
5577 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5578 if (ret) {
5579 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5580 ret);
5581 return ret;
5582 }
5583 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5584 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5585 len, load_img_req.mdt_len,
5586 load_img_req.img_len);
5587 return ret;
5588 }
5589 /* Populate the structure for sending scm call to load image */
5590 if (qseecom.qsee_version < QSEE_VERSION_40) {
5591 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5592 load_req.mdt_len = load_img_req.mdt_len;
5593 load_req.img_len = load_img_req.img_len;
5594 load_req.phy_addr = (uint32_t)pa;
5595 cmd_buf = (void *)&load_req;
5596 cmd_len = sizeof(struct qseecom_load_app_ireq);
5597 } else {
5598 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5599 load_req_64bit.mdt_len = load_img_req.mdt_len;
5600 load_req_64bit.img_len = load_img_req.img_len;
5601 load_req_64bit.phy_addr = (uint64_t)pa;
5602 cmd_buf = (void *)&load_req_64bit;
5603 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5604 }
5605
5606 if (qseecom.support_bus_scaling) {
5607 mutex_lock(&qsee_bw_mutex);
5608 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5609 mutex_unlock(&qsee_bw_mutex);
5610 if (ret) {
5611 ret = -EIO;
5612 goto exit_cpu_restore;
5613 }
5614 }
5615
5616 /* Vote for the SFPB clock */
5617 ret = __qseecom_enable_clk_scale_up(data);
5618 if (ret) {
5619 ret = -EIO;
5620 goto exit_register_bus_bandwidth_needs;
5621 }
5622 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5623 ION_IOC_CLEAN_INV_CACHES);
5624 if (ret) {
5625 pr_err("cache operation failed %d\n", ret);
5626 goto exit_disable_clock;
5627 }
5628 /* SCM_CALL to load the external elf */
5629 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5630 &resp, sizeof(resp));
5631 if (ret) {
5632 pr_err("scm_call to load failed : ret %d\n",
5633 ret);
5634 ret = -EFAULT;
5635 goto exit_disable_clock;
5636 }
5637
5638 switch (resp.result) {
5639 case QSEOS_RESULT_SUCCESS:
5640 break;
5641 case QSEOS_RESULT_INCOMPLETE:
5642 pr_err("%s: qseos result incomplete\n", __func__);
5643 ret = __qseecom_process_incomplete_cmd(data, &resp);
5644 if (ret)
5645 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5646 break;
5647 case QSEOS_RESULT_FAILURE:
5648 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5649 ret = -EFAULT;
5650 break;
5651 default:
5652 pr_err("scm_call response result %d not supported\n",
5653 resp.result);
5654 ret = -EFAULT;
5655 break;
5656 }
5657
5658exit_disable_clock:
5659 __qseecom_disable_clk_scale_down(data);
5660
5661exit_register_bus_bandwidth_needs:
5662 if (qseecom.support_bus_scaling) {
5663 mutex_lock(&qsee_bw_mutex);
5664 uret = qseecom_unregister_bus_bandwidth_needs(data);
5665 mutex_unlock(&qsee_bw_mutex);
5666 if (uret)
5667 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5668 uret, ret);
5669 }
5670
5671exit_cpu_restore:
5672 /* Deallocate the handle */
5673 if (!IS_ERR_OR_NULL(ihandle))
5674 ion_free(qseecom.ion_clnt, ihandle);
5675 return ret;
5676}
5677
5678static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5679{
5680 int ret = 0;
5681 struct qseecom_command_scm_resp resp;
5682 struct qseecom_unload_app_ireq req;
5683
5684 /* unavailable client app */
5685 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5686
5687 /* Populate the structure for sending scm call to unload image */
5688 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5689
5690 /* SCM_CALL to unload the external elf */
5691 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5692 sizeof(struct qseecom_unload_app_ireq),
5693 &resp, sizeof(resp));
5694 if (ret) {
5695 pr_err("scm_call to unload failed : ret %d\n",
5696 ret);
5697 ret = -EFAULT;
5698 goto qseecom_unload_external_elf_scm_err;
5699 }
5700 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5701 ret = __qseecom_process_incomplete_cmd(data, &resp);
5702 if (ret)
5703 pr_err("process_incomplete_cmd fail err: %d\n",
5704 ret);
5705 } else {
5706 if (resp.result != QSEOS_RESULT_SUCCESS) {
5707 pr_err("scm_call to unload image failed resp.result =%d\n",
5708 resp.result);
5709 ret = -EFAULT;
5710 }
5711 }
5712
5713qseecom_unload_external_elf_scm_err:
5714
5715 return ret;
5716}
5717
5718static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5719 void __user *argp)
5720{
5721
5722 int32_t ret;
5723 struct qseecom_qseos_app_load_query query_req;
5724 struct qseecom_check_app_ireq req;
5725 struct qseecom_registered_app_list *entry = NULL;
5726 unsigned long flags = 0;
5727 uint32_t app_arch = 0, app_id = 0;
5728 bool found_app = false;
5729
5730 /* Copy the relevant information needed for loading the image */
5731 if (copy_from_user(&query_req,
5732 (void __user *)argp,
5733 sizeof(struct qseecom_qseos_app_load_query))) {
5734 pr_err("copy_from_user failed\n");
5735 return -EFAULT;
5736 }
5737
5738 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5739 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5740 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5741
5742 ret = __qseecom_check_app_exists(req, &app_id);
5743 if (ret) {
5744 pr_err(" scm call to check if app is loaded failed");
5745 return ret; /* scm call failed */
5746 }
5747 if (app_id) {
5748 pr_debug("App id %d (%s) already exists\n", app_id,
5749 (char *)(req.app_name));
5750 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5751 list_for_each_entry(entry,
5752 &qseecom.registered_app_list_head, list){
5753 if (entry->app_id == app_id) {
5754 app_arch = entry->app_arch;
5755 entry->ref_cnt++;
5756 found_app = true;
5757 break;
5758 }
5759 }
5760 spin_unlock_irqrestore(
5761 &qseecom.registered_app_list_lock, flags);
5762 data->client.app_id = app_id;
5763 query_req.app_id = app_id;
5764 if (app_arch) {
5765 data->client.app_arch = app_arch;
5766 query_req.app_arch = app_arch;
5767 } else {
5768 data->client.app_arch = 0;
5769 query_req.app_arch = 0;
5770 }
5771 strlcpy(data->client.app_name, query_req.app_name,
5772 MAX_APP_NAME_SIZE);
5773 /*
5774 * If app was loaded by appsbl before and was not registered,
5775 * regiser this app now.
5776 */
5777 if (!found_app) {
5778 pr_debug("Register app %d [%s] which was loaded before\n",
5779 ret, (char *)query_req.app_name);
5780 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5781 if (!entry) {
5782 pr_err("kmalloc for app entry failed\n");
5783 return -ENOMEM;
5784 }
5785 entry->app_id = app_id;
5786 entry->ref_cnt = 1;
5787 entry->app_arch = data->client.app_arch;
5788 strlcpy(entry->app_name, data->client.app_name,
5789 MAX_APP_NAME_SIZE);
5790 entry->app_blocked = false;
5791 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005792 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005793 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5794 flags);
5795 list_add_tail(&entry->list,
5796 &qseecom.registered_app_list_head);
5797 spin_unlock_irqrestore(
5798 &qseecom.registered_app_list_lock, flags);
5799 }
5800 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5801 pr_err("copy_to_user failed\n");
5802 return -EFAULT;
5803 }
5804 return -EEXIST; /* app already loaded */
5805 } else {
5806 return 0; /* app not loaded */
5807 }
5808}
5809
5810static int __qseecom_get_ce_pipe_info(
5811 enum qseecom_key_management_usage_type usage,
5812 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5813{
5814 int ret = -EINVAL;
5815 int i, j;
5816 struct qseecom_ce_info_use *p = NULL;
5817 int total = 0;
5818 struct qseecom_ce_pipe_entry *pcepipe;
5819
5820 switch (usage) {
5821 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5822 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5823 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5824 if (qseecom.support_fde) {
5825 p = qseecom.ce_info.fde;
5826 total = qseecom.ce_info.num_fde;
5827 } else {
5828 pr_err("system does not support fde\n");
5829 return -EINVAL;
5830 }
5831 break;
5832 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5833 if (qseecom.support_pfe) {
5834 p = qseecom.ce_info.pfe;
5835 total = qseecom.ce_info.num_pfe;
5836 } else {
5837 pr_err("system does not support pfe\n");
5838 return -EINVAL;
5839 }
5840 break;
5841 default:
5842 pr_err("unsupported usage %d\n", usage);
5843 return -EINVAL;
5844 }
5845
5846 for (j = 0; j < total; j++) {
5847 if (p->unit_num == unit) {
5848 pcepipe = p->ce_pipe_entry;
5849 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5850 (*ce_hw)[i] = pcepipe->ce_num;
5851 *pipe = pcepipe->ce_pipe_pair;
5852 pcepipe++;
5853 }
5854 ret = 0;
5855 break;
5856 }
5857 p++;
5858 }
5859 return ret;
5860}
5861
5862static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
5863 enum qseecom_key_management_usage_type usage,
5864 struct qseecom_key_generate_ireq *ireq)
5865{
5866 struct qseecom_command_scm_resp resp;
5867 int ret;
5868
5869 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5870 usage >= QSEOS_KM_USAGE_MAX) {
5871 pr_err("Error:: unsupported usage %d\n", usage);
5872 return -EFAULT;
5873 }
5874 ret = __qseecom_enable_clk(CLK_QSEE);
5875 if (ret)
5876 return ret;
5877
5878 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5879 ireq, sizeof(struct qseecom_key_generate_ireq),
5880 &resp, sizeof(resp));
5881 if (ret) {
5882 if (ret == -EINVAL &&
5883 resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5884 pr_debug("Key ID exists.\n");
5885 ret = 0;
5886 } else {
5887 pr_err("scm call to generate key failed : %d\n", ret);
5888 ret = -EFAULT;
5889 }
5890 goto generate_key_exit;
5891 }
5892
5893 switch (resp.result) {
5894 case QSEOS_RESULT_SUCCESS:
5895 break;
5896 case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
5897 pr_debug("Key ID exists.\n");
5898 break;
5899 case QSEOS_RESULT_INCOMPLETE:
5900 ret = __qseecom_process_incomplete_cmd(data, &resp);
5901 if (ret) {
5902 if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5903 pr_debug("Key ID exists.\n");
5904 ret = 0;
5905 } else {
5906 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5907 resp.result);
5908 }
5909 }
5910 break;
5911 case QSEOS_RESULT_FAILURE:
5912 default:
5913 pr_err("gen key scm call failed resp.result %d\n", resp.result);
5914 ret = -EINVAL;
5915 break;
5916 }
5917generate_key_exit:
5918 __qseecom_disable_clk(CLK_QSEE);
5919 return ret;
5920}
5921
5922static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
5923 enum qseecom_key_management_usage_type usage,
5924 struct qseecom_key_delete_ireq *ireq)
5925{
5926 struct qseecom_command_scm_resp resp;
5927 int ret;
5928
5929 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5930 usage >= QSEOS_KM_USAGE_MAX) {
5931 pr_err("Error:: unsupported usage %d\n", usage);
5932 return -EFAULT;
5933 }
5934 ret = __qseecom_enable_clk(CLK_QSEE);
5935 if (ret)
5936 return ret;
5937
5938 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5939 ireq, sizeof(struct qseecom_key_delete_ireq),
5940 &resp, sizeof(struct qseecom_command_scm_resp));
5941 if (ret) {
5942 if (ret == -EINVAL &&
5943 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5944 pr_debug("Max attempts to input password reached.\n");
5945 ret = -ERANGE;
5946 } else {
5947 pr_err("scm call to delete key failed : %d\n", ret);
5948 ret = -EFAULT;
5949 }
5950 goto del_key_exit;
5951 }
5952
5953 switch (resp.result) {
5954 case QSEOS_RESULT_SUCCESS:
5955 break;
5956 case QSEOS_RESULT_INCOMPLETE:
5957 ret = __qseecom_process_incomplete_cmd(data, &resp);
5958 if (ret) {
5959 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5960 resp.result);
5961 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5962 pr_debug("Max attempts to input password reached.\n");
5963 ret = -ERANGE;
5964 }
5965 }
5966 break;
5967 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5968 pr_debug("Max attempts to input password reached.\n");
5969 ret = -ERANGE;
5970 break;
5971 case QSEOS_RESULT_FAILURE:
5972 default:
5973 pr_err("Delete key scm call failed resp.result %d\n",
5974 resp.result);
5975 ret = -EINVAL;
5976 break;
5977 }
5978del_key_exit:
5979 __qseecom_disable_clk(CLK_QSEE);
5980 return ret;
5981}
5982
5983static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5984 enum qseecom_key_management_usage_type usage,
5985 struct qseecom_key_select_ireq *ireq)
5986{
5987 struct qseecom_command_scm_resp resp;
5988 int ret;
5989
5990 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5991 usage >= QSEOS_KM_USAGE_MAX) {
5992 pr_err("Error:: unsupported usage %d\n", usage);
5993 return -EFAULT;
5994 }
5995 ret = __qseecom_enable_clk(CLK_QSEE);
5996 if (ret)
5997 return ret;
5998
5999 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
6000 ret = __qseecom_enable_clk(CLK_CE_DRV);
6001 if (ret)
6002 return ret;
6003 }
6004
6005 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6006 ireq, sizeof(struct qseecom_key_select_ireq),
6007 &resp, sizeof(struct qseecom_command_scm_resp));
6008 if (ret) {
6009 if (ret == -EINVAL &&
6010 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
6011 pr_debug("Max attempts to input password reached.\n");
6012 ret = -ERANGE;
6013 } else if (ret == -EINVAL &&
6014 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
6015 pr_debug("Set Key operation under processing...\n");
6016 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6017 } else {
6018 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
6019 ret);
6020 ret = -EFAULT;
6021 }
6022 goto set_key_exit;
6023 }
6024
6025 switch (resp.result) {
6026 case QSEOS_RESULT_SUCCESS:
6027 break;
6028 case QSEOS_RESULT_INCOMPLETE:
6029 ret = __qseecom_process_incomplete_cmd(data, &resp);
6030 if (ret) {
6031 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
6032 resp.result);
6033 if (resp.result ==
6034 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
6035 pr_debug("Set Key operation under processing...\n");
6036 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6037 }
6038 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
6039 pr_debug("Max attempts to input password reached.\n");
6040 ret = -ERANGE;
6041 }
6042 }
6043 break;
6044 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
6045 pr_debug("Max attempts to input password reached.\n");
6046 ret = -ERANGE;
6047 break;
6048 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
6049 pr_debug("Set Key operation under processing...\n");
6050 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6051 break;
6052 case QSEOS_RESULT_FAILURE:
6053 default:
6054 pr_err("Set key scm call failed resp.result %d\n", resp.result);
6055 ret = -EINVAL;
6056 break;
6057 }
6058set_key_exit:
6059 __qseecom_disable_clk(CLK_QSEE);
6060 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
6061 __qseecom_disable_clk(CLK_CE_DRV);
6062 return ret;
6063}
6064
6065static int __qseecom_update_current_key_user_info(
6066 struct qseecom_dev_handle *data,
6067 enum qseecom_key_management_usage_type usage,
6068 struct qseecom_key_userinfo_update_ireq *ireq)
6069{
6070 struct qseecom_command_scm_resp resp;
6071 int ret;
6072
6073 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6074 usage >= QSEOS_KM_USAGE_MAX) {
6075 pr_err("Error:: unsupported usage %d\n", usage);
6076 return -EFAULT;
6077 }
6078 ret = __qseecom_enable_clk(CLK_QSEE);
6079 if (ret)
6080 return ret;
6081
6082 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6083 ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
6084 &resp, sizeof(struct qseecom_command_scm_resp));
6085 if (ret) {
6086 if (ret == -EINVAL &&
6087 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
6088 pr_debug("Set Key operation under processing...\n");
6089 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6090 } else {
6091 pr_err("scm call to update key userinfo failed: %d\n",
6092 ret);
6093 __qseecom_disable_clk(CLK_QSEE);
6094 return -EFAULT;
6095 }
6096 }
6097
6098 switch (resp.result) {
6099 case QSEOS_RESULT_SUCCESS:
6100 break;
6101 case QSEOS_RESULT_INCOMPLETE:
6102 ret = __qseecom_process_incomplete_cmd(data, &resp);
6103 if (resp.result ==
6104 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
6105 pr_debug("Set Key operation under processing...\n");
6106 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6107 }
6108 if (ret)
6109 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
6110 resp.result);
6111 break;
6112 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
6113 pr_debug("Update Key operation under processing...\n");
6114 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6115 break;
6116 case QSEOS_RESULT_FAILURE:
6117 default:
6118 pr_err("Set key scm call failed resp.result %d\n", resp.result);
6119 ret = -EINVAL;
6120 break;
6121 }
6122
6123 __qseecom_disable_clk(CLK_QSEE);
6124 return ret;
6125}
6126
6127
6128static int qseecom_enable_ice_setup(int usage)
6129{
6130 int ret = 0;
6131
6132 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
6133 ret = qcom_ice_setup_ice_hw("ufs", true);
6134 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
6135 ret = qcom_ice_setup_ice_hw("sdcc", true);
6136
6137 return ret;
6138}
6139
6140static int qseecom_disable_ice_setup(int usage)
6141{
6142 int ret = 0;
6143
6144 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
6145 ret = qcom_ice_setup_ice_hw("ufs", false);
6146 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
6147 ret = qcom_ice_setup_ice_hw("sdcc", false);
6148
6149 return ret;
6150}
6151
6152static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
6153{
6154 struct qseecom_ce_info_use *pce_info_use, *p;
6155 int total = 0;
6156 int i;
6157
6158 switch (usage) {
6159 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
6160 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
6161 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
6162 p = qseecom.ce_info.fde;
6163 total = qseecom.ce_info.num_fde;
6164 break;
6165 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
6166 p = qseecom.ce_info.pfe;
6167 total = qseecom.ce_info.num_pfe;
6168 break;
6169 default:
6170 pr_err("unsupported usage %d\n", usage);
6171 return -EINVAL;
6172 }
6173
6174 pce_info_use = NULL;
6175
6176 for (i = 0; i < total; i++) {
6177 if (p->unit_num == unit) {
6178 pce_info_use = p;
6179 break;
6180 }
6181 p++;
6182 }
6183 if (!pce_info_use) {
6184 pr_err("can not find %d\n", unit);
6185 return -EINVAL;
6186 }
6187 return pce_info_use->num_ce_pipe_entries;
6188}
6189
6190static int qseecom_create_key(struct qseecom_dev_handle *data,
6191 void __user *argp)
6192{
6193 int i;
6194 uint32_t *ce_hw = NULL;
6195 uint32_t pipe = 0;
6196 int ret = 0;
6197 uint32_t flags = 0;
6198 struct qseecom_create_key_req create_key_req;
6199 struct qseecom_key_generate_ireq generate_key_ireq;
6200 struct qseecom_key_select_ireq set_key_ireq;
6201 uint32_t entries = 0;
6202
6203 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
6204 if (ret) {
6205 pr_err("copy_from_user failed\n");
6206 return ret;
6207 }
6208
6209 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6210 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6211 pr_err("unsupported usage %d\n", create_key_req.usage);
6212 ret = -EFAULT;
6213 return ret;
6214 }
6215 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6216 create_key_req.usage);
6217 if (entries <= 0) {
6218 pr_err("no ce instance for usage %d instance %d\n",
6219 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
6220 ret = -EINVAL;
6221 return ret;
6222 }
6223
6224 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6225 if (!ce_hw) {
6226 ret = -ENOMEM;
6227 return ret;
6228 }
6229 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
6230 DEFAULT_CE_INFO_UNIT);
6231 if (ret) {
6232 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6233 ret = -EINVAL;
6234 goto free_buf;
6235 }
6236
6237 if (qseecom.fde_key_size)
6238 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6239 else
6240 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6241
Jiten Patela7bb1d52018-05-11 12:34:26 +05306242 if (qseecom.enable_key_wrap_in_ks == true)
6243 flags |= ENABLE_KEY_WRAP_IN_KS;
6244
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006245 generate_key_ireq.flags = flags;
6246 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
6247 memset((void *)generate_key_ireq.key_id,
6248 0, QSEECOM_KEY_ID_SIZE);
6249 memset((void *)generate_key_ireq.hash32,
6250 0, QSEECOM_HASH_SIZE);
6251 memcpy((void *)generate_key_ireq.key_id,
6252 (void *)key_id_array[create_key_req.usage].desc,
6253 QSEECOM_KEY_ID_SIZE);
6254 memcpy((void *)generate_key_ireq.hash32,
6255 (void *)create_key_req.hash32,
6256 QSEECOM_HASH_SIZE);
6257
6258 ret = __qseecom_generate_and_save_key(data,
6259 create_key_req.usage, &generate_key_ireq);
6260 if (ret) {
6261 pr_err("Failed to generate key on storage: %d\n", ret);
6262 goto free_buf;
6263 }
6264
6265 for (i = 0; i < entries; i++) {
6266 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6267 if (create_key_req.usage ==
6268 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6269 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6270 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6271
6272 } else if (create_key_req.usage ==
6273 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6274 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6275 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6276
6277 } else {
6278 set_key_ireq.ce = ce_hw[i];
6279 set_key_ireq.pipe = pipe;
6280 }
6281 set_key_ireq.flags = flags;
6282
6283 /* set both PIPE_ENC and PIPE_ENC_XTS*/
6284 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6285 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6286 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6287 memcpy((void *)set_key_ireq.key_id,
6288 (void *)key_id_array[create_key_req.usage].desc,
6289 QSEECOM_KEY_ID_SIZE);
6290 memcpy((void *)set_key_ireq.hash32,
6291 (void *)create_key_req.hash32,
6292 QSEECOM_HASH_SIZE);
6293 /*
6294 * It will return false if it is GPCE based crypto instance or
6295 * ICE is setup properly
6296 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006297 ret = qseecom_enable_ice_setup(create_key_req.usage);
6298 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006299 goto free_buf;
6300
6301 do {
6302 ret = __qseecom_set_clear_ce_key(data,
6303 create_key_req.usage,
6304 &set_key_ireq);
6305 /*
6306 * wait a little before calling scm again to let other
6307 * processes run
6308 */
6309 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6310 msleep(50);
6311
6312 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6313
6314 qseecom_disable_ice_setup(create_key_req.usage);
6315
6316 if (ret) {
6317 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6318 pipe, ce_hw[i], ret);
6319 goto free_buf;
6320 } else {
6321 pr_err("Set the key successfully\n");
6322 if ((create_key_req.usage ==
6323 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6324 (create_key_req.usage ==
6325 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6326 goto free_buf;
6327 }
6328 }
6329
6330free_buf:
6331 kzfree(ce_hw);
6332 return ret;
6333}
6334
6335static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6336 void __user *argp)
6337{
6338 uint32_t *ce_hw = NULL;
6339 uint32_t pipe = 0;
6340 int ret = 0;
6341 uint32_t flags = 0;
6342 int i, j;
6343 struct qseecom_wipe_key_req wipe_key_req;
6344 struct qseecom_key_delete_ireq delete_key_ireq;
6345 struct qseecom_key_select_ireq clear_key_ireq;
6346 uint32_t entries = 0;
6347
6348 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6349 if (ret) {
6350 pr_err("copy_from_user failed\n");
6351 return ret;
6352 }
6353
6354 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6355 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6356 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6357 ret = -EFAULT;
6358 return ret;
6359 }
6360
6361 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6362 wipe_key_req.usage);
6363 if (entries <= 0) {
6364 pr_err("no ce instance for usage %d instance %d\n",
6365 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6366 ret = -EINVAL;
6367 return ret;
6368 }
6369
6370 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6371 if (!ce_hw) {
6372 ret = -ENOMEM;
6373 return ret;
6374 }
6375
6376 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6377 DEFAULT_CE_INFO_UNIT);
6378 if (ret) {
6379 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6380 ret = -EINVAL;
6381 goto free_buf;
6382 }
6383
6384 if (wipe_key_req.wipe_key_flag) {
6385 delete_key_ireq.flags = flags;
6386 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6387 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6388 memcpy((void *)delete_key_ireq.key_id,
6389 (void *)key_id_array[wipe_key_req.usage].desc,
6390 QSEECOM_KEY_ID_SIZE);
6391 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6392
6393 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6394 &delete_key_ireq);
6395 if (ret) {
6396 pr_err("Failed to delete key from ssd storage: %d\n",
6397 ret);
6398 ret = -EFAULT;
6399 goto free_buf;
6400 }
6401 }
6402
6403 for (j = 0; j < entries; j++) {
6404 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6405 if (wipe_key_req.usage ==
6406 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6407 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6408 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6409 } else if (wipe_key_req.usage ==
6410 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6411 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6412 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6413 } else {
6414 clear_key_ireq.ce = ce_hw[j];
6415 clear_key_ireq.pipe = pipe;
6416 }
6417 clear_key_ireq.flags = flags;
6418 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6419 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6420 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6421 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6422
6423 /*
6424 * It will return false if it is GPCE based crypto instance or
6425 * ICE is setup properly
6426 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006427 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6428 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006429 goto free_buf;
6430
6431 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6432 &clear_key_ireq);
6433
6434 qseecom_disable_ice_setup(wipe_key_req.usage);
6435
6436 if (ret) {
6437 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6438 pipe, ce_hw[j], ret);
6439 ret = -EFAULT;
6440 goto free_buf;
6441 }
6442 }
6443
6444free_buf:
6445 kzfree(ce_hw);
6446 return ret;
6447}
6448
6449static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6450 void __user *argp)
6451{
6452 int ret = 0;
6453 uint32_t flags = 0;
6454 struct qseecom_update_key_userinfo_req update_key_req;
6455 struct qseecom_key_userinfo_update_ireq ireq;
6456
6457 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6458 if (ret) {
6459 pr_err("copy_from_user failed\n");
6460 return ret;
6461 }
6462
6463 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6464 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6465 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6466 return -EFAULT;
6467 }
6468
6469 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6470
6471 if (qseecom.fde_key_size)
6472 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6473 else
6474 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6475
6476 ireq.flags = flags;
6477 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6478 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6479 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6480 memcpy((void *)ireq.key_id,
6481 (void *)key_id_array[update_key_req.usage].desc,
6482 QSEECOM_KEY_ID_SIZE);
6483 memcpy((void *)ireq.current_hash32,
6484 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6485 memcpy((void *)ireq.new_hash32,
6486 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6487
6488 do {
6489 ret = __qseecom_update_current_key_user_info(data,
6490 update_key_req.usage,
6491 &ireq);
6492 /*
6493 * wait a little before calling scm again to let other
6494 * processes run
6495 */
6496 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6497 msleep(50);
6498
6499 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6500 if (ret) {
6501 pr_err("Failed to update key info: %d\n", ret);
6502 return ret;
6503 }
6504 return ret;
6505
6506}
6507static int qseecom_is_es_activated(void __user *argp)
6508{
Zhen Kong26e62742018-05-04 17:19:06 -07006509 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006510 struct qseecom_command_scm_resp resp;
6511 int ret;
6512
6513 if (qseecom.qsee_version < QSEE_VERSION_04) {
6514 pr_err("invalid qsee version\n");
6515 return -ENODEV;
6516 }
6517
6518 if (argp == NULL) {
6519 pr_err("arg is null\n");
6520 return -EINVAL;
6521 }
6522
6523 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6524 &req, sizeof(req), &resp, sizeof(resp));
6525 if (ret) {
6526 pr_err("scm_call failed\n");
6527 return ret;
6528 }
6529
6530 req.is_activated = resp.result;
6531 ret = copy_to_user(argp, &req, sizeof(req));
6532 if (ret) {
6533 pr_err("copy_to_user failed\n");
6534 return ret;
6535 }
6536
6537 return 0;
6538}
6539
6540static int qseecom_save_partition_hash(void __user *argp)
6541{
6542 struct qseecom_save_partition_hash_req req;
6543 struct qseecom_command_scm_resp resp;
6544 int ret;
6545
6546 memset(&resp, 0x00, sizeof(resp));
6547
6548 if (qseecom.qsee_version < QSEE_VERSION_04) {
6549 pr_err("invalid qsee version\n");
6550 return -ENODEV;
6551 }
6552
6553 if (argp == NULL) {
6554 pr_err("arg is null\n");
6555 return -EINVAL;
6556 }
6557
6558 ret = copy_from_user(&req, argp, sizeof(req));
6559 if (ret) {
6560 pr_err("copy_from_user failed\n");
6561 return ret;
6562 }
6563
6564 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6565 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6566 if (ret) {
6567 pr_err("qseecom_scm_call failed\n");
6568 return ret;
6569 }
6570
6571 return 0;
6572}
6573
6574static int qseecom_mdtp_cipher_dip(void __user *argp)
6575{
6576 struct qseecom_mdtp_cipher_dip_req req;
6577 u32 tzbuflenin, tzbuflenout;
6578 char *tzbufin = NULL, *tzbufout = NULL;
6579 struct scm_desc desc = {0};
6580 int ret;
6581
6582 do {
6583 /* Copy the parameters from userspace */
6584 if (argp == NULL) {
6585 pr_err("arg is null\n");
6586 ret = -EINVAL;
6587 break;
6588 }
6589
6590 ret = copy_from_user(&req, argp, sizeof(req));
6591 if (ret) {
6592 pr_err("copy_from_user failed, ret= %d\n", ret);
6593 break;
6594 }
6595
6596 if (req.in_buf == NULL || req.out_buf == NULL ||
6597 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6598 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6599 req.direction > 1) {
6600 pr_err("invalid parameters\n");
6601 ret = -EINVAL;
6602 break;
6603 }
6604
6605 /* Copy the input buffer from userspace to kernel space */
6606 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6607 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6608 if (!tzbufin) {
6609 pr_err("error allocating in buffer\n");
6610 ret = -ENOMEM;
6611 break;
6612 }
6613
6614 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6615 if (ret) {
6616 pr_err("copy_from_user failed, ret=%d\n", ret);
6617 break;
6618 }
6619
6620 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6621
6622 /* Prepare the output buffer in kernel space */
6623 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6624 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6625 if (!tzbufout) {
6626 pr_err("error allocating out buffer\n");
6627 ret = -ENOMEM;
6628 break;
6629 }
6630
6631 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6632
6633 /* Send the command to TZ */
6634 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6635 desc.args[0] = virt_to_phys(tzbufin);
6636 desc.args[1] = req.in_buf_size;
6637 desc.args[2] = virt_to_phys(tzbufout);
6638 desc.args[3] = req.out_buf_size;
6639 desc.args[4] = req.direction;
6640
6641 ret = __qseecom_enable_clk(CLK_QSEE);
6642 if (ret)
6643 break;
6644
Zhen Kong03f220d2019-02-01 17:12:34 -08006645 ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006646
6647 __qseecom_disable_clk(CLK_QSEE);
6648
6649 if (ret) {
6650 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6651 ret);
6652 break;
6653 }
6654
6655 /* Copy the output buffer from kernel space to userspace */
6656 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6657 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6658 if (ret) {
6659 pr_err("copy_to_user failed, ret=%d\n", ret);
6660 break;
6661 }
6662 } while (0);
6663
6664 kzfree(tzbufin);
6665 kzfree(tzbufout);
6666
6667 return ret;
6668}
6669
6670static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6671 struct qseecom_qteec_req *req)
6672{
6673 if (!data || !data->client.ihandle) {
6674 pr_err("Client or client handle is not initialized\n");
6675 return -EINVAL;
6676 }
6677
6678 if (data->type != QSEECOM_CLIENT_APP)
6679 return -EFAULT;
6680
6681 if (req->req_len > UINT_MAX - req->resp_len) {
6682 pr_err("Integer overflow detected in req_len & rsp_len\n");
6683 return -EINVAL;
6684 }
6685
6686 if (req->req_len + req->resp_len > data->client.sb_length) {
6687 pr_debug("Not enough memory to fit cmd_buf.\n");
6688 pr_debug("resp_buf. Required: %u, Available: %zu\n",
6689 (req->req_len + req->resp_len), data->client.sb_length);
6690 return -ENOMEM;
6691 }
6692
6693 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6694 pr_err("cmd buffer or response buffer is null\n");
6695 return -EINVAL;
6696 }
6697 if (((uintptr_t)req->req_ptr <
6698 data->client.user_virt_sb_base) ||
6699 ((uintptr_t)req->req_ptr >=
6700 (data->client.user_virt_sb_base + data->client.sb_length))) {
6701 pr_err("cmd buffer address not within shared bufffer\n");
6702 return -EINVAL;
6703 }
6704
6705 if (((uintptr_t)req->resp_ptr <
6706 data->client.user_virt_sb_base) ||
6707 ((uintptr_t)req->resp_ptr >=
6708 (data->client.user_virt_sb_base + data->client.sb_length))) {
6709 pr_err("response buffer address not within shared bufffer\n");
6710 return -EINVAL;
6711 }
6712
6713 if ((req->req_len == 0) || (req->resp_len == 0)) {
6714 pr_err("cmd buf lengtgh/response buf length not valid\n");
6715 return -EINVAL;
6716 }
6717
6718 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6719 pr_err("Integer overflow in req_len & req_ptr\n");
6720 return -EINVAL;
6721 }
6722
6723 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6724 pr_err("Integer overflow in resp_len & resp_ptr\n");
6725 return -EINVAL;
6726 }
6727
6728 if (data->client.user_virt_sb_base >
6729 (ULONG_MAX - data->client.sb_length)) {
6730 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6731 return -EINVAL;
6732 }
6733 if ((((uintptr_t)req->req_ptr + req->req_len) >
6734 ((uintptr_t)data->client.user_virt_sb_base +
6735 data->client.sb_length)) ||
6736 (((uintptr_t)req->resp_ptr + req->resp_len) >
6737 ((uintptr_t)data->client.user_virt_sb_base +
6738 data->client.sb_length))) {
6739 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6740 return -EINVAL;
6741 }
6742 return 0;
6743}
6744
6745static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6746 uint32_t fd_idx, struct sg_table *sg_ptr)
6747{
6748 struct scatterlist *sg = sg_ptr->sgl;
6749 struct qseecom_sg_entry *sg_entry;
6750 void *buf;
6751 uint i;
6752 size_t size;
6753 dma_addr_t coh_pmem;
6754
6755 if (fd_idx >= MAX_ION_FD) {
6756 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6757 return -ENOMEM;
6758 }
6759 /*
6760 * Allocate a buffer, populate it with number of entry plus
6761 * each sg entry's phy addr and length; then return the
6762 * phy_addr of the buffer.
6763 */
6764 size = sizeof(uint32_t) +
6765 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6766 size = (size + PAGE_SIZE) & PAGE_MASK;
6767 buf = dma_alloc_coherent(qseecom.pdev,
6768 size, &coh_pmem, GFP_KERNEL);
6769 if (buf == NULL) {
6770 pr_err("failed to alloc memory for sg buf\n");
6771 return -ENOMEM;
6772 }
6773 *(uint32_t *)buf = sg_ptr->nents;
6774 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6775 for (i = 0; i < sg_ptr->nents; i++) {
6776 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6777 sg_entry->len = sg->length;
6778 sg_entry++;
6779 sg = sg_next(sg);
6780 }
6781 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6782 data->client.sec_buf_fd[fd_idx].vbase = buf;
6783 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6784 data->client.sec_buf_fd[fd_idx].size = size;
6785 return 0;
6786}
6787
6788static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6789 struct qseecom_dev_handle *data, bool cleanup)
6790{
6791 struct ion_handle *ihandle;
6792 int ret = 0;
6793 int i = 0;
6794 uint32_t *update;
6795 struct sg_table *sg_ptr = NULL;
6796 struct scatterlist *sg;
6797 struct qseecom_param_memref *memref;
6798
6799 if (req == NULL) {
6800 pr_err("Invalid address\n");
6801 return -EINVAL;
6802 }
6803 for (i = 0; i < MAX_ION_FD; i++) {
6804 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006805 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006806 req->ifd_data[i].fd);
6807 if (IS_ERR_OR_NULL(ihandle)) {
6808 pr_err("Ion client can't retrieve the handle\n");
6809 return -ENOMEM;
6810 }
6811 if ((req->req_len < sizeof(uint32_t)) ||
6812 (req->ifd_data[i].cmd_buf_offset >
6813 req->req_len - sizeof(uint32_t))) {
6814 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6815 req->req_len,
6816 req->ifd_data[i].cmd_buf_offset);
6817 return -EINVAL;
6818 }
6819 update = (uint32_t *)((char *) req->req_ptr +
6820 req->ifd_data[i].cmd_buf_offset);
6821 if (!update) {
6822 pr_err("update pointer is NULL\n");
6823 return -EINVAL;
6824 }
6825 } else {
6826 continue;
6827 }
6828 /* Populate the cmd data structure with the phys_addr */
6829 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6830 if (IS_ERR_OR_NULL(sg_ptr)) {
6831 pr_err("IOn client could not retrieve sg table\n");
6832 goto err;
6833 }
6834 sg = sg_ptr->sgl;
6835 if (sg == NULL) {
6836 pr_err("sg is NULL\n");
6837 goto err;
6838 }
6839 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6840 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6841 sg_ptr->nents, sg->length);
6842 goto err;
6843 }
6844 /* clean up buf for pre-allocated fd */
6845 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6846 (*update)) {
6847 if (data->client.sec_buf_fd[i].vbase)
6848 dma_free_coherent(qseecom.pdev,
6849 data->client.sec_buf_fd[i].size,
6850 data->client.sec_buf_fd[i].vbase,
6851 data->client.sec_buf_fd[i].pbase);
6852 memset((void *)update, 0,
6853 sizeof(struct qseecom_param_memref));
6854 memset(&(data->client.sec_buf_fd[i]), 0,
6855 sizeof(struct qseecom_sec_buf_fd_info));
6856 goto clean;
6857 }
6858
6859 if (*update == 0) {
6860 /* update buf for pre-allocated fd from secure heap*/
6861 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6862 sg_ptr);
6863 if (ret) {
6864 pr_err("Failed to handle buf for fd[%d]\n", i);
6865 goto err;
6866 }
6867 memref = (struct qseecom_param_memref *)update;
6868 memref->buffer =
6869 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6870 memref->size =
6871 (uint32_t)(data->client.sec_buf_fd[i].size);
6872 } else {
6873 /* update buf for fd from non-secure qseecom heap */
6874 if (sg_ptr->nents != 1) {
6875 pr_err("Num of scat entr (%d) invalid\n",
6876 sg_ptr->nents);
6877 goto err;
6878 }
6879 if (cleanup)
6880 *update = 0;
6881 else
6882 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6883 }
6884clean:
6885 if (cleanup) {
6886 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6887 ihandle, NULL, sg->length,
6888 ION_IOC_INV_CACHES);
6889 if (ret) {
6890 pr_err("cache operation failed %d\n", ret);
6891 goto err;
6892 }
6893 } else {
6894 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6895 ihandle, NULL, sg->length,
6896 ION_IOC_CLEAN_INV_CACHES);
6897 if (ret) {
6898 pr_err("cache operation failed %d\n", ret);
6899 goto err;
6900 }
6901 data->sglistinfo_ptr[i].indexAndFlags =
6902 SGLISTINFO_SET_INDEX_FLAG(
6903 (sg_ptr->nents == 1), 0,
6904 req->ifd_data[i].cmd_buf_offset);
6905 data->sglistinfo_ptr[i].sizeOrCount =
6906 (sg_ptr->nents == 1) ?
6907 sg->length : sg_ptr->nents;
6908 data->sglist_cnt = i + 1;
6909 }
6910 /* Deallocate the handle */
6911 if (!IS_ERR_OR_NULL(ihandle))
6912 ion_free(qseecom.ion_clnt, ihandle);
6913 }
6914 return ret;
6915err:
6916 if (!IS_ERR_OR_NULL(ihandle))
6917 ion_free(qseecom.ion_clnt, ihandle);
6918 return -ENOMEM;
6919}
6920
6921static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
6922 struct qseecom_qteec_req *req, uint32_t cmd_id)
6923{
6924 struct qseecom_command_scm_resp resp;
6925 struct qseecom_qteec_ireq ireq;
6926 struct qseecom_qteec_64bit_ireq ireq_64bit;
6927 struct qseecom_registered_app_list *ptr_app;
6928 bool found_app = false;
6929 unsigned long flags;
6930 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07006931 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006932 uint32_t reqd_len_sb_in = 0;
6933 void *cmd_buf = NULL;
6934 size_t cmd_len;
6935 struct sglist_info *table = data->sglistinfo_ptr;
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306936 void *req_ptr = NULL;
6937 void *resp_ptr = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006938
6939 ret = __qseecom_qteec_validate_msg(data, req);
6940 if (ret)
6941 return ret;
6942
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306943 req_ptr = req->req_ptr;
6944 resp_ptr = req->resp_ptr;
6945
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006946 /* find app_id & img_name from list */
6947 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6948 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6949 list) {
6950 if ((ptr_app->app_id == data->client.app_id) &&
6951 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6952 found_app = true;
6953 break;
6954 }
6955 }
6956 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6957 if (!found_app) {
6958 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6959 (char *)data->client.app_name);
6960 return -ENOENT;
6961 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07006962 if (__qseecom_find_pending_unload_app(data->client.app_id,
6963 data->client.app_name)) {
6964 pr_err("app %d (%s) unload is pending\n",
6965 data->client.app_id, data->client.app_name);
6966 return -ENOENT;
6967 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006968
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306969 req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6970 (uintptr_t)req->req_ptr);
6971 req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6972 (uintptr_t)req->resp_ptr);
6973
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006974 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6975 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
6976 ret = __qseecom_update_qteec_req_buf(
6977 (struct qseecom_qteec_modfd_req *)req, data, false);
6978 if (ret)
6979 return ret;
6980 }
6981
6982 if (qseecom.qsee_version < QSEE_VERSION_40) {
6983 ireq.app_id = data->client.app_id;
6984 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306985 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006986 ireq.req_len = req->req_len;
6987 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306988 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006989 ireq.resp_len = req->resp_len;
6990 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6991 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6992 dmac_flush_range((void *)table,
6993 (void *)table + SGLISTINFO_TABLE_SIZE);
6994 cmd_buf = (void *)&ireq;
6995 cmd_len = sizeof(struct qseecom_qteec_ireq);
6996 } else {
6997 ireq_64bit.app_id = data->client.app_id;
6998 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306999 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007000 ireq_64bit.req_len = req->req_len;
7001 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05307002 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007003 ireq_64bit.resp_len = req->resp_len;
7004 if ((data->client.app_arch == ELFCLASS32) &&
7005 ((ireq_64bit.req_ptr >=
7006 PHY_ADDR_4G - ireq_64bit.req_len) ||
7007 (ireq_64bit.resp_ptr >=
7008 PHY_ADDR_4G - ireq_64bit.resp_len))){
7009 pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
7010 data->client.app_name, data->client.app_id);
7011 pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
7012 ireq_64bit.req_ptr, ireq_64bit.req_len,
7013 ireq_64bit.resp_ptr, ireq_64bit.resp_len);
7014 return -EFAULT;
7015 }
7016 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
7017 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7018 dmac_flush_range((void *)table,
7019 (void *)table + SGLISTINFO_TABLE_SIZE);
7020 cmd_buf = (void *)&ireq_64bit;
7021 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
7022 }
7023 if (qseecom.whitelist_support == true
7024 && cmd_id == QSEOS_TEE_OPEN_SESSION)
7025 *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
7026 else
7027 *(uint32_t *)cmd_buf = cmd_id;
7028
7029 reqd_len_sb_in = req->req_len + req->resp_len;
7030 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7031 data->client.sb_virt,
7032 reqd_len_sb_in,
7033 ION_IOC_CLEAN_INV_CACHES);
7034 if (ret) {
7035 pr_err("cache operation failed %d\n", ret);
7036 return ret;
7037 }
7038
7039 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
7040
7041 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
7042 cmd_buf, cmd_len,
7043 &resp, sizeof(resp));
7044 if (ret) {
7045 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
7046 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07007047 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007048 }
7049
7050 if (qseecom.qsee_reentrancy_support) {
7051 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07007052 if (ret)
7053 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007054 } else {
7055 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
7056 ret = __qseecom_process_incomplete_cmd(data, &resp);
7057 if (ret) {
7058 pr_err("process_incomplete_cmd failed err: %d\n",
7059 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07007060 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007061 }
7062 } else {
7063 if (resp.result != QSEOS_RESULT_SUCCESS) {
7064 pr_err("Response result %d not supported\n",
7065 resp.result);
7066 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07007067 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007068 }
7069 }
7070 }
Zhen Kong4af480e2017-09-19 14:34:16 -07007071exit:
7072 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007073 data->client.sb_virt, data->client.sb_length,
7074 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07007075 if (ret2) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007076 pr_err("cache operation failed %d\n", ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07007077 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007078 }
7079
7080 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
7081 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
Zhen Kong4af480e2017-09-19 14:34:16 -07007082 ret2 = __qseecom_update_qteec_req_buf(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007083 (struct qseecom_qteec_modfd_req *)req, data, true);
Zhen Kong4af480e2017-09-19 14:34:16 -07007084 if (ret2)
7085 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007086 }
Zhen Kong4af480e2017-09-19 14:34:16 -07007087 return ret;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007088}
7089
7090static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
7091 void __user *argp)
7092{
7093 struct qseecom_qteec_modfd_req req;
7094 int ret = 0;
7095
7096 ret = copy_from_user(&req, argp,
7097 sizeof(struct qseecom_qteec_modfd_req));
7098 if (ret) {
7099 pr_err("copy_from_user failed\n");
7100 return ret;
7101 }
7102 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7103 QSEOS_TEE_OPEN_SESSION);
7104
7105 return ret;
7106}
7107
7108static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
7109 void __user *argp)
7110{
7111 struct qseecom_qteec_req req;
7112 int ret = 0;
7113
7114 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
7115 if (ret) {
7116 pr_err("copy_from_user failed\n");
7117 return ret;
7118 }
7119 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
7120 return ret;
7121}
7122
7123static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
7124 void __user *argp)
7125{
7126 struct qseecom_qteec_modfd_req req;
7127 struct qseecom_command_scm_resp resp;
7128 struct qseecom_qteec_ireq ireq;
7129 struct qseecom_qteec_64bit_ireq ireq_64bit;
7130 struct qseecom_registered_app_list *ptr_app;
7131 bool found_app = false;
7132 unsigned long flags;
7133 int ret = 0;
7134 int i = 0;
7135 uint32_t reqd_len_sb_in = 0;
7136 void *cmd_buf = NULL;
7137 size_t cmd_len;
7138 struct sglist_info *table = data->sglistinfo_ptr;
7139 void *req_ptr = NULL;
7140 void *resp_ptr = NULL;
7141
7142 ret = copy_from_user(&req, argp,
7143 sizeof(struct qseecom_qteec_modfd_req));
7144 if (ret) {
7145 pr_err("copy_from_user failed\n");
7146 return ret;
7147 }
7148 ret = __qseecom_qteec_validate_msg(data,
7149 (struct qseecom_qteec_req *)(&req));
7150 if (ret)
7151 return ret;
7152 req_ptr = req.req_ptr;
7153 resp_ptr = req.resp_ptr;
7154
7155 /* find app_id & img_name from list */
7156 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
7157 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
7158 list) {
7159 if ((ptr_app->app_id == data->client.app_id) &&
7160 (!strcmp(ptr_app->app_name, data->client.app_name))) {
7161 found_app = true;
7162 break;
7163 }
7164 }
7165 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
7166 if (!found_app) {
7167 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
7168 (char *)data->client.app_name);
7169 return -ENOENT;
7170 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07007171 if (__qseecom_find_pending_unload_app(data->client.app_id,
7172 data->client.app_name)) {
7173 pr_err("app %d (%s) unload is pending\n",
7174 data->client.app_id, data->client.app_name);
7175 return -ENOENT;
7176 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007177
7178 /* validate offsets */
7179 for (i = 0; i < MAX_ION_FD; i++) {
7180 if (req.ifd_data[i].fd) {
7181 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
7182 return -EINVAL;
7183 }
7184 }
7185 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7186 (uintptr_t)req.req_ptr);
7187 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7188 (uintptr_t)req.resp_ptr);
7189 ret = __qseecom_update_qteec_req_buf(&req, data, false);
7190 if (ret)
7191 return ret;
7192
7193 if (qseecom.qsee_version < QSEE_VERSION_40) {
7194 ireq.app_id = data->client.app_id;
7195 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7196 (uintptr_t)req_ptr);
7197 ireq.req_len = req.req_len;
7198 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7199 (uintptr_t)resp_ptr);
7200 ireq.resp_len = req.resp_len;
7201 cmd_buf = (void *)&ireq;
7202 cmd_len = sizeof(struct qseecom_qteec_ireq);
7203 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
7204 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7205 dmac_flush_range((void *)table,
7206 (void *)table + SGLISTINFO_TABLE_SIZE);
7207 } else {
7208 ireq_64bit.app_id = data->client.app_id;
7209 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7210 (uintptr_t)req_ptr);
7211 ireq_64bit.req_len = req.req_len;
7212 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7213 (uintptr_t)resp_ptr);
7214 ireq_64bit.resp_len = req.resp_len;
7215 cmd_buf = (void *)&ireq_64bit;
7216 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
7217 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
7218 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7219 dmac_flush_range((void *)table,
7220 (void *)table + SGLISTINFO_TABLE_SIZE);
7221 }
7222 reqd_len_sb_in = req.req_len + req.resp_len;
7223 if (qseecom.whitelist_support == true)
7224 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
7225 else
7226 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
7227
7228 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7229 data->client.sb_virt,
7230 reqd_len_sb_in,
7231 ION_IOC_CLEAN_INV_CACHES);
7232 if (ret) {
7233 pr_err("cache operation failed %d\n", ret);
7234 return ret;
7235 }
7236
7237 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
7238
7239 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
7240 cmd_buf, cmd_len,
7241 &resp, sizeof(resp));
7242 if (ret) {
7243 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
7244 ret, data->client.app_id);
7245 return ret;
7246 }
7247
7248 if (qseecom.qsee_reentrancy_support) {
7249 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
7250 } else {
7251 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
7252 ret = __qseecom_process_incomplete_cmd(data, &resp);
7253 if (ret) {
7254 pr_err("process_incomplete_cmd failed err: %d\n",
7255 ret);
7256 return ret;
7257 }
7258 } else {
7259 if (resp.result != QSEOS_RESULT_SUCCESS) {
7260 pr_err("Response result %d not supported\n",
7261 resp.result);
7262 ret = -EINVAL;
7263 }
7264 }
7265 }
7266 ret = __qseecom_update_qteec_req_buf(&req, data, true);
7267 if (ret)
7268 return ret;
7269
7270 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7271 data->client.sb_virt, data->client.sb_length,
7272 ION_IOC_INV_CACHES);
7273 if (ret) {
7274 pr_err("cache operation failed %d\n", ret);
7275 return ret;
7276 }
7277 return 0;
7278}
7279
7280static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
7281 void __user *argp)
7282{
7283 struct qseecom_qteec_modfd_req req;
7284 int ret = 0;
7285
7286 ret = copy_from_user(&req, argp,
7287 sizeof(struct qseecom_qteec_modfd_req));
7288 if (ret) {
7289 pr_err("copy_from_user failed\n");
7290 return ret;
7291 }
7292 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7293 QSEOS_TEE_REQUEST_CANCELLATION);
7294
7295 return ret;
7296}
7297
7298static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
7299{
7300 if (data->sglist_cnt) {
7301 memset(data->sglistinfo_ptr, 0,
7302 SGLISTINFO_TABLE_SIZE);
7303 data->sglist_cnt = 0;
7304 }
7305}
7306
AnilKumar Chimataa312d342019-01-25 12:43:23 +05307307static long qseecom_ioctl(struct file *file,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007308 unsigned int cmd, unsigned long arg)
7309{
7310 int ret = 0;
7311 struct qseecom_dev_handle *data = file->private_data;
7312 void __user *argp = (void __user *) arg;
7313 bool perf_enabled = false;
7314
7315 if (!data) {
7316 pr_err("Invalid/uninitialized device handle\n");
7317 return -EINVAL;
7318 }
7319
7320 if (data->abort) {
7321 pr_err("Aborting qseecom driver\n");
7322 return -ENODEV;
7323 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007324 if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
7325 cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
7326 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
7327 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
Zhen Kongc4c162a2019-01-23 12:07:12 -08007328 __wakeup_unregister_listener_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07007329 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007330
7331 switch (cmd) {
7332 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
7333 if (data->type != QSEECOM_GENERIC) {
7334 pr_err("reg lstnr req: invalid handle (%d)\n",
7335 data->type);
7336 ret = -EINVAL;
7337 break;
7338 }
7339 pr_debug("ioctl register_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007340 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007341 atomic_inc(&data->ioctl_count);
7342 data->type = QSEECOM_LISTENER_SERVICE;
7343 ret = qseecom_register_listener(data, argp);
7344 atomic_dec(&data->ioctl_count);
7345 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007346 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007347 if (ret)
7348 pr_err("failed qseecom_register_listener: %d\n", ret);
7349 break;
7350 }
Neeraj Sonib30ac1f2018-04-17 14:48:42 +05307351 case QSEECOM_IOCTL_SET_ICE_INFO: {
7352 struct qseecom_ice_data_t ice_data;
7353
7354 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
7355 if (ret) {
7356 pr_err("copy_from_user failed\n");
7357 return -EFAULT;
7358 }
7359 qcom_ice_set_fde_flag(ice_data.flag);
7360 break;
7361 }
7362
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007363 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7364 if ((data->listener.id == 0) ||
7365 (data->type != QSEECOM_LISTENER_SERVICE)) {
7366 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7367 data->type, data->listener.id);
7368 ret = -EINVAL;
7369 break;
7370 }
7371 pr_debug("ioctl unregister_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007372 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007373 atomic_inc(&data->ioctl_count);
7374 ret = qseecom_unregister_listener(data);
7375 atomic_dec(&data->ioctl_count);
7376 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007377 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007378 if (ret)
7379 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7380 break;
7381 }
7382 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7383 if ((data->client.app_id == 0) ||
7384 (data->type != QSEECOM_CLIENT_APP)) {
7385 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7386 data->type, data->client.app_id);
7387 ret = -EINVAL;
7388 break;
7389 }
7390 /* Only one client allowed here at a time */
7391 mutex_lock(&app_access_lock);
7392 if (qseecom.support_bus_scaling) {
7393 /* register bus bw in case the client doesn't do it */
7394 if (!data->mode) {
7395 mutex_lock(&qsee_bw_mutex);
7396 __qseecom_register_bus_bandwidth_needs(
7397 data, HIGH);
7398 mutex_unlock(&qsee_bw_mutex);
7399 }
7400 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7401 if (ret) {
7402 pr_err("Failed to set bw.\n");
7403 ret = -EINVAL;
7404 mutex_unlock(&app_access_lock);
7405 break;
7406 }
7407 }
7408 /*
7409 * On targets where crypto clock is handled by HLOS,
7410 * if clk_access_cnt is zero and perf_enabled is false,
7411 * then the crypto clock was not enabled before sending cmd to
7412 * tz, qseecom will enable the clock to avoid service failure.
7413 */
7414 if (!qseecom.no_clock_support &&
7415 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7416 pr_debug("ce clock is not enabled!\n");
7417 ret = qseecom_perf_enable(data);
7418 if (ret) {
7419 pr_err("Failed to vote for clock with err %d\n",
7420 ret);
7421 mutex_unlock(&app_access_lock);
7422 ret = -EINVAL;
7423 break;
7424 }
7425 perf_enabled = true;
7426 }
7427 atomic_inc(&data->ioctl_count);
7428 ret = qseecom_send_cmd(data, argp);
7429 if (qseecom.support_bus_scaling)
7430 __qseecom_add_bw_scale_down_timer(
7431 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7432 if (perf_enabled) {
7433 qsee_disable_clock_vote(data, CLK_DFAB);
7434 qsee_disable_clock_vote(data, CLK_SFPB);
7435 }
7436 atomic_dec(&data->ioctl_count);
7437 wake_up_all(&data->abort_wq);
7438 mutex_unlock(&app_access_lock);
7439 if (ret)
7440 pr_err("failed qseecom_send_cmd: %d\n", ret);
7441 break;
7442 }
7443 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7444 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7445 if ((data->client.app_id == 0) ||
7446 (data->type != QSEECOM_CLIENT_APP)) {
7447 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7448 data->type, data->client.app_id);
7449 ret = -EINVAL;
7450 break;
7451 }
7452 /* Only one client allowed here at a time */
7453 mutex_lock(&app_access_lock);
7454 if (qseecom.support_bus_scaling) {
7455 if (!data->mode) {
7456 mutex_lock(&qsee_bw_mutex);
7457 __qseecom_register_bus_bandwidth_needs(
7458 data, HIGH);
7459 mutex_unlock(&qsee_bw_mutex);
7460 }
7461 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7462 if (ret) {
7463 pr_err("Failed to set bw.\n");
7464 mutex_unlock(&app_access_lock);
7465 ret = -EINVAL;
7466 break;
7467 }
7468 }
7469 /*
7470 * On targets where crypto clock is handled by HLOS,
7471 * if clk_access_cnt is zero and perf_enabled is false,
7472 * then the crypto clock was not enabled before sending cmd to
7473 * tz, qseecom will enable the clock to avoid service failure.
7474 */
7475 if (!qseecom.no_clock_support &&
7476 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7477 pr_debug("ce clock is not enabled!\n");
7478 ret = qseecom_perf_enable(data);
7479 if (ret) {
7480 pr_err("Failed to vote for clock with err %d\n",
7481 ret);
7482 mutex_unlock(&app_access_lock);
7483 ret = -EINVAL;
7484 break;
7485 }
7486 perf_enabled = true;
7487 }
7488 atomic_inc(&data->ioctl_count);
7489 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7490 ret = qseecom_send_modfd_cmd(data, argp);
7491 else
7492 ret = qseecom_send_modfd_cmd_64(data, argp);
7493 if (qseecom.support_bus_scaling)
7494 __qseecom_add_bw_scale_down_timer(
7495 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7496 if (perf_enabled) {
7497 qsee_disable_clock_vote(data, CLK_DFAB);
7498 qsee_disable_clock_vote(data, CLK_SFPB);
7499 }
7500 atomic_dec(&data->ioctl_count);
7501 wake_up_all(&data->abort_wq);
7502 mutex_unlock(&app_access_lock);
7503 if (ret)
7504 pr_err("failed qseecom_send_cmd: %d\n", ret);
7505 __qseecom_clean_data_sglistinfo(data);
7506 break;
7507 }
7508 case QSEECOM_IOCTL_RECEIVE_REQ: {
7509 if ((data->listener.id == 0) ||
7510 (data->type != QSEECOM_LISTENER_SERVICE)) {
7511 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7512 data->type, data->listener.id);
7513 ret = -EINVAL;
7514 break;
7515 }
7516 atomic_inc(&data->ioctl_count);
7517 ret = qseecom_receive_req(data);
7518 atomic_dec(&data->ioctl_count);
7519 wake_up_all(&data->abort_wq);
7520 if (ret && (ret != -ERESTARTSYS))
7521 pr_err("failed qseecom_receive_req: %d\n", ret);
7522 break;
7523 }
7524 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7525 if ((data->listener.id == 0) ||
7526 (data->type != QSEECOM_LISTENER_SERVICE)) {
7527 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7528 data->type, data->listener.id);
7529 ret = -EINVAL;
7530 break;
7531 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007532 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007533 atomic_inc(&data->ioctl_count);
7534 if (!qseecom.qsee_reentrancy_support)
7535 ret = qseecom_send_resp();
7536 else
7537 ret = qseecom_reentrancy_send_resp(data);
7538 atomic_dec(&data->ioctl_count);
7539 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007540 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007541 if (ret)
7542 pr_err("failed qseecom_send_resp: %d\n", ret);
7543 break;
7544 }
7545 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7546 if ((data->type != QSEECOM_CLIENT_APP) &&
7547 (data->type != QSEECOM_GENERIC) &&
7548 (data->type != QSEECOM_SECURE_SERVICE)) {
7549 pr_err("set mem param req: invalid handle (%d)\n",
7550 data->type);
7551 ret = -EINVAL;
7552 break;
7553 }
7554 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7555 mutex_lock(&app_access_lock);
7556 atomic_inc(&data->ioctl_count);
7557 ret = qseecom_set_client_mem_param(data, argp);
7558 atomic_dec(&data->ioctl_count);
7559 mutex_unlock(&app_access_lock);
7560 if (ret)
7561 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7562 ret);
7563 break;
7564 }
7565 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7566 if ((data->type != QSEECOM_GENERIC) &&
7567 (data->type != QSEECOM_CLIENT_APP)) {
7568 pr_err("load app req: invalid handle (%d)\n",
7569 data->type);
7570 ret = -EINVAL;
7571 break;
7572 }
7573 data->type = QSEECOM_CLIENT_APP;
7574 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7575 mutex_lock(&app_access_lock);
7576 atomic_inc(&data->ioctl_count);
7577 ret = qseecom_load_app(data, argp);
7578 atomic_dec(&data->ioctl_count);
7579 mutex_unlock(&app_access_lock);
7580 if (ret)
7581 pr_err("failed load_app request: %d\n", ret);
Zhen Kong03b2eae2019-09-17 16:58:46 -07007582 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007583 break;
7584 }
7585 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7586 if ((data->client.app_id == 0) ||
7587 (data->type != QSEECOM_CLIENT_APP)) {
7588 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7589 data->type, data->client.app_id);
7590 ret = -EINVAL;
7591 break;
7592 }
7593 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7594 mutex_lock(&app_access_lock);
7595 atomic_inc(&data->ioctl_count);
7596 ret = qseecom_unload_app(data, false);
7597 atomic_dec(&data->ioctl_count);
7598 mutex_unlock(&app_access_lock);
7599 if (ret)
7600 pr_err("failed unload_app request: %d\n", ret);
Zhen Kong03b2eae2019-09-17 16:58:46 -07007601 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007602 break;
7603 }
7604 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7605 atomic_inc(&data->ioctl_count);
7606 ret = qseecom_get_qseos_version(data, argp);
7607 if (ret)
7608 pr_err("qseecom_get_qseos_version: %d\n", ret);
7609 atomic_dec(&data->ioctl_count);
7610 break;
7611 }
7612 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7613 if ((data->type != QSEECOM_GENERIC) &&
7614 (data->type != QSEECOM_CLIENT_APP)) {
7615 pr_err("perf enable req: invalid handle (%d)\n",
7616 data->type);
7617 ret = -EINVAL;
7618 break;
7619 }
7620 if ((data->type == QSEECOM_CLIENT_APP) &&
7621 (data->client.app_id == 0)) {
7622 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7623 data->type, data->client.app_id);
7624 ret = -EINVAL;
7625 break;
7626 }
7627 atomic_inc(&data->ioctl_count);
7628 if (qseecom.support_bus_scaling) {
7629 mutex_lock(&qsee_bw_mutex);
7630 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7631 mutex_unlock(&qsee_bw_mutex);
7632 } else {
7633 ret = qseecom_perf_enable(data);
7634 if (ret)
7635 pr_err("Fail to vote for clocks %d\n", ret);
7636 }
7637 atomic_dec(&data->ioctl_count);
7638 break;
7639 }
7640 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7641 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7642 (data->type != QSEECOM_CLIENT_APP)) {
7643 pr_err("perf disable req: invalid handle (%d)\n",
7644 data->type);
7645 ret = -EINVAL;
7646 break;
7647 }
7648 if ((data->type == QSEECOM_CLIENT_APP) &&
7649 (data->client.app_id == 0)) {
7650 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7651 data->type, data->client.app_id);
7652 ret = -EINVAL;
7653 break;
7654 }
7655 atomic_inc(&data->ioctl_count);
7656 if (!qseecom.support_bus_scaling) {
7657 qsee_disable_clock_vote(data, CLK_DFAB);
7658 qsee_disable_clock_vote(data, CLK_SFPB);
7659 } else {
7660 mutex_lock(&qsee_bw_mutex);
7661 qseecom_unregister_bus_bandwidth_needs(data);
7662 mutex_unlock(&qsee_bw_mutex);
7663 }
7664 atomic_dec(&data->ioctl_count);
7665 break;
7666 }
7667
7668 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7669 /* If crypto clock is not handled by HLOS, return directly. */
7670 if (qseecom.no_clock_support) {
7671 pr_debug("crypto clock is not handled by HLOS\n");
7672 break;
7673 }
7674 if ((data->client.app_id == 0) ||
7675 (data->type != QSEECOM_CLIENT_APP)) {
7676 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7677 data->type, data->client.app_id);
7678 ret = -EINVAL;
7679 break;
7680 }
7681 atomic_inc(&data->ioctl_count);
7682 ret = qseecom_scale_bus_bandwidth(data, argp);
7683 atomic_dec(&data->ioctl_count);
7684 break;
7685 }
7686 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7687 if (data->type != QSEECOM_GENERIC) {
7688 pr_err("load ext elf req: invalid client handle (%d)\n",
7689 data->type);
7690 ret = -EINVAL;
7691 break;
7692 }
7693 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7694 data->released = true;
7695 mutex_lock(&app_access_lock);
7696 atomic_inc(&data->ioctl_count);
7697 ret = qseecom_load_external_elf(data, argp);
7698 atomic_dec(&data->ioctl_count);
7699 mutex_unlock(&app_access_lock);
7700 if (ret)
7701 pr_err("failed load_external_elf request: %d\n", ret);
7702 break;
7703 }
7704 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7705 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7706 pr_err("unload ext elf req: invalid handle (%d)\n",
7707 data->type);
7708 ret = -EINVAL;
7709 break;
7710 }
7711 data->released = true;
7712 mutex_lock(&app_access_lock);
7713 atomic_inc(&data->ioctl_count);
7714 ret = qseecom_unload_external_elf(data);
7715 atomic_dec(&data->ioctl_count);
7716 mutex_unlock(&app_access_lock);
7717 if (ret)
7718 pr_err("failed unload_app request: %d\n", ret);
7719 break;
7720 }
7721 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
Zhen Kong677362c2019-08-30 10:50:25 -07007722 if ((data->type != QSEECOM_GENERIC) &&
7723 (data->type != QSEECOM_CLIENT_APP)) {
7724 pr_err("app loaded query req: invalid handle (%d)\n",
7725 data->type);
7726 ret = -EINVAL;
7727 break;
7728 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007729 data->type = QSEECOM_CLIENT_APP;
7730 mutex_lock(&app_access_lock);
7731 atomic_inc(&data->ioctl_count);
7732 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7733 ret = qseecom_query_app_loaded(data, argp);
7734 atomic_dec(&data->ioctl_count);
7735 mutex_unlock(&app_access_lock);
7736 break;
7737 }
7738 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7739 if (data->type != QSEECOM_GENERIC) {
7740 pr_err("send cmd svc req: invalid handle (%d)\n",
7741 data->type);
7742 ret = -EINVAL;
7743 break;
7744 }
7745 data->type = QSEECOM_SECURE_SERVICE;
7746 if (qseecom.qsee_version < QSEE_VERSION_03) {
7747 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7748 qseecom.qsee_version);
7749 return -EINVAL;
7750 }
7751 mutex_lock(&app_access_lock);
7752 atomic_inc(&data->ioctl_count);
7753 ret = qseecom_send_service_cmd(data, argp);
7754 atomic_dec(&data->ioctl_count);
7755 mutex_unlock(&app_access_lock);
7756 break;
7757 }
7758 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7759 if (!(qseecom.support_pfe || qseecom.support_fde))
7760 pr_err("Features requiring key init not supported\n");
7761 if (data->type != QSEECOM_GENERIC) {
7762 pr_err("create key req: invalid handle (%d)\n",
7763 data->type);
7764 ret = -EINVAL;
7765 break;
7766 }
7767 if (qseecom.qsee_version < QSEE_VERSION_05) {
7768 pr_err("Create Key feature unsupported: qsee ver %u\n",
7769 qseecom.qsee_version);
7770 return -EINVAL;
7771 }
7772 data->released = true;
7773 mutex_lock(&app_access_lock);
7774 atomic_inc(&data->ioctl_count);
7775 ret = qseecom_create_key(data, argp);
7776 if (ret)
7777 pr_err("failed to create encryption key: %d\n", ret);
7778
7779 atomic_dec(&data->ioctl_count);
7780 mutex_unlock(&app_access_lock);
7781 break;
7782 }
7783 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7784 if (!(qseecom.support_pfe || qseecom.support_fde))
7785 pr_err("Features requiring key init not supported\n");
7786 if (data->type != QSEECOM_GENERIC) {
7787 pr_err("wipe key req: invalid handle (%d)\n",
7788 data->type);
7789 ret = -EINVAL;
7790 break;
7791 }
7792 if (qseecom.qsee_version < QSEE_VERSION_05) {
7793 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7794 qseecom.qsee_version);
7795 return -EINVAL;
7796 }
7797 data->released = true;
7798 mutex_lock(&app_access_lock);
7799 atomic_inc(&data->ioctl_count);
7800 ret = qseecom_wipe_key(data, argp);
7801 if (ret)
7802 pr_err("failed to wipe encryption key: %d\n", ret);
7803 atomic_dec(&data->ioctl_count);
7804 mutex_unlock(&app_access_lock);
7805 break;
7806 }
7807 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7808 if (!(qseecom.support_pfe || qseecom.support_fde))
7809 pr_err("Features requiring key init not supported\n");
7810 if (data->type != QSEECOM_GENERIC) {
7811 pr_err("update key req: invalid handle (%d)\n",
7812 data->type);
7813 ret = -EINVAL;
7814 break;
7815 }
7816 if (qseecom.qsee_version < QSEE_VERSION_05) {
7817 pr_err("Update Key feature unsupported in qsee ver %u\n",
7818 qseecom.qsee_version);
7819 return -EINVAL;
7820 }
7821 data->released = true;
7822 mutex_lock(&app_access_lock);
7823 atomic_inc(&data->ioctl_count);
7824 ret = qseecom_update_key_user_info(data, argp);
7825 if (ret)
7826 pr_err("failed to update key user info: %d\n", ret);
7827 atomic_dec(&data->ioctl_count);
7828 mutex_unlock(&app_access_lock);
7829 break;
7830 }
7831 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7832 if (data->type != QSEECOM_GENERIC) {
7833 pr_err("save part hash req: invalid handle (%d)\n",
7834 data->type);
7835 ret = -EINVAL;
7836 break;
7837 }
7838 data->released = true;
7839 mutex_lock(&app_access_lock);
7840 atomic_inc(&data->ioctl_count);
7841 ret = qseecom_save_partition_hash(argp);
7842 atomic_dec(&data->ioctl_count);
7843 mutex_unlock(&app_access_lock);
7844 break;
7845 }
7846 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7847 if (data->type != QSEECOM_GENERIC) {
7848 pr_err("ES activated req: invalid handle (%d)\n",
7849 data->type);
7850 ret = -EINVAL;
7851 break;
7852 }
7853 data->released = true;
7854 mutex_lock(&app_access_lock);
7855 atomic_inc(&data->ioctl_count);
7856 ret = qseecom_is_es_activated(argp);
7857 atomic_dec(&data->ioctl_count);
7858 mutex_unlock(&app_access_lock);
7859 break;
7860 }
7861 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7862 if (data->type != QSEECOM_GENERIC) {
7863 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7864 data->type);
7865 ret = -EINVAL;
7866 break;
7867 }
7868 data->released = true;
7869 mutex_lock(&app_access_lock);
7870 atomic_inc(&data->ioctl_count);
7871 ret = qseecom_mdtp_cipher_dip(argp);
7872 atomic_dec(&data->ioctl_count);
7873 mutex_unlock(&app_access_lock);
7874 break;
7875 }
7876 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7877 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7878 if ((data->listener.id == 0) ||
7879 (data->type != QSEECOM_LISTENER_SERVICE)) {
7880 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7881 data->type, data->listener.id);
7882 ret = -EINVAL;
7883 break;
7884 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007885 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007886 atomic_inc(&data->ioctl_count);
7887 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7888 ret = qseecom_send_modfd_resp(data, argp);
7889 else
7890 ret = qseecom_send_modfd_resp_64(data, argp);
7891 atomic_dec(&data->ioctl_count);
7892 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007893 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007894 if (ret)
7895 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7896 __qseecom_clean_data_sglistinfo(data);
7897 break;
7898 }
7899 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7900 if ((data->client.app_id == 0) ||
7901 (data->type != QSEECOM_CLIENT_APP)) {
7902 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7903 data->type, data->client.app_id);
7904 ret = -EINVAL;
7905 break;
7906 }
7907 if (qseecom.qsee_version < QSEE_VERSION_40) {
7908 pr_err("GP feature unsupported: qsee ver %u\n",
7909 qseecom.qsee_version);
7910 return -EINVAL;
7911 }
7912 /* Only one client allowed here at a time */
7913 mutex_lock(&app_access_lock);
7914 atomic_inc(&data->ioctl_count);
7915 ret = qseecom_qteec_open_session(data, argp);
7916 atomic_dec(&data->ioctl_count);
7917 wake_up_all(&data->abort_wq);
7918 mutex_unlock(&app_access_lock);
7919 if (ret)
7920 pr_err("failed open_session_cmd: %d\n", ret);
7921 __qseecom_clean_data_sglistinfo(data);
7922 break;
7923 }
7924 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7925 if ((data->client.app_id == 0) ||
7926 (data->type != QSEECOM_CLIENT_APP)) {
7927 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7928 data->type, data->client.app_id);
7929 ret = -EINVAL;
7930 break;
7931 }
7932 if (qseecom.qsee_version < QSEE_VERSION_40) {
7933 pr_err("GP feature unsupported: qsee ver %u\n",
7934 qseecom.qsee_version);
7935 return -EINVAL;
7936 }
7937 /* Only one client allowed here at a time */
7938 mutex_lock(&app_access_lock);
7939 atomic_inc(&data->ioctl_count);
7940 ret = qseecom_qteec_close_session(data, argp);
7941 atomic_dec(&data->ioctl_count);
7942 wake_up_all(&data->abort_wq);
7943 mutex_unlock(&app_access_lock);
7944 if (ret)
7945 pr_err("failed close_session_cmd: %d\n", ret);
7946 break;
7947 }
7948 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7949 if ((data->client.app_id == 0) ||
7950 (data->type != QSEECOM_CLIENT_APP)) {
7951 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7952 data->type, data->client.app_id);
7953 ret = -EINVAL;
7954 break;
7955 }
7956 if (qseecom.qsee_version < QSEE_VERSION_40) {
7957 pr_err("GP feature unsupported: qsee ver %u\n",
7958 qseecom.qsee_version);
7959 return -EINVAL;
7960 }
7961 /* Only one client allowed here at a time */
7962 mutex_lock(&app_access_lock);
7963 atomic_inc(&data->ioctl_count);
7964 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7965 atomic_dec(&data->ioctl_count);
7966 wake_up_all(&data->abort_wq);
7967 mutex_unlock(&app_access_lock);
7968 if (ret)
7969 pr_err("failed Invoke cmd: %d\n", ret);
7970 __qseecom_clean_data_sglistinfo(data);
7971 break;
7972 }
7973 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7974 if ((data->client.app_id == 0) ||
7975 (data->type != QSEECOM_CLIENT_APP)) {
7976 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7977 data->type, data->client.app_id);
7978 ret = -EINVAL;
7979 break;
7980 }
7981 if (qseecom.qsee_version < QSEE_VERSION_40) {
7982 pr_err("GP feature unsupported: qsee ver %u\n",
7983 qseecom.qsee_version);
7984 return -EINVAL;
7985 }
7986 /* Only one client allowed here at a time */
7987 mutex_lock(&app_access_lock);
7988 atomic_inc(&data->ioctl_count);
7989 ret = qseecom_qteec_request_cancellation(data, argp);
7990 atomic_dec(&data->ioctl_count);
7991 wake_up_all(&data->abort_wq);
7992 mutex_unlock(&app_access_lock);
7993 if (ret)
7994 pr_err("failed request_cancellation: %d\n", ret);
7995 break;
7996 }
7997 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7998 atomic_inc(&data->ioctl_count);
7999 ret = qseecom_get_ce_info(data, argp);
8000 if (ret)
8001 pr_err("failed get fde ce pipe info: %d\n", ret);
8002 atomic_dec(&data->ioctl_count);
8003 break;
8004 }
8005 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
8006 atomic_inc(&data->ioctl_count);
8007 ret = qseecom_free_ce_info(data, argp);
8008 if (ret)
8009 pr_err("failed get fde ce pipe info: %d\n", ret);
8010 atomic_dec(&data->ioctl_count);
8011 break;
8012 }
8013 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
8014 atomic_inc(&data->ioctl_count);
8015 ret = qseecom_query_ce_info(data, argp);
8016 if (ret)
8017 pr_err("failed get fde ce pipe info: %d\n", ret);
8018 atomic_dec(&data->ioctl_count);
8019 break;
8020 }
8021 default:
8022 pr_err("Invalid IOCTL: 0x%x\n", cmd);
8023 return -EINVAL;
8024 }
8025 return ret;
8026}
8027
8028static int qseecom_open(struct inode *inode, struct file *file)
8029{
8030 int ret = 0;
8031 struct qseecom_dev_handle *data;
8032
8033 data = kzalloc(sizeof(*data), GFP_KERNEL);
8034 if (!data)
8035 return -ENOMEM;
8036 file->private_data = data;
8037 data->abort = 0;
8038 data->type = QSEECOM_GENERIC;
8039 data->released = false;
8040 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
8041 data->mode = INACTIVE;
8042 init_waitqueue_head(&data->abort_wq);
8043 atomic_set(&data->ioctl_count, 0);
8044 return ret;
8045}
8046
Zhen Konge5e6c942019-10-01 15:45:25 -07008047static void __qseecom_release_disable_clk(struct qseecom_dev_handle *data)
8048{
8049 if (qseecom.no_clock_support)
8050 return;
8051 if (qseecom.support_bus_scaling) {
8052 mutex_lock(&qsee_bw_mutex);
8053 if (data->mode != INACTIVE) {
8054 qseecom_unregister_bus_bandwidth_needs(data);
8055 if (qseecom.cumulative_mode == INACTIVE)
8056 __qseecom_set_msm_bus_request(INACTIVE);
8057 }
8058 mutex_unlock(&qsee_bw_mutex);
8059 } else {
8060 if (data->fast_load_enabled)
8061 qsee_disable_clock_vote(data, CLK_SFPB);
8062 if (data->perf_enabled)
8063 qsee_disable_clock_vote(data, CLK_DFAB);
8064 }
8065}
8066
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008067static int qseecom_release(struct inode *inode, struct file *file)
8068{
8069 struct qseecom_dev_handle *data = file->private_data;
8070 int ret = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08008071 bool free_private_data = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008072
Zhen Konge5e6c942019-10-01 15:45:25 -07008073 __qseecom_release_disable_clk(data);
8074 if (!data->released) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008075 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
8076 data->type, data->mode, data);
8077 switch (data->type) {
8078 case QSEECOM_LISTENER_SERVICE:
Zhen Kongbcdeda22018-11-16 13:50:51 -08008079 pr_debug("release lsnr svc %d\n", data->listener.id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08008080 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008081 ret = qseecom_unregister_listener(data);
Zhen Konge6ac4132019-09-20 13:49:41 -07008082 if (!ret)
8083 free_private_data = false;
Zhen Kong87dcf0e2019-01-04 12:34:50 -08008084 data->listener.release_called = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08008085 mutex_unlock(&listener_access_lock);
Zhen Konge5e6c942019-10-01 15:45:25 -07008086 __wakeup_unregister_listener_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008087 break;
8088 case QSEECOM_CLIENT_APP:
Zhen Kong03b2eae2019-09-17 16:58:46 -07008089 pr_debug("release app %d (%s)\n",
8090 data->client.app_id, data->client.app_name);
8091 if (data->client.app_id) {
8092 free_private_data = false;
Zhen Konge5e6c942019-10-01 15:45:25 -07008093 mutex_lock(&unload_app_pending_list_lock);
Zhen Kong03b2eae2019-09-17 16:58:46 -07008094 ret = qseecom_prepare_unload_app(data);
Zhen Konge5e6c942019-10-01 15:45:25 -07008095 mutex_unlock(&unload_app_pending_list_lock);
8096 __wakeup_unload_app_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07008097 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008098 break;
8099 case QSEECOM_SECURE_SERVICE:
8100 case QSEECOM_GENERIC:
8101 ret = qseecom_unmap_ion_allocated_memory(data);
8102 if (ret)
8103 pr_err("Ion Unmap failed\n");
8104 break;
8105 case QSEECOM_UNAVAILABLE_CLIENT_APP:
8106 break;
8107 default:
8108 pr_err("Unsupported clnt_handle_type %d",
8109 data->type);
8110 break;
8111 }
8112 }
8113
Zhen Kongbcdeda22018-11-16 13:50:51 -08008114 if (free_private_data)
8115 kfree(data);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008116 return ret;
8117}
8118
8119#ifdef CONFIG_COMPAT
8120#include "compat_qseecom.c"
8121#else
8122#define compat_qseecom_ioctl NULL
8123#endif
8124
8125static const struct file_operations qseecom_fops = {
8126 .owner = THIS_MODULE,
8127 .unlocked_ioctl = qseecom_ioctl,
8128 .compat_ioctl = compat_qseecom_ioctl,
8129 .open = qseecom_open,
8130 .release = qseecom_release
8131};
8132
8133static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
8134{
8135 int rc = 0;
8136 struct device *pdev;
8137 struct qseecom_clk *qclk;
8138 char *core_clk_src = NULL;
8139 char *core_clk = NULL;
8140 char *iface_clk = NULL;
8141 char *bus_clk = NULL;
8142
8143 switch (ce) {
8144 case CLK_QSEE: {
8145 core_clk_src = "core_clk_src";
8146 core_clk = "core_clk";
8147 iface_clk = "iface_clk";
8148 bus_clk = "bus_clk";
8149 qclk = &qseecom.qsee;
8150 qclk->instance = CLK_QSEE;
8151 break;
8152 };
8153 case CLK_CE_DRV: {
8154 core_clk_src = "ce_drv_core_clk_src";
8155 core_clk = "ce_drv_core_clk";
8156 iface_clk = "ce_drv_iface_clk";
8157 bus_clk = "ce_drv_bus_clk";
8158 qclk = &qseecom.ce_drv;
8159 qclk->instance = CLK_CE_DRV;
8160 break;
8161 };
8162 default:
8163 pr_err("Invalid ce hw instance: %d!\n", ce);
8164 return -EIO;
8165 }
8166
8167 if (qseecom.no_clock_support) {
8168 qclk->ce_core_clk = NULL;
8169 qclk->ce_clk = NULL;
8170 qclk->ce_bus_clk = NULL;
8171 qclk->ce_core_src_clk = NULL;
8172 return 0;
8173 }
8174
8175 pdev = qseecom.pdev;
8176
8177 /* Get CE3 src core clk. */
8178 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
8179 if (!IS_ERR(qclk->ce_core_src_clk)) {
8180 rc = clk_set_rate(qclk->ce_core_src_clk,
8181 qseecom.ce_opp_freq_hz);
8182 if (rc) {
8183 clk_put(qclk->ce_core_src_clk);
8184 qclk->ce_core_src_clk = NULL;
8185 pr_err("Unable to set the core src clk @%uMhz.\n",
8186 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
8187 return -EIO;
8188 }
8189 } else {
8190 pr_warn("Unable to get CE core src clk, set to NULL\n");
8191 qclk->ce_core_src_clk = NULL;
8192 }
8193
8194 /* Get CE core clk */
8195 qclk->ce_core_clk = clk_get(pdev, core_clk);
8196 if (IS_ERR(qclk->ce_core_clk)) {
8197 rc = PTR_ERR(qclk->ce_core_clk);
8198 pr_err("Unable to get CE core clk\n");
8199 if (qclk->ce_core_src_clk != NULL)
8200 clk_put(qclk->ce_core_src_clk);
8201 return -EIO;
8202 }
8203
8204 /* Get CE Interface clk */
8205 qclk->ce_clk = clk_get(pdev, iface_clk);
8206 if (IS_ERR(qclk->ce_clk)) {
8207 rc = PTR_ERR(qclk->ce_clk);
8208 pr_err("Unable to get CE interface clk\n");
8209 if (qclk->ce_core_src_clk != NULL)
8210 clk_put(qclk->ce_core_src_clk);
8211 clk_put(qclk->ce_core_clk);
8212 return -EIO;
8213 }
8214
8215 /* Get CE AXI clk */
8216 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
8217 if (IS_ERR(qclk->ce_bus_clk)) {
8218 rc = PTR_ERR(qclk->ce_bus_clk);
8219 pr_err("Unable to get CE BUS interface clk\n");
8220 if (qclk->ce_core_src_clk != NULL)
8221 clk_put(qclk->ce_core_src_clk);
8222 clk_put(qclk->ce_core_clk);
8223 clk_put(qclk->ce_clk);
8224 return -EIO;
8225 }
8226
8227 return rc;
8228}
8229
8230static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
8231{
8232 struct qseecom_clk *qclk;
8233
8234 if (ce == CLK_QSEE)
8235 qclk = &qseecom.qsee;
8236 else
8237 qclk = &qseecom.ce_drv;
8238
8239 if (qclk->ce_clk != NULL) {
8240 clk_put(qclk->ce_clk);
8241 qclk->ce_clk = NULL;
8242 }
8243 if (qclk->ce_core_clk != NULL) {
8244 clk_put(qclk->ce_core_clk);
8245 qclk->ce_core_clk = NULL;
8246 }
8247 if (qclk->ce_bus_clk != NULL) {
8248 clk_put(qclk->ce_bus_clk);
8249 qclk->ce_bus_clk = NULL;
8250 }
8251 if (qclk->ce_core_src_clk != NULL) {
8252 clk_put(qclk->ce_core_src_clk);
8253 qclk->ce_core_src_clk = NULL;
8254 }
8255 qclk->instance = CLK_INVALID;
8256}
8257
8258static int qseecom_retrieve_ce_data(struct platform_device *pdev)
8259{
8260 int rc = 0;
8261 uint32_t hlos_num_ce_hw_instances;
8262 uint32_t disk_encrypt_pipe;
8263 uint32_t file_encrypt_pipe;
Zhen Kongffec45c2017-10-18 14:05:53 -07008264 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008265 int i;
8266 const int *tbl;
8267 int size;
8268 int entry;
8269 struct qseecom_crypto_info *pfde_tbl = NULL;
8270 struct qseecom_crypto_info *p;
8271 int tbl_size;
8272 int j;
8273 bool old_db = true;
8274 struct qseecom_ce_info_use *pce_info_use;
8275 uint32_t *unit_tbl = NULL;
8276 int total_units = 0;
8277 struct qseecom_ce_pipe_entry *pce_entry;
8278
8279 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
8280 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
8281
8282 if (of_property_read_u32((&pdev->dev)->of_node,
8283 "qcom,qsee-ce-hw-instance",
8284 &qseecom.ce_info.qsee_ce_hw_instance)) {
8285 pr_err("Fail to get qsee ce hw instance information.\n");
8286 rc = -EINVAL;
8287 goto out;
8288 } else {
8289 pr_debug("qsee-ce-hw-instance=0x%x\n",
8290 qseecom.ce_info.qsee_ce_hw_instance);
8291 }
8292
8293 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
8294 "qcom,support-fde");
8295 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
8296 "qcom,support-pfe");
8297
8298 if (!qseecom.support_pfe && !qseecom.support_fde) {
8299 pr_warn("Device does not support PFE/FDE");
8300 goto out;
8301 }
8302
8303 if (qseecom.support_fde)
8304 tbl = of_get_property((&pdev->dev)->of_node,
8305 "qcom,full-disk-encrypt-info", &size);
8306 else
8307 tbl = NULL;
8308 if (tbl) {
8309 old_db = false;
8310 if (size % sizeof(struct qseecom_crypto_info)) {
8311 pr_err("full-disk-encrypt-info tbl size(%d)\n",
8312 size);
8313 rc = -EINVAL;
8314 goto out;
8315 }
8316 tbl_size = size / sizeof
8317 (struct qseecom_crypto_info);
8318
8319 pfde_tbl = kzalloc(size, GFP_KERNEL);
8320 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8321 total_units = 0;
8322
8323 if (!pfde_tbl || !unit_tbl) {
8324 pr_err("failed to alloc memory\n");
8325 rc = -ENOMEM;
8326 goto out;
8327 }
8328 if (of_property_read_u32_array((&pdev->dev)->of_node,
8329 "qcom,full-disk-encrypt-info",
8330 (u32 *)pfde_tbl, size/sizeof(u32))) {
8331 pr_err("failed to read full-disk-encrypt-info tbl\n");
8332 rc = -EINVAL;
8333 goto out;
8334 }
8335
8336 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8337 for (j = 0; j < total_units; j++) {
8338 if (p->unit_num == *(unit_tbl + j))
8339 break;
8340 }
8341 if (j == total_units) {
8342 *(unit_tbl + total_units) = p->unit_num;
8343 total_units++;
8344 }
8345 }
8346
8347 qseecom.ce_info.num_fde = total_units;
8348 pce_info_use = qseecom.ce_info.fde = kcalloc(
8349 total_units, sizeof(struct qseecom_ce_info_use),
8350 GFP_KERNEL);
8351 if (!pce_info_use) {
8352 pr_err("failed to alloc memory\n");
8353 rc = -ENOMEM;
8354 goto out;
8355 }
8356
8357 for (j = 0; j < total_units; j++, pce_info_use++) {
8358 pce_info_use->unit_num = *(unit_tbl + j);
8359 pce_info_use->alloc = false;
8360 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8361 pce_info_use->num_ce_pipe_entries = 0;
8362 pce_info_use->ce_pipe_entry = NULL;
8363 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8364 if (p->unit_num == pce_info_use->unit_num)
8365 pce_info_use->num_ce_pipe_entries++;
8366 }
8367
8368 entry = pce_info_use->num_ce_pipe_entries;
8369 pce_entry = pce_info_use->ce_pipe_entry =
8370 kcalloc(entry,
8371 sizeof(struct qseecom_ce_pipe_entry),
8372 GFP_KERNEL);
8373 if (pce_entry == NULL) {
8374 pr_err("failed to alloc memory\n");
8375 rc = -ENOMEM;
8376 goto out;
8377 }
8378
8379 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8380 if (p->unit_num == pce_info_use->unit_num) {
8381 pce_entry->ce_num = p->ce;
8382 pce_entry->ce_pipe_pair =
8383 p->pipe_pair;
8384 pce_entry->valid = true;
8385 pce_entry++;
8386 }
8387 }
8388 }
8389 kfree(unit_tbl);
8390 unit_tbl = NULL;
8391 kfree(pfde_tbl);
8392 pfde_tbl = NULL;
8393 }
8394
8395 if (qseecom.support_pfe)
8396 tbl = of_get_property((&pdev->dev)->of_node,
8397 "qcom,per-file-encrypt-info", &size);
8398 else
8399 tbl = NULL;
8400 if (tbl) {
8401 old_db = false;
8402 if (size % sizeof(struct qseecom_crypto_info)) {
8403 pr_err("per-file-encrypt-info tbl size(%d)\n",
8404 size);
8405 rc = -EINVAL;
8406 goto out;
8407 }
8408 tbl_size = size / sizeof
8409 (struct qseecom_crypto_info);
8410
8411 pfde_tbl = kzalloc(size, GFP_KERNEL);
8412 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8413 total_units = 0;
8414 if (!pfde_tbl || !unit_tbl) {
8415 pr_err("failed to alloc memory\n");
8416 rc = -ENOMEM;
8417 goto out;
8418 }
8419 if (of_property_read_u32_array((&pdev->dev)->of_node,
8420 "qcom,per-file-encrypt-info",
8421 (u32 *)pfde_tbl, size/sizeof(u32))) {
8422 pr_err("failed to read per-file-encrypt-info tbl\n");
8423 rc = -EINVAL;
8424 goto out;
8425 }
8426
8427 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8428 for (j = 0; j < total_units; j++) {
8429 if (p->unit_num == *(unit_tbl + j))
8430 break;
8431 }
8432 if (j == total_units) {
8433 *(unit_tbl + total_units) = p->unit_num;
8434 total_units++;
8435 }
8436 }
8437
8438 qseecom.ce_info.num_pfe = total_units;
8439 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8440 total_units, sizeof(struct qseecom_ce_info_use),
8441 GFP_KERNEL);
8442 if (!pce_info_use) {
8443 pr_err("failed to alloc memory\n");
8444 rc = -ENOMEM;
8445 goto out;
8446 }
8447
8448 for (j = 0; j < total_units; j++, pce_info_use++) {
8449 pce_info_use->unit_num = *(unit_tbl + j);
8450 pce_info_use->alloc = false;
8451 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8452 pce_info_use->num_ce_pipe_entries = 0;
8453 pce_info_use->ce_pipe_entry = NULL;
8454 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8455 if (p->unit_num == pce_info_use->unit_num)
8456 pce_info_use->num_ce_pipe_entries++;
8457 }
8458
8459 entry = pce_info_use->num_ce_pipe_entries;
8460 pce_entry = pce_info_use->ce_pipe_entry =
8461 kcalloc(entry,
8462 sizeof(struct qseecom_ce_pipe_entry),
8463 GFP_KERNEL);
8464 if (pce_entry == NULL) {
8465 pr_err("failed to alloc memory\n");
8466 rc = -ENOMEM;
8467 goto out;
8468 }
8469
8470 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8471 if (p->unit_num == pce_info_use->unit_num) {
8472 pce_entry->ce_num = p->ce;
8473 pce_entry->ce_pipe_pair =
8474 p->pipe_pair;
8475 pce_entry->valid = true;
8476 pce_entry++;
8477 }
8478 }
8479 }
8480 kfree(unit_tbl);
8481 unit_tbl = NULL;
8482 kfree(pfde_tbl);
8483 pfde_tbl = NULL;
8484 }
8485
8486 if (!old_db)
8487 goto out1;
8488
8489 if (of_property_read_bool((&pdev->dev)->of_node,
8490 "qcom,support-multiple-ce-hw-instance")) {
8491 if (of_property_read_u32((&pdev->dev)->of_node,
8492 "qcom,hlos-num-ce-hw-instances",
8493 &hlos_num_ce_hw_instances)) {
8494 pr_err("Fail: get hlos number of ce hw instance\n");
8495 rc = -EINVAL;
8496 goto out;
8497 }
8498 } else {
8499 hlos_num_ce_hw_instances = 1;
8500 }
8501
8502 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8503 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8504 MAX_CE_PIPE_PAIR_PER_UNIT);
8505 rc = -EINVAL;
8506 goto out;
8507 }
8508
8509 if (of_property_read_u32_array((&pdev->dev)->of_node,
8510 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8511 hlos_num_ce_hw_instances)) {
8512 pr_err("Fail: get hlos ce hw instance info\n");
8513 rc = -EINVAL;
8514 goto out;
8515 }
8516
8517 if (qseecom.support_fde) {
8518 pce_info_use = qseecom.ce_info.fde =
8519 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8520 if (!pce_info_use) {
8521 pr_err("failed to alloc memory\n");
8522 rc = -ENOMEM;
8523 goto out;
8524 }
8525 /* by default for old db */
8526 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8527 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8528 pce_info_use->alloc = false;
8529 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8530 pce_info_use->ce_pipe_entry = NULL;
8531 if (of_property_read_u32((&pdev->dev)->of_node,
8532 "qcom,disk-encrypt-pipe-pair",
8533 &disk_encrypt_pipe)) {
8534 pr_err("Fail to get FDE pipe information.\n");
8535 rc = -EINVAL;
8536 goto out;
8537 } else {
8538 pr_debug("disk-encrypt-pipe-pair=0x%x",
8539 disk_encrypt_pipe);
8540 }
8541 entry = pce_info_use->num_ce_pipe_entries =
8542 hlos_num_ce_hw_instances;
8543 pce_entry = pce_info_use->ce_pipe_entry =
8544 kcalloc(entry,
8545 sizeof(struct qseecom_ce_pipe_entry),
8546 GFP_KERNEL);
8547 if (pce_entry == NULL) {
8548 pr_err("failed to alloc memory\n");
8549 rc = -ENOMEM;
8550 goto out;
8551 }
8552 for (i = 0; i < entry; i++) {
8553 pce_entry->ce_num = hlos_ce_hw_instance[i];
8554 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8555 pce_entry->valid = 1;
8556 pce_entry++;
8557 }
8558 } else {
8559 pr_warn("Device does not support FDE");
8560 disk_encrypt_pipe = 0xff;
8561 }
8562 if (qseecom.support_pfe) {
8563 pce_info_use = qseecom.ce_info.pfe =
8564 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8565 if (!pce_info_use) {
8566 pr_err("failed to alloc memory\n");
8567 rc = -ENOMEM;
8568 goto out;
8569 }
8570 /* by default for old db */
8571 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8572 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8573 pce_info_use->alloc = false;
8574 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8575 pce_info_use->ce_pipe_entry = NULL;
8576
8577 if (of_property_read_u32((&pdev->dev)->of_node,
8578 "qcom,file-encrypt-pipe-pair",
8579 &file_encrypt_pipe)) {
8580 pr_err("Fail to get PFE pipe information.\n");
8581 rc = -EINVAL;
8582 goto out;
8583 } else {
8584 pr_debug("file-encrypt-pipe-pair=0x%x",
8585 file_encrypt_pipe);
8586 }
8587 entry = pce_info_use->num_ce_pipe_entries =
8588 hlos_num_ce_hw_instances;
8589 pce_entry = pce_info_use->ce_pipe_entry =
8590 kcalloc(entry,
8591 sizeof(struct qseecom_ce_pipe_entry),
8592 GFP_KERNEL);
8593 if (pce_entry == NULL) {
8594 pr_err("failed to alloc memory\n");
8595 rc = -ENOMEM;
8596 goto out;
8597 }
8598 for (i = 0; i < entry; i++) {
8599 pce_entry->ce_num = hlos_ce_hw_instance[i];
8600 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8601 pce_entry->valid = 1;
8602 pce_entry++;
8603 }
8604 } else {
8605 pr_warn("Device does not support PFE");
8606 file_encrypt_pipe = 0xff;
8607 }
8608
8609out1:
8610 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8611 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8612out:
8613 if (rc) {
8614 if (qseecom.ce_info.fde) {
8615 pce_info_use = qseecom.ce_info.fde;
8616 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8617 pce_entry = pce_info_use->ce_pipe_entry;
8618 kfree(pce_entry);
8619 pce_info_use++;
8620 }
8621 }
8622 kfree(qseecom.ce_info.fde);
8623 qseecom.ce_info.fde = NULL;
8624 if (qseecom.ce_info.pfe) {
8625 pce_info_use = qseecom.ce_info.pfe;
8626 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8627 pce_entry = pce_info_use->ce_pipe_entry;
8628 kfree(pce_entry);
8629 pce_info_use++;
8630 }
8631 }
8632 kfree(qseecom.ce_info.pfe);
8633 qseecom.ce_info.pfe = NULL;
8634 }
8635 kfree(unit_tbl);
8636 kfree(pfde_tbl);
8637 return rc;
8638}
8639
8640static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8641 void __user *argp)
8642{
8643 struct qseecom_ce_info_req req;
8644 struct qseecom_ce_info_req *pinfo = &req;
8645 int ret = 0;
8646 int i;
8647 unsigned int entries;
8648 struct qseecom_ce_info_use *pce_info_use, *p;
8649 int total = 0;
8650 bool found = false;
8651 struct qseecom_ce_pipe_entry *pce_entry;
8652
8653 ret = copy_from_user(pinfo, argp,
8654 sizeof(struct qseecom_ce_info_req));
8655 if (ret) {
8656 pr_err("copy_from_user failed\n");
8657 return ret;
8658 }
8659
8660 switch (pinfo->usage) {
8661 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8662 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8663 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8664 if (qseecom.support_fde) {
8665 p = qseecom.ce_info.fde;
8666 total = qseecom.ce_info.num_fde;
8667 } else {
8668 pr_err("system does not support fde\n");
8669 return -EINVAL;
8670 }
8671 break;
8672 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8673 if (qseecom.support_pfe) {
8674 p = qseecom.ce_info.pfe;
8675 total = qseecom.ce_info.num_pfe;
8676 } else {
8677 pr_err("system does not support pfe\n");
8678 return -EINVAL;
8679 }
8680 break;
8681 default:
8682 pr_err("unsupported usage %d\n", pinfo->usage);
8683 return -EINVAL;
8684 }
8685
8686 pce_info_use = NULL;
8687 for (i = 0; i < total; i++) {
8688 if (!p->alloc)
8689 pce_info_use = p;
8690 else if (!memcmp(p->handle, pinfo->handle,
8691 MAX_CE_INFO_HANDLE_SIZE)) {
8692 pce_info_use = p;
8693 found = true;
8694 break;
8695 }
8696 p++;
8697 }
8698
8699 if (pce_info_use == NULL)
8700 return -EBUSY;
8701
8702 pinfo->unit_num = pce_info_use->unit_num;
8703 if (!pce_info_use->alloc) {
8704 pce_info_use->alloc = true;
8705 memcpy(pce_info_use->handle,
8706 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8707 }
8708 if (pce_info_use->num_ce_pipe_entries >
8709 MAX_CE_PIPE_PAIR_PER_UNIT)
8710 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8711 else
8712 entries = pce_info_use->num_ce_pipe_entries;
8713 pinfo->num_ce_pipe_entries = entries;
8714 pce_entry = pce_info_use->ce_pipe_entry;
8715 for (i = 0; i < entries; i++, pce_entry++)
8716 pinfo->ce_pipe_entry[i] = *pce_entry;
8717 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8718 pinfo->ce_pipe_entry[i].valid = 0;
8719
8720 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8721 pr_err("copy_to_user failed\n");
8722 ret = -EFAULT;
8723 }
8724 return ret;
8725}
8726
8727static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8728 void __user *argp)
8729{
8730 struct qseecom_ce_info_req req;
8731 struct qseecom_ce_info_req *pinfo = &req;
8732 int ret = 0;
8733 struct qseecom_ce_info_use *p;
8734 int total = 0;
8735 int i;
8736 bool found = false;
8737
8738 ret = copy_from_user(pinfo, argp,
8739 sizeof(struct qseecom_ce_info_req));
8740 if (ret)
8741 return ret;
8742
8743 switch (pinfo->usage) {
8744 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8745 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8746 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8747 if (qseecom.support_fde) {
8748 p = qseecom.ce_info.fde;
8749 total = qseecom.ce_info.num_fde;
8750 } else {
8751 pr_err("system does not support fde\n");
8752 return -EINVAL;
8753 }
8754 break;
8755 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8756 if (qseecom.support_pfe) {
8757 p = qseecom.ce_info.pfe;
8758 total = qseecom.ce_info.num_pfe;
8759 } else {
8760 pr_err("system does not support pfe\n");
8761 return -EINVAL;
8762 }
8763 break;
8764 default:
8765 pr_err("unsupported usage %d\n", pinfo->usage);
8766 return -EINVAL;
8767 }
8768
8769 for (i = 0; i < total; i++) {
8770 if (p->alloc &&
8771 !memcmp(p->handle, pinfo->handle,
8772 MAX_CE_INFO_HANDLE_SIZE)) {
8773 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8774 p->alloc = false;
8775 found = true;
8776 break;
8777 }
8778 p++;
8779 }
8780 return ret;
8781}
8782
8783static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8784 void __user *argp)
8785{
8786 struct qseecom_ce_info_req req;
8787 struct qseecom_ce_info_req *pinfo = &req;
8788 int ret = 0;
8789 int i;
8790 unsigned int entries;
8791 struct qseecom_ce_info_use *pce_info_use, *p;
8792 int total = 0;
8793 bool found = false;
8794 struct qseecom_ce_pipe_entry *pce_entry;
8795
8796 ret = copy_from_user(pinfo, argp,
8797 sizeof(struct qseecom_ce_info_req));
8798 if (ret)
8799 return ret;
8800
8801 switch (pinfo->usage) {
8802 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8803 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8804 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8805 if (qseecom.support_fde) {
8806 p = qseecom.ce_info.fde;
8807 total = qseecom.ce_info.num_fde;
8808 } else {
8809 pr_err("system does not support fde\n");
8810 return -EINVAL;
8811 }
8812 break;
8813 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8814 if (qseecom.support_pfe) {
8815 p = qseecom.ce_info.pfe;
8816 total = qseecom.ce_info.num_pfe;
8817 } else {
8818 pr_err("system does not support pfe\n");
8819 return -EINVAL;
8820 }
8821 break;
8822 default:
8823 pr_err("unsupported usage %d\n", pinfo->usage);
8824 return -EINVAL;
8825 }
8826
8827 pce_info_use = NULL;
8828 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8829 pinfo->num_ce_pipe_entries = 0;
8830 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8831 pinfo->ce_pipe_entry[i].valid = 0;
8832
8833 for (i = 0; i < total; i++) {
8834
8835 if (p->alloc && !memcmp(p->handle,
8836 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8837 pce_info_use = p;
8838 found = true;
8839 break;
8840 }
8841 p++;
8842 }
8843 if (!pce_info_use)
8844 goto out;
8845 pinfo->unit_num = pce_info_use->unit_num;
8846 if (pce_info_use->num_ce_pipe_entries >
8847 MAX_CE_PIPE_PAIR_PER_UNIT)
8848 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8849 else
8850 entries = pce_info_use->num_ce_pipe_entries;
8851 pinfo->num_ce_pipe_entries = entries;
8852 pce_entry = pce_info_use->ce_pipe_entry;
8853 for (i = 0; i < entries; i++, pce_entry++)
8854 pinfo->ce_pipe_entry[i] = *pce_entry;
8855 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8856 pinfo->ce_pipe_entry[i].valid = 0;
8857out:
8858 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8859 pr_err("copy_to_user failed\n");
8860 ret = -EFAULT;
8861 }
8862 return ret;
8863}
8864
8865/*
8866 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8867 * then whitelist feature is not supported.
8868 */
8869static int qseecom_check_whitelist_feature(void)
8870{
8871 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8872
8873 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8874}
8875
8876static int qseecom_probe(struct platform_device *pdev)
8877{
8878 int rc;
8879 int i;
8880 uint32_t feature = 10;
8881 struct device *class_dev;
8882 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8883 struct qseecom_command_scm_resp resp;
8884 struct qseecom_ce_info_use *pce_info_use = NULL;
8885
8886 qseecom.qsee_bw_count = 0;
8887 qseecom.qsee_perf_client = 0;
8888 qseecom.qsee_sfpb_bw_count = 0;
8889
8890 qseecom.qsee.ce_core_clk = NULL;
8891 qseecom.qsee.ce_clk = NULL;
8892 qseecom.qsee.ce_core_src_clk = NULL;
8893 qseecom.qsee.ce_bus_clk = NULL;
8894
8895 qseecom.cumulative_mode = 0;
8896 qseecom.current_mode = INACTIVE;
8897 qseecom.support_bus_scaling = false;
8898 qseecom.support_fde = false;
8899 qseecom.support_pfe = false;
8900
8901 qseecom.ce_drv.ce_core_clk = NULL;
8902 qseecom.ce_drv.ce_clk = NULL;
8903 qseecom.ce_drv.ce_core_src_clk = NULL;
8904 qseecom.ce_drv.ce_bus_clk = NULL;
8905 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8906
8907 qseecom.app_block_ref_cnt = 0;
8908 init_waitqueue_head(&qseecom.app_block_wq);
8909 qseecom.whitelist_support = true;
8910
8911 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8912 if (rc < 0) {
8913 pr_err("alloc_chrdev_region failed %d\n", rc);
8914 return rc;
8915 }
8916
8917 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8918 if (IS_ERR(driver_class)) {
8919 rc = -ENOMEM;
8920 pr_err("class_create failed %d\n", rc);
8921 goto exit_unreg_chrdev_region;
8922 }
8923
8924 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8925 QSEECOM_DEV);
8926 if (IS_ERR(class_dev)) {
8927 pr_err("class_device_create failed %d\n", rc);
8928 rc = -ENOMEM;
8929 goto exit_destroy_class;
8930 }
8931
8932 cdev_init(&qseecom.cdev, &qseecom_fops);
8933 qseecom.cdev.owner = THIS_MODULE;
8934
8935 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8936 if (rc < 0) {
8937 pr_err("cdev_add failed %d\n", rc);
8938 goto exit_destroy_device;
8939 }
8940
8941 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008942 INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8943 spin_lock_init(&qseecom.registered_app_list_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08008944 INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008945 INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8946 spin_lock_init(&qseecom.registered_kclient_list_lock);
8947 init_waitqueue_head(&qseecom.send_resp_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08008948 init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
Zhen Kongc4c162a2019-01-23 12:07:12 -08008949 init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq);
Zhen Kong03b2eae2019-09-17 16:58:46 -07008950 INIT_LIST_HEAD(&qseecom.unload_app_pending_list_head);
8951 init_waitqueue_head(&qseecom.unload_app_kthread_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008952 qseecom.send_resp_flag = 0;
8953
8954 qseecom.qsee_version = QSEEE_VERSION_00;
Zhen Kong03f220d2019-02-01 17:12:34 -08008955 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008956 rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8957 &resp, sizeof(resp));
Zhen Kong03f220d2019-02-01 17:12:34 -08008958 mutex_unlock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008959 pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8960 if (rc) {
8961 pr_err("Failed to get QSEE version info %d\n", rc);
8962 goto exit_del_cdev;
8963 }
8964 qseecom.qsee_version = resp.result;
8965 qseecom.qseos_version = QSEOS_VERSION_14;
8966 qseecom.commonlib_loaded = false;
8967 qseecom.commonlib64_loaded = false;
8968 qseecom.pdev = class_dev;
8969 /* Create ION msm client */
8970 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8971 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8972 pr_err("Ion client cannot be created\n");
8973 rc = -ENOMEM;
8974 goto exit_del_cdev;
8975 }
8976
8977 /* register client for bus scaling */
8978 if (pdev->dev.of_node) {
8979 qseecom.pdev->of_node = pdev->dev.of_node;
8980 qseecom.support_bus_scaling =
8981 of_property_read_bool((&pdev->dev)->of_node,
8982 "qcom,support-bus-scaling");
8983 rc = qseecom_retrieve_ce_data(pdev);
8984 if (rc)
8985 goto exit_destroy_ion_client;
8986 qseecom.appsbl_qseecom_support =
8987 of_property_read_bool((&pdev->dev)->of_node,
8988 "qcom,appsbl-qseecom-support");
8989 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
8990 qseecom.appsbl_qseecom_support);
8991
8992 qseecom.commonlib64_loaded =
8993 of_property_read_bool((&pdev->dev)->of_node,
8994 "qcom,commonlib64-loaded-by-uefi");
8995 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
8996 qseecom.commonlib64_loaded);
8997 qseecom.fde_key_size =
8998 of_property_read_bool((&pdev->dev)->of_node,
8999 "qcom,fde-key-size");
9000 qseecom.no_clock_support =
9001 of_property_read_bool((&pdev->dev)->of_node,
9002 "qcom,no-clock-support");
9003 if (!qseecom.no_clock_support) {
9004 pr_info("qseecom clocks handled by other subsystem\n");
9005 } else {
9006 pr_info("no-clock-support=0x%x",
9007 qseecom.no_clock_support);
9008 }
9009
9010 if (of_property_read_u32((&pdev->dev)->of_node,
9011 "qcom,qsee-reentrancy-support",
9012 &qseecom.qsee_reentrancy_support)) {
9013 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
9014 qseecom.qsee_reentrancy_support = 0;
9015 } else {
9016 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
9017 qseecom.qsee_reentrancy_support);
9018 }
9019
Jiten Patela7bb1d52018-05-11 12:34:26 +05309020 qseecom.enable_key_wrap_in_ks =
9021 of_property_read_bool((&pdev->dev)->of_node,
9022 "qcom,enable-key-wrap-in-ks");
9023 if (qseecom.enable_key_wrap_in_ks) {
9024 pr_warn("qseecom.enable_key_wrap_in_ks = %d\n",
9025 qseecom.enable_key_wrap_in_ks);
9026 }
9027
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009028 /*
9029 * The qseecom bus scaling flag can not be enabled when
9030 * crypto clock is not handled by HLOS.
9031 */
9032 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
9033 pr_err("support_bus_scaling flag can not be enabled.\n");
9034 rc = -EINVAL;
9035 goto exit_destroy_ion_client;
9036 }
9037
9038 if (of_property_read_u32((&pdev->dev)->of_node,
9039 "qcom,ce-opp-freq",
9040 &qseecom.ce_opp_freq_hz)) {
9041 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
9042 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
9043 }
9044 rc = __qseecom_init_clk(CLK_QSEE);
9045 if (rc)
9046 goto exit_destroy_ion_client;
9047
9048 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9049 (qseecom.support_pfe || qseecom.support_fde)) {
9050 rc = __qseecom_init_clk(CLK_CE_DRV);
9051 if (rc) {
9052 __qseecom_deinit_clk(CLK_QSEE);
9053 goto exit_destroy_ion_client;
9054 }
9055 } else {
9056 struct qseecom_clk *qclk;
9057
9058 qclk = &qseecom.qsee;
9059 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
9060 qseecom.ce_drv.ce_clk = qclk->ce_clk;
9061 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
9062 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
9063 }
9064
9065 qseecom_platform_support = (struct msm_bus_scale_pdata *)
9066 msm_bus_cl_get_pdata(pdev);
9067 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
9068 (!qseecom.is_apps_region_protected &&
9069 !qseecom.appsbl_qseecom_support)) {
9070 struct resource *resource = NULL;
9071 struct qsee_apps_region_info_ireq req;
9072 struct qsee_apps_region_info_64bit_ireq req_64bit;
9073 struct qseecom_command_scm_resp resp;
9074 void *cmd_buf = NULL;
9075 size_t cmd_len;
9076
9077 resource = platform_get_resource_byname(pdev,
9078 IORESOURCE_MEM, "secapp-region");
9079 if (resource) {
9080 if (qseecom.qsee_version < QSEE_VERSION_40) {
9081 req.qsee_cmd_id =
9082 QSEOS_APP_REGION_NOTIFICATION;
9083 req.addr = (uint32_t)resource->start;
9084 req.size = resource_size(resource);
9085 cmd_buf = (void *)&req;
9086 cmd_len = sizeof(struct
9087 qsee_apps_region_info_ireq);
9088 pr_warn("secure app region addr=0x%x size=0x%x",
9089 req.addr, req.size);
9090 } else {
9091 req_64bit.qsee_cmd_id =
9092 QSEOS_APP_REGION_NOTIFICATION;
9093 req_64bit.addr = resource->start;
9094 req_64bit.size = resource_size(
9095 resource);
9096 cmd_buf = (void *)&req_64bit;
9097 cmd_len = sizeof(struct
9098 qsee_apps_region_info_64bit_ireq);
9099 pr_warn("secure app region addr=0x%llx size=0x%x",
9100 req_64bit.addr, req_64bit.size);
9101 }
9102 } else {
9103 pr_err("Fail to get secure app region info\n");
9104 rc = -EINVAL;
9105 goto exit_deinit_clock;
9106 }
9107 rc = __qseecom_enable_clk(CLK_QSEE);
9108 if (rc) {
9109 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
9110 rc = -EIO;
9111 goto exit_deinit_clock;
9112 }
Zhen Kong03f220d2019-02-01 17:12:34 -08009113 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009114 rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
9115 cmd_buf, cmd_len,
9116 &resp, sizeof(resp));
Zhen Kong03f220d2019-02-01 17:12:34 -08009117 mutex_unlock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009118 __qseecom_disable_clk(CLK_QSEE);
9119 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
9120 pr_err("send secapp reg fail %d resp.res %d\n",
9121 rc, resp.result);
9122 rc = -EINVAL;
9123 goto exit_deinit_clock;
9124 }
9125 }
9126 /*
9127 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
9128 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
9129 * Pls add "qseecom.commonlib64_loaded = true" here too.
9130 */
9131 if (qseecom.is_apps_region_protected ||
9132 qseecom.appsbl_qseecom_support)
9133 qseecom.commonlib_loaded = true;
9134 } else {
9135 qseecom_platform_support = (struct msm_bus_scale_pdata *)
9136 pdev->dev.platform_data;
9137 }
9138 if (qseecom.support_bus_scaling) {
9139 init_timer(&(qseecom.bw_scale_down_timer));
9140 INIT_WORK(&qseecom.bw_inactive_req_ws,
9141 qseecom_bw_inactive_req_work);
9142 qseecom.bw_scale_down_timer.function =
9143 qseecom_scale_bus_bandwidth_timer_callback;
9144 }
9145 qseecom.timer_running = false;
9146 qseecom.qsee_perf_client = msm_bus_scale_register_client(
9147 qseecom_platform_support);
9148
9149 qseecom.whitelist_support = qseecom_check_whitelist_feature();
9150 pr_warn("qseecom.whitelist_support = %d\n",
9151 qseecom.whitelist_support);
9152
9153 if (!qseecom.qsee_perf_client)
9154 pr_err("Unable to register bus client\n");
9155
Zhen Kongc4c162a2019-01-23 12:07:12 -08009156 /*create a kthread to process pending listener unregister task */
9157 qseecom.unregister_lsnr_kthread_task = kthread_run(
9158 __qseecom_unregister_listener_kthread_func,
9159 NULL, "qseecom-unreg-lsnr");
9160 if (IS_ERR(qseecom.unregister_lsnr_kthread_task)) {
9161 pr_err("failed to create kthread to unregister listener\n");
9162 rc = -EINVAL;
9163 goto exit_deinit_clock;
9164 }
9165 atomic_set(&qseecom.unregister_lsnr_kthread_state,
9166 LSNR_UNREG_KT_SLEEP);
Zhen Kong03b2eae2019-09-17 16:58:46 -07009167
9168 /*create a kthread to process pending ta unloading task */
9169 qseecom.unload_app_kthread_task = kthread_run(
9170 __qseecom_unload_app_kthread_func,
9171 NULL, "qseecom-unload-ta");
9172 if (IS_ERR(qseecom.unload_app_kthread_task)) {
9173 pr_err("failed to create kthread to unload ta\n");
9174 rc = -EINVAL;
9175 goto exit_kill_unreg_lsnr_kthread;
9176 }
9177 atomic_set(&qseecom.unload_app_kthread_state,
9178 UNLOAD_APP_KT_SLEEP);
9179
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009180 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
9181 return 0;
9182
Zhen Kong03b2eae2019-09-17 16:58:46 -07009183exit_kill_unreg_lsnr_kthread:
9184 kthread_stop(qseecom.unregister_lsnr_kthread_task);
9185
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009186exit_deinit_clock:
9187 __qseecom_deinit_clk(CLK_QSEE);
9188 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9189 (qseecom.support_pfe || qseecom.support_fde))
9190 __qseecom_deinit_clk(CLK_CE_DRV);
9191exit_destroy_ion_client:
9192 if (qseecom.ce_info.fde) {
9193 pce_info_use = qseecom.ce_info.fde;
9194 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
9195 kzfree(pce_info_use->ce_pipe_entry);
9196 pce_info_use++;
9197 }
9198 kfree(qseecom.ce_info.fde);
9199 }
9200 if (qseecom.ce_info.pfe) {
9201 pce_info_use = qseecom.ce_info.pfe;
9202 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
9203 kzfree(pce_info_use->ce_pipe_entry);
9204 pce_info_use++;
9205 }
9206 kfree(qseecom.ce_info.pfe);
9207 }
9208 ion_client_destroy(qseecom.ion_clnt);
9209exit_del_cdev:
9210 cdev_del(&qseecom.cdev);
9211exit_destroy_device:
9212 device_destroy(driver_class, qseecom_device_no);
9213exit_destroy_class:
9214 class_destroy(driver_class);
9215exit_unreg_chrdev_region:
9216 unregister_chrdev_region(qseecom_device_no, 1);
9217 return rc;
9218}
9219
9220static int qseecom_remove(struct platform_device *pdev)
9221{
9222 struct qseecom_registered_kclient_list *kclient = NULL;
Monika Singhe711b162018-04-24 09:54:50 +05309223 struct qseecom_registered_kclient_list *kclient_tmp = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009224 unsigned long flags = 0;
9225 int ret = 0;
9226 int i;
9227 struct qseecom_ce_pipe_entry *pce_entry;
9228 struct qseecom_ce_info_use *pce_info_use;
9229
9230 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
9231 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
9232
Monika Singhe711b162018-04-24 09:54:50 +05309233 list_for_each_entry_safe(kclient, kclient_tmp,
9234 &qseecom.registered_kclient_list_head, list) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009235
9236 /* Break the loop if client handle is NULL */
Zhen Kong9131def2018-07-13 12:02:32 -07009237 if (!kclient->handle) {
9238 list_del(&kclient->list);
9239 kzfree(kclient);
9240 break;
9241 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009242
9243 list_del(&kclient->list);
9244 mutex_lock(&app_access_lock);
9245 ret = qseecom_unload_app(kclient->handle->dev, false);
9246 mutex_unlock(&app_access_lock);
9247 if (!ret) {
9248 kzfree(kclient->handle->dev);
9249 kzfree(kclient->handle);
9250 kzfree(kclient);
9251 }
9252 }
9253
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009254 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
9255
9256 if (qseecom.qseos_version > QSEEE_VERSION_00)
9257 qseecom_unload_commonlib_image();
9258
9259 if (qseecom.qsee_perf_client)
9260 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
9261 0);
9262 if (pdev->dev.platform_data != NULL)
9263 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
9264
9265 if (qseecom.support_bus_scaling) {
9266 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9267 del_timer_sync(&qseecom.bw_scale_down_timer);
9268 }
9269
9270 if (qseecom.ce_info.fde) {
9271 pce_info_use = qseecom.ce_info.fde;
9272 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
9273 pce_entry = pce_info_use->ce_pipe_entry;
9274 kfree(pce_entry);
9275 pce_info_use++;
9276 }
9277 }
9278 kfree(qseecom.ce_info.fde);
9279 if (qseecom.ce_info.pfe) {
9280 pce_info_use = qseecom.ce_info.pfe;
9281 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
9282 pce_entry = pce_info_use->ce_pipe_entry;
9283 kfree(pce_entry);
9284 pce_info_use++;
9285 }
9286 }
9287 kfree(qseecom.ce_info.pfe);
9288
9289 /* register client for bus scaling */
9290 if (pdev->dev.of_node) {
9291 __qseecom_deinit_clk(CLK_QSEE);
9292 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9293 (qseecom.support_pfe || qseecom.support_fde))
9294 __qseecom_deinit_clk(CLK_CE_DRV);
9295 }
9296
9297 ion_client_destroy(qseecom.ion_clnt);
9298
Zhen Kong03b2eae2019-09-17 16:58:46 -07009299 kthread_stop(qseecom.unload_app_kthread_task);
9300
Zhen Kongc4c162a2019-01-23 12:07:12 -08009301 kthread_stop(qseecom.unregister_lsnr_kthread_task);
9302
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009303 cdev_del(&qseecom.cdev);
9304
9305 device_destroy(driver_class, qseecom_device_no);
9306
9307 class_destroy(driver_class);
9308
9309 unregister_chrdev_region(qseecom_device_no, 1);
9310
9311 return ret;
9312}
9313
9314static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
9315{
9316 int ret = 0;
9317 struct qseecom_clk *qclk;
9318
9319 qclk = &qseecom.qsee;
9320 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
9321 if (qseecom.no_clock_support)
9322 return 0;
9323
9324 mutex_lock(&qsee_bw_mutex);
9325 mutex_lock(&clk_access_lock);
9326
9327 if (qseecom.current_mode != INACTIVE) {
9328 ret = msm_bus_scale_client_update_request(
9329 qseecom.qsee_perf_client, INACTIVE);
9330 if (ret)
9331 pr_err("Fail to scale down bus\n");
9332 else
9333 qseecom.current_mode = INACTIVE;
9334 }
9335
9336 if (qclk->clk_access_cnt) {
9337 if (qclk->ce_clk != NULL)
9338 clk_disable_unprepare(qclk->ce_clk);
9339 if (qclk->ce_core_clk != NULL)
9340 clk_disable_unprepare(qclk->ce_core_clk);
9341 if (qclk->ce_bus_clk != NULL)
9342 clk_disable_unprepare(qclk->ce_bus_clk);
9343 }
9344
9345 del_timer_sync(&(qseecom.bw_scale_down_timer));
9346 qseecom.timer_running = false;
9347
9348 mutex_unlock(&clk_access_lock);
9349 mutex_unlock(&qsee_bw_mutex);
9350 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9351
9352 return 0;
9353}
9354
9355static int qseecom_resume(struct platform_device *pdev)
9356{
9357 int mode = 0;
9358 int ret = 0;
9359 struct qseecom_clk *qclk;
9360
9361 qclk = &qseecom.qsee;
9362 if (qseecom.no_clock_support)
9363 goto exit;
9364
9365 mutex_lock(&qsee_bw_mutex);
9366 mutex_lock(&clk_access_lock);
9367 if (qseecom.cumulative_mode >= HIGH)
9368 mode = HIGH;
9369 else
9370 mode = qseecom.cumulative_mode;
9371
9372 if (qseecom.cumulative_mode != INACTIVE) {
9373 ret = msm_bus_scale_client_update_request(
9374 qseecom.qsee_perf_client, mode);
9375 if (ret)
9376 pr_err("Fail to scale up bus to %d\n", mode);
9377 else
9378 qseecom.current_mode = mode;
9379 }
9380
9381 if (qclk->clk_access_cnt) {
9382 if (qclk->ce_core_clk != NULL) {
9383 ret = clk_prepare_enable(qclk->ce_core_clk);
9384 if (ret) {
9385 pr_err("Unable to enable/prep CE core clk\n");
9386 qclk->clk_access_cnt = 0;
9387 goto err;
9388 }
9389 }
9390 if (qclk->ce_clk != NULL) {
9391 ret = clk_prepare_enable(qclk->ce_clk);
9392 if (ret) {
9393 pr_err("Unable to enable/prep CE iface clk\n");
9394 qclk->clk_access_cnt = 0;
9395 goto ce_clk_err;
9396 }
9397 }
9398 if (qclk->ce_bus_clk != NULL) {
9399 ret = clk_prepare_enable(qclk->ce_bus_clk);
9400 if (ret) {
9401 pr_err("Unable to enable/prep CE bus clk\n");
9402 qclk->clk_access_cnt = 0;
9403 goto ce_bus_clk_err;
9404 }
9405 }
9406 }
9407
9408 if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
9409 qseecom.bw_scale_down_timer.expires = jiffies +
9410 msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
9411 mod_timer(&(qseecom.bw_scale_down_timer),
9412 qseecom.bw_scale_down_timer.expires);
9413 qseecom.timer_running = true;
9414 }
9415
9416 mutex_unlock(&clk_access_lock);
9417 mutex_unlock(&qsee_bw_mutex);
9418 goto exit;
9419
9420ce_bus_clk_err:
9421 if (qclk->ce_clk)
9422 clk_disable_unprepare(qclk->ce_clk);
9423ce_clk_err:
9424 if (qclk->ce_core_clk)
9425 clk_disable_unprepare(qclk->ce_core_clk);
9426err:
9427 mutex_unlock(&clk_access_lock);
9428 mutex_unlock(&qsee_bw_mutex);
9429 ret = -EIO;
9430exit:
9431 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
9432 return ret;
9433}
9434
9435static const struct of_device_id qseecom_match[] = {
9436 {
9437 .compatible = "qcom,qseecom",
9438 },
9439 {}
9440};
9441
9442static struct platform_driver qseecom_plat_driver = {
9443 .probe = qseecom_probe,
9444 .remove = qseecom_remove,
9445 .suspend = qseecom_suspend,
9446 .resume = qseecom_resume,
9447 .driver = {
9448 .name = "qseecom",
9449 .owner = THIS_MODULE,
9450 .of_match_table = qseecom_match,
9451 },
9452};
9453
9454static int qseecom_init(void)
9455{
9456 return platform_driver_register(&qseecom_plat_driver);
9457}
9458
9459static void qseecom_exit(void)
9460{
9461 platform_driver_unregister(&qseecom_plat_driver);
9462}
9463
9464MODULE_LICENSE("GPL v2");
9465MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9466
9467module_init(qseecom_init);
9468module_exit(qseecom_exit);