/*
 * QTI Secure Execution Environment Communicator (QSEECOM) driver
 *
 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/qseecom.h>
#include <linux/elf.h>
#include <linux/firmware.h>
#include <linux/freezer.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/dma-mapping.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/socinfo.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <soc/qcom/qseecomi.h>
#include <asm/cacheflush.h>
#include "qseecom_kernel.h"
#include <crypto/ice.h>
#include <linux/delay.h>

#include <linux/compat.h>
#include "compat_qseecom.h"
#include <linux/kthread.h>

#define QSEECOM_DEV "qseecom"
#define QSEOS_VERSION_14 0x14
#define QSEEE_VERSION_00 0x400000
#define QSEE_VERSION_01 0x401000
#define QSEE_VERSION_02 0x402000
#define QSEE_VERSION_03 0x403000
#define QSEE_VERSION_04 0x404000
#define QSEE_VERSION_05 0x405000
#define QSEE_VERSION_20 0x800000
#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */

#define QSEE_CE_CLK_100MHZ 100000000
#define CE_CLK_DIV 1000000

#define QSEECOM_MAX_SG_ENTRY 4096
#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
	(QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)

#define QSEECOM_INVALID_KEY_ID 0xff

/* Save partition image hash for authentication check */
#define SCM_SAVE_PARTITION_HASH_ID 0x01

/* Check if enterprise security is activated */
#define SCM_IS_ACTIVATED_ID 0x02

/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
#define SCM_MDTP_CIPHER_DIP 0x01

/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
#define MAX_DIP 0x20000

#define RPMB_SERVICE 0x2000
#define SSD_SERVICE 0x3000

#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
#define TWO 2
#define QSEECOM_UFS_ICE_CE_NUM 10
#define QSEECOM_SDCC_ICE_CE_NUM 20
#define QSEECOM_ICE_FDE_KEY_INDEX 0

#define PHY_ADDR_4G (1ULL<<32)

#define QSEECOM_STATE_NOT_READY 0
#define QSEECOM_STATE_SUSPEND 1
#define QSEECOM_STATE_READY 2
#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2

/*
 * Default the ce info unit to 0 for services which support only a
 * single instance. Most services are in this category.
 */
#define DEFAULT_CE_INFO_UNIT 0
#define DEFAULT_NUM_CE_INFO_UNIT 1

#define FDE_FLAG_POS 4
#define ENABLE_KEY_WRAP_IN_KS (1 << FDE_FLAG_POS)

enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};

enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};

enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,
	QSEECOM_LISTENER_SERVICE,
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};

enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,
	CLK_CE_DRV,
	CLK_INVALID,
};

enum qseecom_listener_unregister_kthread_state {
	LSNR_UNREG_KT_SLEEP = 0,
	LSNR_UNREG_KT_WAKEUP,
};

enum qseecom_unload_app_kthread_state {
	UNLOAD_APP_KT_SLEEP = 0,
	UNLOAD_APP_KT_WAKEUP,
};

static struct class *driver_class;
static dev_t qseecom_device_no;

static DEFINE_MUTEX(qsee_bw_mutex);
static DEFINE_MUTEX(app_access_lock);
static DEFINE_MUTEX(clk_access_lock);
static DEFINE_MUTEX(listener_access_lock);
static DEFINE_MUTEX(unload_app_pending_list_lock);


struct sglist_info {
	uint32_t indexAndFlags;
	uint32_t sizeOrCount;
};

/*
 * The 31st bit indicates whether there is a single or multiple physical
 * addresses inside the request buffer. If it is set, the index locates a
 * single physical addr inside the request buffer, and `sizeOrCount` is the
 * size of the memory being shared at that physical address.
 * Otherwise, the index locates an array of {start, len} pairs (a
 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
 * that array.
 *
 * The 30th bit indicates a 64- or 32-bit address; when it is set, physical
 * addr and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit
 * values.
 *
 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
 */
#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
	((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))

#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)

#define FEATURE_ID_WHITELIST 15 /* whitelist feature id */

#define MAKE_WHITELIST_VERSION(major, minor, patch) \
	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
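
/*
 * Illustrative note (added, not in the original source): with the encoding
 * described above, a table entry for a single 64-bit physical address located
 * at offset 0x40 in the request buffer would be built as
 * SGLISTINFO_SET_INDEX_FLAG(1, 1, 0x40): bit 31 set (single address),
 * bit 30 set (64-bit), index 0x40; sizeOrCount then holds the size of the
 * shared region rather than an entry count.
 */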

struct qseecom_registered_listener_list {
	struct list_head list;
	struct qseecom_register_listener_req svc;
	void *user_virt_sb_base;
	u8 *sb_virt;
	phys_addr_t sb_phys;
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	wait_queue_head_t rcv_req_wq;
	/* rcv_req_flag: 0: ready and empty; 1: received req */
	int rcv_req_flag;
	int send_resp_flag;
	bool listener_in_use;
	/* wq for thread blocked on this listener */
	wait_queue_head_t listener_block_app_wq;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	int abort;
	bool unregister_pending;
};

struct qseecom_unregister_pending_list {
	struct list_head list;
	struct qseecom_dev_handle *data;
};

struct qseecom_registered_app_list {
	struct list_head list;
	u32 app_id;
	u32 ref_cnt;
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	bool app_blocked;
	u32 check_block;
	u32 blocked_on_listener_id;
};

struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};

struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
	unsigned int unit_num;
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;
	uint32_t type;
};

struct ce_hw_usage_info {
	uint32_t qsee_ce_hw_instance;
	uint32_t num_fde;
	struct qseecom_ce_info_use *fde;
	uint32_t num_pfe;
	struct qseecom_ce_info_use *pfe;
};

struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;
};

struct qseecom_control {
	struct ion_client *ion_clnt; /* Ion client */
	struct list_head registered_listener_list_head;

	struct list_head registered_app_list_head;
	spinlock_t registered_app_list_lock;

	struct list_head registered_kclient_list_head;
	spinlock_t registered_kclient_list_lock;

	wait_queue_head_t send_resp_wq;
	int send_resp_flag;

	uint32_t qseos_version;
	uint32_t qsee_version;
	struct device *pdev;
	bool whitelist_support;
	bool commonlib_loaded;
	bool commonlib64_loaded;
	struct ce_hw_usage_info ce_info;

	int qsee_bw_count;
	int qsee_sfpb_bw_count;

	uint32_t qsee_perf_client;
	struct qseecom_clk qsee;
	struct qseecom_clk ce_drv;

	bool support_bus_scaling;
	bool support_fde;
	bool support_pfe;
	bool fde_key_size;
	uint32_t cumulative_mode;
	enum qseecom_bandwidth_request_mode current_mode;
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;
	bool enable_key_wrap_in_ks;

	uint32_t app_block_ref_cnt;
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;
	int is_apps_region_protected;
	bool smcinvoke_support;

	struct list_head unregister_lsnr_pending_list_head;
	wait_queue_head_t register_lsnr_pending_wq;
	struct task_struct *unregister_lsnr_kthread_task;
	wait_queue_head_t unregister_lsnr_kthread_wq;
	atomic_t unregister_lsnr_kthread_state;

	struct list_head unload_app_pending_list_head;
	struct task_struct *unload_app_kthread_task;
	wait_queue_head_t unload_app_kthread_wq;
	atomic_t unload_app_kthread_state;
};

struct qseecom_unload_app_pending_list {
	struct list_head list;
	struct qseecom_dev_handle *data;
};

struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;
	size_t size;
	void *vbase;
	dma_addr_t pbase;
};

struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};

struct qseecom_client_handle {
	u32 app_id;
	u8 *sb_virt;
	phys_addr_t sb_phys;
	unsigned long user_virt_sb_base;
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
	bool from_smcinvoke;
	bool unload_pending;
};

struct qseecom_listener_handle {
	u32 id;
	bool unregister_pending;
	bool release_called;
};

static struct qseecom_control qseecom;

struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;
		struct qseecom_listener_handle listener;
	};
	bool released;
	int abort;
	wait_queue_head_t abort_wq;
	atomic_t ioctl_count;
	bool perf_enabled;
	bool fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	bool use_legacy_cmd;
};

struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};

struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};

static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},

	{
		.desc = "Full Disk Encryption",
	},

	{
		.desc = "Per File Encryption",
	},

	{
		.desc = "UFS ICE Full Disk Encryption",
	},

	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};

/* Function prototypes */
static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name);
static int qseecom_enable_ice_setup(int usage);
static int qseecom_disable_ice_setup(int usage);
static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
				void __user *argp);
static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
				void __user *argp);
static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
				void __user *argp);
static int __qseecom_unload_app(struct qseecom_dev_handle *data,
				uint32_t app_id);

static int get_qseecom_keymaster_status(char *str)
{
	get_option(&str, &qseecom.is_apps_region_protected);
	return 1;
}
__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
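
/*
 * Illustrative note (added, not in the original source): the __setup() hook
 * above parses the integer the bootloader appends to the kernel command line,
 * e.g. "androidboot.keymaster=1", and stores it in
 * qseecom.is_apps_region_protected. The actual value passed depends on the
 * bootloader and target.
 */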


#define QSEECOM_SCM_EBUSY_WAIT_MS 30
#define QSEECOM_SCM_EBUSY_MAX_RETRY 67

static int __qseecom_scm_call2_locked(uint32_t smc_id, struct scm_desc *desc)
{
	int ret = 0;
	int retry_count = 0;

	do {
		ret = scm_call2_noretry(smc_id, desc);
		if (ret == -EBUSY) {
			mutex_unlock(&app_access_lock);
			msleep(QSEECOM_SCM_EBUSY_WAIT_MS);
			mutex_lock(&app_access_lock);
		}
		if (retry_count == 33)
			pr_warn("secure world has been busy for 1 second!\n");
	} while (ret == -EBUSY &&
			(retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY));
	return ret;
}
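
/*
 * Descriptive note (added, not in the original source): the helper above
 * retries a busy SCM call up to QSEECOM_SCM_EBUSY_MAX_RETRY (67) times with a
 * QSEECOM_SCM_EBUSY_WAIT_MS (30 ms) sleep between attempts, so a caller can
 * block here for roughly two seconds in the worst case. app_access_lock is
 * dropped around each sleep so other callers are not stalled while the secure
 * world keeps returning -EBUSY; the warning fires after about one second
 * (33 retries).
 */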

static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
			const void *req_buf, void *resp_buf)
{
	int ret = 0;
	uint32_t smc_id = 0;
	uint32_t qseos_cmd_id = 0;
	struct scm_desc desc = {0};
	struct qseecom_command_scm_resp *scm_resp = NULL;

	if (!req_buf || !resp_buf) {
		pr_err("Invalid buffer pointer\n");
		return -EINVAL;
	}
	qseos_cmd_id = *(uint32_t *)req_buf;
	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;

	switch (svc_id) {
	case 6: {
		if (tz_cmd_id == 3) {
			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
			desc.args[0] = *(uint32_t *)req_buf;
		} else {
			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
				svc_id, tz_cmd_id);
			return -EINVAL;
		}
		ret = __qseecom_scm_call2_locked(smc_id, &desc);
		break;
	}
	case SCM_SVC_ES: {
		switch (tz_cmd_id) {
		case SCM_SAVE_PARTITION_HASH_ID: {
			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
			struct qseecom_save_partition_hash_req *p_hash_req =
				(struct qseecom_save_partition_hash_req *)
				req_buf;
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, p_hash_req->digest,
				SHA256_DIGEST_LENGTH);
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
			desc.args[0] = p_hash_req->partition_id;
			desc.args[1] = virt_to_phys(tzbuf);
			desc.args[2] = SHA256_DIGEST_LENGTH;
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		default: {
			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
				tz_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (tz_cmd_id) */
		break;
	} /* end of case SCM_SVC_ES */
	case SCM_SVC_TZSCHEDULER: {
		switch (qseos_cmd_id) {
		case QSEOS_APP_START_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_START_ID;
			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_app_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_APP_SHUTDOWN_COMMAND: {
			struct qseecom_unload_app_ireq *req;

			req = (struct qseecom_unload_app_ireq *)req_buf;
			smc_id = TZ_OS_APP_SHUTDOWN_ID;
			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_LOOKUP_COMMAND: {
			struct qseecom_check_app_ireq *req;
			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			req = (struct qseecom_check_app_ireq *)req_buf;
			pr_debug("Lookup app_name = %s\n", req->app_name);
			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_APP_LOOKUP_ID;
			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = strlen(req->app_name);
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_APP_REGION_NOTIFICATION: {
			struct qsee_apps_region_info_ireq *req;
			struct qsee_apps_region_info_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
			desc.arginfo =
				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qsee_apps_region_info_ireq *)
					req_buf;
				desc.args[0] = req->addr;
				desc.args[1] = req->size;
			} else {
				req_64bit =
				(struct qsee_apps_region_info_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->addr;
				desc.args[1] = req_64bit->size;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
			struct qseecom_load_lib_image_ireq *req;
			struct qseecom_load_lib_image_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_lib_image_ireq *)
					req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_lib_image_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_REGISTER_LISTENER: {
			struct qseecom_register_listener_ireq *req;
			struct qseecom_register_listener_64bit_ireq *req_64bit;

			desc.arginfo =
				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_register_listener_ireq *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->sb_ptr;
				desc.args[2] = req->sb_len;
			} else {
				req_64bit =
				(struct qseecom_register_listener_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->listener_id;
				desc.args[1] = req_64bit->sb_ptr;
				desc.args[2] = req_64bit->sb_len;
			}
			qseecom.smcinvoke_support = true;
			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			if (ret == -EIO) {
				/* smcinvoke is not supported */
				qseecom.smcinvoke_support = false;
				smc_id = TZ_OS_REGISTER_LISTENER_ID;
				ret = __qseecom_scm_call2_locked(smc_id, &desc);
			}
			break;
		}
		case QSEOS_DEREGISTER_LISTENER: {
			struct qseecom_unregister_listener_ireq *req;

			req = (struct qseecom_unregister_listener_ireq *)
				req_buf;
			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
			struct qseecom_client_listener_data_irsp *req;

			req = (struct qseecom_client_listener_data_irsp *)
				req_buf;
			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
			desc.arginfo =
				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			desc.args[1] = req->status;
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
			struct qseecom_client_listener_data_irsp *req;
			struct qseecom_client_listener_data_64bit_irsp *req_64;

			smc_id =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req =
				(struct qseecom_client_listener_data_irsp *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->status;
				desc.args[2] = req->sglistinfo_ptr;
				desc.args[3] = req->sglistinfo_len;
			} else {
				req_64 =
			(struct qseecom_client_listener_data_64bit_irsp *)
					req_buf;
				desc.args[0] = req_64->listener_id;
				desc.args[1] = req_64->status;
				desc.args[2] = req_64->sglistinfo_ptr;
				desc.args[3] = req_64->sglistinfo_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_app_64bit_ireq *)req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}

		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
			struct qseecom_client_send_service_ireq *req;

			req = (struct qseecom_client_send_service_ireq *)
				req_buf;
			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
			desc.args[0] = req->key_type;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_ERASE_COMMAND: {
			smc_id = TZ_OS_RPMB_ERASE_ID;
			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_GENERATE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_GEN_KEY_ID;
			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_DELETE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_DEL_KEY_ID;
			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_SET_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_select_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_select_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_UPDATE_KEY_USERINFO: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_CLOSE_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_REQUEST_CANCELLATION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
			desc.arginfo =
				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
			struct qseecom_continue_blocked_request_ireq *req =
				(struct qseecom_continue_blocked_request_ireq *)
				req_buf;
			if (qseecom.smcinvoke_support)
				smc_id =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
			else
				smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
			desc.arginfo =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
			desc.args[0] = req->app_or_session_id;
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		default: {
			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
				qseos_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (qseos_cmd_id) */
		break;
	} /* end of case SCM_SVC_TZSCHEDULER */
	default: {
		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
			svc_id);
		ret = -EINVAL;
		break;
	}
	} /* end of switch svc_id */
	scm_resp->result = desc.ret[0];
	scm_resp->resp_type = desc.ret[1];
	scm_resp->data = desc.ret[2];
	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
		scm_resp->result, scm_resp->resp_type, scm_resp->data);
	return ret;
}


static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
			size_t cmd_len, void *resp_buf, size_t resp_len)
{
	if (!is_scm_armv8())
		return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
				resp_buf, resp_len);
	else
		return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
}
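
/*
 * Descriptive note (added, not in the original source): qseecom_scm_call() is
 * the single entry point the rest of the driver uses to reach the secure
 * world. On targets without the ARMv8 SMC calling convention it falls back to
 * the legacy scm_call() interface with explicit buffer lengths; otherwise it
 * routes through qseecom_scm_call2(), which translates the QSEOS command
 * structure into an smc_id/scm_desc pair as shown above.
 */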

static struct qseecom_registered_listener_list *__qseecom_find_svc(
						int32_t listener_id)
{
	struct qseecom_registered_listener_list *entry = NULL;

	list_for_each_entry(entry,
			&qseecom.registered_listener_list_head, list) {
		if (entry->svc.listener_id == listener_id)
			break;
	}
	if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
		pr_debug("Service id: %u is not found\n", listener_id);
		return NULL;
	}

	return entry;
}

static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
				struct qseecom_dev_handle *handle,
				struct qseecom_register_listener_req *listener)
{
	int ret = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_register_listener_64bit_ireq req_64bit;
	struct qseecom_command_scm_resp resp;
	ion_phys_addr_t pa;
	void *cmd_buf = NULL;
	size_t cmd_len;

	/* Get the handle of the shared fd */
	svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					listener->ifd_data_fd);
	if (IS_ERR_OR_NULL(svc->ihandle)) {
		pr_err("Ion client could not retrieve the handle\n");
		return -ENOMEM;
	}

	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
	if (ret) {
		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
			ret);
		return ret;
	}
	/* Populate the structure for sending scm call to load image */
	svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
	if (IS_ERR_OR_NULL(svc->sb_virt)) {
		pr_err("ION memory mapping for listener shared buffer failed\n");
		return -ENOMEM;
	}
	svc->sb_phys = (phys_addr_t)pa;

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
		req.listener_id = svc->svc.listener_id;
		req.sb_len = svc->sb_length;
		req.sb_ptr = (uint32_t)svc->sb_phys;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(struct qseecom_register_listener_ireq);
	} else {
		req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
		req_64bit.listener_id = svc->svc.listener_id;
		req_64bit.sb_len = svc->sb_length;
		req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
		cmd_buf = (void *)&req_64bit;
		cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
	}

	resp.result = QSEOS_RESULT_INCOMPLETE;

	mutex_unlock(&listener_access_lock);
	mutex_lock(&app_access_lock);
	__qseecom_reentrancy_check_if_no_app_blocked(
				TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID);
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
				&resp, sizeof(resp));
	mutex_unlock(&app_access_lock);
	mutex_lock(&listener_access_lock);
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		return -EINVAL;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Error SB registration req: resp.result = %d\n",
			resp.result);
		return -EPERM;
	}
	return 0;
}
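
/*
 * Descriptive note (added, not in the original source): the SCM call above is
 * issued under app_access_lock rather than listener_access_lock, so
 * listener_access_lock is dropped first and reacquired afterwards. This avoids
 * holding the listener list lock across a potentially long trip into the
 * secure world; the same pattern appears in __qseecom_unregister_listener()
 * below.
 */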

static int qseecom_register_listener(struct qseecom_dev_handle *data,
					void __user *argp)
{
	int ret = 0;
	struct qseecom_register_listener_req rcvd_lstnr;
	struct qseecom_registered_listener_list *new_entry;
	struct qseecom_registered_listener_list *ptr_svc;

	ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}
	if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
			rcvd_lstnr.sb_size))
		return -EFAULT;

	ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
	if (ptr_svc) {
		if (ptr_svc->unregister_pending == false) {
			pr_err("Service %d is not unique\n",
				rcvd_lstnr.listener_id);
			data->released = true;
			return -EBUSY;
		} else {
			/* wait until listener is unregistered */
			pr_debug("register %d has to wait\n",
				rcvd_lstnr.listener_id);
			mutex_unlock(&listener_access_lock);
			ret = wait_event_interruptible(
				qseecom.register_lsnr_pending_wq,
				list_empty(
				&qseecom.unregister_lsnr_pending_list_head));
			if (ret) {
				pr_err("interrupted register_pending_wq %d\n",
					rcvd_lstnr.listener_id);
				mutex_lock(&listener_access_lock);
				return -ERESTARTSYS;
			}
			mutex_lock(&listener_access_lock);
		}
	}
	new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;
	memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
	new_entry->rcv_req_flag = 0;

	new_entry->svc.listener_id = rcvd_lstnr.listener_id;
	new_entry->sb_length = rcvd_lstnr.sb_size;
	new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
	if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
		pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
			rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
		kzfree(new_entry);
		return -ENOMEM;
	}

	init_waitqueue_head(&new_entry->rcv_req_wq);
	init_waitqueue_head(&new_entry->listener_block_app_wq);
	new_entry->send_resp_flag = 0;
	new_entry->listener_in_use = false;
	list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);

	data->listener.id = rcvd_lstnr.listener_id;
	pr_debug("Service %d is registered\n", rcvd_lstnr.listener_id);
	return ret;
}

static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
			struct qseecom_registered_listener_list *ptr_svc)
{
	int ret = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL; /* Retrieve phy addr */

	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	mutex_unlock(&listener_access_lock);
	mutex_lock(&app_access_lock);
	__qseecom_reentrancy_check_if_no_app_blocked(
				TZ_OS_DEREGISTER_LISTENER_ID);
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(req), &resp, sizeof(resp));
	mutex_unlock(&app_access_lock);
	mutex_lock(&listener_access_lock);
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
			ret, data->listener.id);
		if (ret == -EBUSY)
			return ret;
		goto exit;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
			resp.result, data->listener.id);
		ret = -EPERM;
		goto exit;
	}

	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_interruptible(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
		}
	}

exit:
	if (ptr_svc->sb_virt) {
		ihandle = ptr_svc->ihandle;
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	list_del(&ptr_svc->list);
	kzfree(ptr_svc);

	data->released = true;
	pr_debug("Service %d is unregistered\n", data->listener.id);
	return ret;
}

static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_unregister_pending_list *entry = NULL;

	if (data->released) {
		pr_err("Don't unregister lsnr %d\n", data->listener.id);
		return -EINVAL;
	}

	ptr_svc = __qseecom_find_svc(data->listener.id);
	if (!ptr_svc) {
		pr_err("Unregister invalid listener ID %d\n", data->listener.id);
		return -ENODATA;
	}
	/* stop CA thread waiting for listener response */
	ptr_svc->abort = 1;
	wake_up_interruptible_all(&qseecom.send_resp_wq);

	/* stop listener thread waiting for listener request */
	data->abort = 1;
	wake_up_all(&ptr_svc->rcv_req_wq);

	/* return directly if pending */
	if (ptr_svc->unregister_pending)
		return 0;

	/* add unregistration into pending list */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	entry->data = data;
	list_add_tail(&entry->list,
		&qseecom.unregister_lsnr_pending_list_head);
	ptr_svc->unregister_pending = true;
	pr_debug("unregister %d pending\n", data->listener.id);
	return 0;
}
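
/*
 * Descriptive note (added, not in the original source): listener
 * unregistration is deferred. qseecom_unregister_listener() only aborts any
 * waiters and queues the request on unregister_lsnr_pending_list_head; the
 * actual DEREGISTER_LISTENER SCM call happens later in
 * __qseecom_processing_pending_lsnr_unregister(), driven by the
 * unregister_lsnr kthread, and only once release_called is set for that
 * handle. A -EBUSY result leaves the entry on the list so it is retried the
 * next time the kthread wakes up.
 */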

static void __qseecom_processing_pending_lsnr_unregister(void)
{
	struct qseecom_unregister_pending_list *entry = NULL;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct list_head *pos;
	int ret = 0;

	mutex_lock(&listener_access_lock);
	while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
		pos = qseecom.unregister_lsnr_pending_list_head.next;
		entry = list_entry(pos,
				struct qseecom_unregister_pending_list, list);
		if (entry && entry->data) {
			pr_debug("process pending unregister %d\n",
					entry->data->listener.id);
			/* don't process if qseecom_release is not called */
			if (!entry->data->listener.release_called)
				break;
			ptr_svc = __qseecom_find_svc(
						entry->data->listener.id);
			if (ptr_svc) {
				ret = __qseecom_unregister_listener(
						entry->data, ptr_svc);
				if (ret == -EBUSY) {
					pr_debug("unregister %d pending again\n",
						entry->data->listener.id);
					mutex_unlock(&listener_access_lock);
					return;
				}
			} else
				pr_err("invalid listener %d\n",
					entry->data->listener.id);
			kzfree(entry->data);
		}
		list_del(pos);
		kzfree(entry);
	}
	mutex_unlock(&listener_access_lock);
	wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
}

static void __wakeup_unregister_listener_kthread(void)
{
	atomic_set(&qseecom.unregister_lsnr_kthread_state,
				LSNR_UNREG_KT_WAKEUP);
	wake_up_interruptible(&qseecom.unregister_lsnr_kthread_wq);
}

static int __qseecom_unregister_listener_kthread_func(void *data)
{
	while (!kthread_should_stop()) {
		wait_event_interruptible(
			qseecom.unregister_lsnr_kthread_wq,
			atomic_read(&qseecom.unregister_lsnr_kthread_state)
				== LSNR_UNREG_KT_WAKEUP);
		pr_debug("kthread to unregister listener is called %d\n",
			atomic_read(&qseecom.unregister_lsnr_kthread_state));
		__qseecom_processing_pending_lsnr_unregister();
		atomic_set(&qseecom.unregister_lsnr_kthread_state,
				LSNR_UNREG_KT_SLEEP);
	}
	pr_warn("kthread to unregister listener stopped\n");
	return 0;
}

static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		qseecom.current_mode = mode;
	}
	return ret;
}

static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}

static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}

static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
{
	struct qseecom_clk *qclk;
	int ret = 0;

	mutex_lock(&clk_access_lock);
	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	else
		qclk = &qseecom.ce_drv;

	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt--;
	} else {
		pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
		ret = -EINVAL;
	}

	mutex_unlock(&clk_access_lock);
	return ret;
}
1533
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001534static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1535{
1536 int32_t ret = 0;
1537 int32_t request_mode = INACTIVE;
1538
1539 mutex_lock(&qsee_bw_mutex);
1540 if (mode == 0) {
1541 if (qseecom.cumulative_mode > MEDIUM)
1542 request_mode = HIGH;
1543 else
1544 request_mode = qseecom.cumulative_mode;
1545 } else {
1546 request_mode = mode;
1547 }
1548
1549 ret = __qseecom_set_msm_bus_request(request_mode);
1550 if (ret) {
1551 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1552 ret, request_mode);
1553 goto err_scale_timer;
1554 }
1555
1556 if (qseecom.timer_running) {
1557 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1558 if (ret) {
1559 pr_err("Failed to decrease clk ref count.\n");
1560 goto err_scale_timer;
1561 }
1562 del_timer_sync(&(qseecom.bw_scale_down_timer));
1563 qseecom.timer_running = false;
1564 }
1565err_scale_timer:
1566 mutex_unlock(&qsee_bw_mutex);
1567 return ret;
1568}
1569
1570
1571static int qseecom_unregister_bus_bandwidth_needs(
1572 struct qseecom_dev_handle *data)
1573{
1574 int32_t ret = 0;
1575
1576 qseecom.cumulative_mode -= data->mode;
1577 data->mode = INACTIVE;
1578
1579 return ret;
1580}
1581
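/*
 * Record this client's bandwidth request (callers hold qsee_bw_mutex):
 * move the client from its previous mode to request_mode while keeping
 * the global cumulative_mode consistent.
 */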
1582static int __qseecom_register_bus_bandwidth_needs(
1583 struct qseecom_dev_handle *data, uint32_t request_mode)
1584{
1585 int32_t ret = 0;
1586
1587 if (data->mode == INACTIVE) {
1588 qseecom.cumulative_mode += request_mode;
1589 data->mode = request_mode;
1590 } else {
1591 if (data->mode != request_mode) {
1592 qseecom.cumulative_mode -= data->mode;
1593 qseecom.cumulative_mode += request_mode;
1594 data->mode = request_mode;
1595 }
1596 }
1597 return ret;
1598}
1599
1600static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1601{
1602 int ret = 0;
1603
1604 ret = qsee_vote_for_clock(data, CLK_DFAB);
1605 if (ret) {
1606 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1607 goto perf_enable_exit;
1608 }
1609 ret = qsee_vote_for_clock(data, CLK_SFPB);
1610 if (ret) {
1611 qsee_disable_clock_vote(data, CLK_DFAB);
1612 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1613 goto perf_enable_exit;
1614 }
1615
1616perf_enable_exit:
1617 return ret;
1618}
1619
1620static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1621 void __user *argp)
1622{
1623 int32_t ret = 0;
1624 int32_t req_mode;
1625
1626 if (qseecom.no_clock_support)
1627 return 0;
1628
1629 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1630 if (ret) {
1631 pr_err("copy_from_user failed\n");
1632 return ret;
1633 }
1634 if (req_mode > HIGH) {
1635 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1636 return -EINVAL;
1637 }
1638
1639 /*
1640 * Register bus bandwidth needs if bus scaling feature is enabled;
1641 * otherwise, qseecom enable/disable clocks for the client directly.
1642 */
1643 if (qseecom.support_bus_scaling) {
1644 mutex_lock(&qsee_bw_mutex);
1645 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1646 mutex_unlock(&qsee_bw_mutex);
1647 } else {
1648 pr_debug("Bus scaling feature is NOT enabled\n");
1649 pr_debug("request bandwidth mode %d for the client\n",
1650 req_mode);
1651 if (req_mode != INACTIVE) {
1652 ret = qseecom_perf_enable(data);
1653 if (ret)
1654 pr_err("Failed to vote for clock with err %d\n",
1655 ret);
1656 } else {
1657 qsee_disable_clock_vote(data, CLK_DFAB);
1658 qsee_disable_clock_vote(data, CLK_SFPB);
1659 }
1660 }
1661 return ret;
1662}
1663
1664static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1665{
1666 if (qseecom.no_clock_support)
1667 return;
1668
1669 mutex_lock(&qsee_bw_mutex);
1670 qseecom.bw_scale_down_timer.expires = jiffies +
1671 msecs_to_jiffies(duration);
1672 mod_timer(&(qseecom.bw_scale_down_timer),
1673 qseecom.bw_scale_down_timer.expires);
1674 qseecom.timer_running = true;
1675 mutex_unlock(&qsee_bw_mutex);
1676}
1677
1678static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1679{
1680 if (!qseecom.support_bus_scaling)
1681 qsee_disable_clock_vote(data, CLK_SFPB);
1682 else
1683 __qseecom_add_bw_scale_down_timer(
1684 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1685}
1686
1687static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1688{
1689 int ret = 0;
1690
1691 if (qseecom.support_bus_scaling) {
1692 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1693 if (ret)
1694 pr_err("Failed to set bw MEDIUM.\n");
1695 } else {
1696 ret = qsee_vote_for_clock(data, CLK_SFPB);
1697 if (ret)
1698 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1699 }
1700 return ret;
1701}
1702
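/*
 * Set up the client's shared buffer: import the ION fd supplied by user
 * space, verify the allocation is at least sb_len bytes, map it into the
 * kernel, and record the physical address, kernel mapping and user-space
 * base so later requests can be translated by __qseecom_uvirt_to_*().
 */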
1703static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1704 void __user *argp)
1705{
1706 ion_phys_addr_t pa;
1707 int32_t ret;
1708 struct qseecom_set_sb_mem_param_req req;
1709 size_t len;
1710
1711 /* Copy the shared buffer parameters from user space */
1712 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1713 return -EFAULT;
1714
1715 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1716 (req.sb_len == 0)) {
1717 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1718 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1719 return -EFAULT;
1720 }
1721 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1722 req.sb_len))
1723 return -EFAULT;
1724
1725 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001726 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001727 req.ifd_data_fd);
1728 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1729 pr_err("Ion client could not retrieve the handle\n");
1730 return -ENOMEM;
1731 }
1732 /* Get the physical address of the ION BUF */
1733 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1734 if (ret) {
1735
1736 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1737 ret);
1738 return ret;
1739 }
1740
1741 if (len < req.sb_len) {
1742 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1743 req.sb_len, len);
1744 return -EINVAL;
1745 }
1746 /* Record the shared buffer details in the client handle */
1747 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1748 data->client.ihandle);
1749 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1750 pr_err("ION memory mapping for client shared buf failed\n");
1751 return -ENOMEM;
1752 }
1753 data->client.sb_phys = (phys_addr_t)pa;
1754 data->client.sb_length = req.sb_len;
1755 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1756 return 0;
1757}
1758
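/*
 * Wait conditions used with wait_event_interruptible(): wake up once a
 * listener response is flagged (global flag on the non-reentrant path,
 * per-listener flag on the reentrant path) or once the client or the
 * listener is aborting.
 */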
Zhen Kong26e62742018-05-04 17:19:06 -07001759static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1760 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001761{
1762 int ret;
1763
1764 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001765 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001766}
1767
1768static int __qseecom_reentrancy_listener_has_sent_rsp(
1769 struct qseecom_dev_handle *data,
1770 struct qseecom_registered_listener_list *ptr_svc)
1771{
1772 int ret;
1773
1774 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001775 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001776}
1777
1778static void __qseecom_clean_listener_sglistinfo(
1779 struct qseecom_registered_listener_list *ptr_svc)
1780{
1781 if (ptr_svc->sglist_cnt) {
1782 memset(ptr_svc->sglistinfo_ptr, 0,
1783 SGLISTINFO_TABLE_SIZE);
1784 ptr_svc->sglist_cnt = 0;
1785 }
1786}
1787
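/*
 * Handle QSEOS_RESULT_INCOMPLETE from TZ: TZ is asking for a listener
 * service. In a loop, wake the requested listener, wait for its response
 * (or an abort), then send QSEOS_LISTENER_DATA_RSP_COMMAND(_WHITELIST)
 * back via scm_call and re-examine the new result, until it is no longer
 * INCOMPLETE. app_block_ref_cnt stays elevated for the duration so that
 * OS-level commands are held off while a listener exchange is in flight.
 */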
1788static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1789 struct qseecom_command_scm_resp *resp)
1790{
1791 int ret = 0;
1792 int rc = 0;
1793 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07001794 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
1795 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
1796 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001797 struct qseecom_registered_listener_list *ptr_svc = NULL;
1798 sigset_t new_sigset;
1799 sigset_t old_sigset;
1800 uint32_t status;
1801 void *cmd_buf = NULL;
1802 size_t cmd_len;
1803 struct sglist_info *table = NULL;
1804
Zhen Kongbcdeda22018-11-16 13:50:51 -08001805 qseecom.app_block_ref_cnt++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001806 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1807 lstnr = resp->data;
1808 /*
1809 * Wake up the blocking listener service with the lstnr id
1810 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08001811 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001812 list_for_each_entry(ptr_svc,
1813 &qseecom.registered_listener_list_head, list) {
1814 if (ptr_svc->svc.listener_id == lstnr) {
1815 ptr_svc->listener_in_use = true;
1816 ptr_svc->rcv_req_flag = 1;
AnilKumar Chimata20f86662019-12-11 11:51:03 +05301817 rc = msm_ion_do_cache_op(qseecom.ion_clnt,
1818 ptr_svc->ihandle,
1819 ptr_svc->sb_virt,
1820 ptr_svc->sb_length,
1821 ION_IOC_INV_CACHES);
1822 if (rc) {
1823 pr_err("cache opp failed %d\n", rc);
1824 status = QSEOS_RESULT_FAILURE;
1825 goto err_resp;
1826 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001827 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1828 break;
1829 }
1830 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001831
1832 if (ptr_svc == NULL) {
1833 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07001834 rc = -EINVAL;
1835 status = QSEOS_RESULT_FAILURE;
1836 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001837 }
1838
1839 if (!ptr_svc->ihandle) {
1840 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07001841 rc = -EINVAL;
1842 status = QSEOS_RESULT_FAILURE;
1843 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001844 }
1845
1846 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07001847 pr_err("Service %d does not exist\n",
1848 lstnr);
1849 rc = -ERESTARTSYS;
1850 ptr_svc = NULL;
1851 status = QSEOS_RESULT_FAILURE;
1852 goto err_resp;
1853 }
1854
1855 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001856 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07001857 lstnr, ptr_svc->abort);
1858 rc = -ENODEV;
1859 status = QSEOS_RESULT_FAILURE;
1860 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001861 }
Zhen Kong25731112018-09-20 13:10:03 -07001862
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001863 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1864
1865 /* initialize the new signal mask with all signals*/
1866 sigfillset(&new_sigset);
1867 /* block all signals */
1868 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1869
Zhen Kongbcdeda22018-11-16 13:50:51 -08001870 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001871 do {
1872 /*
1873 * When reentrancy is not supported, check global
1874 * send_resp_flag; otherwise, check this listener's
1875 * send_resp_flag.
1876 */
1877 if (!qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301878 !wait_event_interruptible(qseecom.send_resp_wq,
Zhen Kong26e62742018-05-04 17:19:06 -07001879 __qseecom_listener_has_sent_rsp(
1880 data, ptr_svc))) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001881 break;
1882 }
1883
1884 if (qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301885 !wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001886 __qseecom_reentrancy_listener_has_sent_rsp(
1887 data, ptr_svc))) {
1888 break;
1889 }
1890 } while (1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001891 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001892 /* restore signal mask */
1893 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07001894 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001895 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
1896 data->client.app_id, lstnr, ret);
1897 rc = -ENODEV;
1898 status = QSEOS_RESULT_FAILURE;
1899 } else {
1900 status = QSEOS_RESULT_SUCCESS;
1901 }
Zhen Kong26e62742018-05-04 17:19:06 -07001902err_resp:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001903 qseecom.send_resp_flag = 0;
Zhen Kong7d500032018-08-06 16:58:31 -07001904 if (ptr_svc) {
1905 ptr_svc->send_resp_flag = 0;
1906 table = ptr_svc->sglistinfo_ptr;
1907 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001908 if (qseecom.qsee_version < QSEE_VERSION_40) {
1909 send_data_rsp.listener_id = lstnr;
1910 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001911 if (table) {
1912 send_data_rsp.sglistinfo_ptr =
1913 (uint32_t)virt_to_phys(table);
1914 send_data_rsp.sglistinfo_len =
1915 SGLISTINFO_TABLE_SIZE;
1916 dmac_flush_range((void *)table,
1917 (void *)table + SGLISTINFO_TABLE_SIZE);
1918 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001919 cmd_buf = (void *)&send_data_rsp;
1920 cmd_len = sizeof(send_data_rsp);
1921 } else {
1922 send_data_rsp_64bit.listener_id = lstnr;
1923 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001924 if (table) {
1925 send_data_rsp_64bit.sglistinfo_ptr =
1926 virt_to_phys(table);
1927 send_data_rsp_64bit.sglistinfo_len =
1928 SGLISTINFO_TABLE_SIZE;
1929 dmac_flush_range((void *)table,
1930 (void *)table + SGLISTINFO_TABLE_SIZE);
1931 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001932 cmd_buf = (void *)&send_data_rsp_64bit;
1933 cmd_len = sizeof(send_data_rsp_64bit);
1934 }
Zhen Kong7d500032018-08-06 16:58:31 -07001935 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001936 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1937 else
1938 *(uint32_t *)cmd_buf =
1939 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07001940 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001941 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1942 ptr_svc->ihandle,
1943 ptr_svc->sb_virt, ptr_svc->sb_length,
1944 ION_IOC_CLEAN_INV_CACHES);
1945 if (ret) {
1946 pr_err("cache operation failed %d\n", ret);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001947 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001948 }
1949 }
1950
1951 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1952 ret = __qseecom_enable_clk(CLK_QSEE);
1953 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08001954 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001955 }
1956
1957 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1958 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07001959 if (ptr_svc) {
1960 ptr_svc->listener_in_use = false;
1961 __qseecom_clean_listener_sglistinfo(ptr_svc);
1962 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001963 if (ret) {
1964 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1965 ret, data->client.app_id);
1966 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1967 __qseecom_disable_clk(CLK_QSEE);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001968 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001969 }
Zhen Kong26e62742018-05-04 17:19:06 -07001970 pr_debug("resp status %d, res = %d, app_id = %d, lstnr = %d\n",
1971 status, resp->result, data->client.app_id, lstnr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001972 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1973 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1974 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1975 resp->result, data->client.app_id, lstnr);
1976 ret = -EINVAL;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001977 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001978 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001979exit:
1980 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001981 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1982 __qseecom_disable_clk(CLK_QSEE);
1983
1984 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001985 qseecom.app_block_ref_cnt--;
Zhen Kongcc580932019-04-23 22:16:56 -07001986 wake_up_interruptible_all(&qseecom.app_block_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001987 if (rc)
1988 return rc;
1989
1990 return ret;
1991}
1992
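/*
 * Handle QSEOS_RESULT_BLOCKED_ON_LISTENER: the TA (or smcinvoke session)
 * is blocked because the listener it needs is busy. Sleep, with
 * app_access_lock dropped, until that listener is free, then tell TZ to
 * continue via QSEOS_CONTINUE_BLOCKED_REQ_COMMAND (falling back to the
 * legacy app-id form if the session-id form is rejected), and repeat while
 * TZ still reports the request as blocked on a listener.
 */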
Zhen Konga91aaf02018-02-02 17:21:04 -08001993static int __qseecom_process_reentrancy_blocked_on_listener(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001994 struct qseecom_command_scm_resp *resp,
1995 struct qseecom_registered_app_list *ptr_app,
1996 struct qseecom_dev_handle *data)
1997{
1998 struct qseecom_registered_listener_list *list_ptr;
1999 int ret = 0;
2000 struct qseecom_continue_blocked_request_ireq ireq;
2001 struct qseecom_command_scm_resp continue_resp;
Zhen Konga91aaf02018-02-02 17:21:04 -08002002 unsigned int session_id;
Zhen Kong3d1d92f2018-02-02 17:21:04 -08002003 sigset_t new_sigset;
2004 sigset_t old_sigset;
Zhen Konga91aaf02018-02-02 17:21:04 -08002005 unsigned long flags;
2006 bool found_app = false;
Zhen Kong0ea975d2019-03-12 14:40:24 -07002007 struct qseecom_registered_app_list dummy_app_entry = { {NULL} };
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002008
2009 if (!resp || !data) {
2010 pr_err("invalid resp or data pointer\n");
2011 ret = -EINVAL;
2012 goto exit;
2013 }
2014
2015 /* find app_id & img_name from list */
Zhen Kong0ea975d2019-03-12 14:40:24 -07002016 if (!ptr_app) {
2017 if (data->client.from_smcinvoke) {
2018 pr_debug("This request is from smcinvoke\n");
2019 ptr_app = &dummy_app_entry;
2020 ptr_app->app_id = data->client.app_id;
2021 } else {
2022 spin_lock_irqsave(&qseecom.registered_app_list_lock,
2023 flags);
2024 list_for_each_entry(ptr_app,
2025 &qseecom.registered_app_list_head, list) {
2026 if ((ptr_app->app_id == data->client.app_id) &&
2027 (!strcmp(ptr_app->app_name,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002028 data->client.app_name))) {
Zhen Kong0ea975d2019-03-12 14:40:24 -07002029 found_app = true;
2030 break;
2031 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002032 }
Zhen Kong0ea975d2019-03-12 14:40:24 -07002033 spin_unlock_irqrestore(
2034 &qseecom.registered_app_list_lock, flags);
2035 if (!found_app) {
2036 pr_err("app_id %d (%s) is not found\n",
2037 data->client.app_id,
2038 (char *)data->client.app_name);
2039 ret = -ENOENT;
2040 goto exit;
2041 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002042 }
2043 }
2044
Zhen Kongd8cc0052017-11-13 15:13:31 -08002045 do {
Zhen Konga91aaf02018-02-02 17:21:04 -08002046 session_id = resp->resp_type;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002047 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002048 list_ptr = __qseecom_find_svc(resp->data);
2049 if (!list_ptr) {
2050 pr_err("Invalid listener ID %d\n", resp->data);
2051 ret = -ENODATA;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002052 mutex_unlock(&listener_access_lock);
Zhen Konge7f525f2017-12-01 18:26:25 -08002053 goto exit;
2054 }
Zhen Konga91aaf02018-02-02 17:21:04 -08002055 ptr_app->blocked_on_listener_id = resp->data;
2056
2057 pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
2058 resp->data, list_ptr->listener_in_use,
2059 session_id, data->client.app_id);
2060
2061 /* sleep until listener is available */
2062 sigfillset(&new_sigset);
2063 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2064
2065 do {
2066 qseecom.app_block_ref_cnt++;
2067 ptr_app->app_blocked = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002068 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002069 mutex_unlock(&app_access_lock);
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302070 wait_event_interruptible(
Zhen Konga91aaf02018-02-02 17:21:04 -08002071 list_ptr->listener_block_app_wq,
2072 !list_ptr->listener_in_use);
2073 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002074 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002075 ptr_app->app_blocked = false;
2076 qseecom.app_block_ref_cnt--;
2077 } while (list_ptr->listener_in_use);
2078
2079 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2080
2081 ptr_app->blocked_on_listener_id = 0;
2082 pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
2083 resp->data, session_id, data->client.app_id);
2084
2085 /* notify TZ that listener is available */
2086 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
2087
2088 if (qseecom.smcinvoke_support)
2089 ireq.app_or_session_id = session_id;
2090 else
2091 ireq.app_or_session_id = data->client.app_id;
2092
2093 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2094 &ireq, sizeof(ireq),
2095 &continue_resp, sizeof(continue_resp));
2096 if (ret && qseecom.smcinvoke_support) {
2097 /* retry with legacy cmd */
2098 qseecom.smcinvoke_support = false;
2099 ireq.app_or_session_id = data->client.app_id;
2100 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2101 &ireq, sizeof(ireq),
2102 &continue_resp, sizeof(continue_resp));
2103 qseecom.smcinvoke_support = true;
2104 if (ret) {
2105 pr_err("unblock app %d or session %d fail\n",
2106 data->client.app_id, session_id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002107 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002108 goto exit;
2109 }
2110 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08002111 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002112 resp->result = continue_resp.result;
2113 resp->resp_type = continue_resp.resp_type;
2114 resp->data = continue_resp.data;
2115 pr_debug("unblock resp = %d\n", resp->result);
2116 } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
2117
2118 if (resp->result != QSEOS_RESULT_INCOMPLETE) {
2119 pr_err("Unexpected unblock resp %d\n", resp->result);
2120 ret = -EINVAL;
Zhen Kong2f60f492017-06-29 15:22:14 -07002121 }
Zhen Kong2f60f492017-06-29 15:22:14 -07002122exit:
2123 return ret;
2124}
2125
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002126static int __qseecom_reentrancy_process_incomplete_cmd(
2127 struct qseecom_dev_handle *data,
2128 struct qseecom_command_scm_resp *resp)
2129{
2130 int ret = 0;
2131 int rc = 0;
2132 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07002133 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
2134 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
2135 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002136 struct qseecom_registered_listener_list *ptr_svc = NULL;
2137 sigset_t new_sigset;
2138 sigset_t old_sigset;
2139 uint32_t status;
2140 void *cmd_buf = NULL;
2141 size_t cmd_len;
2142 struct sglist_info *table = NULL;
2143
Zhen Kong26e62742018-05-04 17:19:06 -07002144 while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002145 lstnr = resp->data;
2146 /*
2147 * Wake up the blocking listener service with the lstnr id
2148 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002149 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002150 list_for_each_entry(ptr_svc,
2151 &qseecom.registered_listener_list_head, list) {
2152 if (ptr_svc->svc.listener_id == lstnr) {
2153 ptr_svc->listener_in_use = true;
2154 ptr_svc->rcv_req_flag = 1;
AnilKumar Chimata20f86662019-12-11 11:51:03 +05302155 rc = msm_ion_do_cache_op(qseecom.ion_clnt,
2156 ptr_svc->ihandle,
2157 ptr_svc->sb_virt,
2158 ptr_svc->sb_length,
2159 ION_IOC_INV_CACHES);
2160 if (rc) {
2161 pr_err("cache opp failed %d\n", rc);
2162 status = QSEOS_RESULT_FAILURE;
2163 goto err_resp;
2164 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002165 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2166 break;
2167 }
2168 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002169
2170 if (ptr_svc == NULL) {
2171 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07002172 rc = -EINVAL;
2173 status = QSEOS_RESULT_FAILURE;
2174 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002175 }
2176
2177 if (!ptr_svc->ihandle) {
2178 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07002179 rc = -EINVAL;
2180 status = QSEOS_RESULT_FAILURE;
2181 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002182 }
2183
2184 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07002185 pr_err("Service %d does not exist\n",
2186 lstnr);
2187 rc = -ERESTARTSYS;
2188 ptr_svc = NULL;
2189 status = QSEOS_RESULT_FAILURE;
2190 goto err_resp;
2191 }
2192
2193 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08002194 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07002195 lstnr, ptr_svc->abort);
2196 rc = -ENODEV;
2197 status = QSEOS_RESULT_FAILURE;
2198 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002199 }
Zhen Kong25731112018-09-20 13:10:03 -07002200
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002201 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2202
2203 /* initialize the new signal mask with all signals*/
2204 sigfillset(&new_sigset);
2205
2206 /* block all signals */
2207 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2208
2209 /* unlock mutex btw waking listener and sleep-wait */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002210 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002211 mutex_unlock(&app_access_lock);
2212 do {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302213 if (!wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002214 __qseecom_reentrancy_listener_has_sent_rsp(
2215 data, ptr_svc))) {
2216 break;
2217 }
2218 } while (1);
2219 /* lock mutex again after resp sent */
2220 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002221 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002222 ptr_svc->send_resp_flag = 0;
2223 qseecom.send_resp_flag = 0;
2224
2225 /* restore signal mask */
2226 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07002227 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002228 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
2229 data->client.app_id, lstnr, ret);
2230 rc = -ENODEV;
2231 status = QSEOS_RESULT_FAILURE;
2232 } else {
2233 status = QSEOS_RESULT_SUCCESS;
2234 }
Zhen Kong26e62742018-05-04 17:19:06 -07002235err_resp:
Zhen Kong7d500032018-08-06 16:58:31 -07002236 if (ptr_svc)
2237 table = ptr_svc->sglistinfo_ptr;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002238 if (qseecom.qsee_version < QSEE_VERSION_40) {
2239 send_data_rsp.listener_id = lstnr;
2240 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002241 if (table) {
2242 send_data_rsp.sglistinfo_ptr =
2243 (uint32_t)virt_to_phys(table);
2244 send_data_rsp.sglistinfo_len =
2245 SGLISTINFO_TABLE_SIZE;
2246 dmac_flush_range((void *)table,
2247 (void *)table + SGLISTINFO_TABLE_SIZE);
2248 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002249 cmd_buf = (void *)&send_data_rsp;
2250 cmd_len = sizeof(send_data_rsp);
2251 } else {
2252 send_data_rsp_64bit.listener_id = lstnr;
2253 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002254 if (table) {
2255 send_data_rsp_64bit.sglistinfo_ptr =
2256 virt_to_phys(table);
2257 send_data_rsp_64bit.sglistinfo_len =
2258 SGLISTINFO_TABLE_SIZE;
2259 dmac_flush_range((void *)table,
2260 (void *)table + SGLISTINFO_TABLE_SIZE);
2261 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002262 cmd_buf = (void *)&send_data_rsp_64bit;
2263 cmd_len = sizeof(send_data_rsp_64bit);
2264 }
Zhen Kong7d500032018-08-06 16:58:31 -07002265 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002266 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2267 else
2268 *(uint32_t *)cmd_buf =
2269 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07002270 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002271 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2272 ptr_svc->ihandle,
2273 ptr_svc->sb_virt, ptr_svc->sb_length,
2274 ION_IOC_CLEAN_INV_CACHES);
2275 if (ret) {
2276 pr_err("cache operation failed %d\n", ret);
2277 return ret;
2278 }
2279 }
2280 if (lstnr == RPMB_SERVICE) {
2281 ret = __qseecom_enable_clk(CLK_QSEE);
2282 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08002283 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002284 }
2285
2286 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2287 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07002288 if (ptr_svc) {
2289 ptr_svc->listener_in_use = false;
2290 __qseecom_clean_listener_sglistinfo(ptr_svc);
2291 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2292 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002293
2294 if (ret) {
2295 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2296 ret, data->client.app_id);
2297 goto exit;
2298 }
2299
2300 switch (resp->result) {
2301 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2302 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2303 lstnr, data->client.app_id, resp->data);
2304 if (lstnr == resp->data) {
2305 pr_err("lstnr %d should not be blocked!\n",
2306 lstnr);
2307 ret = -EINVAL;
2308 goto exit;
2309 }
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002310 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002311 ret = __qseecom_process_reentrancy_blocked_on_listener(
2312 resp, NULL, data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002313 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002314 if (ret) {
2315 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2316 data->client.app_id,
2317 data->client.app_name, resp->data);
2318 goto exit;
2319 }
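			/* successfully unblocked; fall through */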
2320 case QSEOS_RESULT_SUCCESS:
2321 case QSEOS_RESULT_INCOMPLETE:
2322 break;
2323 default:
2324 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2325 resp->result, data->client.app_id, lstnr);
2326 ret = -EINVAL;
2327 goto exit;
2328 }
2329exit:
Zhen Kongbcdeda22018-11-16 13:50:51 -08002330 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002331 if (lstnr == RPMB_SERVICE)
2332 __qseecom_disable_clk(CLK_QSEE);
2333
2334 }
2335 if (rc)
2336 return rc;
2337
2338 return ret;
2339}
2340
2341/*
2342 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2343 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2344 * So, before sending an OS level scm call, first check whether any TZ app
2345 * is blocked and, if so, wait until all apps are unblocked.
2346 */
2347static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2348{
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002349 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2350 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2351 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2352 /* thread sleep until this app unblocked */
2353 while (qseecom.app_block_ref_cnt > 0) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002354 mutex_unlock(&app_access_lock);
Zhen Kongcc580932019-04-23 22:16:56 -07002355 wait_event_interruptible(qseecom.app_block_wq,
2356 (!qseecom.app_block_ref_cnt));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002357 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002358 }
2359 }
2360}
2361
2362/*
2363 * scm_call of send data will fail if this TA is blocked or if more than
2364 * one TA is requesting listener services; so, first check whether this
2365 * app needs to wait.
2366 */
2367static void __qseecom_reentrancy_check_if_this_app_blocked(
2368 struct qseecom_registered_app_list *ptr_app)
2369{
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002370 if (qseecom.qsee_reentrancy_support) {
Zhen Kongdea10592018-07-30 17:50:10 -07002371 ptr_app->check_block++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002372 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2373 /* thread sleep until this app unblocked */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002374 mutex_unlock(&app_access_lock);
Zhen Kongcc580932019-04-23 22:16:56 -07002375 wait_event_interruptible(qseecom.app_block_wq,
2376 (!ptr_app->app_blocked &&
2377 qseecom.app_block_ref_cnt <= 1));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002378 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002379 }
Zhen Kongdea10592018-07-30 17:50:10 -07002380 ptr_app->check_block--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002381 }
2382}
2383
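/*
 * Look up an app by name: first in the local registered_app_list, then by
 * asking TZ with QSEOS_APP_LOOKUP_COMMAND. Returns 0 with *app_id == 0
 * when the app is not loaded anywhere.
 */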
2384static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2385 uint32_t *app_id)
2386{
2387 int32_t ret;
2388 struct qseecom_command_scm_resp resp;
2389 bool found_app = false;
2390 struct qseecom_registered_app_list *entry = NULL;
2391 unsigned long flags = 0;
2392
2393 if (!app_id) {
2394 pr_err("Null pointer to app_id\n");
2395 return -EINVAL;
2396 }
2397 *app_id = 0;
2398
2399 /* check if app exists and has been registered locally */
2400 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2401 list_for_each_entry(entry,
2402 &qseecom.registered_app_list_head, list) {
2403 if (!strcmp(entry->app_name, req.app_name)) {
2404 found_app = true;
2405 break;
2406 }
2407 }
2408 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2409 if (found_app) {
2410 pr_debug("Found app with id %d\n", entry->app_id);
2411 *app_id = entry->app_id;
2412 return 0;
2413 }
2414
2415 memset((void *)&resp, 0, sizeof(resp));
2416
2417 /* SCM_CALL to check if app_id for the mentioned app exists */
2418 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2419 sizeof(struct qseecom_check_app_ireq),
2420 &resp, sizeof(resp));
2421 if (ret) {
2422 pr_err("scm_call to check if app is already loaded failed\n");
2423 return -EINVAL;
2424 }
2425
2426 if (resp.result == QSEOS_RESULT_FAILURE)
2427 return 0;
2428
2429 switch (resp.resp_type) {
2430 /*qsee returned listener type response */
2431 case QSEOS_LISTENER_ID:
2432 pr_err("resp type is of listener type instead of app");
2433 return -EINVAL;
2434 case QSEOS_APP_ID:
2435 *app_id = resp.data;
2436 return 0;
2437 default:
2438 pr_err("invalid resp type (%d) from qsee",
2439 resp.resp_type);
2440 return -ENODEV;
2441 }
2442}
2443
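/*
 * Load a trusted app. Ensures the matching cmnlib/cmnlib64 is loaded
 * first, votes for bus bandwidth and the crypto clock, then either bumps
 * the ref count of an already-loaded app or sends QSEOS_APP_START_COMMAND
 * with the app image (passed in via an ION fd) and adds a new entry to
 * registered_app_list.
 */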
2444static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2445{
2446 struct qseecom_registered_app_list *entry = NULL;
2447 unsigned long flags = 0;
2448 u32 app_id = 0;
2449 struct ion_handle *ihandle; /* Ion handle */
2450 struct qseecom_load_img_req load_img_req;
2451 int32_t ret = 0;
2452 ion_phys_addr_t pa = 0;
2453 size_t len;
2454 struct qseecom_command_scm_resp resp;
2455 struct qseecom_check_app_ireq req;
2456 struct qseecom_load_app_ireq load_req;
2457 struct qseecom_load_app_64bit_ireq load_req_64bit;
2458 void *cmd_buf = NULL;
2459 size_t cmd_len;
2460 bool first_time = false;
2461
2462 /* Copy the relevant information needed for loading the image */
2463 if (copy_from_user(&load_img_req,
2464 (void __user *)argp,
2465 sizeof(struct qseecom_load_img_req))) {
2466 pr_err("copy_from_user failed\n");
2467 return -EFAULT;
2468 }
2469
2470 /* Check and load cmnlib */
2471 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2472 if (!qseecom.commonlib_loaded &&
2473 load_img_req.app_arch == ELFCLASS32) {
2474 ret = qseecom_load_commonlib_image(data, "cmnlib");
2475 if (ret) {
2476 pr_err("failed to load cmnlib\n");
2477 return -EIO;
2478 }
2479 qseecom.commonlib_loaded = true;
2480 pr_debug("cmnlib is loaded\n");
2481 }
2482
2483 if (!qseecom.commonlib64_loaded &&
2484 load_img_req.app_arch == ELFCLASS64) {
2485 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2486 if (ret) {
2487 pr_err("failed to load cmnlib64\n");
2488 return -EIO;
2489 }
2490 qseecom.commonlib64_loaded = true;
2491 pr_debug("cmnlib64 is loaded\n");
2492 }
2493 }
2494
2495 if (qseecom.support_bus_scaling) {
2496 mutex_lock(&qsee_bw_mutex);
2497 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2498 mutex_unlock(&qsee_bw_mutex);
2499 if (ret)
2500 return ret;
2501 }
2502
2503 /* Vote for the SFPB clock */
2504 ret = __qseecom_enable_clk_scale_up(data);
2505 if (ret)
2506 goto enable_clk_err;
2507
2508 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2509 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2510 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2511
2512 ret = __qseecom_check_app_exists(req, &app_id);
2513 if (ret < 0)
2514 goto loadapp_err;
2515
2516 if (app_id) {
2517 pr_debug("App id %d (%s) already exists\n", app_id,
2518 (char *)(req.app_name));
2519 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2520 list_for_each_entry(entry,
2521 &qseecom.registered_app_list_head, list){
2522 if (entry->app_id == app_id) {
2523 entry->ref_cnt++;
2524 break;
2525 }
2526 }
2527 spin_unlock_irqrestore(
2528 &qseecom.registered_app_list_lock, flags);
2529 ret = 0;
2530 } else {
2531 first_time = true;
2532 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2533 (char *)(load_img_req.img_name));
2534 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002535 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002536 load_img_req.ifd_data_fd);
2537 if (IS_ERR_OR_NULL(ihandle)) {
2538 pr_err("Ion client could not retrieve the handle\n");
2539 ret = -ENOMEM;
2540 goto loadapp_err;
2541 }
2542
2543 /* Get the physical address of the ION BUF */
2544 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2545 if (ret) {
2546 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2547 ret);
2548 goto loadapp_err;
2549 }
2550 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2551 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2552 len, load_img_req.mdt_len,
2553 load_img_req.img_len);
2554 ret = -EINVAL;
2555 goto loadapp_err;
2556 }
2557 /* Populate the structure for sending scm call to load image */
2558 if (qseecom.qsee_version < QSEE_VERSION_40) {
2559 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2560 load_req.mdt_len = load_img_req.mdt_len;
2561 load_req.img_len = load_img_req.img_len;
2562 strlcpy(load_req.app_name, load_img_req.img_name,
2563 MAX_APP_NAME_SIZE);
2564 load_req.phy_addr = (uint32_t)pa;
2565 cmd_buf = (void *)&load_req;
2566 cmd_len = sizeof(struct qseecom_load_app_ireq);
2567 } else {
2568 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2569 load_req_64bit.mdt_len = load_img_req.mdt_len;
2570 load_req_64bit.img_len = load_img_req.img_len;
2571 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2572 MAX_APP_NAME_SIZE);
2573 load_req_64bit.phy_addr = (uint64_t)pa;
2574 cmd_buf = (void *)&load_req_64bit;
2575 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2576 }
2577
2578 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2579 ION_IOC_CLEAN_INV_CACHES);
2580 if (ret) {
2581 pr_err("cache operation failed %d\n", ret);
2582 goto loadapp_err;
2583 }
2584
2585 /* SCM_CALL to load the app and get the app_id back */
2586 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2587 cmd_len, &resp, sizeof(resp));
2588 if (ret) {
2589 pr_err("scm_call to load app failed\n");
2590 if (!IS_ERR_OR_NULL(ihandle))
2591 ion_free(qseecom.ion_clnt, ihandle);
2592 ret = -EINVAL;
2593 goto loadapp_err;
2594 }
2595
2596 if (resp.result == QSEOS_RESULT_FAILURE) {
2597 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2598 if (!IS_ERR_OR_NULL(ihandle))
2599 ion_free(qseecom.ion_clnt, ihandle);
2600 ret = -EFAULT;
2601 goto loadapp_err;
2602 }
2603
2604 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2605 ret = __qseecom_process_incomplete_cmd(data, &resp);
2606 if (ret) {
Zhen Kong03b2eae2019-09-17 16:58:46 -07002607 /* TZ has created app_id, need to unload it */
2608 pr_err("incomp_cmd err %d, %d, unload %d %s\n",
2609 ret, resp.result, resp.data,
2610 load_img_req.img_name);
2611 __qseecom_unload_app(data, resp.data);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002612 if (!IS_ERR_OR_NULL(ihandle))
2613 ion_free(qseecom.ion_clnt, ihandle);
2614 ret = -EFAULT;
2615 goto loadapp_err;
2616 }
2617 }
2618
2619 if (resp.result != QSEOS_RESULT_SUCCESS) {
2620 pr_err("scm_call failed resp.result unknown, %d\n",
2621 resp.result);
2622 if (!IS_ERR_OR_NULL(ihandle))
2623 ion_free(qseecom.ion_clnt, ihandle);
2624 ret = -EFAULT;
2625 goto loadapp_err;
2626 }
2627
2628 app_id = resp.data;
2629
2630 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2631 if (!entry) {
2632 ret = -ENOMEM;
2633 goto loadapp_err;
2634 }
2635 entry->app_id = app_id;
2636 entry->ref_cnt = 1;
2637 entry->app_arch = load_img_req.app_arch;
2638 /*
2639 * keymaster app may be first loaded as "keymaste" by qseecomd,
2640 * and then used as "keymaster" on some targets. To avoid app
2641 * name checking error, register "keymaster" into app_list and
2642 * thread private data.
2643 */
2644 if (!strcmp(load_img_req.img_name, "keymaste"))
2645 strlcpy(entry->app_name, "keymaster",
2646 MAX_APP_NAME_SIZE);
2647 else
2648 strlcpy(entry->app_name, load_img_req.img_name,
2649 MAX_APP_NAME_SIZE);
2650 entry->app_blocked = false;
2651 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002652 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002653
2654 /* Deallocate the handle */
2655 if (!IS_ERR_OR_NULL(ihandle))
2656 ion_free(qseecom.ion_clnt, ihandle);
2657
2658 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2659 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2660 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2661 flags);
2662
2663 pr_warn("App with id %u (%s) now loaded\n", app_id,
2664 (char *)(load_img_req.img_name));
2665 }
2666 data->client.app_id = app_id;
2667 data->client.app_arch = load_img_req.app_arch;
2668 if (!strcmp(load_img_req.img_name, "keymaste"))
2669 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2670 else
2671 strlcpy(data->client.app_name, load_img_req.img_name,
2672 MAX_APP_NAME_SIZE);
2673 load_img_req.app_id = app_id;
2674 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2675 pr_err("copy_to_user failed\n");
2676 ret = -EFAULT;
2677 if (first_time == true) {
2678 spin_lock_irqsave(
2679 &qseecom.registered_app_list_lock, flags);
2680 list_del(&entry->list);
2681 spin_unlock_irqrestore(
2682 &qseecom.registered_app_list_lock, flags);
2683 kzfree(entry);
2684 }
2685 }
2686
2687loadapp_err:
2688 __qseecom_disable_clk_scale_down(data);
2689enable_clk_err:
2690 if (qseecom.support_bus_scaling) {
2691 mutex_lock(&qsee_bw_mutex);
2692 qseecom_unregister_bus_bandwidth_needs(data);
2693 mutex_unlock(&qsee_bw_mutex);
2694 }
2695 return ret;
2696}
2697
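/*
 * Drain this handle before unloading: wake any waiters and wait until all
 * other ioctls on the handle have finished (ioctl_count drops to 1). The
 * global app_access_lock is released across the wait when reentrancy is
 * supported.
 */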
2698static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2699{
2700 int ret = 1; /* Set unload app */
2701
2702 wake_up_all(&qseecom.send_resp_wq);
2703 if (qseecom.qsee_reentrancy_support)
2704 mutex_unlock(&app_access_lock);
2705 while (atomic_read(&data->ioctl_count) > 1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302706 if (wait_event_interruptible(data->abort_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002707 atomic_read(&data->ioctl_count) <= 1)) {
2708 pr_err("Interrupted from abort\n");
2709 ret = -ERESTARTSYS;
2710 break;
2711 }
2712 }
2713 if (qseecom.qsee_reentrancy_support)
2714 mutex_lock(&app_access_lock);
2715 return ret;
2716}
2717
2718static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2719{
2720 int ret = 0;
2721
2722 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2723 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2724 ion_free(qseecom.ion_clnt, data->client.ihandle);
jitendrathakarec7ff9e42019-09-12 19:46:48 +05302725 memset((void *)&data->client,
2726 0, sizeof(struct qseecom_client_handle));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002727 }
2728 return ret;
2729}
2730
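/*
 * Ask TZ to shut down the app with QSEOS_APP_SHUTDOWN_COMMAND, servicing
 * any listener requests (QSEOS_RESULT_INCOMPLETE) that TZ raises while
 * the app is being unloaded.
 */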
Zhen Kong03b2eae2019-09-17 16:58:46 -07002731static int __qseecom_unload_app(struct qseecom_dev_handle *data,
2732 uint32_t app_id)
2733{
2734 struct qseecom_unload_app_ireq req;
2735 struct qseecom_command_scm_resp resp;
2736 int ret = 0;
2737
2738 /* Populate the structure for the scm call to unload the app */
2739 req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
2740 req.app_id = app_id;
2741
2742 /* SCM_CALL to unload the app */
2743 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2744 sizeof(struct qseecom_unload_app_ireq),
2745 &resp, sizeof(resp));
2746 if (ret) {
2747 pr_err("scm_call to unload app (id = %d) failed\n", app_id);
2748 return -EFAULT;
2749 }
2750 switch (resp.result) {
2751 case QSEOS_RESULT_SUCCESS:
2752 pr_warn("App (%d) is unloaded\n", app_id);
2753 break;
2754 case QSEOS_RESULT_INCOMPLETE:
2755 ret = __qseecom_process_incomplete_cmd(data, &resp);
2756 if (ret)
2757 pr_err("unload app %d fail proc incom cmd: %d,%d,%d\n",
2758 app_id, ret, resp.result, resp.data);
2759 else
2760 pr_warn("App (%d) is unloaded\n", app_id);
2761 break;
2762 case QSEOS_RESULT_FAILURE:
2763 pr_err("app (%d) unload_failed!!\n", app_id);
2764 ret = -EFAULT;
2765 break;
2766 default:
2767 pr_err("unload app %d get unknown resp.result %d\n",
2768 app_id, resp.result);
2769 ret = -EFAULT;
2770 break;
2771 }
2772 return ret;
2773}
2774
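/*
 * Drop one reference on the app bound to this handle. The app is actually
 * unloaded from TZ only when this is the last reference, or when the
 * client crashed and the app is not blocked on a listener; the keymaster
 * app is never unloaded here. The list entry is looked up again after the
 * unload in case it was removed while the scm call was in progress.
 */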
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002775static int qseecom_unload_app(struct qseecom_dev_handle *data,
2776 bool app_crash)
2777{
2778 unsigned long flags;
2779 unsigned long flags1;
2780 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002781 struct qseecom_registered_app_list *ptr_app = NULL;
2782 bool unload = false;
2783 bool found_app = false;
2784 bool found_dead_app = false;
Zhen Kong03b2eae2019-09-17 16:58:46 -07002785 bool doublecheck = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002786
2787 if (!data) {
2788 pr_err("Invalid/uninitialized device handle\n");
2789 return -EINVAL;
2790 }
2791
2792 if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
2793 pr_debug("Do not unload keymaster app from tz\n");
2794 goto unload_exit;
2795 }
2796
2797 __qseecom_cleanup_app(data);
2798 __qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
2799
2800 if (data->client.app_id > 0) {
2801 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2802 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
2803 list) {
2804 if (ptr_app->app_id == data->client.app_id) {
2805 if (!strcmp((void *)ptr_app->app_name,
2806 (void *)data->client.app_name)) {
2807 found_app = true;
Zhen Kong024798b2018-07-13 18:14:26 -07002808 if (ptr_app->app_blocked ||
2809 ptr_app->check_block)
Zhen Kongaf93d7a2017-10-13 14:01:48 -07002810 app_crash = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002811 if (app_crash || ptr_app->ref_cnt == 1)
2812 unload = true;
2813 break;
2814 }
2815 found_dead_app = true;
2816 break;
2817 }
2818 }
2819 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2820 flags);
2821 if (found_app == false && found_dead_app == false) {
2822 pr_err("Cannot find app with id = %d (%s)\n",
2823 data->client.app_id,
2824 (char *)data->client.app_name);
2825 ret = -EINVAL;
2826 goto unload_exit;
2827 }
2828 }
2829
2830 if (found_dead_app)
2831 pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
2832 (char *)data->client.app_name);
2833
2834 if (unload) {
Zhen Kong03b2eae2019-09-17 16:58:46 -07002835 ret = __qseecom_unload_app(data, data->client.app_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002836
Zhen Kongf818f152019-03-13 12:31:32 -07002837 /* double check if this app_entry still exists */
Zhen Kongf818f152019-03-13 12:31:32 -07002838 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2839 list_for_each_entry(ptr_app,
2840 &qseecom.registered_app_list_head, list) {
2841 if ((ptr_app->app_id == data->client.app_id) &&
2842 (!strcmp((void *)ptr_app->app_name,
2843 (void *)data->client.app_name))) {
2844 doublecheck = true;
2845 break;
2846 }
2847 }
2848 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2849 flags1);
2850 if (!doublecheck) {
2851 pr_warn("app %d(%s) entry is already removed\n",
2852 data->client.app_id,
2853 (char *)data->client.app_name);
2854 found_app = false;
2855 }
2856 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07002857
Zhen Kong7d500032018-08-06 16:58:31 -07002858unload_exit:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002859 if (found_app) {
2860 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2861 if (app_crash) {
2862 ptr_app->ref_cnt = 0;
2863 pr_debug("app_crash: ref_count = 0\n");
2864 } else {
2865 if (ptr_app->ref_cnt == 1) {
2866 ptr_app->ref_cnt = 0;
2867 pr_debug("ref_count set to 0\n");
2868 } else {
2869 ptr_app->ref_cnt--;
2870 pr_debug("Can't unload app(%d) inuse\n",
2871 ptr_app->app_id);
2872 }
2873 }
2874 if (unload) {
2875 list_del(&ptr_app->list);
2876 kzfree(ptr_app);
2877 }
2878 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2879 flags1);
2880 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002881 qseecom_unmap_ion_allocated_memory(data);
2882 data->released = true;
2883 return ret;
2884}
2885
Zhen Kong03b2eae2019-09-17 16:58:46 -07002886
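/*
 * Defer the actual unload: queue this handle on unload_app_pending_list so
 * that the unload-app kthread (woken via __wakeup_unload_app_kthread())
 * performs the real qseecom_unload_app() later, outside this context.
 */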
2887static int qseecom_prepare_unload_app(struct qseecom_dev_handle *data)
2888{
2889 struct qseecom_unload_app_pending_list *entry = NULL;
2890
2891 pr_debug("prepare to unload app(%d)(%s), pending %d\n",
2892 data->client.app_id, data->client.app_name,
2893 data->client.unload_pending);
2894 if (data->client.unload_pending)
2895 return 0;
2896 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2897 if (!entry)
2898 return -ENOMEM;
2899 entry->data = data;
Zhen Kong03b2eae2019-09-17 16:58:46 -07002900 list_add_tail(&entry->list,
2901 &qseecom.unload_app_pending_list_head);
Zhen Kong03b2eae2019-09-17 16:58:46 -07002902 data->client.unload_pending = true;
2903 pr_debug("unload ta %d pending\n", data->client.app_id);
2904 return 0;
2905}
2906
2907static void __wakeup_unload_app_kthread(void)
2908{
2909 atomic_set(&qseecom.unload_app_kthread_state,
2910 UNLOAD_APP_KT_WAKEUP);
2911 wake_up_interruptible(&qseecom.unload_app_kthread_wq);
2912}
2913
2914static bool __qseecom_find_pending_unload_app(uint32_t app_id, char *app_name)
2915{
2916 struct qseecom_unload_app_pending_list *entry = NULL;
2917 bool found = false;
2918
2919 mutex_lock(&unload_app_pending_list_lock);
2920 list_for_each_entry(entry, &qseecom.unload_app_pending_list_head,
2921 list) {
2922 if ((entry->data->client.app_id == app_id) &&
2923 (!strcmp(entry->data->client.app_name, app_name))) {
2924 found = true;
2925 break;
2926 }
2927 }
2928 mutex_unlock(&unload_app_pending_list_lock);
2929 return found;
2930}
2931
2932static void __qseecom_processing_pending_unload_app(void)
2933{
2934 struct qseecom_unload_app_pending_list *entry = NULL;
2935 struct list_head *pos;
2936 int ret = 0;
2937
2938 mutex_lock(&unload_app_pending_list_lock);
2939 while (!list_empty(&qseecom.unload_app_pending_list_head)) {
2940 pos = qseecom.unload_app_pending_list_head.next;
2941 entry = list_entry(pos,
2942 struct qseecom_unload_app_pending_list, list);
2943 if (entry && entry->data) {
2944 pr_debug("process pending unload app %d (%s)\n",
2945 entry->data->client.app_id,
2946 entry->data->client.app_name);
2947 mutex_unlock(&unload_app_pending_list_lock);
2948 mutex_lock(&app_access_lock);
2949 ret = qseecom_unload_app(entry->data, true);
2950 if (ret)
2951 pr_err("unload app %d pending failed %d\n",
2952 entry->data->client.app_id, ret);
2953 mutex_unlock(&app_access_lock);
2954 mutex_lock(&unload_app_pending_list_lock);
2955 kzfree(entry->data);
2956 }
2957 list_del(pos);
2958 kzfree(entry);
2959 }
2960 mutex_unlock(&unload_app_pending_list_lock);
2961}
2962
2963static int __qseecom_unload_app_kthread_func(void *data)
2964{
2965 while (!kthread_should_stop()) {
2966 wait_event_interruptible(
2967 qseecom.unload_app_kthread_wq,
2968 atomic_read(&qseecom.unload_app_kthread_state)
2969 == UNLOAD_APP_KT_WAKEUP);
2970 pr_debug("kthread to unload app is called, state %d\n",
2971 atomic_read(&qseecom.unload_app_kthread_state));
2972 __qseecom_processing_pending_unload_app();
2973 atomic_set(&qseecom.unload_app_kthread_state,
2974 UNLOAD_APP_KT_SLEEP);
2975 }
2976 pr_warn("kthread to unload app stopped\n");
2977 return 0;
2978}
2979
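/*
 * Translate an address inside the client's registered shared buffer from
 * its user-space virtual address to the corresponding physical address or
 * kernel virtual address. Callers are responsible for having validated
 * that 'virt' lies within the shared buffer.
 */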
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002980static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2981 unsigned long virt)
2982{
2983 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2984}
2985
2986static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2987 unsigned long virt)
2988{
2989 return (uintptr_t)data->client.sb_virt +
2990 (virt - data->client.user_virt_sb_base);
2991}
2992
2993int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2994 struct qseecom_send_svc_cmd_req *req_ptr,
2995 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2996{
2997 int ret = 0;
2998 void *req_buf = NULL;
2999
3000 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
3001 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
3002 req_ptr, send_svc_ireq_ptr);
3003 return -EINVAL;
3004 }
3005
3006 /* Clients need to ensure req_buf is at base offset of shared buffer */
3007 if ((uintptr_t)req_ptr->cmd_req_buf !=
3008 data_ptr->client.user_virt_sb_base) {
3009 pr_err("cmd buf not pointing to base offset of shared buffer\n");
3010 return -EINVAL;
3011 }
3012
3013 if (data_ptr->client.sb_length <
3014 sizeof(struct qseecom_rpmb_provision_key)) {
3015 pr_err("shared buffer is too small to hold key type\n");
3016 return -EINVAL;
3017 }
3018 req_buf = data_ptr->client.sb_virt;
3019
3020 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
3021 send_svc_ireq_ptr->key_type =
3022 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
3023 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
3024 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3025 data_ptr, (uintptr_t)req_ptr->resp_buf));
3026 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
3027
3028 return ret;
3029}
3030
3031int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
3032 struct qseecom_send_svc_cmd_req *req_ptr,
3033 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
3034{
3035 int ret = 0;
3036 uint32_t reqd_len_sb_in = 0;
3037
3038 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
3039 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
3040 req_ptr, send_svc_ireq_ptr);
3041 return -EINVAL;
3042 }
3043
3044 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
3045 if (reqd_len_sb_in > data_ptr->client.sb_length) {
3046 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
3047 pr_err("Required: %u, Available: %zu\n",
3048 reqd_len_sb_in, data_ptr->client.sb_length);
3049 return -ENOMEM;
3050 }
3051
3052 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
3053 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
3054 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3055 data_ptr, (uintptr_t)req_ptr->resp_buf));
3056 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
3057
3058 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3059 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
3060
3061
3062 return ret;
3063}
3064
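/*
 * Sanity-check a send_svc_cmd request: the command and response buffers
 * must both lie entirely within the client's registered shared buffer,
 * their combined length must fit in it, and none of the pointer/length
 * arithmetic may overflow.
 */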
3065static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
3066 struct qseecom_send_svc_cmd_req *req)
3067{
3068 if (!req || !req->resp_buf || !req->cmd_req_buf) {
3069 pr_err("req or cmd buffer or response buffer is null\n");
3070 return -EINVAL;
3071 }
3072
3073 if (!data || !data->client.ihandle) {
3074 pr_err("Client or client handle is not initialized\n");
3075 return -EINVAL;
3076 }
3077
3078 if (data->client.sb_virt == NULL) {
3079 pr_err("sb_virt null\n");
3080 return -EINVAL;
3081 }
3082
3083 if (data->client.user_virt_sb_base == 0) {
3084 pr_err("user_virt_sb_base is null\n");
3085 return -EINVAL;
3086 }
3087
3088 if (data->client.sb_length == 0) {
3089 pr_err("sb_length is 0\n");
3090 return -EINVAL;
3091 }
3092
3093 if (((uintptr_t)req->cmd_req_buf <
3094 data->client.user_virt_sb_base) ||
3095 ((uintptr_t)req->cmd_req_buf >=
3096 (data->client.user_virt_sb_base + data->client.sb_length))) {
3097 pr_err("cmd buffer address not within shared bufffer\n");
3098 return -EINVAL;
3099 }
3100 if (((uintptr_t)req->resp_buf <
3101 data->client.user_virt_sb_base) ||
3102 ((uintptr_t)req->resp_buf >=
3103 (data->client.user_virt_sb_base + data->client.sb_length))) {
3104 pr_err("response buffer address not within shared bufffer\n");
3105 return -EINVAL;
3106 }
3107 if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
3108 (req->cmd_req_len > data->client.sb_length) ||
3109 (req->resp_len > data->client.sb_length)) {
3110 pr_err("cmd buf length or response buf length not valid\n");
3111 return -EINVAL;
3112 }
3113 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3114 pr_err("Integer overflow detected in req_len & rsp_len\n");
3115 return -EINVAL;
3116 }
3117
3118 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3119		pr_debug("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
3120				(req->cmd_req_len + req->resp_len),
3121				data->client.sb_length);
3123 return -ENOMEM;
3124 }
3125 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3126 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3127 return -EINVAL;
3128 }
3129 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3130 pr_err("Integer overflow in resp_len & resp_buf\n");
3131 return -EINVAL;
3132 }
3133 if (data->client.user_virt_sb_base >
3134 (ULONG_MAX - data->client.sb_length)) {
3135 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3136 return -EINVAL;
3137 }
3138 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3139 ((uintptr_t)data->client.user_virt_sb_base +
3140 data->client.sb_length)) ||
3141 (((uintptr_t)req->resp_buf + req->resp_len) >
3142 ((uintptr_t)data->client.user_virt_sb_base +
3143 data->client.sb_length))) {
3144 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3145 return -EINVAL;
3146 }
3147 return 0;
3148}
3149
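/*
 * Handler for the send-service-command ioctl: build the matching TZ request
 * (RPMB or FSM key service), vote for bus bandwidth/clocks, clean the shared
 * buffer cache, issue the SCM call, and post-process the response, including
 * QSEOS_RESULT_INCOMPLETE handling and the RPMB provision-status case.
 */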
3150static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
3151 void __user *argp)
3152{
3153 int ret = 0;
3154 struct qseecom_client_send_service_ireq send_svc_ireq;
3155 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
3156 struct qseecom_command_scm_resp resp;
3157 struct qseecom_send_svc_cmd_req req;
3158 void *send_req_ptr;
3159 size_t req_buf_size;
3160
3162
3163 if (copy_from_user(&req,
3164 (void __user *)argp,
3165 sizeof(req))) {
3166 pr_err("copy_from_user failed\n");
3167 return -EFAULT;
3168 }
3169
3170 if (__validate_send_service_cmd_inputs(data, &req))
3171 return -EINVAL;
3172
3173 data->type = QSEECOM_SECURE_SERVICE;
3174
3175 switch (req.cmd_id) {
3176 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
3177 case QSEOS_RPMB_ERASE_COMMAND:
3178 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
3179 send_req_ptr = &send_svc_ireq;
3180 req_buf_size = sizeof(send_svc_ireq);
3181 if (__qseecom_process_rpmb_svc_cmd(data, &req,
3182 send_req_ptr))
3183 return -EINVAL;
3184 break;
3185 case QSEOS_FSM_LTEOTA_REQ_CMD:
3186 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
3187 case QSEOS_FSM_IKE_REQ_CMD:
3188 case QSEOS_FSM_IKE_REQ_RSP_CMD:
3189 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
3190 case QSEOS_FSM_OEM_FUSE_READ_ROW:
3191 case QSEOS_FSM_ENCFS_REQ_CMD:
3192 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
3193 send_req_ptr = &send_fsm_key_svc_ireq;
3194 req_buf_size = sizeof(send_fsm_key_svc_ireq);
3195 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
3196 send_req_ptr))
3197 return -EINVAL;
3198 break;
3199 default:
3200 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
3201 return -EINVAL;
3202 }
3203
3204 if (qseecom.support_bus_scaling) {
3205 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
3206 if (ret) {
3207 pr_err("Fail to set bw HIGH\n");
3208 return ret;
3209 }
3210 } else {
3211 ret = qseecom_perf_enable(data);
3212 if (ret) {
3213 pr_err("Failed to vote for clocks with err %d\n", ret);
3214 goto exit;
3215 }
3216 }
3217
3218 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3219 data->client.sb_virt, data->client.sb_length,
3220 ION_IOC_CLEAN_INV_CACHES);
3221 if (ret) {
3222 pr_err("cache operation failed %d\n", ret);
3223 goto exit;
3224 }
3225 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3226 (const void *)send_req_ptr,
3227 req_buf_size, &resp, sizeof(resp));
3228 if (ret) {
3229 pr_err("qseecom_scm_call failed with err: %d\n", ret);
3230 if (!qseecom.support_bus_scaling) {
3231 qsee_disable_clock_vote(data, CLK_DFAB);
3232 qsee_disable_clock_vote(data, CLK_SFPB);
3233 } else {
3234 __qseecom_add_bw_scale_down_timer(
3235 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3236 }
3237 goto exit;
3238 }
3239 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3240 data->client.sb_virt, data->client.sb_length,
3241 ION_IOC_INV_CACHES);
3242 if (ret) {
3243 pr_err("cache operation failed %d\n", ret);
3244 goto exit;
3245 }
3246 switch (resp.result) {
3247 case QSEOS_RESULT_SUCCESS:
3248 break;
3249 case QSEOS_RESULT_INCOMPLETE:
3250 pr_debug("qseos_result_incomplete\n");
3251 ret = __qseecom_process_incomplete_cmd(data, &resp);
3252 if (ret) {
3253 pr_err("process_incomplete_cmd fail with result: %d\n",
3254 resp.result);
3255 }
3256 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
3257 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05303258 if (put_user(resp.result,
3259 (uint32_t __user *)req.resp_buf)) {
3260 ret = -EINVAL;
3261 goto exit;
3262 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003263 ret = 0;
3264 }
3265 break;
3266 case QSEOS_RESULT_FAILURE:
3267 pr_err("scm call failed with resp.result: %d\n", resp.result);
3268 ret = -EINVAL;
3269 break;
3270 default:
3271 pr_err("Response result %d not supported\n",
3272 resp.result);
3273 ret = -EINVAL;
3274 break;
3275 }
3276 if (!qseecom.support_bus_scaling) {
3277 qsee_disable_clock_vote(data, CLK_DFAB);
3278 qsee_disable_clock_vote(data, CLK_SFPB);
3279 } else {
3280 __qseecom_add_bw_scale_down_timer(
3281 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3282 }
3283
3284exit:
3285 return ret;
3286}
3287
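/*
 * Equivalent validation for client-app send-command requests: resp_buf may be
 * NULL only when resp_len is 0, and both buffers must stay inside the
 * registered shared buffer without integer overflow.
 */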
3288static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
3289 struct qseecom_send_cmd_req *req)
3290
3291{
3292 if (!data || !data->client.ihandle) {
3293 pr_err("Client or client handle is not initialized\n");
3294 return -EINVAL;
3295 }
3296 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
3297 (req->cmd_req_buf == NULL)) {
3298 pr_err("cmd buffer or response buffer is null\n");
3299 return -EINVAL;
3300 }
3301 if (((uintptr_t)req->cmd_req_buf <
3302 data->client.user_virt_sb_base) ||
3303 ((uintptr_t)req->cmd_req_buf >=
3304 (data->client.user_virt_sb_base + data->client.sb_length))) {
3305		pr_err("cmd buffer address not within shared buffer\n");
3306 return -EINVAL;
3307 }
3308 if (((uintptr_t)req->resp_buf <
3309 data->client.user_virt_sb_base) ||
3310 ((uintptr_t)req->resp_buf >=
3311 (data->client.user_virt_sb_base + data->client.sb_length))) {
3312		pr_err("response buffer address not within shared buffer\n");
3313 return -EINVAL;
3314 }
3315 if ((req->cmd_req_len == 0) ||
3316 (req->cmd_req_len > data->client.sb_length) ||
3317 (req->resp_len > data->client.sb_length)) {
3318 pr_err("cmd buf length or response buf length not valid\n");
3319 return -EINVAL;
3320 }
3321 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3322 pr_err("Integer overflow detected in req_len & rsp_len\n");
3323 return -EINVAL;
3324 }
3325
3326 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3327		pr_debug("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
3328			(req->cmd_req_len + req->resp_len),
3329			data->client.sb_length);
3331 return -ENOMEM;
3332 }
3333 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3334 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3335 return -EINVAL;
3336 }
3337 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3338 pr_err("Integer overflow in resp_len & resp_buf\n");
3339 return -EINVAL;
3340 }
3341 if (data->client.user_virt_sb_base >
3342 (ULONG_MAX - data->client.sb_length)) {
3343 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3344 return -EINVAL;
3345 }
3346 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3347 ((uintptr_t)data->client.user_virt_sb_base +
3348 data->client.sb_length)) ||
3349 (((uintptr_t)req->resp_buf + req->resp_len) >
3350 ((uintptr_t)data->client.user_virt_sb_base +
3351 data->client.sb_length))) {
3352 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3353 return -EINVAL;
3354 }
3355 return 0;
3356}
3357
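/*
 * Post-process an SCM response when QSEE reentrancy is supported: handle an
 * app blocked on a listener, then run incomplete-command processing while
 * tracking the blocked-app reference count.
 */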
3358int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
3359 struct qseecom_registered_app_list *ptr_app,
3360 struct qseecom_dev_handle *data)
3361{
3362 int ret = 0;
3363
3364 switch (resp->result) {
3365 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
3366 pr_warn("App(%d) %s is blocked on listener %d\n",
3367 data->client.app_id, data->client.app_name,
3368 resp->data);
3369 ret = __qseecom_process_reentrancy_blocked_on_listener(
3370 resp, ptr_app, data);
3371 if (ret) {
3372			pr_err("failed to process blocked App(%d) %s on listener %d\n",
3373 data->client.app_id, data->client.app_name, resp->data);
3374 return ret;
3375 }
3376
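		/* fall through to process the incomplete request */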
3377 case QSEOS_RESULT_INCOMPLETE:
3378 qseecom.app_block_ref_cnt++;
3379 ptr_app->app_blocked = true;
3380 ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
3381 ptr_app->app_blocked = false;
3382 qseecom.app_block_ref_cnt--;
Zhen Kongcc580932019-04-23 22:16:56 -07003383 wake_up_interruptible_all(&qseecom.app_block_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003384 if (ret)
3385 pr_err("process_incomplete_cmd failed err: %d\n",
3386 ret);
3387 return ret;
3388 case QSEOS_RESULT_SUCCESS:
3389 return ret;
3390 default:
3391 pr_err("Response result %d not supported\n",
3392 resp->result);
3393 return -EINVAL;
3394 }
3395}
3396
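/*
 * Core send-command path for a loaded client app: look up the registered app,
 * build the 32-bit or 64-bit TZ request depending on the QSEE version, attach
 * the sglist info table, clean the shared buffer cache, issue the SCM call,
 * and handle incomplete/blocked results before invalidating the cache again.
 */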
3397static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
3398 struct qseecom_send_cmd_req *req)
3399{
3400 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07003401 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003402 u32 reqd_len_sb_in = 0;
3403 struct qseecom_client_send_data_ireq send_data_req = {0};
3404 struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
3405 struct qseecom_command_scm_resp resp;
3406 unsigned long flags;
3407 struct qseecom_registered_app_list *ptr_app;
3408 bool found_app = false;
3409 void *cmd_buf = NULL;
3410 size_t cmd_len;
3411 struct sglist_info *table = data->sglistinfo_ptr;
3412
3413 reqd_len_sb_in = req->cmd_req_len + req->resp_len;
3414 /* find app_id & img_name from list */
3415 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
3416 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
3417 list) {
3418 if ((ptr_app->app_id == data->client.app_id) &&
3419 (!strcmp(ptr_app->app_name, data->client.app_name))) {
3420 found_app = true;
3421 break;
3422 }
3423 }
3424 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
3425
3426 if (!found_app) {
3427 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
3428 (char *)data->client.app_name);
3429 return -ENOENT;
3430 }
3431
Zhen Kong03b2eae2019-09-17 16:58:46 -07003432 if (__qseecom_find_pending_unload_app(data->client.app_id,
3433 data->client.app_name)) {
3434 pr_err("app %d (%s) unload is pending\n",
3435 data->client.app_id, data->client.app_name);
3436 return -ENOENT;
3437 }
3438
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003439 if (qseecom.qsee_version < QSEE_VERSION_40) {
3440 send_data_req.app_id = data->client.app_id;
3441 send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3442 data, (uintptr_t)req->cmd_req_buf));
3443 send_data_req.req_len = req->cmd_req_len;
3444 send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3445 data, (uintptr_t)req->resp_buf));
3446 send_data_req.rsp_len = req->resp_len;
3447 send_data_req.sglistinfo_ptr =
3448 (uint32_t)virt_to_phys(table);
3449 send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3450 dmac_flush_range((void *)table,
3451 (void *)table + SGLISTINFO_TABLE_SIZE);
3452 cmd_buf = (void *)&send_data_req;
3453 cmd_len = sizeof(struct qseecom_client_send_data_ireq);
3454 } else {
3455 send_data_req_64bit.app_id = data->client.app_id;
3456 send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
3457 (uintptr_t)req->cmd_req_buf);
3458 send_data_req_64bit.req_len = req->cmd_req_len;
3459 send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
3460 (uintptr_t)req->resp_buf);
3461 send_data_req_64bit.rsp_len = req->resp_len;
3462 /* check if 32bit app's phys_addr region is under 4GB.*/
3463 if ((data->client.app_arch == ELFCLASS32) &&
3464 ((send_data_req_64bit.req_ptr >=
3465 PHY_ADDR_4G - send_data_req_64bit.req_len) ||
3466 (send_data_req_64bit.rsp_ptr >=
3467 PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
3468 pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
3469 data->client.app_name,
3470 send_data_req_64bit.req_ptr,
3471 send_data_req_64bit.req_len,
3472 send_data_req_64bit.rsp_ptr,
3473 send_data_req_64bit.rsp_len);
3474 return -EFAULT;
3475 }
3476 send_data_req_64bit.sglistinfo_ptr =
3477 (uint64_t)virt_to_phys(table);
3478 send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3479 dmac_flush_range((void *)table,
3480 (void *)table + SGLISTINFO_TABLE_SIZE);
3481 cmd_buf = (void *)&send_data_req_64bit;
3482 cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
3483 }
3484
3485 if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
3486 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
3487 else
3488 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
3489
3490 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3491 data->client.sb_virt,
3492 reqd_len_sb_in,
3493 ION_IOC_CLEAN_INV_CACHES);
3494 if (ret) {
3495 pr_err("cache operation failed %d\n", ret);
3496 return ret;
3497 }
3498
3499 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
3500
3501 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3502 cmd_buf, cmd_len,
3503 &resp, sizeof(resp));
3504 if (ret) {
3505 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
3506 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07003507 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003508 }
3509
3510 if (qseecom.qsee_reentrancy_support) {
3511 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07003512 if (ret)
3513 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003514 } else {
3515 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
3516 ret = __qseecom_process_incomplete_cmd(data, &resp);
3517 if (ret) {
3518 pr_err("process_incomplete_cmd failed err: %d\n",
3519 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07003520 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003521 }
3522 } else {
3523 if (resp.result != QSEOS_RESULT_SUCCESS) {
3524 pr_err("Response result %d not supported\n",
3525 resp.result);
3526 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07003527 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003528 }
3529 }
3530 }
Zhen Kong4af480e2017-09-19 14:34:16 -07003531exit:
3532 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003533 data->client.sb_virt, data->client.sb_length,
3534 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07003535 if (ret2) {
3536 pr_err("cache operation failed %d\n", ret2);
3537 return ret2;
3538 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003539 return ret;
3540}
3541
3542static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3543{
3544 int ret = 0;
3545 struct qseecom_send_cmd_req req;
3546
3547 ret = copy_from_user(&req, argp, sizeof(req));
3548 if (ret) {
3549 pr_err("copy_from_user failed\n");
3550		return -EFAULT;
3551 }
3552
3553 if (__validate_send_cmd_inputs(data, &req))
3554 return -EINVAL;
3555
3556	ret = __qseecom_send_cmd(data, &req);
3557
3558	return ret;
3562}
3563
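/*
 * Verify that a modfd cmd_buf_offset leaves room for at least one 32-bit
 * physical address inside the command (or listener response) buffer.
 */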
3564int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3565 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3566	struct qseecom_dev_handle *data, int i)
3567{
3568 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3569 (req->ifd_data[i].fd > 0)) {
3570 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3571 (req->ifd_data[i].cmd_buf_offset >
3572 req->cmd_req_len - sizeof(uint32_t))) {
3573 pr_err("Invalid offset (req len) 0x%x\n",
3574 req->ifd_data[i].cmd_buf_offset);
3575 return -EINVAL;
3576 }
3577 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3578 (lstnr_resp->ifd_data[i].fd > 0)) {
3579 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3580 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3581 lstnr_resp->resp_len - sizeof(uint32_t))) {
3582 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3583 lstnr_resp->ifd_data[i].cmd_buf_offset);
3584 return -EINVAL;
3585 }
3586 }
3587 return 0;
3588}
3589
Zhen Kongd097c6e02019-08-01 16:10:20 -07003590static int __boundary_checks_offset_64(struct qseecom_send_modfd_cmd_req *req,
3591 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3592 struct qseecom_dev_handle *data, int i)
3593{
3594
3595 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3596 (req->ifd_data[i].fd > 0)) {
3597 if ((req->cmd_req_len < sizeof(uint64_t)) ||
3598 (req->ifd_data[i].cmd_buf_offset >
3599 req->cmd_req_len - sizeof(uint64_t))) {
3600 pr_err("Invalid offset (req len) 0x%x\n",
3601 req->ifd_data[i].cmd_buf_offset);
3602 return -EINVAL;
3603 }
3604 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3605 (lstnr_resp->ifd_data[i].fd > 0)) {
3606 if ((lstnr_resp->resp_len < sizeof(uint64_t)) ||
3607 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3608 lstnr_resp->resp_len - sizeof(uint64_t))) {
3609 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3610 lstnr_resp->ifd_data[i].cmd_buf_offset);
3611 return -EINVAL;
3612 }
3613 }
3614 return 0;
3615}
3616
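/*
 * Walk the ion fds referenced by a modfd command (or listener response) and
 * patch the addressed offsets in the message with the buffers' physical
 * addresses or 32-bit sg-entry lists; on cleanup the fields are zeroed and
 * the caches invalidated instead.
 */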
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003617static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
3618 struct qseecom_dev_handle *data)
3619{
3620 struct ion_handle *ihandle;
3621 char *field;
3622 int ret = 0;
3623 int i = 0;
3624 uint32_t len = 0;
3625 struct scatterlist *sg;
3626 struct qseecom_send_modfd_cmd_req *req = NULL;
3627 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3628 struct qseecom_registered_listener_list *this_lstnr = NULL;
3629 uint32_t offset;
3630 struct sg_table *sg_ptr;
3631
3632 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3633 (data->type != QSEECOM_CLIENT_APP))
3634 return -EFAULT;
3635
3636 if (msg == NULL) {
3637 pr_err("Invalid address\n");
3638 return -EINVAL;
3639 }
3640 if (data->type == QSEECOM_LISTENER_SERVICE) {
3641 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3642 this_lstnr = __qseecom_find_svc(data->listener.id);
3643 if (IS_ERR_OR_NULL(this_lstnr)) {
3644 pr_err("Invalid listener ID\n");
3645 return -ENOMEM;
3646 }
3647 } else {
3648 req = (struct qseecom_send_modfd_cmd_req *)msg;
3649 }
3650
3651 for (i = 0; i < MAX_ION_FD; i++) {
3652 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3653 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003654 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003655 req->ifd_data[i].fd);
3656 if (IS_ERR_OR_NULL(ihandle)) {
3657 pr_err("Ion client can't retrieve the handle\n");
3658 return -ENOMEM;
3659 }
3660 field = (char *) req->cmd_req_buf +
3661 req->ifd_data[i].cmd_buf_offset;
3662 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3663 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003664 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003665 lstnr_resp->ifd_data[i].fd);
3666 if (IS_ERR_OR_NULL(ihandle)) {
3667 pr_err("Ion client can't retrieve the handle\n");
3668 return -ENOMEM;
3669 }
3670 field = lstnr_resp->resp_buf_ptr +
3671 lstnr_resp->ifd_data[i].cmd_buf_offset;
3672 } else {
3673 continue;
3674 }
3675 /* Populate the cmd data structure with the phys_addr */
3676 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3677 if (IS_ERR_OR_NULL(sg_ptr)) {
3678			pr_err("Ion client could not retrieve sg table\n");
3679 goto err;
3680 }
3681 if (sg_ptr->nents == 0) {
3682 pr_err("Num of scattered entries is 0\n");
3683 goto err;
3684 }
3685 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3686			pr_err("Num of scattered entries (%d) is greater than max supported %d\n",
3687				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3689 goto err;
3690 }
3691 sg = sg_ptr->sgl;
3692 if (sg_ptr->nents == 1) {
3693 uint32_t *update;
3694
3695 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3696 goto err;
3697 if ((data->type == QSEECOM_CLIENT_APP &&
3698 (data->client.app_arch == ELFCLASS32 ||
3699 data->client.app_arch == ELFCLASS64)) ||
3700 (data->type == QSEECOM_LISTENER_SERVICE)) {
3701 /*
3702 * Check if sg list phy add region is under 4GB
3703 */
3704 if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
3705 (!cleanup) &&
3706 ((uint64_t)sg_dma_address(sg_ptr->sgl)
3707 >= PHY_ADDR_4G - sg->length)) {
3708 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3709 data->client.app_name,
3710 &(sg_dma_address(sg_ptr->sgl)),
3711 sg->length);
3712 goto err;
3713 }
3714 update = (uint32_t *) field;
3715 *update = cleanup ? 0 :
3716 (uint32_t)sg_dma_address(sg_ptr->sgl);
3717 } else {
3718 pr_err("QSEE app arch %u is not supported\n",
3719 data->client.app_arch);
3720 goto err;
3721 }
3722 len += (uint32_t)sg->length;
3723 } else {
3724 struct qseecom_sg_entry *update;
3725 int j = 0;
3726
3727 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3728 (req->ifd_data[i].fd > 0)) {
3729
3730 if ((req->cmd_req_len <
3731 SG_ENTRY_SZ * sg_ptr->nents) ||
3732 (req->ifd_data[i].cmd_buf_offset >
3733 (req->cmd_req_len -
3734 SG_ENTRY_SZ * sg_ptr->nents))) {
3735 pr_err("Invalid offset = 0x%x\n",
3736 req->ifd_data[i].cmd_buf_offset);
3737 goto err;
3738 }
3739
3740 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3741 (lstnr_resp->ifd_data[i].fd > 0)) {
3742
3743 if ((lstnr_resp->resp_len <
3744 SG_ENTRY_SZ * sg_ptr->nents) ||
3745 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3746 (lstnr_resp->resp_len -
3747 SG_ENTRY_SZ * sg_ptr->nents))) {
3748 goto err;
3749 }
3750 }
3751 if ((data->type == QSEECOM_CLIENT_APP &&
3752 (data->client.app_arch == ELFCLASS32 ||
3753 data->client.app_arch == ELFCLASS64)) ||
3754 (data->type == QSEECOM_LISTENER_SERVICE)) {
3755 update = (struct qseecom_sg_entry *)field;
3756 for (j = 0; j < sg_ptr->nents; j++) {
3757 /*
3758 * Check if sg list PA is under 4GB
3759 */
3760 if ((qseecom.qsee_version >=
3761 QSEE_VERSION_40) &&
3762 (!cleanup) &&
3763 ((uint64_t)(sg_dma_address(sg))
3764 >= PHY_ADDR_4G - sg->length)) {
3765 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3766 data->client.app_name,
3767 &(sg_dma_address(sg)),
3768 sg->length);
3769 goto err;
3770 }
3771 update->phys_addr = cleanup ? 0 :
3772 (uint32_t)sg_dma_address(sg);
3773 update->len = cleanup ? 0 : sg->length;
3774 update++;
3775 len += sg->length;
3776 sg = sg_next(sg);
3777 }
3778 } else {
3779 pr_err("QSEE app arch %u is not supported\n",
3780 data->client.app_arch);
3781 goto err;
3782 }
3783 }
3784
3785 if (cleanup) {
3786 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3787 ihandle, NULL, len,
3788 ION_IOC_INV_CACHES);
3789 if (ret) {
3790 pr_err("cache operation failed %d\n", ret);
3791 goto err;
3792 }
3793 } else {
3794 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3795 ihandle, NULL, len,
3796 ION_IOC_CLEAN_INV_CACHES);
3797 if (ret) {
3798 pr_err("cache operation failed %d\n", ret);
3799 goto err;
3800 }
3801 if (data->type == QSEECOM_CLIENT_APP) {
3802 offset = req->ifd_data[i].cmd_buf_offset;
3803 data->sglistinfo_ptr[i].indexAndFlags =
3804 SGLISTINFO_SET_INDEX_FLAG(
3805 (sg_ptr->nents == 1), 0, offset);
3806 data->sglistinfo_ptr[i].sizeOrCount =
3807 (sg_ptr->nents == 1) ?
3808 sg->length : sg_ptr->nents;
3809 data->sglist_cnt = i + 1;
3810 } else {
3811 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3812 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3813 (uintptr_t)this_lstnr->sb_virt);
3814 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3815 SGLISTINFO_SET_INDEX_FLAG(
3816 (sg_ptr->nents == 1), 0, offset);
3817 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3818 (sg_ptr->nents == 1) ?
3819 sg->length : sg_ptr->nents;
3820 this_lstnr->sglist_cnt = i + 1;
3821 }
3822 }
3823 /* Deallocate the handle */
3824 if (!IS_ERR_OR_NULL(ihandle))
3825 ion_free(qseecom.ion_clnt, ihandle);
3826 }
3827 return ret;
3828err:
3829 if (!IS_ERR_OR_NULL(ihandle))
3830 ion_free(qseecom.ion_clnt, ihandle);
3831 return -ENOMEM;
3832}
3833
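/*
 * When an ion buffer carries more scatter-gather entries than
 * QSEECOM_MAX_SG_ENTRY, allocate a DMA-coherent side buffer, copy every sg
 * entry into it, and leave only a small header (format version, physical
 * address, total entry count) in the message itself.
 */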
3834static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3835 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3836{
3837 struct scatterlist *sg = sg_ptr->sgl;
3838 struct qseecom_sg_entry_64bit *sg_entry;
3839 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3840 void *buf;
3841 uint i;
3842 size_t size;
3843 dma_addr_t coh_pmem;
3844
3845 if (fd_idx >= MAX_ION_FD) {
3846 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3847 return -ENOMEM;
3848 }
3849 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3850 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3851 /* Allocate a contiguous kernel buffer */
3852 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3853 size = (size + PAGE_SIZE) & PAGE_MASK;
3854 buf = dma_alloc_coherent(qseecom.pdev,
3855 size, &coh_pmem, GFP_KERNEL);
3856 if (buf == NULL) {
3857 pr_err("failed to alloc memory for sg buf\n");
3858 return -ENOMEM;
3859 }
3860 /* update qseecom_sg_list_buf_hdr_64bit */
3861 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3862 buf_hdr->new_buf_phys_addr = coh_pmem;
3863 buf_hdr->nents_total = sg_ptr->nents;
3864 /* save the left sg entries into new allocated buf */
3865 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3866 for (i = 0; i < sg_ptr->nents; i++) {
3867 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3868 sg_entry->len = sg->length;
3869 sg_entry++;
3870 sg = sg_next(sg);
3871 }
3872
3873 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3874 data->client.sec_buf_fd[fd_idx].vbase = buf;
3875 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3876 data->client.sec_buf_fd[fd_idx].size = size;
3877
3878 return 0;
3879}
3880
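/*
 * 64-bit variant of __qseecom_update_cmd_buf(): patches qseecom_sg_entry_64bit
 * entries and falls back to a separate sg-list buffer when an ion buffer has
 * more than QSEECOM_MAX_SG_ENTRY entries.
 */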
3881static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
3882 struct qseecom_dev_handle *data)
3883{
3884 struct ion_handle *ihandle;
3885 char *field;
3886 int ret = 0;
3887 int i = 0;
3888 uint32_t len = 0;
3889 struct scatterlist *sg;
3890 struct qseecom_send_modfd_cmd_req *req = NULL;
3891 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3892 struct qseecom_registered_listener_list *this_lstnr = NULL;
3893 uint32_t offset;
3894 struct sg_table *sg_ptr;
3895
3896 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3897 (data->type != QSEECOM_CLIENT_APP))
3898 return -EFAULT;
3899
3900 if (msg == NULL) {
3901 pr_err("Invalid address\n");
3902 return -EINVAL;
3903 }
3904 if (data->type == QSEECOM_LISTENER_SERVICE) {
3905 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3906 this_lstnr = __qseecom_find_svc(data->listener.id);
3907 if (IS_ERR_OR_NULL(this_lstnr)) {
3908 pr_err("Invalid listener ID\n");
3909 return -ENOMEM;
3910 }
3911 } else {
3912 req = (struct qseecom_send_modfd_cmd_req *)msg;
3913 }
3914
3915 for (i = 0; i < MAX_ION_FD; i++) {
3916 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3917 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003918 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003919 req->ifd_data[i].fd);
3920 if (IS_ERR_OR_NULL(ihandle)) {
3921 pr_err("Ion client can't retrieve the handle\n");
3922 return -ENOMEM;
3923 }
3924 field = (char *) req->cmd_req_buf +
3925 req->ifd_data[i].cmd_buf_offset;
3926 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3927 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003928 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003929 lstnr_resp->ifd_data[i].fd);
3930 if (IS_ERR_OR_NULL(ihandle)) {
3931 pr_err("Ion client can't retrieve the handle\n");
3932 return -ENOMEM;
3933 }
3934 field = lstnr_resp->resp_buf_ptr +
3935 lstnr_resp->ifd_data[i].cmd_buf_offset;
3936 } else {
3937 continue;
3938 }
3939 /* Populate the cmd data structure with the phys_addr */
3940 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3941 if (IS_ERR_OR_NULL(sg_ptr)) {
3942			pr_err("Ion client could not retrieve sg table\n");
3943 goto err;
3944 }
3945 if (sg_ptr->nents == 0) {
3946 pr_err("Num of scattered entries is 0\n");
3947 goto err;
3948 }
3949 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3950			pr_warn("Num of scattered entries (%d) is greater than %d\n",
3951				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3953 if (cleanup) {
3954 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3955 data->client.sec_buf_fd[i].vbase)
3956 dma_free_coherent(qseecom.pdev,
3957 data->client.sec_buf_fd[i].size,
3958 data->client.sec_buf_fd[i].vbase,
3959 data->client.sec_buf_fd[i].pbase);
3960 } else {
3961 ret = __qseecom_allocate_sg_list_buffer(data,
3962 field, i, sg_ptr);
3963 if (ret) {
3964 pr_err("Failed to allocate sg list buffer\n");
3965 goto err;
3966 }
3967 }
3968 len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
3969 sg = sg_ptr->sgl;
3970 goto cleanup;
3971 }
3972 sg = sg_ptr->sgl;
3973 if (sg_ptr->nents == 1) {
3974 uint64_t *update_64bit;
3975
Zhen Kongd097c6e02019-08-01 16:10:20 -07003976 if (__boundary_checks_offset_64(req, lstnr_resp,
3977 data, i))
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003978 goto err;
3979 /* 64bit app uses 64bit address */
3980 update_64bit = (uint64_t *) field;
3981 *update_64bit = cleanup ? 0 :
3982 (uint64_t)sg_dma_address(sg_ptr->sgl);
3983 len += (uint32_t)sg->length;
3984 } else {
3985 struct qseecom_sg_entry_64bit *update_64bit;
3986 int j = 0;
3987
3988 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3989 (req->ifd_data[i].fd > 0)) {
3990
3991 if ((req->cmd_req_len <
3992 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3993 (req->ifd_data[i].cmd_buf_offset >
3994 (req->cmd_req_len -
3995 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3996 pr_err("Invalid offset = 0x%x\n",
3997 req->ifd_data[i].cmd_buf_offset);
3998 goto err;
3999 }
4000
4001 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
4002 (lstnr_resp->ifd_data[i].fd > 0)) {
4003
4004 if ((lstnr_resp->resp_len <
4005 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
4006 (lstnr_resp->ifd_data[i].cmd_buf_offset >
4007 (lstnr_resp->resp_len -
4008 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
4009 goto err;
4010 }
4011 }
4012 /* 64bit app uses 64bit address */
4013 update_64bit = (struct qseecom_sg_entry_64bit *)field;
4014 for (j = 0; j < sg_ptr->nents; j++) {
4015 update_64bit->phys_addr = cleanup ? 0 :
4016 (uint64_t)sg_dma_address(sg);
4017 update_64bit->len = cleanup ? 0 :
4018 (uint32_t)sg->length;
4019 update_64bit++;
4020 len += sg->length;
4021 sg = sg_next(sg);
4022 }
4023 }
4024cleanup:
4025 if (cleanup) {
4026 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
4027 ihandle, NULL, len,
4028 ION_IOC_INV_CACHES);
4029 if (ret) {
4030 pr_err("cache operation failed %d\n", ret);
4031 goto err;
4032 }
4033 } else {
4034 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
4035 ihandle, NULL, len,
4036 ION_IOC_CLEAN_INV_CACHES);
4037 if (ret) {
4038 pr_err("cache operation failed %d\n", ret);
4039 goto err;
4040 }
4041 if (data->type == QSEECOM_CLIENT_APP) {
4042 offset = req->ifd_data[i].cmd_buf_offset;
4043 data->sglistinfo_ptr[i].indexAndFlags =
4044 SGLISTINFO_SET_INDEX_FLAG(
4045 (sg_ptr->nents == 1), 1, offset);
4046 data->sglistinfo_ptr[i].sizeOrCount =
4047 (sg_ptr->nents == 1) ?
4048 sg->length : sg_ptr->nents;
4049 data->sglist_cnt = i + 1;
4050 } else {
4051 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
4052 + (uintptr_t)lstnr_resp->resp_buf_ptr -
4053 (uintptr_t)this_lstnr->sb_virt);
4054 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
4055 SGLISTINFO_SET_INDEX_FLAG(
4056 (sg_ptr->nents == 1), 1, offset);
4057 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
4058 (sg_ptr->nents == 1) ?
4059 sg->length : sg_ptr->nents;
4060 this_lstnr->sglist_cnt = i + 1;
4061 }
4062 }
4063 /* Deallocate the handle */
4064 if (!IS_ERR_OR_NULL(ihandle))
4065 ion_free(qseecom.ion_clnt, ihandle);
4066 }
4067 return ret;
4068err:
4069 for (i = 0; i < MAX_ION_FD; i++)
4070 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
4071 data->client.sec_buf_fd[i].vbase)
4072 dma_free_coherent(qseecom.pdev,
4073 data->client.sec_buf_fd[i].size,
4074 data->client.sec_buf_fd[i].vbase,
4075 data->client.sec_buf_fd[i].pbase);
4076 if (!IS_ERR_OR_NULL(ihandle))
4077 ion_free(qseecom.ion_clnt, ihandle);
4078 return -ENOMEM;
4079}
4080
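/*
 * Common send-modfd-cmd ioctl path: copy the request from userspace, validate
 * it, translate the user buffers to kernel addresses, patch in the ion buffer
 * physical addresses, send the command, then undo the patching.
 */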
4081static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
4082 void __user *argp,
4083 bool is_64bit_addr)
4084{
4085 int ret = 0;
4086 int i;
4087 struct qseecom_send_modfd_cmd_req req;
4088 struct qseecom_send_cmd_req send_cmd_req;
4089
4090 ret = copy_from_user(&req, argp, sizeof(req));
4091 if (ret) {
4092 pr_err("copy_from_user failed\n");
4093		return -EFAULT;
4094 }
4095
4096 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
4097 send_cmd_req.cmd_req_len = req.cmd_req_len;
4098 send_cmd_req.resp_buf = req.resp_buf;
4099 send_cmd_req.resp_len = req.resp_len;
4100
4101 if (__validate_send_cmd_inputs(data, &send_cmd_req))
4102 return -EINVAL;
4103
4104 /* validate offsets */
4105 for (i = 0; i < MAX_ION_FD; i++) {
4106 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
4107 pr_err("Invalid offset %d = 0x%x\n",
4108 i, req.ifd_data[i].cmd_buf_offset);
4109 return -EINVAL;
4110 }
4111 }
4112 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
4113 (uintptr_t)req.cmd_req_buf);
4114 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
4115 (uintptr_t)req.resp_buf);
4116
4117 if (!is_64bit_addr) {
4118 ret = __qseecom_update_cmd_buf(&req, false, data);
4119 if (ret)
4120 return ret;
4121 ret = __qseecom_send_cmd(data, &send_cmd_req);
4122 if (ret)
4123 return ret;
4124 ret = __qseecom_update_cmd_buf(&req, true, data);
4125 if (ret)
4126 return ret;
4127 } else {
4128 ret = __qseecom_update_cmd_buf_64(&req, false, data);
4129 if (ret)
4130 return ret;
4131 ret = __qseecom_send_cmd(data, &send_cmd_req);
4132 if (ret)
4133 return ret;
4134 ret = __qseecom_update_cmd_buf_64(&req, true, data);
4135 if (ret)
4136 return ret;
4137 }
4138
4139 return ret;
4140}
4141
4142static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
4143 void __user *argp)
4144{
4145 return __qseecom_send_modfd_cmd(data, argp, false);
4146}
4147
4148static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
4149 void __user *argp)
4150{
4151 return __qseecom_send_modfd_cmd(data, argp, true);
4152}
4153
4154
4155
4156static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
4157 struct qseecom_registered_listener_list *svc)
4158{
4159 int ret;
4160
Zhen Kongf5087172018-10-11 17:22:05 -07004161 ret = (svc->rcv_req_flag == 1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08004162 return ret || data->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004163}
4164
4165static int qseecom_receive_req(struct qseecom_dev_handle *data)
4166{
4167 int ret = 0;
4168 struct qseecom_registered_listener_list *this_lstnr;
4169
Zhen Kongbcdeda22018-11-16 13:50:51 -08004170 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004171 this_lstnr = __qseecom_find_svc(data->listener.id);
4172 if (!this_lstnr) {
4173 pr_err("Invalid listener ID\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08004174 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004175 return -ENODATA;
4176 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004177 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004178
4179 while (1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05304180 if (wait_event_interruptible(this_lstnr->rcv_req_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004181 __qseecom_listener_has_rcvd_req(data,
4182 this_lstnr))) {
Zhen Kong52ce9062018-09-24 14:33:27 -07004183 pr_debug("Interrupted: exiting Listener Service = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004184 (uint32_t)data->listener.id);
4185 /* woken up for different reason */
4186 return -ERESTARTSYS;
4187 }
4188
Zhen Kongbcdeda22018-11-16 13:50:51 -08004189 if (data->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004190 pr_err("Aborting Listener Service = %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07004191 (uint32_t)data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004192 return -ENODEV;
4193 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004194 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004195 this_lstnr->rcv_req_flag = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08004196 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004197 break;
4198 }
4199 return ret;
4200}
4201
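/*
 * Basic sanity checks on a TA's .mdt image: valid ELF magic for the detected
 * class (32/64-bit), at least one program header, and all program headers
 * contained within the file.
 */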
4202static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
4203{
4204 unsigned char app_arch = 0;
4205 struct elf32_hdr *ehdr;
4206 struct elf64_hdr *ehdr64;
4207
4208 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4209
4210 switch (app_arch) {
4211 case ELFCLASS32: {
4212 ehdr = (struct elf32_hdr *)fw_entry->data;
4213 if (fw_entry->size < sizeof(*ehdr)) {
4214 pr_err("%s: Not big enough to be an elf32 header\n",
4215 qseecom.pdev->init_name);
4216 return false;
4217 }
4218 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
4219 pr_err("%s: Not an elf32 header\n",
4220 qseecom.pdev->init_name);
4221 return false;
4222 }
4223 if (ehdr->e_phnum == 0) {
4224 pr_err("%s: No loadable segments\n",
4225 qseecom.pdev->init_name);
4226 return false;
4227 }
4228 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
4229 sizeof(struct elf32_hdr) > fw_entry->size) {
4230 pr_err("%s: Program headers not within mdt\n",
4231 qseecom.pdev->init_name);
4232 return false;
4233 }
4234 break;
4235 }
4236 case ELFCLASS64: {
4237 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4238 if (fw_entry->size < sizeof(*ehdr64)) {
4239 pr_err("%s: Not big enough to be an elf64 header\n",
4240 qseecom.pdev->init_name);
4241 return false;
4242 }
4243 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
4244 pr_err("%s: Not an elf64 header\n",
4245 qseecom.pdev->init_name);
4246 return false;
4247 }
4248 if (ehdr64->e_phnum == 0) {
4249 pr_err("%s: No loadable segments\n",
4250 qseecom.pdev->init_name);
4251 return false;
4252 }
4253 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
4254 sizeof(struct elf64_hdr) > fw_entry->size) {
4255 pr_err("%s: Program headers not within mdt\n",
4256 qseecom.pdev->init_name);
4257 return false;
4258 }
4259 break;
4260 }
4261 default: {
4262 pr_err("QSEE app arch %u is not supported\n", app_arch);
4263 return false;
4264 }
4265 }
4266 return true;
4267}
4268
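/*
 * Compute the total size of a split TA image (<appname>.mdt plus each
 * <appname>.bNN blob) and report its ELF class, guarding against 32-bit
 * size overflow.
 */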
4269static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
4270 uint32_t *app_arch)
4271{
4272 int ret = -1;
4273 int i = 0, rc = 0;
4274 const struct firmware *fw_entry = NULL;
4275 char fw_name[MAX_APP_NAME_SIZE];
4276 struct elf32_hdr *ehdr;
4277 struct elf64_hdr *ehdr64;
4278 int num_images = 0;
4279
4280 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4281 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4282 if (rc) {
4283 pr_err("error with request_firmware\n");
4284 ret = -EIO;
4285 goto err;
4286 }
4287 if (!__qseecom_is_fw_image_valid(fw_entry)) {
4288 ret = -EIO;
4289 goto err;
4290 }
4291 *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4292 *fw_size = fw_entry->size;
4293 if (*app_arch == ELFCLASS32) {
4294 ehdr = (struct elf32_hdr *)fw_entry->data;
4295 num_images = ehdr->e_phnum;
4296 } else if (*app_arch == ELFCLASS64) {
4297 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4298 num_images = ehdr64->e_phnum;
4299 } else {
4300 pr_err("QSEE %s app, arch %u is not supported\n",
4301 appname, *app_arch);
4302 ret = -EIO;
4303 goto err;
4304 }
4305 pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
4306 release_firmware(fw_entry);
4307 fw_entry = NULL;
4308 for (i = 0; i < num_images; i++) {
4309 memset(fw_name, 0, sizeof(fw_name));
4310 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4311 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4312 if (ret)
4313 goto err;
4314 if (*fw_size > U32_MAX - fw_entry->size) {
4315 pr_err("QSEE %s app file size overflow\n", appname);
4316 ret = -EINVAL;
4317 goto err;
4318 }
4319 *fw_size += fw_entry->size;
4320 release_firmware(fw_entry);
4321 fw_entry = NULL;
4322 }
4323
4324 return ret;
4325err:
4326 if (fw_entry)
4327 release_firmware(fw_entry);
4328 *fw_size = 0;
4329 return ret;
4330}
4331
4332static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
4333 uint32_t fw_size,
4334 struct qseecom_load_app_ireq *load_req)
4335{
4336 int ret = -1;
4337 int i = 0, rc = 0;
4338 const struct firmware *fw_entry = NULL;
4339 char fw_name[MAX_APP_NAME_SIZE];
4340 u8 *img_data_ptr = img_data;
4341 struct elf32_hdr *ehdr;
4342 struct elf64_hdr *ehdr64;
4343 int num_images = 0;
4344 unsigned char app_arch = 0;
4345
4346 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4347 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4348 if (rc) {
4349 ret = -EIO;
4350 goto err;
4351 }
4352
4353 load_req->img_len = fw_entry->size;
4354 if (load_req->img_len > fw_size) {
4355 pr_err("app %s size %zu is larger than buf size %u\n",
4356 appname, fw_entry->size, fw_size);
4357 ret = -EINVAL;
4358 goto err;
4359 }
4360 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4361 img_data_ptr = img_data_ptr + fw_entry->size;
4362 load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
4363
4364 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4365 if (app_arch == ELFCLASS32) {
4366 ehdr = (struct elf32_hdr *)fw_entry->data;
4367 num_images = ehdr->e_phnum;
4368 } else if (app_arch == ELFCLASS64) {
4369 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4370 num_images = ehdr64->e_phnum;
4371 } else {
4372 pr_err("QSEE %s app, arch %u is not supported\n",
4373 appname, app_arch);
4374 ret = -EIO;
4375 goto err;
4376 }
4377 release_firmware(fw_entry);
4378 fw_entry = NULL;
4379 for (i = 0; i < num_images; i++) {
4380 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4381 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4382 if (ret) {
4383 pr_err("Failed to locate blob %s\n", fw_name);
4384 goto err;
4385 }
4386 if ((fw_entry->size > U32_MAX - load_req->img_len) ||
4387 (fw_entry->size + load_req->img_len > fw_size)) {
4388 pr_err("Invalid file size for %s\n", fw_name);
4389 ret = -EINVAL;
4390 goto err;
4391 }
4392 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4393 img_data_ptr = img_data_ptr + fw_entry->size;
4394 load_req->img_len += fw_entry->size;
4395 release_firmware(fw_entry);
4396 fw_entry = NULL;
4397 }
4398 return ret;
4399err:
4400 release_firmware(fw_entry);
4401 return ret;
4402}
4403
4404static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4405 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4406{
4407 size_t len = 0;
4408 int ret = 0;
4409 ion_phys_addr_t pa;
4410 struct ion_handle *ihandle = NULL;
4411 u8 *img_data = NULL;
Zhen Kong3dd92792017-12-08 09:47:15 -08004412 int retry = 0;
Zhen Konge30e1342019-01-22 08:57:02 -08004413 int ion_flag = ION_FLAG_CACHED;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004414
Zhen Kong3dd92792017-12-08 09:47:15 -08004415 do {
Zhen Kong5d02be92018-05-29 16:17:29 -07004416 if (retry++) {
4417 mutex_unlock(&app_access_lock);
Zhen Kong3dd92792017-12-08 09:47:15 -08004418 msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
Zhen Kong5d02be92018-05-29 16:17:29 -07004419 mutex_lock(&app_access_lock);
4420 }
Zhen Kong3dd92792017-12-08 09:47:15 -08004421 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
Zhen Konge30e1342019-01-22 08:57:02 -08004422 SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), ion_flag);
Zhen Kong3dd92792017-12-08 09:47:15 -08004423 } while (IS_ERR_OR_NULL(ihandle) &&
4424 (retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004425
4426 if (IS_ERR_OR_NULL(ihandle)) {
4427 pr_err("ION alloc failed\n");
4428 return -ENOMEM;
4429 }
4430 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4431 ihandle);
4432
4433 if (IS_ERR_OR_NULL(img_data)) {
4434 pr_err("ION memory mapping for image loading failed\n");
4435 ret = -ENOMEM;
4436 goto exit_ion_free;
4437 }
4438 /* Get the physical address of the ION BUF */
4439 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4440 if (ret) {
4441 pr_err("physical memory retrieval failure\n");
4442 ret = -EIO;
4443 goto exit_ion_unmap_kernel;
4444 }
4445
4446 *pihandle = ihandle;
4447 *data = img_data;
4448 *paddr = pa;
4449 return ret;
4450
4451exit_ion_unmap_kernel:
4452 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4453exit_ion_free:
4454 ion_free(qseecom.ion_clnt, ihandle);
4455 ihandle = NULL;
4456 return ret;
4457}
4458
4459static void __qseecom_free_img_data(struct ion_handle **ihandle)
4460{
4461 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4462 ion_free(qseecom.ion_clnt, *ihandle);
4463 *ihandle = NULL;
4464}
4465
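/*
 * Load a TA from the firmware partition into QSEE: size and stage the split
 * image in an ion buffer, load cmnlib/cmnlib64 first if required, vote for
 * clocks and bandwidth, then issue QSEOS_APP_START_COMMAND and return the
 * app_id assigned by the secure world.
 */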
4466static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4467 uint32_t *app_id)
4468{
4469 int ret = -1;
4470 uint32_t fw_size = 0;
4471 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4472 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4473 struct qseecom_command_scm_resp resp;
4474 u8 *img_data = NULL;
4475 ion_phys_addr_t pa = 0;
4476 struct ion_handle *ihandle = NULL;
4477 void *cmd_buf = NULL;
4478 size_t cmd_len;
4479 uint32_t app_arch = 0;
4480
4481 if (!data || !appname || !app_id) {
4482 pr_err("Null pointer to data or appname or appid\n");
4483 return -EINVAL;
4484 }
4485 *app_id = 0;
4486 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4487 return -EIO;
4488 data->client.app_arch = app_arch;
4489
4490 /* Check and load cmnlib */
4491 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4492 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4493 ret = qseecom_load_commonlib_image(data, "cmnlib");
4494 if (ret) {
4495 pr_err("failed to load cmnlib\n");
4496 return -EIO;
4497 }
4498 qseecom.commonlib_loaded = true;
4499 pr_debug("cmnlib is loaded\n");
4500 }
4501
4502 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4503 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4504 if (ret) {
4505 pr_err("failed to load cmnlib64\n");
4506 return -EIO;
4507 }
4508 qseecom.commonlib64_loaded = true;
4509 pr_debug("cmnlib64 is loaded\n");
4510 }
4511 }
4512
4513 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4514 if (ret)
4515 return ret;
4516
4517 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4518 if (ret) {
4519 ret = -EIO;
4520 goto exit_free_img_data;
4521 }
4522
4523 /* Populate the load_req parameters */
4524 if (qseecom.qsee_version < QSEE_VERSION_40) {
4525 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4526		/* mdt_len and img_len were already set by __qseecom_get_fw_data() */
4528 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4529 load_req.phy_addr = (uint32_t)pa;
4530 cmd_buf = (void *)&load_req;
4531 cmd_len = sizeof(struct qseecom_load_app_ireq);
4532 } else {
4533 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4534 load_req_64bit.mdt_len = load_req.mdt_len;
4535 load_req_64bit.img_len = load_req.img_len;
4536 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4537 load_req_64bit.phy_addr = (uint64_t)pa;
4538 cmd_buf = (void *)&load_req_64bit;
4539 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4540 }
4541
4542 if (qseecom.support_bus_scaling) {
4543 mutex_lock(&qsee_bw_mutex);
4544 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4545 mutex_unlock(&qsee_bw_mutex);
4546 if (ret) {
4547 ret = -EIO;
4548 goto exit_free_img_data;
4549 }
4550 }
4551
4552 ret = __qseecom_enable_clk_scale_up(data);
4553 if (ret) {
4554 ret = -EIO;
4555 goto exit_unregister_bus_bw_need;
4556 }
4557
4558 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4559 img_data, fw_size,
4560 ION_IOC_CLEAN_INV_CACHES);
4561 if (ret) {
4562 pr_err("cache operation failed %d\n", ret);
4563 goto exit_disable_clk_vote;
4564 }
4565
4566 /* SCM_CALL to load the image */
4567 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4568 &resp, sizeof(resp));
4569 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004570 pr_err("scm_call to load failed : ret %d, result %x\n",
4571 ret, resp.result);
4572 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4573 ret = -EEXIST;
4574 else
4575 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004576 goto exit_disable_clk_vote;
4577 }
4578
4579 switch (resp.result) {
4580 case QSEOS_RESULT_SUCCESS:
4581 *app_id = resp.data;
4582 break;
4583 case QSEOS_RESULT_INCOMPLETE:
4584 ret = __qseecom_process_incomplete_cmd(data, &resp);
Zhen Kong03b2eae2019-09-17 16:58:46 -07004585 if (ret) {
4586 pr_err("incomp_cmd err %d, %d, unload %d %s\n",
4587 ret, resp.result, resp.data, appname);
4588 __qseecom_unload_app(data, resp.data);
4589 ret = -EFAULT;
4590 } else {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004591 *app_id = resp.data;
Zhen Kong03b2eae2019-09-17 16:58:46 -07004592 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004593 break;
4594 case QSEOS_RESULT_FAILURE:
4595 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4596 break;
4597 default:
4598 pr_err("scm call return unknown response %d\n", resp.result);
4599 ret = -EINVAL;
4600 break;
4601 }
4602
4603exit_disable_clk_vote:
4604 __qseecom_disable_clk_scale_down(data);
4605
4606exit_unregister_bus_bw_need:
4607 if (qseecom.support_bus_scaling) {
4608 mutex_lock(&qsee_bw_mutex);
4609 qseecom_unregister_bus_bandwidth_needs(data);
4610 mutex_unlock(&qsee_bw_mutex);
4611 }
4612
4613exit_free_img_data:
4614 __qseecom_free_img_data(&ihandle);
4615 return ret;
4616}
4617
4618static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
4619 char *cmnlib_name)
4620{
4621 int ret = 0;
4622 uint32_t fw_size = 0;
4623 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4624 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4625 struct qseecom_command_scm_resp resp;
4626 u8 *img_data = NULL;
4627 ion_phys_addr_t pa = 0;
4628 void *cmd_buf = NULL;
4629 size_t cmd_len;
4630 uint32_t app_arch = 0;
Zhen Kong3bafb312017-10-18 10:27:20 -07004631 struct ion_handle *cmnlib_ion_handle = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004632
4633 if (!cmnlib_name) {
4634 pr_err("cmnlib_name is NULL\n");
4635 return -EINVAL;
4636 }
4637 if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
4638 pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
4639 cmnlib_name, strlen(cmnlib_name));
4640 return -EINVAL;
4641 }
4642
4643 if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
4644 return -EIO;
4645
Zhen Kong3bafb312017-10-18 10:27:20 -07004646 ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004647 &img_data, fw_size, &pa);
4648 if (ret)
4649 return -EIO;
4650
4651 ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
4652 if (ret) {
4653 ret = -EIO;
4654 goto exit_free_img_data;
4655 }
4656 if (qseecom.qsee_version < QSEE_VERSION_40) {
4657 load_req.phy_addr = (uint32_t)pa;
4658 load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4659 cmd_buf = (void *)&load_req;
4660 cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
4661 } else {
4662 load_req_64bit.phy_addr = (uint64_t)pa;
4663 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4664 load_req_64bit.img_len = load_req.img_len;
4665 load_req_64bit.mdt_len = load_req.mdt_len;
4666 cmd_buf = (void *)&load_req_64bit;
4667 cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
4668 }
4669
4670 if (qseecom.support_bus_scaling) {
4671 mutex_lock(&qsee_bw_mutex);
4672 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4673 mutex_unlock(&qsee_bw_mutex);
4674 if (ret) {
4675 ret = -EIO;
4676 goto exit_free_img_data;
4677 }
4678 }
4679
4680 /* Vote for the SFPB clock */
4681 ret = __qseecom_enable_clk_scale_up(data);
4682 if (ret) {
4683 ret = -EIO;
4684 goto exit_unregister_bus_bw_need;
4685 }
4686
Zhen Kong3bafb312017-10-18 10:27:20 -07004687 ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004688 img_data, fw_size,
4689 ION_IOC_CLEAN_INV_CACHES);
4690 if (ret) {
4691 pr_err("cache operation failed %d\n", ret);
4692 goto exit_disable_clk_vote;
4693 }
4694
4695 /* SCM_CALL to load the image */
4696 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4697 &resp, sizeof(resp));
4698 if (ret) {
4699 pr_err("scm_call to load failed : ret %d\n", ret);
4700 ret = -EIO;
4701 goto exit_disable_clk_vote;
4702 }
4703
4704 switch (resp.result) {
4705 case QSEOS_RESULT_SUCCESS:
4706 break;
4707 case QSEOS_RESULT_FAILURE:
4708 pr_err("scm call failed w/response result%d\n", resp.result);
4709 ret = -EINVAL;
4710 goto exit_disable_clk_vote;
4711 case QSEOS_RESULT_INCOMPLETE:
4712 ret = __qseecom_process_incomplete_cmd(data, &resp);
4713 if (ret) {
4714 pr_err("process_incomplete_cmd failed err: %d\n", ret);
4715 goto exit_disable_clk_vote;
4716 }
4717 break;
4718 default:
4719 pr_err("scm call return unknown response %d\n", resp.result);
4720 ret = -EINVAL;
4721 goto exit_disable_clk_vote;
4722 }
4723
4724exit_disable_clk_vote:
4725 __qseecom_disable_clk_scale_down(data);
4726
4727exit_unregister_bus_bw_need:
4728 if (qseecom.support_bus_scaling) {
4729 mutex_lock(&qsee_bw_mutex);
4730 qseecom_unregister_bus_bandwidth_needs(data);
4731 mutex_unlock(&qsee_bw_mutex);
4732 }
4733
4734exit_free_img_data:
Zhen Kong3bafb312017-10-18 10:27:20 -07004735 __qseecom_free_img_data(&cmnlib_ion_handle);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004736 return ret;
4737}
4738
4739static int qseecom_unload_commonlib_image(void)
4740{
4741 int ret = -EINVAL;
4742 struct qseecom_unload_lib_image_ireq unload_req = {0};
4743 struct qseecom_command_scm_resp resp;
4744
4745 /* Populate the remaining parameters */
4746 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4747
4748 /* SCM_CALL to load the image */
4749 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4750 sizeof(struct qseecom_unload_lib_image_ireq),
4751 &resp, sizeof(resp));
4752 if (ret) {
4753 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4754 ret = -EIO;
4755 } else {
4756 switch (resp.result) {
4757 case QSEOS_RESULT_SUCCESS:
4758 break;
4759 case QSEOS_RESULT_FAILURE:
4760 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4761 break;
4762 default:
4763 pr_err("scm call return unknown response %d\n",
4764 resp.result);
4765 ret = -EINVAL;
4766 break;
4767 }
4768 }
4769
4770 return ret;
4771}
4772
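/*
 * Exported kernel API: load (or attach to) a TA and set up a shared buffer
 * for it.  A minimal usage sketch from a hypothetical in-kernel client (the
 * TA name and sizes below are illustrative only):
 *
 *	struct qseecom_handle *handle = NULL;
 *	int rc = qseecom_start_app(&handle, "sampleapp", SZ_4K);
 *
 *	if (!rc)
 *		rc = qseecom_send_command(handle, handle->sbuf, req_len,
 *					  handle->sbuf + req_len, rsp_len);
 *
 * qseecom_send_command() and qseecom_shutdown_app() are assumed to be the
 * companion APIs declared in qseecom_kernel.h.
 */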
4773int qseecom_start_app(struct qseecom_handle **handle,
4774 char *app_name, uint32_t size)
4775{
4776 int32_t ret = 0;
4777 unsigned long flags = 0;
4778 struct qseecom_dev_handle *data = NULL;
4779 struct qseecom_check_app_ireq app_ireq;
4780 struct qseecom_registered_app_list *entry = NULL;
4781 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4782 bool found_app = false;
4783 size_t len;
4784 ion_phys_addr_t pa;
4785 uint32_t fw_size, app_arch;
4786 uint32_t app_id = 0;
4787
Zhen Kongc4c162a2019-01-23 12:07:12 -08004788 __wakeup_unregister_listener_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07004789 __wakeup_unload_app_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004790
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004791 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4792 pr_err("Not allowed to be called in %d state\n",
4793 atomic_read(&qseecom.qseecom_state));
4794 return -EPERM;
4795 }
4796 if (!app_name) {
4797 pr_err("failed to get the app name\n");
4798 return -EINVAL;
4799 }
4800
Zhen Kong64a6d7282017-06-16 11:55:07 -07004801 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004802 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004803 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004804 return -EINVAL;
4805 }
4806
4807 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4808 if (!(*handle))
4809 return -ENOMEM;
4810
4811 data = kzalloc(sizeof(*data), GFP_KERNEL);
4812 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304813 ret = -ENOMEM;
4814 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004815 }
4816 data->abort = 0;
4817 data->type = QSEECOM_CLIENT_APP;
4818 data->released = false;
4819 data->client.sb_length = size;
4820 data->client.user_virt_sb_base = 0;
4821 data->client.ihandle = NULL;
4822
4823 init_waitqueue_head(&data->abort_wq);
4824
4825 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4826 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4827 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4828 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304829 ret = -ENOMEM;
4830 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004831 }
4832 mutex_lock(&app_access_lock);
4833
Zhen Kong5d02be92018-05-29 16:17:29 -07004834recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004835 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4836 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4837 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4838 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304839 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004840
4841 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4842 if (app_id) {
4843 pr_warn("App id %d for [%s] app exists\n", app_id,
4844 (char *)app_ireq.app_name);
4845 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4846 list_for_each_entry(entry,
4847 &qseecom.registered_app_list_head, list){
4848 if (entry->app_id == app_id) {
4849 entry->ref_cnt++;
4850 found_app = true;
4851 break;
4852 }
4853 }
4854 spin_unlock_irqrestore(
4855 &qseecom.registered_app_list_lock, flags);
4856 if (!found_app)
4857 pr_warn("App_id %d [%s] was loaded but not registered\n",
4858				app_id, (char *)app_ireq.app_name);
4859 } else {
4860 /* load the app and get the app_id */
4861		pr_debug("%s: Loading app for the first time\n",
4862 qseecom.pdev->init_name);
4863 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004864 if (ret == -EEXIST) {
4865 pr_err("recheck if TA %s is loaded\n", app_name);
4866 goto recheck;
4867 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304868 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004869 }
4870 data->client.app_id = app_id;
4871 if (!found_app) {
4872 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4873 if (!entry) {
4874 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304875 ret = -ENOMEM;
4876 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004877 }
4878 entry->app_id = app_id;
4879 entry->ref_cnt = 1;
4880 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4881 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4882 ret = -EIO;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304883 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004884 }
4885 entry->app_arch = app_arch;
4886 entry->app_blocked = false;
4887 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004888 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004889 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4890 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4891 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4892 flags);
4893 }
4894
4895 /* Get the physical address of the ION BUF */
4896 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4897 if (ret) {
4898 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4899 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304900 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004901 }
4902
4903 /* Populate the structure for sending scm call to load image */
4904 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4905 data->client.ihandle);
4906 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4907 pr_err("ION memory mapping for client shared buf failed\n");
4908 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304909 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004910 }
4911 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4912 data->client.sb_phys = (phys_addr_t)pa;
4913 (*handle)->dev = (void *)data;
4914 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4915 (*handle)->sbuf_len = data->client.sb_length;
4916
4917 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4918 if (!kclient_entry) {
4919 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304920 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004921 }
4922 kclient_entry->handle = *handle;
4923
4924 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4925 list_add_tail(&kclient_entry->list,
4926 &qseecom.registered_kclient_list_head);
4927 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4928
4929 mutex_unlock(&app_access_lock);
Zhen Kong03b2eae2019-09-17 16:58:46 -07004930 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004931 return 0;
4932
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304933exit_ion_unmap_kernel:
4934 if (!IS_ERR_OR_NULL(data->client.ihandle))
4935 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4936exit_entry_free:
4937 kfree(entry);
4938exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004939 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304940 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4941 ion_free(qseecom.ion_clnt, data->client.ihandle);
4942 data->client.ihandle = NULL;
4943 }
4944exit_data_free:
4945 kfree(data);
4946exit_handle_free:
4947 if (*handle) {
4948 kfree(*handle);
4949 *handle = NULL;
4950 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07004951 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004952 return ret;
4953}
4954EXPORT_SYMBOL(qseecom_start_app);
4955
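/*
 * Exported kernel-client API: undo qseecom_start_app() for @handle; remove
 * it from the registered kclient list, unload the app (or drop one
 * reference), and free the handle on success.
 */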
4956int qseecom_shutdown_app(struct qseecom_handle **handle)
4957{
4958 int ret = -EINVAL;
4959 struct qseecom_dev_handle *data;
4960
4961 struct qseecom_registered_kclient_list *kclient = NULL;
4962 unsigned long flags = 0;
4963 bool found_handle = false;
4964
Zhen Kongc4c162a2019-01-23 12:07:12 -08004965 __wakeup_unregister_listener_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07004966 __wakeup_unload_app_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004967
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004968 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4969 pr_err("Not allowed to be called in %d state\n",
4970 atomic_read(&qseecom.qseecom_state));
4971 return -EPERM;
4972 }
4973
4974 if ((handle == NULL) || (*handle == NULL)) {
4975 pr_err("Handle is not initialized\n");
4976 return -EINVAL;
4977 }
4978 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4979 mutex_lock(&app_access_lock);
4980
4981 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4982 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4983 list) {
4984 if (kclient->handle == (*handle)) {
4985 list_del(&kclient->list);
4986 found_handle = true;
4987 break;
4988 }
4989 }
4990 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4991 if (!found_handle)
4992 pr_err("Unable to find the handle, exiting\n");
4993 else
4994 ret = qseecom_unload_app(data, false);
4995
4996 mutex_unlock(&app_access_lock);
4997 if (ret == 0) {
4998 kzfree(data);
4999 kzfree(*handle);
5000 kzfree(kclient);
5001 *handle = NULL;
5002 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07005003 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005004 return ret;
5005}
5006EXPORT_SYMBOL(qseecom_shutdown_app);
5007
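/*
 * Exported kernel-client API: send a command to the trusted app behind
 * @handle.  Both @send_buf and @resp_buf must lie within the shared buffer
 * set up by qseecom_start_app(); bus bandwidth and crypto clocks are voted
 * for around the underlying SCM call when the platform requires it.
 */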
5008int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
5009 uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
5010{
5011 int ret = 0;
5012 struct qseecom_send_cmd_req req = {0, 0, 0, 0};
5013 struct qseecom_dev_handle *data;
5014 bool perf_enabled = false;
5015
Zhen Kongc4c162a2019-01-23 12:07:12 -08005016 __wakeup_unregister_listener_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07005017 __wakeup_unload_app_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08005018
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005019 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
5020 pr_err("Not allowed to be called in %d state\n",
5021 atomic_read(&qseecom.qseecom_state));
5022 return -EPERM;
5023 }
5024
5025 if (handle == NULL) {
5026 pr_err("Handle is not initialized\n");
5027 return -EINVAL;
5028 }
5029 data = handle->dev;
5030
5031 req.cmd_req_len = sbuf_len;
5032 req.resp_len = rbuf_len;
5033 req.cmd_req_buf = send_buf;
5034 req.resp_buf = resp_buf;
5035
5036 if (__validate_send_cmd_inputs(data, &req))
5037 return -EINVAL;
5038
5039 mutex_lock(&app_access_lock);
5040 if (qseecom.support_bus_scaling) {
5041 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
5042 if (ret) {
5043 pr_err("Failed to set bw.\n");
5044 mutex_unlock(&app_access_lock);
5045 return ret;
5046 }
5047 }
5048 /*
5049 * On targets where crypto clock is handled by HLOS,
5050 * if clk_access_cnt is zero and perf_enabled is false,
5051 * then the crypto clock was not enabled before sending cmd
5052 * to tz, qseecom will enable the clock to avoid service failure.
5053 */
5054 if (!qseecom.no_clock_support &&
5055 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
5056 pr_debug("ce clock is not enabled!\n");
5057 ret = qseecom_perf_enable(data);
5058 if (ret) {
5059 pr_err("Failed to vote for clock with err %d\n",
5060 ret);
5061 mutex_unlock(&app_access_lock);
5062 return -EINVAL;
5063 }
5064 perf_enabled = true;
5065 }
5066 if (!strcmp(data->client.app_name, "securemm"))
5067 data->use_legacy_cmd = true;
5068
5069 ret = __qseecom_send_cmd(data, &req);
5070 data->use_legacy_cmd = false;
5071 if (qseecom.support_bus_scaling)
5072 __qseecom_add_bw_scale_down_timer(
5073 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
5074
5075 if (perf_enabled) {
5076 qsee_disable_clock_vote(data, CLK_DFAB);
5077 qsee_disable_clock_vote(data, CLK_SFPB);
5078 }
5079
5080 mutex_unlock(&app_access_lock);
5081
5082 if (ret)
5083 return ret;
5084
5085 pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
5086 req.resp_len, req.resp_buf);
5087 return ret;
5088}
5089EXPORT_SYMBOL(qseecom_send_command);
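
/*
 * Illustrative sketch only (not compiled into the driver): one way an
 * in-kernel client might use the exported qseecom_start_app(),
 * qseecom_send_command() and qseecom_shutdown_app() API above.  The TA
 * name "sampleapp" and the request/response struct layouts are
 * hypothetical placeholders; a real client must use the message format
 * its trusted application defines, and must place both messages inside
 * the shared buffer (handle->sbuf) that qseecom_start_app() allocates.
 *
 *	struct sample_req { uint32_t cmd_id; uint32_t data; };
 *	struct sample_rsp { int32_t status; };
 *
 *	static int sample_call_ta(void)
 *	{
 *		struct qseecom_handle *handle = NULL;
 *		struct sample_req *req;
 *		struct sample_rsp *rsp;
 *		int ret;
 *
 *		ret = qseecom_start_app(&handle, "sampleapp",
 *				PAGE_ALIGN(sizeof(*req) + sizeof(*rsp)));
 *		if (ret)
 *			return ret;
 *
 *		req = (struct sample_req *)handle->sbuf;
 *		rsp = (struct sample_rsp *)(handle->sbuf + sizeof(*req));
 *		req->cmd_id = 0x1;
 *		req->data = 42;
 *
 *		ret = qseecom_send_command(handle, req, sizeof(*req),
 *				rsp, sizeof(*rsp));
 *		if (!ret)
 *			pr_info("TA returned %d\n", rsp->status);
 *
 *		qseecom_shutdown_app(&handle);
 *		return ret;
 *	}
 */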
5090
5091int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
5092{
5093 int ret = 0;
5094
5095 if ((handle == NULL) || (handle->dev == NULL)) {
5096 pr_err("No valid kernel client\n");
5097 return -EINVAL;
5098 }
5099 if (high) {
5100 if (qseecom.support_bus_scaling) {
5101 mutex_lock(&qsee_bw_mutex);
5102 __qseecom_register_bus_bandwidth_needs(handle->dev,
5103 HIGH);
5104 mutex_unlock(&qsee_bw_mutex);
5105 } else {
5106 ret = qseecom_perf_enable(handle->dev);
5107 if (ret)
5108 pr_err("Failed to vote for clock with err %d\n",
5109 ret);
5110 }
5111 } else {
5112 if (!qseecom.support_bus_scaling) {
5113 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
5114 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
5115 } else {
5116 mutex_lock(&qsee_bw_mutex);
5117 qseecom_unregister_bus_bandwidth_needs(handle->dev);
5118 mutex_unlock(&qsee_bw_mutex);
5119 }
5120 }
5121 return ret;
5122}
5123EXPORT_SYMBOL(qseecom_set_bandwidth);
5124
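/*
 * Called from the smcinvoke path: wrap a listener request (or blocked-app
 * response) delivered in @desc into dummy app/client structures, run it
 * through qseecom's reentrancy/incomplete-command handling, and return the
 * updated result, response type and data in desc->ret[0..2].
 */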
5125int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
5126{
5127 struct qseecom_registered_app_list dummy_app_entry = { {0} };
5128 struct qseecom_dev_handle dummy_private_data = {0};
5129 struct qseecom_command_scm_resp resp;
5130 int ret = 0;
5131
5132 if (!desc) {
5133 pr_err("desc is NULL\n");
5134 return -EINVAL;
5135 }
5136
5137 resp.result = desc->ret[0]; /*req_cmd*/
Zhen Kong2f60f492017-06-29 15:22:14 -07005138 resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005139 resp.data = desc->ret[2]; /*listener_id*/
5140
Zhen Konge7f525f2017-12-01 18:26:25 -08005141 dummy_private_data.client.app_id = desc->ret[1];
Zhen Kong0ea975d2019-03-12 14:40:24 -07005142 dummy_private_data.client.from_smcinvoke = true;
Zhen Konge7f525f2017-12-01 18:26:25 -08005143 dummy_app_entry.app_id = desc->ret[1];
5144
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005145 mutex_lock(&app_access_lock);
Zhen Kong7458c2e2017-10-19 12:32:07 -07005146 if (qseecom.qsee_reentrancy_support)
5147 ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005148 &dummy_private_data);
Zhen Kong7458c2e2017-10-19 12:32:07 -07005149 else
5150 ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
5151 &resp);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005152 mutex_unlock(&app_access_lock);
5153 if (ret)
Zhen Kong2f60f492017-06-29 15:22:14 -07005154 pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005155 (int)desc->ret[0], (int)desc->ret[2],
5156 (int)desc->ret[1], ret);
5157 desc->ret[0] = resp.result;
5158 desc->ret[1] = resp.resp_type;
5159 desc->ret[2] = resp.data;
5160 return ret;
5161}
5162EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
5163
5164static int qseecom_send_resp(void)
5165{
5166 qseecom.send_resp_flag = 1;
5167 wake_up_interruptible(&qseecom.send_resp_wq);
5168 return 0;
5169}
5170
5171static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
5172{
5173 struct qseecom_registered_listener_list *this_lstnr = NULL;
5174
5175 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
5176 this_lstnr = __qseecom_find_svc(data->listener.id);
5177 if (this_lstnr == NULL)
5178 return -EINVAL;
5179 qseecom.send_resp_flag = 1;
5180 this_lstnr->send_resp_flag = 1;
5181 wake_up_interruptible(&qseecom.send_resp_wq);
5182 return 0;
5183}
5184
5185static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
5186 struct qseecom_send_modfd_listener_resp *resp,
5187 struct qseecom_registered_listener_list *this_lstnr)
5188{
5189 int i;
5190
5191 if (!data || !resp || !this_lstnr) {
5192 pr_err("listener handle or resp msg is null\n");
5193 return -EINVAL;
5194 }
5195
5196 if (resp->resp_buf_ptr == NULL) {
5197 pr_err("resp buffer is null\n");
5198 return -EINVAL;
5199 }
5200 /* validate resp buf length */
5201 if ((resp->resp_len == 0) ||
5202 (resp->resp_len > this_lstnr->sb_length)) {
5203 pr_err("resp buf length %d not valid\n", resp->resp_len);
5204 return -EINVAL;
5205 }
5206
5207 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
5208 pr_err("Integer overflow in resp_len & resp_buf\n");
5209 return -EINVAL;
5210 }
5211 if ((uintptr_t)this_lstnr->user_virt_sb_base >
5212 (ULONG_MAX - this_lstnr->sb_length)) {
5213 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
5214 return -EINVAL;
5215 }
5216 /* validate resp buf */
5217 if (((uintptr_t)resp->resp_buf_ptr <
5218 (uintptr_t)this_lstnr->user_virt_sb_base) ||
5219 ((uintptr_t)resp->resp_buf_ptr >=
5220 ((uintptr_t)this_lstnr->user_virt_sb_base +
5221 this_lstnr->sb_length)) ||
5222 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
5223 ((uintptr_t)this_lstnr->user_virt_sb_base +
5224 this_lstnr->sb_length))) {
5225 pr_err("resp buf is out of shared buffer region\n");
5226 return -EINVAL;
5227 }
5228
5229 /* validate offsets */
5230 for (i = 0; i < MAX_ION_FD; i++) {
5231 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
5232 pr_err("Invalid offset %d = 0x%x\n",
5233 i, resp->ifd_data[i].cmd_buf_offset);
5234 return -EINVAL;
5235 }
5236 }
5237
5238 return 0;
5239}
5240
5241static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5242 void __user *argp, bool is_64bit_addr)
5243{
5244 struct qseecom_send_modfd_listener_resp resp;
5245 struct qseecom_registered_listener_list *this_lstnr = NULL;
5246
5247 if (copy_from_user(&resp, argp, sizeof(resp))) {
5248		pr_err("copy_from_user failed\n");
5249 return -EINVAL;
5250 }
5251
5252 this_lstnr = __qseecom_find_svc(data->listener.id);
5253 if (this_lstnr == NULL)
5254 return -EINVAL;
5255
5256 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
5257 return -EINVAL;
5258
5259 resp.resp_buf_ptr = this_lstnr->sb_virt +
5260 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
5261
5262 if (!is_64bit_addr)
5263 __qseecom_update_cmd_buf(&resp, false, data);
5264 else
5265 __qseecom_update_cmd_buf_64(&resp, false, data);
5266 qseecom.send_resp_flag = 1;
5267 this_lstnr->send_resp_flag = 1;
5268 wake_up_interruptible(&qseecom.send_resp_wq);
5269 return 0;
5270}
5271
5272static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5273 void __user *argp)
5274{
5275 return __qseecom_send_modfd_resp(data, argp, false);
5276}
5277
5278static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
5279 void __user *argp)
5280{
5281 return __qseecom_send_modfd_resp(data, argp, true);
5282}
5283
5284static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
5285 void __user *argp)
5286{
5287 struct qseecom_qseos_version_req req;
5288
5289 if (copy_from_user(&req, argp, sizeof(req))) {
5290		pr_err("copy_from_user failed\n");
5291 return -EINVAL;
5292 }
5293 req.qseos_version = qseecom.qseos_version;
5294 if (copy_to_user(argp, &req, sizeof(req))) {
5295		pr_err("copy_to_user failed\n");
5296 return -EINVAL;
5297 }
5298 return 0;
5299}
5300
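/*
 * Prepare and enable the CE core, interface and bus clocks for the given
 * crypto-engine instance.  clk_access_cnt reference-counts callers so the
 * hardware is only touched on the first enable.
 */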
5301static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
5302{
5303 int rc = 0;
5304 struct qseecom_clk *qclk = NULL;
5305
5306 if (qseecom.no_clock_support)
5307 return 0;
5308
5309 if (ce == CLK_QSEE)
5310 qclk = &qseecom.qsee;
5311 if (ce == CLK_CE_DRV)
5312 qclk = &qseecom.ce_drv;
5313
5314 if (qclk == NULL) {
5315 pr_err("CLK type not supported\n");
5316 return -EINVAL;
5317 }
5318 mutex_lock(&clk_access_lock);
5319
5320 if (qclk->clk_access_cnt == ULONG_MAX) {
5321 pr_err("clk_access_cnt beyond limitation\n");
5322 goto err;
5323 }
5324 if (qclk->clk_access_cnt > 0) {
5325 qclk->clk_access_cnt++;
5326 mutex_unlock(&clk_access_lock);
5327 return rc;
5328 }
5329
5330 /* Enable CE core clk */
5331 if (qclk->ce_core_clk != NULL) {
5332 rc = clk_prepare_enable(qclk->ce_core_clk);
5333 if (rc) {
5334 pr_err("Unable to enable/prepare CE core clk\n");
5335 goto err;
5336 }
5337 }
5338 /* Enable CE clk */
5339 if (qclk->ce_clk != NULL) {
5340 rc = clk_prepare_enable(qclk->ce_clk);
5341 if (rc) {
5342 pr_err("Unable to enable/prepare CE iface clk\n");
5343 goto ce_clk_err;
5344 }
5345 }
5346 /* Enable AXI clk */
5347 if (qclk->ce_bus_clk != NULL) {
5348 rc = clk_prepare_enable(qclk->ce_bus_clk);
5349 if (rc) {
5350 pr_err("Unable to enable/prepare CE bus clk\n");
5351 goto ce_bus_clk_err;
5352 }
5353 }
5354 qclk->clk_access_cnt++;
5355 mutex_unlock(&clk_access_lock);
5356 return 0;
5357
5358ce_bus_clk_err:
5359 if (qclk->ce_clk != NULL)
5360 clk_disable_unprepare(qclk->ce_clk);
5361ce_clk_err:
5362 if (qclk->ce_core_clk != NULL)
5363 clk_disable_unprepare(qclk->ce_core_clk);
5364err:
5365 mutex_unlock(&clk_access_lock);
5366 return -EIO;
5367}
5368
5369static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5370{
5371 struct qseecom_clk *qclk;
5372
5373 if (qseecom.no_clock_support)
5374 return;
5375
5376 if (ce == CLK_QSEE)
5377 qclk = &qseecom.qsee;
5378 else
5379 qclk = &qseecom.ce_drv;
5380
5381 mutex_lock(&clk_access_lock);
5382
5383 if (qclk->clk_access_cnt == 0) {
5384 mutex_unlock(&clk_access_lock);
5385 return;
5386 }
5387
5388 if (qclk->clk_access_cnt == 1) {
5389 if (qclk->ce_clk != NULL)
5390 clk_disable_unprepare(qclk->ce_clk);
5391 if (qclk->ce_core_clk != NULL)
5392 clk_disable_unprepare(qclk->ce_core_clk);
5393 if (qclk->ce_bus_clk != NULL)
5394 clk_disable_unprepare(qclk->ce_bus_clk);
5395 }
5396 qclk->clk_access_cnt--;
5397 mutex_unlock(&clk_access_lock);
5398}
5399
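/*
 * Take a reference-counted bandwidth vote (CLK_DFAB or CLK_SFPB) for QSEE
 * crypto traffic, translating the combined vote state into an
 * msm_bus_scale_client_update_request() level and, where the CE core
 * source clock is owned by HLOS, enabling the QSEE clocks as well.
 */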
5400static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
5401 int32_t clk_type)
5402{
5403 int ret = 0;
5404 struct qseecom_clk *qclk;
5405
5406 if (qseecom.no_clock_support)
5407 return 0;
5408
5409 qclk = &qseecom.qsee;
5410 if (!qseecom.qsee_perf_client)
5411 return ret;
5412
5413 switch (clk_type) {
5414 case CLK_DFAB:
5415 mutex_lock(&qsee_bw_mutex);
5416 if (!qseecom.qsee_bw_count) {
5417 if (qseecom.qsee_sfpb_bw_count > 0)
5418 ret = msm_bus_scale_client_update_request(
5419 qseecom.qsee_perf_client, 3);
5420 else {
5421 if (qclk->ce_core_src_clk != NULL)
5422 ret = __qseecom_enable_clk(CLK_QSEE);
5423 if (!ret) {
5424 ret =
5425 msm_bus_scale_client_update_request(
5426 qseecom.qsee_perf_client, 1);
5427 if ((ret) &&
5428 (qclk->ce_core_src_clk != NULL))
5429 __qseecom_disable_clk(CLK_QSEE);
5430 }
5431 }
5432 if (ret)
5433 pr_err("DFAB Bandwidth req failed (%d)\n",
5434 ret);
5435 else {
5436 qseecom.qsee_bw_count++;
5437 data->perf_enabled = true;
5438 }
5439 } else {
5440 qseecom.qsee_bw_count++;
5441 data->perf_enabled = true;
5442 }
5443 mutex_unlock(&qsee_bw_mutex);
5444 break;
5445 case CLK_SFPB:
5446 mutex_lock(&qsee_bw_mutex);
5447 if (!qseecom.qsee_sfpb_bw_count) {
5448 if (qseecom.qsee_bw_count > 0)
5449 ret = msm_bus_scale_client_update_request(
5450 qseecom.qsee_perf_client, 3);
5451 else {
5452 if (qclk->ce_core_src_clk != NULL)
5453 ret = __qseecom_enable_clk(CLK_QSEE);
5454 if (!ret) {
5455 ret =
5456 msm_bus_scale_client_update_request(
5457 qseecom.qsee_perf_client, 2);
5458 if ((ret) &&
5459 (qclk->ce_core_src_clk != NULL))
5460 __qseecom_disable_clk(CLK_QSEE);
5461 }
5462 }
5463
5464 if (ret)
5465 pr_err("SFPB Bandwidth req failed (%d)\n",
5466 ret);
5467 else {
5468 qseecom.qsee_sfpb_bw_count++;
5469 data->fast_load_enabled = true;
5470 }
5471 } else {
5472 qseecom.qsee_sfpb_bw_count++;
5473 data->fast_load_enabled = true;
5474 }
5475 mutex_unlock(&qsee_bw_mutex);
5476 break;
5477 default:
5478 pr_err("Clock type not defined\n");
5479 break;
5480 }
5481 return ret;
5482}
5483
5484static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5485 int32_t clk_type)
5486{
5487 int32_t ret = 0;
5488 struct qseecom_clk *qclk;
5489
5490 qclk = &qseecom.qsee;
5491
5492 if (qseecom.no_clock_support)
5493 return;
5494 if (!qseecom.qsee_perf_client)
5495 return;
5496
5497 switch (clk_type) {
5498 case CLK_DFAB:
5499 mutex_lock(&qsee_bw_mutex);
5500 if (qseecom.qsee_bw_count == 0) {
5501			pr_err("Client error. Extra call to disable DFAB clk\n");
5502 mutex_unlock(&qsee_bw_mutex);
5503 return;
5504 }
5505
5506 if (qseecom.qsee_bw_count == 1) {
5507 if (qseecom.qsee_sfpb_bw_count > 0)
5508 ret = msm_bus_scale_client_update_request(
5509 qseecom.qsee_perf_client, 2);
5510 else {
5511 ret = msm_bus_scale_client_update_request(
5512 qseecom.qsee_perf_client, 0);
5513 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5514 __qseecom_disable_clk(CLK_QSEE);
5515 }
5516 if (ret)
5517				pr_err("DFAB Bandwidth req fail (%d)\n",
5518 ret);
5519 else {
5520 qseecom.qsee_bw_count--;
5521 data->perf_enabled = false;
5522 }
5523 } else {
5524 qseecom.qsee_bw_count--;
5525 data->perf_enabled = false;
5526 }
5527 mutex_unlock(&qsee_bw_mutex);
5528 break;
5529 case CLK_SFPB:
5530 mutex_lock(&qsee_bw_mutex);
5531 if (qseecom.qsee_sfpb_bw_count == 0) {
5532			pr_err("Client error. Extra call to disable SFPB clk\n");
5533 mutex_unlock(&qsee_bw_mutex);
5534 return;
5535 }
5536 if (qseecom.qsee_sfpb_bw_count == 1) {
5537 if (qseecom.qsee_bw_count > 0)
5538 ret = msm_bus_scale_client_update_request(
5539 qseecom.qsee_perf_client, 1);
5540 else {
5541 ret = msm_bus_scale_client_update_request(
5542 qseecom.qsee_perf_client, 0);
5543 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5544 __qseecom_disable_clk(CLK_QSEE);
5545 }
5546 if (ret)
5547 pr_err("SFPB Bandwidth req fail (%d)\n",
5548 ret);
5549 else {
5550 qseecom.qsee_sfpb_bw_count--;
5551 data->fast_load_enabled = false;
5552 }
5553 } else {
5554 qseecom.qsee_sfpb_bw_count--;
5555 data->fast_load_enabled = false;
5556 }
5557 mutex_unlock(&qsee_bw_mutex);
5558 break;
5559 default:
5560 pr_err("Clock type not defined\n");
5561 break;
5562 }
5563
5564}
5565
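/*
 * Handle the LOAD_EXTERNAL_ELF ioctl: import the caller's ION buffer,
 * build a 32-bit or 64-bit load request depending on the QSEE version,
 * and issue the SCM call that asks TZ to authenticate and load the
 * external ELF image.
 */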
5566static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5567 void __user *argp)
5568{
5569 struct ion_handle *ihandle; /* Ion handle */
5570 struct qseecom_load_img_req load_img_req;
5571 int uret = 0;
5572 int ret;
5573 ion_phys_addr_t pa = 0;
5574 size_t len;
5575 struct qseecom_load_app_ireq load_req;
5576 struct qseecom_load_app_64bit_ireq load_req_64bit;
5577 struct qseecom_command_scm_resp resp;
5578 void *cmd_buf = NULL;
5579 size_t cmd_len;
5580 /* Copy the relevant information needed for loading the image */
5581 if (copy_from_user(&load_img_req,
5582 (void __user *)argp,
5583 sizeof(struct qseecom_load_img_req))) {
5584 pr_err("copy_from_user failed\n");
5585 return -EFAULT;
5586 }
5587
5588 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005589 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005590 load_img_req.ifd_data_fd);
5591 if (IS_ERR_OR_NULL(ihandle)) {
5592 pr_err("Ion client could not retrieve the handle\n");
5593 return -ENOMEM;
5594 }
5595
5596 /* Get the physical address of the ION BUF */
5597 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5598 if (ret) {
5599 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5600 ret);
5601 return ret;
5602 }
5603 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5604 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5605 len, load_img_req.mdt_len,
5606 load_img_req.img_len);
5607		return -EINVAL;
5608 }
5609 /* Populate the structure for sending scm call to load image */
5610 if (qseecom.qsee_version < QSEE_VERSION_40) {
5611 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5612 load_req.mdt_len = load_img_req.mdt_len;
5613 load_req.img_len = load_img_req.img_len;
5614 load_req.phy_addr = (uint32_t)pa;
5615 cmd_buf = (void *)&load_req;
5616 cmd_len = sizeof(struct qseecom_load_app_ireq);
5617 } else {
5618 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5619 load_req_64bit.mdt_len = load_img_req.mdt_len;
5620 load_req_64bit.img_len = load_img_req.img_len;
5621 load_req_64bit.phy_addr = (uint64_t)pa;
5622 cmd_buf = (void *)&load_req_64bit;
5623 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5624 }
5625
5626 if (qseecom.support_bus_scaling) {
5627 mutex_lock(&qsee_bw_mutex);
5628 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5629 mutex_unlock(&qsee_bw_mutex);
5630 if (ret) {
5631 ret = -EIO;
5632 goto exit_cpu_restore;
5633 }
5634 }
5635
5636 /* Vote for the SFPB clock */
5637 ret = __qseecom_enable_clk_scale_up(data);
5638 if (ret) {
5639 ret = -EIO;
5640 goto exit_register_bus_bandwidth_needs;
5641 }
5642 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5643 ION_IOC_CLEAN_INV_CACHES);
5644 if (ret) {
5645 pr_err("cache operation failed %d\n", ret);
5646 goto exit_disable_clock;
5647 }
5648 /* SCM_CALL to load the external elf */
5649 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5650 &resp, sizeof(resp));
5651 if (ret) {
5652 pr_err("scm_call to load failed : ret %d\n",
5653 ret);
5654 ret = -EFAULT;
5655 goto exit_disable_clock;
5656 }
5657
5658 switch (resp.result) {
5659 case QSEOS_RESULT_SUCCESS:
5660 break;
5661 case QSEOS_RESULT_INCOMPLETE:
5662 pr_err("%s: qseos result incomplete\n", __func__);
5663 ret = __qseecom_process_incomplete_cmd(data, &resp);
5664 if (ret)
5665 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5666 break;
5667 case QSEOS_RESULT_FAILURE:
5668 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5669 ret = -EFAULT;
5670 break;
5671 default:
5672 pr_err("scm_call response result %d not supported\n",
5673 resp.result);
5674 ret = -EFAULT;
5675 break;
5676 }
5677
5678exit_disable_clock:
5679 __qseecom_disable_clk_scale_down(data);
5680
5681exit_register_bus_bandwidth_needs:
5682 if (qseecom.support_bus_scaling) {
5683 mutex_lock(&qsee_bw_mutex);
5684 uret = qseecom_unregister_bus_bandwidth_needs(data);
5685 mutex_unlock(&qsee_bw_mutex);
5686 if (uret)
5687 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5688 uret, ret);
5689 }
5690
5691exit_cpu_restore:
5692 /* Deallocate the handle */
5693 if (!IS_ERR_OR_NULL(ihandle))
5694 ion_free(qseecom.ion_clnt, ihandle);
5695 return ret;
5696}
5697
5698static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5699{
5700 int ret = 0;
5701 struct qseecom_command_scm_resp resp;
5702 struct qseecom_unload_app_ireq req;
5703
5704 /* unavailable client app */
5705 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5706
5707 /* Populate the structure for sending scm call to unload image */
5708 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5709
5710 /* SCM_CALL to unload the external elf */
5711 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5712 sizeof(struct qseecom_unload_app_ireq),
5713 &resp, sizeof(resp));
5714 if (ret) {
5715 pr_err("scm_call to unload failed : ret %d\n",
5716 ret);
5717 ret = -EFAULT;
5718 goto qseecom_unload_external_elf_scm_err;
5719 }
5720 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5721 ret = __qseecom_process_incomplete_cmd(data, &resp);
5722 if (ret)
5723 pr_err("process_incomplete_cmd fail err: %d\n",
5724 ret);
5725 } else {
5726 if (resp.result != QSEOS_RESULT_SUCCESS) {
5727 pr_err("scm_call to unload image failed resp.result =%d\n",
5728 resp.result);
5729 ret = -EFAULT;
5730 }
5731 }
5732
5733qseecom_unload_external_elf_scm_err:
5734
5735 return ret;
5736}
5737
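/*
 * Handle the APP_LOADED_QUERY ioctl: ask QSEE whether @query_req.app_name
 * is already loaded.  If it is, register (or take a reference on) the app
 * in the local list, copy the app id/arch back to userspace and return
 * -EEXIST; otherwise return 0.
 */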
5738static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5739 void __user *argp)
5740{
5741
5742 int32_t ret;
5743 struct qseecom_qseos_app_load_query query_req;
5744 struct qseecom_check_app_ireq req;
5745 struct qseecom_registered_app_list *entry = NULL;
5746 unsigned long flags = 0;
5747 uint32_t app_arch = 0, app_id = 0;
5748 bool found_app = false;
5749
5750 /* Copy the relevant information needed for loading the image */
5751 if (copy_from_user(&query_req,
5752 (void __user *)argp,
5753 sizeof(struct qseecom_qseos_app_load_query))) {
5754 pr_err("copy_from_user failed\n");
5755 return -EFAULT;
5756 }
5757
5758 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5759 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5760 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5761
5762 ret = __qseecom_check_app_exists(req, &app_id);
5763 if (ret) {
5764		pr_err("scm call to check if app is loaded failed\n");
5765 return ret; /* scm call failed */
5766 }
5767 if (app_id) {
5768 pr_debug("App id %d (%s) already exists\n", app_id,
5769 (char *)(req.app_name));
5770 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5771 list_for_each_entry(entry,
5772 &qseecom.registered_app_list_head, list){
5773 if (entry->app_id == app_id) {
5774 app_arch = entry->app_arch;
5775 entry->ref_cnt++;
5776 found_app = true;
5777 break;
5778 }
5779 }
5780 spin_unlock_irqrestore(
5781 &qseecom.registered_app_list_lock, flags);
5782 data->client.app_id = app_id;
5783 query_req.app_id = app_id;
5784 if (app_arch) {
5785 data->client.app_arch = app_arch;
5786 query_req.app_arch = app_arch;
5787 } else {
5788 data->client.app_arch = 0;
5789 query_req.app_arch = 0;
5790 }
5791 strlcpy(data->client.app_name, query_req.app_name,
5792 MAX_APP_NAME_SIZE);
5793 /*
5794 * If app was loaded by appsbl before and was not registered,
5795		 * register this app now.
5796 */
5797 if (!found_app) {
5798 pr_debug("Register app %d [%s] which was loaded before\n",
5799				app_id, (char *)query_req.app_name);
5800 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5801 if (!entry) {
5802 pr_err("kmalloc for app entry failed\n");
5803 return -ENOMEM;
5804 }
5805 entry->app_id = app_id;
5806 entry->ref_cnt = 1;
5807 entry->app_arch = data->client.app_arch;
5808 strlcpy(entry->app_name, data->client.app_name,
5809 MAX_APP_NAME_SIZE);
5810 entry->app_blocked = false;
5811 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005812 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005813 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5814 flags);
5815 list_add_tail(&entry->list,
5816 &qseecom.registered_app_list_head);
5817 spin_unlock_irqrestore(
5818 &qseecom.registered_app_list_lock, flags);
5819 }
5820 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5821 pr_err("copy_to_user failed\n");
5822 return -EFAULT;
5823 }
5824 return -EEXIST; /* app already loaded */
5825 } else {
5826 return 0; /* app not loaded */
5827 }
5828}
5829
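/*
 * Look up the crypto-engine numbers and pipe pair configured in
 * qseecom.ce_info for the given key-management usage (FDE or PFE) and CE
 * unit number, filling in @ce_hw[] and @pipe.
 */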
5830static int __qseecom_get_ce_pipe_info(
5831 enum qseecom_key_management_usage_type usage,
5832 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5833{
5834 int ret = -EINVAL;
5835 int i, j;
5836 struct qseecom_ce_info_use *p = NULL;
5837 int total = 0;
5838 struct qseecom_ce_pipe_entry *pcepipe;
5839
5840 switch (usage) {
5841 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5842 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5843 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5844 if (qseecom.support_fde) {
5845 p = qseecom.ce_info.fde;
5846 total = qseecom.ce_info.num_fde;
5847 } else {
5848 pr_err("system does not support fde\n");
5849 return -EINVAL;
5850 }
5851 break;
5852 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5853 if (qseecom.support_pfe) {
5854 p = qseecom.ce_info.pfe;
5855 total = qseecom.ce_info.num_pfe;
5856 } else {
5857 pr_err("system does not support pfe\n");
5858 return -EINVAL;
5859 }
5860 break;
5861 default:
5862 pr_err("unsupported usage %d\n", usage);
5863 return -EINVAL;
5864 }
5865
5866 for (j = 0; j < total; j++) {
5867 if (p->unit_num == unit) {
5868 pcepipe = p->ce_pipe_entry;
5869 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5870 (*ce_hw)[i] = pcepipe->ce_num;
5871 *pipe = pcepipe->ce_pipe_pair;
5872 pcepipe++;
5873 }
5874 ret = 0;
5875 break;
5876 }
5877 p++;
5878 }
5879 return ret;
5880}
5881
5882static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
5883 enum qseecom_key_management_usage_type usage,
5884 struct qseecom_key_generate_ireq *ireq)
5885{
5886 struct qseecom_command_scm_resp resp;
5887 int ret;
5888
5889 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5890 usage >= QSEOS_KM_USAGE_MAX) {
5891		pr_err("Error: unsupported usage %d\n", usage);
5892 return -EFAULT;
5893 }
5894 ret = __qseecom_enable_clk(CLK_QSEE);
5895 if (ret)
5896 return ret;
5897
5898 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5899 ireq, sizeof(struct qseecom_key_generate_ireq),
5900 &resp, sizeof(resp));
5901 if (ret) {
5902 if (ret == -EINVAL &&
5903 resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5904 pr_debug("Key ID exists.\n");
5905 ret = 0;
5906 } else {
5907 pr_err("scm call to generate key failed : %d\n", ret);
5908 ret = -EFAULT;
5909 }
5910 goto generate_key_exit;
5911 }
5912
5913 switch (resp.result) {
5914 case QSEOS_RESULT_SUCCESS:
5915 break;
5916 case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
5917 pr_debug("Key ID exists.\n");
5918 break;
5919 case QSEOS_RESULT_INCOMPLETE:
5920 ret = __qseecom_process_incomplete_cmd(data, &resp);
5921 if (ret) {
5922 if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5923 pr_debug("Key ID exists.\n");
5924 ret = 0;
5925 } else {
5926 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5927 resp.result);
5928 }
5929 }
5930 break;
5931 case QSEOS_RESULT_FAILURE:
5932 default:
5933 pr_err("gen key scm call failed resp.result %d\n", resp.result);
5934 ret = -EINVAL;
5935 break;
5936 }
5937generate_key_exit:
5938 __qseecom_disable_clk(CLK_QSEE);
5939 return ret;
5940}
5941
5942static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
5943 enum qseecom_key_management_usage_type usage,
5944 struct qseecom_key_delete_ireq *ireq)
5945{
5946 struct qseecom_command_scm_resp resp;
5947 int ret;
5948
5949 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5950 usage >= QSEOS_KM_USAGE_MAX) {
5951		pr_err("Error: unsupported usage %d\n", usage);
5952 return -EFAULT;
5953 }
5954 ret = __qseecom_enable_clk(CLK_QSEE);
5955 if (ret)
5956 return ret;
5957
5958 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5959 ireq, sizeof(struct qseecom_key_delete_ireq),
5960 &resp, sizeof(struct qseecom_command_scm_resp));
5961 if (ret) {
5962 if (ret == -EINVAL &&
5963 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5964 pr_debug("Max attempts to input password reached.\n");
5965 ret = -ERANGE;
5966 } else {
5967 pr_err("scm call to delete key failed : %d\n", ret);
5968 ret = -EFAULT;
5969 }
5970 goto del_key_exit;
5971 }
5972
5973 switch (resp.result) {
5974 case QSEOS_RESULT_SUCCESS:
5975 break;
5976 case QSEOS_RESULT_INCOMPLETE:
5977 ret = __qseecom_process_incomplete_cmd(data, &resp);
5978 if (ret) {
5979 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5980 resp.result);
5981 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5982 pr_debug("Max attempts to input password reached.\n");
5983 ret = -ERANGE;
5984 }
5985 }
5986 break;
5987 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5988 pr_debug("Max attempts to input password reached.\n");
5989 ret = -ERANGE;
5990 break;
5991 case QSEOS_RESULT_FAILURE:
5992 default:
5993 pr_err("Delete key scm call failed resp.result %d\n",
5994 resp.result);
5995 ret = -EINVAL;
5996 break;
5997 }
5998del_key_exit:
5999 __qseecom_disable_clk(CLK_QSEE);
6000 return ret;
6001}
6002
6003static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
6004 enum qseecom_key_management_usage_type usage,
6005 struct qseecom_key_select_ireq *ireq)
6006{
6007 struct qseecom_command_scm_resp resp;
6008 int ret;
6009
6010 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6011 usage >= QSEOS_KM_USAGE_MAX) {
6012		pr_err("Error: unsupported usage %d\n", usage);
6013 return -EFAULT;
6014 }
6015 ret = __qseecom_enable_clk(CLK_QSEE);
6016 if (ret)
6017 return ret;
6018
6019 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
6020 ret = __qseecom_enable_clk(CLK_CE_DRV);
6021 if (ret)
6022 return ret;
6023 }
6024
6025 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6026 ireq, sizeof(struct qseecom_key_select_ireq),
6027 &resp, sizeof(struct qseecom_command_scm_resp));
6028 if (ret) {
6029 if (ret == -EINVAL &&
6030 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
6031 pr_debug("Max attempts to input password reached.\n");
6032 ret = -ERANGE;
6033 } else if (ret == -EINVAL &&
6034 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
6035 pr_debug("Set Key operation under processing...\n");
6036 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6037 } else {
6038 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
6039 ret);
6040 ret = -EFAULT;
6041 }
6042 goto set_key_exit;
6043 }
6044
6045 switch (resp.result) {
6046 case QSEOS_RESULT_SUCCESS:
6047 break;
6048 case QSEOS_RESULT_INCOMPLETE:
6049 ret = __qseecom_process_incomplete_cmd(data, &resp);
6050 if (ret) {
6051 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
6052 resp.result);
6053 if (resp.result ==
6054 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
6055 pr_debug("Set Key operation under processing...\n");
6056 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6057 }
6058 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
6059 pr_debug("Max attempts to input password reached.\n");
6060 ret = -ERANGE;
6061 }
6062 }
6063 break;
6064 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
6065 pr_debug("Max attempts to input password reached.\n");
6066 ret = -ERANGE;
6067 break;
6068 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
6069 pr_debug("Set Key operation under processing...\n");
6070 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6071 break;
6072 case QSEOS_RESULT_FAILURE:
6073 default:
6074 pr_err("Set key scm call failed resp.result %d\n", resp.result);
6075 ret = -EINVAL;
6076 break;
6077 }
6078set_key_exit:
6079 __qseecom_disable_clk(CLK_QSEE);
6080 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
6081 __qseecom_disable_clk(CLK_CE_DRV);
6082 return ret;
6083}
6084
6085static int __qseecom_update_current_key_user_info(
6086 struct qseecom_dev_handle *data,
6087 enum qseecom_key_management_usage_type usage,
6088 struct qseecom_key_userinfo_update_ireq *ireq)
6089{
6090 struct qseecom_command_scm_resp resp;
6091 int ret;
6092
6093 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6094 usage >= QSEOS_KM_USAGE_MAX) {
6095		pr_err("Error: unsupported usage %d\n", usage);
6096 return -EFAULT;
6097 }
6098 ret = __qseecom_enable_clk(CLK_QSEE);
6099 if (ret)
6100 return ret;
6101
6102 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6103 ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
6104 &resp, sizeof(struct qseecom_command_scm_resp));
6105 if (ret) {
6106 if (ret == -EINVAL &&
6107 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
6108 pr_debug("Set Key operation under processing...\n");
6109 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6110 } else {
6111 pr_err("scm call to update key userinfo failed: %d\n",
6112 ret);
6113 __qseecom_disable_clk(CLK_QSEE);
6114 return -EFAULT;
6115 }
6116 }
6117
6118 switch (resp.result) {
6119 case QSEOS_RESULT_SUCCESS:
6120 break;
6121 case QSEOS_RESULT_INCOMPLETE:
6122 ret = __qseecom_process_incomplete_cmd(data, &resp);
6123 if (resp.result ==
6124 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
6125 pr_debug("Set Key operation under processing...\n");
6126 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6127 }
6128 if (ret)
6129 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
6130 resp.result);
6131 break;
6132 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
6133 pr_debug("Update Key operation under processing...\n");
6134 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6135 break;
6136 case QSEOS_RESULT_FAILURE:
6137 default:
6138 pr_err("Set key scm call failed resp.result %d\n", resp.result);
6139 ret = -EINVAL;
6140 break;
6141 }
6142
6143 __qseecom_disable_clk(CLK_QSEE);
6144 return ret;
6145}
6146
6147
6148static int qseecom_enable_ice_setup(int usage)
6149{
6150 int ret = 0;
6151
6152 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
6153 ret = qcom_ice_setup_ice_hw("ufs", true);
6154 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
6155 ret = qcom_ice_setup_ice_hw("sdcc", true);
6156
6157 return ret;
6158}
6159
6160static int qseecom_disable_ice_setup(int usage)
6161{
6162 int ret = 0;
6163
6164 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
6165 ret = qcom_ice_setup_ice_hw("ufs", false);
6166 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
6167 ret = qcom_ice_setup_ice_hw("sdcc", false);
6168
6169 return ret;
6170}
6171
6172static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
6173{
6174 struct qseecom_ce_info_use *pce_info_use, *p;
6175 int total = 0;
6176 int i;
6177
6178 switch (usage) {
6179 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
6180 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
6181 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
6182 p = qseecom.ce_info.fde;
6183 total = qseecom.ce_info.num_fde;
6184 break;
6185 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
6186 p = qseecom.ce_info.pfe;
6187 total = qseecom.ce_info.num_pfe;
6188 break;
6189 default:
6190 pr_err("unsupported usage %d\n", usage);
6191 return -EINVAL;
6192 }
6193
6194 pce_info_use = NULL;
6195
6196 for (i = 0; i < total; i++) {
6197 if (p->unit_num == unit) {
6198 pce_info_use = p;
6199 break;
6200 }
6201 p++;
6202 }
6203 if (!pce_info_use) {
6204		pr_err("cannot find CE unit %d\n", unit);
6205 return -EINVAL;
6206 }
6207 return pce_info_use->num_ce_pipe_entries;
6208}
6209
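/*
 * Handle the CREATE_KEY ioctl: generate (or reuse) a key for the requested
 * usage, then program it into each relevant CE/pipe pair, enabling ICE
 * around the set-key call and retrying while TZ reports the operation as
 * still pending.
 */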
6210static int qseecom_create_key(struct qseecom_dev_handle *data,
6211 void __user *argp)
6212{
6213 int i;
6214 uint32_t *ce_hw = NULL;
6215 uint32_t pipe = 0;
6216 int ret = 0;
6217 uint32_t flags = 0;
6218 struct qseecom_create_key_req create_key_req;
6219 struct qseecom_key_generate_ireq generate_key_ireq;
6220 struct qseecom_key_select_ireq set_key_ireq;
6221 uint32_t entries = 0;
6222	int entries = 0;
6223 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
6224 if (ret) {
6225 pr_err("copy_from_user failed\n");
6226 return ret;
6227 }
6228
6229 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6230 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6231 pr_err("unsupported usage %d\n", create_key_req.usage);
6232 ret = -EFAULT;
6233 return ret;
6234 }
6235 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6236 create_key_req.usage);
6237 if (entries <= 0) {
6238 pr_err("no ce instance for usage %d instance %d\n",
6239 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
6240 ret = -EINVAL;
6241 return ret;
6242 }
6243
6244 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6245 if (!ce_hw) {
6246 ret = -ENOMEM;
6247 return ret;
6248 }
6249 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
6250 DEFAULT_CE_INFO_UNIT);
6251 if (ret) {
6252 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6253 ret = -EINVAL;
6254 goto free_buf;
6255 }
6256
6257 if (qseecom.fde_key_size)
6258 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6259 else
6260 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6261
Jiten Patela7bb1d52018-05-11 12:34:26 +05306262 if (qseecom.enable_key_wrap_in_ks == true)
6263 flags |= ENABLE_KEY_WRAP_IN_KS;
6264
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006265 generate_key_ireq.flags = flags;
6266 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
6267 memset((void *)generate_key_ireq.key_id,
6268 0, QSEECOM_KEY_ID_SIZE);
6269 memset((void *)generate_key_ireq.hash32,
6270 0, QSEECOM_HASH_SIZE);
6271 memcpy((void *)generate_key_ireq.key_id,
6272 (void *)key_id_array[create_key_req.usage].desc,
6273 QSEECOM_KEY_ID_SIZE);
6274 memcpy((void *)generate_key_ireq.hash32,
6275 (void *)create_key_req.hash32,
6276 QSEECOM_HASH_SIZE);
6277
6278 ret = __qseecom_generate_and_save_key(data,
6279 create_key_req.usage, &generate_key_ireq);
6280 if (ret) {
6281 pr_err("Failed to generate key on storage: %d\n", ret);
6282 goto free_buf;
6283 }
6284
6285 for (i = 0; i < entries; i++) {
6286 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6287 if (create_key_req.usage ==
6288 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6289 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6290 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6291
6292 } else if (create_key_req.usage ==
6293 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6294 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6295 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6296
6297 } else {
6298 set_key_ireq.ce = ce_hw[i];
6299 set_key_ireq.pipe = pipe;
6300 }
6301 set_key_ireq.flags = flags;
6302
6303 /* set both PIPE_ENC and PIPE_ENC_XTS*/
6304 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6305 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6306 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6307 memcpy((void *)set_key_ireq.key_id,
6308 (void *)key_id_array[create_key_req.usage].desc,
6309 QSEECOM_KEY_ID_SIZE);
6310 memcpy((void *)set_key_ireq.hash32,
6311 (void *)create_key_req.hash32,
6312 QSEECOM_HASH_SIZE);
6313 /*
6314 * It will return false if it is GPCE based crypto instance or
6315 * ICE is setup properly
6316 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006317 ret = qseecom_enable_ice_setup(create_key_req.usage);
6318 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006319 goto free_buf;
6320
6321 do {
6322 ret = __qseecom_set_clear_ce_key(data,
6323 create_key_req.usage,
6324 &set_key_ireq);
6325 /*
6326 * wait a little before calling scm again to let other
6327 * processes run
6328 */
6329 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6330 msleep(50);
6331
6332 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6333
6334 qseecom_disable_ice_setup(create_key_req.usage);
6335
6336 if (ret) {
6337 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6338 pipe, ce_hw[i], ret);
6339 goto free_buf;
6340 } else {
6341 pr_err("Set the key successfully\n");
6342 if ((create_key_req.usage ==
6343 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6344 (create_key_req.usage ==
6345 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6346 goto free_buf;
6347 }
6348 }
6349
6350free_buf:
6351 kzfree(ce_hw);
6352 return ret;
6353}
6354
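/*
 * Handle the WIPE_KEY ioctl: optionally delete the stored key for the
 * requested usage, then clear it from each relevant CE/pipe pair by
 * programming an invalid key id.
 */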
6355static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6356 void __user *argp)
6357{
6358 uint32_t *ce_hw = NULL;
6359 uint32_t pipe = 0;
6360 int ret = 0;
6361 uint32_t flags = 0;
6362 int i, j;
6363 struct qseecom_wipe_key_req wipe_key_req;
6364 struct qseecom_key_delete_ireq delete_key_ireq;
6365 struct qseecom_key_select_ireq clear_key_ireq;
6366	int entries = 0;
6367
6368 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6369 if (ret) {
6370 pr_err("copy_from_user failed\n");
6371 return ret;
6372 }
6373
6374 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6375 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6376 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6377 ret = -EFAULT;
6378 return ret;
6379 }
6380
6381 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6382 wipe_key_req.usage);
6383 if (entries <= 0) {
6384 pr_err("no ce instance for usage %d instance %d\n",
6385 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6386 ret = -EINVAL;
6387 return ret;
6388 }
6389
6390 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6391 if (!ce_hw) {
6392 ret = -ENOMEM;
6393 return ret;
6394 }
6395
6396 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6397 DEFAULT_CE_INFO_UNIT);
6398 if (ret) {
6399 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6400 ret = -EINVAL;
6401 goto free_buf;
6402 }
6403
6404 if (wipe_key_req.wipe_key_flag) {
6405 delete_key_ireq.flags = flags;
6406 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6407 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6408 memcpy((void *)delete_key_ireq.key_id,
6409 (void *)key_id_array[wipe_key_req.usage].desc,
6410 QSEECOM_KEY_ID_SIZE);
6411 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6412
6413 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6414 &delete_key_ireq);
6415 if (ret) {
6416 pr_err("Failed to delete key from ssd storage: %d\n",
6417 ret);
6418 ret = -EFAULT;
6419 goto free_buf;
6420 }
6421 }
6422
6423 for (j = 0; j < entries; j++) {
6424 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6425 if (wipe_key_req.usage ==
6426 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6427 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6428 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6429 } else if (wipe_key_req.usage ==
6430 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6431 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6432 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6433 } else {
6434 clear_key_ireq.ce = ce_hw[j];
6435 clear_key_ireq.pipe = pipe;
6436 }
6437 clear_key_ireq.flags = flags;
6438 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6439 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6440 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6441 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6442
6443 /*
6444 * It will return false if it is GPCE based crypto instance or
6445 * ICE is setup properly
6446 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006447 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6448 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006449 goto free_buf;
6450
6451 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6452 &clear_key_ireq);
6453
6454 qseecom_disable_ice_setup(wipe_key_req.usage);
6455
6456 if (ret) {
6457 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6458 pipe, ce_hw[j], ret);
6459 ret = -EFAULT;
6460 goto free_buf;
6461 }
6462 }
6463
6464free_buf:
6465 kzfree(ce_hw);
6466 return ret;
6467}
6468
6469static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6470 void __user *argp)
6471{
6472 int ret = 0;
6473 uint32_t flags = 0;
6474 struct qseecom_update_key_userinfo_req update_key_req;
6475 struct qseecom_key_userinfo_update_ireq ireq;
6476
6477 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6478 if (ret) {
6479 pr_err("copy_from_user failed\n");
6480 return ret;
6481 }
6482
6483 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6484 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6485		pr_err("Error: unsupported usage %d\n", update_key_req.usage);
6486 return -EFAULT;
6487 }
6488
6489 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6490
6491 if (qseecom.fde_key_size)
6492 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6493 else
6494 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6495
6496 ireq.flags = flags;
6497 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6498 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6499 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6500 memcpy((void *)ireq.key_id,
6501 (void *)key_id_array[update_key_req.usage].desc,
6502 QSEECOM_KEY_ID_SIZE);
6503 memcpy((void *)ireq.current_hash32,
6504 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6505 memcpy((void *)ireq.new_hash32,
6506 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6507
6508 do {
6509 ret = __qseecom_update_current_key_user_info(data,
6510 update_key_req.usage,
6511 &ireq);
6512 /*
6513 * wait a little before calling scm again to let other
6514 * processes run
6515 */
6516 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6517 msleep(50);
6518
6519 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6520 if (ret) {
6521 pr_err("Failed to update key info: %d\n", ret);
6522 return ret;
6523 }
6524 return ret;
6525
6526}
6527static int qseecom_is_es_activated(void __user *argp)
6528{
Zhen Kong26e62742018-05-04 17:19:06 -07006529 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006530 struct qseecom_command_scm_resp resp;
6531 int ret;
6532
6533 if (qseecom.qsee_version < QSEE_VERSION_04) {
6534 pr_err("invalid qsee version\n");
6535 return -ENODEV;
6536 }
6537
6538 if (argp == NULL) {
6539 pr_err("arg is null\n");
6540 return -EINVAL;
6541 }
6542
6543 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6544 &req, sizeof(req), &resp, sizeof(resp));
6545 if (ret) {
6546 pr_err("scm_call failed\n");
6547 return ret;
6548 }
6549
6550 req.is_activated = resp.result;
6551 ret = copy_to_user(argp, &req, sizeof(req));
6552 if (ret) {
6553 pr_err("copy_to_user failed\n");
6554 return ret;
6555 }
6556
6557 return 0;
6558}
6559
6560static int qseecom_save_partition_hash(void __user *argp)
6561{
6562 struct qseecom_save_partition_hash_req req;
6563 struct qseecom_command_scm_resp resp;
6564 int ret;
6565
6566 memset(&resp, 0x00, sizeof(resp));
6567
6568 if (qseecom.qsee_version < QSEE_VERSION_04) {
6569 pr_err("invalid qsee version\n");
6570 return -ENODEV;
6571 }
6572
6573 if (argp == NULL) {
6574 pr_err("arg is null\n");
6575 return -EINVAL;
6576 }
6577
6578 ret = copy_from_user(&req, argp, sizeof(req));
6579 if (ret) {
6580 pr_err("copy_from_user failed\n");
6581 return ret;
6582 }
6583
6584 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6585 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6586 if (ret) {
6587 pr_err("qseecom_scm_call failed\n");
6588 return ret;
6589 }
6590
6591 return 0;
6592}
6593
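/*
 * Handle the MDTP_CIPHER_DIP ioctl: copy the caller's DIP buffer into
 * kernel memory, ask TZ (TZ_MDTP_CIPHER_DIP_ID) to encrypt or decrypt it
 * according to req.direction, and copy the result back to userspace.
 */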
6594static int qseecom_mdtp_cipher_dip(void __user *argp)
6595{
6596 struct qseecom_mdtp_cipher_dip_req req;
6597 u32 tzbuflenin, tzbuflenout;
6598 char *tzbufin = NULL, *tzbufout = NULL;
6599 struct scm_desc desc = {0};
6600 int ret;
6601
6602 do {
6603 /* Copy the parameters from userspace */
6604 if (argp == NULL) {
6605 pr_err("arg is null\n");
6606 ret = -EINVAL;
6607 break;
6608 }
6609
6610 ret = copy_from_user(&req, argp, sizeof(req));
6611 if (ret) {
6612 pr_err("copy_from_user failed, ret= %d\n", ret);
6613 break;
6614 }
6615
6616 if (req.in_buf == NULL || req.out_buf == NULL ||
6617 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6618 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6619 req.direction > 1) {
6620 pr_err("invalid parameters\n");
6621 ret = -EINVAL;
6622 break;
6623 }
6624
6625 /* Copy the input buffer from userspace to kernel space */
6626 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6627 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6628 if (!tzbufin) {
6629 pr_err("error allocating in buffer\n");
6630 ret = -ENOMEM;
6631 break;
6632 }
6633
6634 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6635 if (ret) {
6636 pr_err("copy_from_user failed, ret=%d\n", ret);
6637 break;
6638 }
6639
6640 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6641
6642 /* Prepare the output buffer in kernel space */
6643 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6644 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6645 if (!tzbufout) {
6646 pr_err("error allocating out buffer\n");
6647 ret = -ENOMEM;
6648 break;
6649 }
6650
6651 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6652
6653 /* Send the command to TZ */
6654 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6655 desc.args[0] = virt_to_phys(tzbufin);
6656 desc.args[1] = req.in_buf_size;
6657 desc.args[2] = virt_to_phys(tzbufout);
6658 desc.args[3] = req.out_buf_size;
6659 desc.args[4] = req.direction;
6660
6661 ret = __qseecom_enable_clk(CLK_QSEE);
6662 if (ret)
6663 break;
6664
Zhen Kong03f220d2019-02-01 17:12:34 -08006665 ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006666
6667 __qseecom_disable_clk(CLK_QSEE);
6668
6669 if (ret) {
6670 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6671 ret);
6672 break;
6673 }
6674
6675 /* Copy the output buffer from kernel space to userspace */
6676 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6677 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6678 if (ret) {
6679 pr_err("copy_to_user failed, ret=%d\n", ret);
6680 break;
6681 }
6682 } while (0);
6683
6684 kzfree(tzbufin);
6685 kzfree(tzbufout);
6686
6687 return ret;
6688}
6689
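/*
 * __qseecom_qteec_validate_msg() - sanity-check a QTEEC (GP) request:
 * the caller must be a QSEECOM_CLIENT_APP with an ion handle, req/resp
 * lengths must be non-zero and free of integer overflow, and both buffers
 * must lie entirely within the client's registered shared buffer.
 */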
6690static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6691 struct qseecom_qteec_req *req)
6692{
6693 if (!data || !data->client.ihandle) {
6694 pr_err("Client or client handle is not initialized\n");
6695 return -EINVAL;
6696 }
6697
6698 if (data->type != QSEECOM_CLIENT_APP)
6699 return -EFAULT;
6700
6701 if (req->req_len > UINT_MAX - req->resp_len) {
6702 pr_err("Integer overflow detected in req_len & rsp_len\n");
6703 return -EINVAL;
6704 }
6705
6706 if (req->req_len + req->resp_len > data->client.sb_length) {
6707 		pr_debug("Not enough memory to fit cmd_buf and resp_buf\n");
6708 		pr_debug("Required: %u, Available: %zu\n",
6709 (req->req_len + req->resp_len), data->client.sb_length);
6710 return -ENOMEM;
6711 }
6712
6713 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6714 pr_err("cmd buffer or response buffer is null\n");
6715 return -EINVAL;
6716 }
6717 if (((uintptr_t)req->req_ptr <
6718 data->client.user_virt_sb_base) ||
6719 ((uintptr_t)req->req_ptr >=
6720 (data->client.user_virt_sb_base + data->client.sb_length))) {
6721 		pr_err("cmd buffer address not within shared buffer\n");
6722 return -EINVAL;
6723 }
6724
6725 if (((uintptr_t)req->resp_ptr <
6726 data->client.user_virt_sb_base) ||
6727 ((uintptr_t)req->resp_ptr >=
6728 (data->client.user_virt_sb_base + data->client.sb_length))) {
6729 		pr_err("response buffer address not within shared buffer\n");
6730 return -EINVAL;
6731 }
6732
6733 if ((req->req_len == 0) || (req->resp_len == 0)) {
6734 		pr_err("cmd buf length/response buf length not valid\n");
6735 return -EINVAL;
6736 }
6737
6738 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6739 pr_err("Integer overflow in req_len & req_ptr\n");
6740 return -EINVAL;
6741 }
6742
6743 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6744 pr_err("Integer overflow in resp_len & resp_ptr\n");
6745 return -EINVAL;
6746 }
6747
6748 if (data->client.user_virt_sb_base >
6749 (ULONG_MAX - data->client.sb_length)) {
6750 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6751 return -EINVAL;
6752 }
6753 if ((((uintptr_t)req->req_ptr + req->req_len) >
6754 ((uintptr_t)data->client.user_virt_sb_base +
6755 data->client.sb_length)) ||
6756 (((uintptr_t)req->resp_ptr + req->resp_len) >
6757 ((uintptr_t)data->client.user_virt_sb_base +
6758 data->client.sb_length))) {
6759 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6760 return -EINVAL;
6761 }
6762 return 0;
6763}
6764
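/*
 * __qseecom_qteec_handle_pre_alc_fd() - for a pre-allocated (secure heap)
 * fd, build a coherent DMA buffer holding the sg entry count followed by
 * each entry's physical address and length, and record it in
 * data->client.sec_buf_fd[fd_idx] so it can be freed during cleanup.
 */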
6765static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6766 uint32_t fd_idx, struct sg_table *sg_ptr)
6767{
6768 struct scatterlist *sg = sg_ptr->sgl;
6769 struct qseecom_sg_entry *sg_entry;
6770 void *buf;
6771 uint i;
6772 size_t size;
6773 dma_addr_t coh_pmem;
6774
6775 if (fd_idx >= MAX_ION_FD) {
6776 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6777 return -ENOMEM;
6778 }
6779 /*
6780 	 * Allocate a buffer, populate it with the number of entries plus
6781 	 * each sg entry's physical address and length; then return the
6782 	 * physical address of the buffer.
6783 */
6784 size = sizeof(uint32_t) +
6785 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6786 size = (size + PAGE_SIZE) & PAGE_MASK;
6787 buf = dma_alloc_coherent(qseecom.pdev,
6788 size, &coh_pmem, GFP_KERNEL);
6789 if (buf == NULL) {
6790 pr_err("failed to alloc memory for sg buf\n");
6791 return -ENOMEM;
6792 }
6793 *(uint32_t *)buf = sg_ptr->nents;
6794 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6795 for (i = 0; i < sg_ptr->nents; i++) {
6796 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6797 sg_entry->len = sg->length;
6798 sg_entry++;
6799 sg = sg_next(sg);
6800 }
6801 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6802 data->client.sec_buf_fd[fd_idx].vbase = buf;
6803 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6804 data->client.sec_buf_fd[fd_idx].size = size;
6805 return 0;
6806}
6807
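/*
 * __qseecom_update_qteec_req_buf() - walk the ion fds of a modfd request
 * and patch the command buffer at each cmd_buf_offset with the buffer's
 * physical address (or, for secure-heap fds, the address of a generated
 * sg-entry list). When cleanup is set, the offsets are zeroed, any
 * pre-allocated buffers are freed and the caches are invalidated instead.
 */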
6808static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6809 struct qseecom_dev_handle *data, bool cleanup)
6810{
6811 struct ion_handle *ihandle;
6812 int ret = 0;
6813 int i = 0;
6814 uint32_t *update;
6815 struct sg_table *sg_ptr = NULL;
6816 struct scatterlist *sg;
6817 struct qseecom_param_memref *memref;
6818
6819 if (req == NULL) {
6820 pr_err("Invalid address\n");
6821 return -EINVAL;
6822 }
6823 for (i = 0; i < MAX_ION_FD; i++) {
6824 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006825 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006826 req->ifd_data[i].fd);
6827 if (IS_ERR_OR_NULL(ihandle)) {
6828 pr_err("Ion client can't retrieve the handle\n");
6829 return -ENOMEM;
6830 }
6831 if ((req->req_len < sizeof(uint32_t)) ||
6832 (req->ifd_data[i].cmd_buf_offset >
6833 req->req_len - sizeof(uint32_t))) {
6834 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6835 req->req_len,
6836 req->ifd_data[i].cmd_buf_offset);
6837 return -EINVAL;
6838 }
6839 update = (uint32_t *)((char *) req->req_ptr +
6840 req->ifd_data[i].cmd_buf_offset);
6841 if (!update) {
6842 pr_err("update pointer is NULL\n");
6843 return -EINVAL;
6844 }
6845 } else {
6846 continue;
6847 }
6848 /* Populate the cmd data structure with the phys_addr */
6849 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6850 if (IS_ERR_OR_NULL(sg_ptr)) {
6851 			pr_err("Ion client could not retrieve sg table\n");
6852 goto err;
6853 }
6854 sg = sg_ptr->sgl;
6855 if (sg == NULL) {
6856 pr_err("sg is NULL\n");
6857 goto err;
6858 }
6859 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6860 			pr_err("Num of scat entr (%d) or length (%d) invalid\n",
6861 sg_ptr->nents, sg->length);
6862 goto err;
6863 }
6864 /* clean up buf for pre-allocated fd */
6865 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6866 (*update)) {
6867 if (data->client.sec_buf_fd[i].vbase)
6868 dma_free_coherent(qseecom.pdev,
6869 data->client.sec_buf_fd[i].size,
6870 data->client.sec_buf_fd[i].vbase,
6871 data->client.sec_buf_fd[i].pbase);
6872 memset((void *)update, 0,
6873 sizeof(struct qseecom_param_memref));
6874 memset(&(data->client.sec_buf_fd[i]), 0,
6875 sizeof(struct qseecom_sec_buf_fd_info));
6876 goto clean;
6877 }
6878
6879 if (*update == 0) {
6880 /* update buf for pre-allocated fd from secure heap*/
6881 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6882 sg_ptr);
6883 if (ret) {
6884 pr_err("Failed to handle buf for fd[%d]\n", i);
6885 goto err;
6886 }
6887 memref = (struct qseecom_param_memref *)update;
6888 memref->buffer =
6889 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6890 memref->size =
6891 (uint32_t)(data->client.sec_buf_fd[i].size);
6892 } else {
6893 /* update buf for fd from non-secure qseecom heap */
6894 if (sg_ptr->nents != 1) {
6895 pr_err("Num of scat entr (%d) invalid\n",
6896 sg_ptr->nents);
6897 goto err;
6898 }
6899 if (cleanup)
6900 *update = 0;
6901 else
6902 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6903 }
6904clean:
6905 if (cleanup) {
6906 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6907 ihandle, NULL, sg->length,
6908 ION_IOC_INV_CACHES);
6909 if (ret) {
6910 pr_err("cache operation failed %d\n", ret);
6911 goto err;
6912 }
6913 } else {
6914 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6915 ihandle, NULL, sg->length,
6916 ION_IOC_CLEAN_INV_CACHES);
6917 if (ret) {
6918 pr_err("cache operation failed %d\n", ret);
6919 goto err;
6920 }
6921 data->sglistinfo_ptr[i].indexAndFlags =
6922 SGLISTINFO_SET_INDEX_FLAG(
6923 (sg_ptr->nents == 1), 0,
6924 req->ifd_data[i].cmd_buf_offset);
6925 data->sglistinfo_ptr[i].sizeOrCount =
6926 (sg_ptr->nents == 1) ?
6927 sg->length : sg_ptr->nents;
6928 data->sglist_cnt = i + 1;
6929 }
6930 /* Deallocate the handle */
6931 if (!IS_ERR_OR_NULL(ihandle))
6932 ion_free(qseecom.ion_clnt, ihandle);
6933 }
6934 return ret;
6935err:
6936 if (!IS_ERR_OR_NULL(ihandle))
6937 ion_free(qseecom.ion_clnt, ihandle);
6938 return -ENOMEM;
6939}
6940
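/*
 * __qseecom_qteec_issue_cmd() - common path for GP open-session,
 * close-session and request-cancellation: validate the message, translate
 * the shared-buffer pointers to kernel/physical addresses, build a 32-bit
 * or 64-bit ireq depending on the QSEE version, then issue the scm call,
 * handling reentrancy and QSEOS_RESULT_INCOMPLETE before syncing caches
 * and restoring any modfd request buffers.
 */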
6941static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
6942 struct qseecom_qteec_req *req, uint32_t cmd_id)
6943{
6944 struct qseecom_command_scm_resp resp;
6945 struct qseecom_qteec_ireq ireq;
6946 struct qseecom_qteec_64bit_ireq ireq_64bit;
6947 struct qseecom_registered_app_list *ptr_app;
6948 bool found_app = false;
6949 unsigned long flags;
6950 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07006951 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006952 uint32_t reqd_len_sb_in = 0;
6953 void *cmd_buf = NULL;
6954 size_t cmd_len;
6955 struct sglist_info *table = data->sglistinfo_ptr;
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306956 void *req_ptr = NULL;
6957 void *resp_ptr = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006958
6959 ret = __qseecom_qteec_validate_msg(data, req);
6960 if (ret)
6961 return ret;
6962
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306963 req_ptr = req->req_ptr;
6964 resp_ptr = req->resp_ptr;
6965
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006966 /* find app_id & img_name from list */
6967 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6968 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6969 list) {
6970 if ((ptr_app->app_id == data->client.app_id) &&
6971 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6972 found_app = true;
6973 break;
6974 }
6975 }
6976 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6977 if (!found_app) {
6978 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6979 (char *)data->client.app_name);
6980 return -ENOENT;
6981 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07006982 if (__qseecom_find_pending_unload_app(data->client.app_id,
6983 data->client.app_name)) {
6984 pr_err("app %d (%s) unload is pending\n",
6985 data->client.app_id, data->client.app_name);
6986 return -ENOENT;
6987 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006988
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306989 req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6990 (uintptr_t)req->req_ptr);
6991 req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6992 (uintptr_t)req->resp_ptr);
6993
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006994 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6995 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
6996 ret = __qseecom_update_qteec_req_buf(
6997 (struct qseecom_qteec_modfd_req *)req, data, false);
6998 if (ret)
6999 return ret;
7000 }
7001
7002 if (qseecom.qsee_version < QSEE_VERSION_40) {
7003 ireq.app_id = data->client.app_id;
7004 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05307005 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007006 ireq.req_len = req->req_len;
7007 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05307008 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007009 ireq.resp_len = req->resp_len;
7010 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
7011 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7012 dmac_flush_range((void *)table,
7013 (void *)table + SGLISTINFO_TABLE_SIZE);
7014 cmd_buf = (void *)&ireq;
7015 cmd_len = sizeof(struct qseecom_qteec_ireq);
7016 } else {
7017 ireq_64bit.app_id = data->client.app_id;
7018 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05307019 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007020 ireq_64bit.req_len = req->req_len;
7021 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05307022 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007023 ireq_64bit.resp_len = req->resp_len;
7024 if ((data->client.app_arch == ELFCLASS32) &&
7025 ((ireq_64bit.req_ptr >=
7026 PHY_ADDR_4G - ireq_64bit.req_len) ||
7027 (ireq_64bit.resp_ptr >=
7028 PHY_ADDR_4G - ireq_64bit.resp_len))){
7029 pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
7030 data->client.app_name, data->client.app_id);
7031 pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
7032 ireq_64bit.req_ptr, ireq_64bit.req_len,
7033 ireq_64bit.resp_ptr, ireq_64bit.resp_len);
7034 return -EFAULT;
7035 }
7036 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
7037 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7038 dmac_flush_range((void *)table,
7039 (void *)table + SGLISTINFO_TABLE_SIZE);
7040 cmd_buf = (void *)&ireq_64bit;
7041 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
7042 }
7043 if (qseecom.whitelist_support == true
7044 && cmd_id == QSEOS_TEE_OPEN_SESSION)
7045 *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
7046 else
7047 *(uint32_t *)cmd_buf = cmd_id;
7048
7049 reqd_len_sb_in = req->req_len + req->resp_len;
7050 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7051 data->client.sb_virt,
7052 reqd_len_sb_in,
7053 ION_IOC_CLEAN_INV_CACHES);
7054 if (ret) {
7055 pr_err("cache operation failed %d\n", ret);
7056 return ret;
7057 }
7058
7059 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
7060
7061 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
7062 cmd_buf, cmd_len,
7063 &resp, sizeof(resp));
7064 if (ret) {
7065 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
7066 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07007067 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007068 }
7069
7070 if (qseecom.qsee_reentrancy_support) {
7071 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07007072 if (ret)
7073 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007074 } else {
7075 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
7076 ret = __qseecom_process_incomplete_cmd(data, &resp);
7077 if (ret) {
7078 pr_err("process_incomplete_cmd failed err: %d\n",
7079 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07007080 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007081 }
7082 } else {
7083 if (resp.result != QSEOS_RESULT_SUCCESS) {
7084 pr_err("Response result %d not supported\n",
7085 resp.result);
7086 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07007087 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007088 }
7089 }
7090 }
Zhen Kong4af480e2017-09-19 14:34:16 -07007091exit:
7092 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007093 data->client.sb_virt, data->client.sb_length,
7094 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07007095 if (ret2) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007096 		pr_err("cache operation failed %d\n", ret2);
Zhen Kong4af480e2017-09-19 14:34:16 -07007097 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007098 }
7099
7100 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
7101 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
Zhen Kong4af480e2017-09-19 14:34:16 -07007102 ret2 = __qseecom_update_qteec_req_buf(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007103 (struct qseecom_qteec_modfd_req *)req, data, true);
Zhen Kong4af480e2017-09-19 14:34:16 -07007104 if (ret2)
7105 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007106 }
Zhen Kong4af480e2017-09-19 14:34:16 -07007107 return ret;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007108}
7109
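/*
 * Thin wrappers: copy the userspace request and hand it to
 * __qseecom_qteec_issue_cmd() with the matching QSEOS_TEE_* command id.
 */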
7110static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
7111 void __user *argp)
7112{
7113 struct qseecom_qteec_modfd_req req;
7114 int ret = 0;
7115
7116 ret = copy_from_user(&req, argp,
7117 sizeof(struct qseecom_qteec_modfd_req));
7118 if (ret) {
7119 pr_err("copy_from_user failed\n");
7120 return ret;
7121 }
7122 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7123 QSEOS_TEE_OPEN_SESSION);
7124
7125 return ret;
7126}
7127
7128static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
7129 void __user *argp)
7130{
7131 struct qseecom_qteec_req req;
7132 int ret = 0;
7133
7134 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
7135 if (ret) {
7136 pr_err("copy_from_user failed\n");
7137 return ret;
7138 }
7139 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
7140 return ret;
7141}
7142
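/*
 * qseecom_qteec_invoke_modfd_cmd() - handle QSEOS_TEE_INVOKE_COMMAND with
 * modified fds: validate the request and each fd's cmd_buf_offset, patch
 * the request buffer before the scm call, and restore it (cleanup pass)
 * once the command completes.
 */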
7143static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
7144 void __user *argp)
7145{
7146 struct qseecom_qteec_modfd_req req;
7147 struct qseecom_command_scm_resp resp;
7148 struct qseecom_qteec_ireq ireq;
7149 struct qseecom_qteec_64bit_ireq ireq_64bit;
7150 struct qseecom_registered_app_list *ptr_app;
7151 bool found_app = false;
7152 unsigned long flags;
7153 int ret = 0;
7154 int i = 0;
7155 uint32_t reqd_len_sb_in = 0;
7156 void *cmd_buf = NULL;
7157 size_t cmd_len;
7158 struct sglist_info *table = data->sglistinfo_ptr;
7159 void *req_ptr = NULL;
7160 void *resp_ptr = NULL;
7161
7162 ret = copy_from_user(&req, argp,
7163 sizeof(struct qseecom_qteec_modfd_req));
7164 if (ret) {
7165 pr_err("copy_from_user failed\n");
7166 return ret;
7167 }
7168 ret = __qseecom_qteec_validate_msg(data,
7169 (struct qseecom_qteec_req *)(&req));
7170 if (ret)
7171 return ret;
7172 req_ptr = req.req_ptr;
7173 resp_ptr = req.resp_ptr;
7174
7175 /* find app_id & img_name from list */
7176 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
7177 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
7178 list) {
7179 if ((ptr_app->app_id == data->client.app_id) &&
7180 (!strcmp(ptr_app->app_name, data->client.app_name))) {
7181 found_app = true;
7182 break;
7183 }
7184 }
7185 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
7186 if (!found_app) {
7187 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
7188 (char *)data->client.app_name);
7189 return -ENOENT;
7190 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07007191 if (__qseecom_find_pending_unload_app(data->client.app_id,
7192 data->client.app_name)) {
7193 pr_err("app %d (%s) unload is pending\n",
7194 data->client.app_id, data->client.app_name);
7195 return -ENOENT;
7196 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007197
7198 /* validate offsets */
7199 for (i = 0; i < MAX_ION_FD; i++) {
7200 if (req.ifd_data[i].fd) {
7201 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
7202 return -EINVAL;
7203 }
7204 }
7205 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7206 (uintptr_t)req.req_ptr);
7207 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7208 (uintptr_t)req.resp_ptr);
7209 ret = __qseecom_update_qteec_req_buf(&req, data, false);
7210 if (ret)
7211 return ret;
7212
7213 if (qseecom.qsee_version < QSEE_VERSION_40) {
7214 ireq.app_id = data->client.app_id;
7215 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7216 (uintptr_t)req_ptr);
7217 ireq.req_len = req.req_len;
7218 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7219 (uintptr_t)resp_ptr);
7220 ireq.resp_len = req.resp_len;
7221 cmd_buf = (void *)&ireq;
7222 cmd_len = sizeof(struct qseecom_qteec_ireq);
7223 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
7224 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7225 dmac_flush_range((void *)table,
7226 (void *)table + SGLISTINFO_TABLE_SIZE);
7227 } else {
7228 ireq_64bit.app_id = data->client.app_id;
7229 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7230 (uintptr_t)req_ptr);
7231 ireq_64bit.req_len = req.req_len;
7232 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7233 (uintptr_t)resp_ptr);
7234 ireq_64bit.resp_len = req.resp_len;
7235 cmd_buf = (void *)&ireq_64bit;
7236 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
7237 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
7238 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7239 dmac_flush_range((void *)table,
7240 (void *)table + SGLISTINFO_TABLE_SIZE);
7241 }
7242 reqd_len_sb_in = req.req_len + req.resp_len;
7243 if (qseecom.whitelist_support == true)
7244 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
7245 else
7246 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
7247
7248 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7249 data->client.sb_virt,
7250 reqd_len_sb_in,
7251 ION_IOC_CLEAN_INV_CACHES);
7252 if (ret) {
7253 pr_err("cache operation failed %d\n", ret);
7254 return ret;
7255 }
7256
7257 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
7258
7259 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
7260 cmd_buf, cmd_len,
7261 &resp, sizeof(resp));
7262 if (ret) {
7263 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
7264 ret, data->client.app_id);
7265 return ret;
7266 }
7267
7268 if (qseecom.qsee_reentrancy_support) {
7269 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
7270 } else {
7271 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
7272 ret = __qseecom_process_incomplete_cmd(data, &resp);
7273 if (ret) {
7274 pr_err("process_incomplete_cmd failed err: %d\n",
7275 ret);
7276 return ret;
7277 }
7278 } else {
7279 if (resp.result != QSEOS_RESULT_SUCCESS) {
7280 pr_err("Response result %d not supported\n",
7281 resp.result);
7282 ret = -EINVAL;
7283 }
7284 }
7285 }
7286 ret = __qseecom_update_qteec_req_buf(&req, data, true);
7287 if (ret)
7288 return ret;
7289
7290 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7291 data->client.sb_virt, data->client.sb_length,
7292 ION_IOC_INV_CACHES);
7293 if (ret) {
7294 pr_err("cache operation failed %d\n", ret);
7295 return ret;
7296 }
7297 return 0;
7298}
7299
7300static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
7301 void __user *argp)
7302{
7303 struct qseecom_qteec_modfd_req req;
7304 int ret = 0;
7305
7306 ret = copy_from_user(&req, argp,
7307 sizeof(struct qseecom_qteec_modfd_req));
7308 if (ret) {
7309 pr_err("copy_from_user failed\n");
7310 return ret;
7311 }
7312 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7313 QSEOS_TEE_REQUEST_CANCELLATION);
7314
7315 return ret;
7316}
7317
7318static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
7319{
7320 if (data->sglist_cnt) {
7321 memset(data->sglistinfo_ptr, 0,
7322 SGLISTINFO_TABLE_SIZE);
7323 data->sglist_cnt = 0;
7324 }
7325}
7326
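/*
 * qseecom_ioctl() - top-level dispatcher for qseecom ioctl commands.
 * Every command bumps data->ioctl_count so abort/release paths can wait
 * for in-flight requests; app commands serialize on app_access_lock,
 * listener commands on listener_access_lock, and bus-scaling or clock
 * votes are taken around commands that enter the secure world.
 */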
AnilKumar Chimataa312d342019-01-25 12:43:23 +05307327static long qseecom_ioctl(struct file *file,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007328 unsigned int cmd, unsigned long arg)
7329{
7330 int ret = 0;
7331 struct qseecom_dev_handle *data = file->private_data;
7332 void __user *argp = (void __user *) arg;
7333 bool perf_enabled = false;
7334
7335 if (!data) {
7336 pr_err("Invalid/uninitialized device handle\n");
7337 return -EINVAL;
7338 }
7339
7340 if (data->abort) {
7341 pr_err("Aborting qseecom driver\n");
7342 return -ENODEV;
7343 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007344 if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
7345 cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
7346 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
7347 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
Zhen Kongc4c162a2019-01-23 12:07:12 -08007348 __wakeup_unregister_listener_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07007349 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007350
7351 switch (cmd) {
7352 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
7353 if (data->type != QSEECOM_GENERIC) {
7354 pr_err("reg lstnr req: invalid handle (%d)\n",
7355 data->type);
7356 ret = -EINVAL;
7357 break;
7358 }
7359 pr_debug("ioctl register_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007360 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007361 atomic_inc(&data->ioctl_count);
7362 data->type = QSEECOM_LISTENER_SERVICE;
7363 ret = qseecom_register_listener(data, argp);
7364 atomic_dec(&data->ioctl_count);
7365 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007366 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007367 if (ret)
7368 pr_err("failed qseecom_register_listener: %d\n", ret);
7369 break;
7370 }
Neeraj Sonib30ac1f2018-04-17 14:48:42 +05307371 case QSEECOM_IOCTL_SET_ICE_INFO: {
7372 struct qseecom_ice_data_t ice_data;
7373
7374 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
7375 if (ret) {
7376 pr_err("copy_from_user failed\n");
7377 return -EFAULT;
7378 }
7379 qcom_ice_set_fde_flag(ice_data.flag);
7380 break;
7381 }
7382
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007383 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7384 if ((data->listener.id == 0) ||
7385 (data->type != QSEECOM_LISTENER_SERVICE)) {
7386 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7387 data->type, data->listener.id);
7388 ret = -EINVAL;
7389 break;
7390 }
7391 pr_debug("ioctl unregister_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007392 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007393 atomic_inc(&data->ioctl_count);
7394 ret = qseecom_unregister_listener(data);
7395 atomic_dec(&data->ioctl_count);
7396 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007397 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007398 if (ret)
7399 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7400 break;
7401 }
7402 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7403 if ((data->client.app_id == 0) ||
7404 (data->type != QSEECOM_CLIENT_APP)) {
7405 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7406 data->type, data->client.app_id);
7407 ret = -EINVAL;
7408 break;
7409 }
7410 /* Only one client allowed here at a time */
7411 mutex_lock(&app_access_lock);
7412 if (qseecom.support_bus_scaling) {
7413 /* register bus bw in case the client doesn't do it */
7414 if (!data->mode) {
7415 mutex_lock(&qsee_bw_mutex);
7416 __qseecom_register_bus_bandwidth_needs(
7417 data, HIGH);
7418 mutex_unlock(&qsee_bw_mutex);
7419 }
7420 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7421 if (ret) {
7422 pr_err("Failed to set bw.\n");
7423 ret = -EINVAL;
7424 mutex_unlock(&app_access_lock);
7425 break;
7426 }
7427 }
7428 /*
7429 * On targets where crypto clock is handled by HLOS,
7430 * if clk_access_cnt is zero and perf_enabled is false,
7431 * then the crypto clock was not enabled before sending cmd to
7432 	 * tz, so qseecom will enable the clock to avoid service failure.
7433 */
7434 if (!qseecom.no_clock_support &&
7435 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7436 pr_debug("ce clock is not enabled!\n");
7437 ret = qseecom_perf_enable(data);
7438 if (ret) {
7439 pr_err("Failed to vote for clock with err %d\n",
7440 ret);
7441 mutex_unlock(&app_access_lock);
7442 ret = -EINVAL;
7443 break;
7444 }
7445 perf_enabled = true;
7446 }
7447 atomic_inc(&data->ioctl_count);
7448 ret = qseecom_send_cmd(data, argp);
7449 if (qseecom.support_bus_scaling)
7450 __qseecom_add_bw_scale_down_timer(
7451 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7452 if (perf_enabled) {
7453 qsee_disable_clock_vote(data, CLK_DFAB);
7454 qsee_disable_clock_vote(data, CLK_SFPB);
7455 }
7456 atomic_dec(&data->ioctl_count);
7457 wake_up_all(&data->abort_wq);
7458 mutex_unlock(&app_access_lock);
7459 if (ret)
7460 pr_err("failed qseecom_send_cmd: %d\n", ret);
7461 break;
7462 }
7463 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7464 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7465 if ((data->client.app_id == 0) ||
7466 (data->type != QSEECOM_CLIENT_APP)) {
7467 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7468 data->type, data->client.app_id);
7469 ret = -EINVAL;
7470 break;
7471 }
7472 /* Only one client allowed here at a time */
7473 mutex_lock(&app_access_lock);
7474 if (qseecom.support_bus_scaling) {
7475 if (!data->mode) {
7476 mutex_lock(&qsee_bw_mutex);
7477 __qseecom_register_bus_bandwidth_needs(
7478 data, HIGH);
7479 mutex_unlock(&qsee_bw_mutex);
7480 }
7481 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7482 if (ret) {
7483 pr_err("Failed to set bw.\n");
7484 mutex_unlock(&app_access_lock);
7485 ret = -EINVAL;
7486 break;
7487 }
7488 }
7489 /*
7490 * On targets where crypto clock is handled by HLOS,
7491 * if clk_access_cnt is zero and perf_enabled is false,
7492 * then the crypto clock was not enabled before sending cmd to
7493 	 * tz, so qseecom will enable the clock to avoid service failure.
7494 */
7495 if (!qseecom.no_clock_support &&
7496 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7497 pr_debug("ce clock is not enabled!\n");
7498 ret = qseecom_perf_enable(data);
7499 if (ret) {
7500 pr_err("Failed to vote for clock with err %d\n",
7501 ret);
7502 mutex_unlock(&app_access_lock);
7503 ret = -EINVAL;
7504 break;
7505 }
7506 perf_enabled = true;
7507 }
7508 atomic_inc(&data->ioctl_count);
7509 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7510 ret = qseecom_send_modfd_cmd(data, argp);
7511 else
7512 ret = qseecom_send_modfd_cmd_64(data, argp);
7513 if (qseecom.support_bus_scaling)
7514 __qseecom_add_bw_scale_down_timer(
7515 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7516 if (perf_enabled) {
7517 qsee_disable_clock_vote(data, CLK_DFAB);
7518 qsee_disable_clock_vote(data, CLK_SFPB);
7519 }
7520 atomic_dec(&data->ioctl_count);
7521 wake_up_all(&data->abort_wq);
7522 mutex_unlock(&app_access_lock);
7523 if (ret)
7524 pr_err("failed qseecom_send_cmd: %d\n", ret);
7525 __qseecom_clean_data_sglistinfo(data);
7526 break;
7527 }
7528 case QSEECOM_IOCTL_RECEIVE_REQ: {
7529 if ((data->listener.id == 0) ||
7530 (data->type != QSEECOM_LISTENER_SERVICE)) {
7531 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7532 data->type, data->listener.id);
7533 ret = -EINVAL;
7534 break;
7535 }
7536 atomic_inc(&data->ioctl_count);
7537 ret = qseecom_receive_req(data);
7538 atomic_dec(&data->ioctl_count);
7539 wake_up_all(&data->abort_wq);
7540 if (ret && (ret != -ERESTARTSYS))
7541 pr_err("failed qseecom_receive_req: %d\n", ret);
7542 break;
7543 }
7544 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7545 if ((data->listener.id == 0) ||
7546 (data->type != QSEECOM_LISTENER_SERVICE)) {
7547 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7548 data->type, data->listener.id);
7549 ret = -EINVAL;
7550 break;
7551 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007552 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007553 atomic_inc(&data->ioctl_count);
7554 if (!qseecom.qsee_reentrancy_support)
7555 ret = qseecom_send_resp();
7556 else
7557 ret = qseecom_reentrancy_send_resp(data);
7558 atomic_dec(&data->ioctl_count);
7559 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007560 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007561 if (ret)
7562 pr_err("failed qseecom_send_resp: %d\n", ret);
7563 break;
7564 }
7565 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7566 if ((data->type != QSEECOM_CLIENT_APP) &&
7567 (data->type != QSEECOM_GENERIC) &&
7568 (data->type != QSEECOM_SECURE_SERVICE)) {
7569 pr_err("set mem param req: invalid handle (%d)\n",
7570 data->type);
7571 ret = -EINVAL;
7572 break;
7573 }
7574 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7575 mutex_lock(&app_access_lock);
7576 atomic_inc(&data->ioctl_count);
7577 ret = qseecom_set_client_mem_param(data, argp);
7578 atomic_dec(&data->ioctl_count);
7579 mutex_unlock(&app_access_lock);
7580 if (ret)
7581 			pr_err("failed qseecom_set_mem_param request: %d\n",
7582 ret);
7583 break;
7584 }
7585 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7586 if ((data->type != QSEECOM_GENERIC) &&
7587 (data->type != QSEECOM_CLIENT_APP)) {
7588 pr_err("load app req: invalid handle (%d)\n",
7589 data->type);
7590 ret = -EINVAL;
7591 break;
7592 }
7593 data->type = QSEECOM_CLIENT_APP;
7594 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7595 mutex_lock(&app_access_lock);
7596 atomic_inc(&data->ioctl_count);
7597 ret = qseecom_load_app(data, argp);
7598 atomic_dec(&data->ioctl_count);
7599 mutex_unlock(&app_access_lock);
7600 if (ret)
7601 pr_err("failed load_app request: %d\n", ret);
Zhen Kong03b2eae2019-09-17 16:58:46 -07007602 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007603 break;
7604 }
7605 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7606 if ((data->client.app_id == 0) ||
7607 (data->type != QSEECOM_CLIENT_APP)) {
7608 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7609 data->type, data->client.app_id);
7610 ret = -EINVAL;
7611 break;
7612 }
7613 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7614 mutex_lock(&app_access_lock);
7615 atomic_inc(&data->ioctl_count);
7616 ret = qseecom_unload_app(data, false);
7617 atomic_dec(&data->ioctl_count);
7618 mutex_unlock(&app_access_lock);
7619 if (ret)
7620 pr_err("failed unload_app request: %d\n", ret);
Zhen Kong03b2eae2019-09-17 16:58:46 -07007621 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007622 break;
7623 }
7624 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7625 atomic_inc(&data->ioctl_count);
7626 ret = qseecom_get_qseos_version(data, argp);
7627 if (ret)
7628 pr_err("qseecom_get_qseos_version: %d\n", ret);
7629 atomic_dec(&data->ioctl_count);
7630 break;
7631 }
7632 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7633 if ((data->type != QSEECOM_GENERIC) &&
7634 (data->type != QSEECOM_CLIENT_APP)) {
7635 pr_err("perf enable req: invalid handle (%d)\n",
7636 data->type);
7637 ret = -EINVAL;
7638 break;
7639 }
7640 if ((data->type == QSEECOM_CLIENT_APP) &&
7641 (data->client.app_id == 0)) {
7642 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7643 data->type, data->client.app_id);
7644 ret = -EINVAL;
7645 break;
7646 }
7647 atomic_inc(&data->ioctl_count);
7648 if (qseecom.support_bus_scaling) {
7649 mutex_lock(&qsee_bw_mutex);
7650 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7651 mutex_unlock(&qsee_bw_mutex);
7652 } else {
7653 ret = qseecom_perf_enable(data);
7654 if (ret)
7655 pr_err("Fail to vote for clocks %d\n", ret);
7656 }
7657 atomic_dec(&data->ioctl_count);
7658 break;
7659 }
7660 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7661 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7662 (data->type != QSEECOM_CLIENT_APP)) {
7663 pr_err("perf disable req: invalid handle (%d)\n",
7664 data->type);
7665 ret = -EINVAL;
7666 break;
7667 }
7668 if ((data->type == QSEECOM_CLIENT_APP) &&
7669 (data->client.app_id == 0)) {
7670 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7671 data->type, data->client.app_id);
7672 ret = -EINVAL;
7673 break;
7674 }
7675 atomic_inc(&data->ioctl_count);
7676 if (!qseecom.support_bus_scaling) {
7677 qsee_disable_clock_vote(data, CLK_DFAB);
7678 qsee_disable_clock_vote(data, CLK_SFPB);
7679 } else {
7680 mutex_lock(&qsee_bw_mutex);
7681 qseecom_unregister_bus_bandwidth_needs(data);
7682 mutex_unlock(&qsee_bw_mutex);
7683 }
7684 atomic_dec(&data->ioctl_count);
7685 break;
7686 }
7687
7688 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7689 /* If crypto clock is not handled by HLOS, return directly. */
7690 if (qseecom.no_clock_support) {
7691 pr_debug("crypto clock is not handled by HLOS\n");
7692 break;
7693 }
7694 if ((data->client.app_id == 0) ||
7695 (data->type != QSEECOM_CLIENT_APP)) {
7696 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7697 data->type, data->client.app_id);
7698 ret = -EINVAL;
7699 break;
7700 }
7701 atomic_inc(&data->ioctl_count);
7702 ret = qseecom_scale_bus_bandwidth(data, argp);
7703 atomic_dec(&data->ioctl_count);
7704 break;
7705 }
7706 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7707 if (data->type != QSEECOM_GENERIC) {
7708 pr_err("load ext elf req: invalid client handle (%d)\n",
7709 data->type);
7710 ret = -EINVAL;
7711 break;
7712 }
7713 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7714 data->released = true;
7715 mutex_lock(&app_access_lock);
7716 atomic_inc(&data->ioctl_count);
7717 ret = qseecom_load_external_elf(data, argp);
7718 atomic_dec(&data->ioctl_count);
7719 mutex_unlock(&app_access_lock);
7720 if (ret)
7721 pr_err("failed load_external_elf request: %d\n", ret);
7722 break;
7723 }
7724 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7725 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7726 pr_err("unload ext elf req: invalid handle (%d)\n",
7727 data->type);
7728 ret = -EINVAL;
7729 break;
7730 }
7731 data->released = true;
7732 mutex_lock(&app_access_lock);
7733 atomic_inc(&data->ioctl_count);
7734 ret = qseecom_unload_external_elf(data);
7735 atomic_dec(&data->ioctl_count);
7736 mutex_unlock(&app_access_lock);
7737 if (ret)
7738 pr_err("failed unload_app request: %d\n", ret);
7739 break;
7740 }
7741 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
Zhen Kong677362c2019-08-30 10:50:25 -07007742 if ((data->type != QSEECOM_GENERIC) &&
7743 (data->type != QSEECOM_CLIENT_APP)) {
7744 pr_err("app loaded query req: invalid handle (%d)\n",
7745 data->type);
7746 ret = -EINVAL;
7747 break;
7748 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007749 data->type = QSEECOM_CLIENT_APP;
7750 mutex_lock(&app_access_lock);
7751 atomic_inc(&data->ioctl_count);
7752 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7753 ret = qseecom_query_app_loaded(data, argp);
7754 atomic_dec(&data->ioctl_count);
7755 mutex_unlock(&app_access_lock);
7756 break;
7757 }
7758 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7759 if (data->type != QSEECOM_GENERIC) {
7760 pr_err("send cmd svc req: invalid handle (%d)\n",
7761 data->type);
7762 ret = -EINVAL;
7763 break;
7764 }
7765 data->type = QSEECOM_SECURE_SERVICE;
7766 if (qseecom.qsee_version < QSEE_VERSION_03) {
7767 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7768 qseecom.qsee_version);
7769 return -EINVAL;
7770 }
7771 mutex_lock(&app_access_lock);
7772 atomic_inc(&data->ioctl_count);
7773 ret = qseecom_send_service_cmd(data, argp);
7774 atomic_dec(&data->ioctl_count);
7775 mutex_unlock(&app_access_lock);
7776 break;
7777 }
7778 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7779 if (!(qseecom.support_pfe || qseecom.support_fde))
7780 pr_err("Features requiring key init not supported\n");
7781 if (data->type != QSEECOM_GENERIC) {
7782 pr_err("create key req: invalid handle (%d)\n",
7783 data->type);
7784 ret = -EINVAL;
7785 break;
7786 }
7787 if (qseecom.qsee_version < QSEE_VERSION_05) {
7788 pr_err("Create Key feature unsupported: qsee ver %u\n",
7789 qseecom.qsee_version);
7790 return -EINVAL;
7791 }
7792 data->released = true;
7793 mutex_lock(&app_access_lock);
7794 atomic_inc(&data->ioctl_count);
7795 ret = qseecom_create_key(data, argp);
7796 if (ret)
7797 pr_err("failed to create encryption key: %d\n", ret);
7798
7799 atomic_dec(&data->ioctl_count);
7800 mutex_unlock(&app_access_lock);
7801 break;
7802 }
7803 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7804 if (!(qseecom.support_pfe || qseecom.support_fde))
7805 pr_err("Features requiring key init not supported\n");
7806 if (data->type != QSEECOM_GENERIC) {
7807 pr_err("wipe key req: invalid handle (%d)\n",
7808 data->type);
7809 ret = -EINVAL;
7810 break;
7811 }
7812 if (qseecom.qsee_version < QSEE_VERSION_05) {
7813 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7814 qseecom.qsee_version);
7815 return -EINVAL;
7816 }
7817 data->released = true;
7818 mutex_lock(&app_access_lock);
7819 atomic_inc(&data->ioctl_count);
7820 ret = qseecom_wipe_key(data, argp);
7821 if (ret)
7822 pr_err("failed to wipe encryption key: %d\n", ret);
7823 atomic_dec(&data->ioctl_count);
7824 mutex_unlock(&app_access_lock);
7825 break;
7826 }
7827 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7828 if (!(qseecom.support_pfe || qseecom.support_fde))
7829 pr_err("Features requiring key init not supported\n");
7830 if (data->type != QSEECOM_GENERIC) {
7831 pr_err("update key req: invalid handle (%d)\n",
7832 data->type);
7833 ret = -EINVAL;
7834 break;
7835 }
7836 if (qseecom.qsee_version < QSEE_VERSION_05) {
7837 pr_err("Update Key feature unsupported in qsee ver %u\n",
7838 qseecom.qsee_version);
7839 return -EINVAL;
7840 }
7841 data->released = true;
7842 mutex_lock(&app_access_lock);
7843 atomic_inc(&data->ioctl_count);
7844 ret = qseecom_update_key_user_info(data, argp);
7845 if (ret)
7846 pr_err("failed to update key user info: %d\n", ret);
7847 atomic_dec(&data->ioctl_count);
7848 mutex_unlock(&app_access_lock);
7849 break;
7850 }
7851 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7852 if (data->type != QSEECOM_GENERIC) {
7853 pr_err("save part hash req: invalid handle (%d)\n",
7854 data->type);
7855 ret = -EINVAL;
7856 break;
7857 }
7858 data->released = true;
7859 mutex_lock(&app_access_lock);
7860 atomic_inc(&data->ioctl_count);
7861 ret = qseecom_save_partition_hash(argp);
7862 atomic_dec(&data->ioctl_count);
7863 mutex_unlock(&app_access_lock);
7864 break;
7865 }
7866 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7867 if (data->type != QSEECOM_GENERIC) {
7868 pr_err("ES activated req: invalid handle (%d)\n",
7869 data->type);
7870 ret = -EINVAL;
7871 break;
7872 }
7873 data->released = true;
7874 mutex_lock(&app_access_lock);
7875 atomic_inc(&data->ioctl_count);
7876 ret = qseecom_is_es_activated(argp);
7877 atomic_dec(&data->ioctl_count);
7878 mutex_unlock(&app_access_lock);
7879 break;
7880 }
7881 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7882 if (data->type != QSEECOM_GENERIC) {
7883 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7884 data->type);
7885 ret = -EINVAL;
7886 break;
7887 }
7888 data->released = true;
7889 mutex_lock(&app_access_lock);
7890 atomic_inc(&data->ioctl_count);
7891 ret = qseecom_mdtp_cipher_dip(argp);
7892 atomic_dec(&data->ioctl_count);
7893 mutex_unlock(&app_access_lock);
7894 break;
7895 }
7896 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7897 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7898 if ((data->listener.id == 0) ||
7899 (data->type != QSEECOM_LISTENER_SERVICE)) {
7900 			pr_err("send modfd resp: invalid handle (%d), lid(%d)\n",
7901 data->type, data->listener.id);
7902 ret = -EINVAL;
7903 break;
7904 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007905 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007906 atomic_inc(&data->ioctl_count);
7907 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7908 ret = qseecom_send_modfd_resp(data, argp);
7909 else
7910 ret = qseecom_send_modfd_resp_64(data, argp);
7911 atomic_dec(&data->ioctl_count);
7912 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007913 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007914 if (ret)
7915 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7916 __qseecom_clean_data_sglistinfo(data);
7917 break;
7918 }
7919 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7920 if ((data->client.app_id == 0) ||
7921 (data->type != QSEECOM_CLIENT_APP)) {
7922 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7923 data->type, data->client.app_id);
7924 ret = -EINVAL;
7925 break;
7926 }
7927 if (qseecom.qsee_version < QSEE_VERSION_40) {
7928 pr_err("GP feature unsupported: qsee ver %u\n",
7929 qseecom.qsee_version);
7930 return -EINVAL;
7931 }
7932 /* Only one client allowed here at a time */
7933 mutex_lock(&app_access_lock);
7934 atomic_inc(&data->ioctl_count);
7935 ret = qseecom_qteec_open_session(data, argp);
7936 atomic_dec(&data->ioctl_count);
7937 wake_up_all(&data->abort_wq);
7938 mutex_unlock(&app_access_lock);
7939 if (ret)
7940 pr_err("failed open_session_cmd: %d\n", ret);
7941 __qseecom_clean_data_sglistinfo(data);
7942 break;
7943 }
7944 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7945 if ((data->client.app_id == 0) ||
7946 (data->type != QSEECOM_CLIENT_APP)) {
7947 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7948 data->type, data->client.app_id);
7949 ret = -EINVAL;
7950 break;
7951 }
7952 if (qseecom.qsee_version < QSEE_VERSION_40) {
7953 pr_err("GP feature unsupported: qsee ver %u\n",
7954 qseecom.qsee_version);
7955 return -EINVAL;
7956 }
7957 /* Only one client allowed here at a time */
7958 mutex_lock(&app_access_lock);
7959 atomic_inc(&data->ioctl_count);
7960 ret = qseecom_qteec_close_session(data, argp);
7961 atomic_dec(&data->ioctl_count);
7962 wake_up_all(&data->abort_wq);
7963 mutex_unlock(&app_access_lock);
7964 if (ret)
7965 pr_err("failed close_session_cmd: %d\n", ret);
7966 break;
7967 }
7968 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7969 if ((data->client.app_id == 0) ||
7970 (data->type != QSEECOM_CLIENT_APP)) {
7971 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7972 data->type, data->client.app_id);
7973 ret = -EINVAL;
7974 break;
7975 }
7976 if (qseecom.qsee_version < QSEE_VERSION_40) {
7977 pr_err("GP feature unsupported: qsee ver %u\n",
7978 qseecom.qsee_version);
7979 return -EINVAL;
7980 }
7981 /* Only one client allowed here at a time */
7982 mutex_lock(&app_access_lock);
7983 atomic_inc(&data->ioctl_count);
7984 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7985 atomic_dec(&data->ioctl_count);
7986 wake_up_all(&data->abort_wq);
7987 mutex_unlock(&app_access_lock);
7988 if (ret)
7989 pr_err("failed Invoke cmd: %d\n", ret);
7990 __qseecom_clean_data_sglistinfo(data);
7991 break;
7992 }
7993 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7994 if ((data->client.app_id == 0) ||
7995 (data->type != QSEECOM_CLIENT_APP)) {
7996 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7997 data->type, data->client.app_id);
7998 ret = -EINVAL;
7999 break;
8000 }
8001 if (qseecom.qsee_version < QSEE_VERSION_40) {
8002 pr_err("GP feature unsupported: qsee ver %u\n",
8003 qseecom.qsee_version);
8004 return -EINVAL;
8005 }
8006 /* Only one client allowed here at a time */
8007 mutex_lock(&app_access_lock);
8008 atomic_inc(&data->ioctl_count);
8009 ret = qseecom_qteec_request_cancellation(data, argp);
8010 atomic_dec(&data->ioctl_count);
8011 wake_up_all(&data->abort_wq);
8012 mutex_unlock(&app_access_lock);
8013 if (ret)
8014 pr_err("failed request_cancellation: %d\n", ret);
8015 break;
8016 }
8017 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
8018 atomic_inc(&data->ioctl_count);
8019 ret = qseecom_get_ce_info(data, argp);
8020 if (ret)
8021 pr_err("failed get fde ce pipe info: %d\n", ret);
8022 atomic_dec(&data->ioctl_count);
8023 break;
8024 }
8025 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
8026 atomic_inc(&data->ioctl_count);
8027 ret = qseecom_free_ce_info(data, argp);
8028 if (ret)
8029 		pr_err("failed free fde ce pipe info: %d\n", ret);
8030 atomic_dec(&data->ioctl_count);
8031 break;
8032 }
8033 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
8034 atomic_inc(&data->ioctl_count);
8035 ret = qseecom_query_ce_info(data, argp);
8036 if (ret)
8037 		pr_err("failed query ce pipe info: %d\n", ret);
8038 atomic_dec(&data->ioctl_count);
8039 break;
8040 }
8041 default:
8042 pr_err("Invalid IOCTL: 0x%x\n", cmd);
8043 return -EINVAL;
8044 }
8045 return ret;
8046}
8047
8048static int qseecom_open(struct inode *inode, struct file *file)
8049{
8050 int ret = 0;
8051 struct qseecom_dev_handle *data;
8052
8053 data = kzalloc(sizeof(*data), GFP_KERNEL);
8054 if (!data)
8055 return -ENOMEM;
8056 file->private_data = data;
8057 data->abort = 0;
8058 data->type = QSEECOM_GENERIC;
8059 data->released = false;
8060 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
8061 data->mode = INACTIVE;
8062 init_waitqueue_head(&data->abort_wq);
8063 atomic_set(&data->ioctl_count, 0);
8064 return ret;
8065}
8066
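/*
 * __qseecom_release_disable_clk() - on release, drop any bus-bandwidth
 * registration or clock votes (CLK_SFPB/CLK_DFAB) the client still holds;
 * a no-op when the crypto clock is not handled by HLOS.
 */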
Zhen Konge5e6c942019-10-01 15:45:25 -07008067static void __qseecom_release_disable_clk(struct qseecom_dev_handle *data)
8068{
8069 if (qseecom.no_clock_support)
8070 return;
8071 if (qseecom.support_bus_scaling) {
8072 mutex_lock(&qsee_bw_mutex);
8073 if (data->mode != INACTIVE) {
8074 qseecom_unregister_bus_bandwidth_needs(data);
8075 if (qseecom.cumulative_mode == INACTIVE)
8076 __qseecom_set_msm_bus_request(INACTIVE);
8077 }
8078 mutex_unlock(&qsee_bw_mutex);
8079 } else {
8080 if (data->fast_load_enabled)
8081 qsee_disable_clock_vote(data, CLK_SFPB);
8082 if (data->perf_enabled)
8083 qsee_disable_clock_vote(data, CLK_DFAB);
8084 }
8085}
8086
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008087static int qseecom_release(struct inode *inode, struct file *file)
8088{
8089 struct qseecom_dev_handle *data = file->private_data;
8090 int ret = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08008091 bool free_private_data = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008092
Zhen Konge5e6c942019-10-01 15:45:25 -07008093 __qseecom_release_disable_clk(data);
8094 if (!data->released) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008095 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
8096 data->type, data->mode, data);
8097 switch (data->type) {
8098 case QSEECOM_LISTENER_SERVICE:
Zhen Kongbcdeda22018-11-16 13:50:51 -08008099 pr_debug("release lsnr svc %d\n", data->listener.id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08008100 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008101 ret = qseecom_unregister_listener(data);
Zhen Konge6ac4132019-09-20 13:49:41 -07008102 if (!ret)
8103 free_private_data = false;
Zhen Kong87dcf0e2019-01-04 12:34:50 -08008104 data->listener.release_called = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08008105 mutex_unlock(&listener_access_lock);
Zhen Konge5e6c942019-10-01 15:45:25 -07008106 __wakeup_unregister_listener_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008107 break;
8108 case QSEECOM_CLIENT_APP:
Zhen Kong03b2eae2019-09-17 16:58:46 -07008109 pr_debug("release app %d (%s)\n",
8110 data->client.app_id, data->client.app_name);
8111 if (data->client.app_id) {
8112 free_private_data = false;
Zhen Konge5e6c942019-10-01 15:45:25 -07008113 mutex_lock(&unload_app_pending_list_lock);
Zhen Kong03b2eae2019-09-17 16:58:46 -07008114 ret = qseecom_prepare_unload_app(data);
Zhen Konge5e6c942019-10-01 15:45:25 -07008115 mutex_unlock(&unload_app_pending_list_lock);
8116 __wakeup_unload_app_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07008117 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008118 break;
8119 case QSEECOM_SECURE_SERVICE:
8120 case QSEECOM_GENERIC:
8121 ret = qseecom_unmap_ion_allocated_memory(data);
8122 if (ret)
8123 pr_err("Ion Unmap failed\n");
8124 break;
8125 case QSEECOM_UNAVAILABLE_CLIENT_APP:
8126 break;
8127 default:
8128 			pr_err("Unsupported clnt_handle_type %d\n",
8129 data->type);
8130 break;
8131 }
8132 }
8133
Zhen Kongbcdeda22018-11-16 13:50:51 -08008134 if (free_private_data)
8135 kfree(data);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008136 return ret;
8137}
8138
8139#ifdef CONFIG_COMPAT
8140#include "compat_qseecom.c"
8141#else
8142#define compat_qseecom_ioctl NULL
8143#endif
8144
8145static const struct file_operations qseecom_fops = {
8146 .owner = THIS_MODULE,
8147 .unlocked_ioctl = qseecom_ioctl,
8148 .compat_ioctl = compat_qseecom_ioctl,
8149 .open = qseecom_open,
8150 .release = qseecom_release
8151};
8152
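/*
 * __qseecom_init_clk() - look up the core/iface/bus clocks (and optional
 * core source clock) for the QSEE or CE_DRV crypto-engine instance and set
 * the core source clock to the configured OPP frequency; handles are
 * released again by __qseecom_deinit_clk().
 */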
8153static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
8154{
8155 int rc = 0;
8156 struct device *pdev;
8157 struct qseecom_clk *qclk;
8158 char *core_clk_src = NULL;
8159 char *core_clk = NULL;
8160 char *iface_clk = NULL;
8161 char *bus_clk = NULL;
8162
8163 switch (ce) {
8164 case CLK_QSEE: {
8165 core_clk_src = "core_clk_src";
8166 core_clk = "core_clk";
8167 iface_clk = "iface_clk";
8168 bus_clk = "bus_clk";
8169 qclk = &qseecom.qsee;
8170 qclk->instance = CLK_QSEE;
8171 break;
8172 };
8173 case CLK_CE_DRV: {
8174 core_clk_src = "ce_drv_core_clk_src";
8175 core_clk = "ce_drv_core_clk";
8176 iface_clk = "ce_drv_iface_clk";
8177 bus_clk = "ce_drv_bus_clk";
8178 qclk = &qseecom.ce_drv;
8179 qclk->instance = CLK_CE_DRV;
8180 break;
8181 };
8182 default:
8183 pr_err("Invalid ce hw instance: %d!\n", ce);
8184 return -EIO;
8185 }
8186
8187 if (qseecom.no_clock_support) {
8188 qclk->ce_core_clk = NULL;
8189 qclk->ce_clk = NULL;
8190 qclk->ce_bus_clk = NULL;
8191 qclk->ce_core_src_clk = NULL;
8192 return 0;
8193 }
8194
8195 pdev = qseecom.pdev;
8196
8197 /* Get CE3 src core clk. */
8198 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
8199 if (!IS_ERR(qclk->ce_core_src_clk)) {
8200 rc = clk_set_rate(qclk->ce_core_src_clk,
8201 qseecom.ce_opp_freq_hz);
8202 if (rc) {
8203 clk_put(qclk->ce_core_src_clk);
8204 qclk->ce_core_src_clk = NULL;
8205 pr_err("Unable to set the core src clk @%uMhz.\n",
8206 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
8207 return -EIO;
8208 }
8209 } else {
8210 pr_warn("Unable to get CE core src clk, set to NULL\n");
8211 qclk->ce_core_src_clk = NULL;
8212 }
8213
8214 /* Get CE core clk */
8215 qclk->ce_core_clk = clk_get(pdev, core_clk);
8216 if (IS_ERR(qclk->ce_core_clk)) {
8217 rc = PTR_ERR(qclk->ce_core_clk);
8218 pr_err("Unable to get CE core clk\n");
8219 if (qclk->ce_core_src_clk != NULL)
8220 clk_put(qclk->ce_core_src_clk);
8221 return -EIO;
8222 }
8223
8224 /* Get CE Interface clk */
8225 qclk->ce_clk = clk_get(pdev, iface_clk);
8226 if (IS_ERR(qclk->ce_clk)) {
8227 rc = PTR_ERR(qclk->ce_clk);
8228 pr_err("Unable to get CE interface clk\n");
8229 if (qclk->ce_core_src_clk != NULL)
8230 clk_put(qclk->ce_core_src_clk);
8231 clk_put(qclk->ce_core_clk);
8232 return -EIO;
8233 }
8234
8235 /* Get CE AXI clk */
8236 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
8237 if (IS_ERR(qclk->ce_bus_clk)) {
8238 rc = PTR_ERR(qclk->ce_bus_clk);
8239 pr_err("Unable to get CE BUS interface clk\n");
8240 if (qclk->ce_core_src_clk != NULL)
8241 clk_put(qclk->ce_core_src_clk);
8242 clk_put(qclk->ce_core_clk);
8243 clk_put(qclk->ce_clk);
8244 return -EIO;
8245 }
8246
8247 return rc;
8248}
8249
8250static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
8251{
8252 struct qseecom_clk *qclk;
8253
8254 if (ce == CLK_QSEE)
8255 qclk = &qseecom.qsee;
8256 else
8257 qclk = &qseecom.ce_drv;
8258
8259 if (qclk->ce_clk != NULL) {
8260 clk_put(qclk->ce_clk);
8261 qclk->ce_clk = NULL;
8262 }
8263 if (qclk->ce_core_clk != NULL) {
8264 clk_put(qclk->ce_core_clk);
8265 qclk->ce_core_clk = NULL;
8266 }
8267 if (qclk->ce_bus_clk != NULL) {
8268 clk_put(qclk->ce_bus_clk);
8269 qclk->ce_bus_clk = NULL;
8270 }
8271 if (qclk->ce_core_src_clk != NULL) {
8272 clk_put(qclk->ce_core_src_clk);
8273 qclk->ce_core_src_clk = NULL;
8274 }
8275 qclk->instance = CLK_INVALID;
8276}
8277
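/*
 * qseecom_retrieve_ce_data() - parse the crypto-engine device-tree
 * properties: the qcom,qsee-ce-hw-instance value and, when FDE/PFE are
 * supported, the qcom,full-disk-encrypt-info / qcom,per-file-encrypt-info
 * tables, which are grouped by unit number into qseecom.ce_info.fde and
 * .pfe with a ce-pipe-pair entry list per unit.
 */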
8278static int qseecom_retrieve_ce_data(struct platform_device *pdev)
8279{
8280 int rc = 0;
8281 uint32_t hlos_num_ce_hw_instances;
8282 uint32_t disk_encrypt_pipe;
8283 uint32_t file_encrypt_pipe;
Zhen Kongffec45c2017-10-18 14:05:53 -07008284 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008285 int i;
8286 const int *tbl;
8287 int size;
8288 int entry;
8289 struct qseecom_crypto_info *pfde_tbl = NULL;
8290 struct qseecom_crypto_info *p;
8291 int tbl_size;
8292 int j;
8293 bool old_db = true;
8294 struct qseecom_ce_info_use *pce_info_use;
8295 uint32_t *unit_tbl = NULL;
8296 int total_units = 0;
8297 struct qseecom_ce_pipe_entry *pce_entry;
8298
8299 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
8300 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
8301
8302 if (of_property_read_u32((&pdev->dev)->of_node,
8303 "qcom,qsee-ce-hw-instance",
8304 &qseecom.ce_info.qsee_ce_hw_instance)) {
8305 pr_err("Fail to get qsee ce hw instance information.\n");
8306 rc = -EINVAL;
8307 goto out;
8308 } else {
8309 pr_debug("qsee-ce-hw-instance=0x%x\n",
8310 qseecom.ce_info.qsee_ce_hw_instance);
8311 }
8312
8313 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
8314 "qcom,support-fde");
8315 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
8316 "qcom,support-pfe");
8317
8318 if (!qseecom.support_pfe && !qseecom.support_fde) {
8319 		pr_warn("Device does not support PFE/FDE\n");
8320 goto out;
8321 }
8322
8323 if (qseecom.support_fde)
8324 tbl = of_get_property((&pdev->dev)->of_node,
8325 "qcom,full-disk-encrypt-info", &size);
8326 else
8327 tbl = NULL;
8328 if (tbl) {
8329 old_db = false;
8330 if (size % sizeof(struct qseecom_crypto_info)) {
8331 pr_err("full-disk-encrypt-info tbl size(%d)\n",
8332 size);
8333 rc = -EINVAL;
8334 goto out;
8335 }
8336 tbl_size = size / sizeof
8337 (struct qseecom_crypto_info);
8338
8339 pfde_tbl = kzalloc(size, GFP_KERNEL);
8340 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8341 total_units = 0;
8342
8343 if (!pfde_tbl || !unit_tbl) {
8344 pr_err("failed to alloc memory\n");
8345 rc = -ENOMEM;
8346 goto out;
8347 }
8348 if (of_property_read_u32_array((&pdev->dev)->of_node,
8349 "qcom,full-disk-encrypt-info",
8350 (u32 *)pfde_tbl, size/sizeof(u32))) {
8351 pr_err("failed to read full-disk-encrypt-info tbl\n");
8352 rc = -EINVAL;
8353 goto out;
8354 }
8355
8356 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8357 for (j = 0; j < total_units; j++) {
8358 if (p->unit_num == *(unit_tbl + j))
8359 break;
8360 }
8361 if (j == total_units) {
8362 *(unit_tbl + total_units) = p->unit_num;
8363 total_units++;
8364 }
8365 }
8366
8367 qseecom.ce_info.num_fde = total_units;
8368 pce_info_use = qseecom.ce_info.fde = kcalloc(
8369 total_units, sizeof(struct qseecom_ce_info_use),
8370 GFP_KERNEL);
8371 if (!pce_info_use) {
8372 pr_err("failed to alloc memory\n");
8373 rc = -ENOMEM;
8374 goto out;
8375 }
8376
8377 for (j = 0; j < total_units; j++, pce_info_use++) {
8378 pce_info_use->unit_num = *(unit_tbl + j);
8379 pce_info_use->alloc = false;
8380 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8381 pce_info_use->num_ce_pipe_entries = 0;
8382 pce_info_use->ce_pipe_entry = NULL;
8383 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8384 if (p->unit_num == pce_info_use->unit_num)
8385 pce_info_use->num_ce_pipe_entries++;
8386 }
8387
8388 entry = pce_info_use->num_ce_pipe_entries;
8389 pce_entry = pce_info_use->ce_pipe_entry =
8390 kcalloc(entry,
8391 sizeof(struct qseecom_ce_pipe_entry),
8392 GFP_KERNEL);
8393 if (pce_entry == NULL) {
8394 pr_err("failed to alloc memory\n");
8395 rc = -ENOMEM;
8396 goto out;
8397 }
8398
8399 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8400 if (p->unit_num == pce_info_use->unit_num) {
8401 pce_entry->ce_num = p->ce;
8402 pce_entry->ce_pipe_pair =
8403 p->pipe_pair;
8404 pce_entry->valid = true;
8405 pce_entry++;
8406 }
8407 }
8408 }
8409 kfree(unit_tbl);
8410 unit_tbl = NULL;
8411 kfree(pfde_tbl);
8412 pfde_tbl = NULL;
8413 }
8414
8415 if (qseecom.support_pfe)
8416 tbl = of_get_property((&pdev->dev)->of_node,
8417 "qcom,per-file-encrypt-info", &size);
8418 else
8419 tbl = NULL;
8420 if (tbl) {
8421 old_db = false;
8422 if (size % sizeof(struct qseecom_crypto_info)) {
8423 pr_err("per-file-encrypt-info tbl size(%d)\n",
8424 size);
8425 rc = -EINVAL;
8426 goto out;
8427 }
8428 tbl_size = size / sizeof
8429 (struct qseecom_crypto_info);
8430
8431 pfde_tbl = kzalloc(size, GFP_KERNEL);
8432 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8433 total_units = 0;
8434 if (!pfde_tbl || !unit_tbl) {
8435 pr_err("failed to alloc memory\n");
8436 rc = -ENOMEM;
8437 goto out;
8438 }
8439 if (of_property_read_u32_array((&pdev->dev)->of_node,
8440 "qcom,per-file-encrypt-info",
8441 (u32 *)pfde_tbl, size/sizeof(u32))) {
8442 pr_err("failed to read per-file-encrypt-info tbl\n");
8443 rc = -EINVAL;
8444 goto out;
8445 }
8446
8447 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8448 for (j = 0; j < total_units; j++) {
8449 if (p->unit_num == *(unit_tbl + j))
8450 break;
8451 }
8452 if (j == total_units) {
8453 *(unit_tbl + total_units) = p->unit_num;
8454 total_units++;
8455 }
8456 }
8457
8458 qseecom.ce_info.num_pfe = total_units;
8459 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8460 total_units, sizeof(struct qseecom_ce_info_use),
8461 GFP_KERNEL);
8462 if (!pce_info_use) {
8463 pr_err("failed to alloc memory\n");
8464 rc = -ENOMEM;
8465 goto out;
8466 }
8467
8468 for (j = 0; j < total_units; j++, pce_info_use++) {
8469 pce_info_use->unit_num = *(unit_tbl + j);
8470 pce_info_use->alloc = false;
8471 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8472 pce_info_use->num_ce_pipe_entries = 0;
8473 pce_info_use->ce_pipe_entry = NULL;
8474 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8475 if (p->unit_num == pce_info_use->unit_num)
8476 pce_info_use->num_ce_pipe_entries++;
8477 }
8478
8479 entry = pce_info_use->num_ce_pipe_entries;
8480 pce_entry = pce_info_use->ce_pipe_entry =
8481 kcalloc(entry,
8482 sizeof(struct qseecom_ce_pipe_entry),
8483 GFP_KERNEL);
8484 if (pce_entry == NULL) {
8485 pr_err("failed to alloc memory\n");
8486 rc = -ENOMEM;
8487 goto out;
8488 }
8489
8490 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8491 if (p->unit_num == pce_info_use->unit_num) {
8492 pce_entry->ce_num = p->ce;
8493 pce_entry->ce_pipe_pair =
8494 p->pipe_pair;
8495 pce_entry->valid = true;
8496 pce_entry++;
8497 }
8498 }
8499 }
8500 kfree(unit_tbl);
8501 unit_tbl = NULL;
8502 kfree(pfde_tbl);
8503 pfde_tbl = NULL;
8504 }
8505
8506 if (!old_db)
8507 goto out1;
8508
8509 if (of_property_read_bool((&pdev->dev)->of_node,
8510 "qcom,support-multiple-ce-hw-instance")) {
8511 if (of_property_read_u32((&pdev->dev)->of_node,
8512 "qcom,hlos-num-ce-hw-instances",
8513 &hlos_num_ce_hw_instances)) {
8514 pr_err("Fail: get hlos number of ce hw instance\n");
8515 rc = -EINVAL;
8516 goto out;
8517 }
8518 } else {
8519 hlos_num_ce_hw_instances = 1;
8520 }
8521
8522 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8523 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8524 MAX_CE_PIPE_PAIR_PER_UNIT);
8525 rc = -EINVAL;
8526 goto out;
8527 }
8528
8529 if (of_property_read_u32_array((&pdev->dev)->of_node,
8530 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8531 hlos_num_ce_hw_instances)) {
8532 pr_err("Fail: get hlos ce hw instance info\n");
8533 rc = -EINVAL;
8534 goto out;
8535 }
8536
8537 if (qseecom.support_fde) {
8538 pce_info_use = qseecom.ce_info.fde =
8539 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8540 if (!pce_info_use) {
8541 pr_err("failed to alloc memory\n");
8542 rc = -ENOMEM;
8543 goto out;
8544 }
8545 /* by default for old db */
8546 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8547 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8548 pce_info_use->alloc = false;
8549 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8550 pce_info_use->ce_pipe_entry = NULL;
8551 if (of_property_read_u32((&pdev->dev)->of_node,
8552 "qcom,disk-encrypt-pipe-pair",
8553 &disk_encrypt_pipe)) {
8554 pr_err("Fail to get FDE pipe information.\n");
8555 rc = -EINVAL;
8556 goto out;
8557 } else {
8558 pr_debug("disk-encrypt-pipe-pair=0x%x",
8559 disk_encrypt_pipe);
8560 }
8561 entry = pce_info_use->num_ce_pipe_entries =
8562 hlos_num_ce_hw_instances;
8563 pce_entry = pce_info_use->ce_pipe_entry =
8564 kcalloc(entry,
8565 sizeof(struct qseecom_ce_pipe_entry),
8566 GFP_KERNEL);
8567 if (pce_entry == NULL) {
8568 pr_err("failed to alloc memory\n");
8569 rc = -ENOMEM;
8570 goto out;
8571 }
8572 for (i = 0; i < entry; i++) {
8573 pce_entry->ce_num = hlos_ce_hw_instance[i];
8574 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8575 pce_entry->valid = 1;
8576 pce_entry++;
8577 }
8578 } else {
8579 pr_warn("Device does not support FDE");
8580 disk_encrypt_pipe = 0xff;
8581 }
8582 if (qseecom.support_pfe) {
8583 pce_info_use = qseecom.ce_info.pfe =
8584 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8585 if (!pce_info_use) {
8586 pr_err("failed to alloc memory\n");
8587 rc = -ENOMEM;
8588 goto out;
8589 }
8590 /* by default for old db */
8591 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8592 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8593 pce_info_use->alloc = false;
8594 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8595 pce_info_use->ce_pipe_entry = NULL;
8596
8597 if (of_property_read_u32((&pdev->dev)->of_node,
8598 "qcom,file-encrypt-pipe-pair",
8599 &file_encrypt_pipe)) {
8600 pr_err("Fail to get PFE pipe information.\n");
8601 rc = -EINVAL;
8602 goto out;
8603 } else {
8604 pr_debug("file-encrypt-pipe-pair=0x%x",
8605 file_encrypt_pipe);
8606 }
8607 entry = pce_info_use->num_ce_pipe_entries =
8608 hlos_num_ce_hw_instances;
8609 pce_entry = pce_info_use->ce_pipe_entry =
8610 kcalloc(entry,
8611 sizeof(struct qseecom_ce_pipe_entry),
8612 GFP_KERNEL);
8613 if (pce_entry == NULL) {
8614 pr_err("failed to alloc memory\n");
8615 rc = -ENOMEM;
8616 goto out;
8617 }
8618 for (i = 0; i < entry; i++) {
8619 pce_entry->ce_num = hlos_ce_hw_instance[i];
8620 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8621 pce_entry->valid = 1;
8622 pce_entry++;
8623 }
8624 } else {
8625 pr_warn("Device does not support PFE");
8626 file_encrypt_pipe = 0xff;
8627 }
8628
8629out1:
8630 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8631 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8632out:
8633 if (rc) {
8634 if (qseecom.ce_info.fde) {
8635 pce_info_use = qseecom.ce_info.fde;
8636 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8637 pce_entry = pce_info_use->ce_pipe_entry;
8638 kfree(pce_entry);
8639 pce_info_use++;
8640 }
8641 }
8642 kfree(qseecom.ce_info.fde);
8643 qseecom.ce_info.fde = NULL;
8644 if (qseecom.ce_info.pfe) {
8645 pce_info_use = qseecom.ce_info.pfe;
8646 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8647 pce_entry = pce_info_use->ce_pipe_entry;
8648 kfree(pce_entry);
8649 pce_info_use++;
8650 }
8651 }
8652 kfree(qseecom.ce_info.pfe);
8653 qseecom.ce_info.pfe = NULL;
8654 }
8655 kfree(unit_tbl);
8656 kfree(pfde_tbl);
8657 return rc;
8658}
8659
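/*
 * Reserve a CE info unit for the caller: pick a free unit (or the unit
 * already bound to the caller's handle) for the requested usage, mark it
 * allocated, and copy its pipe-pair entries back to user space. Returns
 * -EBUSY when no unit is available.
 */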
8660static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8661 void __user *argp)
8662{
8663 struct qseecom_ce_info_req req;
8664 struct qseecom_ce_info_req *pinfo = &req;
8665 int ret = 0;
8666 int i;
8667 unsigned int entries;
8668 struct qseecom_ce_info_use *pce_info_use, *p;
8669 int total = 0;
8670 bool found = false;
8671 struct qseecom_ce_pipe_entry *pce_entry;
8672
8673 ret = copy_from_user(pinfo, argp,
8674 sizeof(struct qseecom_ce_info_req));
8675 if (ret) {
8676 pr_err("copy_from_user failed\n");
8677 return ret;
8678 }
8679
8680 switch (pinfo->usage) {
8681 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8682 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8683 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8684 if (qseecom.support_fde) {
8685 p = qseecom.ce_info.fde;
8686 total = qseecom.ce_info.num_fde;
8687 } else {
8688 pr_err("system does not support fde\n");
8689 return -EINVAL;
8690 }
8691 break;
8692 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8693 if (qseecom.support_pfe) {
8694 p = qseecom.ce_info.pfe;
8695 total = qseecom.ce_info.num_pfe;
8696 } else {
8697 pr_err("system does not support pfe\n");
8698 return -EINVAL;
8699 }
8700 break;
8701 default:
8702 pr_err("unsupported usage %d\n", pinfo->usage);
8703 return -EINVAL;
8704 }
8705
8706 pce_info_use = NULL;
8707 for (i = 0; i < total; i++) {
8708 if (!p->alloc)
8709 pce_info_use = p;
8710 else if (!memcmp(p->handle, pinfo->handle,
8711 MAX_CE_INFO_HANDLE_SIZE)) {
8712 pce_info_use = p;
8713 found = true;
8714 break;
8715 }
8716 p++;
8717 }
8718
8719 if (pce_info_use == NULL)
8720 return -EBUSY;
8721
8722 pinfo->unit_num = pce_info_use->unit_num;
8723 if (!pce_info_use->alloc) {
8724 pce_info_use->alloc = true;
8725 memcpy(pce_info_use->handle,
8726 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8727 }
8728 if (pce_info_use->num_ce_pipe_entries >
8729 MAX_CE_PIPE_PAIR_PER_UNIT)
8730 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8731 else
8732 entries = pce_info_use->num_ce_pipe_entries;
8733 pinfo->num_ce_pipe_entries = entries;
8734 pce_entry = pce_info_use->ce_pipe_entry;
8735 for (i = 0; i < entries; i++, pce_entry++)
8736 pinfo->ce_pipe_entry[i] = *pce_entry;
8737 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8738 pinfo->ce_pipe_entry[i].valid = 0;
8739
8740 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8741 pr_err("copy_to_user failed\n");
8742 ret = -EFAULT;
8743 }
8744 return ret;
8745}
8746
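/*
 * Release the CE info unit previously reserved for the caller's handle:
 * clear the stored handle and mark the unit as free again.
 */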
8747static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8748 void __user *argp)
8749{
8750 struct qseecom_ce_info_req req;
8751 struct qseecom_ce_info_req *pinfo = &req;
8752 int ret = 0;
8753 struct qseecom_ce_info_use *p;
8754 int total = 0;
8755 int i;
8756 bool found = false;
8757
8758 ret = copy_from_user(pinfo, argp,
8759 sizeof(struct qseecom_ce_info_req));
8760 if (ret)
8761 return ret;
8762
8763 switch (pinfo->usage) {
8764 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8765 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8766 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8767 if (qseecom.support_fde) {
8768 p = qseecom.ce_info.fde;
8769 total = qseecom.ce_info.num_fde;
8770 } else {
8771 pr_err("system does not support fde\n");
8772 return -EINVAL;
8773 }
8774 break;
8775 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8776 if (qseecom.support_pfe) {
8777 p = qseecom.ce_info.pfe;
8778 total = qseecom.ce_info.num_pfe;
8779 } else {
8780 pr_err("system does not support pfe\n");
8781 return -EINVAL;
8782 }
8783 break;
8784 default:
8785 pr_err("unsupported usage %d\n", pinfo->usage);
8786 return -EINVAL;
8787 }
8788
8789 for (i = 0; i < total; i++) {
8790 if (p->alloc &&
8791 !memcmp(p->handle, pinfo->handle,
8792 MAX_CE_INFO_HANDLE_SIZE)) {
8793 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8794 p->alloc = false;
8795 found = true;
8796 break;
8797 }
8798 p++;
8799 }
8800 return ret;
8801}
8802
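/*
 * Query, without allocating, the CE info unit bound to the caller's handle
 * for the requested usage and copy its pipe-pair entries to user space; if
 * no matching unit exists, an invalid unit number and zero entries are
 * returned.
 */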
8803static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8804 void __user *argp)
8805{
8806 struct qseecom_ce_info_req req;
8807 struct qseecom_ce_info_req *pinfo = &req;
8808 int ret = 0;
8809 int i;
8810 unsigned int entries;
8811 struct qseecom_ce_info_use *pce_info_use, *p;
8812 int total = 0;
8813 bool found = false;
8814 struct qseecom_ce_pipe_entry *pce_entry;
8815
8816 ret = copy_from_user(pinfo, argp,
8817 sizeof(struct qseecom_ce_info_req));
8818 if (ret)
8819 return ret;
8820
8821 switch (pinfo->usage) {
8822 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8823 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8824 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8825 if (qseecom.support_fde) {
8826 p = qseecom.ce_info.fde;
8827 total = qseecom.ce_info.num_fde;
8828 } else {
8829 pr_err("system does not support fde\n");
8830 return -EINVAL;
8831 }
8832 break;
8833 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8834 if (qseecom.support_pfe) {
8835 p = qseecom.ce_info.pfe;
8836 total = qseecom.ce_info.num_pfe;
8837 } else {
8838 pr_err("system does not support pfe\n");
8839 return -EINVAL;
8840 }
8841 break;
8842 default:
8843 pr_err("unsupported usage %d\n", pinfo->usage);
8844 return -EINVAL;
8845 }
8846
8847 pce_info_use = NULL;
8848 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8849 pinfo->num_ce_pipe_entries = 0;
8850 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8851 pinfo->ce_pipe_entry[i].valid = 0;
8852
8853 for (i = 0; i < total; i++) {
8854
8855 if (p->alloc && !memcmp(p->handle,
8856 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8857 pce_info_use = p;
8858 found = true;
8859 break;
8860 }
8861 p++;
8862 }
8863 if (!pce_info_use)
8864 goto out;
8865 pinfo->unit_num = pce_info_use->unit_num;
8866 if (pce_info_use->num_ce_pipe_entries >
8867 MAX_CE_PIPE_PAIR_PER_UNIT)
8868 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8869 else
8870 entries = pce_info_use->num_ce_pipe_entries;
8871 pinfo->num_ce_pipe_entries = entries;
8872 pce_entry = pce_info_use->ce_pipe_entry;
8873 for (i = 0; i < entries; i++, pce_entry++)
8874 pinfo->ce_pipe_entry[i] = *pce_entry;
8875 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8876 pinfo->ce_pipe_entry[i].valid = 0;
8877out:
8878 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8879 pr_err("copy_to_user failed\n");
8880 ret = -EFAULT;
8881 }
8882 return ret;
8883}
8884
8885/*
8886 * Check the whitelist feature: if the TZ feature version is below 1.0.0,
8887 * the whitelist feature is not supported.
8888 */
8889static int qseecom_check_whitelist_feature(void)
8890{
8891 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8892
8893 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8894}
8895
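/*
 * Probe: create the qseecom char device, query the QSEE version via an SCM
 * call, set up the ION client, clocks, bus scaling and device-tree based CE
 * data, notify TZ of the secure app region, and start the listener-unregister
 * and TA-unload kthreads before marking the driver READY.
 */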
8896static int qseecom_probe(struct platform_device *pdev)
8897{
8898 int rc;
8899 int i;
8900 uint32_t feature = 10;
8901 struct device *class_dev;
8902 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8903 struct qseecom_command_scm_resp resp;
8904 struct qseecom_ce_info_use *pce_info_use = NULL;
8905
8906 qseecom.qsee_bw_count = 0;
8907 qseecom.qsee_perf_client = 0;
8908 qseecom.qsee_sfpb_bw_count = 0;
8909
8910 qseecom.qsee.ce_core_clk = NULL;
8911 qseecom.qsee.ce_clk = NULL;
8912 qseecom.qsee.ce_core_src_clk = NULL;
8913 qseecom.qsee.ce_bus_clk = NULL;
8914
8915 qseecom.cumulative_mode = 0;
8916 qseecom.current_mode = INACTIVE;
8917 qseecom.support_bus_scaling = false;
8918 qseecom.support_fde = false;
8919 qseecom.support_pfe = false;
8920
8921 qseecom.ce_drv.ce_core_clk = NULL;
8922 qseecom.ce_drv.ce_clk = NULL;
8923 qseecom.ce_drv.ce_core_src_clk = NULL;
8924 qseecom.ce_drv.ce_bus_clk = NULL;
8925 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8926
8927 qseecom.app_block_ref_cnt = 0;
8928 init_waitqueue_head(&qseecom.app_block_wq);
8929 qseecom.whitelist_support = true;
8930
8931 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8932 if (rc < 0) {
8933 pr_err("alloc_chrdev_region failed %d\n", rc);
8934 return rc;
8935 }
8936
8937 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8938 if (IS_ERR(driver_class)) {
8939 rc = -ENOMEM;
8940 pr_err("class_create failed %d\n", rc);
8941 goto exit_unreg_chrdev_region;
8942 }
8943
8944 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8945 QSEECOM_DEV);
8946 if (IS_ERR(class_dev)) {
8947 pr_err("class_device_create failed %d\n", rc);
8948 rc = -ENOMEM;
8949 goto exit_destroy_class;
8950 }
8951
8952 cdev_init(&qseecom.cdev, &qseecom_fops);
8953 qseecom.cdev.owner = THIS_MODULE;
8954
8955 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8956 if (rc < 0) {
8957 pr_err("cdev_add failed %d\n", rc);
8958 goto exit_destroy_device;
8959 }
8960
8961 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
8962	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8963	spin_lock_init(&qseecom.registered_app_list_lock);
8964	INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
8965	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8966	spin_lock_init(&qseecom.registered_kclient_list_lock);
8967	init_waitqueue_head(&qseecom.send_resp_wq);
8968	init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
8969	init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq);
8970	INIT_LIST_HEAD(&qseecom.unload_app_pending_list_head);
8971	init_waitqueue_head(&qseecom.unload_app_kthread_wq);
8972	qseecom.send_resp_flag = 0;
8973
8974 qseecom.qsee_version = QSEEE_VERSION_00;
8975	mutex_lock(&app_access_lock);
8976	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8977		&resp, sizeof(resp));
8978	mutex_unlock(&app_access_lock);
8979	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8980 if (rc) {
8981 pr_err("Failed to get QSEE version info %d\n", rc);
8982 goto exit_del_cdev;
8983 }
8984 qseecom.qsee_version = resp.result;
8985 qseecom.qseos_version = QSEOS_VERSION_14;
8986 qseecom.commonlib_loaded = false;
8987 qseecom.commonlib64_loaded = false;
8988 qseecom.pdev = class_dev;
8989 /* Create ION msm client */
8990 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8991 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8992 pr_err("Ion client cannot be created\n");
8993 rc = -ENOMEM;
8994 goto exit_del_cdev;
8995 }
8996
8997 /* register client for bus scaling */
8998 if (pdev->dev.of_node) {
8999 qseecom.pdev->of_node = pdev->dev.of_node;
9000 qseecom.support_bus_scaling =
9001 of_property_read_bool((&pdev->dev)->of_node,
9002 "qcom,support-bus-scaling");
9003 rc = qseecom_retrieve_ce_data(pdev);
9004 if (rc)
9005 goto exit_destroy_ion_client;
9006 qseecom.appsbl_qseecom_support =
9007 of_property_read_bool((&pdev->dev)->of_node,
9008 "qcom,appsbl-qseecom-support");
9009 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
9010 qseecom.appsbl_qseecom_support);
9011
9012 qseecom.commonlib64_loaded =
9013 of_property_read_bool((&pdev->dev)->of_node,
9014 "qcom,commonlib64-loaded-by-uefi");
9015 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
9016 qseecom.commonlib64_loaded);
9017 qseecom.fde_key_size =
9018 of_property_read_bool((&pdev->dev)->of_node,
9019 "qcom,fde-key-size");
9020 qseecom.no_clock_support =
9021 of_property_read_bool((&pdev->dev)->of_node,
9022 "qcom,no-clock-support");
9023 if (!qseecom.no_clock_support) {
9024 pr_info("qseecom clocks handled by other subsystem\n");
9025 } else {
9026 pr_info("no-clock-support=0x%x",
9027 qseecom.no_clock_support);
9028 }
9029
9030 if (of_property_read_u32((&pdev->dev)->of_node,
9031 "qcom,qsee-reentrancy-support",
9032 &qseecom.qsee_reentrancy_support)) {
9033 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
9034 qseecom.qsee_reentrancy_support = 0;
9035 } else {
9036 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
9037 qseecom.qsee_reentrancy_support);
9038 }
9039
9040		qseecom.enable_key_wrap_in_ks =
9041 of_property_read_bool((&pdev->dev)->of_node,
9042 "qcom,enable-key-wrap-in-ks");
9043 if (qseecom.enable_key_wrap_in_ks) {
9044 pr_warn("qseecom.enable_key_wrap_in_ks = %d\n",
9045 qseecom.enable_key_wrap_in_ks);
9046 }
9047
9048		/*
9049 * The qseecom bus scaling flag can not be enabled when
9050 * crypto clock is not handled by HLOS.
9051 */
9052 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
9053 pr_err("support_bus_scaling flag can not be enabled.\n");
9054 rc = -EINVAL;
9055 goto exit_destroy_ion_client;
9056 }
9057
9058 if (of_property_read_u32((&pdev->dev)->of_node,
9059 "qcom,ce-opp-freq",
9060 &qseecom.ce_opp_freq_hz)) {
9061 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
9062 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
9063 }
9064 rc = __qseecom_init_clk(CLK_QSEE);
9065 if (rc)
9066 goto exit_destroy_ion_client;
9067
9068 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9069 (qseecom.support_pfe || qseecom.support_fde)) {
9070 rc = __qseecom_init_clk(CLK_CE_DRV);
9071 if (rc) {
9072 __qseecom_deinit_clk(CLK_QSEE);
9073 goto exit_destroy_ion_client;
9074 }
9075 } else {
9076 struct qseecom_clk *qclk;
9077
9078 qclk = &qseecom.qsee;
9079 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
9080 qseecom.ce_drv.ce_clk = qclk->ce_clk;
9081 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
9082 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
9083 }
9084
9085 qseecom_platform_support = (struct msm_bus_scale_pdata *)
9086 msm_bus_cl_get_pdata(pdev);
9087 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
9088 (!qseecom.is_apps_region_protected &&
9089 !qseecom.appsbl_qseecom_support)) {
9090 struct resource *resource = NULL;
9091 struct qsee_apps_region_info_ireq req;
9092 struct qsee_apps_region_info_64bit_ireq req_64bit;
9093 struct qseecom_command_scm_resp resp;
9094 void *cmd_buf = NULL;
9095 size_t cmd_len;
9096
9097 resource = platform_get_resource_byname(pdev,
9098 IORESOURCE_MEM, "secapp-region");
9099 if (resource) {
9100 if (qseecom.qsee_version < QSEE_VERSION_40) {
9101 req.qsee_cmd_id =
9102 QSEOS_APP_REGION_NOTIFICATION;
9103 req.addr = (uint32_t)resource->start;
9104 req.size = resource_size(resource);
9105 cmd_buf = (void *)&req;
9106 cmd_len = sizeof(struct
9107 qsee_apps_region_info_ireq);
9108 pr_warn("secure app region addr=0x%x size=0x%x",
9109 req.addr, req.size);
9110 } else {
9111 req_64bit.qsee_cmd_id =
9112 QSEOS_APP_REGION_NOTIFICATION;
9113 req_64bit.addr = resource->start;
9114 req_64bit.size = resource_size(
9115 resource);
9116 cmd_buf = (void *)&req_64bit;
9117 cmd_len = sizeof(struct
9118 qsee_apps_region_info_64bit_ireq);
9119 pr_warn("secure app region addr=0x%llx size=0x%x",
9120 req_64bit.addr, req_64bit.size);
9121 }
9122 } else {
9123 pr_err("Fail to get secure app region info\n");
9124 rc = -EINVAL;
9125 goto exit_deinit_clock;
9126 }
9127 rc = __qseecom_enable_clk(CLK_QSEE);
9128 if (rc) {
9129 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
9130 rc = -EIO;
9131 goto exit_deinit_clock;
9132 }
9133			mutex_lock(&app_access_lock);
9134			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
9135					cmd_buf, cmd_len,
9136					&resp, sizeof(resp));
9137			mutex_unlock(&app_access_lock);
9138			__qseecom_disable_clk(CLK_QSEE);
9139 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
9140 pr_err("send secapp reg fail %d resp.res %d\n",
9141 rc, resp.result);
9142 rc = -EINVAL;
9143 goto exit_deinit_clock;
9144 }
9145 }
9146 /*
9147		 * By default, appsbl only loads cmnlib. If the OEM changes appsbl to
9148		 * load cmnlib64 as well while the cmnlib64 image is not present in
9149		 * non_hlos.bin, also set "qseecom.commonlib64_loaded = true" here.
9150 */
9151 if (qseecom.is_apps_region_protected ||
9152 qseecom.appsbl_qseecom_support)
9153 qseecom.commonlib_loaded = true;
9154 } else {
9155 qseecom_platform_support = (struct msm_bus_scale_pdata *)
9156 pdev->dev.platform_data;
9157 }
9158 if (qseecom.support_bus_scaling) {
9159 init_timer(&(qseecom.bw_scale_down_timer));
9160 INIT_WORK(&qseecom.bw_inactive_req_ws,
9161 qseecom_bw_inactive_req_work);
9162 qseecom.bw_scale_down_timer.function =
9163 qseecom_scale_bus_bandwidth_timer_callback;
9164 }
9165 qseecom.timer_running = false;
9166 qseecom.qsee_perf_client = msm_bus_scale_register_client(
9167 qseecom_platform_support);
9168
9169 qseecom.whitelist_support = qseecom_check_whitelist_feature();
9170 pr_warn("qseecom.whitelist_support = %d\n",
9171 qseecom.whitelist_support);
9172
9173 if (!qseecom.qsee_perf_client)
9174 pr_err("Unable to register bus client\n");
9175
9176	/* create a kthread to process pending listener unregister tasks */
9177 qseecom.unregister_lsnr_kthread_task = kthread_run(
9178 __qseecom_unregister_listener_kthread_func,
9179 NULL, "qseecom-unreg-lsnr");
9180 if (IS_ERR(qseecom.unregister_lsnr_kthread_task)) {
9181 pr_err("failed to create kthread to unregister listener\n");
9182 rc = -EINVAL;
9183 goto exit_deinit_clock;
9184 }
9185 atomic_set(&qseecom.unregister_lsnr_kthread_state,
9186 LSNR_UNREG_KT_SLEEP);
9187
9188	/* create a kthread to process pending TA unload tasks */
9189 qseecom.unload_app_kthread_task = kthread_run(
9190 __qseecom_unload_app_kthread_func,
9191 NULL, "qseecom-unload-ta");
9192 if (IS_ERR(qseecom.unload_app_kthread_task)) {
9193 pr_err("failed to create kthread to unload ta\n");
9194 rc = -EINVAL;
9195 goto exit_kill_unreg_lsnr_kthread;
9196 }
9197 atomic_set(&qseecom.unload_app_kthread_state,
9198 UNLOAD_APP_KT_SLEEP);
9199
9200	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
9201 return 0;
9202
9203exit_kill_unreg_lsnr_kthread:
9204 kthread_stop(qseecom.unregister_lsnr_kthread_task);
9205
9206exit_deinit_clock:
9207 __qseecom_deinit_clk(CLK_QSEE);
9208 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9209 (qseecom.support_pfe || qseecom.support_fde))
9210 __qseecom_deinit_clk(CLK_CE_DRV);
9211exit_destroy_ion_client:
9212 if (qseecom.ce_info.fde) {
9213 pce_info_use = qseecom.ce_info.fde;
9214 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
9215 kzfree(pce_info_use->ce_pipe_entry);
9216 pce_info_use++;
9217 }
9218 kfree(qseecom.ce_info.fde);
9219 }
9220 if (qseecom.ce_info.pfe) {
9221 pce_info_use = qseecom.ce_info.pfe;
9222 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
9223 kzfree(pce_info_use->ce_pipe_entry);
9224 pce_info_use++;
9225 }
9226 kfree(qseecom.ce_info.pfe);
9227 }
9228 ion_client_destroy(qseecom.ion_clnt);
9229exit_del_cdev:
9230 cdev_del(&qseecom.cdev);
9231exit_destroy_device:
9232 device_destroy(driver_class, qseecom_device_no);
9233exit_destroy_class:
9234 class_destroy(driver_class);
9235exit_unreg_chrdev_region:
9236 unregister_chrdev_region(qseecom_device_no, 1);
9237 return rc;
9238}
9239
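/*
 * Remove: unload all kernel-client apps and the common library, release bus
 * scaling and clock resources, stop the worker kthreads, and tear down the
 * ION client and char device created in probe.
 */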
9240static int qseecom_remove(struct platform_device *pdev)
9241{
9242 struct qseecom_registered_kclient_list *kclient = NULL;
9243	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
9244	unsigned long flags = 0;
9245 int ret = 0;
9246 int i;
9247 struct qseecom_ce_pipe_entry *pce_entry;
9248 struct qseecom_ce_info_use *pce_info_use;
9249
9250 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
9251 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
9252
9253	list_for_each_entry_safe(kclient, kclient_tmp,
9254			&qseecom.registered_kclient_list_head, list) {
9255
9256		/* Break the loop if client handle is NULL */
9257		if (!kclient->handle) {
9258 list_del(&kclient->list);
9259 kzfree(kclient);
9260 break;
9261 }
9262
9263 list_del(&kclient->list);
9264 mutex_lock(&app_access_lock);
9265 ret = qseecom_unload_app(kclient->handle->dev, false);
9266 mutex_unlock(&app_access_lock);
9267 if (!ret) {
9268 kzfree(kclient->handle->dev);
9269 kzfree(kclient->handle);
9270 kzfree(kclient);
9271 }
9272 }
9273
9274	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
9275
9276 if (qseecom.qseos_version > QSEEE_VERSION_00)
9277 qseecom_unload_commonlib_image();
9278
9279 if (qseecom.qsee_perf_client)
9280 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
9281 0);
9282 if (pdev->dev.platform_data != NULL)
9283 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
9284
9285 if (qseecom.support_bus_scaling) {
9286 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9287 del_timer_sync(&qseecom.bw_scale_down_timer);
9288 }
9289
9290 if (qseecom.ce_info.fde) {
9291 pce_info_use = qseecom.ce_info.fde;
9292 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
9293 pce_entry = pce_info_use->ce_pipe_entry;
9294 kfree(pce_entry);
9295 pce_info_use++;
9296 }
9297 }
9298 kfree(qseecom.ce_info.fde);
9299 if (qseecom.ce_info.pfe) {
9300 pce_info_use = qseecom.ce_info.pfe;
9301 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
9302 pce_entry = pce_info_use->ce_pipe_entry;
9303 kfree(pce_entry);
9304 pce_info_use++;
9305 }
9306 }
9307 kfree(qseecom.ce_info.pfe);
9308
9309 /* register client for bus scaling */
9310	/* deinit CE clocks if they were initialized from device tree */
9311 __qseecom_deinit_clk(CLK_QSEE);
9312 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9313 (qseecom.support_pfe || qseecom.support_fde))
9314 __qseecom_deinit_clk(CLK_CE_DRV);
9315 }
9316
9317 ion_client_destroy(qseecom.ion_clnt);
9318
9319	kthread_stop(qseecom.unload_app_kthread_task);
9320
9321	kthread_stop(qseecom.unregister_lsnr_kthread_task);
9322
9323	cdev_del(&qseecom.cdev);
9324
9325 device_destroy(driver_class, qseecom_device_no);
9326
9327 class_destroy(driver_class);
9328
9329 unregister_chrdev_region(qseecom_device_no, 1);
9330
9331 return ret;
9332}
9333
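/*
 * Suspend: vote the bus bandwidth down to INACTIVE, disable any CE clocks
 * that are still enabled, and stop the bandwidth scale-down timer while the
 * driver is in the SUSPEND state.
 */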
9334static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
9335{
9336 int ret = 0;
9337 struct qseecom_clk *qclk;
9338
9339 qclk = &qseecom.qsee;
9340 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
9341 if (qseecom.no_clock_support)
9342 return 0;
9343
9344 mutex_lock(&qsee_bw_mutex);
9345 mutex_lock(&clk_access_lock);
9346
9347 if (qseecom.current_mode != INACTIVE) {
9348 ret = msm_bus_scale_client_update_request(
9349 qseecom.qsee_perf_client, INACTIVE);
9350 if (ret)
9351 pr_err("Fail to scale down bus\n");
9352 else
9353 qseecom.current_mode = INACTIVE;
9354 }
9355
9356 if (qclk->clk_access_cnt) {
9357 if (qclk->ce_clk != NULL)
9358 clk_disable_unprepare(qclk->ce_clk);
9359 if (qclk->ce_core_clk != NULL)
9360 clk_disable_unprepare(qclk->ce_core_clk);
9361 if (qclk->ce_bus_clk != NULL)
9362 clk_disable_unprepare(qclk->ce_bus_clk);
9363 }
9364
9365 del_timer_sync(&(qseecom.bw_scale_down_timer));
9366 qseecom.timer_running = false;
9367
9368 mutex_unlock(&clk_access_lock);
9369 mutex_unlock(&qsee_bw_mutex);
9370 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9371
9372 return 0;
9373}
9374
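/*
 * Resume: restore the cumulative bandwidth vote, re-enable CE clocks that
 * were left enabled at suspend, and restart the scale-down timer before
 * marking the driver READY again.
 */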
9375static int qseecom_resume(struct platform_device *pdev)
9376{
9377 int mode = 0;
9378 int ret = 0;
9379 struct qseecom_clk *qclk;
9380
9381 qclk = &qseecom.qsee;
9382 if (qseecom.no_clock_support)
9383 goto exit;
9384
9385 mutex_lock(&qsee_bw_mutex);
9386 mutex_lock(&clk_access_lock);
9387 if (qseecom.cumulative_mode >= HIGH)
9388 mode = HIGH;
9389 else
9390 mode = qseecom.cumulative_mode;
9391
9392 if (qseecom.cumulative_mode != INACTIVE) {
9393 ret = msm_bus_scale_client_update_request(
9394 qseecom.qsee_perf_client, mode);
9395 if (ret)
9396 pr_err("Fail to scale up bus to %d\n", mode);
9397 else
9398 qseecom.current_mode = mode;
9399 }
9400
9401 if (qclk->clk_access_cnt) {
9402 if (qclk->ce_core_clk != NULL) {
9403 ret = clk_prepare_enable(qclk->ce_core_clk);
9404 if (ret) {
9405 pr_err("Unable to enable/prep CE core clk\n");
9406 qclk->clk_access_cnt = 0;
9407 goto err;
9408 }
9409 }
9410 if (qclk->ce_clk != NULL) {
9411 ret = clk_prepare_enable(qclk->ce_clk);
9412 if (ret) {
9413 pr_err("Unable to enable/prep CE iface clk\n");
9414 qclk->clk_access_cnt = 0;
9415 goto ce_clk_err;
9416 }
9417 }
9418 if (qclk->ce_bus_clk != NULL) {
9419 ret = clk_prepare_enable(qclk->ce_bus_clk);
9420 if (ret) {
9421 pr_err("Unable to enable/prep CE bus clk\n");
9422 qclk->clk_access_cnt = 0;
9423 goto ce_bus_clk_err;
9424 }
9425 }
9426 }
9427
9428 if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
9429 qseecom.bw_scale_down_timer.expires = jiffies +
9430 msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
9431 mod_timer(&(qseecom.bw_scale_down_timer),
9432 qseecom.bw_scale_down_timer.expires);
9433 qseecom.timer_running = true;
9434 }
9435
9436 mutex_unlock(&clk_access_lock);
9437 mutex_unlock(&qsee_bw_mutex);
9438 goto exit;
9439
9440ce_bus_clk_err:
9441 if (qclk->ce_clk)
9442 clk_disable_unprepare(qclk->ce_clk);
9443ce_clk_err:
9444 if (qclk->ce_core_clk)
9445 clk_disable_unprepare(qclk->ce_core_clk);
9446err:
9447 mutex_unlock(&clk_access_lock);
9448 mutex_unlock(&qsee_bw_mutex);
9449 ret = -EIO;
9450exit:
9451 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
9452 return ret;
9453}
9454
9455static const struct of_device_id qseecom_match[] = {
9456 {
9457 .compatible = "qcom,qseecom",
9458 },
9459 {}
9460};
9461
9462static struct platform_driver qseecom_plat_driver = {
9463 .probe = qseecom_probe,
9464 .remove = qseecom_remove,
9465 .suspend = qseecom_suspend,
9466 .resume = qseecom_resume,
9467 .driver = {
9468 .name = "qseecom",
9469 .owner = THIS_MODULE,
9470 .of_match_table = qseecom_match,
9471 },
9472};
9473
9474static int qseecom_init(void)
9475{
9476 return platform_driver_register(&qseecom_plat_driver);
9477}
9478
9479static void qseecom_exit(void)
9480{
9481 platform_driver_unregister(&qseecom_plat_driver);
9482}
9483
9484MODULE_LICENSE("GPL v2");
9485MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9486
9487module_init(qseecom_init);
9488module_exit(qseecom_exit);