AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
Zhen Kong87dcf0e2019-01-04 12:34:50 -08004 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
Zhen Kongc4c162a2019-01-23 12:07:12 -080053#include <linux/kthread.h>
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070054
55#define QSEECOM_DEV "qseecom"
56#define QSEOS_VERSION_14 0x14
57#define QSEEE_VERSION_00 0x400000
58#define QSEE_VERSION_01 0x401000
59#define QSEE_VERSION_02 0x402000
60#define QSEE_VERSION_03 0x403000
61#define QSEE_VERSION_04 0x404000
62#define QSEE_VERSION_05 0x405000
63#define QSEE_VERSION_20 0x800000
64#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
65
66#define QSEE_CE_CLK_100MHZ 100000000
67#define CE_CLK_DIV 1000000
68
Mohamed Sunfeer105a07b2018-08-29 13:52:40 +053069#define QSEECOM_MAX_SG_ENTRY 4096
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070070#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
71 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
72
73#define QSEECOM_INVALID_KEY_ID 0xff
74
75/* Save partition image hash for authentication check */
76#define SCM_SAVE_PARTITION_HASH_ID 0x01
77
 78/* Check if enterprise security is activated */
79#define SCM_IS_ACTIVATED_ID 0x02
80
81/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
82#define SCM_MDTP_CIPHER_DIP 0x01
83
84/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
85#define MAX_DIP 0x20000
86
87#define RPMB_SERVICE 0x2000
88#define SSD_SERVICE 0x3000
89
90#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
91#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
92#define TWO 2
93#define QSEECOM_UFS_ICE_CE_NUM 10
94#define QSEECOM_SDCC_ICE_CE_NUM 20
95#define QSEECOM_ICE_FDE_KEY_INDEX 0
96
97#define PHY_ADDR_4G (1ULL<<32)
98
99#define QSEECOM_STATE_NOT_READY 0
100#define QSEECOM_STATE_SUSPEND 1
101#define QSEECOM_STATE_READY 2
102#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
103
 104/*
 105 * The default ce info unit is 0 for
 106 * services which support
 107 * only a single instance.
 108 * Most services are in this category.
 109 */
110#define DEFAULT_CE_INFO_UNIT 0
111#define DEFAULT_NUM_CE_INFO_UNIT 1
112
Jiten Patela7bb1d52018-05-11 12:34:26 +0530113#define FDE_FLAG_POS 4
114#define ENABLE_KEY_WRAP_IN_KS (1 << FDE_FLAG_POS)
115
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700116enum qseecom_clk_definitions {
117 CLK_DFAB = 0,
118 CLK_SFPB,
119};
120
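/*
 * Note: QSEECOM_ICE_FDE_KEY_SIZE_MASK (2) is used below as a shift amount,
 * i.e. the ICE FDE key-size field starts at bit 2 of the flag word; it is
 * not a literal bit mask despite the name.
 */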
121enum qseecom_ice_key_size_type {
122 QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
123 (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
124 QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
125 (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
126 QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
127 (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
128};
129
130enum qseecom_client_handle_type {
131 QSEECOM_CLIENT_APP = 1,
132 QSEECOM_LISTENER_SERVICE,
133 QSEECOM_SECURE_SERVICE,
134 QSEECOM_GENERIC,
135 QSEECOM_UNAVAILABLE_CLIENT_APP,
136};
137
138enum qseecom_ce_hw_instance {
139 CLK_QSEE = 0,
140 CLK_CE_DRV,
141 CLK_INVALID,
142};
143
Zhen Kongc4c162a2019-01-23 12:07:12 -0800144enum qseecom_listener_unregister_kthread_state {
145 LSNR_UNREG_KT_SLEEP = 0,
146 LSNR_UNREG_KT_WAKEUP,
147};
148
Zhen Kong03b2eae2019-09-17 16:58:46 -0700149enum qseecom_unload_app_kthread_state {
150 UNLOAD_APP_KT_SLEEP = 0,
151 UNLOAD_APP_KT_WAKEUP,
152};
153
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700154static struct class *driver_class;
155static dev_t qseecom_device_no;
156
157static DEFINE_MUTEX(qsee_bw_mutex);
158static DEFINE_MUTEX(app_access_lock);
159static DEFINE_MUTEX(clk_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -0800160static DEFINE_MUTEX(listener_access_lock);
Zhen Kong03b2eae2019-09-17 16:58:46 -0700161static DEFINE_MUTEX(unload_app_pending_list_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -0800162
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700163
164struct sglist_info {
165 uint32_t indexAndFlags;
166 uint32_t sizeOrCount;
167};
168
 169/*
 170 * Bit 31 indicates whether the request buffer holds one physical address
 171 * or several. If it is set, the index locates a single physical address
 172 * inside the request buffer, and `sizeOrCount` is the size of the memory
 173 * being shared at that physical address.
 174 * Otherwise, the index locates an array of {start, len} pairs (a
 175 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
 176 * that array.
 177 *
 178 * Bit 30 selects 64-bit vs 32-bit addressing: when it is set, physical
 179 * addresses and scatter/gather entry sizes are 64-bit values; otherwise,
 180 * 32-bit values.
 181 *
 182 * Bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
 183 */
183#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
184 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
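/*
 * Illustration only (values are made up): to describe a table of three
 * 64-bit {start, len} entries placed at offset 0x100 of the request
 * buffer, the fields would be filled as
 *
 *	info.indexAndFlags = SGLISTINFO_SET_INDEX_FLAG(0, 1, 0x100);
 *	info.sizeOrCount = 3;
 *
 * i.e. bit 31 clear (multiple entries), bit 30 set (64-bit values), and
 * bits [0:29] holding the 0x100 offset.
 */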
185
186#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
187
 188#define FEATURE_ID_WHITELIST 15 /* whitelist feature id */
189
190#define MAKE_WHITELIST_VERSION(major, minor, patch) \
191 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
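/*
 * Illustration only: MAKE_WHITELIST_VERSION(1, 1, 0) places major in
 * bits [22:31], minor in bits [12:21] and patch in bits [0:11], giving
 * (1 << 22) | (1 << 12) = 0x401000.
 */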
192
193struct qseecom_registered_listener_list {
194 struct list_head list;
195 struct qseecom_register_listener_req svc;
196 void *user_virt_sb_base;
197 u8 *sb_virt;
198 phys_addr_t sb_phys;
199 size_t sb_length;
200 struct ion_handle *ihandle; /* Retrieve phy addr */
201 wait_queue_head_t rcv_req_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800202 /* rcv_req_flag: 0: ready and empty; 1: received req */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700203 int rcv_req_flag;
204 int send_resp_flag;
205 bool listener_in_use;
206 /* wq for thread blocked on this listener*/
207 wait_queue_head_t listener_block_app_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800208 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
209 uint32_t sglist_cnt;
210 int abort;
211 bool unregister_pending;
212};
213
214struct qseecom_unregister_pending_list {
215 struct list_head list;
216 struct qseecom_dev_handle *data;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700217};
218
219struct qseecom_registered_app_list {
220 struct list_head list;
221 u32 app_id;
222 u32 ref_cnt;
223 char app_name[MAX_APP_NAME_SIZE];
224 u32 app_arch;
225 bool app_blocked;
Zhen Kongdea10592018-07-30 17:50:10 -0700226 u32 check_block;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700227 u32 blocked_on_listener_id;
228};
229
230struct qseecom_registered_kclient_list {
231 struct list_head list;
232 struct qseecom_handle *handle;
233};
234
235struct qseecom_ce_info_use {
236 unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
237 unsigned int unit_num;
238 unsigned int num_ce_pipe_entries;
239 struct qseecom_ce_pipe_entry *ce_pipe_entry;
240 bool alloc;
241 uint32_t type;
242};
243
244struct ce_hw_usage_info {
245 uint32_t qsee_ce_hw_instance;
246 uint32_t num_fde;
247 struct qseecom_ce_info_use *fde;
248 uint32_t num_pfe;
249 struct qseecom_ce_info_use *pfe;
250};
251
252struct qseecom_clk {
253 enum qseecom_ce_hw_instance instance;
254 struct clk *ce_core_clk;
255 struct clk *ce_clk;
256 struct clk *ce_core_src_clk;
257 struct clk *ce_bus_clk;
258 uint32_t clk_access_cnt;
259};
260
261struct qseecom_control {
262 struct ion_client *ion_clnt; /* Ion client */
263 struct list_head registered_listener_list_head;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700264
265 struct list_head registered_app_list_head;
266 spinlock_t registered_app_list_lock;
267
268 struct list_head registered_kclient_list_head;
269 spinlock_t registered_kclient_list_lock;
270
271 wait_queue_head_t send_resp_wq;
272 int send_resp_flag;
273
274 uint32_t qseos_version;
275 uint32_t qsee_version;
276 struct device *pdev;
277 bool whitelist_support;
278 bool commonlib_loaded;
279 bool commonlib64_loaded;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700280 struct ce_hw_usage_info ce_info;
281
282 int qsee_bw_count;
283 int qsee_sfpb_bw_count;
284
285 uint32_t qsee_perf_client;
286 struct qseecom_clk qsee;
287 struct qseecom_clk ce_drv;
288
289 bool support_bus_scaling;
290 bool support_fde;
291 bool support_pfe;
292 bool fde_key_size;
293 uint32_t cumulative_mode;
294 enum qseecom_bandwidth_request_mode current_mode;
295 struct timer_list bw_scale_down_timer;
296 struct work_struct bw_inactive_req_ws;
297 struct cdev cdev;
298 bool timer_running;
299 bool no_clock_support;
300 unsigned int ce_opp_freq_hz;
301 bool appsbl_qseecom_support;
302 uint32_t qsee_reentrancy_support;
Jiten Patela7bb1d52018-05-11 12:34:26 +0530303 bool enable_key_wrap_in_ks;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700304
305 uint32_t app_block_ref_cnt;
306 wait_queue_head_t app_block_wq;
307 atomic_t qseecom_state;
308 int is_apps_region_protected;
Zhen Kong2f60f492017-06-29 15:22:14 -0700309 bool smcinvoke_support;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800310
311 struct list_head unregister_lsnr_pending_list_head;
312 wait_queue_head_t register_lsnr_pending_wq;
Zhen Kongc4c162a2019-01-23 12:07:12 -0800313 struct task_struct *unregister_lsnr_kthread_task;
314 wait_queue_head_t unregister_lsnr_kthread_wq;
315 atomic_t unregister_lsnr_kthread_state;
Zhen Kong03b2eae2019-09-17 16:58:46 -0700316
317 struct list_head unload_app_pending_list_head;
318 struct task_struct *unload_app_kthread_task;
319 wait_queue_head_t unload_app_kthread_wq;
320 atomic_t unload_app_kthread_state;
321};
322
323struct qseecom_unload_app_pending_list {
324 struct list_head list;
325 struct qseecom_dev_handle *data;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700326};
327
328struct qseecom_sec_buf_fd_info {
329 bool is_sec_buf_fd;
330 size_t size;
331 void *vbase;
332 dma_addr_t pbase;
333};
334
335struct qseecom_param_memref {
336 uint32_t buffer;
337 uint32_t size;
338};
339
340struct qseecom_client_handle {
341 u32 app_id;
342 u8 *sb_virt;
343 phys_addr_t sb_phys;
344 unsigned long user_virt_sb_base;
345 size_t sb_length;
346 struct ion_handle *ihandle; /* Retrieve phy addr */
347 char app_name[MAX_APP_NAME_SIZE];
348 u32 app_arch;
349 struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
Zhen Kong0ea975d2019-03-12 14:40:24 -0700350 bool from_smcinvoke;
Zhen Kong03b2eae2019-09-17 16:58:46 -0700351 bool unload_pending;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700352};
353
354struct qseecom_listener_handle {
355 u32 id;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800356 bool unregister_pending;
Zhen Kong87dcf0e2019-01-04 12:34:50 -0800357 bool release_called;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700358};
359
360static struct qseecom_control qseecom;
361
362struct qseecom_dev_handle {
363 enum qseecom_client_handle_type type;
364 union {
365 struct qseecom_client_handle client;
366 struct qseecom_listener_handle listener;
367 };
368 bool released;
369 int abort;
370 wait_queue_head_t abort_wq;
371 atomic_t ioctl_count;
372 bool perf_enabled;
373 bool fast_load_enabled;
374 enum qseecom_bandwidth_request_mode mode;
375 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
376 uint32_t sglist_cnt;
377 bool use_legacy_cmd;
378};
379
380struct qseecom_key_id_usage_desc {
381 uint8_t desc[QSEECOM_KEY_ID_SIZE];
382};
383
384struct qseecom_crypto_info {
385 unsigned int unit_num;
386 unsigned int ce;
387 unsigned int pipe_pair;
388};
389
390static struct qseecom_key_id_usage_desc key_id_array[] = {
391 {
392 .desc = "Undefined Usage Index",
393 },
394
395 {
396 .desc = "Full Disk Encryption",
397 },
398
399 {
400 .desc = "Per File Encryption",
401 },
402
403 {
404 .desc = "UFS ICE Full Disk Encryption",
405 },
406
407 {
408 .desc = "SDCC ICE Full Disk Encryption",
409 },
410};
411
412/* Function proto types */
413static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
414static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
415static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
416static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
417static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
418static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
419 char *cmnlib_name);
420static int qseecom_enable_ice_setup(int usage);
421static int qseecom_disable_ice_setup(int usage);
422static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
423static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
424 void __user *argp);
425static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
426 void __user *argp);
427static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
428 void __user *argp);
Zhen Kong03b2eae2019-09-17 16:58:46 -0700429static int __qseecom_unload_app(struct qseecom_dev_handle *data,
430 uint32_t app_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700431
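/*
 * Parse "androidboot.keymaster=<val>" from the kernel command line into
 * qseecom.is_apps_region_protected (presumably non-zero when the
 * bootloader has already protected the TZ apps region).
 */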
432static int get_qseecom_keymaster_status(char *str)
433{
434 get_option(&str, &qseecom.is_apps_region_protected);
435 return 1;
436}
437__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
438
Zhen Kong03f220d2019-02-01 17:12:34 -0800439
440#define QSEECOM_SCM_EBUSY_WAIT_MS 30
441#define QSEECOM_SCM_EBUSY_MAX_RETRY 67
442
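/*
 * Retry wrapper for SMC calls that may return -EBUSY: with
 * QSEECOM_SCM_EBUSY_WAIT_MS = 30 and QSEECOM_SCM_EBUSY_MAX_RETRY = 67 the
 * call is retried for roughly two seconds in total, warning once after
 * about one second (33 * 30 ms). app_access_lock is released around each
 * sleep so other callers are not blocked while the secure world is busy.
 */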
443static int __qseecom_scm_call2_locked(uint32_t smc_id, struct scm_desc *desc)
444{
445 int ret = 0;
446 int retry_count = 0;
447
448 do {
449 ret = scm_call2_noretry(smc_id, desc);
450 if (ret == -EBUSY) {
451 mutex_unlock(&app_access_lock);
452 msleep(QSEECOM_SCM_EBUSY_WAIT_MS);
453 mutex_lock(&app_access_lock);
454 }
455 if (retry_count == 33)
456 pr_warn("secure world has been busy for 1 second!\n");
457 } while (ret == -EBUSY &&
458 (retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY));
459 return ret;
460}
461
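/*
 * Translate a legacy QSEOS/SCM request buffer into an ARMv8 SMC call:
 * select the smc_id/arginfo for the given service and command id, copy
 * the request fields into the scm_desc arguments (using the 32-bit or
 * 64-bit layout depending on qsee_version), issue the call, and return
 * the TZ result/resp_type/data in resp_buf.
 */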
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700462static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
463 const void *req_buf, void *resp_buf)
464{
465 int ret = 0;
466 uint32_t smc_id = 0;
467 uint32_t qseos_cmd_id = 0;
468 struct scm_desc desc = {0};
469 struct qseecom_command_scm_resp *scm_resp = NULL;
470
471 if (!req_buf || !resp_buf) {
472 pr_err("Invalid buffer pointer\n");
473 return -EINVAL;
474 }
475 qseos_cmd_id = *(uint32_t *)req_buf;
476 scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
477
478 switch (svc_id) {
479 case 6: {
480 if (tz_cmd_id == 3) {
481 smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
482 desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
483 desc.args[0] = *(uint32_t *)req_buf;
484 } else {
485 pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
486 svc_id, tz_cmd_id);
487 return -EINVAL;
488 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800489 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700490 break;
491 }
492 case SCM_SVC_ES: {
493 switch (tz_cmd_id) {
494 case SCM_SAVE_PARTITION_HASH_ID: {
495 u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
496 struct qseecom_save_partition_hash_req *p_hash_req =
497 (struct qseecom_save_partition_hash_req *)
498 req_buf;
499 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
500
501 if (!tzbuf)
502 return -ENOMEM;
503 memset(tzbuf, 0, tzbuflen);
504 memcpy(tzbuf, p_hash_req->digest,
505 SHA256_DIGEST_LENGTH);
506 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
507 smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
508 desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
509 desc.args[0] = p_hash_req->partition_id;
510 desc.args[1] = virt_to_phys(tzbuf);
511 desc.args[2] = SHA256_DIGEST_LENGTH;
Zhen Kong03f220d2019-02-01 17:12:34 -0800512 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700513 kzfree(tzbuf);
514 break;
515 }
516 default: {
517 pr_err("tz_cmd_id %d is not supported by scm_call2\n",
518 tz_cmd_id);
519 ret = -EINVAL;
520 break;
521 }
522 } /* end of switch (tz_cmd_id) */
523 break;
524 } /* end of case SCM_SVC_ES */
525 case SCM_SVC_TZSCHEDULER: {
526 switch (qseos_cmd_id) {
527 case QSEOS_APP_START_COMMAND: {
528 struct qseecom_load_app_ireq *req;
529 struct qseecom_load_app_64bit_ireq *req_64bit;
530
531 smc_id = TZ_OS_APP_START_ID;
532 desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
533 if (qseecom.qsee_version < QSEE_VERSION_40) {
534 req = (struct qseecom_load_app_ireq *)req_buf;
535 desc.args[0] = req->mdt_len;
536 desc.args[1] = req->img_len;
537 desc.args[2] = req->phy_addr;
538 } else {
539 req_64bit =
540 (struct qseecom_load_app_64bit_ireq *)
541 req_buf;
542 desc.args[0] = req_64bit->mdt_len;
543 desc.args[1] = req_64bit->img_len;
544 desc.args[2] = req_64bit->phy_addr;
545 }
546 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800547 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700548 break;
549 }
550 case QSEOS_APP_SHUTDOWN_COMMAND: {
551 struct qseecom_unload_app_ireq *req;
552
553 req = (struct qseecom_unload_app_ireq *)req_buf;
554 smc_id = TZ_OS_APP_SHUTDOWN_ID;
555 desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
556 desc.args[0] = req->app_id;
Zhen Kongaf127672019-06-10 13:06:41 -0700557 ret = scm_call2(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700558 break;
559 }
560 case QSEOS_APP_LOOKUP_COMMAND: {
561 struct qseecom_check_app_ireq *req;
562 u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
563 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
564
565 if (!tzbuf)
566 return -ENOMEM;
567 req = (struct qseecom_check_app_ireq *)req_buf;
568 pr_debug("Lookup app_name = %s\n", req->app_name);
569 strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
570 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
571 smc_id = TZ_OS_APP_LOOKUP_ID;
572 desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
573 desc.args[0] = virt_to_phys(tzbuf);
574 desc.args[1] = strlen(req->app_name);
575 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800576 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700577 kzfree(tzbuf);
578 break;
579 }
580 case QSEOS_APP_REGION_NOTIFICATION: {
581 struct qsee_apps_region_info_ireq *req;
582 struct qsee_apps_region_info_64bit_ireq *req_64bit;
583
584 smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
585 desc.arginfo =
586 TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
587 if (qseecom.qsee_version < QSEE_VERSION_40) {
588 req = (struct qsee_apps_region_info_ireq *)
589 req_buf;
590 desc.args[0] = req->addr;
591 desc.args[1] = req->size;
592 } else {
593 req_64bit =
594 (struct qsee_apps_region_info_64bit_ireq *)
595 req_buf;
596 desc.args[0] = req_64bit->addr;
597 desc.args[1] = req_64bit->size;
598 }
599 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800600 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700601 break;
602 }
603 case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
604 struct qseecom_load_lib_image_ireq *req;
605 struct qseecom_load_lib_image_64bit_ireq *req_64bit;
606
607 smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
608 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
609 if (qseecom.qsee_version < QSEE_VERSION_40) {
610 req = (struct qseecom_load_lib_image_ireq *)
611 req_buf;
612 desc.args[0] = req->mdt_len;
613 desc.args[1] = req->img_len;
614 desc.args[2] = req->phy_addr;
615 } else {
616 req_64bit =
617 (struct qseecom_load_lib_image_64bit_ireq *)
618 req_buf;
619 desc.args[0] = req_64bit->mdt_len;
620 desc.args[1] = req_64bit->img_len;
621 desc.args[2] = req_64bit->phy_addr;
622 }
623 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800624 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700625 break;
626 }
627 case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
628 smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
629 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
630 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800631 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700632 break;
633 }
634 case QSEOS_REGISTER_LISTENER: {
635 struct qseecom_register_listener_ireq *req;
636 struct qseecom_register_listener_64bit_ireq *req_64bit;
637
638 desc.arginfo =
639 TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
640 if (qseecom.qsee_version < QSEE_VERSION_40) {
641 req = (struct qseecom_register_listener_ireq *)
642 req_buf;
643 desc.args[0] = req->listener_id;
644 desc.args[1] = req->sb_ptr;
645 desc.args[2] = req->sb_len;
646 } else {
647 req_64bit =
648 (struct qseecom_register_listener_64bit_ireq *)
649 req_buf;
650 desc.args[0] = req_64bit->listener_id;
651 desc.args[1] = req_64bit->sb_ptr;
652 desc.args[2] = req_64bit->sb_len;
653 }
Zhen Kong2f60f492017-06-29 15:22:14 -0700654 qseecom.smcinvoke_support = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700655 smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
Zhen Kong03f220d2019-02-01 17:12:34 -0800656 ret = __qseecom_scm_call2_locked(smc_id, &desc);
Zhen Kong50a15202019-01-29 14:16:00 -0800657 if (ret == -EIO) {
658 /* smcinvoke is not supported */
Zhen Kong2f60f492017-06-29 15:22:14 -0700659 qseecom.smcinvoke_support = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700660 smc_id = TZ_OS_REGISTER_LISTENER_ID;
Zhen Kong03f220d2019-02-01 17:12:34 -0800661 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700662 }
663 break;
664 }
665 case QSEOS_DEREGISTER_LISTENER: {
666 struct qseecom_unregister_listener_ireq *req;
667
668 req = (struct qseecom_unregister_listener_ireq *)
669 req_buf;
670 smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
671 desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
672 desc.args[0] = req->listener_id;
Zhen Kong03f220d2019-02-01 17:12:34 -0800673 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700674 break;
675 }
676 case QSEOS_LISTENER_DATA_RSP_COMMAND: {
677 struct qseecom_client_listener_data_irsp *req;
678
679 req = (struct qseecom_client_listener_data_irsp *)
680 req_buf;
681 smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
682 desc.arginfo =
683 TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
684 desc.args[0] = req->listener_id;
685 desc.args[1] = req->status;
Zhen Kong03f220d2019-02-01 17:12:34 -0800686 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700687 break;
688 }
689 case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
690 struct qseecom_client_listener_data_irsp *req;
691 struct qseecom_client_listener_data_64bit_irsp *req_64;
692
693 smc_id =
694 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
695 desc.arginfo =
696 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
697 if (qseecom.qsee_version < QSEE_VERSION_40) {
698 req =
699 (struct qseecom_client_listener_data_irsp *)
700 req_buf;
701 desc.args[0] = req->listener_id;
702 desc.args[1] = req->status;
703 desc.args[2] = req->sglistinfo_ptr;
704 desc.args[3] = req->sglistinfo_len;
705 } else {
706 req_64 =
707 (struct qseecom_client_listener_data_64bit_irsp *)
708 req_buf;
709 desc.args[0] = req_64->listener_id;
710 desc.args[1] = req_64->status;
711 desc.args[2] = req_64->sglistinfo_ptr;
712 desc.args[3] = req_64->sglistinfo_len;
713 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800714 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700715 break;
716 }
717 case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
718 struct qseecom_load_app_ireq *req;
719 struct qseecom_load_app_64bit_ireq *req_64bit;
720
721 smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
722 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
723 if (qseecom.qsee_version < QSEE_VERSION_40) {
724 req = (struct qseecom_load_app_ireq *)req_buf;
725 desc.args[0] = req->mdt_len;
726 desc.args[1] = req->img_len;
727 desc.args[2] = req->phy_addr;
728 } else {
729 req_64bit =
730 (struct qseecom_load_app_64bit_ireq *)req_buf;
731 desc.args[0] = req_64bit->mdt_len;
732 desc.args[1] = req_64bit->img_len;
733 desc.args[2] = req_64bit->phy_addr;
734 }
735 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800736 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700737 break;
738 }
739 case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
740 smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
741 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
742 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800743 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700744 break;
745 }
746
747 case QSEOS_CLIENT_SEND_DATA_COMMAND: {
748 struct qseecom_client_send_data_ireq *req;
749 struct qseecom_client_send_data_64bit_ireq *req_64bit;
750
751 smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
752 desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
753 if (qseecom.qsee_version < QSEE_VERSION_40) {
754 req = (struct qseecom_client_send_data_ireq *)
755 req_buf;
756 desc.args[0] = req->app_id;
757 desc.args[1] = req->req_ptr;
758 desc.args[2] = req->req_len;
759 desc.args[3] = req->rsp_ptr;
760 desc.args[4] = req->rsp_len;
761 } else {
762 req_64bit =
763 (struct qseecom_client_send_data_64bit_ireq *)
764 req_buf;
765 desc.args[0] = req_64bit->app_id;
766 desc.args[1] = req_64bit->req_ptr;
767 desc.args[2] = req_64bit->req_len;
768 desc.args[3] = req_64bit->rsp_ptr;
769 desc.args[4] = req_64bit->rsp_len;
770 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800771 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700772 break;
773 }
774 case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
775 struct qseecom_client_send_data_ireq *req;
776 struct qseecom_client_send_data_64bit_ireq *req_64bit;
777
778 smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
779 desc.arginfo =
780 TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
781 if (qseecom.qsee_version < QSEE_VERSION_40) {
782 req = (struct qseecom_client_send_data_ireq *)
783 req_buf;
784 desc.args[0] = req->app_id;
785 desc.args[1] = req->req_ptr;
786 desc.args[2] = req->req_len;
787 desc.args[3] = req->rsp_ptr;
788 desc.args[4] = req->rsp_len;
789 desc.args[5] = req->sglistinfo_ptr;
790 desc.args[6] = req->sglistinfo_len;
791 } else {
792 req_64bit =
793 (struct qseecom_client_send_data_64bit_ireq *)
794 req_buf;
795 desc.args[0] = req_64bit->app_id;
796 desc.args[1] = req_64bit->req_ptr;
797 desc.args[2] = req_64bit->req_len;
798 desc.args[3] = req_64bit->rsp_ptr;
799 desc.args[4] = req_64bit->rsp_len;
800 desc.args[5] = req_64bit->sglistinfo_ptr;
801 desc.args[6] = req_64bit->sglistinfo_len;
802 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800803 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700804 break;
805 }
806 case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
807 struct qseecom_client_send_service_ireq *req;
808
809 req = (struct qseecom_client_send_service_ireq *)
810 req_buf;
811 smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
812 desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
813 desc.args[0] = req->key_type;
814 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800815 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700816 break;
817 }
818 case QSEOS_RPMB_ERASE_COMMAND: {
819 smc_id = TZ_OS_RPMB_ERASE_ID;
820 desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
821 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800822 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700823 break;
824 }
825 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
826 smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
827 desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
828 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800829 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700830 break;
831 }
832 case QSEOS_GENERATE_KEY: {
833 u32 tzbuflen = PAGE_ALIGN(sizeof
834 (struct qseecom_key_generate_ireq) -
835 sizeof(uint32_t));
836 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
837
838 if (!tzbuf)
839 return -ENOMEM;
840 memset(tzbuf, 0, tzbuflen);
841 memcpy(tzbuf, req_buf + sizeof(uint32_t),
842 (sizeof(struct qseecom_key_generate_ireq) -
843 sizeof(uint32_t)));
844 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
845 smc_id = TZ_OS_KS_GEN_KEY_ID;
846 desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
847 desc.args[0] = virt_to_phys(tzbuf);
848 desc.args[1] = tzbuflen;
849 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800850 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700851 kzfree(tzbuf);
852 break;
853 }
854 case QSEOS_DELETE_KEY: {
855 u32 tzbuflen = PAGE_ALIGN(sizeof
856 (struct qseecom_key_delete_ireq) -
857 sizeof(uint32_t));
858 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
859
860 if (!tzbuf)
861 return -ENOMEM;
862 memset(tzbuf, 0, tzbuflen);
863 memcpy(tzbuf, req_buf + sizeof(uint32_t),
864 (sizeof(struct qseecom_key_delete_ireq) -
865 sizeof(uint32_t)));
866 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
867 smc_id = TZ_OS_KS_DEL_KEY_ID;
868 desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
869 desc.args[0] = virt_to_phys(tzbuf);
870 desc.args[1] = tzbuflen;
871 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800872 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700873 kzfree(tzbuf);
874 break;
875 }
876 case QSEOS_SET_KEY: {
877 u32 tzbuflen = PAGE_ALIGN(sizeof
878 (struct qseecom_key_select_ireq) -
879 sizeof(uint32_t));
880 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
881
882 if (!tzbuf)
883 return -ENOMEM;
884 memset(tzbuf, 0, tzbuflen);
885 memcpy(tzbuf, req_buf + sizeof(uint32_t),
886 (sizeof(struct qseecom_key_select_ireq) -
887 sizeof(uint32_t)));
888 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
889 smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
890 desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
891 desc.args[0] = virt_to_phys(tzbuf);
892 desc.args[1] = tzbuflen;
893 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800894 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700895 kzfree(tzbuf);
896 break;
897 }
898 case QSEOS_UPDATE_KEY_USERINFO: {
899 u32 tzbuflen = PAGE_ALIGN(sizeof
900 (struct qseecom_key_userinfo_update_ireq) -
901 sizeof(uint32_t));
902 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
903
904 if (!tzbuf)
905 return -ENOMEM;
906 memset(tzbuf, 0, tzbuflen);
907 memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
908 (struct qseecom_key_userinfo_update_ireq) -
909 sizeof(uint32_t)));
910 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
911 smc_id = TZ_OS_KS_UPDATE_KEY_ID;
912 desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
913 desc.args[0] = virt_to_phys(tzbuf);
914 desc.args[1] = tzbuflen;
915 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800916 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700917 kzfree(tzbuf);
918 break;
919 }
920 case QSEOS_TEE_OPEN_SESSION: {
921 struct qseecom_qteec_ireq *req;
922 struct qseecom_qteec_64bit_ireq *req_64bit;
923
924 smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
925 desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
926 if (qseecom.qsee_version < QSEE_VERSION_40) {
927 req = (struct qseecom_qteec_ireq *)req_buf;
928 desc.args[0] = req->app_id;
929 desc.args[1] = req->req_ptr;
930 desc.args[2] = req->req_len;
931 desc.args[3] = req->resp_ptr;
932 desc.args[4] = req->resp_len;
933 } else {
934 req_64bit = (struct qseecom_qteec_64bit_ireq *)
935 req_buf;
936 desc.args[0] = req_64bit->app_id;
937 desc.args[1] = req_64bit->req_ptr;
938 desc.args[2] = req_64bit->req_len;
939 desc.args[3] = req_64bit->resp_ptr;
940 desc.args[4] = req_64bit->resp_len;
941 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800942 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700943 break;
944 }
945 case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
946 struct qseecom_qteec_ireq *req;
947 struct qseecom_qteec_64bit_ireq *req_64bit;
948
949 smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
950 desc.arginfo =
951 TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
952 if (qseecom.qsee_version < QSEE_VERSION_40) {
953 req = (struct qseecom_qteec_ireq *)req_buf;
954 desc.args[0] = req->app_id;
955 desc.args[1] = req->req_ptr;
956 desc.args[2] = req->req_len;
957 desc.args[3] = req->resp_ptr;
958 desc.args[4] = req->resp_len;
959 desc.args[5] = req->sglistinfo_ptr;
960 desc.args[6] = req->sglistinfo_len;
961 } else {
962 req_64bit = (struct qseecom_qteec_64bit_ireq *)
963 req_buf;
964 desc.args[0] = req_64bit->app_id;
965 desc.args[1] = req_64bit->req_ptr;
966 desc.args[2] = req_64bit->req_len;
967 desc.args[3] = req_64bit->resp_ptr;
968 desc.args[4] = req_64bit->resp_len;
969 desc.args[5] = req_64bit->sglistinfo_ptr;
970 desc.args[6] = req_64bit->sglistinfo_len;
971 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800972 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700973 break;
974 }
975 case QSEOS_TEE_INVOKE_COMMAND: {
976 struct qseecom_qteec_ireq *req;
977 struct qseecom_qteec_64bit_ireq *req_64bit;
978
979 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
980 desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
981 if (qseecom.qsee_version < QSEE_VERSION_40) {
982 req = (struct qseecom_qteec_ireq *)req_buf;
983 desc.args[0] = req->app_id;
984 desc.args[1] = req->req_ptr;
985 desc.args[2] = req->req_len;
986 desc.args[3] = req->resp_ptr;
987 desc.args[4] = req->resp_len;
988 } else {
989 req_64bit = (struct qseecom_qteec_64bit_ireq *)
990 req_buf;
991 desc.args[0] = req_64bit->app_id;
992 desc.args[1] = req_64bit->req_ptr;
993 desc.args[2] = req_64bit->req_len;
994 desc.args[3] = req_64bit->resp_ptr;
995 desc.args[4] = req_64bit->resp_len;
996 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800997 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700998 break;
999 }
1000 case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
1001 struct qseecom_qteec_ireq *req;
1002 struct qseecom_qteec_64bit_ireq *req_64bit;
1003
1004 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
1005 desc.arginfo =
1006 TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
1007 if (qseecom.qsee_version < QSEE_VERSION_40) {
1008 req = (struct qseecom_qteec_ireq *)req_buf;
1009 desc.args[0] = req->app_id;
1010 desc.args[1] = req->req_ptr;
1011 desc.args[2] = req->req_len;
1012 desc.args[3] = req->resp_ptr;
1013 desc.args[4] = req->resp_len;
1014 desc.args[5] = req->sglistinfo_ptr;
1015 desc.args[6] = req->sglistinfo_len;
1016 } else {
1017 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1018 req_buf;
1019 desc.args[0] = req_64bit->app_id;
1020 desc.args[1] = req_64bit->req_ptr;
1021 desc.args[2] = req_64bit->req_len;
1022 desc.args[3] = req_64bit->resp_ptr;
1023 desc.args[4] = req_64bit->resp_len;
1024 desc.args[5] = req_64bit->sglistinfo_ptr;
1025 desc.args[6] = req_64bit->sglistinfo_len;
1026 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001027 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001028 break;
1029 }
1030 case QSEOS_TEE_CLOSE_SESSION: {
1031 struct qseecom_qteec_ireq *req;
1032 struct qseecom_qteec_64bit_ireq *req_64bit;
1033
1034 smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
1035 desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
1036 if (qseecom.qsee_version < QSEE_VERSION_40) {
1037 req = (struct qseecom_qteec_ireq *)req_buf;
1038 desc.args[0] = req->app_id;
1039 desc.args[1] = req->req_ptr;
1040 desc.args[2] = req->req_len;
1041 desc.args[3] = req->resp_ptr;
1042 desc.args[4] = req->resp_len;
1043 } else {
1044 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1045 req_buf;
1046 desc.args[0] = req_64bit->app_id;
1047 desc.args[1] = req_64bit->req_ptr;
1048 desc.args[2] = req_64bit->req_len;
1049 desc.args[3] = req_64bit->resp_ptr;
1050 desc.args[4] = req_64bit->resp_len;
1051 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001052 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001053 break;
1054 }
1055 case QSEOS_TEE_REQUEST_CANCELLATION: {
1056 struct qseecom_qteec_ireq *req;
1057 struct qseecom_qteec_64bit_ireq *req_64bit;
1058
1059 smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
1060 desc.arginfo =
1061 TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
1062 if (qseecom.qsee_version < QSEE_VERSION_40) {
1063 req = (struct qseecom_qteec_ireq *)req_buf;
1064 desc.args[0] = req->app_id;
1065 desc.args[1] = req->req_ptr;
1066 desc.args[2] = req->req_len;
1067 desc.args[3] = req->resp_ptr;
1068 desc.args[4] = req->resp_len;
1069 } else {
1070 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1071 req_buf;
1072 desc.args[0] = req_64bit->app_id;
1073 desc.args[1] = req_64bit->req_ptr;
1074 desc.args[2] = req_64bit->req_len;
1075 desc.args[3] = req_64bit->resp_ptr;
1076 desc.args[4] = req_64bit->resp_len;
1077 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001078 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001079 break;
1080 }
1081 case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
1082 struct qseecom_continue_blocked_request_ireq *req =
1083 (struct qseecom_continue_blocked_request_ireq *)
1084 req_buf;
Zhen Kong2f60f492017-06-29 15:22:14 -07001085 if (qseecom.smcinvoke_support)
1086 smc_id =
1087 TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
1088 else
1089 smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001090 desc.arginfo =
1091 TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
Zhen Kong2f60f492017-06-29 15:22:14 -07001092 desc.args[0] = req->app_or_session_id;
Zhen Kong03f220d2019-02-01 17:12:34 -08001093 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001094 break;
1095 }
1096 default: {
1097 pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
1098 qseos_cmd_id);
1099 ret = -EINVAL;
1100 break;
1101 }
1102 } /*end of switch (qsee_cmd_id) */
1103 break;
1104 } /*end of case SCM_SVC_TZSCHEDULER*/
1105 default: {
1106 pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
1107 svc_id);
1108 ret = -EINVAL;
1109 break;
1110 }
1111 } /*end of switch svc_id */
1112 scm_resp->result = desc.ret[0];
1113 scm_resp->resp_type = desc.ret[1];
1114 scm_resp->data = desc.ret[2];
1115 pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
1116 svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
1117 pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
1118 scm_resp->result, scm_resp->resp_type, scm_resp->data);
1119 return ret;
1120}
1121
1122
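/*
 * Thin dispatch wrapper: use the legacy scm_call() interface on pre-ARMv8
 * targets, otherwise go through qseecom_scm_call2() above (which ignores
 * cmd_len/resp_len since the ARMv8 path passes arguments via scm_desc).
 */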
1123static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1124 size_t cmd_len, void *resp_buf, size_t resp_len)
1125{
1126 if (!is_scm_armv8())
1127 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1128 resp_buf, resp_len);
1129 else
1130 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1131}
1132
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001133static struct qseecom_registered_listener_list *__qseecom_find_svc(
1134 int32_t listener_id)
1135{
1136 struct qseecom_registered_listener_list *entry = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001137
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001138 list_for_each_entry(entry,
1139 &qseecom.registered_listener_list_head, list) {
1140 if (entry->svc.listener_id == listener_id)
1141 break;
1142 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001143 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001144 pr_debug("Service id: %u is not found\n", listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001145 return NULL;
1146 }
1147
1148 return entry;
1149}
1150
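/*
 * Import the listener's shared-buffer ION fd, resolve its physical
 * address, map it into the kernel, and register the buffer with TZ via
 * QSEOS_REGISTER_LISTENER. Entered with listener_access_lock held; that
 * lock is dropped and app_access_lock taken around the SCM call.
 */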
1151static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1152 struct qseecom_dev_handle *handle,
1153 struct qseecom_register_listener_req *listener)
1154{
1155 int ret = 0;
1156 struct qseecom_register_listener_ireq req;
1157 struct qseecom_register_listener_64bit_ireq req_64bit;
1158 struct qseecom_command_scm_resp resp;
1159 ion_phys_addr_t pa;
1160 void *cmd_buf = NULL;
1161 size_t cmd_len;
1162
1163 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001164 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001165 listener->ifd_data_fd);
1166 if (IS_ERR_OR_NULL(svc->ihandle)) {
1167 pr_err("Ion client could not retrieve the handle\n");
1168 return -ENOMEM;
1169 }
1170
1171 /* Get the physical address of the ION BUF */
1172 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1173 if (ret) {
1174 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1175 ret);
1176 return ret;
1177 }
1178 /* Populate the structure for sending scm call to load image */
1179 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1180 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1181 pr_err("ION memory mapping for listener shared buffer failed\n");
1182 return -ENOMEM;
1183 }
1184 svc->sb_phys = (phys_addr_t)pa;
1185
1186 if (qseecom.qsee_version < QSEE_VERSION_40) {
1187 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1188 req.listener_id = svc->svc.listener_id;
1189 req.sb_len = svc->sb_length;
1190 req.sb_ptr = (uint32_t)svc->sb_phys;
1191 cmd_buf = (void *)&req;
1192 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1193 } else {
1194 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1195 req_64bit.listener_id = svc->svc.listener_id;
1196 req_64bit.sb_len = svc->sb_length;
1197 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1198 cmd_buf = (void *)&req_64bit;
1199 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1200 }
1201
1202 resp.result = QSEOS_RESULT_INCOMPLETE;
1203
Zhen Kongc4c162a2019-01-23 12:07:12 -08001204 mutex_unlock(&listener_access_lock);
1205 mutex_lock(&app_access_lock);
1206 __qseecom_reentrancy_check_if_no_app_blocked(
1207 TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001208 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1209 &resp, sizeof(resp));
Zhen Kongc4c162a2019-01-23 12:07:12 -08001210 mutex_unlock(&app_access_lock);
1211 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001212 if (ret) {
1213 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1214 return -EINVAL;
1215 }
1216
1217 if (resp.result != QSEOS_RESULT_SUCCESS) {
1218 pr_err("Error SB registration req: resp.result = %d\n",
1219 resp.result);
1220 return -EPERM;
1221 }
1222 return 0;
1223}
1224
1225static int qseecom_register_listener(struct qseecom_dev_handle *data,
1226 void __user *argp)
1227{
1228 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001229 struct qseecom_register_listener_req rcvd_lstnr;
1230 struct qseecom_registered_listener_list *new_entry;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001231 struct qseecom_registered_listener_list *ptr_svc;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001232
1233 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1234 if (ret) {
1235 pr_err("copy_from_user failed\n");
1236 return ret;
1237 }
1238 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1239 rcvd_lstnr.sb_size))
1240 return -EFAULT;
1241
Zhen Kong3c674612018-09-06 22:51:27 -07001242 data->listener.id = rcvd_lstnr.listener_id;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001243
1244 ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
1245 if (ptr_svc) {
1246 if (ptr_svc->unregister_pending == false) {
1247 pr_err("Service %d is not unique\n",
Zhen Kong3c674612018-09-06 22:51:27 -07001248 rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001249 data->released = true;
1250 return -EBUSY;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001251 } else {
1252 /*wait until listener is unregistered*/
1253 pr_debug("register %d has to wait\n",
1254 rcvd_lstnr.listener_id);
1255 mutex_unlock(&listener_access_lock);
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301256 ret = wait_event_interruptible(
Zhen Kongbcdeda22018-11-16 13:50:51 -08001257 qseecom.register_lsnr_pending_wq,
1258 list_empty(
1259 &qseecom.unregister_lsnr_pending_list_head));
1260 if (ret) {
1261 pr_err("interrupted register_pending_wq %d\n",
1262 rcvd_lstnr.listener_id);
1263 mutex_lock(&listener_access_lock);
1264 return -ERESTARTSYS;
1265 }
1266 mutex_lock(&listener_access_lock);
1267 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001268 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001269 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1270 if (!new_entry)
1271 return -ENOMEM;
1272 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
Zhen Kongbcdeda22018-11-16 13:50:51 -08001273 new_entry->rcv_req_flag = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001274
1275 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1276 new_entry->sb_length = rcvd_lstnr.sb_size;
1277 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1278 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001279 pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
1280 rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001281 kzfree(new_entry);
1282 return -ENOMEM;
1283 }
1284
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001285 init_waitqueue_head(&new_entry->rcv_req_wq);
1286 init_waitqueue_head(&new_entry->listener_block_app_wq);
1287 new_entry->send_resp_flag = 0;
1288 new_entry->listener_in_use = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001289 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001290
Zhen Kong52ce9062018-09-24 14:33:27 -07001291 pr_debug("Service %d is registered\n", rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001292 return ret;
1293}
1294
Zhen Kongbcdeda22018-11-16 13:50:51 -08001295static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
1296 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001297{
1298 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001299 struct qseecom_register_listener_ireq req;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001300 struct qseecom_command_scm_resp resp;
1301 struct ion_handle *ihandle = NULL; /* Retrieve phy addr */
1302
1303 req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
1304 req.listener_id = data->listener.id;
1305 resp.result = QSEOS_RESULT_INCOMPLETE;
1306
Zhen Kongc4c162a2019-01-23 12:07:12 -08001307 mutex_unlock(&listener_access_lock);
1308 mutex_lock(&app_access_lock);
1309 __qseecom_reentrancy_check_if_no_app_blocked(
1310 TZ_OS_DEREGISTER_LISTENER_ID);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001311 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
1312 sizeof(req), &resp, sizeof(resp));
Zhen Kongc4c162a2019-01-23 12:07:12 -08001313 mutex_unlock(&app_access_lock);
1314 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001315 if (ret) {
1316 pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
1317 ret, data->listener.id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001318 if (ret == -EBUSY)
1319 return ret;
Zhen Kong3c674612018-09-06 22:51:27 -07001320 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001321 }
1322
1323 if (resp.result != QSEOS_RESULT_SUCCESS) {
1324 pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
1325 resp.result, data->listener.id);
Zhen Kong3c674612018-09-06 22:51:27 -07001326 ret = -EPERM;
1327 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001328 }
1329
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001330 while (atomic_read(&data->ioctl_count) > 1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301331 if (wait_event_interruptible(data->abort_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001332 atomic_read(&data->ioctl_count) <= 1)) {
1333 pr_err("Interrupted from abort\n");
1334 ret = -ERESTARTSYS;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001335 }
1336 }
1337
Zhen Kong3c674612018-09-06 22:51:27 -07001338exit:
1339 if (ptr_svc->sb_virt) {
1340 ihandle = ptr_svc->ihandle;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001341 if (!IS_ERR_OR_NULL(ihandle)) {
1342 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
1343 ion_free(qseecom.ion_clnt, ihandle);
1344 }
1345 }
Zhen Kong3c674612018-09-06 22:51:27 -07001346 list_del(&ptr_svc->list);
1347 kzfree(ptr_svc);
1348
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001349 data->released = true;
Zhen Kong52ce9062018-09-24 14:33:27 -07001350 pr_debug("Service %d is unregistered\n", data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001351 return ret;
1352}
1353
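/*
 * Listener unregistration is deferred: abort any client blocked on this
 * listener, wake the listener's receive queue, then queue the handle on
 * unregister_lsnr_pending_list_head so that the unregister kthread
 * performs the actual TZ deregistration later.
 */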
Zhen Kongbcdeda22018-11-16 13:50:51 -08001354static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
1355{
1356 struct qseecom_registered_listener_list *ptr_svc = NULL;
1357 struct qseecom_unregister_pending_list *entry = NULL;
1358
1359 ptr_svc = __qseecom_find_svc(data->listener.id);
1360 if (!ptr_svc) {
1361 pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
1362 return -ENODATA;
1363 }
1364 /* stop CA thread waiting for listener response */
1365 ptr_svc->abort = 1;
1366 wake_up_interruptible_all(&qseecom.send_resp_wq);
1367
Zhen Kongc4c162a2019-01-23 12:07:12 -08001368 /* stop listener thread waiting for listener request */
1369 data->abort = 1;
1370 wake_up_all(&ptr_svc->rcv_req_wq);
1371
Zhen Kongbcdeda22018-11-16 13:50:51 -08001372 /* return directly if pending*/
1373 if (ptr_svc->unregister_pending)
1374 return 0;
1375
1376 /*add unregistration into pending list*/
1377 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1378 if (!entry)
1379 return -ENOMEM;
1380 entry->data = data;
1381 list_add_tail(&entry->list,
1382 &qseecom.unregister_lsnr_pending_list_head);
1383 ptr_svc->unregister_pending = true;
1384 pr_debug("unregister %d pending\n", data->listener.id);
1385 return 0;
1386}
1387
1388static void __qseecom_processing_pending_lsnr_unregister(void)
1389{
1390 struct qseecom_unregister_pending_list *entry = NULL;
1391 struct qseecom_registered_listener_list *ptr_svc = NULL;
1392 struct list_head *pos;
1393 int ret = 0;
1394
1395 mutex_lock(&listener_access_lock);
1396 while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
1397 pos = qseecom.unregister_lsnr_pending_list_head.next;
1398 entry = list_entry(pos,
1399 struct qseecom_unregister_pending_list, list);
1400 if (entry && entry->data) {
1401 pr_debug("process pending unregister %d\n",
1402 entry->data->listener.id);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08001403 /* don't process if qseecom_release is not called*/
1404 if (!entry->data->listener.release_called)
1405 break;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001406 ptr_svc = __qseecom_find_svc(
1407 entry->data->listener.id);
1408 if (ptr_svc) {
1409 ret = __qseecom_unregister_listener(
1410 entry->data, ptr_svc);
1411 if (ret == -EBUSY) {
1412 pr_debug("unregister %d pending again\n",
1413 entry->data->listener.id);
1414 mutex_unlock(&listener_access_lock);
1415 return;
1416 }
1417 } else
1418 pr_err("invalid listener %d\n",
1419 entry->data->listener.id);
1420 kzfree(entry->data);
1421 }
1422 list_del(pos);
1423 kzfree(entry);
1424 }
1425 mutex_unlock(&listener_access_lock);
1426 wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
1427}
1428
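/*
 * The unregister kthread below sleeps until its state is set to
 * LSNR_UNREG_KT_WAKEUP; each wakeup drains the pending-unregister list
 * (entries whose SCM call returns -EBUSY stay queued and are retried on
 * the next wakeup) and then goes back to sleep.
 */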
Zhen Kongc4c162a2019-01-23 12:07:12 -08001429static void __wakeup_unregister_listener_kthread(void)
1430{
1431 atomic_set(&qseecom.unregister_lsnr_kthread_state,
1432 LSNR_UNREG_KT_WAKEUP);
1433 wake_up_interruptible(&qseecom.unregister_lsnr_kthread_wq);
1434}
1435
1436static int __qseecom_unregister_listener_kthread_func(void *data)
1437{
1438 while (!kthread_should_stop()) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301439 wait_event_interruptible(
Zhen Kongc4c162a2019-01-23 12:07:12 -08001440 qseecom.unregister_lsnr_kthread_wq,
1441 atomic_read(&qseecom.unregister_lsnr_kthread_state)
1442 == LSNR_UNREG_KT_WAKEUP);
1443 pr_debug("kthread to unregister listener is called %d\n",
1444 atomic_read(&qseecom.unregister_lsnr_kthread_state));
1445 __qseecom_processing_pending_lsnr_unregister();
1446 atomic_set(&qseecom.unregister_lsnr_kthread_state,
1447 LSNR_UNREG_KT_SLEEP);
1448 }
1449 pr_warn("kthread to unregister listener stopped\n");
1450 return 0;
1451}
1452
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001453static int __qseecom_set_msm_bus_request(uint32_t mode)
1454{
1455 int ret = 0;
1456 struct qseecom_clk *qclk;
1457
1458 qclk = &qseecom.qsee;
1459 if (qclk->ce_core_src_clk != NULL) {
1460 if (mode == INACTIVE) {
1461 __qseecom_disable_clk(CLK_QSEE);
1462 } else {
1463 ret = __qseecom_enable_clk(CLK_QSEE);
1464 if (ret)
1465 pr_err("CLK enabling failed (%d) MODE (%d)\n",
1466 ret, mode);
1467 }
1468 }
1469
1470 if ((!ret) && (qseecom.current_mode != mode)) {
1471 ret = msm_bus_scale_client_update_request(
1472 qseecom.qsee_perf_client, mode);
1473 if (ret) {
1474 pr_err("Bandwidth req failed(%d) MODE (%d)\n",
1475 ret, mode);
1476 if (qclk->ce_core_src_clk != NULL) {
1477 if (mode == INACTIVE) {
1478 ret = __qseecom_enable_clk(CLK_QSEE);
1479 if (ret)
1480 pr_err("CLK enable failed\n");
1481 } else
1482 __qseecom_disable_clk(CLK_QSEE);
1483 }
1484 }
1485 qseecom.current_mode = mode;
1486 }
1487 return ret;
1488}
1489
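/*
 * Bus-scaling idle path: bw_scale_down_timer schedules this work item,
 * which drops the bus request back to INACTIVE if the timer is still
 * marked as running by the time the work executes.
 */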
1490static void qseecom_bw_inactive_req_work(struct work_struct *work)
1491{
1492 mutex_lock(&app_access_lock);
1493 mutex_lock(&qsee_bw_mutex);
1494 if (qseecom.timer_running)
1495 __qseecom_set_msm_bus_request(INACTIVE);
1496 pr_debug("current_mode = %d, cumulative_mode = %d\n",
1497 qseecom.current_mode, qseecom.cumulative_mode);
1498 qseecom.timer_running = false;
1499 mutex_unlock(&qsee_bw_mutex);
1500 mutex_unlock(&app_access_lock);
1501}
1502
1503static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
1504{
1505 schedule_work(&qseecom.bw_inactive_req_ws);
1506}
1507
1508static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1509{
1510 struct qseecom_clk *qclk;
1511 int ret = 0;
1512
1513 mutex_lock(&clk_access_lock);
1514 if (ce == CLK_QSEE)
1515 qclk = &qseecom.qsee;
1516 else
1517 qclk = &qseecom.ce_drv;
1518
Zhen Kongf99808af2019-07-09 13:28:24 -07001519 if (qclk->clk_access_cnt > 0) {
1520 qclk->clk_access_cnt--;
1521 } else {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001522 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1523 ret = -EINVAL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001524 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001525
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001526 mutex_unlock(&clk_access_lock);
1527 return ret;
1528}
1529
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001530static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1531{
1532 int32_t ret = 0;
1533 int32_t request_mode = INACTIVE;
1534
1535 mutex_lock(&qsee_bw_mutex);
1536 if (mode == 0) {
1537 if (qseecom.cumulative_mode > MEDIUM)
1538 request_mode = HIGH;
1539 else
1540 request_mode = qseecom.cumulative_mode;
1541 } else {
1542 request_mode = mode;
1543 }
1544
1545 ret = __qseecom_set_msm_bus_request(request_mode);
1546 if (ret) {
1547 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1548 ret, request_mode);
1549 goto err_scale_timer;
1550 }
1551
1552 if (qseecom.timer_running) {
1553 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1554 if (ret) {
1555 pr_err("Failed to decrease clk ref count.\n");
1556 goto err_scale_timer;
1557 }
1558 del_timer_sync(&(qseecom.bw_scale_down_timer));
1559 qseecom.timer_running = false;
1560 }
1561err_scale_timer:
1562 mutex_unlock(&qsee_bw_mutex);
1563 return ret;
1564}
1565
1566
1567static int qseecom_unregister_bus_bandwidth_needs(
1568 struct qseecom_dev_handle *data)
1569{
1570 int32_t ret = 0;
1571
1572 qseecom.cumulative_mode -= data->mode;
1573 data->mode = INACTIVE;
1574
1575 return ret;
1576}
1577
1578static int __qseecom_register_bus_bandwidth_needs(
1579 struct qseecom_dev_handle *data, uint32_t request_mode)
1580{
1581 int32_t ret = 0;
1582
1583 if (data->mode == INACTIVE) {
1584 qseecom.cumulative_mode += request_mode;
1585 data->mode = request_mode;
1586 } else {
1587 if (data->mode != request_mode) {
1588 qseecom.cumulative_mode -= data->mode;
1589 qseecom.cumulative_mode += request_mode;
1590 data->mode = request_mode;
1591 }
1592 }
1593 return ret;
1594}
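
/*
 * Worked example of the vote accounting above (a sketch; cumulative_mode is
 * just a running sum that is later compared against MEDIUM in
 * qseecom_scale_bus_bandwidth_timer()):
 *
 *	client A registers MEDIUM  -> cumulative_mode = MEDIUM,        A->mode = MEDIUM
 *	client B registers HIGH    -> cumulative_mode = MEDIUM + HIGH, B->mode = HIGH
 *	client A re-registers HIGH -> cumulative_mode = HIGH + HIGH,   A->mode = HIGH
 *	client B unregisters       -> cumulative_mode = HIGH,          B->mode = INACTIVE
 */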
1595
1596static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1597{
1598 int ret = 0;
1599
1600 ret = qsee_vote_for_clock(data, CLK_DFAB);
1601 if (ret) {
1602 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1603 goto perf_enable_exit;
1604 }
1605 ret = qsee_vote_for_clock(data, CLK_SFPB);
1606 if (ret) {
1607 qsee_disable_clock_vote(data, CLK_DFAB);
1608 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1609 goto perf_enable_exit;
1610 }
1611
1612perf_enable_exit:
1613 return ret;
1614}
1615
1616static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1617 void __user *argp)
1618{
1619 int32_t ret = 0;
1620 int32_t req_mode;
1621
1622 if (qseecom.no_clock_support)
1623 return 0;
1624
1625 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1626 if (ret) {
1627 pr_err("copy_from_user failed\n");
1628 return ret;
1629 }
1630 if (req_mode > HIGH) {
1631 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1632 return -EINVAL;
1633 }
1634
1635 /*
1636 * Register bus bandwidth needs if bus scaling feature is enabled;
1637 * otherwise, qseecom enable/disable clocks for the client directly.
1638 */
1639 if (qseecom.support_bus_scaling) {
1640 mutex_lock(&qsee_bw_mutex);
1641 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1642 mutex_unlock(&qsee_bw_mutex);
1643 } else {
1644 pr_debug("Bus scaling feature is NOT enabled\n");
1645 pr_debug("request bandwidth mode %d for the client\n",
1646 req_mode);
1647 if (req_mode != INACTIVE) {
1648 ret = qseecom_perf_enable(data);
1649 if (ret)
1650 pr_err("Failed to vote for clock with err %d\n",
1651 ret);
1652 } else {
1653 qsee_disable_clock_vote(data, CLK_DFAB);
1654 qsee_disable_clock_vote(data, CLK_SFPB);
1655 }
1656 }
1657 return ret;
1658}
1659
1660static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1661{
1662 if (qseecom.no_clock_support)
1663 return;
1664
1665 mutex_lock(&qsee_bw_mutex);
1666 qseecom.bw_scale_down_timer.expires = jiffies +
1667 msecs_to_jiffies(duration);
1668 mod_timer(&(qseecom.bw_scale_down_timer),
1669 qseecom.bw_scale_down_timer.expires);
1670 qseecom.timer_running = true;
1671 mutex_unlock(&qsee_bw_mutex);
1672}
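
/*
 * Inactivity scale-down path, pieced together from the helpers in this area:
 * after a crypto operation, __qseecom_disable_clk_scale_down() arms
 * bw_scale_down_timer via __qseecom_add_bw_scale_down_timer(); on expiry,
 * qseecom_scale_bus_bandwidth_timer_callback() schedules bw_inactive_req_ws,
 * and qseecom_bw_inactive_req_work() finally drops the vote with
 * __qseecom_set_msm_bus_request(INACTIVE), provided timer_running is still
 * true at that point.
 */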
1673
1674static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1675{
1676 if (!qseecom.support_bus_scaling)
1677 qsee_disable_clock_vote(data, CLK_SFPB);
1678 else
1679 __qseecom_add_bw_scale_down_timer(
1680 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1681}
1682
1683static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1684{
1685 int ret = 0;
1686
1687 if (qseecom.support_bus_scaling) {
1688 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1689 if (ret)
1690 pr_err("Failed to set bw MEDIUM.\n");
1691 } else {
1692 ret = qsee_vote_for_clock(data, CLK_SFPB);
1693 if (ret)
1694 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1695 }
1696 return ret;
1697}
1698
1699static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1700 void __user *argp)
1701{
1702 ion_phys_addr_t pa;
1703 int32_t ret;
1704 struct qseecom_set_sb_mem_param_req req;
1705 size_t len;
1706
1707 /* Copy the relevant information needed for loading the image */
1708 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1709 return -EFAULT;
1710
1711 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1712 (req.sb_len == 0)) {
1713		pr_err("Invalid input(s): ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1714 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1715 return -EFAULT;
1716 }
1717 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1718 req.sb_len))
1719 return -EFAULT;
1720
1721 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001722 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001723 req.ifd_data_fd);
1724 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1725 pr_err("Ion client could not retrieve the handle\n");
1726 return -ENOMEM;
1727 }
1728 /* Get the physical address of the ION BUF */
1729 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1730 if (ret) {
1731
1732 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1733 ret);
1734 return ret;
1735 }
1736
1737 if (len < req.sb_len) {
1738 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1739 req.sb_len, len);
1740 return -EINVAL;
1741 }
1742 /* Populate the structure for sending scm call to load image */
1743 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1744 data->client.ihandle);
1745 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1746 pr_err("ION memory mapping for client shared buf failed\n");
1747 return -ENOMEM;
1748 }
1749 data->client.sb_phys = (phys_addr_t)pa;
1750 data->client.sb_length = req.sb_len;
1751 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1752 return 0;
1753}
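
/*
 * On success the client handle now describes the shared buffer from three
 * views: sb_phys (ION physical address), sb_virt (kernel mapping) and
 * user_virt_sb_base (the client's own virtual address), plus sb_length.
 * The __qseecom_uvirt_to_kphys()/__qseecom_uvirt_to_kvirt() helpers further
 * down rely on these fields to translate request/response pointers passed
 * in from userspace.
 */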
1754
Zhen Kong26e62742018-05-04 17:19:06 -07001755static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1756 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001757{
1758 int ret;
1759
1760 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001761 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001762}
1763
1764static int __qseecom_reentrancy_listener_has_sent_rsp(
1765 struct qseecom_dev_handle *data,
1766 struct qseecom_registered_listener_list *ptr_svc)
1767{
1768 int ret;
1769
1770 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001771 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001772}
1773
1774static void __qseecom_clean_listener_sglistinfo(
1775 struct qseecom_registered_listener_list *ptr_svc)
1776{
1777 if (ptr_svc->sglist_cnt) {
1778 memset(ptr_svc->sglistinfo_ptr, 0,
1779 SGLISTINFO_TABLE_SIZE);
1780 ptr_svc->sglist_cnt = 0;
1781 }
1782}
1783
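/*
 * Listener "incomplete command" handling: QSEE returns
 * QSEOS_RESULT_INCOMPLETE with the listener id in resp->data; the driver
 * wakes that listener's rcv_req_wq, sleeps until the listener (or an abort)
 * posts a response, then sends QSEOS_LISTENER_DATA_RSP_COMMAND (or its
 * whitelist variant) back to QSEE through qseecom_scm_call(). The loop
 * repeats until QSEE reports something other than QSEOS_RESULT_INCOMPLETE.
 */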
1784static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1785 struct qseecom_command_scm_resp *resp)
1786{
1787 int ret = 0;
1788 int rc = 0;
1789 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07001790 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
1791 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
1792 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001793 struct qseecom_registered_listener_list *ptr_svc = NULL;
1794 sigset_t new_sigset;
1795 sigset_t old_sigset;
1796 uint32_t status;
1797 void *cmd_buf = NULL;
1798 size_t cmd_len;
1799 struct sglist_info *table = NULL;
1800
Zhen Kongbcdeda22018-11-16 13:50:51 -08001801 qseecom.app_block_ref_cnt++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001802 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1803 lstnr = resp->data;
1804 /*
1805		 * Wake up blocking listener service with the lstnr id
1806 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08001807 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001808 list_for_each_entry(ptr_svc,
1809 &qseecom.registered_listener_list_head, list) {
1810 if (ptr_svc->svc.listener_id == lstnr) {
1811 ptr_svc->listener_in_use = true;
1812 ptr_svc->rcv_req_flag = 1;
1813 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1814 break;
1815 }
1816 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001817
1818 if (ptr_svc == NULL) {
1819 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07001820 rc = -EINVAL;
1821 status = QSEOS_RESULT_FAILURE;
1822 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001823 }
1824
1825 if (!ptr_svc->ihandle) {
1826 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07001827 rc = -EINVAL;
1828 status = QSEOS_RESULT_FAILURE;
1829 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001830 }
1831
1832 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07001833 pr_err("Service %d does not exist\n",
1834 lstnr);
1835 rc = -ERESTARTSYS;
1836 ptr_svc = NULL;
1837 status = QSEOS_RESULT_FAILURE;
1838 goto err_resp;
1839 }
1840
1841 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001842 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07001843 lstnr, ptr_svc->abort);
1844 rc = -ENODEV;
1845 status = QSEOS_RESULT_FAILURE;
1846 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001847 }
Zhen Kong25731112018-09-20 13:10:03 -07001848
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001849 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1850
1851 /* initialize the new signal mask with all signals*/
1852 sigfillset(&new_sigset);
1853 /* block all signals */
1854 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1855
Zhen Kongbcdeda22018-11-16 13:50:51 -08001856 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001857 do {
1858 /*
1859 * When reentrancy is not supported, check global
1860 * send_resp_flag; otherwise, check this listener's
1861 * send_resp_flag.
1862 */
1863 if (!qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301864 !wait_event_interruptible(qseecom.send_resp_wq,
Zhen Kong26e62742018-05-04 17:19:06 -07001865 __qseecom_listener_has_sent_rsp(
1866 data, ptr_svc))) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001867 break;
1868 }
1869
1870 if (qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301871 !wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001872 __qseecom_reentrancy_listener_has_sent_rsp(
1873 data, ptr_svc))) {
1874 break;
1875 }
1876 } while (1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001877 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001878 /* restore signal mask */
1879 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07001880 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001881			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
1882 data->client.app_id, lstnr, ret);
1883 rc = -ENODEV;
1884 status = QSEOS_RESULT_FAILURE;
1885 } else {
1886 status = QSEOS_RESULT_SUCCESS;
1887 }
Zhen Kong26e62742018-05-04 17:19:06 -07001888err_resp:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001889 qseecom.send_resp_flag = 0;
Zhen Kong7d500032018-08-06 16:58:31 -07001890 if (ptr_svc) {
1891 ptr_svc->send_resp_flag = 0;
1892 table = ptr_svc->sglistinfo_ptr;
1893 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001894 if (qseecom.qsee_version < QSEE_VERSION_40) {
1895 send_data_rsp.listener_id = lstnr;
1896 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001897 if (table) {
1898 send_data_rsp.sglistinfo_ptr =
1899 (uint32_t)virt_to_phys(table);
1900 send_data_rsp.sglistinfo_len =
1901 SGLISTINFO_TABLE_SIZE;
1902 dmac_flush_range((void *)table,
1903 (void *)table + SGLISTINFO_TABLE_SIZE);
1904 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001905 cmd_buf = (void *)&send_data_rsp;
1906 cmd_len = sizeof(send_data_rsp);
1907 } else {
1908 send_data_rsp_64bit.listener_id = lstnr;
1909 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001910 if (table) {
1911 send_data_rsp_64bit.sglistinfo_ptr =
1912 virt_to_phys(table);
1913 send_data_rsp_64bit.sglistinfo_len =
1914 SGLISTINFO_TABLE_SIZE;
1915 dmac_flush_range((void *)table,
1916 (void *)table + SGLISTINFO_TABLE_SIZE);
1917 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001918 cmd_buf = (void *)&send_data_rsp_64bit;
1919 cmd_len = sizeof(send_data_rsp_64bit);
1920 }
Zhen Kong7d500032018-08-06 16:58:31 -07001921 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001922 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1923 else
1924 *(uint32_t *)cmd_buf =
1925 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07001926 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001927 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1928 ptr_svc->ihandle,
1929 ptr_svc->sb_virt, ptr_svc->sb_length,
1930 ION_IOC_CLEAN_INV_CACHES);
1931 if (ret) {
1932 pr_err("cache operation failed %d\n", ret);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001933 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001934 }
1935 }
1936
1937 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1938 ret = __qseecom_enable_clk(CLK_QSEE);
1939 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08001940 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001941 }
1942
1943 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1944 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07001945 if (ptr_svc) {
1946 ptr_svc->listener_in_use = false;
1947 __qseecom_clean_listener_sglistinfo(ptr_svc);
1948 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001949 if (ret) {
1950 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1951 ret, data->client.app_id);
1952 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1953 __qseecom_disable_clk(CLK_QSEE);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001954 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001955 }
Zhen Kong26e62742018-05-04 17:19:06 -07001956 pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
1957 status, resp->result, data->client.app_id, lstnr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001958 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1959 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1960 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1961 resp->result, data->client.app_id, lstnr);
1962 ret = -EINVAL;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001963 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001964 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001965exit:
1966 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001967 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1968 __qseecom_disable_clk(CLK_QSEE);
1969
1970 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001971 qseecom.app_block_ref_cnt--;
Zhen Kongcc580932019-04-23 22:16:56 -07001972 wake_up_interruptible_all(&qseecom.app_block_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001973 if (rc)
1974 return rc;
1975
1976 return ret;
1977}
1978
Zhen Konga91aaf02018-02-02 17:21:04 -08001979static int __qseecom_process_reentrancy_blocked_on_listener(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001980 struct qseecom_command_scm_resp *resp,
1981 struct qseecom_registered_app_list *ptr_app,
1982 struct qseecom_dev_handle *data)
1983{
1984 struct qseecom_registered_listener_list *list_ptr;
1985 int ret = 0;
1986 struct qseecom_continue_blocked_request_ireq ireq;
1987 struct qseecom_command_scm_resp continue_resp;
Zhen Konga91aaf02018-02-02 17:21:04 -08001988 unsigned int session_id;
Zhen Kong3d1d92f2018-02-02 17:21:04 -08001989 sigset_t new_sigset;
1990 sigset_t old_sigset;
Zhen Konga91aaf02018-02-02 17:21:04 -08001991 unsigned long flags;
1992 bool found_app = false;
Zhen Kong0ea975d2019-03-12 14:40:24 -07001993 struct qseecom_registered_app_list dummy_app_entry = { {NULL} };
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001994
1995 if (!resp || !data) {
1996 pr_err("invalid resp or data pointer\n");
1997 ret = -EINVAL;
1998 goto exit;
1999 }
2000
2001 /* find app_id & img_name from list */
Zhen Kong0ea975d2019-03-12 14:40:24 -07002002 if (!ptr_app) {
2003 if (data->client.from_smcinvoke) {
2004 pr_debug("This request is from smcinvoke\n");
2005 ptr_app = &dummy_app_entry;
2006 ptr_app->app_id = data->client.app_id;
2007 } else {
2008 spin_lock_irqsave(&qseecom.registered_app_list_lock,
2009 flags);
2010 list_for_each_entry(ptr_app,
2011 &qseecom.registered_app_list_head, list) {
2012 if ((ptr_app->app_id == data->client.app_id) &&
2013 (!strcmp(ptr_app->app_name,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002014 data->client.app_name))) {
Zhen Kong0ea975d2019-03-12 14:40:24 -07002015 found_app = true;
2016 break;
2017 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002018 }
Zhen Kong0ea975d2019-03-12 14:40:24 -07002019 spin_unlock_irqrestore(
2020 &qseecom.registered_app_list_lock, flags);
2021 if (!found_app) {
2022 pr_err("app_id %d (%s) is not found\n",
2023 data->client.app_id,
2024 (char *)data->client.app_name);
2025 ret = -ENOENT;
2026 goto exit;
2027 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002028 }
2029 }
2030
Zhen Kongd8cc0052017-11-13 15:13:31 -08002031 do {
Zhen Konga91aaf02018-02-02 17:21:04 -08002032 session_id = resp->resp_type;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002033 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002034 list_ptr = __qseecom_find_svc(resp->data);
2035 if (!list_ptr) {
2036 pr_err("Invalid listener ID %d\n", resp->data);
2037 ret = -ENODATA;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002038 mutex_unlock(&listener_access_lock);
Zhen Konge7f525f2017-12-01 18:26:25 -08002039 goto exit;
2040 }
Zhen Konga91aaf02018-02-02 17:21:04 -08002041 ptr_app->blocked_on_listener_id = resp->data;
2042
2043 pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
2044 resp->data, list_ptr->listener_in_use,
2045 session_id, data->client.app_id);
2046
2047 /* sleep until listener is available */
2048 sigfillset(&new_sigset);
2049 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2050
2051 do {
2052 qseecom.app_block_ref_cnt++;
2053 ptr_app->app_blocked = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002054 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002055 mutex_unlock(&app_access_lock);
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302056 wait_event_interruptible(
Zhen Konga91aaf02018-02-02 17:21:04 -08002057 list_ptr->listener_block_app_wq,
2058 !list_ptr->listener_in_use);
2059 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002060 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002061 ptr_app->app_blocked = false;
2062 qseecom.app_block_ref_cnt--;
2063 } while (list_ptr->listener_in_use);
2064
2065 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2066
2067 ptr_app->blocked_on_listener_id = 0;
2068 pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
2069 resp->data, session_id, data->client.app_id);
2070
2071 /* notify TZ that listener is available */
2072 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
2073
2074 if (qseecom.smcinvoke_support)
2075 ireq.app_or_session_id = session_id;
2076 else
2077 ireq.app_or_session_id = data->client.app_id;
2078
2079 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2080 &ireq, sizeof(ireq),
2081 &continue_resp, sizeof(continue_resp));
2082 if (ret && qseecom.smcinvoke_support) {
2083 /* retry with legacy cmd */
2084 qseecom.smcinvoke_support = false;
2085 ireq.app_or_session_id = data->client.app_id;
2086 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2087 &ireq, sizeof(ireq),
2088 &continue_resp, sizeof(continue_resp));
2089 qseecom.smcinvoke_support = true;
2090 if (ret) {
2091 pr_err("unblock app %d or session %d fail\n",
2092 data->client.app_id, session_id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002093 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002094 goto exit;
2095 }
2096 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08002097 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002098 resp->result = continue_resp.result;
2099 resp->resp_type = continue_resp.resp_type;
2100 resp->data = continue_resp.data;
2101 pr_debug("unblock resp = %d\n", resp->result);
2102 } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
2103
2104 if (resp->result != QSEOS_RESULT_INCOMPLETE) {
2105 pr_err("Unexpected unblock resp %d\n", resp->result);
2106 ret = -EINVAL;
Zhen Kong2f60f492017-06-29 15:22:14 -07002107 }
Zhen Kong2f60f492017-06-29 15:22:14 -07002108exit:
2109 return ret;
2110}
2111
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002112static int __qseecom_reentrancy_process_incomplete_cmd(
2113 struct qseecom_dev_handle *data,
2114 struct qseecom_command_scm_resp *resp)
2115{
2116 int ret = 0;
2117 int rc = 0;
2118 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07002119 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
2120 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
2121 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002122 struct qseecom_registered_listener_list *ptr_svc = NULL;
2123 sigset_t new_sigset;
2124 sigset_t old_sigset;
2125 uint32_t status;
2126 void *cmd_buf = NULL;
2127 size_t cmd_len;
2128 struct sglist_info *table = NULL;
2129
Zhen Kong26e62742018-05-04 17:19:06 -07002130 while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002131 lstnr = resp->data;
2132 /*
2133		 * Wake up blocking listener service with the lstnr id
2134 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002135 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002136 list_for_each_entry(ptr_svc,
2137 &qseecom.registered_listener_list_head, list) {
2138 if (ptr_svc->svc.listener_id == lstnr) {
2139 ptr_svc->listener_in_use = true;
2140 ptr_svc->rcv_req_flag = 1;
2141 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2142 break;
2143 }
2144 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002145
2146 if (ptr_svc == NULL) {
2147 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07002148 rc = -EINVAL;
2149 status = QSEOS_RESULT_FAILURE;
2150 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002151 }
2152
2153 if (!ptr_svc->ihandle) {
2154 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07002155 rc = -EINVAL;
2156 status = QSEOS_RESULT_FAILURE;
2157 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002158 }
2159
2160 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07002161 pr_err("Service %d does not exist\n",
2162 lstnr);
2163 rc = -ERESTARTSYS;
2164 ptr_svc = NULL;
2165 status = QSEOS_RESULT_FAILURE;
2166 goto err_resp;
2167 }
2168
2169 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08002170 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07002171 lstnr, ptr_svc->abort);
2172 rc = -ENODEV;
2173 status = QSEOS_RESULT_FAILURE;
2174 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002175 }
Zhen Kong25731112018-09-20 13:10:03 -07002176
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002177 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2178
2179 /* initialize the new signal mask with all signals*/
2180 sigfillset(&new_sigset);
2181
2182 /* block all signals */
2183 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2184
2185 /* unlock mutex btw waking listener and sleep-wait */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002186 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002187 mutex_unlock(&app_access_lock);
2188 do {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302189 if (!wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002190 __qseecom_reentrancy_listener_has_sent_rsp(
2191 data, ptr_svc))) {
2192 break;
2193 }
2194 } while (1);
2195 /* lock mutex again after resp sent */
2196 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002197 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002198 ptr_svc->send_resp_flag = 0;
2199 qseecom.send_resp_flag = 0;
2200
2201 /* restore signal mask */
2202 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07002203 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002204			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
2205 data->client.app_id, lstnr, ret);
2206 rc = -ENODEV;
2207 status = QSEOS_RESULT_FAILURE;
2208 } else {
2209 status = QSEOS_RESULT_SUCCESS;
2210 }
Zhen Kong26e62742018-05-04 17:19:06 -07002211err_resp:
Zhen Kong7d500032018-08-06 16:58:31 -07002212 if (ptr_svc)
2213 table = ptr_svc->sglistinfo_ptr;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002214 if (qseecom.qsee_version < QSEE_VERSION_40) {
2215 send_data_rsp.listener_id = lstnr;
2216 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002217 if (table) {
2218 send_data_rsp.sglistinfo_ptr =
2219 (uint32_t)virt_to_phys(table);
2220 send_data_rsp.sglistinfo_len =
2221 SGLISTINFO_TABLE_SIZE;
2222 dmac_flush_range((void *)table,
2223 (void *)table + SGLISTINFO_TABLE_SIZE);
2224 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002225 cmd_buf = (void *)&send_data_rsp;
2226 cmd_len = sizeof(send_data_rsp);
2227 } else {
2228 send_data_rsp_64bit.listener_id = lstnr;
2229 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002230 if (table) {
2231 send_data_rsp_64bit.sglistinfo_ptr =
2232 virt_to_phys(table);
2233 send_data_rsp_64bit.sglistinfo_len =
2234 SGLISTINFO_TABLE_SIZE;
2235 dmac_flush_range((void *)table,
2236 (void *)table + SGLISTINFO_TABLE_SIZE);
2237 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002238 cmd_buf = (void *)&send_data_rsp_64bit;
2239 cmd_len = sizeof(send_data_rsp_64bit);
2240 }
Zhen Kong7d500032018-08-06 16:58:31 -07002241 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002242 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2243 else
2244 *(uint32_t *)cmd_buf =
2245 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07002246 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002247 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2248 ptr_svc->ihandle,
2249 ptr_svc->sb_virt, ptr_svc->sb_length,
2250 ION_IOC_CLEAN_INV_CACHES);
2251 if (ret) {
2252 pr_err("cache operation failed %d\n", ret);
2253 return ret;
2254 }
2255 }
2256 if (lstnr == RPMB_SERVICE) {
2257 ret = __qseecom_enable_clk(CLK_QSEE);
2258 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08002259 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002260 }
2261
2262 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2263 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07002264 if (ptr_svc) {
2265 ptr_svc->listener_in_use = false;
2266 __qseecom_clean_listener_sglistinfo(ptr_svc);
2267 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2268 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002269
2270 if (ret) {
2271 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2272 ret, data->client.app_id);
2273 goto exit;
2274 }
2275
2276 switch (resp->result) {
2277 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2278 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2279 lstnr, data->client.app_id, resp->data);
2280 if (lstnr == resp->data) {
2281 pr_err("lstnr %d should not be blocked!\n",
2282 lstnr);
2283 ret = -EINVAL;
2284 goto exit;
2285 }
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002286 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002287 ret = __qseecom_process_reentrancy_blocked_on_listener(
2288 resp, NULL, data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002289 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002290 if (ret) {
2291 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2292 data->client.app_id,
2293 data->client.app_name, resp->data);
2294 goto exit;
2295 }
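			/* fall through - app unblocked, resp now holds the continued result */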
2296 case QSEOS_RESULT_SUCCESS:
2297 case QSEOS_RESULT_INCOMPLETE:
2298 break;
2299 default:
2300 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2301 resp->result, data->client.app_id, lstnr);
2302 ret = -EINVAL;
2303 goto exit;
2304 }
2305exit:
Zhen Kongbcdeda22018-11-16 13:50:51 -08002306 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002307 if (lstnr == RPMB_SERVICE)
2308 __qseecom_disable_clk(CLK_QSEE);
2309
2310 }
2311 if (rc)
2312 return rc;
2313
2314 return ret;
2315}
2316
2317/*
2318 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2319 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2320 * So, needs to first check if no app blocked before sending OS level scm call,
2321 * then wait until all apps are unblocked.
2322 */
2323static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2324{
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002325 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2326 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2327 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2328 /* thread sleep until this app unblocked */
2329 while (qseecom.app_block_ref_cnt > 0) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002330 mutex_unlock(&app_access_lock);
Zhen Kongcc580932019-04-23 22:16:56 -07002331 wait_event_interruptible(qseecom.app_block_wq,
2332 (!qseecom.app_block_ref_cnt));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002333 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002334 }
2335 }
2336}
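
/*
 * Typical call pattern for the helper above (a sketch; TZ_OS_APP_SHUTDOWN_ID
 * is one real example of an OS-level command id used later in this file):
 *
 *	mutex_lock(&app_access_lock);
 *	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
 *	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req, sizeof(req),
 *				&resp, sizeof(resp));
 *	mutex_unlock(&app_access_lock);
 *
 * Note the helper may drop and re-acquire app_access_lock internally while
 * it sleeps on app_block_wq.
 */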
2337
2338/*
2339 * scm_call of send data will fail if this TA is blocked or there are more
2340 * than one TA requesting listener services; So, first check to see if need
2341 * to wait.
2342 */
2343static void __qseecom_reentrancy_check_if_this_app_blocked(
2344 struct qseecom_registered_app_list *ptr_app)
2345{
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002346 if (qseecom.qsee_reentrancy_support) {
Zhen Kongdea10592018-07-30 17:50:10 -07002347 ptr_app->check_block++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002348 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2349 /* thread sleep until this app unblocked */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002350 mutex_unlock(&app_access_lock);
Zhen Kongcc580932019-04-23 22:16:56 -07002351 wait_event_interruptible(qseecom.app_block_wq,
2352 (!ptr_app->app_blocked &&
2353 qseecom.app_block_ref_cnt <= 1));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002354 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002355 }
Zhen Kongdea10592018-07-30 17:50:10 -07002356 ptr_app->check_block--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002357 }
2358}
2359
2360static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2361 uint32_t *app_id)
2362{
2363 int32_t ret;
2364 struct qseecom_command_scm_resp resp;
2365 bool found_app = false;
2366 struct qseecom_registered_app_list *entry = NULL;
2367 unsigned long flags = 0;
2368
2369 if (!app_id) {
2370 pr_err("Null pointer to app_id\n");
2371 return -EINVAL;
2372 }
2373 *app_id = 0;
2374
2375 /* check if app exists and has been registered locally */
2376 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2377 list_for_each_entry(entry,
2378 &qseecom.registered_app_list_head, list) {
2379 if (!strcmp(entry->app_name, req.app_name)) {
2380 found_app = true;
2381 break;
2382 }
2383 }
2384 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2385 if (found_app) {
2386 pr_debug("Found app with id %d\n", entry->app_id);
2387 *app_id = entry->app_id;
2388 return 0;
2389 }
2390
2391 memset((void *)&resp, 0, sizeof(resp));
2392
2393 /* SCM_CALL to check if app_id for the mentioned app exists */
2394 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2395 sizeof(struct qseecom_check_app_ireq),
2396 &resp, sizeof(resp));
2397 if (ret) {
2398 pr_err("scm_call to check if app is already loaded failed\n");
2399 return -EINVAL;
2400 }
2401
2402 if (resp.result == QSEOS_RESULT_FAILURE)
2403 return 0;
2404
2405 switch (resp.resp_type) {
2406 /*qsee returned listener type response */
2407 case QSEOS_LISTENER_ID:
2408		pr_err("resp type is of listener type instead of app\n");
2409 return -EINVAL;
2410 case QSEOS_APP_ID:
2411 *app_id = resp.data;
2412 return 0;
2413 default:
2414		pr_err("invalid resp type (%d) from qsee\n",
2415 resp.resp_type);
2416 return -ENODEV;
2417 }
2418}
2419
2420static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2421{
2422 struct qseecom_registered_app_list *entry = NULL;
2423 unsigned long flags = 0;
2424 u32 app_id = 0;
2425 struct ion_handle *ihandle; /* Ion handle */
2426 struct qseecom_load_img_req load_img_req;
2427 int32_t ret = 0;
2428 ion_phys_addr_t pa = 0;
2429 size_t len;
2430 struct qseecom_command_scm_resp resp;
2431 struct qseecom_check_app_ireq req;
2432 struct qseecom_load_app_ireq load_req;
2433 struct qseecom_load_app_64bit_ireq load_req_64bit;
2434 void *cmd_buf = NULL;
2435 size_t cmd_len;
2436 bool first_time = false;
2437
2438 /* Copy the relevant information needed for loading the image */
2439 if (copy_from_user(&load_img_req,
2440 (void __user *)argp,
2441 sizeof(struct qseecom_load_img_req))) {
2442 pr_err("copy_from_user failed\n");
2443 return -EFAULT;
2444 }
2445
2446 /* Check and load cmnlib */
2447 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2448 if (!qseecom.commonlib_loaded &&
2449 load_img_req.app_arch == ELFCLASS32) {
2450 ret = qseecom_load_commonlib_image(data, "cmnlib");
2451 if (ret) {
2452 pr_err("failed to load cmnlib\n");
2453 return -EIO;
2454 }
2455 qseecom.commonlib_loaded = true;
2456 pr_debug("cmnlib is loaded\n");
2457 }
2458
2459 if (!qseecom.commonlib64_loaded &&
2460 load_img_req.app_arch == ELFCLASS64) {
2461 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2462 if (ret) {
2463 pr_err("failed to load cmnlib64\n");
2464 return -EIO;
2465 }
2466 qseecom.commonlib64_loaded = true;
2467 pr_debug("cmnlib64 is loaded\n");
2468 }
2469 }
2470
2471 if (qseecom.support_bus_scaling) {
2472 mutex_lock(&qsee_bw_mutex);
2473 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2474 mutex_unlock(&qsee_bw_mutex);
2475 if (ret)
2476 return ret;
2477 }
2478
2479 /* Vote for the SFPB clock */
2480 ret = __qseecom_enable_clk_scale_up(data);
2481 if (ret)
2482 goto enable_clk_err;
2483
2484 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2485 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2486 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2487
2488 ret = __qseecom_check_app_exists(req, &app_id);
2489 if (ret < 0)
2490 goto loadapp_err;
2491
2492 if (app_id) {
2493 pr_debug("App id %d (%s) already exists\n", app_id,
2494 (char *)(req.app_name));
2495 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2496 list_for_each_entry(entry,
2497 &qseecom.registered_app_list_head, list){
2498 if (entry->app_id == app_id) {
2499 entry->ref_cnt++;
2500 break;
2501 }
2502 }
2503 spin_unlock_irqrestore(
2504 &qseecom.registered_app_list_lock, flags);
2505 ret = 0;
2506 } else {
2507 first_time = true;
2508		pr_warn("App (%s) doesn't exist, loading apps for first time\n",
2509 (char *)(load_img_req.img_name));
2510 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002511 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002512 load_img_req.ifd_data_fd);
2513 if (IS_ERR_OR_NULL(ihandle)) {
2514 pr_err("Ion client could not retrieve the handle\n");
2515 ret = -ENOMEM;
2516 goto loadapp_err;
2517 }
2518
2519 /* Get the physical address of the ION BUF */
2520 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2521 if (ret) {
2522 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2523 ret);
2524 goto loadapp_err;
2525 }
2526 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2527 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2528 len, load_img_req.mdt_len,
2529 load_img_req.img_len);
2530 ret = -EINVAL;
2531 goto loadapp_err;
2532 }
2533 /* Populate the structure for sending scm call to load image */
2534 if (qseecom.qsee_version < QSEE_VERSION_40) {
2535 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2536 load_req.mdt_len = load_img_req.mdt_len;
2537 load_req.img_len = load_img_req.img_len;
2538 strlcpy(load_req.app_name, load_img_req.img_name,
2539 MAX_APP_NAME_SIZE);
2540 load_req.phy_addr = (uint32_t)pa;
2541 cmd_buf = (void *)&load_req;
2542 cmd_len = sizeof(struct qseecom_load_app_ireq);
2543 } else {
2544 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2545 load_req_64bit.mdt_len = load_img_req.mdt_len;
2546 load_req_64bit.img_len = load_img_req.img_len;
2547 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2548 MAX_APP_NAME_SIZE);
2549 load_req_64bit.phy_addr = (uint64_t)pa;
2550 cmd_buf = (void *)&load_req_64bit;
2551 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2552 }
2553
2554 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2555 ION_IOC_CLEAN_INV_CACHES);
2556 if (ret) {
2557 pr_err("cache operation failed %d\n", ret);
2558 goto loadapp_err;
2559 }
2560
2561 /* SCM_CALL to load the app and get the app_id back */
2562 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2563 cmd_len, &resp, sizeof(resp));
2564 if (ret) {
2565 pr_err("scm_call to load app failed\n");
2566 if (!IS_ERR_OR_NULL(ihandle))
2567 ion_free(qseecom.ion_clnt, ihandle);
2568 ret = -EINVAL;
2569 goto loadapp_err;
2570 }
2571
2572 if (resp.result == QSEOS_RESULT_FAILURE) {
2573 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2574 if (!IS_ERR_OR_NULL(ihandle))
2575 ion_free(qseecom.ion_clnt, ihandle);
2576 ret = -EFAULT;
2577 goto loadapp_err;
2578 }
2579
2580 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2581 ret = __qseecom_process_incomplete_cmd(data, &resp);
2582 if (ret) {
Zhen Kong03b2eae2019-09-17 16:58:46 -07002583 /* TZ has created app_id, need to unload it */
2584 pr_err("incomp_cmd err %d, %d, unload %d %s\n",
2585 ret, resp.result, resp.data,
2586 load_img_req.img_name);
2587 __qseecom_unload_app(data, resp.data);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002588 if (!IS_ERR_OR_NULL(ihandle))
2589 ion_free(qseecom.ion_clnt, ihandle);
2590 ret = -EFAULT;
2591 goto loadapp_err;
2592 }
2593 }
2594
2595 if (resp.result != QSEOS_RESULT_SUCCESS) {
2596 pr_err("scm_call failed resp.result unknown, %d\n",
2597 resp.result);
2598 if (!IS_ERR_OR_NULL(ihandle))
2599 ion_free(qseecom.ion_clnt, ihandle);
2600 ret = -EFAULT;
2601 goto loadapp_err;
2602 }
2603
2604 app_id = resp.data;
2605
2606 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2607 if (!entry) {
2608 ret = -ENOMEM;
2609 goto loadapp_err;
2610 }
2611 entry->app_id = app_id;
2612 entry->ref_cnt = 1;
2613 entry->app_arch = load_img_req.app_arch;
2614 /*
2615 * keymaster app may be first loaded as "keymaste" by qseecomd,
2616 * and then used as "keymaster" on some targets. To avoid app
2617 * name checking error, register "keymaster" into app_list and
2618 * thread private data.
2619 */
2620 if (!strcmp(load_img_req.img_name, "keymaste"))
2621 strlcpy(entry->app_name, "keymaster",
2622 MAX_APP_NAME_SIZE);
2623 else
2624 strlcpy(entry->app_name, load_img_req.img_name,
2625 MAX_APP_NAME_SIZE);
2626 entry->app_blocked = false;
2627 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002628 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002629
2630 /* Deallocate the handle */
2631 if (!IS_ERR_OR_NULL(ihandle))
2632 ion_free(qseecom.ion_clnt, ihandle);
2633
2634 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2635 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2636 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2637 flags);
2638
2639 pr_warn("App with id %u (%s) now loaded\n", app_id,
2640 (char *)(load_img_req.img_name));
2641 }
2642 data->client.app_id = app_id;
2643 data->client.app_arch = load_img_req.app_arch;
2644 if (!strcmp(load_img_req.img_name, "keymaste"))
2645 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2646 else
2647 strlcpy(data->client.app_name, load_img_req.img_name,
2648 MAX_APP_NAME_SIZE);
2649 load_img_req.app_id = app_id;
2650 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2651 pr_err("copy_to_user failed\n");
2652 ret = -EFAULT;
2653 if (first_time == true) {
2654 spin_lock_irqsave(
2655 &qseecom.registered_app_list_lock, flags);
2656 list_del(&entry->list);
2657 spin_unlock_irqrestore(
2658 &qseecom.registered_app_list_lock, flags);
2659 kzfree(entry);
2660 }
2661 }
2662
2663loadapp_err:
2664 __qseecom_disable_clk_scale_down(data);
2665enable_clk_err:
2666 if (qseecom.support_bus_scaling) {
2667 mutex_lock(&qsee_bw_mutex);
2668 qseecom_unregister_bus_bandwidth_needs(data);
2669 mutex_unlock(&qsee_bw_mutex);
2670 }
2671 return ret;
2672}
2673
2674static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2675{
2676 int ret = 1; /* Set unload app */
2677
2678 wake_up_all(&qseecom.send_resp_wq);
2679 if (qseecom.qsee_reentrancy_support)
2680 mutex_unlock(&app_access_lock);
2681 while (atomic_read(&data->ioctl_count) > 1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302682 if (wait_event_interruptible(data->abort_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002683 atomic_read(&data->ioctl_count) <= 1)) {
2684 pr_err("Interrupted from abort\n");
2685 ret = -ERESTARTSYS;
2686 break;
2687 }
2688 }
2689 if (qseecom.qsee_reentrancy_support)
2690 mutex_lock(&app_access_lock);
2691 return ret;
2692}
2693
2694static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2695{
2696 int ret = 0;
2697
2698 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2699 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2700 ion_free(qseecom.ion_clnt, data->client.ihandle);
jitendrathakarec7ff9e42019-09-12 19:46:48 +05302701 memset((void *)&data->client,
2702 0, sizeof(struct qseecom_client_handle));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002703 }
2704 return ret;
2705}
2706
Zhen Kong03b2eae2019-09-17 16:58:46 -07002707static int __qseecom_unload_app(struct qseecom_dev_handle *data,
2708 uint32_t app_id)
2709{
2710 struct qseecom_unload_app_ireq req;
2711 struct qseecom_command_scm_resp resp;
2712 int ret = 0;
2713
2714 /* Populate the structure for sending scm call to load image */
2715 req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
2716 req.app_id = app_id;
2717
2718 /* SCM_CALL to unload the app */
2719 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2720 sizeof(struct qseecom_unload_app_ireq),
2721 &resp, sizeof(resp));
2722 if (ret) {
2723 pr_err("scm_call to unload app (id = %d) failed\n", app_id);
2724 return -EFAULT;
2725 }
2726 switch (resp.result) {
2727 case QSEOS_RESULT_SUCCESS:
2728 pr_warn("App (%d) is unloaded\n", app_id);
2729 break;
2730 case QSEOS_RESULT_INCOMPLETE:
2731 ret = __qseecom_process_incomplete_cmd(data, &resp);
2732 if (ret)
2733 pr_err("unload app %d fail proc incom cmd: %d,%d,%d\n",
2734 app_id, ret, resp.result, resp.data);
2735 else
2736 pr_warn("App (%d) is unloaded\n", app_id);
2737 break;
2738 case QSEOS_RESULT_FAILURE:
2739 pr_err("app (%d) unload_failed!!\n", app_id);
2740 ret = -EFAULT;
2741 break;
2742 default:
2743 pr_err("unload app %d get unknown resp.result %d\n",
2744 app_id, resp.result);
2745 ret = -EFAULT;
2746 break;
2747 }
2748 return ret;
2749}
2750
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002751static int qseecom_unload_app(struct qseecom_dev_handle *data,
2752 bool app_crash)
2753{
2754 unsigned long flags;
2755 unsigned long flags1;
2756 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002757 struct qseecom_registered_app_list *ptr_app = NULL;
2758 bool unload = false;
2759 bool found_app = false;
2760 bool found_dead_app = false;
Zhen Kong03b2eae2019-09-17 16:58:46 -07002761 bool doublecheck = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002762
2763 if (!data) {
2764 pr_err("Invalid/uninitialized device handle\n");
2765 return -EINVAL;
2766 }
2767
2768 if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
2769 pr_debug("Do not unload keymaster app from tz\n");
2770 goto unload_exit;
2771 }
2772
2773 __qseecom_cleanup_app(data);
2774 __qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
2775
2776 if (data->client.app_id > 0) {
2777 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2778 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
2779 list) {
2780 if (ptr_app->app_id == data->client.app_id) {
2781 if (!strcmp((void *)ptr_app->app_name,
2782 (void *)data->client.app_name)) {
2783 found_app = true;
Zhen Kong024798b2018-07-13 18:14:26 -07002784 if (ptr_app->app_blocked ||
2785 ptr_app->check_block)
Zhen Kongaf93d7a2017-10-13 14:01:48 -07002786 app_crash = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002787 if (app_crash || ptr_app->ref_cnt == 1)
2788 unload = true;
2789 break;
2790 }
2791 found_dead_app = true;
2792 break;
2793 }
2794 }
2795 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2796 flags);
2797 if (found_app == false && found_dead_app == false) {
2798 pr_err("Cannot find app with id = %d (%s)\n",
2799 data->client.app_id,
2800 (char *)data->client.app_name);
2801 ret = -EINVAL;
2802 goto unload_exit;
2803 }
2804 }
2805
2806 if (found_dead_app)
2807 pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
2808 (char *)data->client.app_name);
2809
2810 if (unload) {
Zhen Kong03b2eae2019-09-17 16:58:46 -07002811 ret = __qseecom_unload_app(data, data->client.app_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002812
Zhen Kongf818f152019-03-13 12:31:32 -07002813 /* double check if this app_entry still exists */
Zhen Kongf818f152019-03-13 12:31:32 -07002814 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2815 list_for_each_entry(ptr_app,
2816 &qseecom.registered_app_list_head, list) {
2817 if ((ptr_app->app_id == data->client.app_id) &&
2818 (!strcmp((void *)ptr_app->app_name,
2819 (void *)data->client.app_name))) {
2820 doublecheck = true;
2821 break;
2822 }
2823 }
2824 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2825 flags1);
2826 if (!doublecheck) {
2827 pr_warn("app %d(%s) entry is already removed\n",
2828 data->client.app_id,
2829 (char *)data->client.app_name);
2830 found_app = false;
2831 }
2832 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07002833
Zhen Kong7d500032018-08-06 16:58:31 -07002834unload_exit:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002835 if (found_app) {
2836 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2837 if (app_crash) {
2838 ptr_app->ref_cnt = 0;
2839 pr_debug("app_crash: ref_count = 0\n");
2840 } else {
2841 if (ptr_app->ref_cnt == 1) {
2842 ptr_app->ref_cnt = 0;
2843 pr_debug("ref_count set to 0\n");
2844 } else {
2845 ptr_app->ref_cnt--;
2846 pr_debug("Can't unload app(%d) inuse\n",
2847				pr_debug("Can't unload app(%d) in use\n",
2848 }
2849 }
2850 if (unload) {
2851 list_del(&ptr_app->list);
2852 kzfree(ptr_app);
2853 }
2854 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2855 flags1);
2856 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002857 qseecom_unmap_ion_allocated_memory(data);
2858 data->released = true;
2859 return ret;
2860}
2861
Zhen Kong03b2eae2019-09-17 16:58:46 -07002862
2863static int qseecom_prepare_unload_app(struct qseecom_dev_handle *data)
2864{
2865 struct qseecom_unload_app_pending_list *entry = NULL;
2866
2867 pr_debug("prepare to unload app(%d)(%s), pending %d\n",
2868 data->client.app_id, data->client.app_name,
2869 data->client.unload_pending);
2870 if (data->client.unload_pending)
2871 return 0;
2872 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2873 if (!entry)
2874 return -ENOMEM;
2875 entry->data = data;
Zhen Kong03b2eae2019-09-17 16:58:46 -07002876 list_add_tail(&entry->list,
2877 &qseecom.unload_app_pending_list_head);
Zhen Kong03b2eae2019-09-17 16:58:46 -07002878 data->client.unload_pending = true;
2879 pr_debug("unload ta %d pending\n", data->client.app_id);
2880 return 0;
2881}
2882
2883static void __wakeup_unload_app_kthread(void)
2884{
2885 atomic_set(&qseecom.unload_app_kthread_state,
2886 UNLOAD_APP_KT_WAKEUP);
2887 wake_up_interruptible(&qseecom.unload_app_kthread_wq);
2888}
2889
2890static bool __qseecom_find_pending_unload_app(uint32_t app_id, char *app_name)
2891{
2892 struct qseecom_unload_app_pending_list *entry = NULL;
2893 bool found = false;
2894
2895 mutex_lock(&unload_app_pending_list_lock);
2896 list_for_each_entry(entry, &qseecom.unload_app_pending_list_head,
2897 list) {
2898 if ((entry->data->client.app_id == app_id) &&
2899 (!strcmp(entry->data->client.app_name, app_name))) {
2900 found = true;
2901 break;
2902 }
2903 }
2904 mutex_unlock(&unload_app_pending_list_lock);
2905 return found;
2906}
2907
2908static void __qseecom_processing_pending_unload_app(void)
2909{
2910 struct qseecom_unload_app_pending_list *entry = NULL;
2911 struct list_head *pos;
2912 int ret = 0;
2913
2914 mutex_lock(&unload_app_pending_list_lock);
2915 while (!list_empty(&qseecom.unload_app_pending_list_head)) {
2916 pos = qseecom.unload_app_pending_list_head.next;
2917 entry = list_entry(pos,
2918 struct qseecom_unload_app_pending_list, list);
2919 if (entry && entry->data) {
2920 pr_debug("process pending unload app %d (%s)\n",
2921 entry->data->client.app_id,
2922 entry->data->client.app_name);
2923 mutex_unlock(&unload_app_pending_list_lock);
2924 mutex_lock(&app_access_lock);
2925 ret = qseecom_unload_app(entry->data, true);
2926 if (ret)
2927 pr_err("unload app %d pending failed %d\n",
2928 entry->data->client.app_id, ret);
2929 mutex_unlock(&app_access_lock);
2930 mutex_lock(&unload_app_pending_list_lock);
2931 kzfree(entry->data);
2932 }
2933 list_del(pos);
2934 kzfree(entry);
2935 }
2936 mutex_unlock(&unload_app_pending_list_lock);
2937}
2938
2939static int __qseecom_unload_app_kthread_func(void *data)
2940{
2941 while (!kthread_should_stop()) {
2942 wait_event_interruptible(
2943 qseecom.unload_app_kthread_wq,
2944 atomic_read(&qseecom.unload_app_kthread_state)
2945 == UNLOAD_APP_KT_WAKEUP);
2946 pr_debug("kthread to unload app is called, state %d\n",
2947 atomic_read(&qseecom.unload_app_kthread_state));
2948 __qseecom_processing_pending_unload_app();
2949 atomic_set(&qseecom.unload_app_kthread_state,
2950 UNLOAD_APP_KT_SLEEP);
2951 }
2952 pr_warn("kthread to unload app stopped\n");
2953 return 0;
2954}
2955
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002956static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2957 unsigned long virt)
2958{
2959 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2960}
2961
2962static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2963 unsigned long virt)
2964{
2965 return (uintptr_t)data->client.sb_virt +
2966 (virt - data->client.user_virt_sb_base);
2967}
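
/*
 * Worked example of the two translations above (addresses are made up):
 * with user_virt_sb_base = 0x40001000, sb_virt = 0xffffffc012345000 and
 * sb_phys = 0x8e000000, a client pointer virt = 0x40001100 maps to
 *	kphys = 0x8e000000 + 0x100 = 0x8e000100
 *	kvirt = 0xffffffc012345000 + 0x100 = 0xffffffc012345100
 * i.e. only the offset of virt inside the shared buffer is preserved.
 */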
2968
2969int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2970 struct qseecom_send_svc_cmd_req *req_ptr,
2971 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2972{
2973 int ret = 0;
2974 void *req_buf = NULL;
2975
2976 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2977 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2978 req_ptr, send_svc_ireq_ptr);
2979 return -EINVAL;
2980 }
2981
2982 /* Clients need to ensure req_buf is at base offset of shared buffer */
2983 if ((uintptr_t)req_ptr->cmd_req_buf !=
2984 data_ptr->client.user_virt_sb_base) {
2985 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2986 return -EINVAL;
2987 }
2988
2989 if (data_ptr->client.sb_length <
2990 sizeof(struct qseecom_rpmb_provision_key)) {
2991 pr_err("shared buffer is too small to hold key type\n");
2992 return -EINVAL;
2993 }
2994 req_buf = data_ptr->client.sb_virt;
2995
2996 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2997 send_svc_ireq_ptr->key_type =
2998 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2999 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
3000 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3001 data_ptr, (uintptr_t)req_ptr->resp_buf));
3002 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
3003
3004 return ret;
3005}
3006
3007int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
3008 struct qseecom_send_svc_cmd_req *req_ptr,
3009 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
3010{
3011 int ret = 0;
3012 uint32_t reqd_len_sb_in = 0;
3013
3014 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
3015 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
3016 req_ptr, send_svc_ireq_ptr);
3017 return -EINVAL;
3018 }
3019
3020 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
3021 if (reqd_len_sb_in > data_ptr->client.sb_length) {
3022		pr_err("Not enough memory to fit cmd_buf and resp_buf.\n");
3023 pr_err("Required: %u, Available: %zu\n",
3024 reqd_len_sb_in, data_ptr->client.sb_length);
3025 return -ENOMEM;
3026 }
3027
3028 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
3029 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
3030 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3031 data_ptr, (uintptr_t)req_ptr->resp_buf));
3032 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
3033
3034 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3035 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
3036
3037
3038 return ret;
3039}
3040
3041static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
3042 struct qseecom_send_svc_cmd_req *req)
3043{
3044 if (!req || !req->resp_buf || !req->cmd_req_buf) {
3045 pr_err("req or cmd buffer or response buffer is null\n");
3046 return -EINVAL;
3047 }
3048
3049 if (!data || !data->client.ihandle) {
3050 pr_err("Client or client handle is not initialized\n");
3051 return -EINVAL;
3052 }
3053
3054 if (data->client.sb_virt == NULL) {
3055 pr_err("sb_virt null\n");
3056 return -EINVAL;
3057 }
3058
3059 if (data->client.user_virt_sb_base == 0) {
3060 pr_err("user_virt_sb_base is null\n");
3061 return -EINVAL;
3062 }
3063
3064 if (data->client.sb_length == 0) {
3065 pr_err("sb_length is 0\n");
3066 return -EINVAL;
3067 }
3068
3069 if (((uintptr_t)req->cmd_req_buf <
3070 data->client.user_virt_sb_base) ||
3071 ((uintptr_t)req->cmd_req_buf >=
3072 (data->client.user_virt_sb_base + data->client.sb_length))) {
3073		pr_err("cmd buffer address not within shared buffer\n");
3074 return -EINVAL;
3075 }
3076 if (((uintptr_t)req->resp_buf <
3077 data->client.user_virt_sb_base) ||
3078 ((uintptr_t)req->resp_buf >=
3079 (data->client.user_virt_sb_base + data->client.sb_length))) {
3080		pr_err("response buffer address not within shared buffer\n");
3081 return -EINVAL;
3082 }
3083 if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
3084 (req->cmd_req_len > data->client.sb_length) ||
3085 (req->resp_len > data->client.sb_length)) {
3086 pr_err("cmd buf length or response buf length not valid\n");
3087 return -EINVAL;
3088 }
3089 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3090 pr_err("Integer overflow detected in req_len & rsp_len\n");
3091 return -EINVAL;
3092 }
3093
3094 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3095		pr_debug("Not enough memory to fit cmd_buf and resp_buf.\n");
3096		pr_debug("Required: %u, Available: %zu\n",
3097 (req->cmd_req_len + req->resp_len),
3098 data->client.sb_length);
3099 return -ENOMEM;
3100 }
3101 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3102 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3103 return -EINVAL;
3104 }
3105 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3106 pr_err("Integer overflow in resp_len & resp_buf\n");
3107 return -EINVAL;
3108 }
3109 if (data->client.user_virt_sb_base >
3110 (ULONG_MAX - data->client.sb_length)) {
3111 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3112 return -EINVAL;
3113 }
3114 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3115 ((uintptr_t)data->client.user_virt_sb_base +
3116 data->client.sb_length)) ||
3117 (((uintptr_t)req->resp_buf + req->resp_len) >
3118 ((uintptr_t)data->client.user_virt_sb_base +
3119 data->client.sb_length))) {
3120 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3121 return -EINVAL;
3122 }
3123 return 0;
3124}
3125
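/*
 * Send a service command (RPMB or FSM key) to QSEE on behalf of a userspace
 * client: copy in and validate the request, build the matching internal
 * request structure, flush the shared buffer, issue the SCM call, and finish
 * any QSEOS_RESULT_INCOMPLETE response.  Bandwidth/clock votes bracket the
 * SCM call.
 */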
3126static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
3127 void __user *argp)
3128{
3129 int ret = 0;
3130 struct qseecom_client_send_service_ireq send_svc_ireq;
3131 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
3132 struct qseecom_command_scm_resp resp;
3133 struct qseecom_send_svc_cmd_req req;
3134 void *send_req_ptr;
3135 size_t req_buf_size;
3136
3139 if (copy_from_user(&req,
3140 (void __user *)argp,
3141 sizeof(req))) {
3142 pr_err("copy_from_user failed\n");
3143 return -EFAULT;
3144 }
3145
3146 if (__validate_send_service_cmd_inputs(data, &req))
3147 return -EINVAL;
3148
3149 data->type = QSEECOM_SECURE_SERVICE;
3150
3151 switch (req.cmd_id) {
3152 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
3153 case QSEOS_RPMB_ERASE_COMMAND:
3154 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
3155 send_req_ptr = &send_svc_ireq;
3156 req_buf_size = sizeof(send_svc_ireq);
3157 if (__qseecom_process_rpmb_svc_cmd(data, &req,
3158 send_req_ptr))
3159 return -EINVAL;
3160 break;
3161 case QSEOS_FSM_LTEOTA_REQ_CMD:
3162 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
3163 case QSEOS_FSM_IKE_REQ_CMD:
3164 case QSEOS_FSM_IKE_REQ_RSP_CMD:
3165 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
3166 case QSEOS_FSM_OEM_FUSE_READ_ROW:
3167 case QSEOS_FSM_ENCFS_REQ_CMD:
3168 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
3169 send_req_ptr = &send_fsm_key_svc_ireq;
3170 req_buf_size = sizeof(send_fsm_key_svc_ireq);
3171 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
3172 send_req_ptr))
3173 return -EINVAL;
3174 break;
3175 default:
3176 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
3177 return -EINVAL;
3178 }
3179
3180 if (qseecom.support_bus_scaling) {
3181 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
3182 if (ret) {
3183 pr_err("Fail to set bw HIGH\n");
3184 return ret;
3185 }
3186 } else {
3187 ret = qseecom_perf_enable(data);
3188 if (ret) {
3189 pr_err("Failed to vote for clocks with err %d\n", ret);
3190 goto exit;
3191 }
3192 }
3193
3194 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3195 data->client.sb_virt, data->client.sb_length,
3196 ION_IOC_CLEAN_INV_CACHES);
3197 if (ret) {
3198 pr_err("cache operation failed %d\n", ret);
3199 goto exit;
3200 }
3201 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3202 (const void *)send_req_ptr,
3203 req_buf_size, &resp, sizeof(resp));
3204 if (ret) {
3205 pr_err("qseecom_scm_call failed with err: %d\n", ret);
3206 if (!qseecom.support_bus_scaling) {
3207 qsee_disable_clock_vote(data, CLK_DFAB);
3208 qsee_disable_clock_vote(data, CLK_SFPB);
3209 } else {
3210 __qseecom_add_bw_scale_down_timer(
3211 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3212 }
3213 goto exit;
3214 }
3215 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3216 data->client.sb_virt, data->client.sb_length,
3217 ION_IOC_INV_CACHES);
3218 if (ret) {
3219 pr_err("cache operation failed %d\n", ret);
3220 goto exit;
3221 }
3222 switch (resp.result) {
3223 case QSEOS_RESULT_SUCCESS:
3224 break;
3225 case QSEOS_RESULT_INCOMPLETE:
3226 pr_debug("qseos_result_incomplete\n");
3227 ret = __qseecom_process_incomplete_cmd(data, &resp);
3228 if (ret) {
3229 pr_err("process_incomplete_cmd fail with result: %d\n",
3230 resp.result);
3231 }
3232 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
3233 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05303234 if (put_user(resp.result,
3235 (uint32_t __user *)req.resp_buf)) {
3236 ret = -EINVAL;
3237 goto exit;
3238 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003239 ret = 0;
3240 }
3241 break;
3242 case QSEOS_RESULT_FAILURE:
3243 pr_err("scm call failed with resp.result: %d\n", resp.result);
3244 ret = -EINVAL;
3245 break;
3246 default:
3247 pr_err("Response result %d not supported\n",
3248 resp.result);
3249 ret = -EINVAL;
3250 break;
3251 }
3252 if (!qseecom.support_bus_scaling) {
3253 qsee_disable_clock_vote(data, CLK_DFAB);
3254 qsee_disable_clock_vote(data, CLK_SFPB);
3255 } else {
3256 __qseecom_add_bw_scale_down_timer(
3257 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3258 }
3259
3260exit:
3261 return ret;
3262}
3263
3264static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
3265 struct qseecom_send_cmd_req *req)
3266
3267{
3268 if (!data || !data->client.ihandle) {
3269 pr_err("Client or client handle is not initialized\n");
3270 return -EINVAL;
3271 }
3272 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
3273 (req->cmd_req_buf == NULL)) {
3274 pr_err("cmd buffer or response buffer is null\n");
3275 return -EINVAL;
3276 }
3277 if (((uintptr_t)req->cmd_req_buf <
3278 data->client.user_virt_sb_base) ||
3279 ((uintptr_t)req->cmd_req_buf >=
3280 (data->client.user_virt_sb_base + data->client.sb_length))) {
3281 pr_err("cmd buffer address not within shared bufffer\n");
3282 return -EINVAL;
3283 }
3284 if (((uintptr_t)req->resp_buf <
3285 data->client.user_virt_sb_base) ||
3286 ((uintptr_t)req->resp_buf >=
3287 (data->client.user_virt_sb_base + data->client.sb_length))) {
3288 pr_err("response buffer address not within shared bufffer\n");
3289 return -EINVAL;
3290 }
3291 if ((req->cmd_req_len == 0) ||
3292 (req->cmd_req_len > data->client.sb_length) ||
3293 (req->resp_len > data->client.sb_length)) {
3294 pr_err("cmd buf length or response buf length not valid\n");
3295 return -EINVAL;
3296 }
3297 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3298 pr_err("Integer overflow detected in req_len & rsp_len\n");
3299 return -EINVAL;
3300 }
3301
3302 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3303 pr_debug("Not enough memory to fit cmd_buf.\n");
3304 pr_debug("resp_buf. Required: %u, Available: %zu\n",
3305 (req->cmd_req_len + req->resp_len),
3306 data->client.sb_length);
3307 return -ENOMEM;
3308 }
3309 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3310 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3311 return -EINVAL;
3312 }
3313 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3314 pr_err("Integer overflow in resp_len & resp_buf\n");
3315 return -EINVAL;
3316 }
3317 if (data->client.user_virt_sb_base >
3318 (ULONG_MAX - data->client.sb_length)) {
3319 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3320 return -EINVAL;
3321 }
3322 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3323 ((uintptr_t)data->client.user_virt_sb_base +
3324 data->client.sb_length)) ||
3325 (((uintptr_t)req->resp_buf + req->resp_len) >
3326 ((uintptr_t)data->client.user_virt_sb_base +
3327 data->client.sb_length))) {
3328 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3329 return -EINVAL;
3330 }
3331 return 0;
3332}
3333
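/*
 * Post-process an SCM response when QSEE reentrancy is supported.  A
 * BLOCKED_ON_LISTENER result is first unblocked and then handled like an
 * INCOMPLETE result; app_block_ref_cnt and app_blocked bracket the
 * incomplete processing so concurrent senders can see this app is blocked.
 */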
3334int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
3335 struct qseecom_registered_app_list *ptr_app,
3336 struct qseecom_dev_handle *data)
3337{
3338 int ret = 0;
3339
3340 switch (resp->result) {
3341 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
3342 pr_warn("App(%d) %s is blocked on listener %d\n",
3343 data->client.app_id, data->client.app_name,
3344 resp->data);
3345 ret = __qseecom_process_reentrancy_blocked_on_listener(
3346 resp, ptr_app, data);
3347 if (ret) {
3348 pr_err("failed to process App(%d) %s is blocked on listener %d\n",
3349 data->client.app_id, data->client.app_name, resp->data);
3350 return ret;
3351 }
3352
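/* fall through to process the incomplete request */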
3353 case QSEOS_RESULT_INCOMPLETE:
3354 qseecom.app_block_ref_cnt++;
3355 ptr_app->app_blocked = true;
3356 ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
3357 ptr_app->app_blocked = false;
3358 qseecom.app_block_ref_cnt--;
Zhen Kongcc580932019-04-23 22:16:56 -07003359 wake_up_interruptible_all(&qseecom.app_block_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003360 if (ret)
3361 pr_err("process_incomplete_cmd failed err: %d\n",
3362 ret);
3363 return ret;
3364 case QSEOS_RESULT_SUCCESS:
3365 return ret;
3366 default:
3367 pr_err("Response result %d not supported\n",
3368 resp->result);
3369 return -EINVAL;
3370 }
3371}
3372
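/*
 * Core send-command path for a loaded TA: look up the registered app, build
 * a 32-bit or 64-bit send-data request (based on QSEE version) carrying the
 * physical addresses of the client's cmd/resp buffers and the sglist info
 * table, do cache maintenance on the shared buffer, issue the SCM call, and
 * handle reentrancy/incomplete results afterwards.
 */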
3373static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
3374 struct qseecom_send_cmd_req *req)
3375{
3376 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07003377 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003378 u32 reqd_len_sb_in = 0;
3379 struct qseecom_client_send_data_ireq send_data_req = {0};
3380 struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
3381 struct qseecom_command_scm_resp resp;
3382 unsigned long flags;
3383 struct qseecom_registered_app_list *ptr_app;
3384 bool found_app = false;
3385 void *cmd_buf = NULL;
3386 size_t cmd_len;
3387 struct sglist_info *table = data->sglistinfo_ptr;
3388
3389 reqd_len_sb_in = req->cmd_req_len + req->resp_len;
3390 /* find app_id & img_name from list */
3391 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
3392 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
3393 list) {
3394 if ((ptr_app->app_id == data->client.app_id) &&
3395 (!strcmp(ptr_app->app_name, data->client.app_name))) {
3396 found_app = true;
3397 break;
3398 }
3399 }
3400 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
3401
3402 if (!found_app) {
3403 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
3404 (char *)data->client.app_name);
3405 return -ENOENT;
3406 }
3407
Zhen Kong03b2eae2019-09-17 16:58:46 -07003408 if (__qseecom_find_pending_unload_app(data->client.app_id,
3409 data->client.app_name)) {
3410 pr_err("app %d (%s) unload is pending\n",
3411 data->client.app_id, data->client.app_name);
3412 return -ENOENT;
3413 }
3414
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003415 if (qseecom.qsee_version < QSEE_VERSION_40) {
3416 send_data_req.app_id = data->client.app_id;
3417 send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3418 data, (uintptr_t)req->cmd_req_buf));
3419 send_data_req.req_len = req->cmd_req_len;
3420 send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3421 data, (uintptr_t)req->resp_buf));
3422 send_data_req.rsp_len = req->resp_len;
3423 send_data_req.sglistinfo_ptr =
3424 (uint32_t)virt_to_phys(table);
3425 send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3426 dmac_flush_range((void *)table,
3427 (void *)table + SGLISTINFO_TABLE_SIZE);
3428 cmd_buf = (void *)&send_data_req;
3429 cmd_len = sizeof(struct qseecom_client_send_data_ireq);
3430 } else {
3431 send_data_req_64bit.app_id = data->client.app_id;
3432 send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
3433 (uintptr_t)req->cmd_req_buf);
3434 send_data_req_64bit.req_len = req->cmd_req_len;
3435 send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
3436 (uintptr_t)req->resp_buf);
3437 send_data_req_64bit.rsp_len = req->resp_len;
3438 /* check if 32bit app's phys_addr region is under 4GB.*/
3439 if ((data->client.app_arch == ELFCLASS32) &&
3440 ((send_data_req_64bit.req_ptr >=
3441 PHY_ADDR_4G - send_data_req_64bit.req_len) ||
3442 (send_data_req_64bit.rsp_ptr >=
3443 PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
3444 pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
3445 data->client.app_name,
3446 send_data_req_64bit.req_ptr,
3447 send_data_req_64bit.req_len,
3448 send_data_req_64bit.rsp_ptr,
3449 send_data_req_64bit.rsp_len);
3450 return -EFAULT;
3451 }
3452 send_data_req_64bit.sglistinfo_ptr =
3453 (uint64_t)virt_to_phys(table);
3454 send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3455 dmac_flush_range((void *)table,
3456 (void *)table + SGLISTINFO_TABLE_SIZE);
3457 cmd_buf = (void *)&send_data_req_64bit;
3458 cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
3459 }
3460
3461 if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
3462 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
3463 else
3464 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
3465
3466 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3467 data->client.sb_virt,
3468 reqd_len_sb_in,
3469 ION_IOC_CLEAN_INV_CACHES);
3470 if (ret) {
3471 pr_err("cache operation failed %d\n", ret);
3472 return ret;
3473 }
3474
3475 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
3476
3477 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3478 cmd_buf, cmd_len,
3479 &resp, sizeof(resp));
3480 if (ret) {
3481 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
3482 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07003483 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003484 }
3485
3486 if (qseecom.qsee_reentrancy_support) {
3487 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07003488 if (ret)
3489 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003490 } else {
3491 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
3492 ret = __qseecom_process_incomplete_cmd(data, &resp);
3493 if (ret) {
3494 pr_err("process_incomplete_cmd failed err: %d\n",
3495 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07003496 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003497 }
3498 } else {
3499 if (resp.result != QSEOS_RESULT_SUCCESS) {
3500 pr_err("Response result %d not supported\n",
3501 resp.result);
3502 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07003503 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003504 }
3505 }
3506 }
Zhen Kong4af480e2017-09-19 14:34:16 -07003507exit:
3508 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003509 data->client.sb_virt, data->client.sb_length,
3510 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07003511 if (ret2) {
3512 pr_err("cache operation failed %d\n", ret2);
3513 return ret2;
3514 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003515 return ret;
3516}
3517
3518static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3519{
3520 int ret = 0;
3521 struct qseecom_send_cmd_req req;
3522
3523 ret = copy_from_user(&req, argp, sizeof(req));
3524 if (ret) {
3525 pr_err("copy_from_user failed\n");
3526 return ret;
3527 }
3528
3529 if (__validate_send_cmd_inputs(data, &req))
3530 return -EINVAL;
3531
3532 ret = __qseecom_send_cmd(data, &req);
3533
3534 return ret;
3538}
3539
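/*
 * Ensure the caller-supplied cmd_buf_offset leaves room for at least a
 * 32-bit physical address inside the cmd (client app) or resp (listener)
 * buffer; the 64-bit variant below checks for a 64-bit slot instead.
 */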
3540int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3541 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3542 struct qseecom_dev_handle *data, int i)
3543{
3544 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3545 (req->ifd_data[i].fd > 0)) {
3546 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3547 (req->ifd_data[i].cmd_buf_offset >
3548 req->cmd_req_len - sizeof(uint32_t))) {
3549 pr_err("Invalid offset (req len) 0x%x\n",
3550 req->ifd_data[i].cmd_buf_offset);
3551 return -EINVAL;
3552 }
3553 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3554 (lstnr_resp->ifd_data[i].fd > 0)) {
3555 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3556 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3557 lstnr_resp->resp_len - sizeof(uint32_t))) {
3558 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3559 lstnr_resp->ifd_data[i].cmd_buf_offset);
3560 return -EINVAL;
3561 }
3562 }
3563 return 0;
3564}
3565
Zhen Kongd097c6e02019-08-01 16:10:20 -07003566static int __boundary_checks_offset_64(struct qseecom_send_modfd_cmd_req *req,
3567 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3568 struct qseecom_dev_handle *data, int i)
3569{
3570
3571 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3572 (req->ifd_data[i].fd > 0)) {
3573 if ((req->cmd_req_len < sizeof(uint64_t)) ||
3574 (req->ifd_data[i].cmd_buf_offset >
3575 req->cmd_req_len - sizeof(uint64_t))) {
3576 pr_err("Invalid offset (req len) 0x%x\n",
3577 req->ifd_data[i].cmd_buf_offset);
3578 return -EINVAL;
3579 }
3580 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3581 (lstnr_resp->ifd_data[i].fd > 0)) {
3582 if ((lstnr_resp->resp_len < sizeof(uint64_t)) ||
3583 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3584 lstnr_resp->resp_len - sizeof(uint64_t))) {
3585 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3586 lstnr_resp->ifd_data[i].cmd_buf_offset);
3587 return -EINVAL;
3588 }
3589 }
3590 return 0;
3591}
3592
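/*
 * For each ion fd attached to a modfd request, import the buffer and patch
 * its physical address (single sg entry) or a qseecom_sg_entry list
 * (multiple entries) into the message at cmd_buf_offset, with cache
 * maintenance on the referenced memory.  When @cleanup is true the same
 * fields are zeroed again after the command has completed.
 */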
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003593static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
3594 struct qseecom_dev_handle *data)
3595{
3596 struct ion_handle *ihandle;
3597 char *field;
3598 int ret = 0;
3599 int i = 0;
3600 uint32_t len = 0;
3601 struct scatterlist *sg;
3602 struct qseecom_send_modfd_cmd_req *req = NULL;
3603 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3604 struct qseecom_registered_listener_list *this_lstnr = NULL;
3605 uint32_t offset;
3606 struct sg_table *sg_ptr;
3607
3608 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3609 (data->type != QSEECOM_CLIENT_APP))
3610 return -EFAULT;
3611
3612 if (msg == NULL) {
3613 pr_err("Invalid address\n");
3614 return -EINVAL;
3615 }
3616 if (data->type == QSEECOM_LISTENER_SERVICE) {
3617 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3618 this_lstnr = __qseecom_find_svc(data->listener.id);
3619 if (IS_ERR_OR_NULL(this_lstnr)) {
3620 pr_err("Invalid listener ID\n");
3621 return -ENOMEM;
3622 }
3623 } else {
3624 req = (struct qseecom_send_modfd_cmd_req *)msg;
3625 }
3626
3627 for (i = 0; i < MAX_ION_FD; i++) {
3628 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3629 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003630 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003631 req->ifd_data[i].fd);
3632 if (IS_ERR_OR_NULL(ihandle)) {
3633 pr_err("Ion client can't retrieve the handle\n");
3634 return -ENOMEM;
3635 }
3636 field = (char *) req->cmd_req_buf +
3637 req->ifd_data[i].cmd_buf_offset;
3638 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3639 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003640 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003641 lstnr_resp->ifd_data[i].fd);
3642 if (IS_ERR_OR_NULL(ihandle)) {
3643 pr_err("Ion client can't retrieve the handle\n");
3644 return -ENOMEM;
3645 }
3646 field = lstnr_resp->resp_buf_ptr +
3647 lstnr_resp->ifd_data[i].cmd_buf_offset;
3648 } else {
3649 continue;
3650 }
3651 /* Populate the cmd data structure with the phys_addr */
3652 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3653 if (IS_ERR_OR_NULL(sg_ptr)) {
3654 pr_err("IOn client could not retrieve sg table\n");
3655 goto err;
3656 }
3657 if (sg_ptr->nents == 0) {
3658 pr_err("Num of scattered entries is 0\n");
3659 goto err;
3660 }
3661 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3662 pr_err("Num of scattered entries");
3663 pr_err(" (%d) is greater than max supported %d\n",
3664 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3665 goto err;
3666 }
3667 sg = sg_ptr->sgl;
3668 if (sg_ptr->nents == 1) {
3669 uint32_t *update;
3670
3671 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3672 goto err;
3673 if ((data->type == QSEECOM_CLIENT_APP &&
3674 (data->client.app_arch == ELFCLASS32 ||
3675 data->client.app_arch == ELFCLASS64)) ||
3676 (data->type == QSEECOM_LISTENER_SERVICE)) {
3677 /*
3678 * Check if sg list phy add region is under 4GB
3679 */
3680 if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
3681 (!cleanup) &&
3682 ((uint64_t)sg_dma_address(sg_ptr->sgl)
3683 >= PHY_ADDR_4G - sg->length)) {
3684 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3685 data->client.app_name,
3686 &(sg_dma_address(sg_ptr->sgl)),
3687 sg->length);
3688 goto err;
3689 }
3690 update = (uint32_t *) field;
3691 *update = cleanup ? 0 :
3692 (uint32_t)sg_dma_address(sg_ptr->sgl);
3693 } else {
3694 pr_err("QSEE app arch %u is not supported\n",
3695 data->client.app_arch);
3696 goto err;
3697 }
3698 len += (uint32_t)sg->length;
3699 } else {
3700 struct qseecom_sg_entry *update;
3701 int j = 0;
3702
3703 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3704 (req->ifd_data[i].fd > 0)) {
3705
3706 if ((req->cmd_req_len <
3707 SG_ENTRY_SZ * sg_ptr->nents) ||
3708 (req->ifd_data[i].cmd_buf_offset >
3709 (req->cmd_req_len -
3710 SG_ENTRY_SZ * sg_ptr->nents))) {
3711 pr_err("Invalid offset = 0x%x\n",
3712 req->ifd_data[i].cmd_buf_offset);
3713 goto err;
3714 }
3715
3716 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3717 (lstnr_resp->ifd_data[i].fd > 0)) {
3718
3719 if ((lstnr_resp->resp_len <
3720 SG_ENTRY_SZ * sg_ptr->nents) ||
3721 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3722 (lstnr_resp->resp_len -
3723 SG_ENTRY_SZ * sg_ptr->nents))) {
3724 goto err;
3725 }
3726 }
3727 if ((data->type == QSEECOM_CLIENT_APP &&
3728 (data->client.app_arch == ELFCLASS32 ||
3729 data->client.app_arch == ELFCLASS64)) ||
3730 (data->type == QSEECOM_LISTENER_SERVICE)) {
3731 update = (struct qseecom_sg_entry *)field;
3732 for (j = 0; j < sg_ptr->nents; j++) {
3733 /*
3734 * Check if sg list PA is under 4GB
3735 */
3736 if ((qseecom.qsee_version >=
3737 QSEE_VERSION_40) &&
3738 (!cleanup) &&
3739 ((uint64_t)(sg_dma_address(sg))
3740 >= PHY_ADDR_4G - sg->length)) {
3741 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3742 data->client.app_name,
3743 &(sg_dma_address(sg)),
3744 sg->length);
3745 goto err;
3746 }
3747 update->phys_addr = cleanup ? 0 :
3748 (uint32_t)sg_dma_address(sg);
3749 update->len = cleanup ? 0 : sg->length;
3750 update++;
3751 len += sg->length;
3752 sg = sg_next(sg);
3753 }
3754 } else {
3755 pr_err("QSEE app arch %u is not supported\n",
3756 data->client.app_arch);
3757 goto err;
3758 }
3759 }
3760
3761 if (cleanup) {
3762 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3763 ihandle, NULL, len,
3764 ION_IOC_INV_CACHES);
3765 if (ret) {
3766 pr_err("cache operation failed %d\n", ret);
3767 goto err;
3768 }
3769 } else {
3770 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3771 ihandle, NULL, len,
3772 ION_IOC_CLEAN_INV_CACHES);
3773 if (ret) {
3774 pr_err("cache operation failed %d\n", ret);
3775 goto err;
3776 }
3777 if (data->type == QSEECOM_CLIENT_APP) {
3778 offset = req->ifd_data[i].cmd_buf_offset;
3779 data->sglistinfo_ptr[i].indexAndFlags =
3780 SGLISTINFO_SET_INDEX_FLAG(
3781 (sg_ptr->nents == 1), 0, offset);
3782 data->sglistinfo_ptr[i].sizeOrCount =
3783 (sg_ptr->nents == 1) ?
3784 sg->length : sg_ptr->nents;
3785 data->sglist_cnt = i + 1;
3786 } else {
3787 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3788 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3789 (uintptr_t)this_lstnr->sb_virt);
3790 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3791 SGLISTINFO_SET_INDEX_FLAG(
3792 (sg_ptr->nents == 1), 0, offset);
3793 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3794 (sg_ptr->nents == 1) ?
3795 sg->length : sg_ptr->nents;
3796 this_lstnr->sglist_cnt = i + 1;
3797 }
3798 }
3799 /* Deallocate the handle */
3800 if (!IS_ERR_OR_NULL(ihandle))
3801 ion_free(qseecom.ion_clnt, ihandle);
3802 }
3803 return ret;
3804err:
3805 if (!IS_ERR_OR_NULL(ihandle))
3806 ion_free(qseecom.ion_clnt, ihandle);
3807 return -ENOMEM;
3808}
3809
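/*
 * Used when an sg list has more than QSEECOM_MAX_SG_ENTRY entries: allocate
 * a page-aligned DMA-coherent buffer holding the full 64-bit entry list and
 * write a version-2 sg-list header pointing at it into the command buffer
 * instead.  The allocation is recorded in sec_buf_fd[] so the cleanup pass
 * can free it.
 */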
3810static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3811 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3812{
3813 struct scatterlist *sg = sg_ptr->sgl;
3814 struct qseecom_sg_entry_64bit *sg_entry;
3815 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3816 void *buf;
3817 uint i;
3818 size_t size;
3819 dma_addr_t coh_pmem;
3820
3821 if (fd_idx >= MAX_ION_FD) {
3822 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3823 return -ENOMEM;
3824 }
3825 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3826 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3827 /* Allocate a contiguous kernel buffer */
3828 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3829 size = (size + PAGE_SIZE) & PAGE_MASK;
3830 buf = dma_alloc_coherent(qseecom.pdev,
3831 size, &coh_pmem, GFP_KERNEL);
3832 if (buf == NULL) {
3833 pr_err("failed to alloc memory for sg buf\n");
3834 return -ENOMEM;
3835 }
3836 /* update qseecom_sg_list_buf_hdr_64bit */
3837 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3838 buf_hdr->new_buf_phys_addr = coh_pmem;
3839 buf_hdr->nents_total = sg_ptr->nents;
3840 /* save the left sg entries into new allocated buf */
3841 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3842 for (i = 0; i < sg_ptr->nents; i++) {
3843 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3844 sg_entry->len = sg->length;
3845 sg_entry++;
3846 sg = sg_next(sg);
3847 }
3848
3849 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3850 data->client.sec_buf_fd[fd_idx].vbase = buf;
3851 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3852 data->client.sec_buf_fd[fd_idx].size = size;
3853
3854 return 0;
3855}
3856
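/*
 * 64-bit counterpart of __qseecom_update_cmd_buf(): patches 64-bit physical
 * addresses / qseecom_sg_entry_64bit lists into the message, falling back to
 * the external sg-list buffer above when the entry count exceeds
 * QSEECOM_MAX_SG_ENTRY.
 */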
3857static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
3858 struct qseecom_dev_handle *data)
3859{
3860 struct ion_handle *ihandle;
3861 char *field;
3862 int ret = 0;
3863 int i = 0;
3864 uint32_t len = 0;
3865 struct scatterlist *sg;
3866 struct qseecom_send_modfd_cmd_req *req = NULL;
3867 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3868 struct qseecom_registered_listener_list *this_lstnr = NULL;
3869 uint32_t offset;
3870 struct sg_table *sg_ptr;
3871
3872 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3873 (data->type != QSEECOM_CLIENT_APP))
3874 return -EFAULT;
3875
3876 if (msg == NULL) {
3877 pr_err("Invalid address\n");
3878 return -EINVAL;
3879 }
3880 if (data->type == QSEECOM_LISTENER_SERVICE) {
3881 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3882 this_lstnr = __qseecom_find_svc(data->listener.id);
3883 if (IS_ERR_OR_NULL(this_lstnr)) {
3884 pr_err("Invalid listener ID\n");
3885 return -ENOMEM;
3886 }
3887 } else {
3888 req = (struct qseecom_send_modfd_cmd_req *)msg;
3889 }
3890
3891 for (i = 0; i < MAX_ION_FD; i++) {
3892 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3893 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003894 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003895 req->ifd_data[i].fd);
3896 if (IS_ERR_OR_NULL(ihandle)) {
3897 pr_err("Ion client can't retrieve the handle\n");
3898 return -ENOMEM;
3899 }
3900 field = (char *) req->cmd_req_buf +
3901 req->ifd_data[i].cmd_buf_offset;
3902 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3903 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003904 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003905 lstnr_resp->ifd_data[i].fd);
3906 if (IS_ERR_OR_NULL(ihandle)) {
3907 pr_err("Ion client can't retrieve the handle\n");
3908 return -ENOMEM;
3909 }
3910 field = lstnr_resp->resp_buf_ptr +
3911 lstnr_resp->ifd_data[i].cmd_buf_offset;
3912 } else {
3913 continue;
3914 }
3915 /* Populate the cmd data structure with the phys_addr */
3916 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3917 if (IS_ERR_OR_NULL(sg_ptr)) {
3918 pr_err("IOn client could not retrieve sg table\n");
3919 goto err;
3920 }
3921 if (sg_ptr->nents == 0) {
3922 pr_err("Num of scattered entries is 0\n");
3923 goto err;
3924 }
3925 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3926 pr_warn("Num of scattered entries");
3927 pr_warn(" (%d) is greater than %d\n",
3928 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3929 if (cleanup) {
3930 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3931 data->client.sec_buf_fd[i].vbase)
3932 dma_free_coherent(qseecom.pdev,
3933 data->client.sec_buf_fd[i].size,
3934 data->client.sec_buf_fd[i].vbase,
3935 data->client.sec_buf_fd[i].pbase);
3936 } else {
3937 ret = __qseecom_allocate_sg_list_buffer(data,
3938 field, i, sg_ptr);
3939 if (ret) {
3940 pr_err("Failed to allocate sg list buffer\n");
3941 goto err;
3942 }
3943 }
3944 len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
3945 sg = sg_ptr->sgl;
3946 goto cleanup;
3947 }
3948 sg = sg_ptr->sgl;
3949 if (sg_ptr->nents == 1) {
3950 uint64_t *update_64bit;
3951
Zhen Kongd097c6e02019-08-01 16:10:20 -07003952 if (__boundary_checks_offset_64(req, lstnr_resp,
3953 data, i))
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003954 goto err;
3955 /* 64bit app uses 64bit address */
3956 update_64bit = (uint64_t *) field;
3957 *update_64bit = cleanup ? 0 :
3958 (uint64_t)sg_dma_address(sg_ptr->sgl);
3959 len += (uint32_t)sg->length;
3960 } else {
3961 struct qseecom_sg_entry_64bit *update_64bit;
3962 int j = 0;
3963
3964 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3965 (req->ifd_data[i].fd > 0)) {
3966
3967 if ((req->cmd_req_len <
3968 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3969 (req->ifd_data[i].cmd_buf_offset >
3970 (req->cmd_req_len -
3971 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3972 pr_err("Invalid offset = 0x%x\n",
3973 req->ifd_data[i].cmd_buf_offset);
3974 goto err;
3975 }
3976
3977 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3978 (lstnr_resp->ifd_data[i].fd > 0)) {
3979
3980 if ((lstnr_resp->resp_len <
3981 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3982 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3983 (lstnr_resp->resp_len -
3984 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3985 goto err;
3986 }
3987 }
3988 /* 64bit app uses 64bit address */
3989 update_64bit = (struct qseecom_sg_entry_64bit *)field;
3990 for (j = 0; j < sg_ptr->nents; j++) {
3991 update_64bit->phys_addr = cleanup ? 0 :
3992 (uint64_t)sg_dma_address(sg);
3993 update_64bit->len = cleanup ? 0 :
3994 (uint32_t)sg->length;
3995 update_64bit++;
3996 len += sg->length;
3997 sg = sg_next(sg);
3998 }
3999 }
4000cleanup:
4001 if (cleanup) {
4002 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
4003 ihandle, NULL, len,
4004 ION_IOC_INV_CACHES);
4005 if (ret) {
4006 pr_err("cache operation failed %d\n", ret);
4007 goto err;
4008 }
4009 } else {
4010 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
4011 ihandle, NULL, len,
4012 ION_IOC_CLEAN_INV_CACHES);
4013 if (ret) {
4014 pr_err("cache operation failed %d\n", ret);
4015 goto err;
4016 }
4017 if (data->type == QSEECOM_CLIENT_APP) {
4018 offset = req->ifd_data[i].cmd_buf_offset;
4019 data->sglistinfo_ptr[i].indexAndFlags =
4020 SGLISTINFO_SET_INDEX_FLAG(
4021 (sg_ptr->nents == 1), 1, offset);
4022 data->sglistinfo_ptr[i].sizeOrCount =
4023 (sg_ptr->nents == 1) ?
4024 sg->length : sg_ptr->nents;
4025 data->sglist_cnt = i + 1;
4026 } else {
4027 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
4028 + (uintptr_t)lstnr_resp->resp_buf_ptr -
4029 (uintptr_t)this_lstnr->sb_virt);
4030 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
4031 SGLISTINFO_SET_INDEX_FLAG(
4032 (sg_ptr->nents == 1), 1, offset);
4033 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
4034 (sg_ptr->nents == 1) ?
4035 sg->length : sg_ptr->nents;
4036 this_lstnr->sglist_cnt = i + 1;
4037 }
4038 }
4039 /* Deallocate the handle */
4040 if (!IS_ERR_OR_NULL(ihandle))
4041 ion_free(qseecom.ion_clnt, ihandle);
4042 }
4043 return ret;
4044err:
4045 for (i = 0; i < MAX_ION_FD; i++)
4046 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
4047 data->client.sec_buf_fd[i].vbase)
4048 dma_free_coherent(qseecom.pdev,
4049 data->client.sec_buf_fd[i].size,
4050 data->client.sec_buf_fd[i].vbase,
4051 data->client.sec_buf_fd[i].pbase);
4052 if (!IS_ERR_OR_NULL(ihandle))
4053 ion_free(qseecom.ion_clnt, ihandle);
4054 return -ENOMEM;
4055}
4056
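/*
 * Send-modfd path: validate the embedded send-cmd fields, translate the user
 * cmd/resp pointers to kernel addresses, patch in the ion fd physical
 * addresses (32- or 64-bit layout), run the command, then zero the patched
 * fields again.
 */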
4057static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
4058 void __user *argp,
4059 bool is_64bit_addr)
4060{
4061 int ret = 0;
4062 int i;
4063 struct qseecom_send_modfd_cmd_req req;
4064 struct qseecom_send_cmd_req send_cmd_req;
4065
4066 ret = copy_from_user(&req, argp, sizeof(req));
4067 if (ret) {
4068 pr_err("copy_from_user failed\n");
4069 return ret;
4070 }
4071
4072 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
4073 send_cmd_req.cmd_req_len = req.cmd_req_len;
4074 send_cmd_req.resp_buf = req.resp_buf;
4075 send_cmd_req.resp_len = req.resp_len;
4076
4077 if (__validate_send_cmd_inputs(data, &send_cmd_req))
4078 return -EINVAL;
4079
4080 /* validate offsets */
4081 for (i = 0; i < MAX_ION_FD; i++) {
4082 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
4083 pr_err("Invalid offset %d = 0x%x\n",
4084 i, req.ifd_data[i].cmd_buf_offset);
4085 return -EINVAL;
4086 }
4087 }
4088 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
4089 (uintptr_t)req.cmd_req_buf);
4090 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
4091 (uintptr_t)req.resp_buf);
4092
4093 if (!is_64bit_addr) {
4094 ret = __qseecom_update_cmd_buf(&req, false, data);
4095 if (ret)
4096 return ret;
4097 ret = __qseecom_send_cmd(data, &send_cmd_req);
4098 if (ret)
4099 return ret;
4100 ret = __qseecom_update_cmd_buf(&req, true, data);
4101 if (ret)
4102 return ret;
4103 } else {
4104 ret = __qseecom_update_cmd_buf_64(&req, false, data);
4105 if (ret)
4106 return ret;
4107 ret = __qseecom_send_cmd(data, &send_cmd_req);
4108 if (ret)
4109 return ret;
4110 ret = __qseecom_update_cmd_buf_64(&req, true, data);
4111 if (ret)
4112 return ret;
4113 }
4114
4115 return ret;
4116}
4117
4118static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
4119 void __user *argp)
4120{
4121 return __qseecom_send_modfd_cmd(data, argp, false);
4122}
4123
4124static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
4125 void __user *argp)
4126{
4127 return __qseecom_send_modfd_cmd(data, argp, true);
4128}
4129
4130
4131
4132static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
4133 struct qseecom_registered_listener_list *svc)
4134{
4135 int ret;
4136
Zhen Kongf5087172018-10-11 17:22:05 -07004137 ret = (svc->rcv_req_flag == 1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08004138 return ret || data->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004139}
4140
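/*
 * Block the listener thread until a request for this listener is pending
 * (rcv_req_flag set elsewhere in the driver) or the client is aborted;
 * returns -ERESTARTSYS if interrupted by a signal.
 */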
4141static int qseecom_receive_req(struct qseecom_dev_handle *data)
4142{
4143 int ret = 0;
4144 struct qseecom_registered_listener_list *this_lstnr;
4145
Zhen Kongbcdeda22018-11-16 13:50:51 -08004146 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004147 this_lstnr = __qseecom_find_svc(data->listener.id);
4148 if (!this_lstnr) {
4149 pr_err("Invalid listener ID\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08004150 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004151 return -ENODATA;
4152 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004153 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004154
4155 while (1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05304156 if (wait_event_interruptible(this_lstnr->rcv_req_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004157 __qseecom_listener_has_rcvd_req(data,
4158 this_lstnr))) {
Zhen Kong52ce9062018-09-24 14:33:27 -07004159 pr_debug("Interrupted: exiting Listener Service = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004160 (uint32_t)data->listener.id);
4161 /* woken up for different reason */
4162 return -ERESTARTSYS;
4163 }
4164
Zhen Kongbcdeda22018-11-16 13:50:51 -08004165 if (data->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004166 pr_err("Aborting Listener Service = %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07004167 (uint32_t)data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004168 return -ENODEV;
4169 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004170 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004171 this_lstnr->rcv_req_flag = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08004172 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004173 break;
4174 }
4175 return ret;
4176}
4177
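/*
 * Sanity-check a TA's .mdt image before parsing it: the ELF class must be
 * 32- or 64-bit, the magic must match, there must be at least one program
 * header, and all program headers must fit inside the firmware blob.
 */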
4178static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
4179{
4180 unsigned char app_arch = 0;
4181 struct elf32_hdr *ehdr;
4182 struct elf64_hdr *ehdr64;
4183
4184 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4185
4186 switch (app_arch) {
4187 case ELFCLASS32: {
4188 ehdr = (struct elf32_hdr *)fw_entry->data;
4189 if (fw_entry->size < sizeof(*ehdr)) {
4190 pr_err("%s: Not big enough to be an elf32 header\n",
4191 qseecom.pdev->init_name);
4192 return false;
4193 }
4194 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
4195 pr_err("%s: Not an elf32 header\n",
4196 qseecom.pdev->init_name);
4197 return false;
4198 }
4199 if (ehdr->e_phnum == 0) {
4200 pr_err("%s: No loadable segments\n",
4201 qseecom.pdev->init_name);
4202 return false;
4203 }
4204 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
4205 sizeof(struct elf32_hdr) > fw_entry->size) {
4206 pr_err("%s: Program headers not within mdt\n",
4207 qseecom.pdev->init_name);
4208 return false;
4209 }
4210 break;
4211 }
4212 case ELFCLASS64: {
4213 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4214 if (fw_entry->size < sizeof(*ehdr64)) {
4215 pr_err("%s: Not big enough to be an elf64 header\n",
4216 qseecom.pdev->init_name);
4217 return false;
4218 }
4219 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
4220 pr_err("%s: Not an elf64 header\n",
4221 qseecom.pdev->init_name);
4222 return false;
4223 }
4224 if (ehdr64->e_phnum == 0) {
4225 pr_err("%s: No loadable segments\n",
4226 qseecom.pdev->init_name);
4227 return false;
4228 }
4229 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
4230 sizeof(struct elf64_hdr) > fw_entry->size) {
4231 pr_err("%s: Program headers not within mdt\n",
4232 qseecom.pdev->init_name);
4233 return false;
4234 }
4235 break;
4236 }
4237 default: {
4238 pr_err("QSEE app arch %u is not supported\n", app_arch);
4239 return false;
4240 }
4241 }
4242 return true;
4243}
4244
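/*
 * Compute the total size of a split TA image (<appname>.mdt plus the
 * <appname>.bNN segment files) and report its ELF class, guarding against
 * 32-bit overflow while summing the pieces.
 */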
4245static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
4246 uint32_t *app_arch)
4247{
4248 int ret = -1;
4249 int i = 0, rc = 0;
4250 const struct firmware *fw_entry = NULL;
4251 char fw_name[MAX_APP_NAME_SIZE];
4252 struct elf32_hdr *ehdr;
4253 struct elf64_hdr *ehdr64;
4254 int num_images = 0;
4255
4256 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4257 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4258 if (rc) {
4259 pr_err("error with request_firmware\n");
4260 ret = -EIO;
4261 goto err;
4262 }
4263 if (!__qseecom_is_fw_image_valid(fw_entry)) {
4264 ret = -EIO;
4265 goto err;
4266 }
4267 *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4268 *fw_size = fw_entry->size;
4269 if (*app_arch == ELFCLASS32) {
4270 ehdr = (struct elf32_hdr *)fw_entry->data;
4271 num_images = ehdr->e_phnum;
4272 } else if (*app_arch == ELFCLASS64) {
4273 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4274 num_images = ehdr64->e_phnum;
4275 } else {
4276 pr_err("QSEE %s app, arch %u is not supported\n",
4277 appname, *app_arch);
4278 ret = -EIO;
4279 goto err;
4280 }
4281 pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
4282 release_firmware(fw_entry);
4283 fw_entry = NULL;
4284 for (i = 0; i < num_images; i++) {
4285 memset(fw_name, 0, sizeof(fw_name));
4286 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4287 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4288 if (ret)
4289 goto err;
4290 if (*fw_size > U32_MAX - fw_entry->size) {
4291 pr_err("QSEE %s app file size overflow\n", appname);
4292 ret = -EINVAL;
4293 goto err;
4294 }
4295 *fw_size += fw_entry->size;
4296 release_firmware(fw_entry);
4297 fw_entry = NULL;
4298 }
4299
4300 return ret;
4301err:
4302 if (fw_entry)
4303 release_firmware(fw_entry);
4304 *fw_size = 0;
4305 return ret;
4306}
4307
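/*
 * Concatenate <appname>.mdt and all <appname>.bNN blobs into the buffer
 * previously sized by __qseecom_get_fw_size(), filling in mdt_len/img_len of
 * the load request as the pieces are copied.
 */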
4308static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
4309 uint32_t fw_size,
4310 struct qseecom_load_app_ireq *load_req)
4311{
4312 int ret = -1;
4313 int i = 0, rc = 0;
4314 const struct firmware *fw_entry = NULL;
4315 char fw_name[MAX_APP_NAME_SIZE];
4316 u8 *img_data_ptr = img_data;
4317 struct elf32_hdr *ehdr;
4318 struct elf64_hdr *ehdr64;
4319 int num_images = 0;
4320 unsigned char app_arch = 0;
4321
4322 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4323 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4324 if (rc) {
4325 ret = -EIO;
4326 goto err;
4327 }
4328
4329 load_req->img_len = fw_entry->size;
4330 if (load_req->img_len > fw_size) {
4331 pr_err("app %s size %zu is larger than buf size %u\n",
4332 appname, fw_entry->size, fw_size);
4333 ret = -EINVAL;
4334 goto err;
4335 }
4336 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4337 img_data_ptr = img_data_ptr + fw_entry->size;
4338 load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
4339
4340 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4341 if (app_arch == ELFCLASS32) {
4342 ehdr = (struct elf32_hdr *)fw_entry->data;
4343 num_images = ehdr->e_phnum;
4344 } else if (app_arch == ELFCLASS64) {
4345 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4346 num_images = ehdr64->e_phnum;
4347 } else {
4348 pr_err("QSEE %s app, arch %u is not supported\n",
4349 appname, app_arch);
4350 ret = -EIO;
4351 goto err;
4352 }
4353 release_firmware(fw_entry);
4354 fw_entry = NULL;
4355 for (i = 0; i < num_images; i++) {
4356 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4357 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4358 if (ret) {
4359 pr_err("Failed to locate blob %s\n", fw_name);
4360 goto err;
4361 }
4362 if ((fw_entry->size > U32_MAX - load_req->img_len) ||
4363 (fw_entry->size + load_req->img_len > fw_size)) {
4364 pr_err("Invalid file size for %s\n", fw_name);
4365 ret = -EINVAL;
4366 goto err;
4367 }
4368 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4369 img_data_ptr = img_data_ptr + fw_entry->size;
4370 load_req->img_len += fw_entry->size;
4371 release_firmware(fw_entry);
4372 fw_entry = NULL;
4373 }
4374 return ret;
4375err:
4376 release_firmware(fw_entry);
4377 return ret;
4378}
4379
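/*
 * Allocate and kernel-map an ION buffer from the QSECOM TA heap large enough
 * for the assembled TA image, retrying a few times (with app_access_lock
 * dropped around a short sleep) if the first attempts fail; returns the
 * handle, kernel mapping and physical address.
 */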
4380static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4381 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4382{
4383 size_t len = 0;
4384 int ret = 0;
4385 ion_phys_addr_t pa;
4386 struct ion_handle *ihandle = NULL;
4387 u8 *img_data = NULL;
Zhen Kong3dd92792017-12-08 09:47:15 -08004388 int retry = 0;
Zhen Konge30e1342019-01-22 08:57:02 -08004389 int ion_flag = ION_FLAG_CACHED;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004390
Zhen Kong3dd92792017-12-08 09:47:15 -08004391 do {
Zhen Kong5d02be92018-05-29 16:17:29 -07004392 if (retry++) {
4393 mutex_unlock(&app_access_lock);
Zhen Kong3dd92792017-12-08 09:47:15 -08004394 msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
Zhen Kong5d02be92018-05-29 16:17:29 -07004395 mutex_lock(&app_access_lock);
4396 }
Zhen Kong3dd92792017-12-08 09:47:15 -08004397 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
Zhen Konge30e1342019-01-22 08:57:02 -08004398 SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), ion_flag);
Zhen Kong3dd92792017-12-08 09:47:15 -08004399 } while (IS_ERR_OR_NULL(ihandle) &&
4400 (retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004401
4402 if (IS_ERR_OR_NULL(ihandle)) {
4403 pr_err("ION alloc failed\n");
4404 return -ENOMEM;
4405 }
4406 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4407 ihandle);
4408
4409 if (IS_ERR_OR_NULL(img_data)) {
4410 pr_err("ION memory mapping for image loading failed\n");
4411 ret = -ENOMEM;
4412 goto exit_ion_free;
4413 }
4414 /* Get the physical address of the ION BUF */
4415 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4416 if (ret) {
4417 pr_err("physical memory retrieval failure\n");
4418 ret = -EIO;
4419 goto exit_ion_unmap_kernel;
4420 }
4421
4422 *pihandle = ihandle;
4423 *data = img_data;
4424 *paddr = pa;
4425 return ret;
4426
4427exit_ion_unmap_kernel:
4428 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4429exit_ion_free:
4430 ion_free(qseecom.ion_clnt, ihandle);
4431 ihandle = NULL;
4432 return ret;
4433}
4434
4435static void __qseecom_free_img_data(struct ion_handle **ihandle)
4436{
4437 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4438 ion_free(qseecom.ion_clnt, *ihandle);
4439 *ihandle = NULL;
4440}
4441
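/*
 * Load a TA from the firmware partition into QSEE: ensure the matching
 * 32/64-bit commonlib is loaded, assemble the split image into an ION
 * buffer, and issue QSEOS_APP_START_COMMAND via SCM.  On success *app_id
 * holds the id returned by QSEE; an INCOMPLETE result that then fails is
 * rolled back by unloading the partially loaded app.
 */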
4442static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4443 uint32_t *app_id)
4444{
4445 int ret = -1;
4446 uint32_t fw_size = 0;
4447 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4448 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4449 struct qseecom_command_scm_resp resp;
4450 u8 *img_data = NULL;
4451 ion_phys_addr_t pa = 0;
4452 struct ion_handle *ihandle = NULL;
4453 void *cmd_buf = NULL;
4454 size_t cmd_len;
4455 uint32_t app_arch = 0;
4456
4457 if (!data || !appname || !app_id) {
4458 pr_err("Null pointer to data or appname or appid\n");
4459 return -EINVAL;
4460 }
4461 *app_id = 0;
4462 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4463 return -EIO;
4464 data->client.app_arch = app_arch;
4465
4466 /* Check and load cmnlib */
4467 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4468 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4469 ret = qseecom_load_commonlib_image(data, "cmnlib");
4470 if (ret) {
4471 pr_err("failed to load cmnlib\n");
4472 return -EIO;
4473 }
4474 qseecom.commonlib_loaded = true;
4475 pr_debug("cmnlib is loaded\n");
4476 }
4477
4478 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4479 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4480 if (ret) {
4481 pr_err("failed to load cmnlib64\n");
4482 return -EIO;
4483 }
4484 qseecom.commonlib64_loaded = true;
4485 pr_debug("cmnlib64 is loaded\n");
4486 }
4487 }
4488
4489 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4490 if (ret)
4491 return ret;
4492
4493 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4494 if (ret) {
4495 ret = -EIO;
4496 goto exit_free_img_data;
4497 }
4498
4499 /* Populate the load_req parameters */
4500 if (qseecom.qsee_version < QSEE_VERSION_40) {
4501 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4504 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4505 load_req.phy_addr = (uint32_t)pa;
4506 cmd_buf = (void *)&load_req;
4507 cmd_len = sizeof(struct qseecom_load_app_ireq);
4508 } else {
4509 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4510 load_req_64bit.mdt_len = load_req.mdt_len;
4511 load_req_64bit.img_len = load_req.img_len;
4512 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4513 load_req_64bit.phy_addr = (uint64_t)pa;
4514 cmd_buf = (void *)&load_req_64bit;
4515 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4516 }
4517
4518 if (qseecom.support_bus_scaling) {
4519 mutex_lock(&qsee_bw_mutex);
4520 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4521 mutex_unlock(&qsee_bw_mutex);
4522 if (ret) {
4523 ret = -EIO;
4524 goto exit_free_img_data;
4525 }
4526 }
4527
4528 ret = __qseecom_enable_clk_scale_up(data);
4529 if (ret) {
4530 ret = -EIO;
4531 goto exit_unregister_bus_bw_need;
4532 }
4533
4534 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4535 img_data, fw_size,
4536 ION_IOC_CLEAN_INV_CACHES);
4537 if (ret) {
4538 pr_err("cache operation failed %d\n", ret);
4539 goto exit_disable_clk_vote;
4540 }
4541
4542 /* SCM_CALL to load the image */
4543 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4544 &resp, sizeof(resp));
4545 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004546 pr_err("scm_call to load failed : ret %d, result %x\n",
4547 ret, resp.result);
4548 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4549 ret = -EEXIST;
4550 else
4551 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004552 goto exit_disable_clk_vote;
4553 }
4554
4555 switch (resp.result) {
4556 case QSEOS_RESULT_SUCCESS:
4557 *app_id = resp.data;
4558 break;
4559 case QSEOS_RESULT_INCOMPLETE:
4560 ret = __qseecom_process_incomplete_cmd(data, &resp);
Zhen Kong03b2eae2019-09-17 16:58:46 -07004561 if (ret) {
4562 pr_err("incomp_cmd err %d, %d, unload %d %s\n",
4563 ret, resp.result, resp.data, appname);
4564 __qseecom_unload_app(data, resp.data);
4565 ret = -EFAULT;
4566 } else {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004567 *app_id = resp.data;
Zhen Kong03b2eae2019-09-17 16:58:46 -07004568 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004569 break;
4570 case QSEOS_RESULT_FAILURE:
4571 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4572 break;
4573 default:
4574 pr_err("scm call return unknown response %d\n", resp.result);
4575 ret = -EINVAL;
4576 break;
4577 }
4578
4579exit_disable_clk_vote:
4580 __qseecom_disable_clk_scale_down(data);
4581
4582exit_unregister_bus_bw_need:
4583 if (qseecom.support_bus_scaling) {
4584 mutex_lock(&qsee_bw_mutex);
4585 qseecom_unregister_bus_bandwidth_needs(data);
4586 mutex_unlock(&qsee_bw_mutex);
4587 }
4588
4589exit_free_img_data:
4590 __qseecom_free_img_data(&ihandle);
4591 return ret;
4592}
4593
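/*
 * Load cmnlib/cmnlib64 (the common library that TAs depend on) with
 * QSEOS_LOAD_SERV_IMAGE_COMMAND; same assemble-into-ION-then-SCM flow as
 * __qseecom_load_fw(), but no app id is returned.
 */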
4594static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
4595 char *cmnlib_name)
4596{
4597 int ret = 0;
4598 uint32_t fw_size = 0;
4599 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4600 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4601 struct qseecom_command_scm_resp resp;
4602 u8 *img_data = NULL;
4603 ion_phys_addr_t pa = 0;
4604 void *cmd_buf = NULL;
4605 size_t cmd_len;
4606 uint32_t app_arch = 0;
Zhen Kong3bafb312017-10-18 10:27:20 -07004607 struct ion_handle *cmnlib_ion_handle = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004608
4609 if (!cmnlib_name) {
4610 pr_err("cmnlib_name is NULL\n");
4611 return -EINVAL;
4612 }
4613 if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
4614 pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
4615 cmnlib_name, strlen(cmnlib_name));
4616 return -EINVAL;
4617 }
4618
4619 if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
4620 return -EIO;
4621
Zhen Kong3bafb312017-10-18 10:27:20 -07004622 ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004623 &img_data, fw_size, &pa);
4624 if (ret)
4625 return -EIO;
4626
4627 ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
4628 if (ret) {
4629 ret = -EIO;
4630 goto exit_free_img_data;
4631 }
4632 if (qseecom.qsee_version < QSEE_VERSION_40) {
4633 load_req.phy_addr = (uint32_t)pa;
4634 load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4635 cmd_buf = (void *)&load_req;
4636 cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
4637 } else {
4638 load_req_64bit.phy_addr = (uint64_t)pa;
4639 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4640 load_req_64bit.img_len = load_req.img_len;
4641 load_req_64bit.mdt_len = load_req.mdt_len;
4642 cmd_buf = (void *)&load_req_64bit;
4643 cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
4644 }
4645
4646 if (qseecom.support_bus_scaling) {
4647 mutex_lock(&qsee_bw_mutex);
4648 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4649 mutex_unlock(&qsee_bw_mutex);
4650 if (ret) {
4651 ret = -EIO;
4652 goto exit_free_img_data;
4653 }
4654 }
4655
4656 /* Vote for the SFPB clock */
4657 ret = __qseecom_enable_clk_scale_up(data);
4658 if (ret) {
4659 ret = -EIO;
4660 goto exit_unregister_bus_bw_need;
4661 }
4662
Zhen Kong3bafb312017-10-18 10:27:20 -07004663 ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004664 img_data, fw_size,
4665 ION_IOC_CLEAN_INV_CACHES);
4666 if (ret) {
4667 pr_err("cache operation failed %d\n", ret);
4668 goto exit_disable_clk_vote;
4669 }
4670
4671 /* SCM_CALL to load the image */
4672 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4673 &resp, sizeof(resp));
4674 if (ret) {
4675 pr_err("scm_call to load failed : ret %d\n", ret);
4676 ret = -EIO;
4677 goto exit_disable_clk_vote;
4678 }
4679
4680 switch (resp.result) {
4681 case QSEOS_RESULT_SUCCESS:
4682 break;
4683 case QSEOS_RESULT_FAILURE:
4684 pr_err("scm call failed w/response result%d\n", resp.result);
4685 ret = -EINVAL;
4686 goto exit_disable_clk_vote;
4687 case QSEOS_RESULT_INCOMPLETE:
4688 ret = __qseecom_process_incomplete_cmd(data, &resp);
4689 if (ret) {
4690 pr_err("process_incomplete_cmd failed err: %d\n", ret);
4691 goto exit_disable_clk_vote;
4692 }
4693 break;
4694 default:
4695 pr_err("scm call return unknown response %d\n", resp.result);
4696 ret = -EINVAL;
4697 goto exit_disable_clk_vote;
4698 }
4699
4700exit_disable_clk_vote:
4701 __qseecom_disable_clk_scale_down(data);
4702
4703exit_unregister_bus_bw_need:
4704 if (qseecom.support_bus_scaling) {
4705 mutex_lock(&qsee_bw_mutex);
4706 qseecom_unregister_bus_bandwidth_needs(data);
4707 mutex_unlock(&qsee_bw_mutex);
4708 }
4709
4710exit_free_img_data:
Zhen Kong3bafb312017-10-18 10:27:20 -07004711 __qseecom_free_img_data(&cmnlib_ion_handle);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004712 return ret;
4713}
4714
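/* Ask QSEE to unload the common library image via QSEOS_UNLOAD_SERV_IMAGE_COMMAND. */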
4715static int qseecom_unload_commonlib_image(void)
4716{
4717 int ret = -EINVAL;
4718 struct qseecom_unload_lib_image_ireq unload_req = {0};
4719 struct qseecom_command_scm_resp resp;
4720
4721 /* Populate the remaining parameters */
4722 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4723
4724 /* SCM_CALL to load the image */
4725 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4726 sizeof(struct qseecom_unload_lib_image_ireq),
4727 &resp, sizeof(resp));
4728 if (ret) {
4729 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4730 ret = -EIO;
4731 } else {
4732 switch (resp.result) {
4733 case QSEOS_RESULT_SUCCESS:
4734 break;
4735 case QSEOS_RESULT_FAILURE:
4736 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4737 break;
4738 default:
4739 pr_err("scm call return unknown response %d\n",
4740 resp.result);
4741 ret = -EINVAL;
4742 break;
4743 }
4744 }
4745
4746 return ret;
4747}
4748
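/*
 * In-kernel client entry point (declared for kernel users in
 * qseecom_kernel.h): load the named TA if it is not already running and hand
 * back a qseecom_handle whose sbuf/sbuf_len describe a shared buffer of
 * @size bytes.
 *
 * Minimal usage sketch from another kernel driver; the TA name and buffer
 * size are placeholders, and the teardown assumes the companion
 * qseecom_shutdown_app() API from qseecom_kernel.h:
 *
 *	struct qseecom_handle *hdl = NULL;
 *	int rc = qseecom_start_app(&hdl, "sampleapp", SZ_4K);
 *	if (!rc) {
 *		... exchange messages via hdl->sbuf (hdl->sbuf_len bytes) ...
 *		qseecom_shutdown_app(&hdl);
 *	}
 */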
4749int qseecom_start_app(struct qseecom_handle **handle,
4750 char *app_name, uint32_t size)
4751{
4752 int32_t ret = 0;
4753 unsigned long flags = 0;
4754 struct qseecom_dev_handle *data = NULL;
4755 struct qseecom_check_app_ireq app_ireq;
4756 struct qseecom_registered_app_list *entry = NULL;
4757 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4758 bool found_app = false;
4759 size_t len;
4760 ion_phys_addr_t pa;
4761 uint32_t fw_size, app_arch;
4762 uint32_t app_id = 0;
4763
Zhen Kongc4c162a2019-01-23 12:07:12 -08004764 __wakeup_unregister_listener_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07004765 __wakeup_unload_app_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004766
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004767 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4768 pr_err("Not allowed to be called in %d state\n",
4769 atomic_read(&qseecom.qseecom_state));
4770 return -EPERM;
4771 }
4772 if (!app_name) {
4773 pr_err("failed to get the app name\n");
4774 return -EINVAL;
4775 }
4776
Zhen Kong64a6d7282017-06-16 11:55:07 -07004777 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004778 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004779 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004780 return -EINVAL;
4781 }
4782
4783 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4784 if (!(*handle))
4785 return -ENOMEM;
4786
4787 data = kzalloc(sizeof(*data), GFP_KERNEL);
4788 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304789 ret = -ENOMEM;
4790 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004791 }
4792 data->abort = 0;
4793 data->type = QSEECOM_CLIENT_APP;
4794 data->released = false;
4795 data->client.sb_length = size;
4796 data->client.user_virt_sb_base = 0;
4797 data->client.ihandle = NULL;
4798
4799 init_waitqueue_head(&data->abort_wq);
4800
4801 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4802 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4803 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4804 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304805 ret = -ENOMEM;
4806 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004807 }
4808 mutex_lock(&app_access_lock);
4809
Zhen Kong5d02be92018-05-29 16:17:29 -07004810recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004811 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4812 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4813 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4814 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304815 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004816
4817 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4818 if (app_id) {
4819 pr_warn("App id %d for [%s] app exists\n", app_id,
4820 (char *)app_ireq.app_name);
4821 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4822 list_for_each_entry(entry,
4823 &qseecom.registered_app_list_head, list){
4824 if (entry->app_id == app_id) {
4825 entry->ref_cnt++;
4826 found_app = true;
4827 break;
4828 }
4829 }
4830 spin_unlock_irqrestore(
4831 &qseecom.registered_app_list_lock, flags);
4832 if (!found_app)
4833 pr_warn("App_id %d [%s] was loaded but not registered\n",
4834				app_id, (char *)app_ireq.app_name);
4835 } else {
4836 /* load the app and get the app_id */
4837		pr_debug("%s: Loading app for the first time\n",
4838 qseecom.pdev->init_name);
4839 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004840 if (ret == -EEXIST) {
4841 pr_err("recheck if TA %s is loaded\n", app_name);
4842 goto recheck;
4843 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304844 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004845 }
4846 data->client.app_id = app_id;
4847 if (!found_app) {
4848 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4849 if (!entry) {
4850 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304851 ret = -ENOMEM;
4852 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004853 }
4854 entry->app_id = app_id;
4855 entry->ref_cnt = 1;
4856 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4857 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4858 ret = -EIO;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304859 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004860 }
4861 entry->app_arch = app_arch;
4862 entry->app_blocked = false;
4863 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004864 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004865 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4866 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4867 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4868 flags);
4869 }
4870
4871 /* Get the physical address of the ION BUF */
4872 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4873 if (ret) {
4874 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4875 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304876 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004877 }
4878
4879 /* Populate the structure for sending scm call to load image */
4880 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4881 data->client.ihandle);
4882 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4883 pr_err("ION memory mapping for client shared buf failed\n");
4884 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304885 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004886 }
4887 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4888 data->client.sb_phys = (phys_addr_t)pa;
4889 (*handle)->dev = (void *)data;
4890 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4891 (*handle)->sbuf_len = data->client.sb_length;
4892
4893 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4894 if (!kclient_entry) {
4895 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304896 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004897 }
4898 kclient_entry->handle = *handle;
4899
4900 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4901 list_add_tail(&kclient_entry->list,
4902 &qseecom.registered_kclient_list_head);
4903 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4904
4905 mutex_unlock(&app_access_lock);
Zhen Kong03b2eae2019-09-17 16:58:46 -07004906 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004907 return 0;
4908
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304909exit_ion_unmap_kernel:
4910 if (!IS_ERR_OR_NULL(data->client.ihandle))
4911 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4912exit_entry_free:
4913 kfree(entry);
4914exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004915 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304916 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4917 ion_free(qseecom.ion_clnt, data->client.ihandle);
4918 data->client.ihandle = NULL;
4919 }
4920exit_data_free:
4921 kfree(data);
4922exit_handle_free:
4923 if (*handle) {
4924 kfree(*handle);
4925 *handle = NULL;
4926 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07004927 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004928 return ret;
4929}
4930EXPORT_SYMBOL(qseecom_start_app);
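/*
 * Hedged usage sketch (not part of this driver): how a kernel client might
 * open a session with a trusted application via qseecom_start_app(). The
 * TA name "sampleapp" and the 4K shared-buffer size are hypothetical
 * placeholders chosen only for illustration. On success, qhandle->sbuf and
 * qhandle->sbuf_len describe the ION-backed buffer shared with the TA.
 *
 *	struct qseecom_handle *qhandle = NULL;
 *	int rc;
 *
 *	rc = qseecom_start_app(&qhandle, "sampleapp", SZ_4K);
 *	if (rc)
 *		return rc;
 */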
4931
4932int qseecom_shutdown_app(struct qseecom_handle **handle)
4933{
4934 int ret = -EINVAL;
4935 struct qseecom_dev_handle *data;
4936
4937 struct qseecom_registered_kclient_list *kclient = NULL;
4938 unsigned long flags = 0;
4939 bool found_handle = false;
4940
Zhen Kongc4c162a2019-01-23 12:07:12 -08004941 __wakeup_unregister_listener_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07004942 __wakeup_unload_app_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004943
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004944 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4945 pr_err("Not allowed to be called in %d state\n",
4946 atomic_read(&qseecom.qseecom_state));
4947 return -EPERM;
4948 }
4949
4950 if ((handle == NULL) || (*handle == NULL)) {
4951 pr_err("Handle is not initialized\n");
4952 return -EINVAL;
4953 }
4954 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4955 mutex_lock(&app_access_lock);
4956
4957 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4958 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4959 list) {
4960 if (kclient->handle == (*handle)) {
4961 list_del(&kclient->list);
4962 found_handle = true;
4963 break;
4964 }
4965 }
4966 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4967 if (!found_handle)
4968 pr_err("Unable to find the handle, exiting\n");
4969 else
4970 ret = qseecom_unload_app(data, false);
4971
4972 mutex_unlock(&app_access_lock);
4973 if (ret == 0) {
4974 kzfree(data);
4975 kzfree(*handle);
4976 kzfree(kclient);
4977 *handle = NULL;
4978 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07004979 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004980 return ret;
4981}
4982EXPORT_SYMBOL(qseecom_shutdown_app);
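/*
 * Hedged usage note: every successful qseecom_start_app() call is expected
 * to be balanced by a qseecom_shutdown_app() on the same handle. On success
 * the driver frees the handle memory and sets *handle to NULL, so callers
 * should not reuse it afterwards (qhandle is the hypothetical handle from
 * the sketch above):
 *
 *	rc = qseecom_shutdown_app(&qhandle);
 *	if (rc)
 *		pr_err("shutdown of sampleapp failed, rc=%d\n", rc);
 */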
4983
4984int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
4985 uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
4986{
4987 int ret = 0;
4988 struct qseecom_send_cmd_req req = {0, 0, 0, 0};
4989 struct qseecom_dev_handle *data;
4990 bool perf_enabled = false;
4991
Zhen Kongc4c162a2019-01-23 12:07:12 -08004992 __wakeup_unregister_listener_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07004993 __wakeup_unload_app_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004994
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004995 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4996 pr_err("Not allowed to be called in %d state\n",
4997 atomic_read(&qseecom.qseecom_state));
4998 return -EPERM;
4999 }
5000
5001 if (handle == NULL) {
5002 pr_err("Handle is not initialized\n");
5003 return -EINVAL;
5004 }
5005 data = handle->dev;
5006
5007 req.cmd_req_len = sbuf_len;
5008 req.resp_len = rbuf_len;
5009 req.cmd_req_buf = send_buf;
5010 req.resp_buf = resp_buf;
5011
5012 if (__validate_send_cmd_inputs(data, &req))
5013 return -EINVAL;
5014
5015 mutex_lock(&app_access_lock);
5016 if (qseecom.support_bus_scaling) {
5017 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
5018 if (ret) {
5019 pr_err("Failed to set bw.\n");
5020 mutex_unlock(&app_access_lock);
5021 return ret;
5022 }
5023 }
5024 /*
5025 * On targets where crypto clock is handled by HLOS,
5026 * if clk_access_cnt is zero and perf_enabled is false,
5027 * then the crypto clock was not enabled before sending cmd
5028 * to tz, qseecom will enable the clock to avoid service failure.
5029 */
5030 if (!qseecom.no_clock_support &&
5031 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
5032 pr_debug("ce clock is not enabled!\n");
5033 ret = qseecom_perf_enable(data);
5034 if (ret) {
5035 pr_err("Failed to vote for clock with err %d\n",
5036 ret);
5037 mutex_unlock(&app_access_lock);
5038 return -EINVAL;
5039 }
5040 perf_enabled = true;
5041 }
5042 if (!strcmp(data->client.app_name, "securemm"))
5043 data->use_legacy_cmd = true;
5044
5045 ret = __qseecom_send_cmd(data, &req);
5046 data->use_legacy_cmd = false;
5047 if (qseecom.support_bus_scaling)
5048 __qseecom_add_bw_scale_down_timer(
5049 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
5050
5051 if (perf_enabled) {
5052 qsee_disable_clock_vote(data, CLK_DFAB);
5053 qsee_disable_clock_vote(data, CLK_SFPB);
5054 }
5055
5056 mutex_unlock(&app_access_lock);
5057
5058 if (ret)
5059 return ret;
5060
5061 pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
5062 req.resp_len, req.resp_buf);
5063 return ret;
5064}
5065EXPORT_SYMBOL(qseecom_send_command);
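/*
 * Hedged usage sketch, assuming the request and response are carved out of
 * the handle's shared buffer (a common pattern for kernel clients; struct
 * sample_req/sample_rsp and SAMPLE_CMD_PING are hypothetical TA message
 * definitions, not provided by this driver):
 *
 *	struct sample_req *req = (struct sample_req *)qhandle->sbuf;
 *	struct sample_rsp *rsp =
 *		(struct sample_rsp *)(qhandle->sbuf + sizeof(*req));
 *
 *	req->cmd_id = SAMPLE_CMD_PING;
 *	rc = qseecom_send_command(qhandle, req, sizeof(*req),
 *				  rsp, sizeof(*rsp));
 *	if (rc)
 *		pr_err("send_command to sampleapp failed, rc=%d\n", rc);
 */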
5066
5067int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
5068{
5069 int ret = 0;
5070
5071 if ((handle == NULL) || (handle->dev == NULL)) {
5072 pr_err("No valid kernel client\n");
5073 return -EINVAL;
5074 }
5075 if (high) {
5076 if (qseecom.support_bus_scaling) {
5077 mutex_lock(&qsee_bw_mutex);
5078 __qseecom_register_bus_bandwidth_needs(handle->dev,
5079 HIGH);
5080 mutex_unlock(&qsee_bw_mutex);
5081 } else {
5082 ret = qseecom_perf_enable(handle->dev);
5083 if (ret)
5084 pr_err("Failed to vote for clock with err %d\n",
5085 ret);
5086 }
5087 } else {
5088 if (!qseecom.support_bus_scaling) {
5089 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
5090 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
5091 } else {
5092 mutex_lock(&qsee_bw_mutex);
5093 qseecom_unregister_bus_bandwidth_needs(handle->dev);
5094 mutex_unlock(&qsee_bw_mutex);
5095 }
5096 }
5097 return ret;
5098}
5099EXPORT_SYMBOL(qseecom_set_bandwidth);
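/*
 * Hedged usage note: on targets with bus scaling, a client issuing long or
 * frequent commands may bracket them with bandwidth votes; whether this is
 * needed depends on the target configuration:
 *
 *	qseecom_set_bandwidth(qhandle, true);
 *	rc = qseecom_send_command(qhandle, req, sizeof(*req),
 *				  rsp, sizeof(*rsp));
 *	qseecom_set_bandwidth(qhandle, false);
 */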
5100
5101int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
5102{
5103 struct qseecom_registered_app_list dummy_app_entry = { {0} };
5104 struct qseecom_dev_handle dummy_private_data = {0};
5105 struct qseecom_command_scm_resp resp;
5106 int ret = 0;
5107
5108 if (!desc) {
5109 pr_err("desc is NULL\n");
5110 return -EINVAL;
5111 }
5112
5113 resp.result = desc->ret[0]; /*req_cmd*/
Zhen Kong2f60f492017-06-29 15:22:14 -07005114 resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005115 resp.data = desc->ret[2]; /*listener_id*/
5116
Zhen Konge7f525f2017-12-01 18:26:25 -08005117 dummy_private_data.client.app_id = desc->ret[1];
Zhen Kong0ea975d2019-03-12 14:40:24 -07005118 dummy_private_data.client.from_smcinvoke = true;
Zhen Konge7f525f2017-12-01 18:26:25 -08005119 dummy_app_entry.app_id = desc->ret[1];
5120
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005121 mutex_lock(&app_access_lock);
Zhen Kong7458c2e2017-10-19 12:32:07 -07005122 if (qseecom.qsee_reentrancy_support)
5123 ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005124 &dummy_private_data);
Zhen Kong7458c2e2017-10-19 12:32:07 -07005125 else
5126 ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
5127 &resp);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005128 mutex_unlock(&app_access_lock);
5129 if (ret)
Zhen Kong2f60f492017-06-29 15:22:14 -07005130 pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005131 (int)desc->ret[0], (int)desc->ret[2],
5132 (int)desc->ret[1], ret);
5133 desc->ret[0] = resp.result;
5134 desc->ret[1] = resp.resp_type;
5135 desc->ret[2] = resp.data;
5136 return ret;
5137}
5138EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
5139
5140static int qseecom_send_resp(void)
5141{
5142 qseecom.send_resp_flag = 1;
5143 wake_up_interruptible(&qseecom.send_resp_wq);
5144 return 0;
5145}
5146
5147static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
5148{
5149 struct qseecom_registered_listener_list *this_lstnr = NULL;
5150
5151 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
5152 this_lstnr = __qseecom_find_svc(data->listener.id);
5153 if (this_lstnr == NULL)
5154 return -EINVAL;
5155 qseecom.send_resp_flag = 1;
5156 this_lstnr->send_resp_flag = 1;
5157 wake_up_interruptible(&qseecom.send_resp_wq);
5158 return 0;
5159}
5160
5161static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
5162 struct qseecom_send_modfd_listener_resp *resp,
5163 struct qseecom_registered_listener_list *this_lstnr)
5164{
5165 int i;
5166
5167 if (!data || !resp || !this_lstnr) {
5168 pr_err("listener handle or resp msg is null\n");
5169 return -EINVAL;
5170 }
5171
5172 if (resp->resp_buf_ptr == NULL) {
5173 pr_err("resp buffer is null\n");
5174 return -EINVAL;
5175 }
5176 /* validate resp buf length */
5177 if ((resp->resp_len == 0) ||
5178 (resp->resp_len > this_lstnr->sb_length)) {
5179 pr_err("resp buf length %d not valid\n", resp->resp_len);
5180 return -EINVAL;
5181 }
5182
5183 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
5184 pr_err("Integer overflow in resp_len & resp_buf\n");
5185 return -EINVAL;
5186 }
5187 if ((uintptr_t)this_lstnr->user_virt_sb_base >
5188 (ULONG_MAX - this_lstnr->sb_length)) {
5189 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
5190 return -EINVAL;
5191 }
5192 /* validate resp buf */
5193 if (((uintptr_t)resp->resp_buf_ptr <
5194 (uintptr_t)this_lstnr->user_virt_sb_base) ||
5195 ((uintptr_t)resp->resp_buf_ptr >=
5196 ((uintptr_t)this_lstnr->user_virt_sb_base +
5197 this_lstnr->sb_length)) ||
5198 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
5199 ((uintptr_t)this_lstnr->user_virt_sb_base +
5200 this_lstnr->sb_length))) {
5201 pr_err("resp buf is out of shared buffer region\n");
5202 return -EINVAL;
5203 }
5204
5205 /* validate offsets */
5206 for (i = 0; i < MAX_ION_FD; i++) {
5207 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
5208 pr_err("Invalid offset %d = 0x%x\n",
5209 i, resp->ifd_data[i].cmd_buf_offset);
5210 return -EINVAL;
5211 }
5212 }
5213
5214 return 0;
5215}
5216
5217static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5218 void __user *argp, bool is_64bit_addr)
5219{
5220 struct qseecom_send_modfd_listener_resp resp;
5221 struct qseecom_registered_listener_list *this_lstnr = NULL;
5222
5223 if (copy_from_user(&resp, argp, sizeof(resp))) {
5224		pr_err("copy_from_user failed\n");
5225 return -EINVAL;
5226 }
5227
5228 this_lstnr = __qseecom_find_svc(data->listener.id);
5229 if (this_lstnr == NULL)
5230 return -EINVAL;
5231
5232 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
5233 return -EINVAL;
5234
5235 resp.resp_buf_ptr = this_lstnr->sb_virt +
5236 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
5237
5238 if (!is_64bit_addr)
5239 __qseecom_update_cmd_buf(&resp, false, data);
5240 else
5241 __qseecom_update_cmd_buf_64(&resp, false, data);
5242 qseecom.send_resp_flag = 1;
5243 this_lstnr->send_resp_flag = 1;
5244 wake_up_interruptible(&qseecom.send_resp_wq);
5245 return 0;
5246}
5247
5248static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5249 void __user *argp)
5250{
5251 return __qseecom_send_modfd_resp(data, argp, false);
5252}
5253
5254static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
5255 void __user *argp)
5256{
5257 return __qseecom_send_modfd_resp(data, argp, true);
5258}
5259
5260static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
5261 void __user *argp)
5262{
5263 struct qseecom_qseos_version_req req;
5264
5265 if (copy_from_user(&req, argp, sizeof(req))) {
5266		pr_err("copy_from_user failed\n");
5267 return -EINVAL;
5268 }
5269 req.qseos_version = qseecom.qseos_version;
5270 if (copy_to_user(argp, &req, sizeof(req))) {
5271		pr_err("copy_to_user failed\n");
5272 return -EINVAL;
5273 }
5274 return 0;
5275}
5276
5277static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
5278{
5279 int rc = 0;
5280 struct qseecom_clk *qclk = NULL;
5281
5282 if (qseecom.no_clock_support)
5283 return 0;
5284
5285 if (ce == CLK_QSEE)
5286 qclk = &qseecom.qsee;
5287 if (ce == CLK_CE_DRV)
5288 qclk = &qseecom.ce_drv;
5289
5290 if (qclk == NULL) {
5291 pr_err("CLK type not supported\n");
5292 return -EINVAL;
5293 }
5294 mutex_lock(&clk_access_lock);
5295
5296 if (qclk->clk_access_cnt == ULONG_MAX) {
5297 pr_err("clk_access_cnt beyond limitation\n");
5298 goto err;
5299 }
5300 if (qclk->clk_access_cnt > 0) {
5301 qclk->clk_access_cnt++;
5302 mutex_unlock(&clk_access_lock);
5303 return rc;
5304 }
5305
5306 /* Enable CE core clk */
5307 if (qclk->ce_core_clk != NULL) {
5308 rc = clk_prepare_enable(qclk->ce_core_clk);
5309 if (rc) {
5310 pr_err("Unable to enable/prepare CE core clk\n");
5311 goto err;
5312 }
5313 }
5314 /* Enable CE clk */
5315 if (qclk->ce_clk != NULL) {
5316 rc = clk_prepare_enable(qclk->ce_clk);
5317 if (rc) {
5318 pr_err("Unable to enable/prepare CE iface clk\n");
5319 goto ce_clk_err;
5320 }
5321 }
5322 /* Enable AXI clk */
5323 if (qclk->ce_bus_clk != NULL) {
5324 rc = clk_prepare_enable(qclk->ce_bus_clk);
5325 if (rc) {
5326 pr_err("Unable to enable/prepare CE bus clk\n");
5327 goto ce_bus_clk_err;
5328 }
5329 }
5330 qclk->clk_access_cnt++;
5331 mutex_unlock(&clk_access_lock);
5332 return 0;
5333
5334ce_bus_clk_err:
5335 if (qclk->ce_clk != NULL)
5336 clk_disable_unprepare(qclk->ce_clk);
5337ce_clk_err:
5338 if (qclk->ce_core_clk != NULL)
5339 clk_disable_unprepare(qclk->ce_core_clk);
5340err:
5341 mutex_unlock(&clk_access_lock);
5342 return -EIO;
5343}
5344
5345static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5346{
5347 struct qseecom_clk *qclk;
5348
5349 if (qseecom.no_clock_support)
5350 return;
5351
5352 if (ce == CLK_QSEE)
5353 qclk = &qseecom.qsee;
5354 else
5355 qclk = &qseecom.ce_drv;
5356
5357 mutex_lock(&clk_access_lock);
5358
5359 if (qclk->clk_access_cnt == 0) {
5360 mutex_unlock(&clk_access_lock);
5361 return;
5362 }
5363
5364 if (qclk->clk_access_cnt == 1) {
5365 if (qclk->ce_clk != NULL)
5366 clk_disable_unprepare(qclk->ce_clk);
5367 if (qclk->ce_core_clk != NULL)
5368 clk_disable_unprepare(qclk->ce_core_clk);
5369 if (qclk->ce_bus_clk != NULL)
5370 clk_disable_unprepare(qclk->ce_bus_clk);
5371 }
5372 qclk->clk_access_cnt--;
5373 mutex_unlock(&clk_access_lock);
5374}
5375
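/*
 * Bus bandwidth vote indices passed to msm_bus_scale_client_update_request()
 * by the two functions below (as derived from the requests they issue):
 *	0 - no vote (neither DFAB nor SFPB is requested)
 *	1 - DFAB (CLK_DFAB) vote only
 *	2 - SFPB (CLK_SFPB) vote only
 *	3 - both DFAB and SFPB votes active
 */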
5376static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
5377 int32_t clk_type)
5378{
5379 int ret = 0;
5380 struct qseecom_clk *qclk;
5381
5382 if (qseecom.no_clock_support)
5383 return 0;
5384
5385 qclk = &qseecom.qsee;
5386 if (!qseecom.qsee_perf_client)
5387 return ret;
5388
5389 switch (clk_type) {
5390 case CLK_DFAB:
5391 mutex_lock(&qsee_bw_mutex);
5392 if (!qseecom.qsee_bw_count) {
5393 if (qseecom.qsee_sfpb_bw_count > 0)
5394 ret = msm_bus_scale_client_update_request(
5395 qseecom.qsee_perf_client, 3);
5396 else {
5397 if (qclk->ce_core_src_clk != NULL)
5398 ret = __qseecom_enable_clk(CLK_QSEE);
5399 if (!ret) {
5400 ret =
5401 msm_bus_scale_client_update_request(
5402 qseecom.qsee_perf_client, 1);
5403 if ((ret) &&
5404 (qclk->ce_core_src_clk != NULL))
5405 __qseecom_disable_clk(CLK_QSEE);
5406 }
5407 }
5408 if (ret)
5409 pr_err("DFAB Bandwidth req failed (%d)\n",
5410 ret);
5411 else {
5412 qseecom.qsee_bw_count++;
5413 data->perf_enabled = true;
5414 }
5415 } else {
5416 qseecom.qsee_bw_count++;
5417 data->perf_enabled = true;
5418 }
5419 mutex_unlock(&qsee_bw_mutex);
5420 break;
5421 case CLK_SFPB:
5422 mutex_lock(&qsee_bw_mutex);
5423 if (!qseecom.qsee_sfpb_bw_count) {
5424 if (qseecom.qsee_bw_count > 0)
5425 ret = msm_bus_scale_client_update_request(
5426 qseecom.qsee_perf_client, 3);
5427 else {
5428 if (qclk->ce_core_src_clk != NULL)
5429 ret = __qseecom_enable_clk(CLK_QSEE);
5430 if (!ret) {
5431 ret =
5432 msm_bus_scale_client_update_request(
5433 qseecom.qsee_perf_client, 2);
5434 if ((ret) &&
5435 (qclk->ce_core_src_clk != NULL))
5436 __qseecom_disable_clk(CLK_QSEE);
5437 }
5438 }
5439
5440 if (ret)
5441 pr_err("SFPB Bandwidth req failed (%d)\n",
5442 ret);
5443 else {
5444 qseecom.qsee_sfpb_bw_count++;
5445 data->fast_load_enabled = true;
5446 }
5447 } else {
5448 qseecom.qsee_sfpb_bw_count++;
5449 data->fast_load_enabled = true;
5450 }
5451 mutex_unlock(&qsee_bw_mutex);
5452 break;
5453 default:
5454 pr_err("Clock type not defined\n");
5455 break;
5456 }
5457 return ret;
5458}
5459
5460static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5461 int32_t clk_type)
5462{
5463 int32_t ret = 0;
5464 struct qseecom_clk *qclk;
5465
5466 qclk = &qseecom.qsee;
5467
5468 if (qseecom.no_clock_support)
5469 return;
5470 if (!qseecom.qsee_perf_client)
5471 return;
5472
5473 switch (clk_type) {
5474 case CLK_DFAB:
5475 mutex_lock(&qsee_bw_mutex);
5476 if (qseecom.qsee_bw_count == 0) {
5477			pr_err("Client error. Extra call to disable DFAB clk\n");
5478 mutex_unlock(&qsee_bw_mutex);
5479 return;
5480 }
5481
5482 if (qseecom.qsee_bw_count == 1) {
5483 if (qseecom.qsee_sfpb_bw_count > 0)
5484 ret = msm_bus_scale_client_update_request(
5485 qseecom.qsee_perf_client, 2);
5486 else {
5487 ret = msm_bus_scale_client_update_request(
5488 qseecom.qsee_perf_client, 0);
5489 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5490 __qseecom_disable_clk(CLK_QSEE);
5491 }
5492 if (ret)
5493				pr_err("DFAB Bandwidth req fail (%d)\n",
5494 ret);
5495 else {
5496 qseecom.qsee_bw_count--;
5497 data->perf_enabled = false;
5498 }
5499 } else {
5500 qseecom.qsee_bw_count--;
5501 data->perf_enabled = false;
5502 }
5503 mutex_unlock(&qsee_bw_mutex);
5504 break;
5505 case CLK_SFPB:
5506 mutex_lock(&qsee_bw_mutex);
5507 if (qseecom.qsee_sfpb_bw_count == 0) {
5508			pr_err("Client error. Extra call to disable SFPB clk\n");
5509 mutex_unlock(&qsee_bw_mutex);
5510 return;
5511 }
5512 if (qseecom.qsee_sfpb_bw_count == 1) {
5513 if (qseecom.qsee_bw_count > 0)
5514 ret = msm_bus_scale_client_update_request(
5515 qseecom.qsee_perf_client, 1);
5516 else {
5517 ret = msm_bus_scale_client_update_request(
5518 qseecom.qsee_perf_client, 0);
5519 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5520 __qseecom_disable_clk(CLK_QSEE);
5521 }
5522 if (ret)
5523 pr_err("SFPB Bandwidth req fail (%d)\n",
5524 ret);
5525 else {
5526 qseecom.qsee_sfpb_bw_count--;
5527 data->fast_load_enabled = false;
5528 }
5529 } else {
5530 qseecom.qsee_sfpb_bw_count--;
5531 data->fast_load_enabled = false;
5532 }
5533 mutex_unlock(&qsee_bw_mutex);
5534 break;
5535 default:
5536 pr_err("Clock type not defined\n");
5537 break;
5538 }
5539
5540}
5541
5542static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5543 void __user *argp)
5544{
5545 struct ion_handle *ihandle; /* Ion handle */
5546 struct qseecom_load_img_req load_img_req;
5547 int uret = 0;
5548 int ret;
5549 ion_phys_addr_t pa = 0;
5550 size_t len;
5551 struct qseecom_load_app_ireq load_req;
5552 struct qseecom_load_app_64bit_ireq load_req_64bit;
5553 struct qseecom_command_scm_resp resp;
5554 void *cmd_buf = NULL;
5555 size_t cmd_len;
5556 /* Copy the relevant information needed for loading the image */
5557 if (copy_from_user(&load_img_req,
5558 (void __user *)argp,
5559 sizeof(struct qseecom_load_img_req))) {
5560 pr_err("copy_from_user failed\n");
5561 return -EFAULT;
5562 }
5563
5564 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005565 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005566 load_img_req.ifd_data_fd);
5567 if (IS_ERR_OR_NULL(ihandle)) {
5568 pr_err("Ion client could not retrieve the handle\n");
5569 return -ENOMEM;
5570 }
5571
5572 /* Get the physical address of the ION BUF */
5573 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5574 if (ret) {
5575 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5576 ret);
5577 return ret;
5578 }
5579 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5580 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5581 len, load_img_req.mdt_len,
5582 load_img_req.img_len);
5583		return -EINVAL;
5584 }
5585 /* Populate the structure for sending scm call to load image */
5586 if (qseecom.qsee_version < QSEE_VERSION_40) {
5587 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5588 load_req.mdt_len = load_img_req.mdt_len;
5589 load_req.img_len = load_img_req.img_len;
5590 load_req.phy_addr = (uint32_t)pa;
5591 cmd_buf = (void *)&load_req;
5592 cmd_len = sizeof(struct qseecom_load_app_ireq);
5593 } else {
5594 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5595 load_req_64bit.mdt_len = load_img_req.mdt_len;
5596 load_req_64bit.img_len = load_img_req.img_len;
5597 load_req_64bit.phy_addr = (uint64_t)pa;
5598 cmd_buf = (void *)&load_req_64bit;
5599 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5600 }
5601
5602 if (qseecom.support_bus_scaling) {
5603 mutex_lock(&qsee_bw_mutex);
5604 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5605 mutex_unlock(&qsee_bw_mutex);
5606 if (ret) {
5607 ret = -EIO;
5608 goto exit_cpu_restore;
5609 }
5610 }
5611
5612 /* Vote for the SFPB clock */
5613 ret = __qseecom_enable_clk_scale_up(data);
5614 if (ret) {
5615 ret = -EIO;
5616 goto exit_register_bus_bandwidth_needs;
5617 }
5618 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5619 ION_IOC_CLEAN_INV_CACHES);
5620 if (ret) {
5621 pr_err("cache operation failed %d\n", ret);
5622 goto exit_disable_clock;
5623 }
5624 /* SCM_CALL to load the external elf */
5625 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5626 &resp, sizeof(resp));
5627 if (ret) {
5628 pr_err("scm_call to load failed : ret %d\n",
5629 ret);
5630 ret = -EFAULT;
5631 goto exit_disable_clock;
5632 }
5633
5634 switch (resp.result) {
5635 case QSEOS_RESULT_SUCCESS:
5636 break;
5637 case QSEOS_RESULT_INCOMPLETE:
5638 pr_err("%s: qseos result incomplete\n", __func__);
5639 ret = __qseecom_process_incomplete_cmd(data, &resp);
5640 if (ret)
5641 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5642 break;
5643 case QSEOS_RESULT_FAILURE:
5644 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5645 ret = -EFAULT;
5646 break;
5647 default:
5648 pr_err("scm_call response result %d not supported\n",
5649 resp.result);
5650 ret = -EFAULT;
5651 break;
5652 }
5653
5654exit_disable_clock:
5655 __qseecom_disable_clk_scale_down(data);
5656
5657exit_register_bus_bandwidth_needs:
5658 if (qseecom.support_bus_scaling) {
5659 mutex_lock(&qsee_bw_mutex);
5660 uret = qseecom_unregister_bus_bandwidth_needs(data);
5661 mutex_unlock(&qsee_bw_mutex);
5662 if (uret)
5663 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5664 uret, ret);
5665 }
5666
5667exit_cpu_restore:
5668 /* Deallocate the handle */
5669 if (!IS_ERR_OR_NULL(ihandle))
5670 ion_free(qseecom.ion_clnt, ihandle);
5671 return ret;
5672}
5673
5674static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5675{
5676 int ret = 0;
5677 struct qseecom_command_scm_resp resp;
5678 struct qseecom_unload_app_ireq req;
5679
5680 /* unavailable client app */
5681 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5682
5683 /* Populate the structure for sending scm call to unload image */
5684 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5685
5686 /* SCM_CALL to unload the external elf */
5687 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5688 sizeof(struct qseecom_unload_app_ireq),
5689 &resp, sizeof(resp));
5690 if (ret) {
5691 pr_err("scm_call to unload failed : ret %d\n",
5692 ret);
5693 ret = -EFAULT;
5694 goto qseecom_unload_external_elf_scm_err;
5695 }
5696 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5697 ret = __qseecom_process_incomplete_cmd(data, &resp);
5698 if (ret)
5699 pr_err("process_incomplete_cmd fail err: %d\n",
5700 ret);
5701 } else {
5702 if (resp.result != QSEOS_RESULT_SUCCESS) {
5703 pr_err("scm_call to unload image failed resp.result =%d\n",
5704 resp.result);
5705 ret = -EFAULT;
5706 }
5707 }
5708
5709qseecom_unload_external_elf_scm_err:
5710
5711 return ret;
5712}
5713
5714static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5715 void __user *argp)
5716{
5717
5718 int32_t ret;
5719 struct qseecom_qseos_app_load_query query_req;
5720 struct qseecom_check_app_ireq req;
5721 struct qseecom_registered_app_list *entry = NULL;
5722 unsigned long flags = 0;
5723 uint32_t app_arch = 0, app_id = 0;
5724 bool found_app = false;
5725
5726 /* Copy the relevant information needed for loading the image */
5727 if (copy_from_user(&query_req,
5728 (void __user *)argp,
5729 sizeof(struct qseecom_qseos_app_load_query))) {
5730 pr_err("copy_from_user failed\n");
5731 return -EFAULT;
5732 }
5733
5734 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5735 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5736 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5737
5738 ret = __qseecom_check_app_exists(req, &app_id);
5739 if (ret) {
5740		pr_err("scm call to check if app is loaded failed\n");
5741 return ret; /* scm call failed */
5742 }
5743 if (app_id) {
5744 pr_debug("App id %d (%s) already exists\n", app_id,
5745 (char *)(req.app_name));
5746 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5747 list_for_each_entry(entry,
5748 &qseecom.registered_app_list_head, list){
5749 if (entry->app_id == app_id) {
5750 app_arch = entry->app_arch;
5751 entry->ref_cnt++;
5752 found_app = true;
5753 break;
5754 }
5755 }
5756 spin_unlock_irqrestore(
5757 &qseecom.registered_app_list_lock, flags);
5758 data->client.app_id = app_id;
5759 query_req.app_id = app_id;
5760 if (app_arch) {
5761 data->client.app_arch = app_arch;
5762 query_req.app_arch = app_arch;
5763 } else {
5764 data->client.app_arch = 0;
5765 query_req.app_arch = 0;
5766 }
5767 strlcpy(data->client.app_name, query_req.app_name,
5768 MAX_APP_NAME_SIZE);
5769 /*
5770 * If app was loaded by appsbl before and was not registered,
5771 * regiser this app now.
5772		 * register this app now.
5773 if (!found_app) {
5774 pr_debug("Register app %d [%s] which was loaded before\n",
5775				app_id, (char *)query_req.app_name);
5776 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5777 if (!entry) {
5778 pr_err("kmalloc for app entry failed\n");
5779 return -ENOMEM;
5780 }
5781 entry->app_id = app_id;
5782 entry->ref_cnt = 1;
5783 entry->app_arch = data->client.app_arch;
5784 strlcpy(entry->app_name, data->client.app_name,
5785 MAX_APP_NAME_SIZE);
5786 entry->app_blocked = false;
5787 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005788 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005789 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5790 flags);
5791 list_add_tail(&entry->list,
5792 &qseecom.registered_app_list_head);
5793 spin_unlock_irqrestore(
5794 &qseecom.registered_app_list_lock, flags);
5795 }
5796 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5797 pr_err("copy_to_user failed\n");
5798 return -EFAULT;
5799 }
5800 return -EEXIST; /* app already loaded */
5801 } else {
5802 return 0; /* app not loaded */
5803 }
5804}
5805
5806static int __qseecom_get_ce_pipe_info(
5807 enum qseecom_key_management_usage_type usage,
5808 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5809{
5810 int ret = -EINVAL;
5811 int i, j;
5812 struct qseecom_ce_info_use *p = NULL;
5813 int total = 0;
5814 struct qseecom_ce_pipe_entry *pcepipe;
5815
5816 switch (usage) {
5817 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5818 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5819 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5820 if (qseecom.support_fde) {
5821 p = qseecom.ce_info.fde;
5822 total = qseecom.ce_info.num_fde;
5823 } else {
5824 pr_err("system does not support fde\n");
5825 return -EINVAL;
5826 }
5827 break;
5828 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5829 if (qseecom.support_pfe) {
5830 p = qseecom.ce_info.pfe;
5831 total = qseecom.ce_info.num_pfe;
5832 } else {
5833 pr_err("system does not support pfe\n");
5834 return -EINVAL;
5835 }
5836 break;
5837 default:
5838 pr_err("unsupported usage %d\n", usage);
5839 return -EINVAL;
5840 }
5841
5842 for (j = 0; j < total; j++) {
5843 if (p->unit_num == unit) {
5844 pcepipe = p->ce_pipe_entry;
5845 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5846 (*ce_hw)[i] = pcepipe->ce_num;
5847 *pipe = pcepipe->ce_pipe_pair;
5848 pcepipe++;
5849 }
5850 ret = 0;
5851 break;
5852 }
5853 p++;
5854 }
5855 return ret;
5856}
5857
5858static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
5859 enum qseecom_key_management_usage_type usage,
5860 struct qseecom_key_generate_ireq *ireq)
5861{
5862 struct qseecom_command_scm_resp resp;
5863 int ret;
5864
5865 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5866 usage >= QSEOS_KM_USAGE_MAX) {
5867 pr_err("Error:: unsupported usage %d\n", usage);
5868 return -EFAULT;
5869 }
5870 ret = __qseecom_enable_clk(CLK_QSEE);
5871 if (ret)
5872 return ret;
5873
5874 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5875 ireq, sizeof(struct qseecom_key_generate_ireq),
5876 &resp, sizeof(resp));
5877 if (ret) {
5878 if (ret == -EINVAL &&
5879 resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5880 pr_debug("Key ID exists.\n");
5881 ret = 0;
5882 } else {
5883 pr_err("scm call to generate key failed : %d\n", ret);
5884 ret = -EFAULT;
5885 }
5886 goto generate_key_exit;
5887 }
5888
5889 switch (resp.result) {
5890 case QSEOS_RESULT_SUCCESS:
5891 break;
5892 case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
5893 pr_debug("Key ID exists.\n");
5894 break;
5895 case QSEOS_RESULT_INCOMPLETE:
5896 ret = __qseecom_process_incomplete_cmd(data, &resp);
5897 if (ret) {
5898 if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5899 pr_debug("Key ID exists.\n");
5900 ret = 0;
5901 } else {
5902 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5903 resp.result);
5904 }
5905 }
5906 break;
5907 case QSEOS_RESULT_FAILURE:
5908 default:
5909 pr_err("gen key scm call failed resp.result %d\n", resp.result);
5910 ret = -EINVAL;
5911 break;
5912 }
5913generate_key_exit:
5914 __qseecom_disable_clk(CLK_QSEE);
5915 return ret;
5916}
5917
5918static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
5919 enum qseecom_key_management_usage_type usage,
5920 struct qseecom_key_delete_ireq *ireq)
5921{
5922 struct qseecom_command_scm_resp resp;
5923 int ret;
5924
5925 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5926 usage >= QSEOS_KM_USAGE_MAX) {
5927 pr_err("Error:: unsupported usage %d\n", usage);
5928 return -EFAULT;
5929 }
5930 ret = __qseecom_enable_clk(CLK_QSEE);
5931 if (ret)
5932 return ret;
5933
5934 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5935 ireq, sizeof(struct qseecom_key_delete_ireq),
5936 &resp, sizeof(struct qseecom_command_scm_resp));
5937 if (ret) {
5938 if (ret == -EINVAL &&
5939 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5940 pr_debug("Max attempts to input password reached.\n");
5941 ret = -ERANGE;
5942 } else {
5943 pr_err("scm call to delete key failed : %d\n", ret);
5944 ret = -EFAULT;
5945 }
5946 goto del_key_exit;
5947 }
5948
5949 switch (resp.result) {
5950 case QSEOS_RESULT_SUCCESS:
5951 break;
5952 case QSEOS_RESULT_INCOMPLETE:
5953 ret = __qseecom_process_incomplete_cmd(data, &resp);
5954 if (ret) {
5955 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5956 resp.result);
5957 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5958 pr_debug("Max attempts to input password reached.\n");
5959 ret = -ERANGE;
5960 }
5961 }
5962 break;
5963 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5964 pr_debug("Max attempts to input password reached.\n");
5965 ret = -ERANGE;
5966 break;
5967 case QSEOS_RESULT_FAILURE:
5968 default:
5969 pr_err("Delete key scm call failed resp.result %d\n",
5970 resp.result);
5971 ret = -EINVAL;
5972 break;
5973 }
5974del_key_exit:
5975 __qseecom_disable_clk(CLK_QSEE);
5976 return ret;
5977}
5978
5979static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5980 enum qseecom_key_management_usage_type usage,
5981 struct qseecom_key_select_ireq *ireq)
5982{
5983 struct qseecom_command_scm_resp resp;
5984 int ret;
5985
5986 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5987 usage >= QSEOS_KM_USAGE_MAX) {
5988 pr_err("Error:: unsupported usage %d\n", usage);
5989 return -EFAULT;
5990 }
5991 ret = __qseecom_enable_clk(CLK_QSEE);
5992 if (ret)
5993 return ret;
5994
5995 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5996 ret = __qseecom_enable_clk(CLK_CE_DRV);
5997 if (ret)
5998 return ret;
5999 }
6000
6001 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6002 ireq, sizeof(struct qseecom_key_select_ireq),
6003 &resp, sizeof(struct qseecom_command_scm_resp));
6004 if (ret) {
6005 if (ret == -EINVAL &&
6006 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
6007 pr_debug("Max attempts to input password reached.\n");
6008 ret = -ERANGE;
6009 } else if (ret == -EINVAL &&
6010 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
6011 pr_debug("Set Key operation under processing...\n");
6012 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6013 } else {
6014 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
6015 ret);
6016 ret = -EFAULT;
6017 }
6018 goto set_key_exit;
6019 }
6020
6021 switch (resp.result) {
6022 case QSEOS_RESULT_SUCCESS:
6023 break;
6024 case QSEOS_RESULT_INCOMPLETE:
6025 ret = __qseecom_process_incomplete_cmd(data, &resp);
6026 if (ret) {
6027 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
6028 resp.result);
6029 if (resp.result ==
6030 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
6031 pr_debug("Set Key operation under processing...\n");
6032 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6033 }
6034 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
6035 pr_debug("Max attempts to input password reached.\n");
6036 ret = -ERANGE;
6037 }
6038 }
6039 break;
6040 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
6041 pr_debug("Max attempts to input password reached.\n");
6042 ret = -ERANGE;
6043 break;
6044 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
6045 pr_debug("Set Key operation under processing...\n");
6046 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6047 break;
6048 case QSEOS_RESULT_FAILURE:
6049 default:
6050 pr_err("Set key scm call failed resp.result %d\n", resp.result);
6051 ret = -EINVAL;
6052 break;
6053 }
6054set_key_exit:
6055 __qseecom_disable_clk(CLK_QSEE);
6056 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
6057 __qseecom_disable_clk(CLK_CE_DRV);
6058 return ret;
6059}
6060
6061static int __qseecom_update_current_key_user_info(
6062 struct qseecom_dev_handle *data,
6063 enum qseecom_key_management_usage_type usage,
6064 struct qseecom_key_userinfo_update_ireq *ireq)
6065{
6066 struct qseecom_command_scm_resp resp;
6067 int ret;
6068
6069 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6070 usage >= QSEOS_KM_USAGE_MAX) {
6071 pr_err("Error:: unsupported usage %d\n", usage);
6072 return -EFAULT;
6073 }
6074 ret = __qseecom_enable_clk(CLK_QSEE);
6075 if (ret)
6076 return ret;
6077
6078 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6079 ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
6080 &resp, sizeof(struct qseecom_command_scm_resp));
6081 if (ret) {
6082 if (ret == -EINVAL &&
6083 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
6084 pr_debug("Set Key operation under processing...\n");
6085 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6086 } else {
6087 pr_err("scm call to update key userinfo failed: %d\n",
6088 ret);
6089 __qseecom_disable_clk(CLK_QSEE);
6090 return -EFAULT;
6091 }
6092 }
6093
6094 switch (resp.result) {
6095 case QSEOS_RESULT_SUCCESS:
6096 break;
6097 case QSEOS_RESULT_INCOMPLETE:
6098 ret = __qseecom_process_incomplete_cmd(data, &resp);
6099 if (resp.result ==
6100 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
6101 pr_debug("Set Key operation under processing...\n");
6102 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6103 }
6104 if (ret)
6105 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
6106 resp.result);
6107 break;
6108 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
6109 pr_debug("Update Key operation under processing...\n");
6110 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
6111 break;
6112 case QSEOS_RESULT_FAILURE:
6113 default:
6114 pr_err("Set key scm call failed resp.result %d\n", resp.result);
6115 ret = -EINVAL;
6116 break;
6117 }
6118
6119 __qseecom_disable_clk(CLK_QSEE);
6120 return ret;
6121}
6122
6123
6124static int qseecom_enable_ice_setup(int usage)
6125{
6126 int ret = 0;
6127
6128 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
6129 ret = qcom_ice_setup_ice_hw("ufs", true);
6130 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
6131 ret = qcom_ice_setup_ice_hw("sdcc", true);
6132
6133 return ret;
6134}
6135
6136static int qseecom_disable_ice_setup(int usage)
6137{
6138 int ret = 0;
6139
6140 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
6141 ret = qcom_ice_setup_ice_hw("ufs", false);
6142 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
6143 ret = qcom_ice_setup_ice_hw("sdcc", false);
6144
6145 return ret;
6146}
6147
6148static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
6149{
6150 struct qseecom_ce_info_use *pce_info_use, *p;
6151 int total = 0;
6152 int i;
6153
6154 switch (usage) {
6155 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
6156 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
6157 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
6158 p = qseecom.ce_info.fde;
6159 total = qseecom.ce_info.num_fde;
6160 break;
6161 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
6162 p = qseecom.ce_info.pfe;
6163 total = qseecom.ce_info.num_pfe;
6164 break;
6165 default:
6166 pr_err("unsupported usage %d\n", usage);
6167 return -EINVAL;
6168 }
6169
6170 pce_info_use = NULL;
6171
6172 for (i = 0; i < total; i++) {
6173 if (p->unit_num == unit) {
6174 pce_info_use = p;
6175 break;
6176 }
6177 p++;
6178 }
6179 if (!pce_info_use) {
6180 pr_err("can not find %d\n", unit);
6181 return -EINVAL;
6182 }
6183 return pce_info_use->num_ce_pipe_entries;
6184}
6185
6186static int qseecom_create_key(struct qseecom_dev_handle *data,
6187 void __user *argp)
6188{
6189 int i;
6190 uint32_t *ce_hw = NULL;
6191 uint32_t pipe = 0;
6192 int ret = 0;
6193 uint32_t flags = 0;
6194 struct qseecom_create_key_req create_key_req;
6195 struct qseecom_key_generate_ireq generate_key_ireq;
6196 struct qseecom_key_select_ireq set_key_ireq;
6197 uint32_t entries = 0;
6198
6199 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
6200 if (ret) {
6201 pr_err("copy_from_user failed\n");
6202 return ret;
6203 }
6204
6205 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6206 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6207 pr_err("unsupported usage %d\n", create_key_req.usage);
6208 ret = -EFAULT;
6209 return ret;
6210 }
6211 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6212 create_key_req.usage);
6213 if (entries <= 0) {
6214 pr_err("no ce instance for usage %d instance %d\n",
6215 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
6216 ret = -EINVAL;
6217 return ret;
6218 }
6219
6220 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6221 if (!ce_hw) {
6222 ret = -ENOMEM;
6223 return ret;
6224 }
6225 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
6226 DEFAULT_CE_INFO_UNIT);
6227 if (ret) {
6228 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6229 ret = -EINVAL;
6230 goto free_buf;
6231 }
6232
6233 if (qseecom.fde_key_size)
6234 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6235 else
6236 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6237
Jiten Patela7bb1d52018-05-11 12:34:26 +05306238 if (qseecom.enable_key_wrap_in_ks == true)
6239 flags |= ENABLE_KEY_WRAP_IN_KS;
6240
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006241 generate_key_ireq.flags = flags;
6242 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
6243 memset((void *)generate_key_ireq.key_id,
6244 0, QSEECOM_KEY_ID_SIZE);
6245 memset((void *)generate_key_ireq.hash32,
6246 0, QSEECOM_HASH_SIZE);
6247 memcpy((void *)generate_key_ireq.key_id,
6248 (void *)key_id_array[create_key_req.usage].desc,
6249 QSEECOM_KEY_ID_SIZE);
6250 memcpy((void *)generate_key_ireq.hash32,
6251 (void *)create_key_req.hash32,
6252 QSEECOM_HASH_SIZE);
6253
6254 ret = __qseecom_generate_and_save_key(data,
6255 create_key_req.usage, &generate_key_ireq);
6256 if (ret) {
6257 pr_err("Failed to generate key on storage: %d\n", ret);
6258 goto free_buf;
6259 }
6260
6261 for (i = 0; i < entries; i++) {
6262 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6263 if (create_key_req.usage ==
6264 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6265 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6266 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6267
6268 } else if (create_key_req.usage ==
6269 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6270 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6271 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6272
6273 } else {
6274 set_key_ireq.ce = ce_hw[i];
6275 set_key_ireq.pipe = pipe;
6276 }
6277 set_key_ireq.flags = flags;
6278
6279 /* set both PIPE_ENC and PIPE_ENC_XTS*/
6280 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6281 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6282 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6283 memcpy((void *)set_key_ireq.key_id,
6284 (void *)key_id_array[create_key_req.usage].desc,
6285 QSEECOM_KEY_ID_SIZE);
6286 memcpy((void *)set_key_ireq.hash32,
6287 (void *)create_key_req.hash32,
6288 QSEECOM_HASH_SIZE);
6289 /*
6290		 * qseecom_enable_ice_setup() returns 0 if this is a GPCE based
6291		 * crypto instance or if ICE is set up properly
6292 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006293 ret = qseecom_enable_ice_setup(create_key_req.usage);
6294 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006295 goto free_buf;
6296
6297 do {
6298 ret = __qseecom_set_clear_ce_key(data,
6299 create_key_req.usage,
6300 &set_key_ireq);
6301 /*
6302 * wait a little before calling scm again to let other
6303 * processes run
6304 */
6305 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6306 msleep(50);
6307
6308 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6309
6310 qseecom_disable_ice_setup(create_key_req.usage);
6311
6312 if (ret) {
6313 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6314 pipe, ce_hw[i], ret);
6315 goto free_buf;
6316 } else {
6317 pr_err("Set the key successfully\n");
6318 if ((create_key_req.usage ==
6319 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6320 (create_key_req.usage ==
6321 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6322 goto free_buf;
6323 }
6324 }
6325
6326free_buf:
6327 kzfree(ce_hw);
6328 return ret;
6329}
6330
6331static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6332 void __user *argp)
6333{
6334 uint32_t *ce_hw = NULL;
6335 uint32_t pipe = 0;
6336 int ret = 0;
6337 uint32_t flags = 0;
6338 int i, j;
6339 struct qseecom_wipe_key_req wipe_key_req;
6340 struct qseecom_key_delete_ireq delete_key_ireq;
6341 struct qseecom_key_select_ireq clear_key_ireq;
6342 uint32_t entries = 0;
6343
6344 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6345 if (ret) {
6346 pr_err("copy_from_user failed\n");
6347 return ret;
6348 }
6349
6350 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6351 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6352 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6353 ret = -EFAULT;
6354 return ret;
6355 }
6356
6357 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6358 wipe_key_req.usage);
6359 if (entries <= 0) {
6360 pr_err("no ce instance for usage %d instance %d\n",
6361 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6362 ret = -EINVAL;
6363 return ret;
6364 }
6365
6366 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6367 if (!ce_hw) {
6368 ret = -ENOMEM;
6369 return ret;
6370 }
6371
6372 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6373 DEFAULT_CE_INFO_UNIT);
6374 if (ret) {
6375 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6376 ret = -EINVAL;
6377 goto free_buf;
6378 }
6379
6380 if (wipe_key_req.wipe_key_flag) {
6381 delete_key_ireq.flags = flags;
6382 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6383 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6384 memcpy((void *)delete_key_ireq.key_id,
6385 (void *)key_id_array[wipe_key_req.usage].desc,
6386 QSEECOM_KEY_ID_SIZE);
6387 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6388
6389 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6390 &delete_key_ireq);
6391 if (ret) {
6392 pr_err("Failed to delete key from ssd storage: %d\n",
6393 ret);
6394 ret = -EFAULT;
6395 goto free_buf;
6396 }
6397 }
6398
6399 for (j = 0; j < entries; j++) {
6400 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6401 if (wipe_key_req.usage ==
6402 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6403 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6404 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6405 } else if (wipe_key_req.usage ==
6406 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6407 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6408 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6409 } else {
6410 clear_key_ireq.ce = ce_hw[j];
6411 clear_key_ireq.pipe = pipe;
6412 }
6413 clear_key_ireq.flags = flags;
6414 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6415 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6416 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6417 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6418
6419 /*
6420		 * qseecom_enable_ice_setup() returns 0 if this is a GPCE based
6421		 * crypto instance or if ICE is set up properly
6422 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006423 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6424 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006425 goto free_buf;
6426
6427 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6428 &clear_key_ireq);
6429
6430 qseecom_disable_ice_setup(wipe_key_req.usage);
6431
6432 if (ret) {
6433 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6434 pipe, ce_hw[j], ret);
6435 ret = -EFAULT;
6436 goto free_buf;
6437 }
6438 }
6439
6440free_buf:
6441 kzfree(ce_hw);
6442 return ret;
6443}
6444
6445static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6446 void __user *argp)
6447{
6448 int ret = 0;
6449 uint32_t flags = 0;
6450 struct qseecom_update_key_userinfo_req update_key_req;
6451 struct qseecom_key_userinfo_update_ireq ireq;
6452
6453 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6454 if (ret) {
6455 pr_err("copy_from_user failed\n");
6456 return ret;
6457 }
6458
6459 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6460 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6461 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6462 return -EFAULT;
6463 }
6464
6465 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6466
6467 if (qseecom.fde_key_size)
6468 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6469 else
6470 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6471
6472 ireq.flags = flags;
6473 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6474 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6475 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6476 memcpy((void *)ireq.key_id,
6477 (void *)key_id_array[update_key_req.usage].desc,
6478 QSEECOM_KEY_ID_SIZE);
6479 memcpy((void *)ireq.current_hash32,
6480 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6481 memcpy((void *)ireq.new_hash32,
6482 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6483
6484 do {
6485 ret = __qseecom_update_current_key_user_info(data,
6486 update_key_req.usage,
6487 &ireq);
6488 /*
6489 * wait a little before calling scm again to let other
6490 * processes run
6491 */
6492 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6493 msleep(50);
6494
6495 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6496 if (ret) {
6497 pr_err("Failed to update key info: %d\n", ret);
6498 return ret;
6499 }
6500 return ret;
6501
6502}
6503static int qseecom_is_es_activated(void __user *argp)
6504{
Zhen Kong26e62742018-05-04 17:19:06 -07006505 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006506 struct qseecom_command_scm_resp resp;
6507 int ret;
6508
6509 if (qseecom.qsee_version < QSEE_VERSION_04) {
6510 pr_err("invalid qsee version\n");
6511 return -ENODEV;
6512 }
6513
6514 if (argp == NULL) {
6515 pr_err("arg is null\n");
6516 return -EINVAL;
6517 }
6518
6519 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6520 &req, sizeof(req), &resp, sizeof(resp));
6521 if (ret) {
6522 pr_err("scm_call failed\n");
6523 return ret;
6524 }
6525
6526 req.is_activated = resp.result;
6527 ret = copy_to_user(argp, &req, sizeof(req));
6528 if (ret) {
6529 pr_err("copy_to_user failed\n");
6530 return ret;
6531 }
6532
6533 return 0;
6534}
6535
6536static int qseecom_save_partition_hash(void __user *argp)
6537{
6538 struct qseecom_save_partition_hash_req req;
6539 struct qseecom_command_scm_resp resp;
6540 int ret;
6541
6542 memset(&resp, 0x00, sizeof(resp));
6543
6544 if (qseecom.qsee_version < QSEE_VERSION_04) {
6545 pr_err("invalid qsee version\n");
6546 return -ENODEV;
6547 }
6548
6549 if (argp == NULL) {
6550 pr_err("arg is null\n");
6551 return -EINVAL;
6552 }
6553
6554 ret = copy_from_user(&req, argp, sizeof(req));
6555 if (ret) {
6556 pr_err("copy_from_user failed\n");
6557 return ret;
6558 }
6559
6560 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6561 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6562 if (ret) {
6563 pr_err("qseecom_scm_call failed\n");
6564 return ret;
6565 }
6566
6567 return 0;
6568}
6569
6570static int qseecom_mdtp_cipher_dip(void __user *argp)
6571{
6572 struct qseecom_mdtp_cipher_dip_req req;
6573 u32 tzbuflenin, tzbuflenout;
6574 char *tzbufin = NULL, *tzbufout = NULL;
6575 struct scm_desc desc = {0};
6576 int ret;
6577
6578 do {
6579 /* Copy the parameters from userspace */
6580 if (argp == NULL) {
6581 pr_err("arg is null\n");
6582 ret = -EINVAL;
6583 break;
6584 }
6585
6586 ret = copy_from_user(&req, argp, sizeof(req));
6587 if (ret) {
6588 pr_err("copy_from_user failed, ret= %d\n", ret);
6589 break;
6590 }
6591
6592 if (req.in_buf == NULL || req.out_buf == NULL ||
6593 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6594 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6595 req.direction > 1) {
6596 pr_err("invalid parameters\n");
6597 ret = -EINVAL;
6598 break;
6599 }
6600
6601 /* Copy the input buffer from userspace to kernel space */
6602 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6603 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6604 if (!tzbufin) {
6605 pr_err("error allocating in buffer\n");
6606 ret = -ENOMEM;
6607 break;
6608 }
6609
6610 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6611 if (ret) {
6612 pr_err("copy_from_user failed, ret=%d\n", ret);
6613 break;
6614 }
6615
6616 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6617
6618 /* Prepare the output buffer in kernel space */
6619 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6620 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6621 if (!tzbufout) {
6622 pr_err("error allocating out buffer\n");
6623 ret = -ENOMEM;
6624 break;
6625 }
6626
6627 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6628
6629 /* Send the command to TZ */
6630 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6631 desc.args[0] = virt_to_phys(tzbufin);
6632 desc.args[1] = req.in_buf_size;
6633 desc.args[2] = virt_to_phys(tzbufout);
6634 desc.args[3] = req.out_buf_size;
6635 desc.args[4] = req.direction;
6636
6637 ret = __qseecom_enable_clk(CLK_QSEE);
6638 if (ret)
6639 break;
6640
Zhen Kong03f220d2019-02-01 17:12:34 -08006641 ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006642
6643 __qseecom_disable_clk(CLK_QSEE);
6644
6645 if (ret) {
6646 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6647 ret);
6648 break;
6649 }
6650
6651 /* Copy the output buffer from kernel space to userspace */
6652 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6653 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6654 if (ret) {
6655 pr_err("copy_to_user failed, ret=%d\n", ret);
6656 break;
6657 }
6658 } while (0);
6659
6660 kzfree(tzbufin);
6661 kzfree(tzbufout);
6662
6663 return ret;
6664}
6665
6666static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6667 struct qseecom_qteec_req *req)
6668{
6669 if (!data || !data->client.ihandle) {
6670 pr_err("Client or client handle is not initialized\n");
6671 return -EINVAL;
6672 }
6673
6674 if (data->type != QSEECOM_CLIENT_APP)
6675 return -EFAULT;
6676
6677 if (req->req_len > UINT_MAX - req->resp_len) {
6678 pr_err("Integer overflow detected in req_len & rsp_len\n");
6679 return -EINVAL;
6680 }
6681
6682 if (req->req_len + req->resp_len > data->client.sb_length) {
6683		pr_debug("Not enough memory to fit cmd_buf and resp_buf\n");
6684		pr_debug("Required: %u, Available: %zu\n",
6685 (req->req_len + req->resp_len), data->client.sb_length);
6686 return -ENOMEM;
6687 }
6688
6689 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6690 pr_err("cmd buffer or response buffer is null\n");
6691 return -EINVAL;
6692 }
6693 if (((uintptr_t)req->req_ptr <
6694 data->client.user_virt_sb_base) ||
6695 ((uintptr_t)req->req_ptr >=
6696 (data->client.user_virt_sb_base + data->client.sb_length))) {
6697 pr_err("cmd buffer address not within shared buffer\n");
6698 return -EINVAL;
6699 }
6700
6701 if (((uintptr_t)req->resp_ptr <
6702 data->client.user_virt_sb_base) ||
6703 ((uintptr_t)req->resp_ptr >=
6704 (data->client.user_virt_sb_base + data->client.sb_length))) {
6705 pr_err("response buffer address not within shared buffer\n");
6706 return -EINVAL;
6707 }
6708
6709 if ((req->req_len == 0) || (req->resp_len == 0)) {
6710 pr_err("cmd buf length/response buf length not valid\n");
6711 return -EINVAL;
6712 }
6713
6714 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6715 pr_err("Integer overflow in req_len & req_ptr\n");
6716 return -EINVAL;
6717 }
6718
6719 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6720 pr_err("Integer overflow in resp_len & resp_ptr\n");
6721 return -EINVAL;
6722 }
6723
6724 if (data->client.user_virt_sb_base >
6725 (ULONG_MAX - data->client.sb_length)) {
6726 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6727 return -EINVAL;
6728 }
6729 if ((((uintptr_t)req->req_ptr + req->req_len) >
6730 ((uintptr_t)data->client.user_virt_sb_base +
6731 data->client.sb_length)) ||
6732 (((uintptr_t)req->resp_ptr + req->resp_len) >
6733 ((uintptr_t)data->client.user_virt_sb_base +
6734 data->client.sb_length))) {
6735 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6736 return -EINVAL;
6737 }
6738 return 0;
6739}
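
/*
 * In short, the checks above accept a request only when req_ptr/req_len and
 * resp_ptr/resp_len each describe a non-empty range lying entirely within
 * the client's registered shared buffer
 * [user_virt_sb_base, user_virt_sb_base + sb_length), when
 * req_len + resp_len fits in that buffer, and when none of the
 * pointer/length additions can overflow.
 */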
6740
6741static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6742 uint32_t fd_idx, struct sg_table *sg_ptr)
6743{
6744 struct scatterlist *sg = sg_ptr->sgl;
6745 struct qseecom_sg_entry *sg_entry;
6746 void *buf;
6747 uint i;
6748 size_t size;
6749 dma_addr_t coh_pmem;
6750
6751 if (fd_idx >= MAX_ION_FD) {
6752 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6753 return -ENOMEM;
6754 }
6755 /*
6756 * Allocate a buffer, populate it with the number of entries plus
6757 * each sg entry's phys addr and length, then return the
6758 * phys addr of the buffer.
6759 */
6760 size = sizeof(uint32_t) +
6761 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6762 size = (size + PAGE_SIZE) & PAGE_MASK;
6763 buf = dma_alloc_coherent(qseecom.pdev,
6764 size, &coh_pmem, GFP_KERNEL);
6765 if (buf == NULL) {
6766 pr_err("failed to alloc memory for sg buf\n");
6767 return -ENOMEM;
6768 }
6769 *(uint32_t *)buf = sg_ptr->nents;
6770 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6771 for (i = 0; i < sg_ptr->nents; i++) {
6772 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6773 sg_entry->len = sg->length;
6774 sg_entry++;
6775 sg = sg_next(sg);
6776 }
6777 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6778 data->client.sec_buf_fd[fd_idx].vbase = buf;
6779 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6780 data->client.sec_buf_fd[fd_idx].size = size;
6781 return 0;
6782}
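
/*
 * Layout of the coherent buffer built above, whose physical address is later
 * patched into the TA's memref parameter (see
 * __qseecom_update_qteec_req_buf() below):
 *
 *	offset 0: uint32_t nents
 *	offset 4: struct qseecom_sg_entry[nents], each carrying the 32-bit
 *	          phys addr and length of one scatterlist segment
 *
 * The allocation is rounded up to a whole number of pages.
 */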
6783
6784static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6785 struct qseecom_dev_handle *data, bool cleanup)
6786{
6787 struct ion_handle *ihandle;
6788 int ret = 0;
6789 int i = 0;
6790 uint32_t *update;
6791 struct sg_table *sg_ptr = NULL;
6792 struct scatterlist *sg;
6793 struct qseecom_param_memref *memref;
6794
6795 if (req == NULL) {
6796 pr_err("Invalid address\n");
6797 return -EINVAL;
6798 }
6799 for (i = 0; i < MAX_ION_FD; i++) {
6800 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006801 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006802 req->ifd_data[i].fd);
6803 if (IS_ERR_OR_NULL(ihandle)) {
6804 pr_err("Ion client can't retrieve the handle\n");
6805 return -ENOMEM;
6806 }
6807 if ((req->req_len < sizeof(uint32_t)) ||
6808 (req->ifd_data[i].cmd_buf_offset >
6809 req->req_len - sizeof(uint32_t))) {
6810 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6811 req->req_len,
6812 req->ifd_data[i].cmd_buf_offset);
6813 return -EINVAL;
6814 }
6815 update = (uint32_t *)((char *) req->req_ptr +
6816 req->ifd_data[i].cmd_buf_offset);
6817 if (!update) {
6818 pr_err("update pointer is NULL\n");
6819 return -EINVAL;
6820 }
6821 } else {
6822 continue;
6823 }
6824 /* Populate the cmd data structure with the phys_addr */
6825 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6826 if (IS_ERR_OR_NULL(sg_ptr)) {
6827 pr_err("Ion client could not retrieve sg table\n");
6828 goto err;
6829 }
6830 sg = sg_ptr->sgl;
6831 if (sg == NULL) {
6832 pr_err("sg is NULL\n");
6833 goto err;
6834 }
6835 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6836 pr_err("Num of scat entr (%d) or length (%d) invalid\n",
6837 sg_ptr->nents, sg->length);
6838 goto err;
6839 }
6840 /* clean up buf for pre-allocated fd */
6841 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6842 (*update)) {
6843 if (data->client.sec_buf_fd[i].vbase)
6844 dma_free_coherent(qseecom.pdev,
6845 data->client.sec_buf_fd[i].size,
6846 data->client.sec_buf_fd[i].vbase,
6847 data->client.sec_buf_fd[i].pbase);
6848 memset((void *)update, 0,
6849 sizeof(struct qseecom_param_memref));
6850 memset(&(data->client.sec_buf_fd[i]), 0,
6851 sizeof(struct qseecom_sec_buf_fd_info));
6852 goto clean;
6853 }
6854
6855 if (*update == 0) {
6856 /* update buf for pre-allocated fd from secure heap */
6857 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6858 sg_ptr);
6859 if (ret) {
6860 pr_err("Failed to handle buf for fd[%d]\n", i);
6861 goto err;
6862 }
6863 memref = (struct qseecom_param_memref *)update;
6864 memref->buffer =
6865 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6866 memref->size =
6867 (uint32_t)(data->client.sec_buf_fd[i].size);
6868 } else {
6869 /* update buf for fd from non-secure qseecom heap */
6870 if (sg_ptr->nents != 1) {
6871 pr_err("Num of scat entr (%d) invalid\n",
6872 sg_ptr->nents);
6873 goto err;
6874 }
6875 if (cleanup)
6876 *update = 0;
6877 else
6878 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6879 }
6880clean:
6881 if (cleanup) {
6882 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6883 ihandle, NULL, sg->length,
6884 ION_IOC_INV_CACHES);
6885 if (ret) {
6886 pr_err("cache operation failed %d\n", ret);
6887 goto err;
6888 }
6889 } else {
6890 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6891 ihandle, NULL, sg->length,
6892 ION_IOC_CLEAN_INV_CACHES);
6893 if (ret) {
6894 pr_err("cache operation failed %d\n", ret);
6895 goto err;
6896 }
6897 data->sglistinfo_ptr[i].indexAndFlags =
6898 SGLISTINFO_SET_INDEX_FLAG(
6899 (sg_ptr->nents == 1), 0,
6900 req->ifd_data[i].cmd_buf_offset);
6901 data->sglistinfo_ptr[i].sizeOrCount =
6902 (sg_ptr->nents == 1) ?
6903 sg->length : sg_ptr->nents;
6904 data->sglist_cnt = i + 1;
6905 }
6906 /* Deallocate the handle */
6907 if (!IS_ERR_OR_NULL(ihandle))
6908 ion_free(qseecom.ion_clnt, ihandle);
6909 }
6910 return ret;
6911err:
6912 if (!IS_ERR_OR_NULL(ihandle))
6913 ion_free(qseecom.ion_clnt, ihandle);
6914 return -ENOMEM;
6915}
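
/*
 * For requests that carry ion fds, the helper above patches the word(s) at
 * req_ptr + cmd_buf_offset in place:
 *
 *	- if the word is 0 (pre-allocated fd from the secure heap), a
 *	  struct qseecom_param_memref { buffer, size } is written there,
 *	  pointing at the sg-list buffer built by
 *	  __qseecom_qteec_handle_pre_alc_fd();
 *	- otherwise the single word is overwritten with the ion buffer's
 *	  dma address, which requires a physically contiguous buffer
 *	  (sg_ptr->nents == 1).
 *
 * On the cleanup pass the same offsets are zeroed again and any coherent
 * buffers allocated for the first case are freed.
 */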
6916
6917static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
6918 struct qseecom_qteec_req *req, uint32_t cmd_id)
6919{
6920 struct qseecom_command_scm_resp resp;
6921 struct qseecom_qteec_ireq ireq;
6922 struct qseecom_qteec_64bit_ireq ireq_64bit;
6923 struct qseecom_registered_app_list *ptr_app;
6924 bool found_app = false;
6925 unsigned long flags;
6926 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07006927 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006928 uint32_t reqd_len_sb_in = 0;
6929 void *cmd_buf = NULL;
6930 size_t cmd_len;
6931 struct sglist_info *table = data->sglistinfo_ptr;
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306932 void *req_ptr = NULL;
6933 void *resp_ptr = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006934
6935 ret = __qseecom_qteec_validate_msg(data, req);
6936 if (ret)
6937 return ret;
6938
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306939 req_ptr = req->req_ptr;
6940 resp_ptr = req->resp_ptr;
6941
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006942 /* find app_id & img_name from list */
6943 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6944 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6945 list) {
6946 if ((ptr_app->app_id == data->client.app_id) &&
6947 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6948 found_app = true;
6949 break;
6950 }
6951 }
6952 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6953 if (!found_app) {
6954 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6955 (char *)data->client.app_name);
6956 return -ENOENT;
6957 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07006958 if (__qseecom_find_pending_unload_app(data->client.app_id,
6959 data->client.app_name)) {
6960 pr_err("app %d (%s) unload is pending\n",
6961 data->client.app_id, data->client.app_name);
6962 return -ENOENT;
6963 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006964
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306965 req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6966 (uintptr_t)req->req_ptr);
6967 req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6968 (uintptr_t)req->resp_ptr);
6969
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006970 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6971 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
6972 ret = __qseecom_update_qteec_req_buf(
6973 (struct qseecom_qteec_modfd_req *)req, data, false);
6974 if (ret)
6975 return ret;
6976 }
6977
6978 if (qseecom.qsee_version < QSEE_VERSION_40) {
6979 ireq.app_id = data->client.app_id;
6980 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306981 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006982 ireq.req_len = req->req_len;
6983 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306984 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006985 ireq.resp_len = req->resp_len;
6986 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6987 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6988 dmac_flush_range((void *)table,
6989 (void *)table + SGLISTINFO_TABLE_SIZE);
6990 cmd_buf = (void *)&ireq;
6991 cmd_len = sizeof(struct qseecom_qteec_ireq);
6992 } else {
6993 ireq_64bit.app_id = data->client.app_id;
6994 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306995 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006996 ireq_64bit.req_len = req->req_len;
6997 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306998 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006999 ireq_64bit.resp_len = req->resp_len;
7000 if ((data->client.app_arch == ELFCLASS32) &&
7001 ((ireq_64bit.req_ptr >=
7002 PHY_ADDR_4G - ireq_64bit.req_len) ||
7003 (ireq_64bit.resp_ptr >=
7004 PHY_ADDR_4G - ireq_64bit.resp_len))){
7005 pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
7006 data->client.app_name, data->client.app_id);
7007 pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
7008 ireq_64bit.req_ptr, ireq_64bit.req_len,
7009 ireq_64bit.resp_ptr, ireq_64bit.resp_len);
7010 return -EFAULT;
7011 }
7012 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
7013 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7014 dmac_flush_range((void *)table,
7015 (void *)table + SGLISTINFO_TABLE_SIZE);
7016 cmd_buf = (void *)&ireq_64bit;
7017 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
7018 }
7019 if (qseecom.whitelist_support == true
7020 && cmd_id == QSEOS_TEE_OPEN_SESSION)
7021 *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
7022 else
7023 *(uint32_t *)cmd_buf = cmd_id;
7024
7025 reqd_len_sb_in = req->req_len + req->resp_len;
7026 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7027 data->client.sb_virt,
7028 reqd_len_sb_in,
7029 ION_IOC_CLEAN_INV_CACHES);
7030 if (ret) {
7031 pr_err("cache operation failed %d\n", ret);
7032 return ret;
7033 }
7034
7035 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
7036
7037 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
7038 cmd_buf, cmd_len,
7039 &resp, sizeof(resp));
7040 if (ret) {
7041 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
7042 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07007043 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007044 }
7045
7046 if (qseecom.qsee_reentrancy_support) {
7047 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07007048 if (ret)
7049 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007050 } else {
7051 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
7052 ret = __qseecom_process_incomplete_cmd(data, &resp);
7053 if (ret) {
7054 pr_err("process_incomplete_cmd failed err: %d\n",
7055 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07007056 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007057 }
7058 } else {
7059 if (resp.result != QSEOS_RESULT_SUCCESS) {
7060 pr_err("Response result %d not supported\n",
7061 resp.result);
7062 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07007063 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007064 }
7065 }
7066 }
Zhen Kong4af480e2017-09-19 14:34:16 -07007067exit:
7068 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007069 data->client.sb_virt, data->client.sb_length,
7070 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07007071 if (ret2) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007072 pr_err("cache operation failed %d\n", ret2);
Zhen Kong4af480e2017-09-19 14:34:16 -07007073 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007074 }
7075
7076 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
7077 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
Zhen Kong4af480e2017-09-19 14:34:16 -07007078 ret2 = __qseecom_update_qteec_req_buf(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007079 (struct qseecom_qteec_modfd_req *)req, data, true);
Zhen Kong4af480e2017-09-19 14:34:16 -07007080 if (ret2)
7081 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007082 }
Zhen Kong4af480e2017-09-19 14:34:16 -07007083 return ret;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007084}
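
/*
 * Illustrative userspace flow for the GP (QTEEC) wrappers below (a minimal
 * sketch, not part of the driver: the TEE client library normally issues
 * these calls, the device node path is assumed, and the command layout
 * inside the shared buffer is defined by the GP protocol, not shown here):
 *
 *	int fd = open("/dev/qseecom", O_RDWR);
 *	// ... load the TA and register the shared buffer on this fd ...
 *	struct qseecom_qteec_modfd_req req = {
 *		.req_ptr  = sb,			// inside the registered buffer
 *		.req_len  = req_len,
 *		.resp_ptr = sb + req_len,	// ditto
 *		.resp_len = resp_len,
 *		// .ifd_data[i].fd/.cmd_buf_offset only for memref parameters
 *	};
 *	ioctl(fd, QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ, &req);
 *	ioctl(fd, QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ, &req);
 *	// QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ takes a struct
 *	// qseecom_qteec_req rather than the modfd variant.
 */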
7085
7086static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
7087 void __user *argp)
7088{
7089 struct qseecom_qteec_modfd_req req;
7090 int ret = 0;
7091
7092 ret = copy_from_user(&req, argp,
7093 sizeof(struct qseecom_qteec_modfd_req));
7094 if (ret) {
7095 pr_err("copy_from_user failed\n");
7096 return ret;
7097 }
7098 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7099 QSEOS_TEE_OPEN_SESSION);
7100
7101 return ret;
7102}
7103
7104static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
7105 void __user *argp)
7106{
7107 struct qseecom_qteec_req req;
7108 int ret = 0;
7109
7110 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
7111 if (ret) {
7112 pr_err("copy_from_user failed\n");
7113 return ret;
7114 }
7115 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
7116 return ret;
7117}
7118
7119static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
7120 void __user *argp)
7121{
7122 struct qseecom_qteec_modfd_req req;
7123 struct qseecom_command_scm_resp resp;
7124 struct qseecom_qteec_ireq ireq;
7125 struct qseecom_qteec_64bit_ireq ireq_64bit;
7126 struct qseecom_registered_app_list *ptr_app;
7127 bool found_app = false;
7128 unsigned long flags;
7129 int ret = 0;
7130 int i = 0;
7131 uint32_t reqd_len_sb_in = 0;
7132 void *cmd_buf = NULL;
7133 size_t cmd_len;
7134 struct sglist_info *table = data->sglistinfo_ptr;
7135 void *req_ptr = NULL;
7136 void *resp_ptr = NULL;
7137
7138 ret = copy_from_user(&req, argp,
7139 sizeof(struct qseecom_qteec_modfd_req));
7140 if (ret) {
7141 pr_err("copy_from_user failed\n");
7142 return ret;
7143 }
7144 ret = __qseecom_qteec_validate_msg(data,
7145 (struct qseecom_qteec_req *)(&req));
7146 if (ret)
7147 return ret;
7148 req_ptr = req.req_ptr;
7149 resp_ptr = req.resp_ptr;
7150
7151 /* find app_id & img_name from list */
7152 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
7153 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
7154 list) {
7155 if ((ptr_app->app_id == data->client.app_id) &&
7156 (!strcmp(ptr_app->app_name, data->client.app_name))) {
7157 found_app = true;
7158 break;
7159 }
7160 }
7161 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
7162 if (!found_app) {
7163 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
7164 (char *)data->client.app_name);
7165 return -ENOENT;
7166 }
Zhen Kong03b2eae2019-09-17 16:58:46 -07007167 if (__qseecom_find_pending_unload_app(data->client.app_id,
7168 data->client.app_name)) {
7169 pr_err("app %d (%s) unload is pending\n",
7170 data->client.app_id, data->client.app_name);
7171 return -ENOENT;
7172 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007173
7174 /* validate offsets */
7175 for (i = 0; i < MAX_ION_FD; i++) {
7176 if (req.ifd_data[i].fd) {
7177 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
7178 return -EINVAL;
7179 }
7180 }
7181 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7182 (uintptr_t)req.req_ptr);
7183 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7184 (uintptr_t)req.resp_ptr);
7185 ret = __qseecom_update_qteec_req_buf(&req, data, false);
7186 if (ret)
7187 return ret;
7188
7189 if (qseecom.qsee_version < QSEE_VERSION_40) {
7190 ireq.app_id = data->client.app_id;
7191 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7192 (uintptr_t)req_ptr);
7193 ireq.req_len = req.req_len;
7194 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7195 (uintptr_t)resp_ptr);
7196 ireq.resp_len = req.resp_len;
7197 cmd_buf = (void *)&ireq;
7198 cmd_len = sizeof(struct qseecom_qteec_ireq);
7199 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
7200 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7201 dmac_flush_range((void *)table,
7202 (void *)table + SGLISTINFO_TABLE_SIZE);
7203 } else {
7204 ireq_64bit.app_id = data->client.app_id;
7205 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7206 (uintptr_t)req_ptr);
7207 ireq_64bit.req_len = req.req_len;
7208 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7209 (uintptr_t)resp_ptr);
7210 ireq_64bit.resp_len = req.resp_len;
7211 cmd_buf = (void *)&ireq_64bit;
7212 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
7213 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
7214 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7215 dmac_flush_range((void *)table,
7216 (void *)table + SGLISTINFO_TABLE_SIZE);
7217 }
7218 reqd_len_sb_in = req.req_len + req.resp_len;
7219 if (qseecom.whitelist_support == true)
7220 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
7221 else
7222 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
7223
7224 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7225 data->client.sb_virt,
7226 reqd_len_sb_in,
7227 ION_IOC_CLEAN_INV_CACHES);
7228 if (ret) {
7229 pr_err("cache operation failed %d\n", ret);
7230 return ret;
7231 }
7232
7233 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
7234
7235 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
7236 cmd_buf, cmd_len,
7237 &resp, sizeof(resp));
7238 if (ret) {
7239 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
7240 ret, data->client.app_id);
7241 return ret;
7242 }
7243
7244 if (qseecom.qsee_reentrancy_support) {
7245 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
7246 } else {
7247 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
7248 ret = __qseecom_process_incomplete_cmd(data, &resp);
7249 if (ret) {
7250 pr_err("process_incomplete_cmd failed err: %d\n",
7251 ret);
7252 return ret;
7253 }
7254 } else {
7255 if (resp.result != QSEOS_RESULT_SUCCESS) {
7256 pr_err("Response result %d not supported\n",
7257 resp.result);
7258 ret = -EINVAL;
7259 }
7260 }
7261 }
7262 ret = __qseecom_update_qteec_req_buf(&req, data, true);
7263 if (ret)
7264 return ret;
7265
7266 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7267 data->client.sb_virt, data->client.sb_length,
7268 ION_IOC_INV_CACHES);
7269 if (ret) {
7270 pr_err("cache operation failed %d\n", ret);
7271 return ret;
7272 }
7273 return 0;
7274}
7275
7276static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
7277 void __user *argp)
7278{
7279 struct qseecom_qteec_modfd_req req;
7280 int ret = 0;
7281
7282 ret = copy_from_user(&req, argp,
7283 sizeof(struct qseecom_qteec_modfd_req));
7284 if (ret) {
7285 pr_err("copy_from_user failed\n");
7286 return ret;
7287 }
7288 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7289 QSEOS_TEE_REQUEST_CANCELLATION);
7290
7291 return ret;
7292}
7293
7294static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
7295{
7296 if (data->sglist_cnt) {
7297 memset(data->sglistinfo_ptr, 0,
7298 SGLISTINFO_TABLE_SIZE);
7299 data->sglist_cnt = 0;
7300 }
7301}
7302
AnilKumar Chimataa312d342019-01-25 12:43:23 +05307303static long qseecom_ioctl(struct file *file,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007304 unsigned int cmd, unsigned long arg)
7305{
7306 int ret = 0;
7307 struct qseecom_dev_handle *data = file->private_data;
7308 void __user *argp = (void __user *) arg;
7309 bool perf_enabled = false;
7310
7311 if (!data) {
7312 pr_err("Invalid/uninitialized device handle\n");
7313 return -EINVAL;
7314 }
7315
7316 if (data->abort) {
7317 pr_err("Aborting qseecom driver\n");
7318 return -ENODEV;
7319 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007320 if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
7321 cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
7322 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
7323 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
Zhen Kongc4c162a2019-01-23 12:07:12 -08007324 __wakeup_unregister_listener_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07007325 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007326
7327 switch (cmd) {
7328 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
7329 if (data->type != QSEECOM_GENERIC) {
7330 pr_err("reg lstnr req: invalid handle (%d)\n",
7331 data->type);
7332 ret = -EINVAL;
7333 break;
7334 }
7335 pr_debug("ioctl register_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007336 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007337 atomic_inc(&data->ioctl_count);
7338 data->type = QSEECOM_LISTENER_SERVICE;
7339 ret = qseecom_register_listener(data, argp);
7340 atomic_dec(&data->ioctl_count);
7341 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007342 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007343 if (ret)
7344 pr_err("failed qseecom_register_listener: %d\n", ret);
7345 break;
7346 }
Neeraj Sonib30ac1f2018-04-17 14:48:42 +05307347 case QSEECOM_IOCTL_SET_ICE_INFO: {
7348 struct qseecom_ice_data_t ice_data;
7349
7350 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
7351 if (ret) {
7352 pr_err("copy_from_user failed\n");
7353 return -EFAULT;
7354 }
7355 qcom_ice_set_fde_flag(ice_data.flag);
7356 break;
7357 }
7358
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007359 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7360 if ((data->listener.id == 0) ||
7361 (data->type != QSEECOM_LISTENER_SERVICE)) {
7362 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7363 data->type, data->listener.id);
7364 ret = -EINVAL;
7365 break;
7366 }
7367 pr_debug("ioctl unregister_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007368 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007369 atomic_inc(&data->ioctl_count);
7370 ret = qseecom_unregister_listener(data);
7371 atomic_dec(&data->ioctl_count);
7372 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007373 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007374 if (ret)
7375 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7376 break;
7377 }
7378 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7379 if ((data->client.app_id == 0) ||
7380 (data->type != QSEECOM_CLIENT_APP)) {
7381 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7382 data->type, data->client.app_id);
7383 ret = -EINVAL;
7384 break;
7385 }
7386 /* Only one client allowed here at a time */
7387 mutex_lock(&app_access_lock);
7388 if (qseecom.support_bus_scaling) {
7389 /* register bus bw in case the client doesn't do it */
7390 if (!data->mode) {
7391 mutex_lock(&qsee_bw_mutex);
7392 __qseecom_register_bus_bandwidth_needs(
7393 data, HIGH);
7394 mutex_unlock(&qsee_bw_mutex);
7395 }
7396 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7397 if (ret) {
7398 pr_err("Failed to set bw.\n");
7399 ret = -EINVAL;
7400 mutex_unlock(&app_access_lock);
7401 break;
7402 }
7403 }
7404 /*
7405 * On targets where crypto clock is handled by HLOS,
7406 * if clk_access_cnt is zero and perf_enabled is false,
7407 * then the crypto clock was not enabled before sending the cmd to
7408 * TZ, so qseecom enables the clock to avoid service failure.
7409 */
7410 if (!qseecom.no_clock_support &&
7411 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7412 pr_debug("ce clock is not enabled!\n");
7413 ret = qseecom_perf_enable(data);
7414 if (ret) {
7415 pr_err("Failed to vote for clock with err %d\n",
7416 ret);
7417 mutex_unlock(&app_access_lock);
7418 ret = -EINVAL;
7419 break;
7420 }
7421 perf_enabled = true;
7422 }
7423 atomic_inc(&data->ioctl_count);
7424 ret = qseecom_send_cmd(data, argp);
7425 if (qseecom.support_bus_scaling)
7426 __qseecom_add_bw_scale_down_timer(
7427 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7428 if (perf_enabled) {
7429 qsee_disable_clock_vote(data, CLK_DFAB);
7430 qsee_disable_clock_vote(data, CLK_SFPB);
7431 }
7432 atomic_dec(&data->ioctl_count);
7433 wake_up_all(&data->abort_wq);
7434 mutex_unlock(&app_access_lock);
7435 if (ret)
7436 pr_err("failed qseecom_send_cmd: %d\n", ret);
7437 break;
7438 }
7439 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7440 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7441 if ((data->client.app_id == 0) ||
7442 (data->type != QSEECOM_CLIENT_APP)) {
7443 pr_err("send modfd cmd: invalid handle (%d) appid(%d)\n",
7444 data->type, data->client.app_id);
7445 ret = -EINVAL;
7446 break;
7447 }
7448 /* Only one client allowed here at a time */
7449 mutex_lock(&app_access_lock);
7450 if (qseecom.support_bus_scaling) {
7451 if (!data->mode) {
7452 mutex_lock(&qsee_bw_mutex);
7453 __qseecom_register_bus_bandwidth_needs(
7454 data, HIGH);
7455 mutex_unlock(&qsee_bw_mutex);
7456 }
7457 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7458 if (ret) {
7459 pr_err("Failed to set bw.\n");
7460 mutex_unlock(&app_access_lock);
7461 ret = -EINVAL;
7462 break;
7463 }
7464 }
7465 /*
7466 * On targets where crypto clock is handled by HLOS,
7467 * if clk_access_cnt is zero and perf_enabled is false,
7468 * then the crypto clock was not enabled before sending the cmd to
7469 * TZ, so qseecom enables the clock to avoid service failure.
7470 */
7471 if (!qseecom.no_clock_support &&
7472 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7473 pr_debug("ce clock is not enabled!\n");
7474 ret = qseecom_perf_enable(data);
7475 if (ret) {
7476 pr_err("Failed to vote for clock with err %d\n",
7477 ret);
7478 mutex_unlock(&app_access_lock);
7479 ret = -EINVAL;
7480 break;
7481 }
7482 perf_enabled = true;
7483 }
7484 atomic_inc(&data->ioctl_count);
7485 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7486 ret = qseecom_send_modfd_cmd(data, argp);
7487 else
7488 ret = qseecom_send_modfd_cmd_64(data, argp);
7489 if (qseecom.support_bus_scaling)
7490 __qseecom_add_bw_scale_down_timer(
7491 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7492 if (perf_enabled) {
7493 qsee_disable_clock_vote(data, CLK_DFAB);
7494 qsee_disable_clock_vote(data, CLK_SFPB);
7495 }
7496 atomic_dec(&data->ioctl_count);
7497 wake_up_all(&data->abort_wq);
7498 mutex_unlock(&app_access_lock);
7499 if (ret)
7500 pr_err("failed qseecom_send_modfd_cmd: %d\n", ret);
7501 __qseecom_clean_data_sglistinfo(data);
7502 break;
7503 }
7504 case QSEECOM_IOCTL_RECEIVE_REQ: {
7505 if ((data->listener.id == 0) ||
7506 (data->type != QSEECOM_LISTENER_SERVICE)) {
7507 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7508 data->type, data->listener.id);
7509 ret = -EINVAL;
7510 break;
7511 }
7512 atomic_inc(&data->ioctl_count);
7513 ret = qseecom_receive_req(data);
7514 atomic_dec(&data->ioctl_count);
7515 wake_up_all(&data->abort_wq);
7516 if (ret && (ret != -ERESTARTSYS))
7517 pr_err("failed qseecom_receive_req: %d\n", ret);
7518 break;
7519 }
7520 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7521 if ((data->listener.id == 0) ||
7522 (data->type != QSEECOM_LISTENER_SERVICE)) {
7523 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7524 data->type, data->listener.id);
7525 ret = -EINVAL;
7526 break;
7527 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007528 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007529 atomic_inc(&data->ioctl_count);
7530 if (!qseecom.qsee_reentrancy_support)
7531 ret = qseecom_send_resp();
7532 else
7533 ret = qseecom_reentrancy_send_resp(data);
7534 atomic_dec(&data->ioctl_count);
7535 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007536 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007537 if (ret)
7538 pr_err("failed qseecom_send_resp: %d\n", ret);
7539 break;
7540 }
7541 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7542 if ((data->type != QSEECOM_CLIENT_APP) &&
7543 (data->type != QSEECOM_GENERIC) &&
7544 (data->type != QSEECOM_SECURE_SERVICE)) {
7545 pr_err("set mem param req: invalid handle (%d)\n",
7546 data->type);
7547 ret = -EINVAL;
7548 break;
7549 }
7550 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7551 mutex_lock(&app_access_lock);
7552 atomic_inc(&data->ioctl_count);
7553 ret = qseecom_set_client_mem_param(data, argp);
7554 atomic_dec(&data->ioctl_count);
7555 mutex_unlock(&app_access_lock);
7556 if (ret)
7557 pr_err("failed qseecom_set_mem_param request: %d\n",
7558 ret);
7559 break;
7560 }
7561 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7562 if ((data->type != QSEECOM_GENERIC) &&
7563 (data->type != QSEECOM_CLIENT_APP)) {
7564 pr_err("load app req: invalid handle (%d)\n",
7565 data->type);
7566 ret = -EINVAL;
7567 break;
7568 }
7569 data->type = QSEECOM_CLIENT_APP;
7570 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7571 mutex_lock(&app_access_lock);
7572 atomic_inc(&data->ioctl_count);
7573 ret = qseecom_load_app(data, argp);
7574 atomic_dec(&data->ioctl_count);
7575 mutex_unlock(&app_access_lock);
7576 if (ret)
7577 pr_err("failed load_app request: %d\n", ret);
Zhen Kong03b2eae2019-09-17 16:58:46 -07007578 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007579 break;
7580 }
7581 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7582 if ((data->client.app_id == 0) ||
7583 (data->type != QSEECOM_CLIENT_APP)) {
7584 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7585 data->type, data->client.app_id);
7586 ret = -EINVAL;
7587 break;
7588 }
7589 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7590 mutex_lock(&app_access_lock);
7591 atomic_inc(&data->ioctl_count);
7592 ret = qseecom_unload_app(data, false);
7593 atomic_dec(&data->ioctl_count);
7594 mutex_unlock(&app_access_lock);
7595 if (ret)
7596 pr_err("failed unload_app request: %d\n", ret);
Zhen Kong03b2eae2019-09-17 16:58:46 -07007597 __wakeup_unload_app_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007598 break;
7599 }
7600 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7601 atomic_inc(&data->ioctl_count);
7602 ret = qseecom_get_qseos_version(data, argp);
7603 if (ret)
7604 pr_err("qseecom_get_qseos_version: %d\n", ret);
7605 atomic_dec(&data->ioctl_count);
7606 break;
7607 }
7608 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7609 if ((data->type != QSEECOM_GENERIC) &&
7610 (data->type != QSEECOM_CLIENT_APP)) {
7611 pr_err("perf enable req: invalid handle (%d)\n",
7612 data->type);
7613 ret = -EINVAL;
7614 break;
7615 }
7616 if ((data->type == QSEECOM_CLIENT_APP) &&
7617 (data->client.app_id == 0)) {
7618 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7619 data->type, data->client.app_id);
7620 ret = -EINVAL;
7621 break;
7622 }
7623 atomic_inc(&data->ioctl_count);
7624 if (qseecom.support_bus_scaling) {
7625 mutex_lock(&qsee_bw_mutex);
7626 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7627 mutex_unlock(&qsee_bw_mutex);
7628 } else {
7629 ret = qseecom_perf_enable(data);
7630 if (ret)
7631 pr_err("Fail to vote for clocks %d\n", ret);
7632 }
7633 atomic_dec(&data->ioctl_count);
7634 break;
7635 }
7636 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7637 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7638 (data->type != QSEECOM_CLIENT_APP)) {
7639 pr_err("perf disable req: invalid handle (%d)\n",
7640 data->type);
7641 ret = -EINVAL;
7642 break;
7643 }
7644 if ((data->type == QSEECOM_CLIENT_APP) &&
7645 (data->client.app_id == 0)) {
7646 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7647 data->type, data->client.app_id);
7648 ret = -EINVAL;
7649 break;
7650 }
7651 atomic_inc(&data->ioctl_count);
7652 if (!qseecom.support_bus_scaling) {
7653 qsee_disable_clock_vote(data, CLK_DFAB);
7654 qsee_disable_clock_vote(data, CLK_SFPB);
7655 } else {
7656 mutex_lock(&qsee_bw_mutex);
7657 qseecom_unregister_bus_bandwidth_needs(data);
7658 mutex_unlock(&qsee_bw_mutex);
7659 }
7660 atomic_dec(&data->ioctl_count);
7661 break;
7662 }
7663
7664 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7665 /* If crypto clock is not handled by HLOS, return directly. */
7666 if (qseecom.no_clock_support) {
7667 pr_debug("crypto clock is not handled by HLOS\n");
7668 break;
7669 }
7670 if ((data->client.app_id == 0) ||
7671 (data->type != QSEECOM_CLIENT_APP)) {
7672 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7673 data->type, data->client.app_id);
7674 ret = -EINVAL;
7675 break;
7676 }
7677 atomic_inc(&data->ioctl_count);
7678 ret = qseecom_scale_bus_bandwidth(data, argp);
7679 atomic_dec(&data->ioctl_count);
7680 break;
7681 }
7682 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7683 if (data->type != QSEECOM_GENERIC) {
7684 pr_err("load ext elf req: invalid client handle (%d)\n",
7685 data->type);
7686 ret = -EINVAL;
7687 break;
7688 }
7689 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7690 data->released = true;
7691 mutex_lock(&app_access_lock);
7692 atomic_inc(&data->ioctl_count);
7693 ret = qseecom_load_external_elf(data, argp);
7694 atomic_dec(&data->ioctl_count);
7695 mutex_unlock(&app_access_lock);
7696 if (ret)
7697 pr_err("failed load_external_elf request: %d\n", ret);
7698 break;
7699 }
7700 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7701 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7702 pr_err("unload ext elf req: invalid handle (%d)\n",
7703 data->type);
7704 ret = -EINVAL;
7705 break;
7706 }
7707 data->released = true;
7708 mutex_lock(&app_access_lock);
7709 atomic_inc(&data->ioctl_count);
7710 ret = qseecom_unload_external_elf(data);
7711 atomic_dec(&data->ioctl_count);
7712 mutex_unlock(&app_access_lock);
7713 if (ret)
7714 pr_err("failed unload_external_elf request: %d\n", ret);
7715 break;
7716 }
7717 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
Zhen Kong677362c2019-08-30 10:50:25 -07007718 if ((data->type != QSEECOM_GENERIC) &&
7719 (data->type != QSEECOM_CLIENT_APP)) {
7720 pr_err("app loaded query req: invalid handle (%d)\n",
7721 data->type);
7722 ret = -EINVAL;
7723 break;
7724 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007725 data->type = QSEECOM_CLIENT_APP;
7726 mutex_lock(&app_access_lock);
7727 atomic_inc(&data->ioctl_count);
7728 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7729 ret = qseecom_query_app_loaded(data, argp);
7730 atomic_dec(&data->ioctl_count);
7731 mutex_unlock(&app_access_lock);
7732 break;
7733 }
7734 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7735 if (data->type != QSEECOM_GENERIC) {
7736 pr_err("send cmd svc req: invalid handle (%d)\n",
7737 data->type);
7738 ret = -EINVAL;
7739 break;
7740 }
7741 data->type = QSEECOM_SECURE_SERVICE;
7742 if (qseecom.qsee_version < QSEE_VERSION_03) {
7743 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7744 qseecom.qsee_version);
7745 return -EINVAL;
7746 }
7747 mutex_lock(&app_access_lock);
7748 atomic_inc(&data->ioctl_count);
7749 ret = qseecom_send_service_cmd(data, argp);
7750 atomic_dec(&data->ioctl_count);
7751 mutex_unlock(&app_access_lock);
7752 break;
7753 }
7754 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7755 if (!(qseecom.support_pfe || qseecom.support_fde))
7756 pr_err("Features requiring key init not supported\n");
7757 if (data->type != QSEECOM_GENERIC) {
7758 pr_err("create key req: invalid handle (%d)\n",
7759 data->type);
7760 ret = -EINVAL;
7761 break;
7762 }
7763 if (qseecom.qsee_version < QSEE_VERSION_05) {
7764 pr_err("Create Key feature unsupported: qsee ver %u\n",
7765 qseecom.qsee_version);
7766 return -EINVAL;
7767 }
7768 data->released = true;
7769 mutex_lock(&app_access_lock);
7770 atomic_inc(&data->ioctl_count);
7771 ret = qseecom_create_key(data, argp);
7772 if (ret)
7773 pr_err("failed to create encryption key: %d\n", ret);
7774
7775 atomic_dec(&data->ioctl_count);
7776 mutex_unlock(&app_access_lock);
7777 break;
7778 }
7779 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7780 if (!(qseecom.support_pfe || qseecom.support_fde))
7781 pr_err("Features requiring key init not supported\n");
7782 if (data->type != QSEECOM_GENERIC) {
7783 pr_err("wipe key req: invalid handle (%d)\n",
7784 data->type);
7785 ret = -EINVAL;
7786 break;
7787 }
7788 if (qseecom.qsee_version < QSEE_VERSION_05) {
7789 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7790 qseecom.qsee_version);
7791 return -EINVAL;
7792 }
7793 data->released = true;
7794 mutex_lock(&app_access_lock);
7795 atomic_inc(&data->ioctl_count);
7796 ret = qseecom_wipe_key(data, argp);
7797 if (ret)
7798 pr_err("failed to wipe encryption key: %d\n", ret);
7799 atomic_dec(&data->ioctl_count);
7800 mutex_unlock(&app_access_lock);
7801 break;
7802 }
7803 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7804 if (!(qseecom.support_pfe || qseecom.support_fde))
7805 pr_err("Features requiring key init not supported\n");
7806 if (data->type != QSEECOM_GENERIC) {
7807 pr_err("update key req: invalid handle (%d)\n",
7808 data->type);
7809 ret = -EINVAL;
7810 break;
7811 }
7812 if (qseecom.qsee_version < QSEE_VERSION_05) {
7813 pr_err("Update Key feature unsupported in qsee ver %u\n",
7814 qseecom.qsee_version);
7815 return -EINVAL;
7816 }
7817 data->released = true;
7818 mutex_lock(&app_access_lock);
7819 atomic_inc(&data->ioctl_count);
7820 ret = qseecom_update_key_user_info(data, argp);
7821 if (ret)
7822 pr_err("failed to update key user info: %d\n", ret);
7823 atomic_dec(&data->ioctl_count);
7824 mutex_unlock(&app_access_lock);
7825 break;
7826 }
7827 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7828 if (data->type != QSEECOM_GENERIC) {
7829 pr_err("save part hash req: invalid handle (%d)\n",
7830 data->type);
7831 ret = -EINVAL;
7832 break;
7833 }
7834 data->released = true;
7835 mutex_lock(&app_access_lock);
7836 atomic_inc(&data->ioctl_count);
7837 ret = qseecom_save_partition_hash(argp);
7838 atomic_dec(&data->ioctl_count);
7839 mutex_unlock(&app_access_lock);
7840 break;
7841 }
7842 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7843 if (data->type != QSEECOM_GENERIC) {
7844 pr_err("ES activated req: invalid handle (%d)\n",
7845 data->type);
7846 ret = -EINVAL;
7847 break;
7848 }
7849 data->released = true;
7850 mutex_lock(&app_access_lock);
7851 atomic_inc(&data->ioctl_count);
7852 ret = qseecom_is_es_activated(argp);
7853 atomic_dec(&data->ioctl_count);
7854 mutex_unlock(&app_access_lock);
7855 break;
7856 }
7857 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7858 if (data->type != QSEECOM_GENERIC) {
7859 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7860 data->type);
7861 ret = -EINVAL;
7862 break;
7863 }
7864 data->released = true;
7865 mutex_lock(&app_access_lock);
7866 atomic_inc(&data->ioctl_count);
7867 ret = qseecom_mdtp_cipher_dip(argp);
7868 atomic_dec(&data->ioctl_count);
7869 mutex_unlock(&app_access_lock);
7870 break;
7871 }
7872 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7873 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7874 if ((data->listener.id == 0) ||
7875 (data->type != QSEECOM_LISTENER_SERVICE)) {
7876 pr_err("send modfd resp: invalid handle (%d), lid(%d)\n",
7877 data->type, data->listener.id);
7878 ret = -EINVAL;
7879 break;
7880 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007881 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007882 atomic_inc(&data->ioctl_count);
7883 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7884 ret = qseecom_send_modfd_resp(data, argp);
7885 else
7886 ret = qseecom_send_modfd_resp_64(data, argp);
7887 atomic_dec(&data->ioctl_count);
7888 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007889 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007890 if (ret)
7891 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7892 __qseecom_clean_data_sglistinfo(data);
7893 break;
7894 }
7895 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7896 if ((data->client.app_id == 0) ||
7897 (data->type != QSEECOM_CLIENT_APP)) {
7898 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7899 data->type, data->client.app_id);
7900 ret = -EINVAL;
7901 break;
7902 }
7903 if (qseecom.qsee_version < QSEE_VERSION_40) {
7904 pr_err("GP feature unsupported: qsee ver %u\n",
7905 qseecom.qsee_version);
7906 return -EINVAL;
7907 }
7908 /* Only one client allowed here at a time */
7909 mutex_lock(&app_access_lock);
7910 atomic_inc(&data->ioctl_count);
7911 ret = qseecom_qteec_open_session(data, argp);
7912 atomic_dec(&data->ioctl_count);
7913 wake_up_all(&data->abort_wq);
7914 mutex_unlock(&app_access_lock);
7915 if (ret)
7916 pr_err("failed open_session_cmd: %d\n", ret);
7917 __qseecom_clean_data_sglistinfo(data);
7918 break;
7919 }
7920 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7921 if ((data->client.app_id == 0) ||
7922 (data->type != QSEECOM_CLIENT_APP)) {
7923 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7924 data->type, data->client.app_id);
7925 ret = -EINVAL;
7926 break;
7927 }
7928 if (qseecom.qsee_version < QSEE_VERSION_40) {
7929 pr_err("GP feature unsupported: qsee ver %u\n",
7930 qseecom.qsee_version);
7931 return -EINVAL;
7932 }
7933 /* Only one client allowed here at a time */
7934 mutex_lock(&app_access_lock);
7935 atomic_inc(&data->ioctl_count);
7936 ret = qseecom_qteec_close_session(data, argp);
7937 atomic_dec(&data->ioctl_count);
7938 wake_up_all(&data->abort_wq);
7939 mutex_unlock(&app_access_lock);
7940 if (ret)
7941 pr_err("failed close_session_cmd: %d\n", ret);
7942 break;
7943 }
7944 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7945 if ((data->client.app_id == 0) ||
7946 (data->type != QSEECOM_CLIENT_APP)) {
7947 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7948 data->type, data->client.app_id);
7949 ret = -EINVAL;
7950 break;
7951 }
7952 if (qseecom.qsee_version < QSEE_VERSION_40) {
7953 pr_err("GP feature unsupported: qsee ver %u\n",
7954 qseecom.qsee_version);
7955 return -EINVAL;
7956 }
7957 /* Only one client allowed here at a time */
7958 mutex_lock(&app_access_lock);
7959 atomic_inc(&data->ioctl_count);
7960 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7961 atomic_dec(&data->ioctl_count);
7962 wake_up_all(&data->abort_wq);
7963 mutex_unlock(&app_access_lock);
7964 if (ret)
7965 pr_err("failed Invoke cmd: %d\n", ret);
7966 __qseecom_clean_data_sglistinfo(data);
7967 break;
7968 }
7969 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7970 if ((data->client.app_id == 0) ||
7971 (data->type != QSEECOM_CLIENT_APP)) {
7972 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7973 data->type, data->client.app_id);
7974 ret = -EINVAL;
7975 break;
7976 }
7977 if (qseecom.qsee_version < QSEE_VERSION_40) {
7978 pr_err("GP feature unsupported: qsee ver %u\n",
7979 qseecom.qsee_version);
7980 return -EINVAL;
7981 }
7982 /* Only one client allowed here at a time */
7983 mutex_lock(&app_access_lock);
7984 atomic_inc(&data->ioctl_count);
7985 ret = qseecom_qteec_request_cancellation(data, argp);
7986 atomic_dec(&data->ioctl_count);
7987 wake_up_all(&data->abort_wq);
7988 mutex_unlock(&app_access_lock);
7989 if (ret)
7990 pr_err("failed request_cancellation: %d\n", ret);
7991 break;
7992 }
7993 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7994 atomic_inc(&data->ioctl_count);
7995 ret = qseecom_get_ce_info(data, argp);
7996 if (ret)
7997 pr_err("failed get fde ce pipe info: %d\n", ret);
7998 atomic_dec(&data->ioctl_count);
7999 break;
8000 }
8001 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
8002 atomic_inc(&data->ioctl_count);
8003 ret = qseecom_free_ce_info(data, argp);
8004 if (ret)
8005 pr_err("failed get fde ce pipe info: %d\n", ret);
8006 atomic_dec(&data->ioctl_count);
8007 break;
8008 }
8009 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
8010 atomic_inc(&data->ioctl_count);
8011 ret = qseecom_query_ce_info(data, argp);
8012 if (ret)
8013 pr_err("failed get fde ce pipe info: %d\n", ret);
8014 atomic_dec(&data->ioctl_count);
8015 break;
8016 }
8017 default:
8018 pr_err("Invalid IOCTL: 0x%x\n", cmd);
8019 return -EINVAL;
8020 }
8021 return ret;
8022}
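
/*
 * Typical ioctl ordering for a client application, as implied by the handle
 * type and app_id checks above (a sketch only; the request structures live
 * in the qseecom uapi header and the device node path is assumed):
 *
 *	open("/dev/qseecom")			-> QSEECOM_GENERIC handle
 *	QSEECOM_IOCTL_SET_MEM_PARAM_REQ		-> register the shared buffer
 *	QSEECOM_IOCTL_LOAD_APP_REQ		-> handle becomes QSEECOM_CLIENT_APP
 *	QSEECOM_IOCTL_SEND_CMD_REQ / QSEECOM_IOCTL_SEND_MODFD_CMD_REQ ...
 *	QSEECOM_IOCTL_UNLOAD_APP_REQ
 *
 * Listener services instead issue QSEECOM_IOCTL_REGISTER_LISTENER_REQ, then
 * loop on RECEIVE_REQ / SEND_RESP_REQ (or SEND_MODFD_RESP), and finish with
 * UNREGISTER_LISTENER_REQ.
 */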
8023
8024static int qseecom_open(struct inode *inode, struct file *file)
8025{
8026 int ret = 0;
8027 struct qseecom_dev_handle *data;
8028
8029 data = kzalloc(sizeof(*data), GFP_KERNEL);
8030 if (!data)
8031 return -ENOMEM;
8032 file->private_data = data;
8033 data->abort = 0;
8034 data->type = QSEECOM_GENERIC;
8035 data->released = false;
8036 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
8037 data->mode = INACTIVE;
8038 init_waitqueue_head(&data->abort_wq);
8039 atomic_set(&data->ioctl_count, 0);
8040 return ret;
8041}
8042
Zhen Konge5e6c942019-10-01 15:45:25 -07008043static void __qseecom_release_disable_clk(struct qseecom_dev_handle *data)
8044{
8045 if (qseecom.no_clock_support)
8046 return;
8047 if (qseecom.support_bus_scaling) {
8048 mutex_lock(&qsee_bw_mutex);
8049 if (data->mode != INACTIVE) {
8050 qseecom_unregister_bus_bandwidth_needs(data);
8051 if (qseecom.cumulative_mode == INACTIVE)
8052 __qseecom_set_msm_bus_request(INACTIVE);
8053 }
8054 mutex_unlock(&qsee_bw_mutex);
8055 } else {
8056 if (data->fast_load_enabled)
8057 qsee_disable_clock_vote(data, CLK_SFPB);
8058 if (data->perf_enabled)
8059 qsee_disable_clock_vote(data, CLK_DFAB);
8060 }
8061}
8062
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008063static int qseecom_release(struct inode *inode, struct file *file)
8064{
8065 struct qseecom_dev_handle *data = file->private_data;
8066 int ret = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08008067 bool free_private_data = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008068
Zhen Konge5e6c942019-10-01 15:45:25 -07008069 __qseecom_release_disable_clk(data);
8070 if (!data->released) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008071 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
8072 data->type, data->mode, data);
8073 switch (data->type) {
8074 case QSEECOM_LISTENER_SERVICE:
Zhen Kongbcdeda22018-11-16 13:50:51 -08008075 pr_debug("release lsnr svc %d\n", data->listener.id);
8076 free_private_data = false;
8077 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008078 ret = qseecom_unregister_listener(data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08008079 data->listener.release_called = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08008080 mutex_unlock(&listener_access_lock);
Zhen Konge5e6c942019-10-01 15:45:25 -07008081 __wakeup_unregister_listener_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008082 break;
8083 case QSEECOM_CLIENT_APP:
Zhen Kong03b2eae2019-09-17 16:58:46 -07008084 pr_debug("release app %d (%s)\n",
8085 data->client.app_id, data->client.app_name);
8086 if (data->client.app_id) {
8087 free_private_data = false;
Zhen Konge5e6c942019-10-01 15:45:25 -07008088 mutex_lock(&unload_app_pending_list_lock);
Zhen Kong03b2eae2019-09-17 16:58:46 -07008089 ret = qseecom_prepare_unload_app(data);
Zhen Konge5e6c942019-10-01 15:45:25 -07008090 mutex_unlock(&unload_app_pending_list_lock);
8091 __wakeup_unload_app_kthread();
Zhen Kong03b2eae2019-09-17 16:58:46 -07008092 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008093 break;
8094 case QSEECOM_SECURE_SERVICE:
8095 case QSEECOM_GENERIC:
8096 ret = qseecom_unmap_ion_allocated_memory(data);
8097 if (ret)
8098 pr_err("Ion Unmap failed\n");
8099 break;
8100 case QSEECOM_UNAVAILABLE_CLIENT_APP:
8101 break;
8102 default:
8103 pr_err("Unsupported clnt_handle_type %d\n",
8104 data->type);
8105 break;
8106 }
8107 }
8108
Zhen Kongbcdeda22018-11-16 13:50:51 -08008109 if (free_private_data)
8110 kfree(data);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008111 return ret;
8112}
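
/*
 * Note on the release path above: for listener handles and for client apps
 * with a loaded app_id, kfree() of the private data is skipped here
 * (free_private_data is cleared). Listener teardown is completed via the
 * unregister-listener kthread (release_called plus wakeup), and app unload
 * is queued for the unload-app kthread through qseecom_prepare_unload_app(),
 * so the handle is expected to be freed by those deferred paths instead.
 */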
8113
8114#ifdef CONFIG_COMPAT
8115#include "compat_qseecom.c"
8116#else
8117#define compat_qseecom_ioctl NULL
8118#endif
8119
8120static const struct file_operations qseecom_fops = {
8121 .owner = THIS_MODULE,
8122 .unlocked_ioctl = qseecom_ioctl,
8123 .compat_ioctl = compat_qseecom_ioctl,
8124 .open = qseecom_open,
8125 .release = qseecom_release
8126};
8127
8128static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
8129{
8130 int rc = 0;
8131 struct device *pdev;
8132 struct qseecom_clk *qclk;
8133 char *core_clk_src = NULL;
8134 char *core_clk = NULL;
8135 char *iface_clk = NULL;
8136 char *bus_clk = NULL;
8137
8138 switch (ce) {
8139 case CLK_QSEE: {
8140 core_clk_src = "core_clk_src";
8141 core_clk = "core_clk";
8142 iface_clk = "iface_clk";
8143 bus_clk = "bus_clk";
8144 qclk = &qseecom.qsee;
8145 qclk->instance = CLK_QSEE;
8146 break;
8147 }
8148 case CLK_CE_DRV: {
8149 core_clk_src = "ce_drv_core_clk_src";
8150 core_clk = "ce_drv_core_clk";
8151 iface_clk = "ce_drv_iface_clk";
8152 bus_clk = "ce_drv_bus_clk";
8153 qclk = &qseecom.ce_drv;
8154 qclk->instance = CLK_CE_DRV;
8155 break;
8156 }
8157 default:
8158 pr_err("Invalid ce hw instance: %d!\n", ce);
8159 return -EIO;
8160 }
8161
8162 if (qseecom.no_clock_support) {
8163 qclk->ce_core_clk = NULL;
8164 qclk->ce_clk = NULL;
8165 qclk->ce_bus_clk = NULL;
8166 qclk->ce_core_src_clk = NULL;
8167 return 0;
8168 }
8169
8170 pdev = qseecom.pdev;
8171
8172 /* Get CE3 src core clk. */
8173 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
8174 if (!IS_ERR(qclk->ce_core_src_clk)) {
8175 rc = clk_set_rate(qclk->ce_core_src_clk,
8176 qseecom.ce_opp_freq_hz);
8177 if (rc) {
8178 clk_put(qclk->ce_core_src_clk);
8179 qclk->ce_core_src_clk = NULL;
8180 pr_err("Unable to set the core src clk @%uMhz.\n",
8181 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
8182 return -EIO;
8183 }
8184 } else {
8185 pr_warn("Unable to get CE core src clk, set to NULL\n");
8186 qclk->ce_core_src_clk = NULL;
8187 }
8188
8189 /* Get CE core clk */
8190 qclk->ce_core_clk = clk_get(pdev, core_clk);
8191 if (IS_ERR(qclk->ce_core_clk)) {
8192 rc = PTR_ERR(qclk->ce_core_clk);
8193 pr_err("Unable to get CE core clk\n");
8194 if (qclk->ce_core_src_clk != NULL)
8195 clk_put(qclk->ce_core_src_clk);
8196 return -EIO;
8197 }
8198
8199 /* Get CE Interface clk */
8200 qclk->ce_clk = clk_get(pdev, iface_clk);
8201 if (IS_ERR(qclk->ce_clk)) {
8202 rc = PTR_ERR(qclk->ce_clk);
8203 pr_err("Unable to get CE interface clk\n");
8204 if (qclk->ce_core_src_clk != NULL)
8205 clk_put(qclk->ce_core_src_clk);
8206 clk_put(qclk->ce_core_clk);
8207 return -EIO;
8208 }
8209
8210 /* Get CE AXI clk */
8211 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
8212 if (IS_ERR(qclk->ce_bus_clk)) {
8213 rc = PTR_ERR(qclk->ce_bus_clk);
8214 pr_err("Unable to get CE BUS interface clk\n");
8215 if (qclk->ce_core_src_clk != NULL)
8216 clk_put(qclk->ce_core_src_clk);
8217 clk_put(qclk->ce_core_clk);
8218 clk_put(qclk->ce_clk);
8219 return -EIO;
8220 }
8221
8222 return rc;
8223}
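
/*
 * The clk_get() lookups above map to a devicetree clock list along these
 * lines (illustrative only; the clock provider, phandles and the operating
 * frequency are board specific and not taken from this file):
 *
 *	clock-names = "core_clk_src", "core_clk", "iface_clk", "bus_clk";
 *	clocks = <&clock_gcc QSEE_CE_SRC>, <&clock_gcc QSEE_CE_CORE>,
 *		 <&clock_gcc QSEE_CE_IFACE>, <&clock_gcc QSEE_CE_BUS>;
 *
 * The CLK_CE_DRV instance uses the same names prefixed with "ce_drv_". When
 * qseecom.no_clock_support is set, all four handles are left NULL and clock
 * voting is skipped.
 */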
8224
8225static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
8226{
8227 struct qseecom_clk *qclk;
8228
8229 if (ce == CLK_QSEE)
8230 qclk = &qseecom.qsee;
8231 else
8232 qclk = &qseecom.ce_drv;
8233
8234 if (qclk->ce_clk != NULL) {
8235 clk_put(qclk->ce_clk);
8236 qclk->ce_clk = NULL;
8237 }
8238 if (qclk->ce_core_clk != NULL) {
8239 clk_put(qclk->ce_core_clk);
8240 qclk->ce_core_clk = NULL;
8241 }
8242 if (qclk->ce_bus_clk != NULL) {
8243 clk_put(qclk->ce_bus_clk);
8244 qclk->ce_bus_clk = NULL;
8245 }
8246 if (qclk->ce_core_src_clk != NULL) {
8247 clk_put(qclk->ce_core_src_clk);
8248 qclk->ce_core_src_clk = NULL;
8249 }
8250 qclk->instance = CLK_INVALID;
8251}
8252
8253static int qseecom_retrieve_ce_data(struct platform_device *pdev)
8254{
8255 int rc = 0;
8256 uint32_t hlos_num_ce_hw_instances;
8257 uint32_t disk_encrypt_pipe;
8258 uint32_t file_encrypt_pipe;
Zhen Kongffec45c2017-10-18 14:05:53 -07008259 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008260 int i;
8261 const int *tbl;
8262 int size;
8263 int entry;
8264 struct qseecom_crypto_info *pfde_tbl = NULL;
8265 struct qseecom_crypto_info *p;
8266 int tbl_size;
8267 int j;
8268 bool old_db = true;
8269 struct qseecom_ce_info_use *pce_info_use;
8270 uint32_t *unit_tbl = NULL;
8271 int total_units = 0;
8272 struct qseecom_ce_pipe_entry *pce_entry;
8273
8274 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
8275 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
8276
8277 if (of_property_read_u32((&pdev->dev)->of_node,
8278 "qcom,qsee-ce-hw-instance",
8279 &qseecom.ce_info.qsee_ce_hw_instance)) {
8280 pr_err("Fail to get qsee ce hw instance information.\n");
8281 rc = -EINVAL;
8282 goto out;
8283 } else {
8284 pr_debug("qsee-ce-hw-instance=0x%x\n",
8285 qseecom.ce_info.qsee_ce_hw_instance);
8286 }
8287
8288 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
8289 "qcom,support-fde");
8290 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
8291 "qcom,support-pfe");
8292
8293 if (!qseecom.support_pfe && !qseecom.support_fde) {
8294 pr_warn("Device does not support PFE/FDE\n");
8295 goto out;
8296 }
8297
8298 if (qseecom.support_fde)
8299 tbl = of_get_property((&pdev->dev)->of_node,
8300 "qcom,full-disk-encrypt-info", &size);
8301 else
8302 tbl = NULL;
8303 if (tbl) {
8304 old_db = false;
8305 if (size % sizeof(struct qseecom_crypto_info)) {
8306 pr_err("full-disk-encrypt-info tbl size(%d)\n",
8307 size);
8308 rc = -EINVAL;
8309 goto out;
8310 }
8311 tbl_size = size / sizeof
8312 (struct qseecom_crypto_info);
8313
8314 pfde_tbl = kzalloc(size, GFP_KERNEL);
8315 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8316 total_units = 0;
8317
8318 if (!pfde_tbl || !unit_tbl) {
8319 pr_err("failed to alloc memory\n");
8320 rc = -ENOMEM;
8321 goto out;
8322 }
8323 if (of_property_read_u32_array((&pdev->dev)->of_node,
8324 "qcom,full-disk-encrypt-info",
8325 (u32 *)pfde_tbl, size/sizeof(u32))) {
8326 pr_err("failed to read full-disk-encrypt-info tbl\n");
8327 rc = -EINVAL;
8328 goto out;
8329 }
8330
8331 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8332 for (j = 0; j < total_units; j++) {
8333 if (p->unit_num == *(unit_tbl + j))
8334 break;
8335 }
8336 if (j == total_units) {
8337 *(unit_tbl + total_units) = p->unit_num;
8338 total_units++;
8339 }
8340 }
8341
8342 qseecom.ce_info.num_fde = total_units;
8343 pce_info_use = qseecom.ce_info.fde = kcalloc(
8344 total_units, sizeof(struct qseecom_ce_info_use),
8345 GFP_KERNEL);
8346 if (!pce_info_use) {
8347 pr_err("failed to alloc memory\n");
8348 rc = -ENOMEM;
8349 goto out;
8350 }
8351
8352 for (j = 0; j < total_units; j++, pce_info_use++) {
8353 pce_info_use->unit_num = *(unit_tbl + j);
8354 pce_info_use->alloc = false;
8355 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8356 pce_info_use->num_ce_pipe_entries = 0;
8357 pce_info_use->ce_pipe_entry = NULL;
8358 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8359 if (p->unit_num == pce_info_use->unit_num)
8360 pce_info_use->num_ce_pipe_entries++;
8361 }
8362
8363 entry = pce_info_use->num_ce_pipe_entries;
8364 pce_entry = pce_info_use->ce_pipe_entry =
8365 kcalloc(entry,
8366 sizeof(struct qseecom_ce_pipe_entry),
8367 GFP_KERNEL);
8368 if (pce_entry == NULL) {
8369 pr_err("failed to alloc memory\n");
8370 rc = -ENOMEM;
8371 goto out;
8372 }
8373
8374 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8375 if (p->unit_num == pce_info_use->unit_num) {
8376 pce_entry->ce_num = p->ce;
8377 pce_entry->ce_pipe_pair =
8378 p->pipe_pair;
8379 pce_entry->valid = true;
8380 pce_entry++;
8381 }
8382 }
8383 }
8384 kfree(unit_tbl);
8385 unit_tbl = NULL;
8386 kfree(pfde_tbl);
8387 pfde_tbl = NULL;
8388 }
8389
8390 if (qseecom.support_pfe)
8391 tbl = of_get_property((&pdev->dev)->of_node,
8392 "qcom,per-file-encrypt-info", &size);
8393 else
8394 tbl = NULL;
8395 if (tbl) {
8396 old_db = false;
8397 if (size % sizeof(struct qseecom_crypto_info)) {
8398 pr_err("per-file-encrypt-info tbl size(%d)\n",
8399 size);
8400 rc = -EINVAL;
8401 goto out;
8402 }
8403 tbl_size = size / sizeof
8404 (struct qseecom_crypto_info);
8405
8406 pfde_tbl = kzalloc(size, GFP_KERNEL);
8407 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8408 total_units = 0;
8409 if (!pfde_tbl || !unit_tbl) {
8410 pr_err("failed to alloc memory\n");
8411 rc = -ENOMEM;
8412 goto out;
8413 }
8414 if (of_property_read_u32_array((&pdev->dev)->of_node,
8415 "qcom,per-file-encrypt-info",
8416 (u32 *)pfde_tbl, size/sizeof(u32))) {
8417 pr_err("failed to read per-file-encrypt-info tbl\n");
8418 rc = -EINVAL;
8419 goto out;
8420 }
8421
8422 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8423 for (j = 0; j < total_units; j++) {
8424 if (p->unit_num == *(unit_tbl + j))
8425 break;
8426 }
8427 if (j == total_units) {
8428 *(unit_tbl + total_units) = p->unit_num;
8429 total_units++;
8430 }
8431 }
8432
8433 qseecom.ce_info.num_pfe = total_units;
8434 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8435 total_units, sizeof(struct qseecom_ce_info_use),
8436 GFP_KERNEL);
8437 if (!pce_info_use) {
8438 pr_err("failed to alloc memory\n");
8439 rc = -ENOMEM;
8440 goto out;
8441 }
8442
8443 for (j = 0; j < total_units; j++, pce_info_use++) {
8444 pce_info_use->unit_num = *(unit_tbl + j);
8445 pce_info_use->alloc = false;
8446 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8447 pce_info_use->num_ce_pipe_entries = 0;
8448 pce_info_use->ce_pipe_entry = NULL;
8449 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8450 if (p->unit_num == pce_info_use->unit_num)
8451 pce_info_use->num_ce_pipe_entries++;
8452 }
8453
8454 entry = pce_info_use->num_ce_pipe_entries;
8455 pce_entry = pce_info_use->ce_pipe_entry =
8456 kcalloc(entry,
8457 sizeof(struct qseecom_ce_pipe_entry),
8458 GFP_KERNEL);
8459 if (pce_entry == NULL) {
8460 pr_err("failed to alloc memory\n");
8461 rc = -ENOMEM;
8462 goto out;
8463 }
8464
8465 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8466 if (p->unit_num == pce_info_use->unit_num) {
8467 pce_entry->ce_num = p->ce;
8468 pce_entry->ce_pipe_pair =
8469 p->pipe_pair;
8470 pce_entry->valid = true;
8471 pce_entry++;
8472 }
8473 }
8474 }
8475 kfree(unit_tbl);
8476 unit_tbl = NULL;
8477 kfree(pfde_tbl);
8478 pfde_tbl = NULL;
8479 }
8480
8481 if (!old_db)
8482 goto out1;
8483
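	/*
	 * Legacy device trees do not carry the crypto-info tables above.  In
	 * that case a single default unit is built from the scalar
	 * "qcom,disk-encrypt-pipe-pair" / "qcom,file-encrypt-pipe-pair"
	 * properties and the "qcom,hlos-ce-hw-instance" list parsed below.
	 */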
8484 if (of_property_read_bool((&pdev->dev)->of_node,
8485 "qcom,support-multiple-ce-hw-instance")) {
8486 if (of_property_read_u32((&pdev->dev)->of_node,
8487 "qcom,hlos-num-ce-hw-instances",
8488 &hlos_num_ce_hw_instances)) {
8489 pr_err("Fail: get hlos number of ce hw instance\n");
8490 rc = -EINVAL;
8491 goto out;
8492 }
8493 } else {
8494 hlos_num_ce_hw_instances = 1;
8495 }
8496
8497 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8498 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8499 MAX_CE_PIPE_PAIR_PER_UNIT);
8500 rc = -EINVAL;
8501 goto out;
8502 }
8503
8504 if (of_property_read_u32_array((&pdev->dev)->of_node,
8505 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8506 hlos_num_ce_hw_instances)) {
8507 pr_err("Fail: get hlos ce hw instance info\n");
8508 rc = -EINVAL;
8509 goto out;
8510 }
8511
8512 if (qseecom.support_fde) {
8513 pce_info_use = qseecom.ce_info.fde =
8514 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8515 if (!pce_info_use) {
8516 pr_err("failed to alloc memory\n");
8517 rc = -ENOMEM;
8518 goto out;
8519 }
8520 /* by default for old db */
8521 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8522 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8523 pce_info_use->alloc = false;
8524 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8525 pce_info_use->ce_pipe_entry = NULL;
8526 if (of_property_read_u32((&pdev->dev)->of_node,
8527 "qcom,disk-encrypt-pipe-pair",
8528 &disk_encrypt_pipe)) {
8529 pr_err("Fail to get FDE pipe information.\n");
8530 rc = -EINVAL;
8531 goto out;
8532 } else {
8533 pr_debug("disk-encrypt-pipe-pair=0x%x",
8534 disk_encrypt_pipe);
8535 }
8536 entry = pce_info_use->num_ce_pipe_entries =
8537 hlos_num_ce_hw_instances;
8538 pce_entry = pce_info_use->ce_pipe_entry =
8539 kcalloc(entry,
8540 sizeof(struct qseecom_ce_pipe_entry),
8541 GFP_KERNEL);
8542 if (pce_entry == NULL) {
8543 pr_err("failed to alloc memory\n");
8544 rc = -ENOMEM;
8545 goto out;
8546 }
8547 for (i = 0; i < entry; i++) {
8548 pce_entry->ce_num = hlos_ce_hw_instance[i];
8549 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8550 pce_entry->valid = 1;
8551 pce_entry++;
8552 }
8553 } else {
8554 pr_warn("Device does not support FDE");
8555 disk_encrypt_pipe = 0xff;
8556 }
8557 if (qseecom.support_pfe) {
8558 pce_info_use = qseecom.ce_info.pfe =
8559 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8560 if (!pce_info_use) {
8561 pr_err("failed to alloc memory\n");
8562 rc = -ENOMEM;
8563 goto out;
8564 }
8565 /* by default for old db */
8566 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8567 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8568 pce_info_use->alloc = false;
8569 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8570 pce_info_use->ce_pipe_entry = NULL;
8571
8572 if (of_property_read_u32((&pdev->dev)->of_node,
8573 "qcom,file-encrypt-pipe-pair",
8574 &file_encrypt_pipe)) {
8575 pr_err("Fail to get PFE pipe information.\n");
8576 rc = -EINVAL;
8577 goto out;
8578 } else {
8579 pr_debug("file-encrypt-pipe-pair=0x%x",
8580 file_encrypt_pipe);
8581 }
8582 entry = pce_info_use->num_ce_pipe_entries =
8583 hlos_num_ce_hw_instances;
8584 pce_entry = pce_info_use->ce_pipe_entry =
8585 kcalloc(entry,
8586 sizeof(struct qseecom_ce_pipe_entry),
8587 GFP_KERNEL);
8588 if (pce_entry == NULL) {
8589 pr_err("failed to alloc memory\n");
8590 rc = -ENOMEM;
8591 goto out;
8592 }
8593 for (i = 0; i < entry; i++) {
8594 pce_entry->ce_num = hlos_ce_hw_instance[i];
8595 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8596 pce_entry->valid = 1;
8597 pce_entry++;
8598 }
8599 } else {
8600 pr_warn("Device does not support PFE");
8601 file_encrypt_pipe = 0xff;
8602 }
8603
8604out1:
8605 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8606 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8607out:
8608 if (rc) {
8609 if (qseecom.ce_info.fde) {
8610 pce_info_use = qseecom.ce_info.fde;
8611 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8612 pce_entry = pce_info_use->ce_pipe_entry;
8613 kfree(pce_entry);
8614 pce_info_use++;
8615 }
8616 }
8617 kfree(qseecom.ce_info.fde);
8618 qseecom.ce_info.fde = NULL;
8619 if (qseecom.ce_info.pfe) {
8620 pce_info_use = qseecom.ce_info.pfe;
8621 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8622 pce_entry = pce_info_use->ce_pipe_entry;
8623 kfree(pce_entry);
8624 pce_info_use++;
8625 }
8626 }
8627 kfree(qseecom.ce_info.pfe);
8628 qseecom.ce_info.pfe = NULL;
8629 }
8630 kfree(unit_tbl);
8631 kfree(pfde_tbl);
8632 return rc;
8633}
8634
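/*
 * qseecom_get_ce_info() - reserve a CE unit for a client handle.
 *
 * Looks up the FDE or PFE unit list for the requested usage, reuses the
 * unit already bound to the caller's handle if one exists, otherwise claims
 * the first unallocated unit, and copies its pipe-pair entries back to
 * userspace.  Returns -EBUSY when every unit is already taken.
 */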
8635static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8636 void __user *argp)
8637{
8638 struct qseecom_ce_info_req req;
8639 struct qseecom_ce_info_req *pinfo = &req;
8640 int ret = 0;
8641 int i;
8642 unsigned int entries;
8643 struct qseecom_ce_info_use *pce_info_use, *p;
8644 int total = 0;
8645 bool found = false;
8646 struct qseecom_ce_pipe_entry *pce_entry;
8647
8648 ret = copy_from_user(pinfo, argp,
8649 sizeof(struct qseecom_ce_info_req));
8650 if (ret) {
8651 pr_err("copy_from_user failed\n");
8652	return -EFAULT;
8653 }
8654
8655 switch (pinfo->usage) {
8656 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8657 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8658 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8659 if (qseecom.support_fde) {
8660 p = qseecom.ce_info.fde;
8661 total = qseecom.ce_info.num_fde;
8662 } else {
8663 pr_err("system does not support fde\n");
8664 return -EINVAL;
8665 }
8666 break;
8667 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8668 if (qseecom.support_pfe) {
8669 p = qseecom.ce_info.pfe;
8670 total = qseecom.ce_info.num_pfe;
8671 } else {
8672 pr_err("system does not support pfe\n");
8673 return -EINVAL;
8674 }
8675 break;
8676 default:
8677 pr_err("unsupported usage %d\n", pinfo->usage);
8678 return -EINVAL;
8679 }
8680
8681 pce_info_use = NULL;
8682 for (i = 0; i < total; i++) {
8683 if (!p->alloc)
8684 pce_info_use = p;
8685 else if (!memcmp(p->handle, pinfo->handle,
8686 MAX_CE_INFO_HANDLE_SIZE)) {
8687 pce_info_use = p;
8688 found = true;
8689 break;
8690 }
8691 p++;
8692 }
8693
8694 if (pce_info_use == NULL)
8695 return -EBUSY;
8696
8697 pinfo->unit_num = pce_info_use->unit_num;
8698 if (!pce_info_use->alloc) {
8699 pce_info_use->alloc = true;
8700 memcpy(pce_info_use->handle,
8701 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8702 }
8703 if (pce_info_use->num_ce_pipe_entries >
8704 MAX_CE_PIPE_PAIR_PER_UNIT)
8705 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8706 else
8707 entries = pce_info_use->num_ce_pipe_entries;
8708 pinfo->num_ce_pipe_entries = entries;
8709 pce_entry = pce_info_use->ce_pipe_entry;
8710 for (i = 0; i < entries; i++, pce_entry++)
8711 pinfo->ce_pipe_entry[i] = *pce_entry;
8712 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8713 pinfo->ce_pipe_entry[i].valid = 0;
8714
8715 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8716 pr_err("copy_to_user failed\n");
8717 ret = -EFAULT;
8718 }
8719 return ret;
8720}
8721
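/*
 * qseecom_free_ce_info() - release the CE unit bound to a client handle.
 *
 * Clears the stored handle and the alloc flag so that the unit can be
 * handed out again by qseecom_get_ce_info().
 */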
8722static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8723 void __user *argp)
8724{
8725 struct qseecom_ce_info_req req;
8726 struct qseecom_ce_info_req *pinfo = &req;
8727 int ret = 0;
8728 struct qseecom_ce_info_use *p;
8729 int total = 0;
8730 int i;
8731 bool found = false;
8732
8733 ret = copy_from_user(pinfo, argp,
8734 sizeof(struct qseecom_ce_info_req));
8735 if (ret)
8736	return -EFAULT;
8737
8738 switch (pinfo->usage) {
8739 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8740 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8741 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8742 if (qseecom.support_fde) {
8743 p = qseecom.ce_info.fde;
8744 total = qseecom.ce_info.num_fde;
8745 } else {
8746 pr_err("system does not support fde\n");
8747 return -EINVAL;
8748 }
8749 break;
8750 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8751 if (qseecom.support_pfe) {
8752 p = qseecom.ce_info.pfe;
8753 total = qseecom.ce_info.num_pfe;
8754 } else {
8755 pr_err("system does not support pfe\n");
8756 return -EINVAL;
8757 }
8758 break;
8759 default:
8760 pr_err("unsupported usage %d\n", pinfo->usage);
8761 return -EINVAL;
8762 }
8763
8764 for (i = 0; i < total; i++) {
8765 if (p->alloc &&
8766 !memcmp(p->handle, pinfo->handle,
8767 MAX_CE_INFO_HANDLE_SIZE)) {
8768 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8769 p->alloc = false;
8770 found = true;
8771 break;
8772 }
8773 p++;
8774 }
8775 return ret;
8776}
8777
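/*
 * qseecom_query_ce_info() - report, without allocating, the CE unit already
 * bound to the caller's handle and its pipe-pair entries.  An unknown handle
 * is answered with unit INVALID_CE_INFO_UNIT_NUM and zero entries.
 */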
8778static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8779 void __user *argp)
8780{
8781 struct qseecom_ce_info_req req;
8782 struct qseecom_ce_info_req *pinfo = &req;
8783 int ret = 0;
8784 int i;
8785 unsigned int entries;
8786 struct qseecom_ce_info_use *pce_info_use, *p;
8787 int total = 0;
8788 bool found = false;
8789 struct qseecom_ce_pipe_entry *pce_entry;
8790
8791 ret = copy_from_user(pinfo, argp,
8792 sizeof(struct qseecom_ce_info_req));
8793 if (ret)
8794	return -EFAULT;
8795
8796 switch (pinfo->usage) {
8797 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8798 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8799 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8800 if (qseecom.support_fde) {
8801 p = qseecom.ce_info.fde;
8802 total = qseecom.ce_info.num_fde;
8803 } else {
8804 pr_err("system does not support fde\n");
8805 return -EINVAL;
8806 }
8807 break;
8808 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8809 if (qseecom.support_pfe) {
8810 p = qseecom.ce_info.pfe;
8811 total = qseecom.ce_info.num_pfe;
8812 } else {
8813 pr_err("system does not support pfe\n");
8814 return -EINVAL;
8815 }
8816 break;
8817 default:
8818 pr_err("unsupported usage %d\n", pinfo->usage);
8819 return -EINVAL;
8820 }
8821
8822 pce_info_use = NULL;
8823 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8824 pinfo->num_ce_pipe_entries = 0;
8825 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8826 pinfo->ce_pipe_entry[i].valid = 0;
8827
8828 for (i = 0; i < total; i++) {
8829
8830 if (p->alloc && !memcmp(p->handle,
8831 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8832 pce_info_use = p;
8833 found = true;
8834 break;
8835 }
8836 p++;
8837 }
8838 if (!pce_info_use)
8839 goto out;
8840 pinfo->unit_num = pce_info_use->unit_num;
8841 if (pce_info_use->num_ce_pipe_entries >
8842 MAX_CE_PIPE_PAIR_PER_UNIT)
8843 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8844 else
8845 entries = pce_info_use->num_ce_pipe_entries;
8846 pinfo->num_ce_pipe_entries = entries;
8847 pce_entry = pce_info_use->ce_pipe_entry;
8848 for (i = 0; i < entries; i++, pce_entry++)
8849 pinfo->ce_pipe_entry[i] = *pce_entry;
8850 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8851 pinfo->ce_pipe_entry[i].valid = 0;
8852out:
8853 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8854 pr_err("copy_to_user failed\n");
8855 ret = -EFAULT;
8856 }
8857 return ret;
8858}
8859
8860/*
8861 * Check the whitelist feature: if the TZ feature version is below
8862 * 1.0.0, the whitelist feature is not supported.
8863 */
8864static int qseecom_check_whitelist_feature(void)
8865{
8866 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8867
8868 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8869}
8870
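/*
 * qseecom_probe() - driver probe: create the qseecom character device, query
 * the QSEE version, set up the ION client, clocks, bus scaling and the
 * secure-app-region notification from device tree data, then start the
 * listener-unregister and TA-unload kthreads before marking the driver READY.
 */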
8871static int qseecom_probe(struct platform_device *pdev)
8872{
8873 int rc;
8874 int i;
8875 uint32_t feature = 10;
8876 struct device *class_dev;
8877 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8878 struct qseecom_command_scm_resp resp;
8879 struct qseecom_ce_info_use *pce_info_use = NULL;
8880
8881 qseecom.qsee_bw_count = 0;
8882 qseecom.qsee_perf_client = 0;
8883 qseecom.qsee_sfpb_bw_count = 0;
8884
8885 qseecom.qsee.ce_core_clk = NULL;
8886 qseecom.qsee.ce_clk = NULL;
8887 qseecom.qsee.ce_core_src_clk = NULL;
8888 qseecom.qsee.ce_bus_clk = NULL;
8889
8890 qseecom.cumulative_mode = 0;
8891 qseecom.current_mode = INACTIVE;
8892 qseecom.support_bus_scaling = false;
8893 qseecom.support_fde = false;
8894 qseecom.support_pfe = false;
8895
8896 qseecom.ce_drv.ce_core_clk = NULL;
8897 qseecom.ce_drv.ce_clk = NULL;
8898 qseecom.ce_drv.ce_core_src_clk = NULL;
8899 qseecom.ce_drv.ce_bus_clk = NULL;
8900 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8901
8902 qseecom.app_block_ref_cnt = 0;
8903 init_waitqueue_head(&qseecom.app_block_wq);
8904 qseecom.whitelist_support = true;
8905
8906 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8907 if (rc < 0) {
8908 pr_err("alloc_chrdev_region failed %d\n", rc);
8909 return rc;
8910 }
8911
8912 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8913 if (IS_ERR(driver_class)) {
8914 rc = -ENOMEM;
8915 pr_err("class_create failed %d\n", rc);
8916 goto exit_unreg_chrdev_region;
8917 }
8918
8919 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8920 QSEECOM_DEV);
8921 if (IS_ERR(class_dev)) {
8922 pr_err("class_device_create failed %d\n", rc);
8923 rc = -ENOMEM;
8924 goto exit_destroy_class;
8925 }
8926
8927 cdev_init(&qseecom.cdev, &qseecom_fops);
8928 qseecom.cdev.owner = THIS_MODULE;
8929
8930 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8931 if (rc < 0) {
8932 pr_err("cdev_add failed %d\n", rc);
8933 goto exit_destroy_device;
8934 }
8935
8936 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
8937	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8938 spin_lock_init(&qseecom.registered_app_list_lock);
8939	INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
8940	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8941 spin_lock_init(&qseecom.registered_kclient_list_lock);
8942 init_waitqueue_head(&qseecom.send_resp_wq);
8943	init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
8944	init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq);
8945	INIT_LIST_HEAD(&qseecom.unload_app_pending_list_head);
8946 init_waitqueue_head(&qseecom.unload_app_kthread_wq);
8947	qseecom.send_resp_flag = 0;
8948
8949 qseecom.qsee_version = QSEEE_VERSION_00;
8950	mutex_lock(&app_access_lock);
8951	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8952 &resp, sizeof(resp));
8953	mutex_unlock(&app_access_lock);
8954	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8955 if (rc) {
8956 pr_err("Failed to get QSEE version info %d\n", rc);
8957 goto exit_del_cdev;
8958 }
8959 qseecom.qsee_version = resp.result;
8960 qseecom.qseos_version = QSEOS_VERSION_14;
8961 qseecom.commonlib_loaded = false;
8962 qseecom.commonlib64_loaded = false;
8963 qseecom.pdev = class_dev;
8964 /* Create ION msm client */
8965 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8966 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8967 pr_err("Ion client cannot be created\n");
8968 rc = -ENOMEM;
8969 goto exit_del_cdev;
8970 }
8971
8972	/* gather device tree data: CE info, clocks, and bus scaling support */
8973 if (pdev->dev.of_node) {
8974 qseecom.pdev->of_node = pdev->dev.of_node;
8975 qseecom.support_bus_scaling =
8976 of_property_read_bool((&pdev->dev)->of_node,
8977 "qcom,support-bus-scaling");
8978 rc = qseecom_retrieve_ce_data(pdev);
8979 if (rc)
8980 goto exit_destroy_ion_client;
8981 qseecom.appsbl_qseecom_support =
8982 of_property_read_bool((&pdev->dev)->of_node,
8983 "qcom,appsbl-qseecom-support");
8984 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
8985 qseecom.appsbl_qseecom_support);
8986
8987 qseecom.commonlib64_loaded =
8988 of_property_read_bool((&pdev->dev)->of_node,
8989 "qcom,commonlib64-loaded-by-uefi");
8990 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
8991 qseecom.commonlib64_loaded);
8992 qseecom.fde_key_size =
8993 of_property_read_bool((&pdev->dev)->of_node,
8994 "qcom,fde-key-size");
8995 qseecom.no_clock_support =
8996 of_property_read_bool((&pdev->dev)->of_node,
8997 "qcom,no-clock-support");
8998	if (qseecom.no_clock_support) {
8999	pr_info("qseecom clocks handled by other subsystem\n");
9000	} else {
9001	pr_info("no-clock-support=0x%x\n",
9002	qseecom.no_clock_support);
9003 }
9004
9005 if (of_property_read_u32((&pdev->dev)->of_node,
9006 "qcom,qsee-reentrancy-support",
9007 &qseecom.qsee_reentrancy_support)) {
9008 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
9009 qseecom.qsee_reentrancy_support = 0;
9010 } else {
9011 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
9012 qseecom.qsee_reentrancy_support);
9013 }
9014
9015	qseecom.enable_key_wrap_in_ks =
9016 of_property_read_bool((&pdev->dev)->of_node,
9017 "qcom,enable-key-wrap-in-ks");
9018 if (qseecom.enable_key_wrap_in_ks) {
9019 pr_warn("qseecom.enable_key_wrap_in_ks = %d\n",
9020 qseecom.enable_key_wrap_in_ks);
9021 }
9022
9023	/*
9024	* The qseecom bus scaling flag cannot be enabled when the
9025	* crypto clock is not handled by HLOS.
9026	*/
9027 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
9028 pr_err("support_bus_scaling flag can not be enabled.\n");
9029 rc = -EINVAL;
9030 goto exit_destroy_ion_client;
9031 }
9032
9033 if (of_property_read_u32((&pdev->dev)->of_node,
9034 "qcom,ce-opp-freq",
9035 &qseecom.ce_opp_freq_hz)) {
9036 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
9037 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
9038 }
9039 rc = __qseecom_init_clk(CLK_QSEE);
9040 if (rc)
9041 goto exit_destroy_ion_client;
9042
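		/*
		 * If the HLOS CE block is a different hardware instance from
		 * the one used by QSEE (and FDE/PFE is supported), it needs
		 * its own clock set; otherwise the CE driver reuses the
		 * CLK_QSEE clock handles.
		 */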
9043 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9044 (qseecom.support_pfe || qseecom.support_fde)) {
9045 rc = __qseecom_init_clk(CLK_CE_DRV);
9046 if (rc) {
9047 __qseecom_deinit_clk(CLK_QSEE);
9048 goto exit_destroy_ion_client;
9049 }
9050 } else {
9051 struct qseecom_clk *qclk;
9052
9053 qclk = &qseecom.qsee;
9054 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
9055 qseecom.ce_drv.ce_clk = qclk->ce_clk;
9056 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
9057 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
9058 }
9059
9060 qseecom_platform_support = (struct msm_bus_scale_pdata *)
9061 msm_bus_cl_get_pdata(pdev);
9062 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
9063 (!qseecom.is_apps_region_protected &&
9064 !qseecom.appsbl_qseecom_support)) {
9065 struct resource *resource = NULL;
9066 struct qsee_apps_region_info_ireq req;
9067 struct qsee_apps_region_info_64bit_ireq req_64bit;
9068 struct qseecom_command_scm_resp resp;
9069 void *cmd_buf = NULL;
9070 size_t cmd_len;
9071
9072 resource = platform_get_resource_byname(pdev,
9073 IORESOURCE_MEM, "secapp-region");
9074 if (resource) {
9075 if (qseecom.qsee_version < QSEE_VERSION_40) {
9076 req.qsee_cmd_id =
9077 QSEOS_APP_REGION_NOTIFICATION;
9078 req.addr = (uint32_t)resource->start;
9079 req.size = resource_size(resource);
9080 cmd_buf = (void *)&req;
9081 cmd_len = sizeof(struct
9082 qsee_apps_region_info_ireq);
9083 pr_warn("secure app region addr=0x%x size=0x%x",
9084 req.addr, req.size);
9085 } else {
9086 req_64bit.qsee_cmd_id =
9087 QSEOS_APP_REGION_NOTIFICATION;
9088 req_64bit.addr = resource->start;
9089 req_64bit.size = resource_size(
9090 resource);
9091 cmd_buf = (void *)&req_64bit;
9092 cmd_len = sizeof(struct
9093 qsee_apps_region_info_64bit_ireq);
9094 pr_warn("secure app region addr=0x%llx size=0x%x",
9095 req_64bit.addr, req_64bit.size);
9096 }
9097 } else {
9098 pr_err("Fail to get secure app region info\n");
9099 rc = -EINVAL;
9100 goto exit_deinit_clock;
9101 }
9102 rc = __qseecom_enable_clk(CLK_QSEE);
9103 if (rc) {
9104 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
9105 rc = -EIO;
9106 goto exit_deinit_clock;
9107 }
9108	mutex_lock(&app_access_lock);
9109	rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
9110 cmd_buf, cmd_len,
9111 &resp, sizeof(resp));
9112	mutex_unlock(&app_access_lock);
9113	__qseecom_disable_clk(CLK_QSEE);
9114 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
9115 pr_err("send secapp reg fail %d resp.res %d\n",
9116 rc, resp.result);
9117 rc = -EINVAL;
9118 goto exit_deinit_clock;
9119 }
9120 }
9121 /*
9122	* By default, appsbl only loads cmnlib. If an OEM changes appsbl to
9123	* also load cmnlib64 while the cmnlib64 image is not present in
9124	* non_hlos.bin, add "qseecom.commonlib64_loaded = true" here too.
9125 */
9126 if (qseecom.is_apps_region_protected ||
9127 qseecom.appsbl_qseecom_support)
9128 qseecom.commonlib_loaded = true;
9129 } else {
9130 qseecom_platform_support = (struct msm_bus_scale_pdata *)
9131 pdev->dev.platform_data;
9132 }
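	/*
	 * With bus scaling enabled, bandwidth votes are dropped from the
	 * bw_inactive_req_ws work item once the scale-down timer fires, so
	 * set up the timer and the work item before registering the bus
	 * scaling client.
	 */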
9133 if (qseecom.support_bus_scaling) {
9134 init_timer(&(qseecom.bw_scale_down_timer));
9135 INIT_WORK(&qseecom.bw_inactive_req_ws,
9136 qseecom_bw_inactive_req_work);
9137 qseecom.bw_scale_down_timer.function =
9138 qseecom_scale_bus_bandwidth_timer_callback;
9139 }
9140 qseecom.timer_running = false;
9141 qseecom.qsee_perf_client = msm_bus_scale_register_client(
9142 qseecom_platform_support);
9143
9144 qseecom.whitelist_support = qseecom_check_whitelist_feature();
9145 pr_warn("qseecom.whitelist_support = %d\n",
9146 qseecom.whitelist_support);
9147
9148 if (!qseecom.qsee_perf_client)
9149 pr_err("Unable to register bus client\n");
9150
9151	/* Create a kthread to process pending listener unregister tasks */
9152 qseecom.unregister_lsnr_kthread_task = kthread_run(
9153 __qseecom_unregister_listener_kthread_func,
9154 NULL, "qseecom-unreg-lsnr");
9155 if (IS_ERR(qseecom.unregister_lsnr_kthread_task)) {
9156 pr_err("failed to create kthread to unregister listener\n");
9157 rc = -EINVAL;
9158 goto exit_deinit_clock;
9159 }
9160 atomic_set(&qseecom.unregister_lsnr_kthread_state,
9161 LSNR_UNREG_KT_SLEEP);
9162
9163	/* Create a kthread to process pending TA unload tasks */
9164 qseecom.unload_app_kthread_task = kthread_run(
9165 __qseecom_unload_app_kthread_func,
9166 NULL, "qseecom-unload-ta");
9167 if (IS_ERR(qseecom.unload_app_kthread_task)) {
9168 pr_err("failed to create kthread to unload ta\n");
9169 rc = -EINVAL;
9170 goto exit_kill_unreg_lsnr_kthread;
9171 }
9172 atomic_set(&qseecom.unload_app_kthread_state,
9173 UNLOAD_APP_KT_SLEEP);
9174
9175	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
9176 return 0;
9177
9178exit_kill_unreg_lsnr_kthread:
9179 kthread_stop(qseecom.unregister_lsnr_kthread_task);
9180
9181exit_deinit_clock:
9182 __qseecom_deinit_clk(CLK_QSEE);
9183 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9184 (qseecom.support_pfe || qseecom.support_fde))
9185 __qseecom_deinit_clk(CLK_CE_DRV);
9186exit_destroy_ion_client:
9187 if (qseecom.ce_info.fde) {
9188 pce_info_use = qseecom.ce_info.fde;
9189 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
9190 kzfree(pce_info_use->ce_pipe_entry);
9191 pce_info_use++;
9192 }
9193 kfree(qseecom.ce_info.fde);
9194 }
9195 if (qseecom.ce_info.pfe) {
9196 pce_info_use = qseecom.ce_info.pfe;
9197 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
9198 kzfree(pce_info_use->ce_pipe_entry);
9199 pce_info_use++;
9200 }
9201 kfree(qseecom.ce_info.pfe);
9202 }
9203 ion_client_destroy(qseecom.ion_clnt);
9204exit_del_cdev:
9205 cdev_del(&qseecom.cdev);
9206exit_destroy_device:
9207 device_destroy(driver_class, qseecom_device_no);
9208exit_destroy_class:
9209 class_destroy(driver_class);
9210exit_unreg_chrdev_region:
9211 unregister_chrdev_region(qseecom_device_no, 1);
9212 return rc;
9213}
9214
9215static int qseecom_remove(struct platform_device *pdev)
9216{
9217 struct qseecom_registered_kclient_list *kclient = NULL;
9218	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
9219	unsigned long flags = 0;
9220 int ret = 0;
9221 int i;
9222 struct qseecom_ce_pipe_entry *pce_entry;
9223 struct qseecom_ce_info_use *pce_info_use;
9224
9225 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
9226 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
9227
9228	list_for_each_entry_safe(kclient, kclient_tmp,
9229 &qseecom.registered_kclient_list_head, list) {
9230
9231 /* Break the loop if client handle is NULL */
9232	if (!kclient->handle) {
9233 list_del(&kclient->list);
9234 kzfree(kclient);
9235 break;
9236 }
9237
9238 list_del(&kclient->list);
9239 mutex_lock(&app_access_lock);
9240 ret = qseecom_unload_app(kclient->handle->dev, false);
9241 mutex_unlock(&app_access_lock);
9242 if (!ret) {
9243 kzfree(kclient->handle->dev);
9244 kzfree(kclient->handle);
9245 kzfree(kclient);
9246 }
9247 }
9248
9249	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
9250
9251 if (qseecom.qseos_version > QSEEE_VERSION_00)
9252 qseecom_unload_commonlib_image();
9253
9254 if (qseecom.qsee_perf_client)
9255 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
9256 0);
9257 if (pdev->dev.platform_data != NULL)
9258 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
9259
9260 if (qseecom.support_bus_scaling) {
9261 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9262 del_timer_sync(&qseecom.bw_scale_down_timer);
9263 }
9264
9265 if (qseecom.ce_info.fde) {
9266 pce_info_use = qseecom.ce_info.fde;
9267 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
9268 pce_entry = pce_info_use->ce_pipe_entry;
9269 kfree(pce_entry);
9270 pce_info_use++;
9271 }
9272 }
9273 kfree(qseecom.ce_info.fde);
9274 if (qseecom.ce_info.pfe) {
9275 pce_info_use = qseecom.ce_info.pfe;
9276 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
9277 pce_entry = pce_info_use->ce_pipe_entry;
9278 kfree(pce_entry);
9279 pce_info_use++;
9280 }
9281 }
9282 kfree(qseecom.ce_info.pfe);
9283
9284	/* de-initialize CE clocks that were set up from the device tree */
9285 if (pdev->dev.of_node) {
9286 __qseecom_deinit_clk(CLK_QSEE);
9287 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9288 (qseecom.support_pfe || qseecom.support_fde))
9289 __qseecom_deinit_clk(CLK_CE_DRV);
9290 }
9291
9292 ion_client_destroy(qseecom.ion_clnt);
9293
9294	kthread_stop(qseecom.unload_app_kthread_task);
9295
9296	kthread_stop(qseecom.unregister_lsnr_kthread_task);
9297
9298	cdev_del(&qseecom.cdev);
9299
9300 device_destroy(driver_class, qseecom_device_no);
9301
9302 class_destroy(driver_class);
9303
9304 unregister_chrdev_region(qseecom_device_no, 1);
9305
9306 return ret;
9307}
9308
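/*
 * System suspend votes the bus down to INACTIVE, gates any CE clocks that
 * are still prepared and stops the bandwidth scale-down timer; resume below
 * restores the cumulative bandwidth vote and re-enables the clocks.
 */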
9309static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
9310{
9311 int ret = 0;
9312 struct qseecom_clk *qclk;
9313
9314 qclk = &qseecom.qsee;
9315 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
9316 if (qseecom.no_clock_support)
9317 return 0;
9318
9319 mutex_lock(&qsee_bw_mutex);
9320 mutex_lock(&clk_access_lock);
9321
9322 if (qseecom.current_mode != INACTIVE) {
9323 ret = msm_bus_scale_client_update_request(
9324 qseecom.qsee_perf_client, INACTIVE);
9325 if (ret)
9326 pr_err("Fail to scale down bus\n");
9327 else
9328 qseecom.current_mode = INACTIVE;
9329 }
9330
9331 if (qclk->clk_access_cnt) {
9332 if (qclk->ce_clk != NULL)
9333 clk_disable_unprepare(qclk->ce_clk);
9334 if (qclk->ce_core_clk != NULL)
9335 clk_disable_unprepare(qclk->ce_core_clk);
9336 if (qclk->ce_bus_clk != NULL)
9337 clk_disable_unprepare(qclk->ce_bus_clk);
9338 }
9339
9340 del_timer_sync(&(qseecom.bw_scale_down_timer));
9341 qseecom.timer_running = false;
9342
9343 mutex_unlock(&clk_access_lock);
9344 mutex_unlock(&qsee_bw_mutex);
9345 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9346
9347 return 0;
9348}
9349
9350static int qseecom_resume(struct platform_device *pdev)
9351{
9352 int mode = 0;
9353 int ret = 0;
9354 struct qseecom_clk *qclk;
9355
9356 qclk = &qseecom.qsee;
9357 if (qseecom.no_clock_support)
9358 goto exit;
9359
9360 mutex_lock(&qsee_bw_mutex);
9361 mutex_lock(&clk_access_lock);
9362 if (qseecom.cumulative_mode >= HIGH)
9363 mode = HIGH;
9364 else
9365 mode = qseecom.cumulative_mode;
9366
9367 if (qseecom.cumulative_mode != INACTIVE) {
9368 ret = msm_bus_scale_client_update_request(
9369 qseecom.qsee_perf_client, mode);
9370 if (ret)
9371 pr_err("Fail to scale up bus to %d\n", mode);
9372 else
9373 qseecom.current_mode = mode;
9374 }
9375
9376 if (qclk->clk_access_cnt) {
9377 if (qclk->ce_core_clk != NULL) {
9378 ret = clk_prepare_enable(qclk->ce_core_clk);
9379 if (ret) {
9380 pr_err("Unable to enable/prep CE core clk\n");
9381 qclk->clk_access_cnt = 0;
9382 goto err;
9383 }
9384 }
9385 if (qclk->ce_clk != NULL) {
9386 ret = clk_prepare_enable(qclk->ce_clk);
9387 if (ret) {
9388 pr_err("Unable to enable/prep CE iface clk\n");
9389 qclk->clk_access_cnt = 0;
9390 goto ce_clk_err;
9391 }
9392 }
9393 if (qclk->ce_bus_clk != NULL) {
9394 ret = clk_prepare_enable(qclk->ce_bus_clk);
9395 if (ret) {
9396 pr_err("Unable to enable/prep CE bus clk\n");
9397 qclk->clk_access_cnt = 0;
9398 goto ce_bus_clk_err;
9399 }
9400 }
9401 }
9402
9403 if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
9404 qseecom.bw_scale_down_timer.expires = jiffies +
9405 msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
9406 mod_timer(&(qseecom.bw_scale_down_timer),
9407 qseecom.bw_scale_down_timer.expires);
9408 qseecom.timer_running = true;
9409 }
9410
9411 mutex_unlock(&clk_access_lock);
9412 mutex_unlock(&qsee_bw_mutex);
9413 goto exit;
9414
9415ce_bus_clk_err:
9416 if (qclk->ce_clk)
9417 clk_disable_unprepare(qclk->ce_clk);
9418ce_clk_err:
9419 if (qclk->ce_core_clk)
9420 clk_disable_unprepare(qclk->ce_core_clk);
9421err:
9422 mutex_unlock(&clk_access_lock);
9423 mutex_unlock(&qsee_bw_mutex);
9424 ret = -EIO;
9425exit:
9426 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
9427 return ret;
9428}
9429
9430static const struct of_device_id qseecom_match[] = {
9431 {
9432 .compatible = "qcom,qseecom",
9433 },
9434 {}
9435};
9436
9437static struct platform_driver qseecom_plat_driver = {
9438 .probe = qseecom_probe,
9439 .remove = qseecom_remove,
9440 .suspend = qseecom_suspend,
9441 .resume = qseecom_resume,
9442 .driver = {
9443 .name = "qseecom",
9444 .owner = THIS_MODULE,
9445 .of_match_table = qseecom_match,
9446 },
9447};
9448
9449static int qseecom_init(void)
9450{
9451 return platform_driver_register(&qseecom_plat_driver);
9452}
9453
9454static void qseecom_exit(void)
9455{
9456 platform_driver_unregister(&qseecom_plat_driver);
9457}
9458
9459MODULE_LICENSE("GPL v2");
9460MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9461
9462module_init(qseecom_init);
9463module_exit(qseecom_exit);