/*
 * QTI Secure Execution Environment Communicator (QSEECOM) driver
 *
 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/qseecom.h>
#include <linux/elf.h>
#include <linux/firmware.h>
#include <linux/freezer.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/dma-mapping.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/socinfo.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <soc/qcom/qseecomi.h>
#include <asm/cacheflush.h>
#include "qseecom_kernel.h"
#include <crypto/ice.h>
#include <linux/delay.h>

#include <linux/compat.h>
#include "compat_qseecom.h"
#include <linux/kthread.h>

#define QSEECOM_DEV			"qseecom"
#define QSEOS_VERSION_14		0x14
#define QSEE_VERSION_00			0x400000
#define QSEE_VERSION_01			0x401000
#define QSEE_VERSION_02			0x402000
#define QSEE_VERSION_03			0x403000
#define QSEE_VERSION_04			0x404000
#define QSEE_VERSION_05			0x405000
#define QSEE_VERSION_20			0x800000
#define QSEE_VERSION_40			0x1000000  /* TZ.BF.4.0 */

#define QSEE_CE_CLK_100MHZ		100000000
#define CE_CLK_DIV			1000000

#define QSEECOM_MAX_SG_ENTRY			4096
#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
		(QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)

#define QSEECOM_INVALID_KEY_ID		0xff

/* Save partition image hash for authentication check */
#define SCM_SAVE_PARTITION_HASH_ID	0x01

/* Check if enterprise security is activated */
#define SCM_IS_ACTIVATED_ID		0x02

/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
#define SCM_MDTP_CIPHER_DIP		0x01

/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
#define MAX_DIP				0x20000

#define RPMB_SERVICE			0x2000
#define SSD_SERVICE			0x3000

#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT	2000
#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT	2000
#define TWO				2
#define QSEECOM_UFS_ICE_CE_NUM		10
#define QSEECOM_SDCC_ICE_CE_NUM		20
#define QSEECOM_ICE_FDE_KEY_INDEX	0

#define PHY_ADDR_4G			(1ULL << 32)

#define QSEECOM_STATE_NOT_READY		0
#define QSEECOM_STATE_SUSPEND		1
#define QSEECOM_STATE_READY		2
#define QSEECOM_ICE_FDE_KEY_SIZE_MASK	2

/*
 * Default ce info unit is 0 for services that support only a single
 * instance. Most services fall into this category.
 */
#define DEFAULT_CE_INFO_UNIT		0
#define DEFAULT_NUM_CE_INFO_UNIT	1

#define FDE_FLAG_POS			4
#define ENABLE_KEY_WRAP_IN_KS		(1 << FDE_FLAG_POS)

enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};

enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};
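
/*
 * For reference (worked out from the definitions above, not additional
 * driver logic): QSEECOM_ICE_FDE_KEY_SIZE_MASK == 2 is used as a shift
 * amount, so the three values decode to 0x0 (16-byte key), 0x4 (32-byte
 * key) and 0x3C (undefined); the key-size selector occupies bits [2..5]
 * of the flag word.
 */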

enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,
	QSEECOM_LISTENER_SERVICE,
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};

enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,
	CLK_CE_DRV,
	CLK_INVALID,
};

enum qseecom_listener_unregister_kthread_state {
	LSNR_UNREG_KT_SLEEP = 0,
	LSNR_UNREG_KT_WAKEUP,
};

static struct class *driver_class;
static dev_t qseecom_device_no;

static DEFINE_MUTEX(qsee_bw_mutex);
static DEFINE_MUTEX(app_access_lock);
static DEFINE_MUTEX(clk_access_lock);
static DEFINE_MUTEX(listener_access_lock);


struct sglist_info {
	uint32_t indexAndFlags;
	uint32_t sizeOrCount;
};

/*
 * Bit 31 indicates whether the request buffer holds a single physical
 * address or multiple ones. If it is set, the index locates a single
 * physical addr inside the request buffer, and `sizeOrCount` is the size
 * of the memory being shared at that physical address.
 * Otherwise, the index locates an array of {start, len} pairs (a
 * "scatter/gather list"), and `sizeOrCount` gives the number of entries
 * in that array.
 *
 * Bit 30 indicates a 64-bit or 32-bit address; when it is set, physical
 * addresses and scatter/gather entry sizes are 64-bit values. Otherwise,
 * they are 32-bit values.
 *
 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
 */
#define SGLISTINFO_SET_INDEX_FLAG(c, s, i)	\
	((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))

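/*
 * Illustrative sketch of the encoding above (hypothetical helper, not
 * wired into the driver): fill one table entry describing a single
 * (c = 1), 64-bit (s = 1) buffer whose physical address sits at a
 * hypothetical offset 0x100 of the request buffer and which shares
 * 4096 bytes of memory.
 */
static inline void __qseecom_sglistinfo_fill_example(
		struct sglist_info *entry)
{
	entry->indexAndFlags = SGLISTINFO_SET_INDEX_FLAG(1, 1, 0x100);
	entry->sizeOrCount = 4096;	/* size of the single shared buffer */
}
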
#define SGLISTINFO_TABLE_SIZE	(sizeof(struct sglist_info) * MAX_ION_FD)

#define FEATURE_ID_WHITELIST	15	/* whitelist feature id */

#define MAKE_WHITELIST_VERSION(major, minor, patch) \
	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))

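/*
 * For reference (hypothetical values): MAKE_WHITELIST_VERSION(1, 0, 2)
 * packs version 1.0.2 as (1 << 22) | (0 << 12) | 2 == 0x400002, i.e.
 * major in bits [22..31], minor in bits [12..21], patch in bits [0..11].
 */
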
struct qseecom_registered_listener_list {
	struct list_head list;
	struct qseecom_register_listener_req svc;
	void *user_virt_sb_base;
	u8 *sb_virt;
	phys_addr_t sb_phys;
	size_t sb_length;
	struct ion_handle *ihandle;	/* Retrieve phy addr */
	wait_queue_head_t rcv_req_wq;
	/* rcv_req_flag: 0: ready and empty; 1: received req */
	int rcv_req_flag;
	int send_resp_flag;
	bool listener_in_use;
	/* wq for thread blocked on this listener */
	wait_queue_head_t listener_block_app_wq;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	int abort;
	bool unregister_pending;
};

struct qseecom_unregister_pending_list {
	struct list_head list;
	struct qseecom_dev_handle *data;
};

struct qseecom_registered_app_list {
	struct list_head list;
	u32 app_id;
	u32 ref_cnt;
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	bool app_blocked;
	u32 check_block;
	u32 blocked_on_listener_id;
};

struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};

struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
	unsigned int unit_num;
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;
	uint32_t type;
};

struct ce_hw_usage_info {
	uint32_t qsee_ce_hw_instance;
	uint32_t num_fde;
	struct qseecom_ce_info_use *fde;
	uint32_t num_pfe;
	struct qseecom_ce_info_use *pfe;
};

struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;
};

struct qseecom_control {
	struct ion_client *ion_clnt;	/* Ion client */
	struct list_head registered_listener_list_head;

	struct list_head registered_app_list_head;
	spinlock_t registered_app_list_lock;

	struct list_head registered_kclient_list_head;
	spinlock_t registered_kclient_list_lock;

	wait_queue_head_t send_resp_wq;
	int send_resp_flag;

	uint32_t qseos_version;
	uint32_t qsee_version;
	struct device *pdev;
	bool whitelist_support;
	bool commonlib_loaded;
	bool commonlib64_loaded;
	struct ce_hw_usage_info ce_info;

	int qsee_bw_count;
	int qsee_sfpb_bw_count;

	uint32_t qsee_perf_client;
	struct qseecom_clk qsee;
	struct qseecom_clk ce_drv;

	bool support_bus_scaling;
	bool support_fde;
	bool support_pfe;
	bool fde_key_size;
	uint32_t cumulative_mode;
	enum qseecom_bandwidth_request_mode current_mode;
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;
	bool enable_key_wrap_in_ks;

	uint32_t app_block_ref_cnt;
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;
	int is_apps_region_protected;
	bool smcinvoke_support;

	struct list_head unregister_lsnr_pending_list_head;
	wait_queue_head_t register_lsnr_pending_wq;
	struct task_struct *unregister_lsnr_kthread_task;
	wait_queue_head_t unregister_lsnr_kthread_wq;
	atomic_t unregister_lsnr_kthread_state;
};

struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;
	size_t size;
	void *vbase;
	dma_addr_t pbase;
};

struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};

struct qseecom_client_handle {
	u32 app_id;
	u8 *sb_virt;
	phys_addr_t sb_phys;
	unsigned long user_virt_sb_base;
	size_t sb_length;
	struct ion_handle *ihandle;	/* Retrieve phy addr */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
	bool from_smcinvoke;
};

struct qseecom_listener_handle {
	u32 id;
	bool unregister_pending;
	bool release_called;
};

static struct qseecom_control qseecom;

struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;
		struct qseecom_listener_handle listener;
	};
	bool released;
	int abort;
	wait_queue_head_t abort_wq;
	atomic_t ioctl_count;
	bool perf_enabled;
	bool fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	bool use_legacy_cmd;
};

struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};

struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};

static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},

	{
		.desc = "Full Disk Encryption",
	},

	{
		.desc = "Per File Encryption",
	},

	{
		.desc = "UFS ICE Full Disk Encryption",
	},

	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};

/* Function proto types */
static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name);
static int qseecom_enable_ice_setup(int usage);
static int qseecom_disable_ice_setup(int usage);
static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
				void __user *argp);
static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
				void __user *argp);
static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
				void __user *argp);

static int get_qseecom_keymaster_status(char *str)
{
	get_option(&str, &qseecom.is_apps_region_protected);
	return 1;
}
__setup("androidboot.keymaster=", get_qseecom_keymaster_status);

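/*
 * For reference (hypothetical value): a kernel command line containing
 * "androidboot.keymaster=1" is parsed by the __setup handler above via
 * get_option(), leaving qseecom.is_apps_region_protected set to 1.
 */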

#define QSEECOM_SCM_EBUSY_WAIT_MS 30
#define QSEECOM_SCM_EBUSY_MAX_RETRY 67

static int __qseecom_scm_call2_locked(uint32_t smc_id, struct scm_desc *desc)
{
	int ret = 0;
	int retry_count = 0;

	do {
		ret = scm_call2_noretry(smc_id, desc);
		if (ret == -EBUSY) {
			mutex_unlock(&app_access_lock);
			msleep(QSEECOM_SCM_EBUSY_WAIT_MS);
			mutex_lock(&app_access_lock);
		}
		if (retry_count == 33)
			pr_warn("secure world has been busy for 1 second!\n");
	} while (ret == -EBUSY &&
			(retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY));
	return ret;
}

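/*
 * Illustrative sketch (hypothetical helper, not wired into the driver):
 * __qseecom_scm_call2_locked() expects to be entered with app_access_lock
 * held, since it drops and re-takes that mutex around each 30 ms -EBUSY
 * back-off; with up to 67 retries the secure world gets roughly two
 * seconds before the call gives up.
 */
static inline int __qseecom_scm_call2_example(uint32_t smc_id,
		struct scm_desc *desc)
{
	int ret;

	mutex_lock(&app_access_lock);
	ret = __qseecom_scm_call2_locked(smc_id, desc);
	mutex_unlock(&app_access_lock);
	return ret;
}
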
static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
			const void *req_buf, void *resp_buf)
{
	int ret = 0;
	uint32_t smc_id = 0;
	uint32_t qseos_cmd_id = 0;
	struct scm_desc desc = {0};
	struct qseecom_command_scm_resp *scm_resp = NULL;

	if (!req_buf || !resp_buf) {
		pr_err("Invalid buffer pointer\n");
		return -EINVAL;
	}
	qseos_cmd_id = *(uint32_t *)req_buf;
	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;

	switch (svc_id) {
	case 6: {
		if (tz_cmd_id == 3) {
			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
			desc.args[0] = *(uint32_t *)req_buf;
		} else {
			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
				svc_id, tz_cmd_id);
			return -EINVAL;
		}
		ret = __qseecom_scm_call2_locked(smc_id, &desc);
		break;
	}
	case SCM_SVC_ES: {
		switch (tz_cmd_id) {
		case SCM_SAVE_PARTITION_HASH_ID: {
			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
			struct qseecom_save_partition_hash_req *p_hash_req =
				(struct qseecom_save_partition_hash_req *)
				req_buf;
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, p_hash_req->digest,
				SHA256_DIGEST_LENGTH);
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
			desc.args[0] = p_hash_req->partition_id;
			desc.args[1] = virt_to_phys(tzbuf);
			desc.args[2] = SHA256_DIGEST_LENGTH;
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		default: {
			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
				tz_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (tz_cmd_id) */
		break;
	} /* end of case SCM_SVC_ES */
	case SCM_SVC_TZSCHEDULER: {
		switch (qseos_cmd_id) {
		case QSEOS_APP_START_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_START_ID;
			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_app_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_APP_SHUTDOWN_COMMAND: {
			struct qseecom_unload_app_ireq *req;

			req = (struct qseecom_unload_app_ireq *)req_buf;
			smc_id = TZ_OS_APP_SHUTDOWN_ID;
			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_LOOKUP_COMMAND: {
			struct qseecom_check_app_ireq *req;
			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			req = (struct qseecom_check_app_ireq *)req_buf;
			pr_debug("Lookup app_name = %s\n", req->app_name);
			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_APP_LOOKUP_ID;
			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = strlen(req->app_name);
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_APP_REGION_NOTIFICATION: {
			struct qsee_apps_region_info_ireq *req;
			struct qsee_apps_region_info_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
			desc.arginfo =
				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qsee_apps_region_info_ireq *)
					req_buf;
				desc.args[0] = req->addr;
				desc.args[1] = req->size;
			} else {
				req_64bit =
					(struct qsee_apps_region_info_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->addr;
				desc.args[1] = req_64bit->size;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
			struct qseecom_load_lib_image_ireq *req;
			struct qseecom_load_lib_image_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_lib_image_ireq *)
					req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_lib_image_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_REGISTER_LISTENER: {
			struct qseecom_register_listener_ireq *req;
			struct qseecom_register_listener_64bit_ireq *req_64bit;

			desc.arginfo =
				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_register_listener_ireq *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->sb_ptr;
				desc.args[2] = req->sb_len;
			} else {
				req_64bit =
					(struct qseecom_register_listener_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->listener_id;
				desc.args[1] = req_64bit->sb_ptr;
				desc.args[2] = req_64bit->sb_len;
			}
			qseecom.smcinvoke_support = true;
			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			if (ret == -EIO) {
				/* smcinvoke is not supported */
				qseecom.smcinvoke_support = false;
				smc_id = TZ_OS_REGISTER_LISTENER_ID;
				ret = __qseecom_scm_call2_locked(smc_id, &desc);
			}
			break;
		}
		case QSEOS_DEREGISTER_LISTENER: {
			struct qseecom_unregister_listener_ireq *req;

			req = (struct qseecom_unregister_listener_ireq *)
				req_buf;
			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
			struct qseecom_client_listener_data_irsp *req;

			req = (struct qseecom_client_listener_data_irsp *)
				req_buf;
			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
			desc.arginfo =
				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			desc.args[1] = req->status;
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
			struct qseecom_client_listener_data_irsp *req;
			struct qseecom_client_listener_data_64bit_irsp *req_64;

			smc_id =
				TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
			desc.arginfo =
				TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req =
					(struct qseecom_client_listener_data_irsp *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->status;
				desc.args[2] = req->sglistinfo_ptr;
				desc.args[3] = req->sglistinfo_len;
			} else {
				req_64 =
					(struct qseecom_client_listener_data_64bit_irsp *)
					req_buf;
				desc.args[0] = req_64->listener_id;
				desc.args[1] = req_64->status;
				desc.args[2] = req_64->sglistinfo_ptr;
				desc.args[3] = req_64->sglistinfo_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_app_64bit_ireq *)req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}

		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
			} else {
				req_64bit =
					(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
			desc.arginfo =
				TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit =
					(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
			struct qseecom_client_send_service_ireq *req;

			req = (struct qseecom_client_send_service_ireq *)
				req_buf;
			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
			desc.args[0] = req->key_type;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_ERASE_COMMAND: {
			smc_id = TZ_OS_RPMB_ERASE_ID;
			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_GENERATE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_GEN_KEY_ID;
			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_DELETE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_DEL_KEY_ID;
			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_SET_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_select_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_select_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_UPDATE_KEY_USERINFO: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
			desc.arginfo =
				TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
			desc.arginfo =
				TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_CLOSE_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_REQUEST_CANCELLATION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
			desc.arginfo =
				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
			struct qseecom_continue_blocked_request_ireq *req =
				(struct qseecom_continue_blocked_request_ireq *)
				req_buf;
			if (qseecom.smcinvoke_support)
				smc_id =
					TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
			else
				smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
			desc.arginfo =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
			desc.args[0] = req->app_or_session_id;
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		default: {
			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
				qseos_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (qsee_cmd_id) */
		break;
	} /* end of case SCM_SVC_TZSCHEDULER */
	default: {
		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
			svc_id);
		ret = -EINVAL;
		break;
	}
	} /* end of switch svc_id */
	scm_resp->result = desc.ret[0];
	scm_resp->resp_type = desc.ret[1];
	scm_resp->data = desc.ret[2];
	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
		scm_resp->result, scm_resp->resp_type, scm_resp->data);
	return ret;
}


static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
		size_t cmd_len, void *resp_buf, size_t resp_len)
{
	if (!is_scm_armv8())
		return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
				resp_buf, resp_len);
	else
		return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
}

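/*
 * Illustrative sketch (hypothetical helper, not wired into the driver):
 * the calling convention of qseecom_scm_call(), shown with the app-lookup
 * command handled in qseecom_scm_call2() above. The request/response
 * field names are assumed to follow qseecomi.h as used above; how the
 * returned result/resp_type/data fields are interpreted is command
 * specific and left out here.
 */
static inline int __qseecom_example_lookup_app(const char *app_name,
		struct qseecom_command_scm_resp *resp)
{
	struct qseecom_check_app_ireq req;

	memset(&req, 0, sizeof(req));
	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
	strlcpy(req.app_name, app_name, MAX_APP_NAME_SIZE);

	/* resp->result/resp_type/data are filled from desc.ret[] above */
	return qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req, sizeof(req),
				resp, sizeof(*resp));
}
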
static struct qseecom_registered_listener_list *__qseecom_find_svc(
		int32_t listener_id)
{
	struct qseecom_registered_listener_list *entry = NULL;

	list_for_each_entry(entry,
			&qseecom.registered_listener_list_head, list) {
		if (entry->svc.listener_id == listener_id)
			break;
	}
	if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
		pr_debug("Service id: %u is not found\n", listener_id);
		return NULL;
	}

	return entry;
}

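/*
 * Illustrative sketch (hypothetical helper, not wired into the driver):
 * __qseecom_find_svc() walks the global registered-listener list, which
 * this driver protects with listener_access_lock (the pending-unregister
 * worker below takes it before calling this helper), so a lookup would
 * be bracketed the same way:
 */
static inline bool __qseecom_listener_registered_example(int32_t listener_id)
{
	bool found;

	mutex_lock(&listener_access_lock);
	found = (__qseecom_find_svc(listener_id) != NULL);
	mutex_unlock(&listener_access_lock);
	return found;
}
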
static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
				struct qseecom_dev_handle *handle,
				struct qseecom_register_listener_req *listener)
{
	int ret = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_register_listener_64bit_ireq req_64bit;
	struct qseecom_command_scm_resp resp;
	ion_phys_addr_t pa;
	void *cmd_buf = NULL;
	size_t cmd_len;

	/* Get the handle of the shared fd */
	svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					listener->ifd_data_fd);
	if (IS_ERR_OR_NULL(svc->ihandle)) {
		pr_err("Ion client could not retrieve the handle\n");
		return -ENOMEM;
	}

	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
	if (ret) {
		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
			ret);
		return ret;
	}
	/* Populate the structure for sending scm call to load image */
	svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
	if (IS_ERR_OR_NULL(svc->sb_virt)) {
		pr_err("ION memory mapping for listener shared buffer failed\n");
		return -ENOMEM;
	}
	svc->sb_phys = (phys_addr_t)pa;

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
		req.listener_id = svc->svc.listener_id;
		req.sb_len = svc->sb_length;
		req.sb_ptr = (uint32_t)svc->sb_phys;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(struct qseecom_register_listener_ireq);
	} else {
		req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
		req_64bit.listener_id = svc->svc.listener_id;
		req_64bit.sb_len = svc->sb_length;
		req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
		cmd_buf = (void *)&req_64bit;
		cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
	}

	resp.result = QSEOS_RESULT_INCOMPLETE;

	mutex_unlock(&listener_access_lock);
	mutex_lock(&app_access_lock);
	__qseecom_reentrancy_check_if_no_app_blocked(
				TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID);
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
				&resp, sizeof(resp));
	mutex_unlock(&app_access_lock);
	mutex_lock(&listener_access_lock);
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		return -EINVAL;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Error SB registration req: resp.result = %d\n",
			resp.result);
		return -EPERM;
	}
	return 0;
}

static int qseecom_register_listener(struct qseecom_dev_handle *data,
				void __user *argp)
{
	int ret = 0;
	struct qseecom_register_listener_req rcvd_lstnr;
	struct qseecom_registered_listener_list *new_entry;
	struct qseecom_registered_listener_list *ptr_svc;

	ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}
	if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
			rcvd_lstnr.sb_size))
		return -EFAULT;

	data->listener.id = rcvd_lstnr.listener_id;

	ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
	if (ptr_svc) {
		if (ptr_svc->unregister_pending == false) {
			pr_err("Service %d is not unique\n",
				rcvd_lstnr.listener_id);
			data->released = true;
			return -EBUSY;
		} else {
			/* wait until listener is unregistered */
			pr_debug("register %d has to wait\n",
				rcvd_lstnr.listener_id);
			mutex_unlock(&listener_access_lock);
			ret = wait_event_interruptible(
				qseecom.register_lsnr_pending_wq,
				list_empty(
				&qseecom.unregister_lsnr_pending_list_head));
			if (ret) {
				pr_err("interrupted register_pending_wq %d\n",
					rcvd_lstnr.listener_id);
				mutex_lock(&listener_access_lock);
				return -ERESTARTSYS;
			}
			mutex_lock(&listener_access_lock);
		}
	}
	new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;
	memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
	new_entry->rcv_req_flag = 0;

	new_entry->svc.listener_id = rcvd_lstnr.listener_id;
	new_entry->sb_length = rcvd_lstnr.sb_size;
	new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
	if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
		pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
			rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
		kzfree(new_entry);
		return -ENOMEM;
	}

	init_waitqueue_head(&new_entry->rcv_req_wq);
	init_waitqueue_head(&new_entry->listener_block_app_wq);
	new_entry->send_resp_flag = 0;
	new_entry->listener_in_use = false;
	list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);

	pr_debug("Service %d is registered\n", rcvd_lstnr.listener_id);
	return ret;
}

static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
			struct qseecom_registered_listener_list *ptr_svc)
{
	int ret = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL;	/* Retrieve phy addr */

	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	mutex_unlock(&listener_access_lock);
	mutex_lock(&app_access_lock);
	__qseecom_reentrancy_check_if_no_app_blocked(
				TZ_OS_DEREGISTER_LISTENER_ID);
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(req), &resp, sizeof(resp));
	mutex_unlock(&app_access_lock);
	mutex_lock(&listener_access_lock);
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
			ret, data->listener.id);
		if (ret == -EBUSY)
			return ret;
		goto exit;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
			resp.result, data->listener.id);
		ret = -EPERM;
		goto exit;
	}

	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_interruptible(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
		}
	}

exit:
	if (ptr_svc->sb_virt) {
		ihandle = ptr_svc->ihandle;
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	list_del(&ptr_svc->list);
	kzfree(ptr_svc);

	data->released = true;
	pr_debug("Service %d is unregistered\n", data->listener.id);
	return ret;
}

static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_unregister_pending_list *entry = NULL;

	ptr_svc = __qseecom_find_svc(data->listener.id);
	if (!ptr_svc) {
		pr_err("Unregister invalid listener ID %d\n", data->listener.id);
		return -ENODATA;
	}
	/* stop CA thread waiting for listener response */
	ptr_svc->abort = 1;
	wake_up_interruptible_all(&qseecom.send_resp_wq);

	/* stop listener thread waiting for listener request */
	data->abort = 1;
	wake_up_all(&ptr_svc->rcv_req_wq);

	/* return directly if pending */
	if (ptr_svc->unregister_pending)
		return 0;

	/* add unregistration into pending list */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	entry->data = data;
	list_add_tail(&entry->list,
			&qseecom.unregister_lsnr_pending_list_head);
	ptr_svc->unregister_pending = true;
	pr_debug("unregister %d pending\n", data->listener.id);
	return 0;
}

static void __qseecom_processing_pending_lsnr_unregister(void)
{
	struct qseecom_unregister_pending_list *entry = NULL;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct list_head *pos;
	int ret = 0;

	mutex_lock(&listener_access_lock);
	while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
		pos = qseecom.unregister_lsnr_pending_list_head.next;
		entry = list_entry(pos,
			struct qseecom_unregister_pending_list, list);
		if (entry && entry->data) {
			pr_debug("process pending unregister %d\n",
				entry->data->listener.id);
			/* don't process if qseecom_release is not called */
			if (!entry->data->listener.release_called)
				break;
			ptr_svc = __qseecom_find_svc(
					entry->data->listener.id);
			if (ptr_svc) {
				ret = __qseecom_unregister_listener(
						entry->data, ptr_svc);
				if (ret == -EBUSY) {
					pr_debug("unregister %d pending again\n",
						entry->data->listener.id);
					mutex_unlock(&listener_access_lock);
					return;
				}
			} else
				pr_err("invalid listener %d\n",
					entry->data->listener.id);
			kzfree(entry->data);
		}
		list_del(pos);
		kzfree(entry);
	}
	mutex_unlock(&listener_access_lock);
	wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
}

static void __wakeup_unregister_listener_kthread(void)
{
	atomic_set(&qseecom.unregister_lsnr_kthread_state,
				LSNR_UNREG_KT_WAKEUP);
	wake_up_interruptible(&qseecom.unregister_lsnr_kthread_wq);
}

static int __qseecom_unregister_listener_kthread_func(void *data)
{
	while (!kthread_should_stop()) {
		wait_event_interruptible(
			qseecom.unregister_lsnr_kthread_wq,
			atomic_read(&qseecom.unregister_lsnr_kthread_state)
				== LSNR_UNREG_KT_WAKEUP);
		pr_debug("kthread to unregister listener is called %d\n",
			atomic_read(&qseecom.unregister_lsnr_kthread_state));
		__qseecom_processing_pending_lsnr_unregister();
		atomic_set(&qseecom.unregister_lsnr_kthread_state,
				LSNR_UNREG_KT_SLEEP);
	}
	pr_warn("kthread to unregister listener stopped\n");
	return 0;
}

static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
					ret, mode);
		}
	}

	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
				ret, mode);
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		qseecom.current_mode = mode;
	}
	return ret;
}

static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
		qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}

static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}

static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
{
	struct qseecom_clk *qclk;
	int ret = 0;

	mutex_lock(&clk_access_lock);
	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	else
		qclk = &qseecom.ce_drv;

	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt--;
	} else {
		pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
		ret = -EINVAL;
	}

	mutex_unlock(&clk_access_lock);
	return ret;
}

static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
{
	int32_t ret = 0;
	int32_t request_mode = INACTIVE;

	mutex_lock(&qsee_bw_mutex);
	if (mode == 0) {
		if (qseecom.cumulative_mode > MEDIUM)
			request_mode = HIGH;
		else
			request_mode = qseecom.cumulative_mode;
	} else {
		request_mode = mode;
	}

	ret = __qseecom_set_msm_bus_request(request_mode);
	if (ret) {
		pr_err("set msm bus request failed (%d),request_mode (%d)\n",
			ret, request_mode);
		goto err_scale_timer;
	}

	if (qseecom.timer_running) {
		ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
		if (ret) {
			pr_err("Failed to decrease clk ref count.\n");
			goto err_scale_timer;
		}
		del_timer_sync(&(qseecom.bw_scale_down_timer));
		qseecom.timer_running = false;
	}
err_scale_timer:
	mutex_unlock(&qsee_bw_mutex);
	return ret;
}


static int qseecom_unregister_bus_bandwidth_needs(
					struct qseecom_dev_handle *data)
{
	int32_t ret = 0;

	qseecom.cumulative_mode -= data->mode;
	data->mode = INACTIVE;

	return ret;
}

static int __qseecom_register_bus_bandwidth_needs(
		struct qseecom_dev_handle *data, uint32_t request_mode)
{
	int32_t ret = 0;

	if (data->mode == INACTIVE) {
		qseecom.cumulative_mode += request_mode;
		data->mode = request_mode;
	} else {
		if (data->mode != request_mode) {
			qseecom.cumulative_mode -= data->mode;
			qseecom.cumulative_mode += request_mode;
			data->mode = request_mode;
		}
	}
	return ret;
}

1577static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1578{
1579 int ret = 0;
1580
1581 ret = qsee_vote_for_clock(data, CLK_DFAB);
1582 if (ret) {
1583 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1584 goto perf_enable_exit;
1585 }
1586 ret = qsee_vote_for_clock(data, CLK_SFPB);
1587 if (ret) {
1588 qsee_disable_clock_vote(data, CLK_DFAB);
1589 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1590 goto perf_enable_exit;
1591 }
1592
1593perf_enable_exit:
1594 return ret;
1595}
1596
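/*
 * Handler behind the client's bandwidth-scaling request: the requested
 * mode is copied from user space and validated (anything above HIGH is
 * rejected). With bus scaling enabled the request only updates the
 * client's book-keeping; without it, DFAB/SFPB clock votes are taken or
 * released for the client directly.
 */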
1597static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1598 void __user *argp)
1599{
1600 int32_t ret = 0;
1601 int32_t req_mode;
1602
1603 if (qseecom.no_clock_support)
1604 return 0;
1605
1606 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1607 if (ret) {
1608 pr_err("copy_from_user failed\n");
1609 return ret;
1610 }
1611 if (req_mode > HIGH) {
1612 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1613 return -EINVAL;
1614 }
1615
1616 /*
1617 * Register bus bandwidth needs if bus scaling feature is enabled;
1618 * otherwise, qseecom enable/disable clocks for the client directly.
1619 */
1620 if (qseecom.support_bus_scaling) {
1621 mutex_lock(&qsee_bw_mutex);
1622 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1623 mutex_unlock(&qsee_bw_mutex);
1624 } else {
1625 pr_debug("Bus scaling feature is NOT enabled\n");
1626 pr_debug("request bandwidth mode %d for the client\n",
1627 req_mode);
1628 if (req_mode != INACTIVE) {
1629 ret = qseecom_perf_enable(data);
1630 if (ret)
1631 pr_err("Failed to vote for clock with err %d\n",
1632 ret);
1633 } else {
1634 qsee_disable_clock_vote(data, CLK_DFAB);
1635 qsee_disable_clock_vote(data, CLK_SFPB);
1636 }
1637 }
1638 return ret;
1639}
1640
1641static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1642{
1643 if (qseecom.no_clock_support)
1644 return;
1645
1646 mutex_lock(&qsee_bw_mutex);
1647 qseecom.bw_scale_down_timer.expires = jiffies +
1648 msecs_to_jiffies(duration);
1649 mod_timer(&(qseecom.bw_scale_down_timer),
1650 qseecom.bw_scale_down_timer.expires);
1651 qseecom.timer_running = true;
1652 mutex_unlock(&qsee_bw_mutex);
1653}
1654
1655static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1656{
1657 if (!qseecom.support_bus_scaling)
1658 qsee_disable_clock_vote(data, CLK_SFPB);
1659 else
1660 __qseecom_add_bw_scale_down_timer(
1661 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1662}
1663
1664static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1665{
1666 int ret = 0;
1667
1668 if (qseecom.support_bus_scaling) {
1669 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1670 if (ret)
1671 pr_err("Failed to set bw MEDIUM.\n");
1672 } else {
1673 ret = qsee_vote_for_clock(data, CLK_SFPB);
1674 if (ret)
1675 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1676 }
1677 return ret;
1678}
1679
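/*
 * Registers the client's shared buffer: the ION fd passed from user space
 * is imported, its physical address and kernel mapping are resolved, and
 * the user, kernel and physical views of the buffer are recorded in the
 * client handle. The ION allocation must be at least as large as the
 * requested sb_len.
 */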
1680static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1681 void __user *argp)
1682{
1683 ion_phys_addr_t pa;
1684 int32_t ret;
1685 struct qseecom_set_sb_mem_param_req req;
1686 size_t len;
1687
1688 /* Copy the relevant information needed for loading the image */
1689 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1690 return -EFAULT;
1691
1692 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1693 (req.sb_len == 0)) {
1694 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1695 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1696 return -EFAULT;
1697 }
1698 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1699 req.sb_len))
1700 return -EFAULT;
1701
1702 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001703 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001704 req.ifd_data_fd);
1705 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1706 pr_err("Ion client could not retrieve the handle\n");
1707 return -ENOMEM;
1708 }
1709 /* Get the physical address of the ION BUF */
1710 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1711 if (ret) {
1712
1713 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1714 ret);
1715 return ret;
1716 }
1717
1718 if (len < req.sb_len) {
1719 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1720 req.sb_len, len);
1721 return -EINVAL;
1722 }
1723 /* Populate the structure for sending scm call to load image */
1724 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1725 data->client.ihandle);
1726 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1727 pr_err("ION memory mapping for client shared buf failed\n");
1728 return -ENOMEM;
1729 }
1730 data->client.sb_phys = (phys_addr_t)pa;
1731 data->client.sb_length = req.sb_len;
1732 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1733 return 0;
1734}
1735
Zhen Kong26e62742018-05-04 17:19:06 -07001736static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1737 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001738{
1739 int ret;
1740
1741 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001742 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001743}
1744
1745static int __qseecom_reentrancy_listener_has_sent_rsp(
1746 struct qseecom_dev_handle *data,
1747 struct qseecom_registered_listener_list *ptr_svc)
1748{
1749 int ret;
1750
1751 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001752 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001753}
1754
1755static void __qseecom_clean_listener_sglistinfo(
1756 struct qseecom_registered_listener_list *ptr_svc)
1757{
1758 if (ptr_svc->sglist_cnt) {
1759 memset(ptr_svc->sglistinfo_ptr, 0,
1760 SGLISTINFO_TABLE_SIZE);
1761 ptr_svc->sglist_cnt = 0;
1762 }
1763}
1764
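/*
 * Drives a QSEOS_RESULT_INCOMPLETE conversation: TZ is asking for a
 * listener service, identified by resp->data. The matching listener is
 * woken, the caller sleeps (with signals blocked) until the listener
 * posts its response or an abort is flagged, and the response is handed
 * back to TZ with a LISTENER_DATA_RSP command (the whitelist variant when
 * whitelist support and a scatter-gather table are present). The loop
 * repeats until TZ returns something other than INCOMPLETE.
 */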
1765static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1766 struct qseecom_command_scm_resp *resp)
1767{
1768 int ret = 0;
1769 int rc = 0;
1770 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07001771 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
1772 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
1773 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001774 struct qseecom_registered_listener_list *ptr_svc = NULL;
1775 sigset_t new_sigset;
1776 sigset_t old_sigset;
1777 uint32_t status;
1778 void *cmd_buf = NULL;
1779 size_t cmd_len;
1780 struct sglist_info *table = NULL;
1781
Zhen Kongbcdeda22018-11-16 13:50:51 -08001782 qseecom.app_block_ref_cnt++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001783 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1784 lstnr = resp->data;
1785 /*
1786		 * Wake up blocking listener service with the lstnr id
1787 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08001788 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001789 list_for_each_entry(ptr_svc,
1790 &qseecom.registered_listener_list_head, list) {
1791 if (ptr_svc->svc.listener_id == lstnr) {
1792 ptr_svc->listener_in_use = true;
1793 ptr_svc->rcv_req_flag = 1;
1794 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1795 break;
1796 }
1797 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001798
1799 if (ptr_svc == NULL) {
1800 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07001801 rc = -EINVAL;
1802 status = QSEOS_RESULT_FAILURE;
1803 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001804 }
1805
1806 if (!ptr_svc->ihandle) {
1807 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07001808 rc = -EINVAL;
1809 status = QSEOS_RESULT_FAILURE;
1810 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001811 }
1812
1813 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07001814 pr_err("Service %d does not exist\n",
1815 lstnr);
1816 rc = -ERESTARTSYS;
1817 ptr_svc = NULL;
1818 status = QSEOS_RESULT_FAILURE;
1819 goto err_resp;
1820 }
1821
1822 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001823 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07001824 lstnr, ptr_svc->abort);
1825 rc = -ENODEV;
1826 status = QSEOS_RESULT_FAILURE;
1827 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001828 }
Zhen Kong25731112018-09-20 13:10:03 -07001829
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001830 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1831
1832 /* initialize the new signal mask with all signals*/
1833 sigfillset(&new_sigset);
1834 /* block all signals */
1835 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1836
Zhen Kongbcdeda22018-11-16 13:50:51 -08001837 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001838 do {
1839 /*
1840 * When reentrancy is not supported, check global
1841 * send_resp_flag; otherwise, check this listener's
1842 * send_resp_flag.
1843 */
1844 if (!qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301845 !wait_event_interruptible(qseecom.send_resp_wq,
Zhen Kong26e62742018-05-04 17:19:06 -07001846 __qseecom_listener_has_sent_rsp(
1847 data, ptr_svc))) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001848 break;
1849 }
1850
1851 if (qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301852 !wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001853 __qseecom_reentrancy_listener_has_sent_rsp(
1854 data, ptr_svc))) {
1855 break;
1856 }
1857 } while (1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001858 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001859 /* restore signal mask */
1860 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07001861 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001862			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
1863 data->client.app_id, lstnr, ret);
1864 rc = -ENODEV;
1865 status = QSEOS_RESULT_FAILURE;
1866 } else {
1867 status = QSEOS_RESULT_SUCCESS;
1868 }
Zhen Kong26e62742018-05-04 17:19:06 -07001869err_resp:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001870 qseecom.send_resp_flag = 0;
Zhen Kong7d500032018-08-06 16:58:31 -07001871 if (ptr_svc) {
1872 ptr_svc->send_resp_flag = 0;
1873 table = ptr_svc->sglistinfo_ptr;
1874 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001875 if (qseecom.qsee_version < QSEE_VERSION_40) {
1876 send_data_rsp.listener_id = lstnr;
1877 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001878 if (table) {
1879 send_data_rsp.sglistinfo_ptr =
1880 (uint32_t)virt_to_phys(table);
1881 send_data_rsp.sglistinfo_len =
1882 SGLISTINFO_TABLE_SIZE;
1883 dmac_flush_range((void *)table,
1884 (void *)table + SGLISTINFO_TABLE_SIZE);
1885 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001886 cmd_buf = (void *)&send_data_rsp;
1887 cmd_len = sizeof(send_data_rsp);
1888 } else {
1889 send_data_rsp_64bit.listener_id = lstnr;
1890 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001891 if (table) {
1892 send_data_rsp_64bit.sglistinfo_ptr =
1893 virt_to_phys(table);
1894 send_data_rsp_64bit.sglistinfo_len =
1895 SGLISTINFO_TABLE_SIZE;
1896 dmac_flush_range((void *)table,
1897 (void *)table + SGLISTINFO_TABLE_SIZE);
1898 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001899 cmd_buf = (void *)&send_data_rsp_64bit;
1900 cmd_len = sizeof(send_data_rsp_64bit);
1901 }
Zhen Kong7d500032018-08-06 16:58:31 -07001902 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001903 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1904 else
1905 *(uint32_t *)cmd_buf =
1906 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07001907 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001908 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1909 ptr_svc->ihandle,
1910 ptr_svc->sb_virt, ptr_svc->sb_length,
1911 ION_IOC_CLEAN_INV_CACHES);
1912 if (ret) {
1913 pr_err("cache operation failed %d\n", ret);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001914 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001915 }
1916 }
1917
1918 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1919 ret = __qseecom_enable_clk(CLK_QSEE);
1920 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08001921 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001922 }
1923
1924 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1925 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07001926 if (ptr_svc) {
1927 ptr_svc->listener_in_use = false;
1928 __qseecom_clean_listener_sglistinfo(ptr_svc);
1929 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001930 if (ret) {
1931 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1932 ret, data->client.app_id);
1933 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1934 __qseecom_disable_clk(CLK_QSEE);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001935 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001936 }
Zhen Kong26e62742018-05-04 17:19:06 -07001937 pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
1938 status, resp->result, data->client.app_id, lstnr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001939 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1940 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1941 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1942 resp->result, data->client.app_id, lstnr);
1943 ret = -EINVAL;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001944 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001945 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001946exit:
1947 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001948 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1949 __qseecom_disable_clk(CLK_QSEE);
1950
1951 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001952 qseecom.app_block_ref_cnt--;
Zhen Kongcc580932019-04-23 22:16:56 -07001953 wake_up_interruptible_all(&qseecom.app_block_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001954 if (rc)
1955 return rc;
1956
1957 return ret;
1958}
1959
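/*
 * Handles QSEOS_RESULT_BLOCKED_ON_LISTENER for a reentrant QSEE: the TA
 * session identified by resp is parked until the listener it is blocked
 * on becomes free, after which a CONTINUE_BLOCKED_REQ command resumes it
 * in TZ (per-session when smcinvoke is supported, per-app otherwise, with
 * a legacy retry on failure). The loop repeats until TZ stops reporting
 * BLOCKED_ON_LISTENER; anything other than INCOMPLETE at that point is
 * treated as an error.
 */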
Zhen Konga91aaf02018-02-02 17:21:04 -08001960static int __qseecom_process_reentrancy_blocked_on_listener(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001961 struct qseecom_command_scm_resp *resp,
1962 struct qseecom_registered_app_list *ptr_app,
1963 struct qseecom_dev_handle *data)
1964{
1965 struct qseecom_registered_listener_list *list_ptr;
1966 int ret = 0;
1967 struct qseecom_continue_blocked_request_ireq ireq;
1968 struct qseecom_command_scm_resp continue_resp;
Zhen Konga91aaf02018-02-02 17:21:04 -08001969 unsigned int session_id;
Zhen Kong3d1d92f2018-02-02 17:21:04 -08001970 sigset_t new_sigset;
1971 sigset_t old_sigset;
Zhen Konga91aaf02018-02-02 17:21:04 -08001972 unsigned long flags;
1973 bool found_app = false;
Zhen Kong0ea975d2019-03-12 14:40:24 -07001974 struct qseecom_registered_app_list dummy_app_entry = { {NULL} };
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001975
1976 if (!resp || !data) {
1977 pr_err("invalid resp or data pointer\n");
1978 ret = -EINVAL;
1979 goto exit;
1980 }
1981
1982 /* find app_id & img_name from list */
Zhen Kong0ea975d2019-03-12 14:40:24 -07001983 if (!ptr_app) {
1984 if (data->client.from_smcinvoke) {
1985 pr_debug("This request is from smcinvoke\n");
1986 ptr_app = &dummy_app_entry;
1987 ptr_app->app_id = data->client.app_id;
1988 } else {
1989 spin_lock_irqsave(&qseecom.registered_app_list_lock,
1990 flags);
1991 list_for_each_entry(ptr_app,
1992 &qseecom.registered_app_list_head, list) {
1993 if ((ptr_app->app_id == data->client.app_id) &&
1994 (!strcmp(ptr_app->app_name,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001995 data->client.app_name))) {
Zhen Kong0ea975d2019-03-12 14:40:24 -07001996 found_app = true;
1997 break;
1998 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001999 }
Zhen Kong0ea975d2019-03-12 14:40:24 -07002000 spin_unlock_irqrestore(
2001 &qseecom.registered_app_list_lock, flags);
2002 if (!found_app) {
2003 pr_err("app_id %d (%s) is not found\n",
2004 data->client.app_id,
2005 (char *)data->client.app_name);
2006 ret = -ENOENT;
2007 goto exit;
2008 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002009 }
2010 }
2011
Zhen Kongd8cc0052017-11-13 15:13:31 -08002012 do {
Zhen Konga91aaf02018-02-02 17:21:04 -08002013 session_id = resp->resp_type;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002014 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002015 list_ptr = __qseecom_find_svc(resp->data);
2016 if (!list_ptr) {
2017 pr_err("Invalid listener ID %d\n", resp->data);
2018 ret = -ENODATA;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002019 mutex_unlock(&listener_access_lock);
Zhen Konge7f525f2017-12-01 18:26:25 -08002020 goto exit;
2021 }
Zhen Konga91aaf02018-02-02 17:21:04 -08002022 ptr_app->blocked_on_listener_id = resp->data;
2023
2024 pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
2025 resp->data, list_ptr->listener_in_use,
2026 session_id, data->client.app_id);
2027
2028 /* sleep until listener is available */
2029 sigfillset(&new_sigset);
2030 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2031
2032 do {
2033 qseecom.app_block_ref_cnt++;
2034 ptr_app->app_blocked = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002035 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002036 mutex_unlock(&app_access_lock);
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302037 wait_event_interruptible(
Zhen Konga91aaf02018-02-02 17:21:04 -08002038 list_ptr->listener_block_app_wq,
2039 !list_ptr->listener_in_use);
2040 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002041 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002042 ptr_app->app_blocked = false;
2043 qseecom.app_block_ref_cnt--;
2044 } while (list_ptr->listener_in_use);
2045
2046 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2047
2048 ptr_app->blocked_on_listener_id = 0;
2049 pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
2050 resp->data, session_id, data->client.app_id);
2051
2052 /* notify TZ that listener is available */
2053 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
2054
2055 if (qseecom.smcinvoke_support)
2056 ireq.app_or_session_id = session_id;
2057 else
2058 ireq.app_or_session_id = data->client.app_id;
2059
2060 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2061 &ireq, sizeof(ireq),
2062 &continue_resp, sizeof(continue_resp));
2063 if (ret && qseecom.smcinvoke_support) {
2064 /* retry with legacy cmd */
2065 qseecom.smcinvoke_support = false;
2066 ireq.app_or_session_id = data->client.app_id;
2067 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2068 &ireq, sizeof(ireq),
2069 &continue_resp, sizeof(continue_resp));
2070 qseecom.smcinvoke_support = true;
2071 if (ret) {
2072 pr_err("unblock app %d or session %d fail\n",
2073 data->client.app_id, session_id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002074 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002075 goto exit;
2076 }
2077 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08002078 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002079 resp->result = continue_resp.result;
2080 resp->resp_type = continue_resp.resp_type;
2081 resp->data = continue_resp.data;
2082 pr_debug("unblock resp = %d\n", resp->result);
2083 } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
2084
2085 if (resp->result != QSEOS_RESULT_INCOMPLETE) {
2086 pr_err("Unexpected unblock resp %d\n", resp->result);
2087 ret = -EINVAL;
Zhen Kong2f60f492017-06-29 15:22:14 -07002088 }
Zhen Kong2f60f492017-06-29 15:22:14 -07002089exit:
2090 return ret;
2091}
2092
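/*
 * Reentrancy-aware variant of the INCOMPLETE loop above: app_access_lock
 * is dropped while waiting for the listener so other TAs can make
 * progress, the per-listener send_resp_flag is used instead of the global
 * one, and a BLOCKED_ON_LISTENER result from TZ is forwarded to the
 * blocked-on-listener handler before the loop continues.
 */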
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002093static int __qseecom_reentrancy_process_incomplete_cmd(
2094 struct qseecom_dev_handle *data,
2095 struct qseecom_command_scm_resp *resp)
2096{
2097 int ret = 0;
2098 int rc = 0;
2099 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07002100 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
2101 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
2102 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002103 struct qseecom_registered_listener_list *ptr_svc = NULL;
2104 sigset_t new_sigset;
2105 sigset_t old_sigset;
2106 uint32_t status;
2107 void *cmd_buf = NULL;
2108 size_t cmd_len;
2109 struct sglist_info *table = NULL;
2110
Zhen Kong26e62742018-05-04 17:19:06 -07002111 while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002112 lstnr = resp->data;
2113 /*
2114		 * Wake up blocking listener service with the lstnr id
2115 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002116 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002117 list_for_each_entry(ptr_svc,
2118 &qseecom.registered_listener_list_head, list) {
2119 if (ptr_svc->svc.listener_id == lstnr) {
2120 ptr_svc->listener_in_use = true;
2121 ptr_svc->rcv_req_flag = 1;
2122 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2123 break;
2124 }
2125 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002126
2127 if (ptr_svc == NULL) {
2128 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07002129 rc = -EINVAL;
2130 status = QSEOS_RESULT_FAILURE;
2131 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002132 }
2133
2134 if (!ptr_svc->ihandle) {
2135 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07002136 rc = -EINVAL;
2137 status = QSEOS_RESULT_FAILURE;
2138 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002139 }
2140
2141 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07002142 pr_err("Service %d does not exist\n",
2143 lstnr);
2144 rc = -ERESTARTSYS;
2145 ptr_svc = NULL;
2146 status = QSEOS_RESULT_FAILURE;
2147 goto err_resp;
2148 }
2149
2150 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08002151 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07002152 lstnr, ptr_svc->abort);
2153 rc = -ENODEV;
2154 status = QSEOS_RESULT_FAILURE;
2155 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002156 }
Zhen Kong25731112018-09-20 13:10:03 -07002157
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002158 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2159
2160 /* initialize the new signal mask with all signals*/
2161 sigfillset(&new_sigset);
2162
2163 /* block all signals */
2164 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2165
2166 /* unlock mutex btw waking listener and sleep-wait */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002167 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002168 mutex_unlock(&app_access_lock);
2169 do {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302170 if (!wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002171 __qseecom_reentrancy_listener_has_sent_rsp(
2172 data, ptr_svc))) {
2173 break;
2174 }
2175 } while (1);
2176 /* lock mutex again after resp sent */
2177 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002178 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002179 ptr_svc->send_resp_flag = 0;
2180 qseecom.send_resp_flag = 0;
2181
2182 /* restore signal mask */
2183 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07002184 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002185			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
2186 data->client.app_id, lstnr, ret);
2187 rc = -ENODEV;
2188 status = QSEOS_RESULT_FAILURE;
2189 } else {
2190 status = QSEOS_RESULT_SUCCESS;
2191 }
Zhen Kong26e62742018-05-04 17:19:06 -07002192err_resp:
Zhen Kong7d500032018-08-06 16:58:31 -07002193 if (ptr_svc)
2194 table = ptr_svc->sglistinfo_ptr;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002195 if (qseecom.qsee_version < QSEE_VERSION_40) {
2196 send_data_rsp.listener_id = lstnr;
2197 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002198 if (table) {
2199 send_data_rsp.sglistinfo_ptr =
2200 (uint32_t)virt_to_phys(table);
2201 send_data_rsp.sglistinfo_len =
2202 SGLISTINFO_TABLE_SIZE;
2203 dmac_flush_range((void *)table,
2204 (void *)table + SGLISTINFO_TABLE_SIZE);
2205 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002206 cmd_buf = (void *)&send_data_rsp;
2207 cmd_len = sizeof(send_data_rsp);
2208 } else {
2209 send_data_rsp_64bit.listener_id = lstnr;
2210 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002211 if (table) {
2212 send_data_rsp_64bit.sglistinfo_ptr =
2213 virt_to_phys(table);
2214 send_data_rsp_64bit.sglistinfo_len =
2215 SGLISTINFO_TABLE_SIZE;
2216 dmac_flush_range((void *)table,
2217 (void *)table + SGLISTINFO_TABLE_SIZE);
2218 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002219 cmd_buf = (void *)&send_data_rsp_64bit;
2220 cmd_len = sizeof(send_data_rsp_64bit);
2221 }
Zhen Kong7d500032018-08-06 16:58:31 -07002222 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002223 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2224 else
2225 *(uint32_t *)cmd_buf =
2226 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07002227 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002228 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2229 ptr_svc->ihandle,
2230 ptr_svc->sb_virt, ptr_svc->sb_length,
2231 ION_IOC_CLEAN_INV_CACHES);
2232 if (ret) {
2233 pr_err("cache operation failed %d\n", ret);
2234 return ret;
2235 }
2236 }
2237 if (lstnr == RPMB_SERVICE) {
2238 ret = __qseecom_enable_clk(CLK_QSEE);
2239 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08002240 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002241 }
2242
2243 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2244 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07002245 if (ptr_svc) {
2246 ptr_svc->listener_in_use = false;
2247 __qseecom_clean_listener_sglistinfo(ptr_svc);
2248 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2249 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002250
2251 if (ret) {
2252 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2253 ret, data->client.app_id);
2254 goto exit;
2255 }
2256
2257 switch (resp->result) {
2258 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2259 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2260 lstnr, data->client.app_id, resp->data);
2261 if (lstnr == resp->data) {
2262 pr_err("lstnr %d should not be blocked!\n",
2263 lstnr);
2264 ret = -EINVAL;
2265 goto exit;
2266 }
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002267 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002268 ret = __qseecom_process_reentrancy_blocked_on_listener(
2269 resp, NULL, data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002270 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002271 if (ret) {
2272 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2273 data->client.app_id,
2274 data->client.app_name, resp->data);
2275 goto exit;
2276 }
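				/* fall through: resp now carries the post-unblock result */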
2277 case QSEOS_RESULT_SUCCESS:
2278 case QSEOS_RESULT_INCOMPLETE:
2279 break;
2280 default:
2281 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2282 resp->result, data->client.app_id, lstnr);
2283 ret = -EINVAL;
2284 goto exit;
2285 }
2286exit:
Zhen Kongbcdeda22018-11-16 13:50:51 -08002287 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002288 if (lstnr == RPMB_SERVICE)
2289 __qseecom_disable_clk(CLK_QSEE);
2290
2291 }
2292 if (rc)
2293 return rc;
2294
2295 return ret;
2296}
2297
2298/*
2299 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2300 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2301 * So, needs to first check if no app blocked before sending OS level scm call,
2302 * then wait until all apps are unblocked.
2303 */
2304static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2305{
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002306 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2307 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2308 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2309 /* thread sleep until this app unblocked */
2310 while (qseecom.app_block_ref_cnt > 0) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002311 mutex_unlock(&app_access_lock);
Zhen Kongcc580932019-04-23 22:16:56 -07002312 wait_event_interruptible(qseecom.app_block_wq,
2313 (!qseecom.app_block_ref_cnt));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002314 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002315 }
2316 }
2317}
2318
2319/*
2320 * scm_call of send data will fail if this TA is blocked or there are more
2321 * than one TA requesting listener services; So, first check to see if need
2322 * to wait.
2323 */
2324static void __qseecom_reentrancy_check_if_this_app_blocked(
2325 struct qseecom_registered_app_list *ptr_app)
2326{
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002327 if (qseecom.qsee_reentrancy_support) {
Zhen Kongdea10592018-07-30 17:50:10 -07002328 ptr_app->check_block++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002329 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2330 /* thread sleep until this app unblocked */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002331 mutex_unlock(&app_access_lock);
Zhen Kongcc580932019-04-23 22:16:56 -07002332 wait_event_interruptible(qseecom.app_block_wq,
2333 (!ptr_app->app_blocked &&
2334 qseecom.app_block_ref_cnt <= 1));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002335 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002336 }
Zhen Kongdea10592018-07-30 17:50:10 -07002337 ptr_app->check_block--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002338 }
2339}
2340
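/*
 * Resolves an app name to an app_id: the locally registered app list is
 * consulted first; on a miss, TZ is queried with an APP_LOOKUP command.
 * *app_id stays 0 when the app is not loaded anywhere.
 */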
2341static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2342 uint32_t *app_id)
2343{
2344 int32_t ret;
2345 struct qseecom_command_scm_resp resp;
2346 bool found_app = false;
2347 struct qseecom_registered_app_list *entry = NULL;
2348 unsigned long flags = 0;
2349
2350 if (!app_id) {
2351 pr_err("Null pointer to app_id\n");
2352 return -EINVAL;
2353 }
2354 *app_id = 0;
2355
2356 /* check if app exists and has been registered locally */
2357 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2358 list_for_each_entry(entry,
2359 &qseecom.registered_app_list_head, list) {
2360 if (!strcmp(entry->app_name, req.app_name)) {
2361 found_app = true;
2362 break;
2363 }
2364 }
2365 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2366 if (found_app) {
2367 pr_debug("Found app with id %d\n", entry->app_id);
2368 *app_id = entry->app_id;
2369 return 0;
2370 }
2371
2372 memset((void *)&resp, 0, sizeof(resp));
2373
2374 /* SCM_CALL to check if app_id for the mentioned app exists */
2375 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2376 sizeof(struct qseecom_check_app_ireq),
2377 &resp, sizeof(resp));
2378 if (ret) {
2379 pr_err("scm_call to check if app is already loaded failed\n");
2380 return -EINVAL;
2381 }
2382
2383 if (resp.result == QSEOS_RESULT_FAILURE)
2384 return 0;
2385
2386 switch (resp.resp_type) {
2387 /*qsee returned listener type response */
2388 case QSEOS_LISTENER_ID:
2389 pr_err("resp type is of listener type instead of app");
2390 return -EINVAL;
2391 case QSEOS_APP_ID:
2392 *app_id = resp.data;
2393 return 0;
2394 default:
2395 pr_err("invalid resp type (%d) from qsee",
2396 resp.resp_type);
2397 return -ENODEV;
2398 }
2399}
2400
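/*
 * Loads (or reuses) a trusted app on behalf of the client: the matching
 * 32/64-bit common library is loaded first if needed, the app name is
 * looked up so an already-loaded app only gets its reference count
 * bumped, and otherwise the ELF image in the client's ION buffer is
 * handed to TZ with an APP_START command and the new app is added to the
 * registered list. Bus bandwidth / SFPB clock votes bracket the whole
 * sequence.
 */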
2401static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2402{
2403 struct qseecom_registered_app_list *entry = NULL;
2404 unsigned long flags = 0;
2405 u32 app_id = 0;
2406 struct ion_handle *ihandle; /* Ion handle */
2407 struct qseecom_load_img_req load_img_req;
2408 int32_t ret = 0;
2409 ion_phys_addr_t pa = 0;
2410 size_t len;
2411 struct qseecom_command_scm_resp resp;
2412 struct qseecom_check_app_ireq req;
2413 struct qseecom_load_app_ireq load_req;
2414 struct qseecom_load_app_64bit_ireq load_req_64bit;
2415 void *cmd_buf = NULL;
2416 size_t cmd_len;
2417 bool first_time = false;
2418
2419 /* Copy the relevant information needed for loading the image */
2420 if (copy_from_user(&load_img_req,
2421 (void __user *)argp,
2422 sizeof(struct qseecom_load_img_req))) {
2423 pr_err("copy_from_user failed\n");
2424 return -EFAULT;
2425 }
2426
2427 /* Check and load cmnlib */
2428 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2429 if (!qseecom.commonlib_loaded &&
2430 load_img_req.app_arch == ELFCLASS32) {
2431 ret = qseecom_load_commonlib_image(data, "cmnlib");
2432 if (ret) {
2433 pr_err("failed to load cmnlib\n");
2434 return -EIO;
2435 }
2436 qseecom.commonlib_loaded = true;
2437 pr_debug("cmnlib is loaded\n");
2438 }
2439
2440 if (!qseecom.commonlib64_loaded &&
2441 load_img_req.app_arch == ELFCLASS64) {
2442 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2443 if (ret) {
2444 pr_err("failed to load cmnlib64\n");
2445 return -EIO;
2446 }
2447 qseecom.commonlib64_loaded = true;
2448 pr_debug("cmnlib64 is loaded\n");
2449 }
2450 }
2451
2452 if (qseecom.support_bus_scaling) {
2453 mutex_lock(&qsee_bw_mutex);
2454 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2455 mutex_unlock(&qsee_bw_mutex);
2456 if (ret)
2457 return ret;
2458 }
2459
2460 /* Vote for the SFPB clock */
2461 ret = __qseecom_enable_clk_scale_up(data);
2462 if (ret)
2463 goto enable_clk_err;
2464
2465 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2466 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2467 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2468
2469 ret = __qseecom_check_app_exists(req, &app_id);
2470 if (ret < 0)
2471 goto loadapp_err;
2472
2473 if (app_id) {
2474 pr_debug("App id %d (%s) already exists\n", app_id,
2475 (char *)(req.app_name));
2476 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2477 list_for_each_entry(entry,
2478 &qseecom.registered_app_list_head, list){
2479 if (entry->app_id == app_id) {
2480 entry->ref_cnt++;
2481 break;
2482 }
2483 }
2484 spin_unlock_irqrestore(
2485 &qseecom.registered_app_list_lock, flags);
2486 ret = 0;
2487 } else {
2488 first_time = true;
2489 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2490 (char *)(load_img_req.img_name));
2491 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002492 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002493 load_img_req.ifd_data_fd);
2494 if (IS_ERR_OR_NULL(ihandle)) {
2495 pr_err("Ion client could not retrieve the handle\n");
2496 ret = -ENOMEM;
2497 goto loadapp_err;
2498 }
2499
2500 /* Get the physical address of the ION BUF */
2501 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2502 if (ret) {
2503 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2504 ret);
2505 goto loadapp_err;
2506 }
2507 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2508 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2509 len, load_img_req.mdt_len,
2510 load_img_req.img_len);
2511 ret = -EINVAL;
2512 goto loadapp_err;
2513 }
2514 /* Populate the structure for sending scm call to load image */
2515 if (qseecom.qsee_version < QSEE_VERSION_40) {
2516 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2517 load_req.mdt_len = load_img_req.mdt_len;
2518 load_req.img_len = load_img_req.img_len;
2519 strlcpy(load_req.app_name, load_img_req.img_name,
2520 MAX_APP_NAME_SIZE);
2521 load_req.phy_addr = (uint32_t)pa;
2522 cmd_buf = (void *)&load_req;
2523 cmd_len = sizeof(struct qseecom_load_app_ireq);
2524 } else {
2525 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2526 load_req_64bit.mdt_len = load_img_req.mdt_len;
2527 load_req_64bit.img_len = load_img_req.img_len;
2528 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2529 MAX_APP_NAME_SIZE);
2530 load_req_64bit.phy_addr = (uint64_t)pa;
2531 cmd_buf = (void *)&load_req_64bit;
2532 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2533 }
2534
2535 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2536 ION_IOC_CLEAN_INV_CACHES);
2537 if (ret) {
2538 pr_err("cache operation failed %d\n", ret);
2539 goto loadapp_err;
2540 }
2541
2542 /* SCM_CALL to load the app and get the app_id back */
2543 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2544 cmd_len, &resp, sizeof(resp));
2545 if (ret) {
2546 pr_err("scm_call to load app failed\n");
2547 if (!IS_ERR_OR_NULL(ihandle))
2548 ion_free(qseecom.ion_clnt, ihandle);
2549 ret = -EINVAL;
2550 goto loadapp_err;
2551 }
2552
2553 if (resp.result == QSEOS_RESULT_FAILURE) {
2554 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2555 if (!IS_ERR_OR_NULL(ihandle))
2556 ion_free(qseecom.ion_clnt, ihandle);
2557 ret = -EFAULT;
2558 goto loadapp_err;
2559 }
2560
2561 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2562 ret = __qseecom_process_incomplete_cmd(data, &resp);
2563 if (ret) {
2564 pr_err("process_incomplete_cmd failed err: %d\n",
2565 ret);
2566 if (!IS_ERR_OR_NULL(ihandle))
2567 ion_free(qseecom.ion_clnt, ihandle);
2568 ret = -EFAULT;
2569 goto loadapp_err;
2570 }
2571 }
2572
2573 if (resp.result != QSEOS_RESULT_SUCCESS) {
2574 pr_err("scm_call failed resp.result unknown, %d\n",
2575 resp.result);
2576 if (!IS_ERR_OR_NULL(ihandle))
2577 ion_free(qseecom.ion_clnt, ihandle);
2578 ret = -EFAULT;
2579 goto loadapp_err;
2580 }
2581
2582 app_id = resp.data;
2583
2584 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2585 if (!entry) {
2586 ret = -ENOMEM;
2587 goto loadapp_err;
2588 }
2589 entry->app_id = app_id;
2590 entry->ref_cnt = 1;
2591 entry->app_arch = load_img_req.app_arch;
2592 /*
2593 * keymaster app may be first loaded as "keymaste" by qseecomd,
2594 * and then used as "keymaster" on some targets. To avoid app
2595 * name checking error, register "keymaster" into app_list and
2596 * thread private data.
2597 */
2598 if (!strcmp(load_img_req.img_name, "keymaste"))
2599 strlcpy(entry->app_name, "keymaster",
2600 MAX_APP_NAME_SIZE);
2601 else
2602 strlcpy(entry->app_name, load_img_req.img_name,
2603 MAX_APP_NAME_SIZE);
2604 entry->app_blocked = false;
2605 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002606 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002607
2608 /* Deallocate the handle */
2609 if (!IS_ERR_OR_NULL(ihandle))
2610 ion_free(qseecom.ion_clnt, ihandle);
2611
2612 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2613 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2614 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2615 flags);
2616
2617 pr_warn("App with id %u (%s) now loaded\n", app_id,
2618 (char *)(load_img_req.img_name));
2619 }
2620 data->client.app_id = app_id;
2621 data->client.app_arch = load_img_req.app_arch;
2622 if (!strcmp(load_img_req.img_name, "keymaste"))
2623 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2624 else
2625 strlcpy(data->client.app_name, load_img_req.img_name,
2626 MAX_APP_NAME_SIZE);
2627 load_img_req.app_id = app_id;
2628 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2629 pr_err("copy_to_user failed\n");
2630 ret = -EFAULT;
2631 if (first_time == true) {
2632 spin_lock_irqsave(
2633 &qseecom.registered_app_list_lock, flags);
2634 list_del(&entry->list);
2635 spin_unlock_irqrestore(
2636 &qseecom.registered_app_list_lock, flags);
2637 kzfree(entry);
2638 }
2639 }
2640
2641loadapp_err:
2642 __qseecom_disable_clk_scale_down(data);
2643enable_clk_err:
2644 if (qseecom.support_bus_scaling) {
2645 mutex_lock(&qsee_bw_mutex);
2646 qseecom_unregister_bus_bandwidth_needs(data);
2647 mutex_unlock(&qsee_bw_mutex);
2648 }
2649 return ret;
2650}
2651
2652static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2653{
2654 int ret = 1; /* Set unload app */
2655
2656 wake_up_all(&qseecom.send_resp_wq);
2657 if (qseecom.qsee_reentrancy_support)
2658 mutex_unlock(&app_access_lock);
2659 while (atomic_read(&data->ioctl_count) > 1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302660 if (wait_event_interruptible(data->abort_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002661 atomic_read(&data->ioctl_count) <= 1)) {
2662 pr_err("Interrupted from abort\n");
2663 ret = -ERESTARTSYS;
2664 break;
2665 }
2666 }
2667 if (qseecom.qsee_reentrancy_support)
2668 mutex_lock(&app_access_lock);
2669 return ret;
2670}
2671
2672static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2673{
2674 int ret = 0;
2675
2676 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2677 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2678 ion_free(qseecom.ion_clnt, data->client.ihandle);
2679 data->client.ihandle = NULL;
2680 }
2681 return ret;
2682}
2683
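/*
 * Releases the client's hold on its trusted app: keymaster is never
 * unloaded from TZ; otherwise the app is shut down with an APP_SHUTDOWN
 * command once the last reference goes away (or on an app crash, unless
 * the app is currently blocked). The registered-app entry and the
 * client's ION mapping are cleaned up accordingly.
 */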
2684static int qseecom_unload_app(struct qseecom_dev_handle *data,
2685 bool app_crash)
2686{
2687 unsigned long flags;
2688 unsigned long flags1;
2689 int ret = 0;
2690 struct qseecom_command_scm_resp resp;
2691 struct qseecom_registered_app_list *ptr_app = NULL;
2692 bool unload = false;
2693 bool found_app = false;
2694 bool found_dead_app = false;
Zhen Kongf818f152019-03-13 12:31:32 -07002695 bool scm_called = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002696
2697 if (!data) {
2698 pr_err("Invalid/uninitialized device handle\n");
2699 return -EINVAL;
2700 }
2701
2702 if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
2703 pr_debug("Do not unload keymaster app from tz\n");
2704 goto unload_exit;
2705 }
2706
2707 __qseecom_cleanup_app(data);
2708 __qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
2709
2710 if (data->client.app_id > 0) {
2711 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2712 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
2713 list) {
2714 if (ptr_app->app_id == data->client.app_id) {
2715 if (!strcmp((void *)ptr_app->app_name,
2716 (void *)data->client.app_name)) {
2717 found_app = true;
Zhen Kong024798b2018-07-13 18:14:26 -07002718 if (ptr_app->app_blocked ||
2719 ptr_app->check_block)
Zhen Kongaf93d7a2017-10-13 14:01:48 -07002720 app_crash = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002721 if (app_crash || ptr_app->ref_cnt == 1)
2722 unload = true;
2723 break;
2724 }
2725 found_dead_app = true;
2726 break;
2727 }
2728 }
2729 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2730 flags);
2731 if (found_app == false && found_dead_app == false) {
2732 pr_err("Cannot find app with id = %d (%s)\n",
2733 data->client.app_id,
2734 (char *)data->client.app_name);
2735 ret = -EINVAL;
2736 goto unload_exit;
2737 }
2738 }
2739
2740 if (found_dead_app)
2741 pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
2742 (char *)data->client.app_name);
2743
2744 if (unload) {
2745 struct qseecom_unload_app_ireq req;
2746 /* Populate the structure for sending scm call to load image */
2747 req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
2748 req.app_id = data->client.app_id;
2749
2750 /* SCM_CALL to unload the app */
2751 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2752 sizeof(struct qseecom_unload_app_ireq),
2753 &resp, sizeof(resp));
Zhen Kongf818f152019-03-13 12:31:32 -07002754 scm_called = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002755 if (ret) {
2756 pr_err("scm_call to unload app (id = %d) failed\n",
2757 req.app_id);
2758 ret = -EFAULT;
Zhen Kongf818f152019-03-13 12:31:32 -07002759 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002760 } else {
2761 pr_warn("App id %d now unloaded\n", req.app_id);
2762 }
2763 if (resp.result == QSEOS_RESULT_FAILURE) {
2764 pr_err("app (%d) unload_failed!!\n",
2765 data->client.app_id);
2766 ret = -EFAULT;
Zhen Kongf818f152019-03-13 12:31:32 -07002767 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002768 }
2769 if (resp.result == QSEOS_RESULT_SUCCESS)
2770 pr_debug("App (%d) is unloaded!!\n",
2771 data->client.app_id);
2772 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2773 ret = __qseecom_process_incomplete_cmd(data, &resp);
2774 if (ret) {
2775 pr_err("process_incomplete_cmd fail err: %d\n",
2776 ret);
Zhen Kongf818f152019-03-13 12:31:32 -07002777 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002778 }
2779 }
2780 }
2781
Zhen Kongf818f152019-03-13 12:31:32 -07002782scm_exit:
2783 if (scm_called) {
2784 /* double check if this app_entry still exists */
2785 bool doublecheck = false;
2786
2787 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2788 list_for_each_entry(ptr_app,
2789 &qseecom.registered_app_list_head, list) {
2790 if ((ptr_app->app_id == data->client.app_id) &&
2791 (!strcmp((void *)ptr_app->app_name,
2792 (void *)data->client.app_name))) {
2793 doublecheck = true;
2794 break;
2795 }
2796 }
2797 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2798 flags1);
2799 if (!doublecheck) {
2800 pr_warn("app %d(%s) entry is already removed\n",
2801 data->client.app_id,
2802 (char *)data->client.app_name);
2803 found_app = false;
2804 }
2805 }
Zhen Kong7d500032018-08-06 16:58:31 -07002806unload_exit:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002807 if (found_app) {
2808 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2809 if (app_crash) {
2810 ptr_app->ref_cnt = 0;
2811 pr_debug("app_crash: ref_count = 0\n");
2812 } else {
2813 if (ptr_app->ref_cnt == 1) {
2814 ptr_app->ref_cnt = 0;
2815 pr_debug("ref_count set to 0\n");
2816 } else {
2817 ptr_app->ref_cnt--;
2818 pr_debug("Can't unload app(%d) inuse\n",
2819 ptr_app->app_id);
2820 }
2821 }
2822 if (unload) {
2823 list_del(&ptr_app->list);
2824 kzfree(ptr_app);
2825 }
2826 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2827 flags1);
2828 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002829 qseecom_unmap_ion_allocated_memory(data);
2830 data->released = true;
2831 return ret;
2832}
2833
2834static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2835 unsigned long virt)
2836{
2837 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2838}
2839
2840static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2841 unsigned long virt)
2842{
2843 return (uintptr_t)data->client.sb_virt +
2844 (virt - data->client.user_virt_sb_base);
2845}
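
/*
 * Illustration of the two helpers above (values are only an example):
 * with user_virt_sb_base = 0x40000000, sb_phys = 0x8e000000 and
 * sb_virt = 0xffffffc012340000, a user pointer 0x40000100 maps to
 * physical 0x8e000100 and kernel virtual 0xffffffc012340100, i.e. the
 * offset into the shared buffer is preserved across all three views.
 */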
2846
2847int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2848 struct qseecom_send_svc_cmd_req *req_ptr,
2849 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2850{
2851 int ret = 0;
2852 void *req_buf = NULL;
2853
2854 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2855 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2856 req_ptr, send_svc_ireq_ptr);
2857 return -EINVAL;
2858 }
2859
2860 /* Clients need to ensure req_buf is at base offset of shared buffer */
2861 if ((uintptr_t)req_ptr->cmd_req_buf !=
2862 data_ptr->client.user_virt_sb_base) {
2863 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2864 return -EINVAL;
2865 }
2866
2867 if (data_ptr->client.sb_length <
2868 sizeof(struct qseecom_rpmb_provision_key)) {
2869 pr_err("shared buffer is too small to hold key type\n");
2870 return -EINVAL;
2871 }
2872 req_buf = data_ptr->client.sb_virt;
2873
2874 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2875 send_svc_ireq_ptr->key_type =
2876 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2877 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2878 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2879 data_ptr, (uintptr_t)req_ptr->resp_buf));
2880 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2881
2882 return ret;
2883}
2884
2885int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2886 struct qseecom_send_svc_cmd_req *req_ptr,
2887 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2888{
2889 int ret = 0;
2890 uint32_t reqd_len_sb_in = 0;
2891
2892 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2893 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2894 req_ptr, send_svc_ireq_ptr);
2895 return -EINVAL;
2896 }
2897
2898 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2899 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2900 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2901 pr_err("Required: %u, Available: %zu\n",
2902 reqd_len_sb_in, data_ptr->client.sb_length);
2903 return -ENOMEM;
2904 }
2905
2906 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2907 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2908 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2909 data_ptr, (uintptr_t)req_ptr->resp_buf));
2910 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2911
2912 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2913 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2914
2915
2916 return ret;
2917}
2918
2919static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
2920 struct qseecom_send_svc_cmd_req *req)
2921{
2922 if (!req || !req->resp_buf || !req->cmd_req_buf) {
2923 pr_err("req or cmd buffer or response buffer is null\n");
2924 return -EINVAL;
2925 }
2926
2927 if (!data || !data->client.ihandle) {
2928 pr_err("Client or client handle is not initialized\n");
2929 return -EINVAL;
2930 }
2931
2932 if (data->client.sb_virt == NULL) {
2933 pr_err("sb_virt null\n");
2934 return -EINVAL;
2935 }
2936
2937 if (data->client.user_virt_sb_base == 0) {
2938 pr_err("user_virt_sb_base is null\n");
2939 return -EINVAL;
2940 }
2941
2942 if (data->client.sb_length == 0) {
2943 pr_err("sb_length is 0\n");
2944 return -EINVAL;
2945 }
2946
2947 if (((uintptr_t)req->cmd_req_buf <
2948 data->client.user_virt_sb_base) ||
2949 ((uintptr_t)req->cmd_req_buf >=
2950 (data->client.user_virt_sb_base + data->client.sb_length))) {
2951 pr_err("cmd buffer address not within shared bufffer\n");
2952 return -EINVAL;
2953 }
2954 if (((uintptr_t)req->resp_buf <
2955 data->client.user_virt_sb_base) ||
2956 ((uintptr_t)req->resp_buf >=
2957 (data->client.user_virt_sb_base + data->client.sb_length))) {
2958 pr_err("response buffer address not within shared bufffer\n");
2959 return -EINVAL;
2960 }
2961 if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
2962 (req->cmd_req_len > data->client.sb_length) ||
2963 (req->resp_len > data->client.sb_length)) {
2964 pr_err("cmd buf length or response buf length not valid\n");
2965 return -EINVAL;
2966 }
2967 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
2968 pr_err("Integer overflow detected in req_len & rsp_len\n");
2969 return -EINVAL;
2970 }
2971
2972 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
2973 pr_debug("Not enough memory to fit cmd_buf.\n");
2974 pr_debug("resp_buf. Required: %u, Available: %zu\n",
2975 (req->cmd_req_len + req->resp_len),
2976 data->client.sb_length);
2977 return -ENOMEM;
2978 }
2979 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
2980 pr_err("Integer overflow in req_len & cmd_req_buf\n");
2981 return -EINVAL;
2982 }
2983 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
2984 pr_err("Integer overflow in resp_len & resp_buf\n");
2985 return -EINVAL;
2986 }
2987 if (data->client.user_virt_sb_base >
2988 (ULONG_MAX - data->client.sb_length)) {
2989 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
2990 return -EINVAL;
2991 }
2992 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
2993 ((uintptr_t)data->client.user_virt_sb_base +
2994 data->client.sb_length)) ||
2995 (((uintptr_t)req->resp_buf + req->resp_len) >
2996 ((uintptr_t)data->client.user_virt_sb_base +
2997 data->client.sb_length))) {
2998 pr_err("cmd buf or resp buf is out of shared buffer region\n");
2999 return -EINVAL;
3000 }
3001 return 0;
3002}
3003
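/*
 * Sends an RPMB or FSM key service command on behalf of the client: the
 * request is validated against the shared buffer, repackaged into the
 * matching ireq structure, and issued to TZ with cache clean/invalidate
 * operations around the shared buffer and a HIGH bandwidth (or direct
 * clock) vote for the duration of the call. INCOMPLETE results are
 * completed through the listener path.
 */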
3004static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
3005 void __user *argp)
3006{
3007 int ret = 0;
3008 struct qseecom_client_send_service_ireq send_svc_ireq;
3009 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
3010 struct qseecom_command_scm_resp resp;
3011 struct qseecom_send_svc_cmd_req req;
3012 void *send_req_ptr;
3013 size_t req_buf_size;
3014
3015 /*struct qseecom_command_scm_resp resp;*/
3016
3017 if (copy_from_user(&req,
3018 (void __user *)argp,
3019 sizeof(req))) {
3020 pr_err("copy_from_user failed\n");
3021 return -EFAULT;
3022 }
3023
3024 if (__validate_send_service_cmd_inputs(data, &req))
3025 return -EINVAL;
3026
3027 data->type = QSEECOM_SECURE_SERVICE;
3028
3029 switch (req.cmd_id) {
3030 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
3031 case QSEOS_RPMB_ERASE_COMMAND:
3032 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
3033 send_req_ptr = &send_svc_ireq;
3034 req_buf_size = sizeof(send_svc_ireq);
3035 if (__qseecom_process_rpmb_svc_cmd(data, &req,
3036 send_req_ptr))
3037 return -EINVAL;
3038 break;
3039 case QSEOS_FSM_LTEOTA_REQ_CMD:
3040 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
3041 case QSEOS_FSM_IKE_REQ_CMD:
3042 case QSEOS_FSM_IKE_REQ_RSP_CMD:
3043 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
3044 case QSEOS_FSM_OEM_FUSE_READ_ROW:
3045 case QSEOS_FSM_ENCFS_REQ_CMD:
3046 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
3047 send_req_ptr = &send_fsm_key_svc_ireq;
3048 req_buf_size = sizeof(send_fsm_key_svc_ireq);
3049 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
3050 send_req_ptr))
3051 return -EINVAL;
3052 break;
3053 default:
3054 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
3055 return -EINVAL;
3056 }
3057
3058 if (qseecom.support_bus_scaling) {
3059 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
3060 if (ret) {
3061 pr_err("Fail to set bw HIGH\n");
3062 return ret;
3063 }
3064 } else {
3065 ret = qseecom_perf_enable(data);
3066 if (ret) {
3067 pr_err("Failed to vote for clocks with err %d\n", ret);
3068 goto exit;
3069 }
3070 }
3071
3072 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3073 data->client.sb_virt, data->client.sb_length,
3074 ION_IOC_CLEAN_INV_CACHES);
3075 if (ret) {
3076 pr_err("cache operation failed %d\n", ret);
3077 goto exit;
3078 }
3079 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3080 (const void *)send_req_ptr,
3081 req_buf_size, &resp, sizeof(resp));
3082 if (ret) {
3083 pr_err("qseecom_scm_call failed with err: %d\n", ret);
3084 if (!qseecom.support_bus_scaling) {
3085 qsee_disable_clock_vote(data, CLK_DFAB);
3086 qsee_disable_clock_vote(data, CLK_SFPB);
3087 } else {
3088 __qseecom_add_bw_scale_down_timer(
3089 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3090 }
3091 goto exit;
3092 }
3093 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3094 data->client.sb_virt, data->client.sb_length,
3095 ION_IOC_INV_CACHES);
3096 if (ret) {
3097 pr_err("cache operation failed %d\n", ret);
3098 goto exit;
3099 }
3100 switch (resp.result) {
3101 case QSEOS_RESULT_SUCCESS:
3102 break;
3103 case QSEOS_RESULT_INCOMPLETE:
3104 pr_debug("qseos_result_incomplete\n");
3105 ret = __qseecom_process_incomplete_cmd(data, &resp);
3106 if (ret) {
3107 pr_err("process_incomplete_cmd fail with result: %d\n",
3108 resp.result);
3109 }
3110 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
3111 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05303112 if (put_user(resp.result,
3113 (uint32_t __user *)req.resp_buf)) {
3114 ret = -EINVAL;
3115 goto exit;
3116 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003117 ret = 0;
3118 }
3119 break;
3120 case QSEOS_RESULT_FAILURE:
3121 pr_err("scm call failed with resp.result: %d\n", resp.result);
3122 ret = -EINVAL;
3123 break;
3124 default:
3125 pr_err("Response result %d not supported\n",
3126 resp.result);
3127 ret = -EINVAL;
3128 break;
3129 }
3130 if (!qseecom.support_bus_scaling) {
3131 qsee_disable_clock_vote(data, CLK_DFAB);
3132 qsee_disable_clock_vote(data, CLK_SFPB);
3133 } else {
3134 __qseecom_add_bw_scale_down_timer(
3135 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3136 }
3137
3138exit:
3139 return ret;
3140}
3141
3142static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
3143 struct qseecom_send_cmd_req *req)
3144
3145{
3146 if (!data || !data->client.ihandle) {
3147 pr_err("Client or client handle is not initialized\n");
3148 return -EINVAL;
3149 }
3150 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
3151 (req->cmd_req_buf == NULL)) {
3152 pr_err("cmd buffer or response buffer is null\n");
3153 return -EINVAL;
3154 }
3155 if (((uintptr_t)req->cmd_req_buf <
3156 data->client.user_virt_sb_base) ||
3157 ((uintptr_t)req->cmd_req_buf >=
3158 (data->client.user_virt_sb_base + data->client.sb_length))) {
3159		pr_err("cmd buffer address not within shared buffer\n");
3160 return -EINVAL;
3161 }
3162 if (((uintptr_t)req->resp_buf <
3163 data->client.user_virt_sb_base) ||
3164 ((uintptr_t)req->resp_buf >=
3165 (data->client.user_virt_sb_base + data->client.sb_length))) {
3166		pr_err("response buffer address not within shared buffer\n");
3167 return -EINVAL;
3168 }
3169 if ((req->cmd_req_len == 0) ||
3170 (req->cmd_req_len > data->client.sb_length) ||
3171 (req->resp_len > data->client.sb_length)) {
3172 pr_err("cmd buf length or response buf length not valid\n");
3173 return -EINVAL;
3174 }
3175 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3176 pr_err("Integer overflow detected in req_len & rsp_len\n");
3177 return -EINVAL;
3178 }
3179
3180 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3181		pr_debug("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
3182			(req->cmd_req_len + req->resp_len),
3183			data->client.sb_length);
3185 return -ENOMEM;
3186 }
3187 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3188 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3189 return -EINVAL;
3190 }
3191 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3192 pr_err("Integer overflow in resp_len & resp_buf\n");
3193 return -EINVAL;
3194 }
3195 if (data->client.user_virt_sb_base >
3196 (ULONG_MAX - data->client.sb_length)) {
3197 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3198 return -EINVAL;
3199 }
3200 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3201 ((uintptr_t)data->client.user_virt_sb_base +
3202 data->client.sb_length)) ||
3203 (((uintptr_t)req->resp_buf + req->resp_len) >
3204 ((uintptr_t)data->client.user_virt_sb_base +
3205 data->client.sb_length))) {
3206 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3207 return -EINVAL;
3208 }
3209 return 0;
3210}
3211
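/*
 * Post-process an SCM response when QSEE re-entrancy is supported: a
 * BLOCKED_ON_LISTENER result waits for the listener to unblock the app,
 * then the still-incomplete command is resubmitted until TZ returns a
 * final result.
 */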
3212int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
3213 struct qseecom_registered_app_list *ptr_app,
3214 struct qseecom_dev_handle *data)
3215{
3216 int ret = 0;
3217
3218 switch (resp->result) {
3219 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
3220 pr_warn("App(%d) %s is blocked on listener %d\n",
3221 data->client.app_id, data->client.app_name,
3222 resp->data);
3223 ret = __qseecom_process_reentrancy_blocked_on_listener(
3224 resp, ptr_app, data);
3225 if (ret) {
3226			pr_err("failed to process App(%d) %s blocked on listener %d\n",
3227				data->client.app_id, data->client.app_name, resp->data);
3228 return ret;
3229 }
3230
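	/* fall through: finish the command now that the app is unblocked */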
3231 case QSEOS_RESULT_INCOMPLETE:
3232 qseecom.app_block_ref_cnt++;
3233 ptr_app->app_blocked = true;
3234 ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
3235 ptr_app->app_blocked = false;
3236 qseecom.app_block_ref_cnt--;
Zhen Kongcc580932019-04-23 22:16:56 -07003237 wake_up_interruptible_all(&qseecom.app_block_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003238 if (ret)
3239 pr_err("process_incomplete_cmd failed err: %d\n",
3240 ret);
3241 return ret;
3242 case QSEOS_RESULT_SUCCESS:
3243 return ret;
3244 default:
3245 pr_err("Response result %d not supported\n",
3246 resp->result);
3247 return -EINVAL;
3248 }
3249}
3250
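/*
 * Build the 32-bit or 64-bit send_data request for the target app, flush
 * the shared buffer, issue the SCM call, and handle INCOMPLETE/re-entrancy
 * results; the shared buffer is invalidated again before returning.
 */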
3251static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
3252 struct qseecom_send_cmd_req *req)
3253{
3254 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07003255 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003256 u32 reqd_len_sb_in = 0;
3257 struct qseecom_client_send_data_ireq send_data_req = {0};
3258 struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
3259 struct qseecom_command_scm_resp resp;
3260 unsigned long flags;
3261 struct qseecom_registered_app_list *ptr_app;
3262 bool found_app = false;
3263 void *cmd_buf = NULL;
3264 size_t cmd_len;
3265 struct sglist_info *table = data->sglistinfo_ptr;
3266
3267 reqd_len_sb_in = req->cmd_req_len + req->resp_len;
3268 /* find app_id & img_name from list */
3269 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
3270 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
3271 list) {
3272 if ((ptr_app->app_id == data->client.app_id) &&
3273 (!strcmp(ptr_app->app_name, data->client.app_name))) {
3274 found_app = true;
3275 break;
3276 }
3277 }
3278 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
3279
3280 if (!found_app) {
3281 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
3282 (char *)data->client.app_name);
3283 return -ENOENT;
3284 }
3285
3286 if (qseecom.qsee_version < QSEE_VERSION_40) {
3287 send_data_req.app_id = data->client.app_id;
3288 send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3289 data, (uintptr_t)req->cmd_req_buf));
3290 send_data_req.req_len = req->cmd_req_len;
3291 send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3292 data, (uintptr_t)req->resp_buf));
3293 send_data_req.rsp_len = req->resp_len;
3294 send_data_req.sglistinfo_ptr =
3295 (uint32_t)virt_to_phys(table);
3296 send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3297 dmac_flush_range((void *)table,
3298 (void *)table + SGLISTINFO_TABLE_SIZE);
3299 cmd_buf = (void *)&send_data_req;
3300 cmd_len = sizeof(struct qseecom_client_send_data_ireq);
3301 } else {
3302 send_data_req_64bit.app_id = data->client.app_id;
3303 send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
3304 (uintptr_t)req->cmd_req_buf);
3305 send_data_req_64bit.req_len = req->cmd_req_len;
3306 send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
3307 (uintptr_t)req->resp_buf);
3308 send_data_req_64bit.rsp_len = req->resp_len;
3309 /* check if 32bit app's phys_addr region is under 4GB.*/
3310 if ((data->client.app_arch == ELFCLASS32) &&
3311 ((send_data_req_64bit.req_ptr >=
3312 PHY_ADDR_4G - send_data_req_64bit.req_len) ||
3313 (send_data_req_64bit.rsp_ptr >=
3314 PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
3315 pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
3316 data->client.app_name,
3317 send_data_req_64bit.req_ptr,
3318 send_data_req_64bit.req_len,
3319 send_data_req_64bit.rsp_ptr,
3320 send_data_req_64bit.rsp_len);
3321 return -EFAULT;
3322 }
3323 send_data_req_64bit.sglistinfo_ptr =
3324 (uint64_t)virt_to_phys(table);
3325 send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3326 dmac_flush_range((void *)table,
3327 (void *)table + SGLISTINFO_TABLE_SIZE);
3328 cmd_buf = (void *)&send_data_req_64bit;
3329 cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
3330 }
3331
3332 if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
3333 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
3334 else
3335 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
3336
3337 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3338 data->client.sb_virt,
3339 reqd_len_sb_in,
3340 ION_IOC_CLEAN_INV_CACHES);
3341 if (ret) {
3342 pr_err("cache operation failed %d\n", ret);
3343 return ret;
3344 }
3345
3346 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
3347
3348 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3349 cmd_buf, cmd_len,
3350 &resp, sizeof(resp));
3351 if (ret) {
3352 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
3353 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07003354 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003355 }
3356
3357 if (qseecom.qsee_reentrancy_support) {
3358 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07003359 if (ret)
3360 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003361 } else {
3362 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
3363 ret = __qseecom_process_incomplete_cmd(data, &resp);
3364 if (ret) {
3365 pr_err("process_incomplete_cmd failed err: %d\n",
3366 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07003367 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003368 }
3369 } else {
3370 if (resp.result != QSEOS_RESULT_SUCCESS) {
3371 pr_err("Response result %d not supported\n",
3372 resp.result);
3373 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07003374 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003375 }
3376 }
3377 }
Zhen Kong4af480e2017-09-19 14:34:16 -07003378exit:
3379 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003380 data->client.sb_virt, data->client.sb_length,
3381 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07003382 if (ret2) {
3383 pr_err("cache operation failed %d\n", ret2);
3384 return ret2;
3385 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003386 return ret;
3387}
3388
3389static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3390{
3391 int ret = 0;
3392 struct qseecom_send_cmd_req req;
3393
3394 ret = copy_from_user(&req, argp, sizeof(req));
3395 if (ret) {
3396 pr_err("copy_from_user failed\n");
3397		return -EFAULT;
3398 }
3399
3400 if (__validate_send_cmd_inputs(data, &req))
3401 return -EINVAL;
3402
3403 ret = __qseecom_send_cmd(data, &req);
3404
3405	return ret;
3409}
3410
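/*
 * Ensure ifd_data[i].cmd_buf_offset leaves room for a 32-bit physical
 * address inside the command (or listener response) buffer.
 */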
3411int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3412 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3413		struct qseecom_dev_handle *data, int i)
3414{
3415 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3416 (req->ifd_data[i].fd > 0)) {
3417 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3418 (req->ifd_data[i].cmd_buf_offset >
3419 req->cmd_req_len - sizeof(uint32_t))) {
3420 pr_err("Invalid offset (req len) 0x%x\n",
3421 req->ifd_data[i].cmd_buf_offset);
3422 return -EINVAL;
3423 }
3424 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3425 (lstnr_resp->ifd_data[i].fd > 0)) {
3426 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3427 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3428 lstnr_resp->resp_len - sizeof(uint32_t))) {
3429 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3430 lstnr_resp->ifd_data[i].cmd_buf_offset);
3431 return -EINVAL;
3432 }
3433 }
3434 return 0;
3435}
3436
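/* Same check as __boundary_checks_offset(), but for a 64-bit address slot */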
Zhen Kongd097c6e02019-08-01 16:10:20 -07003437static int __boundary_checks_offset_64(struct qseecom_send_modfd_cmd_req *req,
3438 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3439 struct qseecom_dev_handle *data, int i)
3440{
3441
3442 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3443 (req->ifd_data[i].fd > 0)) {
3444 if ((req->cmd_req_len < sizeof(uint64_t)) ||
3445 (req->ifd_data[i].cmd_buf_offset >
3446 req->cmd_req_len - sizeof(uint64_t))) {
3447 pr_err("Invalid offset (req len) 0x%x\n",
3448 req->ifd_data[i].cmd_buf_offset);
3449 return -EINVAL;
3450 }
3451 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3452 (lstnr_resp->ifd_data[i].fd > 0)) {
3453 if ((lstnr_resp->resp_len < sizeof(uint64_t)) ||
3454 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3455 lstnr_resp->resp_len - sizeof(uint64_t))) {
3456 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3457 lstnr_resp->ifd_data[i].cmd_buf_offset);
3458 return -EINVAL;
3459 }
3460 }
3461 return 0;
3462}
3463
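/*
 * For every ion fd passed by the client, patch the buffer's physical
 * address (or scatter-gather entry list) into the command buffer at
 * cmd_buf_offset and do the required cache maintenance; with cleanup set,
 * the patched fields are zeroed again after the command completes.
 */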
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003464static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
3465 struct qseecom_dev_handle *data)
3466{
3467 struct ion_handle *ihandle;
3468 char *field;
3469 int ret = 0;
3470 int i = 0;
3471 uint32_t len = 0;
3472 struct scatterlist *sg;
3473 struct qseecom_send_modfd_cmd_req *req = NULL;
3474 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3475 struct qseecom_registered_listener_list *this_lstnr = NULL;
3476 uint32_t offset;
3477 struct sg_table *sg_ptr;
3478
3479 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3480 (data->type != QSEECOM_CLIENT_APP))
3481 return -EFAULT;
3482
3483 if (msg == NULL) {
3484 pr_err("Invalid address\n");
3485 return -EINVAL;
3486 }
3487 if (data->type == QSEECOM_LISTENER_SERVICE) {
3488 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3489 this_lstnr = __qseecom_find_svc(data->listener.id);
3490 if (IS_ERR_OR_NULL(this_lstnr)) {
3491 pr_err("Invalid listener ID\n");
3492 return -ENOMEM;
3493 }
3494 } else {
3495 req = (struct qseecom_send_modfd_cmd_req *)msg;
3496 }
3497
3498 for (i = 0; i < MAX_ION_FD; i++) {
3499 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3500 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003501 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003502 req->ifd_data[i].fd);
3503 if (IS_ERR_OR_NULL(ihandle)) {
3504 pr_err("Ion client can't retrieve the handle\n");
3505 return -ENOMEM;
3506 }
3507 field = (char *) req->cmd_req_buf +
3508 req->ifd_data[i].cmd_buf_offset;
3509 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3510 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003511 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003512 lstnr_resp->ifd_data[i].fd);
3513 if (IS_ERR_OR_NULL(ihandle)) {
3514 pr_err("Ion client can't retrieve the handle\n");
3515 return -ENOMEM;
3516 }
3517 field = lstnr_resp->resp_buf_ptr +
3518 lstnr_resp->ifd_data[i].cmd_buf_offset;
3519 } else {
3520 continue;
3521 }
3522 /* Populate the cmd data structure with the phys_addr */
3523 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3524 if (IS_ERR_OR_NULL(sg_ptr)) {
3525			pr_err("Ion client could not retrieve sg table\n");
3526 goto err;
3527 }
3528 if (sg_ptr->nents == 0) {
3529 pr_err("Num of scattered entries is 0\n");
3530 goto err;
3531 }
3532 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3533			pr_err("Num of scattered entries (%d) is greater than max supported %d\n",
3534				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3536 goto err;
3537 }
3538 sg = sg_ptr->sgl;
3539 if (sg_ptr->nents == 1) {
3540 uint32_t *update;
3541
3542 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3543 goto err;
3544 if ((data->type == QSEECOM_CLIENT_APP &&
3545 (data->client.app_arch == ELFCLASS32 ||
3546 data->client.app_arch == ELFCLASS64)) ||
3547 (data->type == QSEECOM_LISTENER_SERVICE)) {
3548 /*
3549 * Check if sg list phy add region is under 4GB
3550 */
3551 if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
3552 (!cleanup) &&
3553 ((uint64_t)sg_dma_address(sg_ptr->sgl)
3554 >= PHY_ADDR_4G - sg->length)) {
3555 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3556 data->client.app_name,
3557 &(sg_dma_address(sg_ptr->sgl)),
3558 sg->length);
3559 goto err;
3560 }
3561 update = (uint32_t *) field;
3562 *update = cleanup ? 0 :
3563 (uint32_t)sg_dma_address(sg_ptr->sgl);
3564 } else {
3565 pr_err("QSEE app arch %u is not supported\n",
3566 data->client.app_arch);
3567 goto err;
3568 }
3569 len += (uint32_t)sg->length;
3570 } else {
3571 struct qseecom_sg_entry *update;
3572 int j = 0;
3573
3574 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3575 (req->ifd_data[i].fd > 0)) {
3576
3577 if ((req->cmd_req_len <
3578 SG_ENTRY_SZ * sg_ptr->nents) ||
3579 (req->ifd_data[i].cmd_buf_offset >
3580 (req->cmd_req_len -
3581 SG_ENTRY_SZ * sg_ptr->nents))) {
3582 pr_err("Invalid offset = 0x%x\n",
3583 req->ifd_data[i].cmd_buf_offset);
3584 goto err;
3585 }
3586
3587 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3588 (lstnr_resp->ifd_data[i].fd > 0)) {
3589
3590 if ((lstnr_resp->resp_len <
3591 SG_ENTRY_SZ * sg_ptr->nents) ||
3592 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3593 (lstnr_resp->resp_len -
3594 SG_ENTRY_SZ * sg_ptr->nents))) {
3595 goto err;
3596 }
3597 }
3598 if ((data->type == QSEECOM_CLIENT_APP &&
3599 (data->client.app_arch == ELFCLASS32 ||
3600 data->client.app_arch == ELFCLASS64)) ||
3601 (data->type == QSEECOM_LISTENER_SERVICE)) {
3602 update = (struct qseecom_sg_entry *)field;
3603 for (j = 0; j < sg_ptr->nents; j++) {
3604 /*
3605 * Check if sg list PA is under 4GB
3606 */
3607 if ((qseecom.qsee_version >=
3608 QSEE_VERSION_40) &&
3609 (!cleanup) &&
3610 ((uint64_t)(sg_dma_address(sg))
3611 >= PHY_ADDR_4G - sg->length)) {
3612 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3613 data->client.app_name,
3614 &(sg_dma_address(sg)),
3615 sg->length);
3616 goto err;
3617 }
3618 update->phys_addr = cleanup ? 0 :
3619 (uint32_t)sg_dma_address(sg);
3620 update->len = cleanup ? 0 : sg->length;
3621 update++;
3622 len += sg->length;
3623 sg = sg_next(sg);
3624 }
3625 } else {
3626 pr_err("QSEE app arch %u is not supported\n",
3627 data->client.app_arch);
3628 goto err;
3629 }
3630 }
3631
3632 if (cleanup) {
3633 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3634 ihandle, NULL, len,
3635 ION_IOC_INV_CACHES);
3636 if (ret) {
3637 pr_err("cache operation failed %d\n", ret);
3638 goto err;
3639 }
3640 } else {
3641 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3642 ihandle, NULL, len,
3643 ION_IOC_CLEAN_INV_CACHES);
3644 if (ret) {
3645 pr_err("cache operation failed %d\n", ret);
3646 goto err;
3647 }
3648 if (data->type == QSEECOM_CLIENT_APP) {
3649 offset = req->ifd_data[i].cmd_buf_offset;
3650 data->sglistinfo_ptr[i].indexAndFlags =
3651 SGLISTINFO_SET_INDEX_FLAG(
3652 (sg_ptr->nents == 1), 0, offset);
3653 data->sglistinfo_ptr[i].sizeOrCount =
3654 (sg_ptr->nents == 1) ?
3655 sg->length : sg_ptr->nents;
3656 data->sglist_cnt = i + 1;
3657 } else {
3658 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3659 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3660 (uintptr_t)this_lstnr->sb_virt);
3661 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3662 SGLISTINFO_SET_INDEX_FLAG(
3663 (sg_ptr->nents == 1), 0, offset);
3664 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3665 (sg_ptr->nents == 1) ?
3666 sg->length : sg_ptr->nents;
3667 this_lstnr->sglist_cnt = i + 1;
3668 }
3669 }
3670 /* Deallocate the handle */
3671 if (!IS_ERR_OR_NULL(ihandle))
3672 ion_free(qseecom.ion_clnt, ihandle);
3673 }
3674 return ret;
3675err:
3676 if (!IS_ERR_OR_NULL(ihandle))
3677 ion_free(qseecom.ion_clnt, ihandle);
3678 return -ENOMEM;
3679}
3680
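/*
 * When an sg table has more entries than fit in the message buffer,
 * copy all entries into a freshly allocated DMA-coherent buffer and
 * write a version-2 list header into the command buffer pointing at it.
 */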
3681static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3682 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3683{
3684 struct scatterlist *sg = sg_ptr->sgl;
3685 struct qseecom_sg_entry_64bit *sg_entry;
3686 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3687 void *buf;
3688 uint i;
3689 size_t size;
3690 dma_addr_t coh_pmem;
3691
3692 if (fd_idx >= MAX_ION_FD) {
3693 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3694 return -ENOMEM;
3695 }
3696 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3697 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3698 /* Allocate a contiguous kernel buffer */
3699 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3700 size = (size + PAGE_SIZE) & PAGE_MASK;
3701 buf = dma_alloc_coherent(qseecom.pdev,
3702 size, &coh_pmem, GFP_KERNEL);
3703 if (buf == NULL) {
3704 pr_err("failed to alloc memory for sg buf\n");
3705 return -ENOMEM;
3706 }
3707 /* update qseecom_sg_list_buf_hdr_64bit */
3708 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3709 buf_hdr->new_buf_phys_addr = coh_pmem;
3710 buf_hdr->nents_total = sg_ptr->nents;
3711 /* save the left sg entries into new allocated buf */
3712 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3713 for (i = 0; i < sg_ptr->nents; i++) {
3714 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3715 sg_entry->len = sg->length;
3716 sg_entry++;
3717 sg = sg_next(sg);
3718 }
3719
3720 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3721 data->client.sec_buf_fd[fd_idx].vbase = buf;
3722 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3723 data->client.sec_buf_fd[fd_idx].size = size;
3724
3725 return 0;
3726}
3727
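/*
 * 64-bit variant of __qseecom_update_cmd_buf(): addresses are patched as
 * 64-bit values and oversized sg lists are spilled into a separate
 * coherent buffer via __qseecom_allocate_sg_list_buffer().
 */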
3728static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
3729 struct qseecom_dev_handle *data)
3730{
3731 struct ion_handle *ihandle;
3732 char *field;
3733 int ret = 0;
3734 int i = 0;
3735 uint32_t len = 0;
3736 struct scatterlist *sg;
3737 struct qseecom_send_modfd_cmd_req *req = NULL;
3738 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3739 struct qseecom_registered_listener_list *this_lstnr = NULL;
3740 uint32_t offset;
3741 struct sg_table *sg_ptr;
3742
3743 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3744 (data->type != QSEECOM_CLIENT_APP))
3745 return -EFAULT;
3746
3747 if (msg == NULL) {
3748 pr_err("Invalid address\n");
3749 return -EINVAL;
3750 }
3751 if (data->type == QSEECOM_LISTENER_SERVICE) {
3752 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3753 this_lstnr = __qseecom_find_svc(data->listener.id);
3754 if (IS_ERR_OR_NULL(this_lstnr)) {
3755 pr_err("Invalid listener ID\n");
3756 return -ENOMEM;
3757 }
3758 } else {
3759 req = (struct qseecom_send_modfd_cmd_req *)msg;
3760 }
3761
3762 for (i = 0; i < MAX_ION_FD; i++) {
3763 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3764 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003765 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003766 req->ifd_data[i].fd);
3767 if (IS_ERR_OR_NULL(ihandle)) {
3768 pr_err("Ion client can't retrieve the handle\n");
3769 return -ENOMEM;
3770 }
3771 field = (char *) req->cmd_req_buf +
3772 req->ifd_data[i].cmd_buf_offset;
3773 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3774 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003775 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003776 lstnr_resp->ifd_data[i].fd);
3777 if (IS_ERR_OR_NULL(ihandle)) {
3778 pr_err("Ion client can't retrieve the handle\n");
3779 return -ENOMEM;
3780 }
3781 field = lstnr_resp->resp_buf_ptr +
3782 lstnr_resp->ifd_data[i].cmd_buf_offset;
3783 } else {
3784 continue;
3785 }
3786 /* Populate the cmd data structure with the phys_addr */
3787 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3788 if (IS_ERR_OR_NULL(sg_ptr)) {
3789			pr_err("Ion client could not retrieve sg table\n");
3790 goto err;
3791 }
3792 if (sg_ptr->nents == 0) {
3793 pr_err("Num of scattered entries is 0\n");
3794 goto err;
3795 }
3796 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3797			pr_warn("Num of scattered entries (%d) is greater than %d\n",
3798				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3800 if (cleanup) {
3801 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3802 data->client.sec_buf_fd[i].vbase)
3803 dma_free_coherent(qseecom.pdev,
3804 data->client.sec_buf_fd[i].size,
3805 data->client.sec_buf_fd[i].vbase,
3806 data->client.sec_buf_fd[i].pbase);
3807 } else {
3808 ret = __qseecom_allocate_sg_list_buffer(data,
3809 field, i, sg_ptr);
3810 if (ret) {
3811 pr_err("Failed to allocate sg list buffer\n");
3812 goto err;
3813 }
3814 }
3815 len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
3816 sg = sg_ptr->sgl;
3817 goto cleanup;
3818 }
3819 sg = sg_ptr->sgl;
3820 if (sg_ptr->nents == 1) {
3821 uint64_t *update_64bit;
3822
Zhen Kongd097c6e02019-08-01 16:10:20 -07003823 if (__boundary_checks_offset_64(req, lstnr_resp,
3824 data, i))
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003825 goto err;
3826 /* 64bit app uses 64bit address */
3827 update_64bit = (uint64_t *) field;
3828 *update_64bit = cleanup ? 0 :
3829 (uint64_t)sg_dma_address(sg_ptr->sgl);
3830 len += (uint32_t)sg->length;
3831 } else {
3832 struct qseecom_sg_entry_64bit *update_64bit;
3833 int j = 0;
3834
3835 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3836 (req->ifd_data[i].fd > 0)) {
3837
3838 if ((req->cmd_req_len <
3839 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3840 (req->ifd_data[i].cmd_buf_offset >
3841 (req->cmd_req_len -
3842 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3843 pr_err("Invalid offset = 0x%x\n",
3844 req->ifd_data[i].cmd_buf_offset);
3845 goto err;
3846 }
3847
3848 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3849 (lstnr_resp->ifd_data[i].fd > 0)) {
3850
3851 if ((lstnr_resp->resp_len <
3852 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3853 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3854 (lstnr_resp->resp_len -
3855 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3856 goto err;
3857 }
3858 }
3859 /* 64bit app uses 64bit address */
3860 update_64bit = (struct qseecom_sg_entry_64bit *)field;
3861 for (j = 0; j < sg_ptr->nents; j++) {
3862 update_64bit->phys_addr = cleanup ? 0 :
3863 (uint64_t)sg_dma_address(sg);
3864 update_64bit->len = cleanup ? 0 :
3865 (uint32_t)sg->length;
3866 update_64bit++;
3867 len += sg->length;
3868 sg = sg_next(sg);
3869 }
3870 }
3871cleanup:
3872 if (cleanup) {
3873 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3874 ihandle, NULL, len,
3875 ION_IOC_INV_CACHES);
3876 if (ret) {
3877 pr_err("cache operation failed %d\n", ret);
3878 goto err;
3879 }
3880 } else {
3881 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3882 ihandle, NULL, len,
3883 ION_IOC_CLEAN_INV_CACHES);
3884 if (ret) {
3885 pr_err("cache operation failed %d\n", ret);
3886 goto err;
3887 }
3888 if (data->type == QSEECOM_CLIENT_APP) {
3889 offset = req->ifd_data[i].cmd_buf_offset;
3890 data->sglistinfo_ptr[i].indexAndFlags =
3891 SGLISTINFO_SET_INDEX_FLAG(
3892 (sg_ptr->nents == 1), 1, offset);
3893 data->sglistinfo_ptr[i].sizeOrCount =
3894 (sg_ptr->nents == 1) ?
3895 sg->length : sg_ptr->nents;
3896 data->sglist_cnt = i + 1;
3897 } else {
3898 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3899 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3900 (uintptr_t)this_lstnr->sb_virt);
3901 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3902 SGLISTINFO_SET_INDEX_FLAG(
3903 (sg_ptr->nents == 1), 1, offset);
3904 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3905 (sg_ptr->nents == 1) ?
3906 sg->length : sg_ptr->nents;
3907 this_lstnr->sglist_cnt = i + 1;
3908 }
3909 }
3910 /* Deallocate the handle */
3911 if (!IS_ERR_OR_NULL(ihandle))
3912 ion_free(qseecom.ion_clnt, ihandle);
3913 }
3914 return ret;
3915err:
3916 for (i = 0; i < MAX_ION_FD; i++)
3917 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3918 data->client.sec_buf_fd[i].vbase)
3919 dma_free_coherent(qseecom.pdev,
3920 data->client.sec_buf_fd[i].size,
3921 data->client.sec_buf_fd[i].vbase,
3922 data->client.sec_buf_fd[i].pbase);
3923 if (!IS_ERR_OR_NULL(ihandle))
3924 ion_free(qseecom.ion_clnt, ihandle);
3925 return -ENOMEM;
3926}
3927
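/*
 * Copy a modfd command from user space, validate it, patch in the ion
 * buffer addresses, send the command to the app, then restore the patched
 * fields once the command has completed.
 */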
3928static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3929 void __user *argp,
3930 bool is_64bit_addr)
3931{
3932 int ret = 0;
3933 int i;
3934 struct qseecom_send_modfd_cmd_req req;
3935 struct qseecom_send_cmd_req send_cmd_req;
3936
3937 ret = copy_from_user(&req, argp, sizeof(req));
3938 if (ret) {
3939 pr_err("copy_from_user failed\n");
3940		return -EFAULT;
3941 }
3942
3943 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3944 send_cmd_req.cmd_req_len = req.cmd_req_len;
3945 send_cmd_req.resp_buf = req.resp_buf;
3946 send_cmd_req.resp_len = req.resp_len;
3947
3948 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3949 return -EINVAL;
3950
3951 /* validate offsets */
3952 for (i = 0; i < MAX_ION_FD; i++) {
3953 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3954 pr_err("Invalid offset %d = 0x%x\n",
3955 i, req.ifd_data[i].cmd_buf_offset);
3956 return -EINVAL;
3957 }
3958 }
3959 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3960 (uintptr_t)req.cmd_req_buf);
3961 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3962 (uintptr_t)req.resp_buf);
3963
3964 if (!is_64bit_addr) {
3965 ret = __qseecom_update_cmd_buf(&req, false, data);
3966 if (ret)
3967 return ret;
3968 ret = __qseecom_send_cmd(data, &send_cmd_req);
3969 if (ret)
3970 return ret;
3971 ret = __qseecom_update_cmd_buf(&req, true, data);
3972 if (ret)
3973 return ret;
3974 } else {
3975 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3976 if (ret)
3977 return ret;
3978 ret = __qseecom_send_cmd(data, &send_cmd_req);
3979 if (ret)
3980 return ret;
3981 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3982 if (ret)
3983 return ret;
3984 }
3985
3986 return ret;
3987}
3988
3989static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3990 void __user *argp)
3991{
3992 return __qseecom_send_modfd_cmd(data, argp, false);
3993}
3994
3995static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
3996 void __user *argp)
3997{
3998 return __qseecom_send_modfd_cmd(data, argp, true);
3999}
4000
4003static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
4004 struct qseecom_registered_listener_list *svc)
4005{
4006 int ret;
4007
Zhen Kongf5087172018-10-11 17:22:05 -07004008 ret = (svc->rcv_req_flag == 1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08004009 return ret || data->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004010}
4011
4012static int qseecom_receive_req(struct qseecom_dev_handle *data)
4013{
4014 int ret = 0;
4015 struct qseecom_registered_listener_list *this_lstnr;
4016
Zhen Kongbcdeda22018-11-16 13:50:51 -08004017 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004018 this_lstnr = __qseecom_find_svc(data->listener.id);
4019 if (!this_lstnr) {
4020 pr_err("Invalid listener ID\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08004021 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004022 return -ENODATA;
4023 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004024 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004025
4026 while (1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05304027 if (wait_event_interruptible(this_lstnr->rcv_req_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004028 __qseecom_listener_has_rcvd_req(data,
4029 this_lstnr))) {
Zhen Kong52ce9062018-09-24 14:33:27 -07004030 pr_debug("Interrupted: exiting Listener Service = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004031 (uint32_t)data->listener.id);
4032 /* woken up for different reason */
4033 return -ERESTARTSYS;
4034 }
4035
Zhen Kongbcdeda22018-11-16 13:50:51 -08004036 if (data->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004037 pr_err("Aborting Listener Service = %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07004038 (uint32_t)data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004039 return -ENODEV;
4040 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004041 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004042 this_lstnr->rcv_req_flag = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08004043 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004044 break;
4045 }
4046 return ret;
4047}
4048
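/*
 * Basic ELF sanity checks on the .mdt image: magic bytes, a non-zero
 * program header count, and program headers that fit within the file.
 */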
4049static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
4050{
4051 unsigned char app_arch = 0;
4052 struct elf32_hdr *ehdr;
4053 struct elf64_hdr *ehdr64;
4054
4055 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4056
4057 switch (app_arch) {
4058 case ELFCLASS32: {
4059 ehdr = (struct elf32_hdr *)fw_entry->data;
4060 if (fw_entry->size < sizeof(*ehdr)) {
4061 pr_err("%s: Not big enough to be an elf32 header\n",
4062 qseecom.pdev->init_name);
4063 return false;
4064 }
4065 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
4066 pr_err("%s: Not an elf32 header\n",
4067 qseecom.pdev->init_name);
4068 return false;
4069 }
4070 if (ehdr->e_phnum == 0) {
4071 pr_err("%s: No loadable segments\n",
4072 qseecom.pdev->init_name);
4073 return false;
4074 }
4075 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
4076 sizeof(struct elf32_hdr) > fw_entry->size) {
4077 pr_err("%s: Program headers not within mdt\n",
4078 qseecom.pdev->init_name);
4079 return false;
4080 }
4081 break;
4082 }
4083 case ELFCLASS64: {
4084 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4085 if (fw_entry->size < sizeof(*ehdr64)) {
4086 pr_err("%s: Not big enough to be an elf64 header\n",
4087 qseecom.pdev->init_name);
4088 return false;
4089 }
4090 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
4091 pr_err("%s: Not an elf64 header\n",
4092 qseecom.pdev->init_name);
4093 return false;
4094 }
4095 if (ehdr64->e_phnum == 0) {
4096 pr_err("%s: No loadable segments\n",
4097 qseecom.pdev->init_name);
4098 return false;
4099 }
4100 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
4101 sizeof(struct elf64_hdr) > fw_entry->size) {
4102 pr_err("%s: Program headers not within mdt\n",
4103 qseecom.pdev->init_name);
4104 return false;
4105 }
4106 break;
4107 }
4108 default: {
4109 pr_err("QSEE app arch %u is not supported\n", app_arch);
4110 return false;
4111 }
4112 }
4113 return true;
4114}
4115
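/*
 * Report the total size of the split firmware image (.mdt plus all .bNN
 * blobs) and the ELF class of the application.
 */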
4116static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
4117 uint32_t *app_arch)
4118{
4119 int ret = -1;
4120 int i = 0, rc = 0;
4121 const struct firmware *fw_entry = NULL;
4122 char fw_name[MAX_APP_NAME_SIZE];
4123 struct elf32_hdr *ehdr;
4124 struct elf64_hdr *ehdr64;
4125 int num_images = 0;
4126
4127 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4128 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4129 if (rc) {
4130 pr_err("error with request_firmware\n");
4131 ret = -EIO;
4132 goto err;
4133 }
4134 if (!__qseecom_is_fw_image_valid(fw_entry)) {
4135 ret = -EIO;
4136 goto err;
4137 }
4138 *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4139 *fw_size = fw_entry->size;
4140 if (*app_arch == ELFCLASS32) {
4141 ehdr = (struct elf32_hdr *)fw_entry->data;
4142 num_images = ehdr->e_phnum;
4143 } else if (*app_arch == ELFCLASS64) {
4144 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4145 num_images = ehdr64->e_phnum;
4146 } else {
4147 pr_err("QSEE %s app, arch %u is not supported\n",
4148 appname, *app_arch);
4149 ret = -EIO;
4150 goto err;
4151 }
4152 pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
4153 release_firmware(fw_entry);
4154 fw_entry = NULL;
4155 for (i = 0; i < num_images; i++) {
4156 memset(fw_name, 0, sizeof(fw_name));
4157 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4158 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4159 if (ret)
4160 goto err;
4161 if (*fw_size > U32_MAX - fw_entry->size) {
4162 pr_err("QSEE %s app file size overflow\n", appname);
4163 ret = -EINVAL;
4164 goto err;
4165 }
4166 *fw_size += fw_entry->size;
4167 release_firmware(fw_entry);
4168 fw_entry = NULL;
4169 }
4170
4171 return ret;
4172err:
4173 if (fw_entry)
4174 release_firmware(fw_entry);
4175 *fw_size = 0;
4176 return ret;
4177}
4178
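/*
 * Concatenate the .mdt and .bNN blobs into the supplied image buffer and
 * fill in the mdt_len/img_len fields of the load request.
 */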
4179static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
4180 uint32_t fw_size,
4181 struct qseecom_load_app_ireq *load_req)
4182{
4183 int ret = -1;
4184 int i = 0, rc = 0;
4185 const struct firmware *fw_entry = NULL;
4186 char fw_name[MAX_APP_NAME_SIZE];
4187 u8 *img_data_ptr = img_data;
4188 struct elf32_hdr *ehdr;
4189 struct elf64_hdr *ehdr64;
4190 int num_images = 0;
4191 unsigned char app_arch = 0;
4192
4193 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4194 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4195 if (rc) {
4196 ret = -EIO;
4197 goto err;
4198 }
4199
4200 load_req->img_len = fw_entry->size;
4201 if (load_req->img_len > fw_size) {
4202 pr_err("app %s size %zu is larger than buf size %u\n",
4203 appname, fw_entry->size, fw_size);
4204 ret = -EINVAL;
4205 goto err;
4206 }
4207 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4208 img_data_ptr = img_data_ptr + fw_entry->size;
4209 load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
4210
4211 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4212 if (app_arch == ELFCLASS32) {
4213 ehdr = (struct elf32_hdr *)fw_entry->data;
4214 num_images = ehdr->e_phnum;
4215 } else if (app_arch == ELFCLASS64) {
4216 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4217 num_images = ehdr64->e_phnum;
4218 } else {
4219 pr_err("QSEE %s app, arch %u is not supported\n",
4220 appname, app_arch);
4221 ret = -EIO;
4222 goto err;
4223 }
4224 release_firmware(fw_entry);
4225 fw_entry = NULL;
4226 for (i = 0; i < num_images; i++) {
4227 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4228 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4229 if (ret) {
4230 pr_err("Failed to locate blob %s\n", fw_name);
4231 goto err;
4232 }
4233 if ((fw_entry->size > U32_MAX - load_req->img_len) ||
4234 (fw_entry->size + load_req->img_len > fw_size)) {
4235 pr_err("Invalid file size for %s\n", fw_name);
4236 ret = -EINVAL;
4237 goto err;
4238 }
4239 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4240 img_data_ptr = img_data_ptr + fw_entry->size;
4241 load_req->img_len += fw_entry->size;
4242 release_firmware(fw_entry);
4243 fw_entry = NULL;
4244 }
4245 return ret;
4246err:
4247 release_firmware(fw_entry);
4248 return ret;
4249}
4250
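/*
 * Allocate an ION buffer for the firmware image (retrying on transient
 * allocation failures), map it into the kernel, and return its physical
 * address.
 */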
4251static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4252 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4253{
4254 size_t len = 0;
4255 int ret = 0;
4256 ion_phys_addr_t pa;
4257 struct ion_handle *ihandle = NULL;
4258 u8 *img_data = NULL;
Zhen Kong3dd92792017-12-08 09:47:15 -08004259 int retry = 0;
Zhen Konge30e1342019-01-22 08:57:02 -08004260 int ion_flag = ION_FLAG_CACHED;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004261
Zhen Kong3dd92792017-12-08 09:47:15 -08004262 do {
Zhen Kong5d02be92018-05-29 16:17:29 -07004263 if (retry++) {
4264 mutex_unlock(&app_access_lock);
Zhen Kong3dd92792017-12-08 09:47:15 -08004265 msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
Zhen Kong5d02be92018-05-29 16:17:29 -07004266 mutex_lock(&app_access_lock);
4267 }
Zhen Kong3dd92792017-12-08 09:47:15 -08004268 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
Zhen Konge30e1342019-01-22 08:57:02 -08004269 SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), ion_flag);
Zhen Kong3dd92792017-12-08 09:47:15 -08004270 } while (IS_ERR_OR_NULL(ihandle) &&
4271 (retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004272
4273 if (IS_ERR_OR_NULL(ihandle)) {
4274 pr_err("ION alloc failed\n");
4275 return -ENOMEM;
4276 }
4277 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4278 ihandle);
4279
4280 if (IS_ERR_OR_NULL(img_data)) {
4281 pr_err("ION memory mapping for image loading failed\n");
4282 ret = -ENOMEM;
4283 goto exit_ion_free;
4284 }
4285 /* Get the physical address of the ION BUF */
4286 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4287 if (ret) {
4288 pr_err("physical memory retrieval failure\n");
4289 ret = -EIO;
4290 goto exit_ion_unmap_kernel;
4291 }
4292
4293 *pihandle = ihandle;
4294 *data = img_data;
4295 *paddr = pa;
4296 return ret;
4297
4298exit_ion_unmap_kernel:
4299 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4300exit_ion_free:
4301 ion_free(qseecom.ion_clnt, ihandle);
4302 ihandle = NULL;
4303 return ret;
4304}
4305
4306static void __qseecom_free_img_data(struct ion_handle **ihandle)
4307{
4308 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4309 ion_free(qseecom.ion_clnt, *ihandle);
4310 *ihandle = NULL;
4311}
4312
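/*
 * Load a trusted app image from the firmware partition: load cmnlib or
 * cmnlib64 first if needed, then issue QSEOS_APP_START_COMMAND to TZ and
 * return the app_id reported in the response.
 */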
4313static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4314 uint32_t *app_id)
4315{
4316 int ret = -1;
4317 uint32_t fw_size = 0;
4318 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4319 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4320 struct qseecom_command_scm_resp resp;
4321 u8 *img_data = NULL;
4322 ion_phys_addr_t pa = 0;
4323 struct ion_handle *ihandle = NULL;
4324 void *cmd_buf = NULL;
4325 size_t cmd_len;
4326 uint32_t app_arch = 0;
4327
4328 if (!data || !appname || !app_id) {
4329 pr_err("Null pointer to data or appname or appid\n");
4330 return -EINVAL;
4331 }
4332 *app_id = 0;
4333 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4334 return -EIO;
4335 data->client.app_arch = app_arch;
4336
4337 /* Check and load cmnlib */
4338 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4339 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4340 ret = qseecom_load_commonlib_image(data, "cmnlib");
4341 if (ret) {
4342 pr_err("failed to load cmnlib\n");
4343 return -EIO;
4344 }
4345 qseecom.commonlib_loaded = true;
4346 pr_debug("cmnlib is loaded\n");
4347 }
4348
4349 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4350 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4351 if (ret) {
4352 pr_err("failed to load cmnlib64\n");
4353 return -EIO;
4354 }
4355 qseecom.commonlib64_loaded = true;
4356 pr_debug("cmnlib64 is loaded\n");
4357 }
4358 }
4359
4360 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4361 if (ret)
4362 return ret;
4363
4364 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4365 if (ret) {
4366 ret = -EIO;
4367 goto exit_free_img_data;
4368 }
4369
4370 /* Populate the load_req parameters */
4371 if (qseecom.qsee_version < QSEE_VERSION_40) {
4372 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4373		/* mdt_len and img_len were already filled in by __qseecom_get_fw_data() */
4375 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4376 load_req.phy_addr = (uint32_t)pa;
4377 cmd_buf = (void *)&load_req;
4378 cmd_len = sizeof(struct qseecom_load_app_ireq);
4379 } else {
4380 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4381 load_req_64bit.mdt_len = load_req.mdt_len;
4382 load_req_64bit.img_len = load_req.img_len;
4383 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4384 load_req_64bit.phy_addr = (uint64_t)pa;
4385 cmd_buf = (void *)&load_req_64bit;
4386 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4387 }
4388
4389 if (qseecom.support_bus_scaling) {
4390 mutex_lock(&qsee_bw_mutex);
4391 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4392 mutex_unlock(&qsee_bw_mutex);
4393 if (ret) {
4394 ret = -EIO;
4395 goto exit_free_img_data;
4396 }
4397 }
4398
4399 ret = __qseecom_enable_clk_scale_up(data);
4400 if (ret) {
4401 ret = -EIO;
4402 goto exit_unregister_bus_bw_need;
4403 }
4404
4405 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4406 img_data, fw_size,
4407 ION_IOC_CLEAN_INV_CACHES);
4408 if (ret) {
4409 pr_err("cache operation failed %d\n", ret);
4410 goto exit_disable_clk_vote;
4411 }
4412
4413 /* SCM_CALL to load the image */
4414 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4415 &resp, sizeof(resp));
4416 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004417 pr_err("scm_call to load failed : ret %d, result %x\n",
4418 ret, resp.result);
4419 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4420 ret = -EEXIST;
4421 else
4422 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004423 goto exit_disable_clk_vote;
4424 }
4425
4426 switch (resp.result) {
4427 case QSEOS_RESULT_SUCCESS:
4428 *app_id = resp.data;
4429 break;
4430 case QSEOS_RESULT_INCOMPLETE:
4431 ret = __qseecom_process_incomplete_cmd(data, &resp);
4432 if (ret)
4433 pr_err("process_incomplete_cmd FAILED\n");
4434 else
4435 *app_id = resp.data;
4436 break;
4437 case QSEOS_RESULT_FAILURE:
4438 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4439 break;
4440 default:
4441 pr_err("scm call return unknown response %d\n", resp.result);
4442 ret = -EINVAL;
4443 break;
4444 }
4445
4446exit_disable_clk_vote:
4447 __qseecom_disable_clk_scale_down(data);
4448
4449exit_unregister_bus_bw_need:
4450 if (qseecom.support_bus_scaling) {
4451 mutex_lock(&qsee_bw_mutex);
4452 qseecom_unregister_bus_bandwidth_needs(data);
4453 mutex_unlock(&qsee_bw_mutex);
4454 }
4455
4456exit_free_img_data:
4457 __qseecom_free_img_data(&ihandle);
4458 return ret;
4459}
4460
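/*
 * Load a common library image (cmnlib or cmnlib64) into QSEE via
 * QSEOS_LOAD_SERV_IMAGE_COMMAND.
 */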
4461static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
4462 char *cmnlib_name)
4463{
4464 int ret = 0;
4465 uint32_t fw_size = 0;
4466 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4467 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4468 struct qseecom_command_scm_resp resp;
4469 u8 *img_data = NULL;
4470 ion_phys_addr_t pa = 0;
4471 void *cmd_buf = NULL;
4472 size_t cmd_len;
4473 uint32_t app_arch = 0;
Zhen Kong3bafb312017-10-18 10:27:20 -07004474 struct ion_handle *cmnlib_ion_handle = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004475
4476 if (!cmnlib_name) {
4477 pr_err("cmnlib_name is NULL\n");
4478 return -EINVAL;
4479 }
4480 if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
4481 pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
4482 cmnlib_name, strlen(cmnlib_name));
4483 return -EINVAL;
4484 }
4485
4486 if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
4487 return -EIO;
4488
Zhen Kong3bafb312017-10-18 10:27:20 -07004489 ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004490 &img_data, fw_size, &pa);
4491 if (ret)
4492 return -EIO;
4493
4494 ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
4495 if (ret) {
4496 ret = -EIO;
4497 goto exit_free_img_data;
4498 }
4499 if (qseecom.qsee_version < QSEE_VERSION_40) {
4500 load_req.phy_addr = (uint32_t)pa;
4501 load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4502 cmd_buf = (void *)&load_req;
4503 cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
4504 } else {
4505 load_req_64bit.phy_addr = (uint64_t)pa;
4506 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4507 load_req_64bit.img_len = load_req.img_len;
4508 load_req_64bit.mdt_len = load_req.mdt_len;
4509 cmd_buf = (void *)&load_req_64bit;
4510 cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
4511 }
4512
4513 if (qseecom.support_bus_scaling) {
4514 mutex_lock(&qsee_bw_mutex);
4515 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4516 mutex_unlock(&qsee_bw_mutex);
4517 if (ret) {
4518 ret = -EIO;
4519 goto exit_free_img_data;
4520 }
4521 }
4522
4523 /* Vote for the SFPB clock */
4524 ret = __qseecom_enable_clk_scale_up(data);
4525 if (ret) {
4526 ret = -EIO;
4527 goto exit_unregister_bus_bw_need;
4528 }
4529
Zhen Kong3bafb312017-10-18 10:27:20 -07004530 ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004531 img_data, fw_size,
4532 ION_IOC_CLEAN_INV_CACHES);
4533 if (ret) {
4534 pr_err("cache operation failed %d\n", ret);
4535 goto exit_disable_clk_vote;
4536 }
4537
4538 /* SCM_CALL to load the image */
4539 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4540 &resp, sizeof(resp));
4541 if (ret) {
4542 pr_err("scm_call to load failed : ret %d\n", ret);
4543 ret = -EIO;
4544 goto exit_disable_clk_vote;
4545 }
4546
4547 switch (resp.result) {
4548 case QSEOS_RESULT_SUCCESS:
4549 break;
4550 case QSEOS_RESULT_FAILURE:
4551		pr_err("scm call failed w/response result %d\n", resp.result);
4552 ret = -EINVAL;
4553 goto exit_disable_clk_vote;
4554 case QSEOS_RESULT_INCOMPLETE:
4555 ret = __qseecom_process_incomplete_cmd(data, &resp);
4556 if (ret) {
4557 pr_err("process_incomplete_cmd failed err: %d\n", ret);
4558 goto exit_disable_clk_vote;
4559 }
4560 break;
4561 default:
4562 pr_err("scm call return unknown response %d\n", resp.result);
4563 ret = -EINVAL;
4564 goto exit_disable_clk_vote;
4565 }
4566
4567exit_disable_clk_vote:
4568 __qseecom_disable_clk_scale_down(data);
4569
4570exit_unregister_bus_bw_need:
4571 if (qseecom.support_bus_scaling) {
4572 mutex_lock(&qsee_bw_mutex);
4573 qseecom_unregister_bus_bandwidth_needs(data);
4574 mutex_unlock(&qsee_bw_mutex);
4575 }
4576
4577exit_free_img_data:
Zhen Kong3bafb312017-10-18 10:27:20 -07004578 __qseecom_free_img_data(&cmnlib_ion_handle);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004579 return ret;
4580}
4581
4582static int qseecom_unload_commonlib_image(void)
4583{
4584 int ret = -EINVAL;
4585 struct qseecom_unload_lib_image_ireq unload_req = {0};
4586 struct qseecom_command_scm_resp resp;
4587
4588 /* Populate the remaining parameters */
4589 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4590
4591 /* SCM_CALL to load the image */
4592 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4593 sizeof(struct qseecom_unload_lib_image_ireq),
4594 &resp, sizeof(resp));
4595 if (ret) {
4596 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4597 ret = -EIO;
4598 } else {
4599 switch (resp.result) {
4600 case QSEOS_RESULT_SUCCESS:
4601 break;
4602 case QSEOS_RESULT_FAILURE:
4603 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4604 break;
4605 default:
4606 pr_err("scm call return unknown response %d\n",
4607 resp.result);
4608 ret = -EINVAL;
4609 break;
4610 }
4611 }
4612
4613 return ret;
4614}
4615
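/*
 * Kernel-client API: allocate the shared buffer, load the named trusted
 * app (or attach to an already loaded instance), and return a handle for
 * subsequent qseecom_send_command() calls.
 */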
4616int qseecom_start_app(struct qseecom_handle **handle,
4617 char *app_name, uint32_t size)
4618{
4619 int32_t ret = 0;
4620 unsigned long flags = 0;
4621 struct qseecom_dev_handle *data = NULL;
4622 struct qseecom_check_app_ireq app_ireq;
4623 struct qseecom_registered_app_list *entry = NULL;
4624 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4625 bool found_app = false;
4626 size_t len;
4627 ion_phys_addr_t pa;
4628 uint32_t fw_size, app_arch;
4629 uint32_t app_id = 0;
4630
Zhen Kongc4c162a2019-01-23 12:07:12 -08004631 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004632
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004633 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4634 pr_err("Not allowed to be called in %d state\n",
4635 atomic_read(&qseecom.qseecom_state));
4636 return -EPERM;
4637 }
4638 if (!app_name) {
4639 pr_err("failed to get the app name\n");
4640 return -EINVAL;
4641 }
4642
Zhen Kong64a6d7282017-06-16 11:55:07 -07004643 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004644 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004645 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004646 return -EINVAL;
4647 }
4648
4649 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4650 if (!(*handle))
4651 return -ENOMEM;
4652
4653 data = kzalloc(sizeof(*data), GFP_KERNEL);
4654 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304655 ret = -ENOMEM;
4656 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004657 }
4658 data->abort = 0;
4659 data->type = QSEECOM_CLIENT_APP;
4660 data->released = false;
4661 data->client.sb_length = size;
4662 data->client.user_virt_sb_base = 0;
4663 data->client.ihandle = NULL;
4664
4665 init_waitqueue_head(&data->abort_wq);
4666
4667 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4668 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4669 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4670 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304671 ret = -ENOMEM;
4672 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004673 }
4674 mutex_lock(&app_access_lock);
4675
Zhen Kong5d02be92018-05-29 16:17:29 -07004676recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004677 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4678 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4679 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4680 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304681 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004682
4683 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4684 if (app_id) {
4685 pr_warn("App id %d for [%s] app exists\n", app_id,
4686 (char *)app_ireq.app_name);
4687 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4688 list_for_each_entry(entry,
4689 &qseecom.registered_app_list_head, list){
4690 if (entry->app_id == app_id) {
4691 entry->ref_cnt++;
4692 found_app = true;
4693 break;
4694 }
4695 }
4696 spin_unlock_irqrestore(
4697 &qseecom.registered_app_list_lock, flags);
4698 if (!found_app)
4699 pr_warn("App_id %d [%s] was loaded but not registered\n",
4700				app_id, (char *)app_ireq.app_name);
4701 } else {
4702 /* load the app and get the app_id */
4703		pr_debug("%s: Loading app for the first time\n",
4704 qseecom.pdev->init_name);
4705 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004706 if (ret == -EEXIST) {
4707 pr_err("recheck if TA %s is loaded\n", app_name);
4708 goto recheck;
4709 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304710 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004711 }
4712 data->client.app_id = app_id;
4713 if (!found_app) {
4714 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4715 if (!entry) {
4716 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304717 ret = -ENOMEM;
4718 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004719 }
4720 entry->app_id = app_id;
4721 entry->ref_cnt = 1;
4722 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4723 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4724 ret = -EIO;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304725 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004726 }
4727 entry->app_arch = app_arch;
4728 entry->app_blocked = false;
4729 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004730 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004731 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4732 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4733 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4734 flags);
4735 }
4736
4737 /* Get the physical address of the ION BUF */
4738 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4739 if (ret) {
4740 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4741 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304742 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004743 }
4744
4745 /* Populate the structure for sending scm call to load image */
4746 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4747 data->client.ihandle);
4748 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4749 pr_err("ION memory mapping for client shared buf failed\n");
4750 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304751 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004752 }
4753 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4754 data->client.sb_phys = (phys_addr_t)pa;
4755 (*handle)->dev = (void *)data;
4756 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4757 (*handle)->sbuf_len = data->client.sb_length;
4758
4759 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4760 if (!kclient_entry) {
4761 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304762 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004763 }
4764 kclient_entry->handle = *handle;
4765
4766 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4767 list_add_tail(&kclient_entry->list,
4768 &qseecom.registered_kclient_list_head);
4769 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4770
4771 mutex_unlock(&app_access_lock);
4772 return 0;
4773
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304774exit_ion_unmap_kernel:
4775 if (!IS_ERR_OR_NULL(data->client.ihandle))
4776 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4777exit_entry_free:
4778 kfree(entry);
4779exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004780 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304781 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4782 ion_free(qseecom.ion_clnt, data->client.ihandle);
4783 data->client.ihandle = NULL;
4784 }
4785exit_data_free:
4786 kfree(data);
4787exit_handle_free:
4788 if (*handle) {
4789 kfree(*handle);
4790 *handle = NULL;
4791 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004792 return ret;
4793}
4794EXPORT_SYMBOL(qseecom_start_app);
4795
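/*
 * Kernel-client API: unload the trusted app referenced by the handle and
 * release the associated client bookkeeping.
 */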
4796int qseecom_shutdown_app(struct qseecom_handle **handle)
4797{
4798 int ret = -EINVAL;
4799 struct qseecom_dev_handle *data;
4800
4801 struct qseecom_registered_kclient_list *kclient = NULL;
4802 unsigned long flags = 0;
4803 bool found_handle = false;
4804
Zhen Kongc4c162a2019-01-23 12:07:12 -08004805 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004806
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004807 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4808 pr_err("Not allowed to be called in %d state\n",
4809 atomic_read(&qseecom.qseecom_state));
4810 return -EPERM;
4811 }
4812
4813 if ((handle == NULL) || (*handle == NULL)) {
4814 pr_err("Handle is not initialized\n");
4815 return -EINVAL;
4816 }
4817 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4818 mutex_lock(&app_access_lock);
4819
4820 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4821 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4822 list) {
4823 if (kclient->handle == (*handle)) {
4824 list_del(&kclient->list);
4825 found_handle = true;
4826 break;
4827 }
4828 }
4829 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4830 if (!found_handle)
4831 pr_err("Unable to find the handle, exiting\n");
4832 else
4833 ret = qseecom_unload_app(data, false);
4834
4835 mutex_unlock(&app_access_lock);
4836 if (ret == 0) {
4837 kzfree(data);
4838 kzfree(*handle);
4839 kzfree(kclient);
4840 *handle = NULL;
4841 }
4842
4843 return ret;
4844}
4845EXPORT_SYMBOL(qseecom_shutdown_app);
4846
4847int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
4848 uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
4849{
4850 int ret = 0;
4851 struct qseecom_send_cmd_req req = {0, 0, 0, 0};
4852 struct qseecom_dev_handle *data;
4853 bool perf_enabled = false;
4854
Zhen Kongc4c162a2019-01-23 12:07:12 -08004855 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004856
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004857 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4858 pr_err("Not allowed to be called in %d state\n",
4859 atomic_read(&qseecom.qseecom_state));
4860 return -EPERM;
4861 }
4862
4863 if (handle == NULL) {
4864 pr_err("Handle is not initialized\n");
4865 return -EINVAL;
4866 }
4867 data = handle->dev;
4868
4869 req.cmd_req_len = sbuf_len;
4870 req.resp_len = rbuf_len;
4871 req.cmd_req_buf = send_buf;
4872 req.resp_buf = resp_buf;
4873
4874 if (__validate_send_cmd_inputs(data, &req))
4875 return -EINVAL;
4876
4877 mutex_lock(&app_access_lock);
4878 if (qseecom.support_bus_scaling) {
4879 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
4880 if (ret) {
4881 pr_err("Failed to set bw.\n");
4882 mutex_unlock(&app_access_lock);
4883 return ret;
4884 }
4885 }
4886 /*
4887 * On targets where crypto clock is handled by HLOS,
4888 * if clk_access_cnt is zero and perf_enabled is false,
4889 * then the crypto clock was not enabled before sending cmd
4890 * to tz, qseecom will enable the clock to avoid service failure.
4891 */
4892 if (!qseecom.no_clock_support &&
4893 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
4894 pr_debug("ce clock is not enabled!\n");
4895 ret = qseecom_perf_enable(data);
4896 if (ret) {
4897 pr_err("Failed to vote for clock with err %d\n",
4898 ret);
4899 mutex_unlock(&app_access_lock);
4900 return -EINVAL;
4901 }
4902 perf_enabled = true;
4903 }
4904 if (!strcmp(data->client.app_name, "securemm"))
4905 data->use_legacy_cmd = true;
4906
4907 ret = __qseecom_send_cmd(data, &req);
4908 data->use_legacy_cmd = false;
4909 if (qseecom.support_bus_scaling)
4910 __qseecom_add_bw_scale_down_timer(
4911 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
4912
4913 if (perf_enabled) {
4914 qsee_disable_clock_vote(data, CLK_DFAB);
4915 qsee_disable_clock_vote(data, CLK_SFPB);
4916 }
4917
4918 mutex_unlock(&app_access_lock);
4919
4920 if (ret)
4921 return ret;
4922
4923 pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
4924 req.resp_len, req.resp_buf);
4925 return ret;
4926}
4927EXPORT_SYMBOL(qseecom_send_command);
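/*
 * Usage sketch for qseecom_send_command(): the request and response
 * buffers are validated against the shared buffer set up by
 * qseecom_start_app(), so a kernel client typically carves both out of
 * handle->sbuf. The my_ta_req/my_ta_rsp structures and the MY_TA_CMD id
 * below are placeholders:
 *
 *	struct my_ta_req *req = (struct my_ta_req *)handle->sbuf;
 *	struct my_ta_rsp *rsp =
 *		(struct my_ta_rsp *)(handle->sbuf + sizeof(*req));
 *
 *	req->cmd_id = MY_TA_CMD;
 *	ret = qseecom_send_command(handle, req, sizeof(*req),
 *				   rsp, sizeof(*rsp));
 */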
4928
4929int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4930{
4931 int ret = 0;
4932
4933 if ((handle == NULL) || (handle->dev == NULL)) {
4934 pr_err("No valid kernel client\n");
4935 return -EINVAL;
4936 }
4937 if (high) {
4938 if (qseecom.support_bus_scaling) {
4939 mutex_lock(&qsee_bw_mutex);
4940 __qseecom_register_bus_bandwidth_needs(handle->dev,
4941 HIGH);
4942 mutex_unlock(&qsee_bw_mutex);
4943 } else {
4944 ret = qseecom_perf_enable(handle->dev);
4945 if (ret)
4946 pr_err("Failed to vote for clock with err %d\n",
4947 ret);
4948 }
4949 } else {
4950 if (!qseecom.support_bus_scaling) {
4951 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4952 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4953 } else {
4954 mutex_lock(&qsee_bw_mutex);
4955 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4956 mutex_unlock(&qsee_bw_mutex);
4957 }
4958 }
4959 return ret;
4960}
4961EXPORT_SYMBOL(qseecom_set_bandwidth);
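/*
 * Usage sketch for qseecom_set_bandwidth(): a kernel client issuing a
 * burst of commands can bracket the burst so the bus/clock vote is held
 * once instead of being re-requested per command (handle, req, rsp and
 * their lengths as in the sketch above):
 *
 *	qseecom_set_bandwidth(handle, true);
 *	for (i = 0; i < n; i++)
 *		ret = qseecom_send_command(handle, req, req_len,
 *					   rsp, rsp_len);
 *	qseecom_set_bandwidth(handle, false);
 */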
4962
4963int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
4964{
4965 struct qseecom_registered_app_list dummy_app_entry = { {0} };
4966 struct qseecom_dev_handle dummy_private_data = {0};
4967 struct qseecom_command_scm_resp resp;
4968 int ret = 0;
4969
4970 if (!desc) {
4971 pr_err("desc is NULL\n");
4972 return -EINVAL;
4973 }
4974
4975 resp.result = desc->ret[0]; /*req_cmd*/
Zhen Kong2f60f492017-06-29 15:22:14 -07004976 resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004977 resp.data = desc->ret[2]; /*listener_id*/
4978
Zhen Konge7f525f2017-12-01 18:26:25 -08004979 dummy_private_data.client.app_id = desc->ret[1];
Zhen Kong0ea975d2019-03-12 14:40:24 -07004980 dummy_private_data.client.from_smcinvoke = true;
Zhen Konge7f525f2017-12-01 18:26:25 -08004981 dummy_app_entry.app_id = desc->ret[1];
4982
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004983 mutex_lock(&app_access_lock);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004984 if (qseecom.qsee_reentrancy_support)
4985 ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004986 &dummy_private_data);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004987 else
4988 ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
4989 &resp);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004990 mutex_unlock(&app_access_lock);
4991 if (ret)
Zhen Kong2f60f492017-06-29 15:22:14 -07004992 pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004993 (int)desc->ret[0], (int)desc->ret[2],
4994 (int)desc->ret[1], ret);
4995 desc->ret[0] = resp.result;
4996 desc->ret[1] = resp.resp_type;
4997 desc->ret[2] = resp.data;
4998 return ret;
4999}
5000EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
5001
5002static int qseecom_send_resp(void)
5003{
5004 qseecom.send_resp_flag = 1;
5005 wake_up_interruptible(&qseecom.send_resp_wq);
5006 return 0;
5007}
5008
5009static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
5010{
5011 struct qseecom_registered_listener_list *this_lstnr = NULL;
5012
5013 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
5014 this_lstnr = __qseecom_find_svc(data->listener.id);
5015 if (this_lstnr == NULL)
5016 return -EINVAL;
5017 qseecom.send_resp_flag = 1;
5018 this_lstnr->send_resp_flag = 1;
5019 wake_up_interruptible(&qseecom.send_resp_wq);
5020 return 0;
5021}
5022
5023static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
5024 struct qseecom_send_modfd_listener_resp *resp,
5025 struct qseecom_registered_listener_list *this_lstnr)
5026{
5027 int i;
5028
5029 if (!data || !resp || !this_lstnr) {
5030 pr_err("listener handle or resp msg is null\n");
5031 return -EINVAL;
5032 }
5033
5034 if (resp->resp_buf_ptr == NULL) {
5035 pr_err("resp buffer is null\n");
5036 return -EINVAL;
5037 }
5038 /* validate resp buf length */
5039 if ((resp->resp_len == 0) ||
5040 (resp->resp_len > this_lstnr->sb_length)) {
5041 pr_err("resp buf length %d not valid\n", resp->resp_len);
5042 return -EINVAL;
5043 }
5044
5045 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
5046 pr_err("Integer overflow in resp_len & resp_buf\n");
5047 return -EINVAL;
5048 }
5049 if ((uintptr_t)this_lstnr->user_virt_sb_base >
5050 (ULONG_MAX - this_lstnr->sb_length)) {
5051 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
5052 return -EINVAL;
5053 }
5054 /* validate resp buf */
5055 if (((uintptr_t)resp->resp_buf_ptr <
5056 (uintptr_t)this_lstnr->user_virt_sb_base) ||
5057 ((uintptr_t)resp->resp_buf_ptr >=
5058 ((uintptr_t)this_lstnr->user_virt_sb_base +
5059 this_lstnr->sb_length)) ||
5060 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
5061 ((uintptr_t)this_lstnr->user_virt_sb_base +
5062 this_lstnr->sb_length))) {
5063 pr_err("resp buf is out of shared buffer region\n");
5064 return -EINVAL;
5065 }
5066
5067 /* validate offsets */
5068 for (i = 0; i < MAX_ION_FD; i++) {
5069 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
5070 pr_err("Invalid offset %d = 0x%x\n",
5071 i, resp->ifd_data[i].cmd_buf_offset);
5072 return -EINVAL;
5073 }
5074 }
5075
5076 return 0;
5077}
5078
5079static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5080 void __user *argp, bool is_64bit_addr)
5081{
5082 struct qseecom_send_modfd_listener_resp resp;
5083 struct qseecom_registered_listener_list *this_lstnr = NULL;
5084
5085 if (copy_from_user(&resp, argp, sizeof(resp))) {
5086		pr_err("copy_from_user failed\n");
5087 return -EINVAL;
5088 }
5089
5090 this_lstnr = __qseecom_find_svc(data->listener.id);
5091 if (this_lstnr == NULL)
5092 return -EINVAL;
5093
5094 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
5095 return -EINVAL;
5096
5097 resp.resp_buf_ptr = this_lstnr->sb_virt +
5098 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
5099
5100 if (!is_64bit_addr)
5101 __qseecom_update_cmd_buf(&resp, false, data);
5102 else
5103 __qseecom_update_cmd_buf_64(&resp, false, data);
5104 qseecom.send_resp_flag = 1;
5105 this_lstnr->send_resp_flag = 1;
5106 wake_up_interruptible(&qseecom.send_resp_wq);
5107 return 0;
5108}
5109
5110static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5111 void __user *argp)
5112{
5113 return __qseecom_send_modfd_resp(data, argp, false);
5114}
5115
5116static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
5117 void __user *argp)
5118{
5119 return __qseecom_send_modfd_resp(data, argp, true);
5120}
5121
5122static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
5123 void __user *argp)
5124{
5125 struct qseecom_qseos_version_req req;
5126
5127 if (copy_from_user(&req, argp, sizeof(req))) {
5128		pr_err("copy_from_user failed\n");
5129 return -EINVAL;
5130 }
5131 req.qseos_version = qseecom.qseos_version;
5132 if (copy_to_user(argp, &req, sizeof(req))) {
5133		pr_err("copy_to_user failed\n");
5134 return -EINVAL;
5135 }
5136 return 0;
5137}
5138
5139static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
5140{
5141 int rc = 0;
5142 struct qseecom_clk *qclk = NULL;
5143
5144 if (qseecom.no_clock_support)
5145 return 0;
5146
5147 if (ce == CLK_QSEE)
5148 qclk = &qseecom.qsee;
5149 if (ce == CLK_CE_DRV)
5150 qclk = &qseecom.ce_drv;
5151
5152 if (qclk == NULL) {
5153 pr_err("CLK type not supported\n");
5154 return -EINVAL;
5155 }
5156 mutex_lock(&clk_access_lock);
5157
5158 if (qclk->clk_access_cnt == ULONG_MAX) {
5159 pr_err("clk_access_cnt beyond limitation\n");
5160 goto err;
5161 }
5162 if (qclk->clk_access_cnt > 0) {
5163 qclk->clk_access_cnt++;
5164 mutex_unlock(&clk_access_lock);
5165 return rc;
5166 }
5167
5168 /* Enable CE core clk */
5169 if (qclk->ce_core_clk != NULL) {
5170 rc = clk_prepare_enable(qclk->ce_core_clk);
5171 if (rc) {
5172 pr_err("Unable to enable/prepare CE core clk\n");
5173 goto err;
5174 }
5175 }
5176 /* Enable CE clk */
5177 if (qclk->ce_clk != NULL) {
5178 rc = clk_prepare_enable(qclk->ce_clk);
5179 if (rc) {
5180 pr_err("Unable to enable/prepare CE iface clk\n");
5181 goto ce_clk_err;
5182 }
5183 }
5184 /* Enable AXI clk */
5185 if (qclk->ce_bus_clk != NULL) {
5186 rc = clk_prepare_enable(qclk->ce_bus_clk);
5187 if (rc) {
5188 pr_err("Unable to enable/prepare CE bus clk\n");
5189 goto ce_bus_clk_err;
5190 }
5191 }
5192 qclk->clk_access_cnt++;
5193 mutex_unlock(&clk_access_lock);
5194 return 0;
5195
5196ce_bus_clk_err:
5197 if (qclk->ce_clk != NULL)
5198 clk_disable_unprepare(qclk->ce_clk);
5199ce_clk_err:
5200 if (qclk->ce_core_clk != NULL)
5201 clk_disable_unprepare(qclk->ce_core_clk);
5202err:
5203 mutex_unlock(&clk_access_lock);
5204 return -EIO;
5205}
5206
5207static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5208{
5209 struct qseecom_clk *qclk;
5210
5211 if (qseecom.no_clock_support)
5212 return;
5213
5214 if (ce == CLK_QSEE)
5215 qclk = &qseecom.qsee;
5216 else
5217 qclk = &qseecom.ce_drv;
5218
5219 mutex_lock(&clk_access_lock);
5220
5221 if (qclk->clk_access_cnt == 0) {
5222 mutex_unlock(&clk_access_lock);
5223 return;
5224 }
5225
5226 if (qclk->clk_access_cnt == 1) {
5227 if (qclk->ce_clk != NULL)
5228 clk_disable_unprepare(qclk->ce_clk);
5229 if (qclk->ce_core_clk != NULL)
5230 clk_disable_unprepare(qclk->ce_core_clk);
5231 if (qclk->ce_bus_clk != NULL)
5232 clk_disable_unprepare(qclk->ce_bus_clk);
5233 }
5234 qclk->clk_access_cnt--;
5235 mutex_unlock(&clk_access_lock);
5236}
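/*
 * __qseecom_enable_clk() and __qseecom_disable_clk() keep clk_access_cnt
 * as a refcount under clk_access_lock, so every successful enable must be
 * balanced by exactly one disable, as the key-management paths below do:
 *
 *	ret = __qseecom_enable_clk(CLK_QSEE);
 *	if (ret)
 *		return ret;
 *	... issue the scm call ...
 *	__qseecom_disable_clk(CLK_QSEE);
 */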
5237
5238static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
5239 int32_t clk_type)
5240{
5241 int ret = 0;
5242 struct qseecom_clk *qclk;
5243
5244 if (qseecom.no_clock_support)
5245 return 0;
5246
5247 qclk = &qseecom.qsee;
5248 if (!qseecom.qsee_perf_client)
5249 return ret;
5250
5251 switch (clk_type) {
5252 case CLK_DFAB:
5253 mutex_lock(&qsee_bw_mutex);
5254 if (!qseecom.qsee_bw_count) {
5255 if (qseecom.qsee_sfpb_bw_count > 0)
5256 ret = msm_bus_scale_client_update_request(
5257 qseecom.qsee_perf_client, 3);
5258 else {
5259 if (qclk->ce_core_src_clk != NULL)
5260 ret = __qseecom_enable_clk(CLK_QSEE);
5261 if (!ret) {
5262 ret =
5263 msm_bus_scale_client_update_request(
5264 qseecom.qsee_perf_client, 1);
5265 if ((ret) &&
5266 (qclk->ce_core_src_clk != NULL))
5267 __qseecom_disable_clk(CLK_QSEE);
5268 }
5269 }
5270 if (ret)
5271 pr_err("DFAB Bandwidth req failed (%d)\n",
5272 ret);
5273 else {
5274 qseecom.qsee_bw_count++;
5275 data->perf_enabled = true;
5276 }
5277 } else {
5278 qseecom.qsee_bw_count++;
5279 data->perf_enabled = true;
5280 }
5281 mutex_unlock(&qsee_bw_mutex);
5282 break;
5283 case CLK_SFPB:
5284 mutex_lock(&qsee_bw_mutex);
5285 if (!qseecom.qsee_sfpb_bw_count) {
5286 if (qseecom.qsee_bw_count > 0)
5287 ret = msm_bus_scale_client_update_request(
5288 qseecom.qsee_perf_client, 3);
5289 else {
5290 if (qclk->ce_core_src_clk != NULL)
5291 ret = __qseecom_enable_clk(CLK_QSEE);
5292 if (!ret) {
5293 ret =
5294 msm_bus_scale_client_update_request(
5295 qseecom.qsee_perf_client, 2);
5296 if ((ret) &&
5297 (qclk->ce_core_src_clk != NULL))
5298 __qseecom_disable_clk(CLK_QSEE);
5299 }
5300 }
5301
5302 if (ret)
5303 pr_err("SFPB Bandwidth req failed (%d)\n",
5304 ret);
5305 else {
5306 qseecom.qsee_sfpb_bw_count++;
5307 data->fast_load_enabled = true;
5308 }
5309 } else {
5310 qseecom.qsee_sfpb_bw_count++;
5311 data->fast_load_enabled = true;
5312 }
5313 mutex_unlock(&qsee_bw_mutex);
5314 break;
5315 default:
5316 pr_err("Clock type not defined\n");
5317 break;
5318 }
5319 return ret;
5320}
5321
5322static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5323 int32_t clk_type)
5324{
5325 int32_t ret = 0;
5326 struct qseecom_clk *qclk;
5327
5328 qclk = &qseecom.qsee;
5329
5330 if (qseecom.no_clock_support)
5331 return;
5332 if (!qseecom.qsee_perf_client)
5333 return;
5334
5335 switch (clk_type) {
5336 case CLK_DFAB:
5337 mutex_lock(&qsee_bw_mutex);
5338 if (qseecom.qsee_bw_count == 0) {
5339			pr_err("Client error. Extra call to disable DFAB clk\n");
5340 mutex_unlock(&qsee_bw_mutex);
5341 return;
5342 }
5343
5344 if (qseecom.qsee_bw_count == 1) {
5345 if (qseecom.qsee_sfpb_bw_count > 0)
5346 ret = msm_bus_scale_client_update_request(
5347 qseecom.qsee_perf_client, 2);
5348 else {
5349 ret = msm_bus_scale_client_update_request(
5350 qseecom.qsee_perf_client, 0);
5351 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5352 __qseecom_disable_clk(CLK_QSEE);
5353 }
5354 if (ret)
5355				pr_err("DFAB Bandwidth req fail (%d)\n",
5356 ret);
5357 else {
5358 qseecom.qsee_bw_count--;
5359 data->perf_enabled = false;
5360 }
5361 } else {
5362 qseecom.qsee_bw_count--;
5363 data->perf_enabled = false;
5364 }
5365 mutex_unlock(&qsee_bw_mutex);
5366 break;
5367 case CLK_SFPB:
5368 mutex_lock(&qsee_bw_mutex);
5369 if (qseecom.qsee_sfpb_bw_count == 0) {
5370			pr_err("Client error. Extra call to disable SFPB clk\n");
5371 mutex_unlock(&qsee_bw_mutex);
5372 return;
5373 }
5374 if (qseecom.qsee_sfpb_bw_count == 1) {
5375 if (qseecom.qsee_bw_count > 0)
5376 ret = msm_bus_scale_client_update_request(
5377 qseecom.qsee_perf_client, 1);
5378 else {
5379 ret = msm_bus_scale_client_update_request(
5380 qseecom.qsee_perf_client, 0);
5381 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5382 __qseecom_disable_clk(CLK_QSEE);
5383 }
5384 if (ret)
5385 pr_err("SFPB Bandwidth req fail (%d)\n",
5386 ret);
5387 else {
5388 qseecom.qsee_sfpb_bw_count--;
5389 data->fast_load_enabled = false;
5390 }
5391 } else {
5392 qseecom.qsee_sfpb_bw_count--;
5393 data->fast_load_enabled = false;
5394 }
5395 mutex_unlock(&qsee_bw_mutex);
5396 break;
5397 default:
5398 pr_err("Clock type not defined\n");
5399 break;
5400 }
5401
5402}
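/*
 * Note on the msm_bus_scale_client_update_request() vote indices used in
 * qsee_vote_for_clock()/qsee_disable_clock_vote() above: as the call
 * sites suggest, 0 drops all bandwidth, 1 votes for DFAB only, 2 for
 * SFPB only, and 3 for both DFAB and SFPB.
 */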
5403
5404static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5405 void __user *argp)
5406{
5407 struct ion_handle *ihandle; /* Ion handle */
5408 struct qseecom_load_img_req load_img_req;
5409 int uret = 0;
5410 int ret;
5411 ion_phys_addr_t pa = 0;
5412 size_t len;
5413 struct qseecom_load_app_ireq load_req;
5414 struct qseecom_load_app_64bit_ireq load_req_64bit;
5415 struct qseecom_command_scm_resp resp;
5416 void *cmd_buf = NULL;
5417 size_t cmd_len;
5418 /* Copy the relevant information needed for loading the image */
5419 if (copy_from_user(&load_img_req,
5420 (void __user *)argp,
5421 sizeof(struct qseecom_load_img_req))) {
5422 pr_err("copy_from_user failed\n");
5423 return -EFAULT;
5424 }
5425
5426 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005427 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005428 load_img_req.ifd_data_fd);
5429 if (IS_ERR_OR_NULL(ihandle)) {
5430 pr_err("Ion client could not retrieve the handle\n");
5431 return -ENOMEM;
5432 }
5433
5434 /* Get the physical address of the ION BUF */
5435 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5436 if (ret) {
5437 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5438 ret);
5439 return ret;
5440 }
5441 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5442 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5443 len, load_img_req.mdt_len,
5444 load_img_req.img_len);
5445		return -EINVAL;
5446 }
5447 /* Populate the structure for sending scm call to load image */
5448 if (qseecom.qsee_version < QSEE_VERSION_40) {
5449 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5450 load_req.mdt_len = load_img_req.mdt_len;
5451 load_req.img_len = load_img_req.img_len;
5452 load_req.phy_addr = (uint32_t)pa;
5453 cmd_buf = (void *)&load_req;
5454 cmd_len = sizeof(struct qseecom_load_app_ireq);
5455 } else {
5456 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5457 load_req_64bit.mdt_len = load_img_req.mdt_len;
5458 load_req_64bit.img_len = load_img_req.img_len;
5459 load_req_64bit.phy_addr = (uint64_t)pa;
5460 cmd_buf = (void *)&load_req_64bit;
5461 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5462 }
5463
5464 if (qseecom.support_bus_scaling) {
5465 mutex_lock(&qsee_bw_mutex);
5466 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5467 mutex_unlock(&qsee_bw_mutex);
5468 if (ret) {
5469 ret = -EIO;
5470 goto exit_cpu_restore;
5471 }
5472 }
5473
5474 /* Vote for the SFPB clock */
5475 ret = __qseecom_enable_clk_scale_up(data);
5476 if (ret) {
5477 ret = -EIO;
5478 goto exit_register_bus_bandwidth_needs;
5479 }
5480 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5481 ION_IOC_CLEAN_INV_CACHES);
5482 if (ret) {
5483 pr_err("cache operation failed %d\n", ret);
5484 goto exit_disable_clock;
5485 }
5486 /* SCM_CALL to load the external elf */
5487 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5488 &resp, sizeof(resp));
5489 if (ret) {
5490 pr_err("scm_call to load failed : ret %d\n",
5491 ret);
5492 ret = -EFAULT;
5493 goto exit_disable_clock;
5494 }
5495
5496 switch (resp.result) {
5497 case QSEOS_RESULT_SUCCESS:
5498 break;
5499 case QSEOS_RESULT_INCOMPLETE:
5500		pr_err("qseos result incomplete\n");
5501 ret = __qseecom_process_incomplete_cmd(data, &resp);
5502 if (ret)
5503 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5504 break;
5505 case QSEOS_RESULT_FAILURE:
5506 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5507 ret = -EFAULT;
5508 break;
5509 default:
5510 pr_err("scm_call response result %d not supported\n",
5511 resp.result);
5512 ret = -EFAULT;
5513 break;
5514 }
5515
5516exit_disable_clock:
5517 __qseecom_disable_clk_scale_down(data);
5518
5519exit_register_bus_bandwidth_needs:
5520 if (qseecom.support_bus_scaling) {
5521 mutex_lock(&qsee_bw_mutex);
5522 uret = qseecom_unregister_bus_bandwidth_needs(data);
5523 mutex_unlock(&qsee_bw_mutex);
5524 if (uret)
5525 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5526 uret, ret);
5527 }
5528
5529exit_cpu_restore:
5530 /* Deallocate the handle */
5531 if (!IS_ERR_OR_NULL(ihandle))
5532 ion_free(qseecom.ion_clnt, ihandle);
5533 return ret;
5534}
5535
5536static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5537{
5538 int ret = 0;
5539 struct qseecom_command_scm_resp resp;
5540 struct qseecom_unload_app_ireq req;
5541
5542 /* unavailable client app */
5543 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5544
5545 /* Populate the structure for sending scm call to unload image */
5546 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5547
5548 /* SCM_CALL to unload the external elf */
5549 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5550 sizeof(struct qseecom_unload_app_ireq),
5551 &resp, sizeof(resp));
5552 if (ret) {
5553 pr_err("scm_call to unload failed : ret %d\n",
5554 ret);
5555 ret = -EFAULT;
5556 goto qseecom_unload_external_elf_scm_err;
5557 }
5558 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5559 ret = __qseecom_process_incomplete_cmd(data, &resp);
5560 if (ret)
5561 pr_err("process_incomplete_cmd fail err: %d\n",
5562 ret);
5563 } else {
5564 if (resp.result != QSEOS_RESULT_SUCCESS) {
5565 pr_err("scm_call to unload image failed resp.result =%d\n",
5566 resp.result);
5567 ret = -EFAULT;
5568 }
5569 }
5570
5571qseecom_unload_external_elf_scm_err:
5572
5573 return ret;
5574}
5575
5576static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5577 void __user *argp)
5578{
5579
5580 int32_t ret;
5581 struct qseecom_qseos_app_load_query query_req;
5582 struct qseecom_check_app_ireq req;
5583 struct qseecom_registered_app_list *entry = NULL;
5584 unsigned long flags = 0;
5585 uint32_t app_arch = 0, app_id = 0;
5586 bool found_app = false;
5587
5588 /* Copy the relevant information needed for loading the image */
5589 if (copy_from_user(&query_req,
5590 (void __user *)argp,
5591 sizeof(struct qseecom_qseos_app_load_query))) {
5592 pr_err("copy_from_user failed\n");
5593 return -EFAULT;
5594 }
5595
5596 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5597 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5598 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5599
5600 ret = __qseecom_check_app_exists(req, &app_id);
5601 if (ret) {
5602		pr_err("scm call to check if app is loaded failed\n");
5603 return ret; /* scm call failed */
5604 }
5605 if (app_id) {
5606 pr_debug("App id %d (%s) already exists\n", app_id,
5607 (char *)(req.app_name));
5608 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5609 list_for_each_entry(entry,
5610 &qseecom.registered_app_list_head, list){
5611 if (entry->app_id == app_id) {
5612 app_arch = entry->app_arch;
5613 entry->ref_cnt++;
5614 found_app = true;
5615 break;
5616 }
5617 }
5618 spin_unlock_irqrestore(
5619 &qseecom.registered_app_list_lock, flags);
5620 data->client.app_id = app_id;
5621 query_req.app_id = app_id;
5622 if (app_arch) {
5623 data->client.app_arch = app_arch;
5624 query_req.app_arch = app_arch;
5625 } else {
5626 data->client.app_arch = 0;
5627 query_req.app_arch = 0;
5628 }
5629 strlcpy(data->client.app_name, query_req.app_name,
5630 MAX_APP_NAME_SIZE);
5631 /*
5632 * If app was loaded by appsbl before and was not registered,
5633		 * register this app now.
5634 */
5635 if (!found_app) {
5636 pr_debug("Register app %d [%s] which was loaded before\n",
5637				app_id, (char *)query_req.app_name);
5638 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5639 if (!entry) {
5640 pr_err("kmalloc for app entry failed\n");
5641 return -ENOMEM;
5642 }
5643 entry->app_id = app_id;
5644 entry->ref_cnt = 1;
5645 entry->app_arch = data->client.app_arch;
5646 strlcpy(entry->app_name, data->client.app_name,
5647 MAX_APP_NAME_SIZE);
5648 entry->app_blocked = false;
5649 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005650 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005651 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5652 flags);
5653 list_add_tail(&entry->list,
5654 &qseecom.registered_app_list_head);
5655 spin_unlock_irqrestore(
5656 &qseecom.registered_app_list_lock, flags);
5657 }
5658 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5659 pr_err("copy_to_user failed\n");
5660 return -EFAULT;
5661 }
5662 return -EEXIST; /* app already loaded */
5663 } else {
5664 return 0; /* app not loaded */
5665 }
5666}
5667
5668static int __qseecom_get_ce_pipe_info(
5669 enum qseecom_key_management_usage_type usage,
5670 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5671{
5672 int ret = -EINVAL;
5673 int i, j;
5674 struct qseecom_ce_info_use *p = NULL;
5675 int total = 0;
5676 struct qseecom_ce_pipe_entry *pcepipe;
5677
5678 switch (usage) {
5679 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5680 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5681 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5682 if (qseecom.support_fde) {
5683 p = qseecom.ce_info.fde;
5684 total = qseecom.ce_info.num_fde;
5685 } else {
5686 pr_err("system does not support fde\n");
5687 return -EINVAL;
5688 }
5689 break;
5690 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5691 if (qseecom.support_pfe) {
5692 p = qseecom.ce_info.pfe;
5693 total = qseecom.ce_info.num_pfe;
5694 } else {
5695 pr_err("system does not support pfe\n");
5696 return -EINVAL;
5697 }
5698 break;
5699 default:
5700 pr_err("unsupported usage %d\n", usage);
5701 return -EINVAL;
5702 }
5703
5704 for (j = 0; j < total; j++) {
5705 if (p->unit_num == unit) {
5706 pcepipe = p->ce_pipe_entry;
5707 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5708 (*ce_hw)[i] = pcepipe->ce_num;
5709 *pipe = pcepipe->ce_pipe_pair;
5710 pcepipe++;
5711 }
5712 ret = 0;
5713 break;
5714 }
5715 p++;
5716 }
5717 return ret;
5718}
5719
5720static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
5721 enum qseecom_key_management_usage_type usage,
5722 struct qseecom_key_generate_ireq *ireq)
5723{
5724 struct qseecom_command_scm_resp resp;
5725 int ret;
5726
5727 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5728 usage >= QSEOS_KM_USAGE_MAX) {
5729 pr_err("Error:: unsupported usage %d\n", usage);
5730 return -EFAULT;
5731 }
5732 ret = __qseecom_enable_clk(CLK_QSEE);
5733 if (ret)
5734 return ret;
5735
5736 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5737 ireq, sizeof(struct qseecom_key_generate_ireq),
5738 &resp, sizeof(resp));
5739 if (ret) {
5740 if (ret == -EINVAL &&
5741 resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5742 pr_debug("Key ID exists.\n");
5743 ret = 0;
5744 } else {
5745 pr_err("scm call to generate key failed : %d\n", ret);
5746 ret = -EFAULT;
5747 }
5748 goto generate_key_exit;
5749 }
5750
5751 switch (resp.result) {
5752 case QSEOS_RESULT_SUCCESS:
5753 break;
5754 case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
5755 pr_debug("Key ID exists.\n");
5756 break;
5757 case QSEOS_RESULT_INCOMPLETE:
5758 ret = __qseecom_process_incomplete_cmd(data, &resp);
5759 if (ret) {
5760 if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5761 pr_debug("Key ID exists.\n");
5762 ret = 0;
5763 } else {
5764 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5765 resp.result);
5766 }
5767 }
5768 break;
5769 case QSEOS_RESULT_FAILURE:
5770 default:
5771 pr_err("gen key scm call failed resp.result %d\n", resp.result);
5772 ret = -EINVAL;
5773 break;
5774 }
5775generate_key_exit:
5776 __qseecom_disable_clk(CLK_QSEE);
5777 return ret;
5778}
5779
5780static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
5781 enum qseecom_key_management_usage_type usage,
5782 struct qseecom_key_delete_ireq *ireq)
5783{
5784 struct qseecom_command_scm_resp resp;
5785 int ret;
5786
5787 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5788 usage >= QSEOS_KM_USAGE_MAX) {
5789 pr_err("Error:: unsupported usage %d\n", usage);
5790 return -EFAULT;
5791 }
5792 ret = __qseecom_enable_clk(CLK_QSEE);
5793 if (ret)
5794 return ret;
5795
5796 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5797 ireq, sizeof(struct qseecom_key_delete_ireq),
5798 &resp, sizeof(struct qseecom_command_scm_resp));
5799 if (ret) {
5800 if (ret == -EINVAL &&
5801 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5802 pr_debug("Max attempts to input password reached.\n");
5803 ret = -ERANGE;
5804 } else {
5805 pr_err("scm call to delete key failed : %d\n", ret);
5806 ret = -EFAULT;
5807 }
5808 goto del_key_exit;
5809 }
5810
5811 switch (resp.result) {
5812 case QSEOS_RESULT_SUCCESS:
5813 break;
5814 case QSEOS_RESULT_INCOMPLETE:
5815 ret = __qseecom_process_incomplete_cmd(data, &resp);
5816 if (ret) {
5817 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5818 resp.result);
5819 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5820 pr_debug("Max attempts to input password reached.\n");
5821 ret = -ERANGE;
5822 }
5823 }
5824 break;
5825 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5826 pr_debug("Max attempts to input password reached.\n");
5827 ret = -ERANGE;
5828 break;
5829 case QSEOS_RESULT_FAILURE:
5830 default:
5831 pr_err("Delete key scm call failed resp.result %d\n",
5832 resp.result);
5833 ret = -EINVAL;
5834 break;
5835 }
5836del_key_exit:
5837 __qseecom_disable_clk(CLK_QSEE);
5838 return ret;
5839}
5840
5841static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5842 enum qseecom_key_management_usage_type usage,
5843 struct qseecom_key_select_ireq *ireq)
5844{
5845 struct qseecom_command_scm_resp resp;
5846 int ret;
5847
5848 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5849 usage >= QSEOS_KM_USAGE_MAX) {
5850 pr_err("Error:: unsupported usage %d\n", usage);
5851 return -EFAULT;
5852 }
5853 ret = __qseecom_enable_clk(CLK_QSEE);
5854 if (ret)
5855 return ret;
5856
5857 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5858 ret = __qseecom_enable_clk(CLK_CE_DRV);
5859 if (ret)
5860 return ret;
5861 }
5862
5863 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5864 ireq, sizeof(struct qseecom_key_select_ireq),
5865 &resp, sizeof(struct qseecom_command_scm_resp));
5866 if (ret) {
5867 if (ret == -EINVAL &&
5868 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5869 pr_debug("Max attempts to input password reached.\n");
5870 ret = -ERANGE;
5871 } else if (ret == -EINVAL &&
5872 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5873 pr_debug("Set Key operation under processing...\n");
5874 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5875 } else {
5876 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5877 ret);
5878 ret = -EFAULT;
5879 }
5880 goto set_key_exit;
5881 }
5882
5883 switch (resp.result) {
5884 case QSEOS_RESULT_SUCCESS:
5885 break;
5886 case QSEOS_RESULT_INCOMPLETE:
5887 ret = __qseecom_process_incomplete_cmd(data, &resp);
5888 if (ret) {
5889 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5890 resp.result);
5891 if (resp.result ==
5892 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5893 pr_debug("Set Key operation under processing...\n");
5894 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5895 }
5896 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5897 pr_debug("Max attempts to input password reached.\n");
5898 ret = -ERANGE;
5899 }
5900 }
5901 break;
5902 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5903 pr_debug("Max attempts to input password reached.\n");
5904 ret = -ERANGE;
5905 break;
5906 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5907 pr_debug("Set Key operation under processing...\n");
5908 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5909 break;
5910 case QSEOS_RESULT_FAILURE:
5911 default:
5912 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5913 ret = -EINVAL;
5914 break;
5915 }
5916set_key_exit:
5917 __qseecom_disable_clk(CLK_QSEE);
5918 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5919 __qseecom_disable_clk(CLK_CE_DRV);
5920 return ret;
5921}
5922
5923static int __qseecom_update_current_key_user_info(
5924 struct qseecom_dev_handle *data,
5925 enum qseecom_key_management_usage_type usage,
5926 struct qseecom_key_userinfo_update_ireq *ireq)
5927{
5928 struct qseecom_command_scm_resp resp;
5929 int ret;
5930
5931 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5932 usage >= QSEOS_KM_USAGE_MAX) {
5933 pr_err("Error:: unsupported usage %d\n", usage);
5934 return -EFAULT;
5935 }
5936 ret = __qseecom_enable_clk(CLK_QSEE);
5937 if (ret)
5938 return ret;
5939
5940 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5941 ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
5942 &resp, sizeof(struct qseecom_command_scm_resp));
5943 if (ret) {
5944 if (ret == -EINVAL &&
5945 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5946 pr_debug("Set Key operation under processing...\n");
5947 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5948 } else {
5949 pr_err("scm call to update key userinfo failed: %d\n",
5950 ret);
5951 __qseecom_disable_clk(CLK_QSEE);
5952 return -EFAULT;
5953 }
5954 }
5955
5956 switch (resp.result) {
5957 case QSEOS_RESULT_SUCCESS:
5958 break;
5959 case QSEOS_RESULT_INCOMPLETE:
5960 ret = __qseecom_process_incomplete_cmd(data, &resp);
5961 if (resp.result ==
5962 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5963 pr_debug("Set Key operation under processing...\n");
5964 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5965 }
5966 if (ret)
5967 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5968 resp.result);
5969 break;
5970 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5971 pr_debug("Update Key operation under processing...\n");
5972 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5973 break;
5974 case QSEOS_RESULT_FAILURE:
5975 default:
5976 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5977 ret = -EINVAL;
5978 break;
5979 }
5980
5981 __qseecom_disable_clk(CLK_QSEE);
5982 return ret;
5983}
5984
5985
5986static int qseecom_enable_ice_setup(int usage)
5987{
5988 int ret = 0;
5989
5990 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5991 ret = qcom_ice_setup_ice_hw("ufs", true);
5992 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5993 ret = qcom_ice_setup_ice_hw("sdcc", true);
5994
5995 return ret;
5996}
5997
5998static int qseecom_disable_ice_setup(int usage)
5999{
6000 int ret = 0;
6001
6002 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
6003 ret = qcom_ice_setup_ice_hw("ufs", false);
6004 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
6005 ret = qcom_ice_setup_ice_hw("sdcc", false);
6006
6007 return ret;
6008}
6009
6010static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
6011{
6012 struct qseecom_ce_info_use *pce_info_use, *p;
6013 int total = 0;
6014 int i;
6015
6016 switch (usage) {
6017 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
6018 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
6019 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
6020 p = qseecom.ce_info.fde;
6021 total = qseecom.ce_info.num_fde;
6022 break;
6023 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
6024 p = qseecom.ce_info.pfe;
6025 total = qseecom.ce_info.num_pfe;
6026 break;
6027 default:
6028 pr_err("unsupported usage %d\n", usage);
6029 return -EINVAL;
6030 }
6031
6032 pce_info_use = NULL;
6033
6034 for (i = 0; i < total; i++) {
6035 if (p->unit_num == unit) {
6036 pce_info_use = p;
6037 break;
6038 }
6039 p++;
6040 }
6041 if (!pce_info_use) {
6042		pr_err("cannot find unit %d\n", unit);
6043 return -EINVAL;
6044 }
6045 return pce_info_use->num_ce_pipe_entries;
6046}
6047
6048static int qseecom_create_key(struct qseecom_dev_handle *data,
6049 void __user *argp)
6050{
6051 int i;
6052 uint32_t *ce_hw = NULL;
6053 uint32_t pipe = 0;
6054 int ret = 0;
6055 uint32_t flags = 0;
6056 struct qseecom_create_key_req create_key_req;
6057 struct qseecom_key_generate_ireq generate_key_ireq;
6058 struct qseecom_key_select_ireq set_key_ireq;
6059 uint32_t entries = 0;
6060
6061 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
6062 if (ret) {
6063 pr_err("copy_from_user failed\n");
6064 return ret;
6065 }
6066
6067 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6068 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6069 pr_err("unsupported usage %d\n", create_key_req.usage);
6070 ret = -EFAULT;
6071 return ret;
6072 }
6073 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6074 create_key_req.usage);
6075 if (entries <= 0) {
6076		pr_err("no ce instance for unit %d usage %d\n",
6077 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
6078 ret = -EINVAL;
6079 return ret;
6080 }
6081
6082 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6083 if (!ce_hw) {
6084 ret = -ENOMEM;
6085 return ret;
6086 }
6087 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
6088 DEFAULT_CE_INFO_UNIT);
6089 if (ret) {
6090 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6091 ret = -EINVAL;
6092 goto free_buf;
6093 }
6094
6095 if (qseecom.fde_key_size)
6096 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6097 else
6098 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6099
Jiten Patela7bb1d52018-05-11 12:34:26 +05306100 if (qseecom.enable_key_wrap_in_ks == true)
6101 flags |= ENABLE_KEY_WRAP_IN_KS;
6102
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006103 generate_key_ireq.flags = flags;
6104 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
6105 memset((void *)generate_key_ireq.key_id,
6106 0, QSEECOM_KEY_ID_SIZE);
6107 memset((void *)generate_key_ireq.hash32,
6108 0, QSEECOM_HASH_SIZE);
6109 memcpy((void *)generate_key_ireq.key_id,
6110 (void *)key_id_array[create_key_req.usage].desc,
6111 QSEECOM_KEY_ID_SIZE);
6112 memcpy((void *)generate_key_ireq.hash32,
6113 (void *)create_key_req.hash32,
6114 QSEECOM_HASH_SIZE);
6115
6116 ret = __qseecom_generate_and_save_key(data,
6117 create_key_req.usage, &generate_key_ireq);
6118 if (ret) {
6119 pr_err("Failed to generate key on storage: %d\n", ret);
6120 goto free_buf;
6121 }
6122
6123 for (i = 0; i < entries; i++) {
6124 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6125 if (create_key_req.usage ==
6126 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6127 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6128 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6129
6130 } else if (create_key_req.usage ==
6131 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6132 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6133 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6134
6135 } else {
6136 set_key_ireq.ce = ce_hw[i];
6137 set_key_ireq.pipe = pipe;
6138 }
6139 set_key_ireq.flags = flags;
6140
6141 /* set both PIPE_ENC and PIPE_ENC_XTS*/
6142 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6143 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6144 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6145 memcpy((void *)set_key_ireq.key_id,
6146 (void *)key_id_array[create_key_req.usage].desc,
6147 QSEECOM_KEY_ID_SIZE);
6148 memcpy((void *)set_key_ireq.hash32,
6149 (void *)create_key_req.hash32,
6150 QSEECOM_HASH_SIZE);
6151 /*
6152		 * qseecom_enable_ice_setup() returns 0 (no error) if this is a
6153		 * GPCE based crypto instance or if ICE HW is set up properly
6154 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006155 ret = qseecom_enable_ice_setup(create_key_req.usage);
6156 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006157 goto free_buf;
6158
6159 do {
6160 ret = __qseecom_set_clear_ce_key(data,
6161 create_key_req.usage,
6162 &set_key_ireq);
6163 /*
6164 * wait a little before calling scm again to let other
6165 * processes run
6166 */
6167 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6168 msleep(50);
6169
6170 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6171
6172 qseecom_disable_ice_setup(create_key_req.usage);
6173
6174 if (ret) {
6175 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6176 pipe, ce_hw[i], ret);
6177 goto free_buf;
6178 } else {
6179 pr_err("Set the key successfully\n");
6180 if ((create_key_req.usage ==
6181 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6182 (create_key_req.usage ==
6183 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6184 goto free_buf;
6185 }
6186 }
6187
6188free_buf:
6189 kzfree(ce_hw);
6190 return ret;
6191}
6192
6193static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6194 void __user *argp)
6195{
6196 uint32_t *ce_hw = NULL;
6197 uint32_t pipe = 0;
6198 int ret = 0;
6199 uint32_t flags = 0;
6200 int i, j;
6201 struct qseecom_wipe_key_req wipe_key_req;
6202 struct qseecom_key_delete_ireq delete_key_ireq;
6203 struct qseecom_key_select_ireq clear_key_ireq;
6204 uint32_t entries = 0;
6205
6206 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6207 if (ret) {
6208 pr_err("copy_from_user failed\n");
6209 return ret;
6210 }
6211
6212 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6213 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6214 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6215 ret = -EFAULT;
6216 return ret;
6217 }
6218
6219 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6220 wipe_key_req.usage);
6221 if (entries <= 0) {
6222		pr_err("no ce instance for unit %d usage %d\n",
6223 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6224 ret = -EINVAL;
6225 return ret;
6226 }
6227
6228 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6229 if (!ce_hw) {
6230 ret = -ENOMEM;
6231 return ret;
6232 }
6233
6234 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6235 DEFAULT_CE_INFO_UNIT);
6236 if (ret) {
6237 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6238 ret = -EINVAL;
6239 goto free_buf;
6240 }
6241
6242 if (wipe_key_req.wipe_key_flag) {
6243 delete_key_ireq.flags = flags;
6244 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6245 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6246 memcpy((void *)delete_key_ireq.key_id,
6247 (void *)key_id_array[wipe_key_req.usage].desc,
6248 QSEECOM_KEY_ID_SIZE);
6249 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6250
6251 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6252 &delete_key_ireq);
6253 if (ret) {
6254 pr_err("Failed to delete key from ssd storage: %d\n",
6255 ret);
6256 ret = -EFAULT;
6257 goto free_buf;
6258 }
6259 }
6260
6261 for (j = 0; j < entries; j++) {
6262 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6263 if (wipe_key_req.usage ==
6264 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6265 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6266 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6267 } else if (wipe_key_req.usage ==
6268 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6269 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6270 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6271 } else {
6272 clear_key_ireq.ce = ce_hw[j];
6273 clear_key_ireq.pipe = pipe;
6274 }
6275 clear_key_ireq.flags = flags;
6276 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6277 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6278 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6279 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6280
6281 /*
6282		 * qseecom_enable_ice_setup() returns 0 (no error) if this is a
6283		 * GPCE based crypto instance or if ICE HW is set up properly
6284 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006285 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6286 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006287 goto free_buf;
6288
6289 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6290 &clear_key_ireq);
6291
6292 qseecom_disable_ice_setup(wipe_key_req.usage);
6293
6294 if (ret) {
6295 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6296 pipe, ce_hw[j], ret);
6297 ret = -EFAULT;
6298 goto free_buf;
6299 }
6300 }
6301
6302free_buf:
6303 kzfree(ce_hw);
6304 return ret;
6305}
6306
6307static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6308 void __user *argp)
6309{
6310 int ret = 0;
6311 uint32_t flags = 0;
6312 struct qseecom_update_key_userinfo_req update_key_req;
6313 struct qseecom_key_userinfo_update_ireq ireq;
6314
6315 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6316 if (ret) {
6317 pr_err("copy_from_user failed\n");
6318 return ret;
6319 }
6320
6321 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6322 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6323 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6324 return -EFAULT;
6325 }
6326
6327 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6328
6329 if (qseecom.fde_key_size)
6330 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6331 else
6332 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6333
6334 ireq.flags = flags;
6335 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6336 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6337 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6338 memcpy((void *)ireq.key_id,
6339 (void *)key_id_array[update_key_req.usage].desc,
6340 QSEECOM_KEY_ID_SIZE);
6341 memcpy((void *)ireq.current_hash32,
6342 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6343 memcpy((void *)ireq.new_hash32,
6344 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6345
6346 do {
6347 ret = __qseecom_update_current_key_user_info(data,
6348 update_key_req.usage,
6349 &ireq);
6350 /*
6351 * wait a little before calling scm again to let other
6352 * processes run
6353 */
6354 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6355 msleep(50);
6356
6357 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6358 if (ret) {
6359 pr_err("Failed to update key info: %d\n", ret);
6360 return ret;
6361 }
6362 return ret;
6363
6364}
6365static int qseecom_is_es_activated(void __user *argp)
6366{
Zhen Kong26e62742018-05-04 17:19:06 -07006367 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006368 struct qseecom_command_scm_resp resp;
6369 int ret;
6370
6371 if (qseecom.qsee_version < QSEE_VERSION_04) {
6372 pr_err("invalid qsee version\n");
6373 return -ENODEV;
6374 }
6375
6376 if (argp == NULL) {
6377 pr_err("arg is null\n");
6378 return -EINVAL;
6379 }
6380
6381 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6382 &req, sizeof(req), &resp, sizeof(resp));
6383 if (ret) {
6384 pr_err("scm_call failed\n");
6385 return ret;
6386 }
6387
6388 req.is_activated = resp.result;
6389 ret = copy_to_user(argp, &req, sizeof(req));
6390 if (ret) {
6391 pr_err("copy_to_user failed\n");
6392 return ret;
6393 }
6394
6395 return 0;
6396}
6397
6398static int qseecom_save_partition_hash(void __user *argp)
6399{
6400 struct qseecom_save_partition_hash_req req;
6401 struct qseecom_command_scm_resp resp;
6402 int ret;
6403
6404 memset(&resp, 0x00, sizeof(resp));
6405
6406 if (qseecom.qsee_version < QSEE_VERSION_04) {
6407 pr_err("invalid qsee version\n");
6408 return -ENODEV;
6409 }
6410
6411 if (argp == NULL) {
6412 pr_err("arg is null\n");
6413 return -EINVAL;
6414 }
6415
6416 ret = copy_from_user(&req, argp, sizeof(req));
6417 if (ret) {
6418 pr_err("copy_from_user failed\n");
6419 return ret;
6420 }
6421
6422 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6423 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6424 if (ret) {
6425 pr_err("qseecom_scm_call failed\n");
6426 return ret;
6427 }
6428
6429 return 0;
6430}
6431
6432static int qseecom_mdtp_cipher_dip(void __user *argp)
6433{
6434 struct qseecom_mdtp_cipher_dip_req req;
6435 u32 tzbuflenin, tzbuflenout;
6436 char *tzbufin = NULL, *tzbufout = NULL;
6437 struct scm_desc desc = {0};
6438 int ret;
6439
6440 do {
6441 /* Copy the parameters from userspace */
6442 if (argp == NULL) {
6443 pr_err("arg is null\n");
6444 ret = -EINVAL;
6445 break;
6446 }
6447
6448 ret = copy_from_user(&req, argp, sizeof(req));
6449 if (ret) {
6450 pr_err("copy_from_user failed, ret= %d\n", ret);
6451 break;
6452 }
6453
6454 if (req.in_buf == NULL || req.out_buf == NULL ||
6455 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6456 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6457 req.direction > 1) {
6458 pr_err("invalid parameters\n");
6459 ret = -EINVAL;
6460 break;
6461 }
6462
6463 /* Copy the input buffer from userspace to kernel space */
6464 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6465 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6466 if (!tzbufin) {
6467 pr_err("error allocating in buffer\n");
6468 ret = -ENOMEM;
6469 break;
6470 }
6471
6472 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6473 if (ret) {
6474 pr_err("copy_from_user failed, ret=%d\n", ret);
6475 break;
6476 }
6477
6478 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6479
6480 /* Prepare the output buffer in kernel space */
6481 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6482 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6483 if (!tzbufout) {
6484 pr_err("error allocating out buffer\n");
6485 ret = -ENOMEM;
6486 break;
6487 }
6488
6489 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6490
6491 /* Send the command to TZ */
6492 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6493 desc.args[0] = virt_to_phys(tzbufin);
6494 desc.args[1] = req.in_buf_size;
6495 desc.args[2] = virt_to_phys(tzbufout);
6496 desc.args[3] = req.out_buf_size;
6497 desc.args[4] = req.direction;
6498
6499 ret = __qseecom_enable_clk(CLK_QSEE);
6500 if (ret)
6501 break;
6502
Zhen Kong03f220d2019-02-01 17:12:34 -08006503 ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006504
6505 __qseecom_disable_clk(CLK_QSEE);
6506
6507 if (ret) {
6508 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6509 ret);
6510 break;
6511 }
6512
6513 /* Copy the output buffer from kernel space to userspace */
6514 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6515 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6516 if (ret) {
6517 pr_err("copy_to_user failed, ret=%d\n", ret);
6518 break;
6519 }
6520 } while (0);
6521
6522 kzfree(tzbufin);
6523 kzfree(tzbufout);
6524
6525 return ret;
6526}
6527
6528static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6529 struct qseecom_qteec_req *req)
6530{
6531 if (!data || !data->client.ihandle) {
6532 pr_err("Client or client handle is not initialized\n");
6533 return -EINVAL;
6534 }
6535
6536 if (data->type != QSEECOM_CLIENT_APP)
6537 return -EFAULT;
6538
6539 if (req->req_len > UINT_MAX - req->resp_len) {
6540 pr_err("Integer overflow detected in req_len & rsp_len\n");
6541 return -EINVAL;
6542 }
6543
6544 if (req->req_len + req->resp_len > data->client.sb_length) {
6545		pr_debug("Not enough memory to fit cmd_buf and resp_buf\n");
6546		pr_debug("Required: %u, Available: %zu\n",
6547			(req->req_len + req->resp_len), data->client.sb_length);
6548 return -ENOMEM;
6549 }
6550
6551 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6552 pr_err("cmd buffer or response buffer is null\n");
6553 return -EINVAL;
6554 }
6555 if (((uintptr_t)req->req_ptr <
6556 data->client.user_virt_sb_base) ||
6557 ((uintptr_t)req->req_ptr >=
6558 (data->client.user_virt_sb_base + data->client.sb_length))) {
6559		pr_err("cmd buffer address not within shared buffer\n");
6560 return -EINVAL;
6561 }
6562
6563 if (((uintptr_t)req->resp_ptr <
6564 data->client.user_virt_sb_base) ||
6565 ((uintptr_t)req->resp_ptr >=
6566 (data->client.user_virt_sb_base + data->client.sb_length))) {
6567		pr_err("response buffer address not within shared buffer\n");
6568 return -EINVAL;
6569 }
6570
6571 if ((req->req_len == 0) || (req->resp_len == 0)) {
6572		pr_err("cmd buf length/response buf length not valid\n");
6573 return -EINVAL;
6574 }
6575
6576 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6577 pr_err("Integer overflow in req_len & req_ptr\n");
6578 return -EINVAL;
6579 }
6580
6581 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6582 pr_err("Integer overflow in resp_len & resp_ptr\n");
6583 return -EINVAL;
6584 }
6585
6586 if (data->client.user_virt_sb_base >
6587 (ULONG_MAX - data->client.sb_length)) {
6588 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6589 return -EINVAL;
6590 }
6591 if ((((uintptr_t)req->req_ptr + req->req_len) >
6592 ((uintptr_t)data->client.user_virt_sb_base +
6593 data->client.sb_length)) ||
6594 (((uintptr_t)req->resp_ptr + req->resp_len) >
6595 ((uintptr_t)data->client.user_virt_sb_base +
6596 data->client.sb_length))) {
6597 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6598 return -EINVAL;
6599 }
6600 return 0;
6601}
6602
6603static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6604 uint32_t fd_idx, struct sg_table *sg_ptr)
6605{
6606 struct scatterlist *sg = sg_ptr->sgl;
6607 struct qseecom_sg_entry *sg_entry;
6608 void *buf;
6609 uint i;
6610 size_t size;
6611 dma_addr_t coh_pmem;
6612
6613 if (fd_idx >= MAX_ION_FD) {
6614 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6615 return -ENOMEM;
6616 }
6617 /*
6618	 * Allocate a buffer, populate it with the number of entries plus
6619	 * each sg entry's phys addr and length; then return the
6620	 * phys addr of the buffer.
6621 */
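	/*
	 * Resulting layout of the coherent buffer built below: a uint32_t
	 * entry count followed by sg_ptr->nents packed qseecom_sg_entry
	 * records, each holding one segment's {phys_addr, len}.
	 */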
6622 size = sizeof(uint32_t) +
6623 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6624 size = (size + PAGE_SIZE) & PAGE_MASK;
6625 buf = dma_alloc_coherent(qseecom.pdev,
6626 size, &coh_pmem, GFP_KERNEL);
6627 if (buf == NULL) {
6628 pr_err("failed to alloc memory for sg buf\n");
6629 return -ENOMEM;
6630 }
6631 *(uint32_t *)buf = sg_ptr->nents;
6632 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6633 for (i = 0; i < sg_ptr->nents; i++) {
6634 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6635 sg_entry->len = sg->length;
6636 sg_entry++;
6637 sg = sg_next(sg);
6638 }
6639 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6640 data->client.sec_buf_fd[fd_idx].vbase = buf;
6641 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6642 data->client.sec_buf_fd[fd_idx].size = size;
6643 return 0;
6644}
6645
6646static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6647 struct qseecom_dev_handle *data, bool cleanup)
6648{
6649 struct ion_handle *ihandle;
6650 int ret = 0;
6651 int i = 0;
6652 uint32_t *update;
6653 struct sg_table *sg_ptr = NULL;
6654 struct scatterlist *sg;
6655 struct qseecom_param_memref *memref;
6656
6657 if (req == NULL) {
6658 pr_err("Invalid address\n");
6659 return -EINVAL;
6660 }
6661 for (i = 0; i < MAX_ION_FD; i++) {
6662 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006663 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006664 req->ifd_data[i].fd);
6665 if (IS_ERR_OR_NULL(ihandle)) {
6666 pr_err("Ion client can't retrieve the handle\n");
6667 return -ENOMEM;
6668 }
6669 if ((req->req_len < sizeof(uint32_t)) ||
6670 (req->ifd_data[i].cmd_buf_offset >
6671 req->req_len - sizeof(uint32_t))) {
6672 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6673 req->req_len,
6674 req->ifd_data[i].cmd_buf_offset);
6675 return -EINVAL;
6676 }
6677 update = (uint32_t *)((char *) req->req_ptr +
6678 req->ifd_data[i].cmd_buf_offset);
6679 if (!update) {
6680 pr_err("update pointer is NULL\n");
6681 return -EINVAL;
6682 }
6683 } else {
6684 continue;
6685 }
6686 /* Populate the cmd data structure with the phys_addr */
6687 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6688 if (IS_ERR_OR_NULL(sg_ptr)) {
6689		pr_err("Ion client could not retrieve sg table\n");
6690 goto err;
6691 }
6692 sg = sg_ptr->sgl;
6693 if (sg == NULL) {
6694 pr_err("sg is NULL\n");
6695 goto err;
6696 }
6697 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6698			pr_err("Num of scat entr (%d) or length (%d) invalid\n",
6699 sg_ptr->nents, sg->length);
6700 goto err;
6701 }
6702 /* clean up buf for pre-allocated fd */
6703 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6704 (*update)) {
6705 if (data->client.sec_buf_fd[i].vbase)
6706 dma_free_coherent(qseecom.pdev,
6707 data->client.sec_buf_fd[i].size,
6708 data->client.sec_buf_fd[i].vbase,
6709 data->client.sec_buf_fd[i].pbase);
6710 memset((void *)update, 0,
6711 sizeof(struct qseecom_param_memref));
6712 memset(&(data->client.sec_buf_fd[i]), 0,
6713 sizeof(struct qseecom_sec_buf_fd_info));
6714 goto clean;
6715 }
6716
6717 if (*update == 0) {
6718 /* update buf for pre-allocated fd from secure heap*/
6719 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6720 sg_ptr);
6721 if (ret) {
6722 pr_err("Failed to handle buf for fd[%d]\n", i);
6723 goto err;
6724 }
6725 memref = (struct qseecom_param_memref *)update;
6726 memref->buffer =
6727 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6728 memref->size =
6729 (uint32_t)(data->client.sec_buf_fd[i].size);
6730 } else {
6731 /* update buf for fd from non-secure qseecom heap */
6732 if (sg_ptr->nents != 1) {
6733 pr_err("Num of scat entr (%d) invalid\n",
6734 sg_ptr->nents);
6735 goto err;
6736 }
6737 if (cleanup)
6738 *update = 0;
6739 else
6740 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6741 }
6742clean:
6743 if (cleanup) {
6744 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6745 ihandle, NULL, sg->length,
6746 ION_IOC_INV_CACHES);
6747 if (ret) {
6748 pr_err("cache operation failed %d\n", ret);
6749 goto err;
6750 }
6751 } else {
6752 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6753 ihandle, NULL, sg->length,
6754 ION_IOC_CLEAN_INV_CACHES);
6755 if (ret) {
6756 pr_err("cache operation failed %d\n", ret);
6757 goto err;
6758 }
6759 data->sglistinfo_ptr[i].indexAndFlags =
6760 SGLISTINFO_SET_INDEX_FLAG(
6761 (sg_ptr->nents == 1), 0,
6762 req->ifd_data[i].cmd_buf_offset);
6763 data->sglistinfo_ptr[i].sizeOrCount =
6764 (sg_ptr->nents == 1) ?
6765 sg->length : sg_ptr->nents;
6766 data->sglist_cnt = i + 1;
6767 }
6768 /* Deallocate the handle */
6769 if (!IS_ERR_OR_NULL(ihandle))
6770 ion_free(qseecom.ion_clnt, ihandle);
6771 }
6772 return ret;
6773err:
6774 if (!IS_ERR_OR_NULL(ihandle))
6775 ion_free(qseecom.ion_clnt, ihandle);
6776 return -ENOMEM;
6777}
6778
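/*
 * Common path for the GP (QTEEC) open-session, close-session and
 * request-cancellation commands: validate the request, look up the loaded
 * app, convert the user req/resp pointers to kernel addresses, build the
 * 32-bit or 64-bit ireq depending on the QSEE version, flush the shared
 * buffer and sglistinfo table, then issue the SCM call and handle
 * reentrancy/incomplete results before invalidating the shared buffer.
 */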
6779static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
6780 struct qseecom_qteec_req *req, uint32_t cmd_id)
6781{
6782 struct qseecom_command_scm_resp resp;
6783 struct qseecom_qteec_ireq ireq;
6784 struct qseecom_qteec_64bit_ireq ireq_64bit;
6785 struct qseecom_registered_app_list *ptr_app;
6786 bool found_app = false;
6787 unsigned long flags;
6788 int ret = 0;
6789	int ret2 = 0;
6790	uint32_t reqd_len_sb_in = 0;
6791 void *cmd_buf = NULL;
6792 size_t cmd_len;
6793 struct sglist_info *table = data->sglistinfo_ptr;
6794	void *req_ptr = NULL;
6795 void *resp_ptr = NULL;
6796
6797 ret = __qseecom_qteec_validate_msg(data, req);
6798 if (ret)
6799 return ret;
6800
6801	req_ptr = req->req_ptr;
6802 resp_ptr = req->resp_ptr;
6803
6804	/* find app_id & img_name from list */
6805 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6806 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6807 list) {
6808 if ((ptr_app->app_id == data->client.app_id) &&
6809 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6810 found_app = true;
6811 break;
6812 }
6813 }
6814 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6815 if (!found_app) {
6816 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6817 (char *)data->client.app_name);
6818 return -ENOENT;
6819 }
6820
6821	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6822 (uintptr_t)req->req_ptr);
6823 req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6824 (uintptr_t)req->resp_ptr);
6825
6826	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6827 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
6828 ret = __qseecom_update_qteec_req_buf(
6829 (struct qseecom_qteec_modfd_req *)req, data, false);
6830 if (ret)
6831 return ret;
6832 }
6833
6834 if (qseecom.qsee_version < QSEE_VERSION_40) {
6835 ireq.app_id = data->client.app_id;
6836 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6837			(uintptr_t)req_ptr);
6838		ireq.req_len = req->req_len;
6839 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6840			(uintptr_t)resp_ptr);
6841		ireq.resp_len = req->resp_len;
6842 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6843 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6844 dmac_flush_range((void *)table,
6845 (void *)table + SGLISTINFO_TABLE_SIZE);
6846 cmd_buf = (void *)&ireq;
6847 cmd_len = sizeof(struct qseecom_qteec_ireq);
6848 } else {
6849 ireq_64bit.app_id = data->client.app_id;
6850 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6851			(uintptr_t)req_ptr);
6852		ireq_64bit.req_len = req->req_len;
6853 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6854			(uintptr_t)resp_ptr);
6855		ireq_64bit.resp_len = req->resp_len;
6856 if ((data->client.app_arch == ELFCLASS32) &&
6857 ((ireq_64bit.req_ptr >=
6858 PHY_ADDR_4G - ireq_64bit.req_len) ||
6859 (ireq_64bit.resp_ptr >=
6860 PHY_ADDR_4G - ireq_64bit.resp_len))){
6861 pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
6862 data->client.app_name, data->client.app_id);
6863 pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
6864 ireq_64bit.req_ptr, ireq_64bit.req_len,
6865 ireq_64bit.resp_ptr, ireq_64bit.resp_len);
6866 return -EFAULT;
6867 }
6868 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6869 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6870 dmac_flush_range((void *)table,
6871 (void *)table + SGLISTINFO_TABLE_SIZE);
6872 cmd_buf = (void *)&ireq_64bit;
6873 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6874 }
6875 if (qseecom.whitelist_support == true
6876 && cmd_id == QSEOS_TEE_OPEN_SESSION)
6877 *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
6878 else
6879 *(uint32_t *)cmd_buf = cmd_id;
6880
6881 reqd_len_sb_in = req->req_len + req->resp_len;
6882 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6883 data->client.sb_virt,
6884 reqd_len_sb_in,
6885 ION_IOC_CLEAN_INV_CACHES);
6886 if (ret) {
6887 pr_err("cache operation failed %d\n", ret);
6888 return ret;
6889 }
6890
6891 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6892
6893 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6894 cmd_buf, cmd_len,
6895 &resp, sizeof(resp));
6896 if (ret) {
6897 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6898 ret, data->client.app_id);
6899		goto exit;
6900	}
6901
6902 if (qseecom.qsee_reentrancy_support) {
6903 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
6904		if (ret)
6905 goto exit;
6906	} else {
6907 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6908 ret = __qseecom_process_incomplete_cmd(data, &resp);
6909 if (ret) {
6910 pr_err("process_incomplete_cmd failed err: %d\n",
6911 ret);
6912				goto exit;
6913			}
6914 } else {
6915 if (resp.result != QSEOS_RESULT_SUCCESS) {
6916 pr_err("Response result %d not supported\n",
6917 resp.result);
6918 ret = -EINVAL;
6919				goto exit;
6920			}
6921 }
6922 }
6923exit:
6924	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6925			data->client.sb_virt, data->client.sb_length,
6926			ION_IOC_INV_CACHES);
6927	if (ret2) {
6928		pr_err("cache operation failed %d\n", ret2);
6929		return ret2;
6930	}
6931
6932 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6933 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
6934		ret2 = __qseecom_update_qteec_req_buf(
6935			(struct qseecom_qteec_modfd_req *)req, data, true);
6936		if (ret2)
6937			return ret2;
6938	}
6939	return ret;
6940}
6941
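/* Thin ioctl wrappers around __qseecom_qteec_issue_cmd for GP sessions */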
6942static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6943 void __user *argp)
6944{
6945 struct qseecom_qteec_modfd_req req;
6946 int ret = 0;
6947
6948 ret = copy_from_user(&req, argp,
6949 sizeof(struct qseecom_qteec_modfd_req));
6950 if (ret) {
6951 pr_err("copy_from_user failed\n");
6952 return ret;
6953 }
6954 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6955 QSEOS_TEE_OPEN_SESSION);
6956
6957 return ret;
6958}
6959
6960static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6961 void __user *argp)
6962{
6963 struct qseecom_qteec_req req;
6964 int ret = 0;
6965
6966 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6967 if (ret) {
6968 pr_err("copy_from_user failed\n");
6969 return ret;
6970 }
6971 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6972 return ret;
6973}
6974
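/*
 * GP invoke-command path: same flow as __qseecom_qteec_issue_cmd but for
 * QSEOS_TEE_INVOKE_COMMAND, with the modfd buffers patched in before the
 * SCM call and restored afterwards.
 */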
6975static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6976 void __user *argp)
6977{
6978 struct qseecom_qteec_modfd_req req;
6979 struct qseecom_command_scm_resp resp;
6980 struct qseecom_qteec_ireq ireq;
6981 struct qseecom_qteec_64bit_ireq ireq_64bit;
6982 struct qseecom_registered_app_list *ptr_app;
6983 bool found_app = false;
6984 unsigned long flags;
6985 int ret = 0;
6986 int i = 0;
6987 uint32_t reqd_len_sb_in = 0;
6988 void *cmd_buf = NULL;
6989 size_t cmd_len;
6990 struct sglist_info *table = data->sglistinfo_ptr;
6991 void *req_ptr = NULL;
6992 void *resp_ptr = NULL;
6993
6994 ret = copy_from_user(&req, argp,
6995 sizeof(struct qseecom_qteec_modfd_req));
6996 if (ret) {
6997 pr_err("copy_from_user failed\n");
6998 return ret;
6999 }
7000 ret = __qseecom_qteec_validate_msg(data,
7001 (struct qseecom_qteec_req *)(&req));
7002 if (ret)
7003 return ret;
7004 req_ptr = req.req_ptr;
7005 resp_ptr = req.resp_ptr;
7006
7007 /* find app_id & img_name from list */
7008 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
7009 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
7010 list) {
7011 if ((ptr_app->app_id == data->client.app_id) &&
7012 (!strcmp(ptr_app->app_name, data->client.app_name))) {
7013 found_app = true;
7014 break;
7015 }
7016 }
7017 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
7018 if (!found_app) {
7019 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
7020 (char *)data->client.app_name);
7021 return -ENOENT;
7022 }
7023
7024 /* validate offsets */
7025 for (i = 0; i < MAX_ION_FD; i++) {
7026 if (req.ifd_data[i].fd) {
7027 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
7028 return -EINVAL;
7029 }
7030 }
7031 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7032 (uintptr_t)req.req_ptr);
7033 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7034 (uintptr_t)req.resp_ptr);
7035 ret = __qseecom_update_qteec_req_buf(&req, data, false);
7036 if (ret)
7037 return ret;
7038
7039 if (qseecom.qsee_version < QSEE_VERSION_40) {
7040 ireq.app_id = data->client.app_id;
7041 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7042 (uintptr_t)req_ptr);
7043 ireq.req_len = req.req_len;
7044 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7045 (uintptr_t)resp_ptr);
7046 ireq.resp_len = req.resp_len;
7047 cmd_buf = (void *)&ireq;
7048 cmd_len = sizeof(struct qseecom_qteec_ireq);
7049 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
7050 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7051 dmac_flush_range((void *)table,
7052 (void *)table + SGLISTINFO_TABLE_SIZE);
7053 } else {
7054 ireq_64bit.app_id = data->client.app_id;
7055 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7056 (uintptr_t)req_ptr);
7057 ireq_64bit.req_len = req.req_len;
7058 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7059 (uintptr_t)resp_ptr);
7060 ireq_64bit.resp_len = req.resp_len;
7061 cmd_buf = (void *)&ireq_64bit;
7062 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
7063 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
7064 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7065 dmac_flush_range((void *)table,
7066 (void *)table + SGLISTINFO_TABLE_SIZE);
7067 }
7068 reqd_len_sb_in = req.req_len + req.resp_len;
7069 if (qseecom.whitelist_support == true)
7070 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
7071 else
7072 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
7073
7074 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7075 data->client.sb_virt,
7076 reqd_len_sb_in,
7077 ION_IOC_CLEAN_INV_CACHES);
7078 if (ret) {
7079 pr_err("cache operation failed %d\n", ret);
7080 return ret;
7081 }
7082
7083 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
7084
7085 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
7086 cmd_buf, cmd_len,
7087 &resp, sizeof(resp));
7088 if (ret) {
7089 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
7090 ret, data->client.app_id);
7091 return ret;
7092 }
7093
7094 if (qseecom.qsee_reentrancy_support) {
7095 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
7096 } else {
7097 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
7098 ret = __qseecom_process_incomplete_cmd(data, &resp);
7099 if (ret) {
7100 pr_err("process_incomplete_cmd failed err: %d\n",
7101 ret);
7102 return ret;
7103 }
7104 } else {
7105 if (resp.result != QSEOS_RESULT_SUCCESS) {
7106 pr_err("Response result %d not supported\n",
7107 resp.result);
7108 ret = -EINVAL;
7109 }
7110 }
7111 }
7112 ret = __qseecom_update_qteec_req_buf(&req, data, true);
7113 if (ret)
7114 return ret;
7115
7116 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7117 data->client.sb_virt, data->client.sb_length,
7118 ION_IOC_INV_CACHES);
7119 if (ret) {
7120 pr_err("cache operation failed %d\n", ret);
7121 return ret;
7122 }
7123 return 0;
7124}
7125
7126static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
7127 void __user *argp)
7128{
7129 struct qseecom_qteec_modfd_req req;
7130 int ret = 0;
7131
7132 ret = copy_from_user(&req, argp,
7133 sizeof(struct qseecom_qteec_modfd_req));
7134 if (ret) {
7135 pr_err("copy_from_user failed\n");
7136 return ret;
7137 }
7138 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7139 QSEOS_TEE_REQUEST_CANCELLATION);
7140
7141 return ret;
7142}
7143
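/* Reset the per-handle sglistinfo table once a modfd command completes */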
7144static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
7145{
7146 if (data->sglist_cnt) {
7147 memset(data->sglistinfo_ptr, 0,
7148 SGLISTINFO_TABLE_SIZE);
7149 data->sglist_cnt = 0;
7150 }
7151}
7152
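/*
 * Main ioctl dispatcher.  Each command checks that the handle is of the
 * expected type and state, serializes against other clients
 * (app_access_lock) or listeners (listener_access_lock), bumps
 * ioctl_count around the work so a concurrent abort can wait for it, and
 * votes crypto clocks or bus bandwidth where the operation needs them.
 */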
7153static long qseecom_ioctl(struct file *file,
7154			unsigned int cmd, unsigned long arg)
7155{
7156 int ret = 0;
7157 struct qseecom_dev_handle *data = file->private_data;
7158 void __user *argp = (void __user *) arg;
7159 bool perf_enabled = false;
7160
7161 if (!data) {
7162 pr_err("Invalid/uninitialized device handle\n");
7163 return -EINVAL;
7164 }
7165
7166 if (data->abort) {
7167 pr_err("Aborting qseecom driver\n");
7168 return -ENODEV;
7169 }
7170	if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
7171 cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
7172 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
7173 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
7174		__wakeup_unregister_listener_kthread();
7175
7176 switch (cmd) {
7177 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
7178 if (data->type != QSEECOM_GENERIC) {
7179 pr_err("reg lstnr req: invalid handle (%d)\n",
7180 data->type);
7181 ret = -EINVAL;
7182 break;
7183 }
7184 pr_debug("ioctl register_listener_req()\n");
7185		mutex_lock(&listener_access_lock);
7186		atomic_inc(&data->ioctl_count);
7187 data->type = QSEECOM_LISTENER_SERVICE;
7188 ret = qseecom_register_listener(data, argp);
7189 atomic_dec(&data->ioctl_count);
7190 wake_up_all(&data->abort_wq);
7191		mutex_unlock(&listener_access_lock);
7192		if (ret)
7193 pr_err("failed qseecom_register_listener: %d\n", ret);
7194 break;
7195 }
7196	case QSEECOM_IOCTL_SET_ICE_INFO: {
7197 struct qseecom_ice_data_t ice_data;
7198
7199 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
7200 if (ret) {
7201 pr_err("copy_from_user failed\n");
7202 return -EFAULT;
7203 }
7204 qcom_ice_set_fde_flag(ice_data.flag);
7205 break;
7206 }
7207
7208	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7209 if ((data->listener.id == 0) ||
7210 (data->type != QSEECOM_LISTENER_SERVICE)) {
7211 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7212 data->type, data->listener.id);
7213 ret = -EINVAL;
7214 break;
7215 }
7216 pr_debug("ioctl unregister_listener_req()\n");
7217		mutex_lock(&listener_access_lock);
7218		atomic_inc(&data->ioctl_count);
7219 ret = qseecom_unregister_listener(data);
7220 atomic_dec(&data->ioctl_count);
7221 wake_up_all(&data->abort_wq);
7222		mutex_unlock(&listener_access_lock);
7223		if (ret)
7224 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7225 break;
7226 }
7227 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7228 if ((data->client.app_id == 0) ||
7229 (data->type != QSEECOM_CLIENT_APP)) {
7230 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7231 data->type, data->client.app_id);
7232 ret = -EINVAL;
7233 break;
7234 }
7235 /* Only one client allowed here at a time */
7236 mutex_lock(&app_access_lock);
7237 if (qseecom.support_bus_scaling) {
7238 /* register bus bw in case the client doesn't do it */
7239 if (!data->mode) {
7240 mutex_lock(&qsee_bw_mutex);
7241 __qseecom_register_bus_bandwidth_needs(
7242 data, HIGH);
7243 mutex_unlock(&qsee_bw_mutex);
7244 }
7245 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7246 if (ret) {
7247 pr_err("Failed to set bw.\n");
7248 ret = -EINVAL;
7249 mutex_unlock(&app_access_lock);
7250 break;
7251 }
7252 }
7253 /*
7254 * On targets where crypto clock is handled by HLOS,
7255 * if clk_access_cnt is zero and perf_enabled is false,
7256 * then the crypto clock was not enabled before sending cmd to
7257 * tz, qseecom will enable the clock to avoid service failure.
7258 */
7259 if (!qseecom.no_clock_support &&
7260 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7261 pr_debug("ce clock is not enabled!\n");
7262 ret = qseecom_perf_enable(data);
7263 if (ret) {
7264 pr_err("Failed to vote for clock with err %d\n",
7265 ret);
7266 mutex_unlock(&app_access_lock);
7267 ret = -EINVAL;
7268 break;
7269 }
7270 perf_enabled = true;
7271 }
7272 atomic_inc(&data->ioctl_count);
7273 ret = qseecom_send_cmd(data, argp);
7274 if (qseecom.support_bus_scaling)
7275 __qseecom_add_bw_scale_down_timer(
7276 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7277 if (perf_enabled) {
7278 qsee_disable_clock_vote(data, CLK_DFAB);
7279 qsee_disable_clock_vote(data, CLK_SFPB);
7280 }
7281 atomic_dec(&data->ioctl_count);
7282 wake_up_all(&data->abort_wq);
7283 mutex_unlock(&app_access_lock);
7284 if (ret)
7285 pr_err("failed qseecom_send_cmd: %d\n", ret);
7286 break;
7287 }
7288 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7289 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7290 if ((data->client.app_id == 0) ||
7291 (data->type != QSEECOM_CLIENT_APP)) {
7292 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7293 data->type, data->client.app_id);
7294 ret = -EINVAL;
7295 break;
7296 }
7297 /* Only one client allowed here at a time */
7298 mutex_lock(&app_access_lock);
7299 if (qseecom.support_bus_scaling) {
7300 if (!data->mode) {
7301 mutex_lock(&qsee_bw_mutex);
7302 __qseecom_register_bus_bandwidth_needs(
7303 data, HIGH);
7304 mutex_unlock(&qsee_bw_mutex);
7305 }
7306 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7307 if (ret) {
7308 pr_err("Failed to set bw.\n");
7309 mutex_unlock(&app_access_lock);
7310 ret = -EINVAL;
7311 break;
7312 }
7313 }
7314 /*
7315 * On targets where crypto clock is handled by HLOS,
7316 * if clk_access_cnt is zero and perf_enabled is false,
7317 * then the crypto clock was not enabled before sending cmd to
7318 * tz, qseecom will enable the clock to avoid service failure.
7319 */
7320 if (!qseecom.no_clock_support &&
7321 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7322 pr_debug("ce clock is not enabled!\n");
7323 ret = qseecom_perf_enable(data);
7324 if (ret) {
7325 pr_err("Failed to vote for clock with err %d\n",
7326 ret);
7327 mutex_unlock(&app_access_lock);
7328 ret = -EINVAL;
7329 break;
7330 }
7331 perf_enabled = true;
7332 }
7333 atomic_inc(&data->ioctl_count);
7334 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7335 ret = qseecom_send_modfd_cmd(data, argp);
7336 else
7337 ret = qseecom_send_modfd_cmd_64(data, argp);
7338 if (qseecom.support_bus_scaling)
7339 __qseecom_add_bw_scale_down_timer(
7340 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7341 if (perf_enabled) {
7342 qsee_disable_clock_vote(data, CLK_DFAB);
7343 qsee_disable_clock_vote(data, CLK_SFPB);
7344 }
7345 atomic_dec(&data->ioctl_count);
7346 wake_up_all(&data->abort_wq);
7347 mutex_unlock(&app_access_lock);
7348 if (ret)
7349 pr_err("failed qseecom_send_cmd: %d\n", ret);
7350 __qseecom_clean_data_sglistinfo(data);
7351 break;
7352 }
7353 case QSEECOM_IOCTL_RECEIVE_REQ: {
7354 if ((data->listener.id == 0) ||
7355 (data->type != QSEECOM_LISTENER_SERVICE)) {
7356 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7357 data->type, data->listener.id);
7358 ret = -EINVAL;
7359 break;
7360 }
7361 atomic_inc(&data->ioctl_count);
7362 ret = qseecom_receive_req(data);
7363 atomic_dec(&data->ioctl_count);
7364 wake_up_all(&data->abort_wq);
7365 if (ret && (ret != -ERESTARTSYS))
7366 pr_err("failed qseecom_receive_req: %d\n", ret);
7367 break;
7368 }
7369 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7370 if ((data->listener.id == 0) ||
7371 (data->type != QSEECOM_LISTENER_SERVICE)) {
7372 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7373 data->type, data->listener.id);
7374 ret = -EINVAL;
7375 break;
7376 }
7377		mutex_lock(&listener_access_lock);
7378		atomic_inc(&data->ioctl_count);
7379 if (!qseecom.qsee_reentrancy_support)
7380 ret = qseecom_send_resp();
7381 else
7382 ret = qseecom_reentrancy_send_resp(data);
7383 atomic_dec(&data->ioctl_count);
7384 wake_up_all(&data->abort_wq);
7385		mutex_unlock(&listener_access_lock);
7386		if (ret)
7387 pr_err("failed qseecom_send_resp: %d\n", ret);
7388 break;
7389 }
7390 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7391 if ((data->type != QSEECOM_CLIENT_APP) &&
7392 (data->type != QSEECOM_GENERIC) &&
7393 (data->type != QSEECOM_SECURE_SERVICE)) {
7394 pr_err("set mem param req: invalid handle (%d)\n",
7395 data->type);
7396 ret = -EINVAL;
7397 break;
7398 }
7399 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7400 mutex_lock(&app_access_lock);
7401 atomic_inc(&data->ioctl_count);
7402 ret = qseecom_set_client_mem_param(data, argp);
7403 atomic_dec(&data->ioctl_count);
7404 mutex_unlock(&app_access_lock);
7405 if (ret)
7406			pr_err("failed qseecom_set_mem_param request: %d\n",
7407 ret);
7408 break;
7409 }
7410 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7411 if ((data->type != QSEECOM_GENERIC) &&
7412 (data->type != QSEECOM_CLIENT_APP)) {
7413 pr_err("load app req: invalid handle (%d)\n",
7414 data->type);
7415 ret = -EINVAL;
7416 break;
7417 }
7418 data->type = QSEECOM_CLIENT_APP;
7419 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7420 mutex_lock(&app_access_lock);
7421 atomic_inc(&data->ioctl_count);
7422 ret = qseecom_load_app(data, argp);
7423 atomic_dec(&data->ioctl_count);
7424 mutex_unlock(&app_access_lock);
7425 if (ret)
7426 pr_err("failed load_app request: %d\n", ret);
7427 break;
7428 }
7429 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7430 if ((data->client.app_id == 0) ||
7431 (data->type != QSEECOM_CLIENT_APP)) {
7432 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7433 data->type, data->client.app_id);
7434 ret = -EINVAL;
7435 break;
7436 }
7437 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7438 mutex_lock(&app_access_lock);
7439 atomic_inc(&data->ioctl_count);
7440 ret = qseecom_unload_app(data, false);
7441 atomic_dec(&data->ioctl_count);
7442 mutex_unlock(&app_access_lock);
7443 if (ret)
7444 pr_err("failed unload_app request: %d\n", ret);
7445 break;
7446 }
7447 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7448 atomic_inc(&data->ioctl_count);
7449 ret = qseecom_get_qseos_version(data, argp);
7450 if (ret)
7451 pr_err("qseecom_get_qseos_version: %d\n", ret);
7452 atomic_dec(&data->ioctl_count);
7453 break;
7454 }
7455 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7456 if ((data->type != QSEECOM_GENERIC) &&
7457 (data->type != QSEECOM_CLIENT_APP)) {
7458 pr_err("perf enable req: invalid handle (%d)\n",
7459 data->type);
7460 ret = -EINVAL;
7461 break;
7462 }
7463 if ((data->type == QSEECOM_CLIENT_APP) &&
7464 (data->client.app_id == 0)) {
7465 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7466 data->type, data->client.app_id);
7467 ret = -EINVAL;
7468 break;
7469 }
7470 atomic_inc(&data->ioctl_count);
7471 if (qseecom.support_bus_scaling) {
7472 mutex_lock(&qsee_bw_mutex);
7473 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7474 mutex_unlock(&qsee_bw_mutex);
7475 } else {
7476 ret = qseecom_perf_enable(data);
7477 if (ret)
7478 pr_err("Fail to vote for clocks %d\n", ret);
7479 }
7480 atomic_dec(&data->ioctl_count);
7481 break;
7482 }
7483 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7484 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7485 (data->type != QSEECOM_CLIENT_APP)) {
7486 pr_err("perf disable req: invalid handle (%d)\n",
7487 data->type);
7488 ret = -EINVAL;
7489 break;
7490 }
7491 if ((data->type == QSEECOM_CLIENT_APP) &&
7492 (data->client.app_id == 0)) {
7493 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7494 data->type, data->client.app_id);
7495 ret = -EINVAL;
7496 break;
7497 }
7498 atomic_inc(&data->ioctl_count);
7499 if (!qseecom.support_bus_scaling) {
7500 qsee_disable_clock_vote(data, CLK_DFAB);
7501 qsee_disable_clock_vote(data, CLK_SFPB);
7502 } else {
7503 mutex_lock(&qsee_bw_mutex);
7504 qseecom_unregister_bus_bandwidth_needs(data);
7505 mutex_unlock(&qsee_bw_mutex);
7506 }
7507 atomic_dec(&data->ioctl_count);
7508 break;
7509 }
7510
7511 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7512 /* If crypto clock is not handled by HLOS, return directly. */
7513 if (qseecom.no_clock_support) {
7514 pr_debug("crypto clock is not handled by HLOS\n");
7515 break;
7516 }
7517 if ((data->client.app_id == 0) ||
7518 (data->type != QSEECOM_CLIENT_APP)) {
7519 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7520 data->type, data->client.app_id);
7521 ret = -EINVAL;
7522 break;
7523 }
7524 atomic_inc(&data->ioctl_count);
7525 ret = qseecom_scale_bus_bandwidth(data, argp);
7526 atomic_dec(&data->ioctl_count);
7527 break;
7528 }
7529 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7530 if (data->type != QSEECOM_GENERIC) {
7531 pr_err("load ext elf req: invalid client handle (%d)\n",
7532 data->type);
7533 ret = -EINVAL;
7534 break;
7535 }
7536 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7537 data->released = true;
7538 mutex_lock(&app_access_lock);
7539 atomic_inc(&data->ioctl_count);
7540 ret = qseecom_load_external_elf(data, argp);
7541 atomic_dec(&data->ioctl_count);
7542 mutex_unlock(&app_access_lock);
7543 if (ret)
7544 pr_err("failed load_external_elf request: %d\n", ret);
7545 break;
7546 }
7547 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7548 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7549 pr_err("unload ext elf req: invalid handle (%d)\n",
7550 data->type);
7551 ret = -EINVAL;
7552 break;
7553 }
7554 data->released = true;
7555 mutex_lock(&app_access_lock);
7556 atomic_inc(&data->ioctl_count);
7557 ret = qseecom_unload_external_elf(data);
7558 atomic_dec(&data->ioctl_count);
7559 mutex_unlock(&app_access_lock);
7560 if (ret)
7561 pr_err("failed unload_app request: %d\n", ret);
7562 break;
7563 }
7564 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7565		if ((data->type != QSEECOM_GENERIC) &&
7566 (data->type != QSEECOM_CLIENT_APP)) {
7567 pr_err("app loaded query req: invalid handle (%d)\n",
7568 data->type);
7569 ret = -EINVAL;
7570 break;
7571 }
7572		data->type = QSEECOM_CLIENT_APP;
7573 mutex_lock(&app_access_lock);
7574 atomic_inc(&data->ioctl_count);
7575 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7576 ret = qseecom_query_app_loaded(data, argp);
7577 atomic_dec(&data->ioctl_count);
7578 mutex_unlock(&app_access_lock);
7579 break;
7580 }
7581 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7582 if (data->type != QSEECOM_GENERIC) {
7583 pr_err("send cmd svc req: invalid handle (%d)\n",
7584 data->type);
7585 ret = -EINVAL;
7586 break;
7587 }
7588 data->type = QSEECOM_SECURE_SERVICE;
7589 if (qseecom.qsee_version < QSEE_VERSION_03) {
7590 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7591 qseecom.qsee_version);
7592 return -EINVAL;
7593 }
7594 mutex_lock(&app_access_lock);
7595 atomic_inc(&data->ioctl_count);
7596 ret = qseecom_send_service_cmd(data, argp);
7597 atomic_dec(&data->ioctl_count);
7598 mutex_unlock(&app_access_lock);
7599 break;
7600 }
7601 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7602 if (!(qseecom.support_pfe || qseecom.support_fde))
7603 pr_err("Features requiring key init not supported\n");
7604 if (data->type != QSEECOM_GENERIC) {
7605 pr_err("create key req: invalid handle (%d)\n",
7606 data->type);
7607 ret = -EINVAL;
7608 break;
7609 }
7610 if (qseecom.qsee_version < QSEE_VERSION_05) {
7611 pr_err("Create Key feature unsupported: qsee ver %u\n",
7612 qseecom.qsee_version);
7613 return -EINVAL;
7614 }
7615 data->released = true;
7616 mutex_lock(&app_access_lock);
7617 atomic_inc(&data->ioctl_count);
7618 ret = qseecom_create_key(data, argp);
7619 if (ret)
7620 pr_err("failed to create encryption key: %d\n", ret);
7621
7622 atomic_dec(&data->ioctl_count);
7623 mutex_unlock(&app_access_lock);
7624 break;
7625 }
7626 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7627 if (!(qseecom.support_pfe || qseecom.support_fde))
7628 pr_err("Features requiring key init not supported\n");
7629 if (data->type != QSEECOM_GENERIC) {
7630 pr_err("wipe key req: invalid handle (%d)\n",
7631 data->type);
7632 ret = -EINVAL;
7633 break;
7634 }
7635 if (qseecom.qsee_version < QSEE_VERSION_05) {
7636 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7637 qseecom.qsee_version);
7638 return -EINVAL;
7639 }
7640 data->released = true;
7641 mutex_lock(&app_access_lock);
7642 atomic_inc(&data->ioctl_count);
7643 ret = qseecom_wipe_key(data, argp);
7644 if (ret)
7645 pr_err("failed to wipe encryption key: %d\n", ret);
7646 atomic_dec(&data->ioctl_count);
7647 mutex_unlock(&app_access_lock);
7648 break;
7649 }
7650 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7651 if (!(qseecom.support_pfe || qseecom.support_fde))
7652 pr_err("Features requiring key init not supported\n");
7653 if (data->type != QSEECOM_GENERIC) {
7654 pr_err("update key req: invalid handle (%d)\n",
7655 data->type);
7656 ret = -EINVAL;
7657 break;
7658 }
7659 if (qseecom.qsee_version < QSEE_VERSION_05) {
7660 pr_err("Update Key feature unsupported in qsee ver %u\n",
7661 qseecom.qsee_version);
7662 return -EINVAL;
7663 }
7664 data->released = true;
7665 mutex_lock(&app_access_lock);
7666 atomic_inc(&data->ioctl_count);
7667 ret = qseecom_update_key_user_info(data, argp);
7668 if (ret)
7669 pr_err("failed to update key user info: %d\n", ret);
7670 atomic_dec(&data->ioctl_count);
7671 mutex_unlock(&app_access_lock);
7672 break;
7673 }
7674 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7675 if (data->type != QSEECOM_GENERIC) {
7676 pr_err("save part hash req: invalid handle (%d)\n",
7677 data->type);
7678 ret = -EINVAL;
7679 break;
7680 }
7681 data->released = true;
7682 mutex_lock(&app_access_lock);
7683 atomic_inc(&data->ioctl_count);
7684 ret = qseecom_save_partition_hash(argp);
7685 atomic_dec(&data->ioctl_count);
7686 mutex_unlock(&app_access_lock);
7687 break;
7688 }
7689 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7690 if (data->type != QSEECOM_GENERIC) {
7691 pr_err("ES activated req: invalid handle (%d)\n",
7692 data->type);
7693 ret = -EINVAL;
7694 break;
7695 }
7696 data->released = true;
7697 mutex_lock(&app_access_lock);
7698 atomic_inc(&data->ioctl_count);
7699 ret = qseecom_is_es_activated(argp);
7700 atomic_dec(&data->ioctl_count);
7701 mutex_unlock(&app_access_lock);
7702 break;
7703 }
7704 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7705 if (data->type != QSEECOM_GENERIC) {
7706 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7707 data->type);
7708 ret = -EINVAL;
7709 break;
7710 }
7711 data->released = true;
7712 mutex_lock(&app_access_lock);
7713 atomic_inc(&data->ioctl_count);
7714 ret = qseecom_mdtp_cipher_dip(argp);
7715 atomic_dec(&data->ioctl_count);
7716 mutex_unlock(&app_access_lock);
7717 break;
7718 }
7719 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7720 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7721 if ((data->listener.id == 0) ||
7722 (data->type != QSEECOM_LISTENER_SERVICE)) {
7723			pr_err("send modfd resp: invalid handle (%d), lid(%d)\n",
7724 data->type, data->listener.id);
7725 ret = -EINVAL;
7726 break;
7727 }
7728		mutex_lock(&listener_access_lock);
7729		atomic_inc(&data->ioctl_count);
7730 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7731 ret = qseecom_send_modfd_resp(data, argp);
7732 else
7733 ret = qseecom_send_modfd_resp_64(data, argp);
7734 atomic_dec(&data->ioctl_count);
7735 wake_up_all(&data->abort_wq);
7736		mutex_unlock(&listener_access_lock);
7737		if (ret)
7738 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7739 __qseecom_clean_data_sglistinfo(data);
7740 break;
7741 }
7742 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7743 if ((data->client.app_id == 0) ||
7744 (data->type != QSEECOM_CLIENT_APP)) {
7745 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7746 data->type, data->client.app_id);
7747 ret = -EINVAL;
7748 break;
7749 }
7750 if (qseecom.qsee_version < QSEE_VERSION_40) {
7751 pr_err("GP feature unsupported: qsee ver %u\n",
7752 qseecom.qsee_version);
7753 return -EINVAL;
7754 }
7755 /* Only one client allowed here at a time */
7756 mutex_lock(&app_access_lock);
7757 atomic_inc(&data->ioctl_count);
7758 ret = qseecom_qteec_open_session(data, argp);
7759 atomic_dec(&data->ioctl_count);
7760 wake_up_all(&data->abort_wq);
7761 mutex_unlock(&app_access_lock);
7762 if (ret)
7763 pr_err("failed open_session_cmd: %d\n", ret);
7764 __qseecom_clean_data_sglistinfo(data);
7765 break;
7766 }
7767 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7768 if ((data->client.app_id == 0) ||
7769 (data->type != QSEECOM_CLIENT_APP)) {
7770 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7771 data->type, data->client.app_id);
7772 ret = -EINVAL;
7773 break;
7774 }
7775 if (qseecom.qsee_version < QSEE_VERSION_40) {
7776 pr_err("GP feature unsupported: qsee ver %u\n",
7777 qseecom.qsee_version);
7778 return -EINVAL;
7779 }
7780 /* Only one client allowed here at a time */
7781 mutex_lock(&app_access_lock);
7782 atomic_inc(&data->ioctl_count);
7783 ret = qseecom_qteec_close_session(data, argp);
7784 atomic_dec(&data->ioctl_count);
7785 wake_up_all(&data->abort_wq);
7786 mutex_unlock(&app_access_lock);
7787 if (ret)
7788 pr_err("failed close_session_cmd: %d\n", ret);
7789 break;
7790 }
7791 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7792 if ((data->client.app_id == 0) ||
7793 (data->type != QSEECOM_CLIENT_APP)) {
7794 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7795 data->type, data->client.app_id);
7796 ret = -EINVAL;
7797 break;
7798 }
7799 if (qseecom.qsee_version < QSEE_VERSION_40) {
7800 pr_err("GP feature unsupported: qsee ver %u\n",
7801 qseecom.qsee_version);
7802 return -EINVAL;
7803 }
7804 /* Only one client allowed here at a time */
7805 mutex_lock(&app_access_lock);
7806 atomic_inc(&data->ioctl_count);
7807 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7808 atomic_dec(&data->ioctl_count);
7809 wake_up_all(&data->abort_wq);
7810 mutex_unlock(&app_access_lock);
7811 if (ret)
7812 pr_err("failed Invoke cmd: %d\n", ret);
7813 __qseecom_clean_data_sglistinfo(data);
7814 break;
7815 }
7816 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7817 if ((data->client.app_id == 0) ||
7818 (data->type != QSEECOM_CLIENT_APP)) {
7819 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7820 data->type, data->client.app_id);
7821 ret = -EINVAL;
7822 break;
7823 }
7824 if (qseecom.qsee_version < QSEE_VERSION_40) {
7825 pr_err("GP feature unsupported: qsee ver %u\n",
7826 qseecom.qsee_version);
7827 return -EINVAL;
7828 }
7829 /* Only one client allowed here at a time */
7830 mutex_lock(&app_access_lock);
7831 atomic_inc(&data->ioctl_count);
7832 ret = qseecom_qteec_request_cancellation(data, argp);
7833 atomic_dec(&data->ioctl_count);
7834 wake_up_all(&data->abort_wq);
7835 mutex_unlock(&app_access_lock);
7836 if (ret)
7837 pr_err("failed request_cancellation: %d\n", ret);
7838 break;
7839 }
7840 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7841 atomic_inc(&data->ioctl_count);
7842 ret = qseecom_get_ce_info(data, argp);
7843 if (ret)
7844 pr_err("failed get fde ce pipe info: %d\n", ret);
7845 atomic_dec(&data->ioctl_count);
7846 break;
7847 }
7848 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7849 atomic_inc(&data->ioctl_count);
7850 ret = qseecom_free_ce_info(data, argp);
7851			pr_err("failed to free ce pipe info: %d\n", ret);
7852 pr_err("failed get fde ce pipe info: %d\n", ret);
7853 atomic_dec(&data->ioctl_count);
7854 break;
7855 }
7856 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7857 atomic_inc(&data->ioctl_count);
7858 ret = qseecom_query_ce_info(data, argp);
7859			pr_err("failed to query ce pipe info: %d\n", ret);
7860 pr_err("failed get fde ce pipe info: %d\n", ret);
7861 atomic_dec(&data->ioctl_count);
7862 break;
7863 }
7864 default:
7865 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7866 return -EINVAL;
7867 }
7868 return ret;
7869}
7870
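/* Allocate and initialize a per-fd handle; its role is set by later ioctls */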
7871static int qseecom_open(struct inode *inode, struct file *file)
7872{
7873 int ret = 0;
7874 struct qseecom_dev_handle *data;
7875
7876 data = kzalloc(sizeof(*data), GFP_KERNEL);
7877 if (!data)
7878 return -ENOMEM;
7879 file->private_data = data;
7880 data->abort = 0;
7881 data->type = QSEECOM_GENERIC;
7882 data->released = false;
7883 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7884 data->mode = INACTIVE;
7885 init_waitqueue_head(&data->abort_wq);
7886 atomic_set(&data->ioctl_count, 0);
7887 return ret;
7888}
7889
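/*
 * Release path: undo whatever this handle was used for (unregister a
 * listener, unload the client app, or unmap ion memory), then drop any
 * bus-bandwidth or clock votes.  Listener handles are not freed here
 * (free_private_data is cleared); the listener unregister path owns that
 * memory.
 */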
7890static int qseecom_release(struct inode *inode, struct file *file)
7891{
7892 struct qseecom_dev_handle *data = file->private_data;
7893 int ret = 0;
7894	bool free_private_data = true;
7895
7896 if (data->released == false) {
7897 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
7898 data->type, data->mode, data);
7899 switch (data->type) {
7900 case QSEECOM_LISTENER_SERVICE:
7901			pr_debug("release lsnr svc %d\n", data->listener.id);
7902 free_private_data = false;
7903 mutex_lock(&listener_access_lock);
7904			ret = qseecom_unregister_listener(data);
7905			data->listener.release_called = true;
7906			mutex_unlock(&listener_access_lock);
7907			break;
7908 case QSEECOM_CLIENT_APP:
7909 mutex_lock(&app_access_lock);
7910 ret = qseecom_unload_app(data, true);
7911 mutex_unlock(&app_access_lock);
7912 break;
7913 case QSEECOM_SECURE_SERVICE:
7914 case QSEECOM_GENERIC:
7915 ret = qseecom_unmap_ion_allocated_memory(data);
7916 if (ret)
7917 pr_err("Ion Unmap failed\n");
7918 break;
7919 case QSEECOM_UNAVAILABLE_CLIENT_APP:
7920 break;
7921 default:
7922			pr_err("Unsupported clnt_handle_type %d\n",
7923				data->type);
7924 break;
7925 }
7926 }
7927
7928 if (qseecom.support_bus_scaling) {
7929 mutex_lock(&qsee_bw_mutex);
7930 if (data->mode != INACTIVE) {
7931 qseecom_unregister_bus_bandwidth_needs(data);
7932 if (qseecom.cumulative_mode == INACTIVE) {
7933 ret = __qseecom_set_msm_bus_request(INACTIVE);
7934 if (ret)
7935 pr_err("Fail to scale down bus\n");
7936 }
7937 }
7938 mutex_unlock(&qsee_bw_mutex);
7939 } else {
7940 if (data->fast_load_enabled == true)
7941 qsee_disable_clock_vote(data, CLK_SFPB);
7942 if (data->perf_enabled == true)
7943 qsee_disable_clock_vote(data, CLK_DFAB);
7944 }
7945
7946	if (free_private_data)
7947		kfree(data);
7948	return ret;
7949}
7950
7951#ifdef CONFIG_COMPAT
7952#include "compat_qseecom.c"
7953#else
7954#define compat_qseecom_ioctl NULL
7955#endif
7956
7957static const struct file_operations qseecom_fops = {
7958 .owner = THIS_MODULE,
7959 .unlocked_ioctl = qseecom_ioctl,
7960 .compat_ioctl = compat_qseecom_ioctl,
7961 .open = qseecom_open,
7962 .release = qseecom_release
7963};
7964
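/*
 * Look up the CE clocks (core src, core, iface, bus) for either the QSEE
 * or the CE_DRV instance and set the core src rate to ce_opp_freq_hz.
 * When HLOS does not own the clocks (no_clock_support) all handles are
 * left NULL and the function succeeds.
 */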
7965static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7966{
7967 int rc = 0;
7968 struct device *pdev;
7969 struct qseecom_clk *qclk;
7970 char *core_clk_src = NULL;
7971 char *core_clk = NULL;
7972 char *iface_clk = NULL;
7973 char *bus_clk = NULL;
7974
7975 switch (ce) {
7976 case CLK_QSEE: {
7977 core_clk_src = "core_clk_src";
7978 core_clk = "core_clk";
7979 iface_clk = "iface_clk";
7980 bus_clk = "bus_clk";
7981 qclk = &qseecom.qsee;
7982 qclk->instance = CLK_QSEE;
7983 break;
7984 };
7985 case CLK_CE_DRV: {
7986 core_clk_src = "ce_drv_core_clk_src";
7987 core_clk = "ce_drv_core_clk";
7988 iface_clk = "ce_drv_iface_clk";
7989 bus_clk = "ce_drv_bus_clk";
7990 qclk = &qseecom.ce_drv;
7991 qclk->instance = CLK_CE_DRV;
7992 break;
7993 };
7994 default:
7995 pr_err("Invalid ce hw instance: %d!\n", ce);
7996 return -EIO;
7997 }
7998
7999 if (qseecom.no_clock_support) {
8000 qclk->ce_core_clk = NULL;
8001 qclk->ce_clk = NULL;
8002 qclk->ce_bus_clk = NULL;
8003 qclk->ce_core_src_clk = NULL;
8004 return 0;
8005 }
8006
8007 pdev = qseecom.pdev;
8008
8009 /* Get CE3 src core clk. */
8010 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
8011 if (!IS_ERR(qclk->ce_core_src_clk)) {
8012 rc = clk_set_rate(qclk->ce_core_src_clk,
8013 qseecom.ce_opp_freq_hz);
8014 if (rc) {
8015 clk_put(qclk->ce_core_src_clk);
8016 qclk->ce_core_src_clk = NULL;
8017			pr_err("Unable to set the core src clk @%uMHz.\n",
8018 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
8019 return -EIO;
8020 }
8021 } else {
8022 pr_warn("Unable to get CE core src clk, set to NULL\n");
8023 qclk->ce_core_src_clk = NULL;
8024 }
8025
8026 /* Get CE core clk */
8027 qclk->ce_core_clk = clk_get(pdev, core_clk);
8028 if (IS_ERR(qclk->ce_core_clk)) {
8029 rc = PTR_ERR(qclk->ce_core_clk);
8030 pr_err("Unable to get CE core clk\n");
8031 if (qclk->ce_core_src_clk != NULL)
8032 clk_put(qclk->ce_core_src_clk);
8033 return -EIO;
8034 }
8035
8036 /* Get CE Interface clk */
8037 qclk->ce_clk = clk_get(pdev, iface_clk);
8038 if (IS_ERR(qclk->ce_clk)) {
8039 rc = PTR_ERR(qclk->ce_clk);
8040 pr_err("Unable to get CE interface clk\n");
8041 if (qclk->ce_core_src_clk != NULL)
8042 clk_put(qclk->ce_core_src_clk);
8043 clk_put(qclk->ce_core_clk);
8044 return -EIO;
8045 }
8046
8047 /* Get CE AXI clk */
8048 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
8049 if (IS_ERR(qclk->ce_bus_clk)) {
8050 rc = PTR_ERR(qclk->ce_bus_clk);
8051 pr_err("Unable to get CE BUS interface clk\n");
8052 if (qclk->ce_core_src_clk != NULL)
8053 clk_put(qclk->ce_core_src_clk);
8054 clk_put(qclk->ce_core_clk);
8055 clk_put(qclk->ce_clk);
8056 return -EIO;
8057 }
8058
8059 return rc;
8060}
8061
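/* Put any clock handles taken by __qseecom_init_clk for this instance */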
8062static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
8063{
8064 struct qseecom_clk *qclk;
8065
8066 if (ce == CLK_QSEE)
8067 qclk = &qseecom.qsee;
8068 else
8069 qclk = &qseecom.ce_drv;
8070
8071 if (qclk->ce_clk != NULL) {
8072 clk_put(qclk->ce_clk);
8073 qclk->ce_clk = NULL;
8074 }
8075 if (qclk->ce_core_clk != NULL) {
8076 clk_put(qclk->ce_core_clk);
8077 qclk->ce_core_clk = NULL;
8078 }
8079 if (qclk->ce_bus_clk != NULL) {
8080 clk_put(qclk->ce_bus_clk);
8081 qclk->ce_bus_clk = NULL;
8082 }
8083 if (qclk->ce_core_src_clk != NULL) {
8084 clk_put(qclk->ce_core_src_clk);
8085 qclk->ce_core_src_clk = NULL;
8086 }
8087 qclk->instance = CLK_INVALID;
8088}
8089
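/*
 * Parse the crypto-engine configuration from device tree.  The newer
 * per-unit "qcom,full-disk-encrypt-info" / "qcom,per-file-encrypt-info"
 * tables are preferred; when neither is present (old_db) the legacy
 * single-unit properties (disk/file encrypt pipe pair plus the HLOS CE
 * hw instance list) are used instead.  On failure everything allocated
 * here is freed.
 */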
8090static int qseecom_retrieve_ce_data(struct platform_device *pdev)
8091{
8092 int rc = 0;
8093 uint32_t hlos_num_ce_hw_instances;
8094 uint32_t disk_encrypt_pipe;
8095 uint32_t file_encrypt_pipe;
8096	uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
8097	int i;
8098 const int *tbl;
8099 int size;
8100 int entry;
8101 struct qseecom_crypto_info *pfde_tbl = NULL;
8102 struct qseecom_crypto_info *p;
8103 int tbl_size;
8104 int j;
8105 bool old_db = true;
8106 struct qseecom_ce_info_use *pce_info_use;
8107 uint32_t *unit_tbl = NULL;
8108 int total_units = 0;
8109 struct qseecom_ce_pipe_entry *pce_entry;
8110
8111 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
8112 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
8113
8114 if (of_property_read_u32((&pdev->dev)->of_node,
8115 "qcom,qsee-ce-hw-instance",
8116 &qseecom.ce_info.qsee_ce_hw_instance)) {
8117 pr_err("Fail to get qsee ce hw instance information.\n");
8118 rc = -EINVAL;
8119 goto out;
8120 } else {
8121 pr_debug("qsee-ce-hw-instance=0x%x\n",
8122 qseecom.ce_info.qsee_ce_hw_instance);
8123 }
8124
8125 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
8126 "qcom,support-fde");
8127 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
8128 "qcom,support-pfe");
8129
8130 if (!qseecom.support_pfe && !qseecom.support_fde) {
8131		pr_warn("Device does not support PFE/FDE\n");
8132 goto out;
8133 }
8134
8135 if (qseecom.support_fde)
8136 tbl = of_get_property((&pdev->dev)->of_node,
8137 "qcom,full-disk-encrypt-info", &size);
8138 else
8139 tbl = NULL;
8140 if (tbl) {
8141 old_db = false;
8142 if (size % sizeof(struct qseecom_crypto_info)) {
8143 pr_err("full-disk-encrypt-info tbl size(%d)\n",
8144 size);
8145 rc = -EINVAL;
8146 goto out;
8147 }
8148 tbl_size = size / sizeof
8149 (struct qseecom_crypto_info);
8150
8151 pfde_tbl = kzalloc(size, GFP_KERNEL);
8152 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8153 total_units = 0;
8154
8155 if (!pfde_tbl || !unit_tbl) {
8156 pr_err("failed to alloc memory\n");
8157 rc = -ENOMEM;
8158 goto out;
8159 }
8160 if (of_property_read_u32_array((&pdev->dev)->of_node,
8161 "qcom,full-disk-encrypt-info",
8162 (u32 *)pfde_tbl, size/sizeof(u32))) {
8163 pr_err("failed to read full-disk-encrypt-info tbl\n");
8164 rc = -EINVAL;
8165 goto out;
8166 }
8167
8168 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8169 for (j = 0; j < total_units; j++) {
8170 if (p->unit_num == *(unit_tbl + j))
8171 break;
8172 }
8173 if (j == total_units) {
8174 *(unit_tbl + total_units) = p->unit_num;
8175 total_units++;
8176 }
8177 }
8178
8179 qseecom.ce_info.num_fde = total_units;
8180 pce_info_use = qseecom.ce_info.fde = kcalloc(
8181 total_units, sizeof(struct qseecom_ce_info_use),
8182 GFP_KERNEL);
8183 if (!pce_info_use) {
8184 pr_err("failed to alloc memory\n");
8185 rc = -ENOMEM;
8186 goto out;
8187 }
8188
8189 for (j = 0; j < total_units; j++, pce_info_use++) {
8190 pce_info_use->unit_num = *(unit_tbl + j);
8191 pce_info_use->alloc = false;
8192 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8193 pce_info_use->num_ce_pipe_entries = 0;
8194 pce_info_use->ce_pipe_entry = NULL;
8195 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8196 if (p->unit_num == pce_info_use->unit_num)
8197 pce_info_use->num_ce_pipe_entries++;
8198 }
8199
8200 entry = pce_info_use->num_ce_pipe_entries;
8201 pce_entry = pce_info_use->ce_pipe_entry =
8202 kcalloc(entry,
8203 sizeof(struct qseecom_ce_pipe_entry),
8204 GFP_KERNEL);
8205 if (pce_entry == NULL) {
8206 pr_err("failed to alloc memory\n");
8207 rc = -ENOMEM;
8208 goto out;
8209 }
8210
8211 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8212 if (p->unit_num == pce_info_use->unit_num) {
8213 pce_entry->ce_num = p->ce;
8214 pce_entry->ce_pipe_pair =
8215 p->pipe_pair;
8216 pce_entry->valid = true;
8217 pce_entry++;
8218 }
8219 }
8220 }
8221 kfree(unit_tbl);
8222 unit_tbl = NULL;
8223 kfree(pfde_tbl);
8224 pfde_tbl = NULL;
8225 }
8226
8227 if (qseecom.support_pfe)
8228 tbl = of_get_property((&pdev->dev)->of_node,
8229 "qcom,per-file-encrypt-info", &size);
8230 else
8231 tbl = NULL;
8232 if (tbl) {
8233 old_db = false;
8234 if (size % sizeof(struct qseecom_crypto_info)) {
8235 pr_err("per-file-encrypt-info tbl size(%d)\n",
8236 size);
8237 rc = -EINVAL;
8238 goto out;
8239 }
8240 tbl_size = size / sizeof
8241 (struct qseecom_crypto_info);
8242
8243 pfde_tbl = kzalloc(size, GFP_KERNEL);
8244 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8245 total_units = 0;
8246 if (!pfde_tbl || !unit_tbl) {
8247 pr_err("failed to alloc memory\n");
8248 rc = -ENOMEM;
8249 goto out;
8250 }
8251 if (of_property_read_u32_array((&pdev->dev)->of_node,
8252 "qcom,per-file-encrypt-info",
8253 (u32 *)pfde_tbl, size/sizeof(u32))) {
8254 pr_err("failed to read per-file-encrypt-info tbl\n");
8255 rc = -EINVAL;
8256 goto out;
8257 }
8258
8259 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8260 for (j = 0; j < total_units; j++) {
8261 if (p->unit_num == *(unit_tbl + j))
8262 break;
8263 }
8264 if (j == total_units) {
8265 *(unit_tbl + total_units) = p->unit_num;
8266 total_units++;
8267 }
8268 }
8269
8270 qseecom.ce_info.num_pfe = total_units;
8271 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8272 total_units, sizeof(struct qseecom_ce_info_use),
8273 GFP_KERNEL);
8274 if (!pce_info_use) {
8275 pr_err("failed to alloc memory\n");
8276 rc = -ENOMEM;
8277 goto out;
8278 }
8279
8280 for (j = 0; j < total_units; j++, pce_info_use++) {
8281 pce_info_use->unit_num = *(unit_tbl + j);
8282 pce_info_use->alloc = false;
8283 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8284 pce_info_use->num_ce_pipe_entries = 0;
8285 pce_info_use->ce_pipe_entry = NULL;
8286 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8287 if (p->unit_num == pce_info_use->unit_num)
8288 pce_info_use->num_ce_pipe_entries++;
8289 }
8290
8291 entry = pce_info_use->num_ce_pipe_entries;
8292 pce_entry = pce_info_use->ce_pipe_entry =
8293 kcalloc(entry,
8294 sizeof(struct qseecom_ce_pipe_entry),
8295 GFP_KERNEL);
8296 if (pce_entry == NULL) {
8297 pr_err("failed to alloc memory\n");
8298 rc = -ENOMEM;
8299 goto out;
8300 }
8301
8302 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8303 if (p->unit_num == pce_info_use->unit_num) {
8304 pce_entry->ce_num = p->ce;
8305 pce_entry->ce_pipe_pair =
8306 p->pipe_pair;
8307 pce_entry->valid = true;
8308 pce_entry++;
8309 }
8310 }
8311 }
8312 kfree(unit_tbl);
8313 unit_tbl = NULL;
8314 kfree(pfde_tbl);
8315 pfde_tbl = NULL;
8316 }
8317
8318 if (!old_db)
8319 goto out1;
8320
8321 if (of_property_read_bool((&pdev->dev)->of_node,
8322 "qcom,support-multiple-ce-hw-instance")) {
8323 if (of_property_read_u32((&pdev->dev)->of_node,
8324 "qcom,hlos-num-ce-hw-instances",
8325 &hlos_num_ce_hw_instances)) {
8326 pr_err("Fail: get hlos number of ce hw instance\n");
8327 rc = -EINVAL;
8328 goto out;
8329 }
8330 } else {
8331 hlos_num_ce_hw_instances = 1;
8332 }
8333
8334 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8335 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8336 MAX_CE_PIPE_PAIR_PER_UNIT);
8337 rc = -EINVAL;
8338 goto out;
8339 }
8340
8341 if (of_property_read_u32_array((&pdev->dev)->of_node,
8342 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8343 hlos_num_ce_hw_instances)) {
8344 pr_err("Fail: get hlos ce hw instance info\n");
8345 rc = -EINVAL;
8346 goto out;
8347 }
8348
8349 if (qseecom.support_fde) {
8350 pce_info_use = qseecom.ce_info.fde =
8351 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8352 if (!pce_info_use) {
8353 pr_err("failed to alloc memory\n");
8354 rc = -ENOMEM;
8355 goto out;
8356 }
8357 /* by default for old db */
8358 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8359 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8360 pce_info_use->alloc = false;
8361 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8362 pce_info_use->ce_pipe_entry = NULL;
8363 if (of_property_read_u32((&pdev->dev)->of_node,
8364 "qcom,disk-encrypt-pipe-pair",
8365 &disk_encrypt_pipe)) {
8366 pr_err("Fail to get FDE pipe information.\n");
8367 rc = -EINVAL;
8368 goto out;
8369 } else {
8370 pr_debug("disk-encrypt-pipe-pair=0x%x",
8371 disk_encrypt_pipe);
8372 }
8373 entry = pce_info_use->num_ce_pipe_entries =
8374 hlos_num_ce_hw_instances;
8375 pce_entry = pce_info_use->ce_pipe_entry =
8376 kcalloc(entry,
8377 sizeof(struct qseecom_ce_pipe_entry),
8378 GFP_KERNEL);
8379 if (pce_entry == NULL) {
8380 pr_err("failed to alloc memory\n");
8381 rc = -ENOMEM;
8382 goto out;
8383 }
8384 for (i = 0; i < entry; i++) {
8385 pce_entry->ce_num = hlos_ce_hw_instance[i];
8386 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8387 pce_entry->valid = 1;
8388 pce_entry++;
8389 }
8390 } else {
8391		pr_warn("Device does not support FDE\n");
8392 disk_encrypt_pipe = 0xff;
8393 }
8394 if (qseecom.support_pfe) {
8395 pce_info_use = qseecom.ce_info.pfe =
8396 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8397 if (!pce_info_use) {
8398 pr_err("failed to alloc memory\n");
8399 rc = -ENOMEM;
8400 goto out;
8401 }
8402 /* by default for old db */
8403 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8404 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8405 pce_info_use->alloc = false;
8406 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8407 pce_info_use->ce_pipe_entry = NULL;
8408
8409 if (of_property_read_u32((&pdev->dev)->of_node,
8410 "qcom,file-encrypt-pipe-pair",
8411 &file_encrypt_pipe)) {
8412 pr_err("Fail to get PFE pipe information.\n");
8413 rc = -EINVAL;
8414 goto out;
8415 } else {
8416 pr_debug("file-encrypt-pipe-pair=0x%x",
8417 file_encrypt_pipe);
8418 }
8419 entry = pce_info_use->num_ce_pipe_entries =
8420 hlos_num_ce_hw_instances;
8421 pce_entry = pce_info_use->ce_pipe_entry =
8422 kcalloc(entry,
8423 sizeof(struct qseecom_ce_pipe_entry),
8424 GFP_KERNEL);
8425 if (pce_entry == NULL) {
8426 pr_err("failed to alloc memory\n");
8427 rc = -ENOMEM;
8428 goto out;
8429 }
8430 for (i = 0; i < entry; i++) {
8431 pce_entry->ce_num = hlos_ce_hw_instance[i];
8432 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8433 pce_entry->valid = 1;
8434 pce_entry++;
8435 }
8436 } else {
8437		pr_warn("Device does not support PFE\n");
8438 file_encrypt_pipe = 0xff;
8439 }
8440
8441out1:
8442 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8443 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8444out:
8445 if (rc) {
8446 if (qseecom.ce_info.fde) {
8447 pce_info_use = qseecom.ce_info.fde;
8448 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8449 pce_entry = pce_info_use->ce_pipe_entry;
8450 kfree(pce_entry);
8451 pce_info_use++;
8452 }
8453 }
8454 kfree(qseecom.ce_info.fde);
8455 qseecom.ce_info.fde = NULL;
8456 if (qseecom.ce_info.pfe) {
8457 pce_info_use = qseecom.ce_info.pfe;
8458 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8459 pce_entry = pce_info_use->ce_pipe_entry;
8460 kfree(pce_entry);
8461 pce_info_use++;
8462 }
8463 }
8464 kfree(qseecom.ce_info.pfe);
8465 qseecom.ce_info.pfe = NULL;
8466 }
8467 kfree(unit_tbl);
8468 kfree(pfde_tbl);
8469 return rc;
8470}
8471
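/*
 * Claim a CE info unit for the requested usage (FDE or PFE): reuse the
 * unit already bound to the caller's handle if there is one, otherwise
 * pick a free unit, then copy its pipe-pair entries back to userspace.
 */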
8472static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8473 void __user *argp)
8474{
8475 struct qseecom_ce_info_req req;
8476 struct qseecom_ce_info_req *pinfo = &req;
8477 int ret = 0;
8478 int i;
8479 unsigned int entries;
8480 struct qseecom_ce_info_use *pce_info_use, *p;
8481 int total = 0;
8482 bool found = false;
8483 struct qseecom_ce_pipe_entry *pce_entry;
8484
8485 ret = copy_from_user(pinfo, argp,
8486 sizeof(struct qseecom_ce_info_req));
8487 if (ret) {
8488 pr_err("copy_from_user failed\n");
8489 return -EFAULT;
8490 }
8491
8492 switch (pinfo->usage) {
8493 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8494 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8495 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8496 if (qseecom.support_fde) {
8497 p = qseecom.ce_info.fde;
8498 total = qseecom.ce_info.num_fde;
8499 } else {
8500 pr_err("system does not support fde\n");
8501 return -EINVAL;
8502 }
8503 break;
8504 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8505 if (qseecom.support_pfe) {
8506 p = qseecom.ce_info.pfe;
8507 total = qseecom.ce_info.num_pfe;
8508 } else {
8509 pr_err("system does not support pfe\n");
8510 return -EINVAL;
8511 }
8512 break;
8513 default:
8514 pr_err("unsupported usage %d\n", pinfo->usage);
8515 return -EINVAL;
8516 }
8517
8518 pce_info_use = NULL;
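/*
 * Prefer a unit already bound to this handle; otherwise fall back to the
 * last free (unallocated) unit seen while scanning.
 */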
8519 for (i = 0; i < total; i++) {
8520 if (!p->alloc)
8521 pce_info_use = p;
8522 else if (!memcmp(p->handle, pinfo->handle,
8523 MAX_CE_INFO_HANDLE_SIZE)) {
8524 pce_info_use = p;
8525 found = true;
8526 break;
8527 }
8528 p++;
8529 }
8530
8531 if (pce_info_use == NULL)
8532 return -EBUSY;
8533
8534 pinfo->unit_num = pce_info_use->unit_num;
8535 if (!pce_info_use->alloc) {
8536 pce_info_use->alloc = true;
8537 memcpy(pce_info_use->handle,
8538 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8539 }
8540 if (pce_info_use->num_ce_pipe_entries >
8541 MAX_CE_PIPE_PAIR_PER_UNIT)
8542 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8543 else
8544 entries = pce_info_use->num_ce_pipe_entries;
8545 pinfo->num_ce_pipe_entries = entries;
8546 pce_entry = pce_info_use->ce_pipe_entry;
8547 for (i = 0; i < entries; i++, pce_entry++)
8548 pinfo->ce_pipe_entry[i] = *pce_entry;
8549 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8550 pinfo->ce_pipe_entry[i].valid = 0;
8551
8552 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8553 pr_err("copy_to_user failed\n");
8554 ret = -EFAULT;
8555 }
8556 return ret;
8557}
8558
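/*
 * Release the CE info unit previously bound to the caller's handle for the
 * given usage, clearing the stored handle so the unit can be reused.
 */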
8559static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8560 void __user *argp)
8561{
8562 struct qseecom_ce_info_req req;
8563 struct qseecom_ce_info_req *pinfo = &req;
8564 int ret = 0;
8565 struct qseecom_ce_info_use *p;
8566 int total = 0;
8567 int i;
8568 bool found = false;
8569
8570 ret = copy_from_user(pinfo, argp,
8571 sizeof(struct qseecom_ce_info_req));
8572 if (ret)
8573 return -EFAULT;
8574
8575 switch (pinfo->usage) {
8576 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8577 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8578 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8579 if (qseecom.support_fde) {
8580 p = qseecom.ce_info.fde;
8581 total = qseecom.ce_info.num_fde;
8582 } else {
8583 pr_err("system does not support fde\n");
8584 return -EINVAL;
8585 }
8586 break;
8587 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8588 if (qseecom.support_pfe) {
8589 p = qseecom.ce_info.pfe;
8590 total = qseecom.ce_info.num_pfe;
8591 } else {
8592 pr_err("system does not support pfe\n");
8593 return -EINVAL;
8594 }
8595 break;
8596 default:
8597 pr_err("unsupported usage %d\n", pinfo->usage);
8598 return -EINVAL;
8599 }
8600
8601 for (i = 0; i < total; i++) {
8602 if (p->alloc &&
8603 !memcmp(p->handle, pinfo->handle,
8604 MAX_CE_INFO_HANDLE_SIZE)) {
8605 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8606 p->alloc = false;
8607 found = true;
8608 break;
8609 }
8610 p++;
8611 }
8612 return ret;
8613}
8614
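/*
 * Read-only variant of qseecom_get_ce_info(): report the unit bound to the
 * caller's handle, if any, without allocating or freeing anything.
 */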
8615static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8616 void __user *argp)
8617{
8618 struct qseecom_ce_info_req req;
8619 struct qseecom_ce_info_req *pinfo = &req;
8620 int ret = 0;
8621 int i;
8622 unsigned int entries;
8623 struct qseecom_ce_info_use *pce_info_use, *p;
8624 int total = 0;
8625 bool found = false;
8626 struct qseecom_ce_pipe_entry *pce_entry;
8627
8628 ret = copy_from_user(pinfo, argp,
8629 sizeof(struct qseecom_ce_info_req));
8630 if (ret)
8631 return -EFAULT;
8632
8633 switch (pinfo->usage) {
8634 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8635 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8636 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8637 if (qseecom.support_fde) {
8638 p = qseecom.ce_info.fde;
8639 total = qseecom.ce_info.num_fde;
8640 } else {
8641 pr_err("system does not support fde\n");
8642 return -EINVAL;
8643 }
8644 break;
8645 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8646 if (qseecom.support_pfe) {
8647 p = qseecom.ce_info.pfe;
8648 total = qseecom.ce_info.num_pfe;
8649 } else {
8650 pr_err("system does not support pfe\n");
8651 return -EINVAL;
8652 }
8653 break;
8654 default:
8655 pr_err("unsupported usage %d\n", pinfo->usage);
8656 return -EINVAL;
8657 }
8658
8659 pce_info_use = NULL;
8660 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8661 pinfo->num_ce_pipe_entries = 0;
8662 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8663 pinfo->ce_pipe_entry[i].valid = 0;
8664
8665 for (i = 0; i < total; i++) {
8666
8667 if (p->alloc && !memcmp(p->handle,
8668 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8669 pce_info_use = p;
8670 found = true;
8671 break;
8672 }
8673 p++;
8674 }
8675 if (!pce_info_use)
8676 goto out;
8677 pinfo->unit_num = pce_info_use->unit_num;
8678 if (pce_info_use->num_ce_pipe_entries >
8679 MAX_CE_PIPE_PAIR_PER_UNIT)
8680 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8681 else
8682 entries = pce_info_use->num_ce_pipe_entries;
8683 pinfo->num_ce_pipe_entries = entries;
8684 pce_entry = pce_info_use->ce_pipe_entry;
8685 for (i = 0; i < entries; i++, pce_entry++)
8686 pinfo->ce_pipe_entry[i] = *pce_entry;
8687 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8688 pinfo->ce_pipe_entry[i].valid = 0;
8689out:
8690 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8691 pr_err("copy_to_user failed\n");
8692 ret = -EFAULT;
8693 }
8694 return ret;
8695}
8696
8697/*
8698 * Check the TZ whitelist feature; if the feature version is below 1.0.0,
8699 * whitelist support is not available.
8700 */
8701static int qseecom_check_whitelist_feature(void)
8702{
8703 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8704
8705 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8706}
8707
8708static int qseecom_probe(struct platform_device *pdev)
8709{
8710 int rc;
8711 int i;
8712 uint32_t feature = 10;
8713 struct device *class_dev;
8714 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8715 struct qseecom_command_scm_resp resp;
8716 struct qseecom_ce_info_use *pce_info_use = NULL;
8717
8718 qseecom.qsee_bw_count = 0;
8719 qseecom.qsee_perf_client = 0;
8720 qseecom.qsee_sfpb_bw_count = 0;
8721
8722 qseecom.qsee.ce_core_clk = NULL;
8723 qseecom.qsee.ce_clk = NULL;
8724 qseecom.qsee.ce_core_src_clk = NULL;
8725 qseecom.qsee.ce_bus_clk = NULL;
8726
8727 qseecom.cumulative_mode = 0;
8728 qseecom.current_mode = INACTIVE;
8729 qseecom.support_bus_scaling = false;
8730 qseecom.support_fde = false;
8731 qseecom.support_pfe = false;
8732
8733 qseecom.ce_drv.ce_core_clk = NULL;
8734 qseecom.ce_drv.ce_clk = NULL;
8735 qseecom.ce_drv.ce_core_src_clk = NULL;
8736 qseecom.ce_drv.ce_bus_clk = NULL;
8737 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8738
8739 qseecom.app_block_ref_cnt = 0;
8740 init_waitqueue_head(&qseecom.app_block_wq);
8741 qseecom.whitelist_support = true;
8742
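/*
 * Create the qseecom character device: reserve a chrdev region, create the
 * device class and device node, then register the cdev with its fops.
 */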
8743 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8744 if (rc < 0) {
8745 pr_err("alloc_chrdev_region failed %d\n", rc);
8746 return rc;
8747 }
8748
8749 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8750 if (IS_ERR(driver_class)) {
8751 rc = -ENOMEM;
8752 pr_err("class_create failed %d\n", rc);
8753 goto exit_unreg_chrdev_region;
8754 }
8755
8756 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8757 QSEECOM_DEV);
8758 if (IS_ERR(class_dev)) {
8759 pr_err("class_device_create failed %d\n", rc);
8760 rc = -ENOMEM;
8761 goto exit_destroy_class;
8762 }
8763
8764 cdev_init(&qseecom.cdev, &qseecom_fops);
8765 qseecom.cdev.owner = THIS_MODULE;
8766
8767 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8768 if (rc < 0) {
8769 pr_err("cdev_add failed %d\n", rc);
8770 goto exit_destroy_device;
8771 }
8772
8773 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
8774 INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8775 spin_lock_init(&qseecom.registered_app_list_lock);
8776 INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
8777 INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8778 spin_lock_init(&qseecom.registered_kclient_list_lock);
8779 init_waitqueue_head(&qseecom.send_resp_wq);
8780 init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
8781 init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq);
8782 qseecom.send_resp_flag = 0;
8783
8784 qseecom.qsee_version = QSEEE_VERSION_00;
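/*
 * Query the QSEE version from TZ (SCM svc 6, cmd 3); app_access_lock is
 * held so the call cannot interleave with other app SCM traffic.
 */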
8785 mutex_lock(&app_access_lock);
8786 rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8787 &resp, sizeof(resp));
8788 mutex_unlock(&app_access_lock);
8789 pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8790 if (rc) {
8791 pr_err("Failed to get QSEE version info %d\n", rc);
8792 goto exit_del_cdev;
8793 }
8794 qseecom.qsee_version = resp.result;
8795 qseecom.qseos_version = QSEOS_VERSION_14;
8796 qseecom.commonlib_loaded = false;
8797 qseecom.commonlib64_loaded = false;
8798 qseecom.pdev = class_dev;
8799 /* Create ION msm client */
8800 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8801 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8802 pr_err("Ion client cannot be created\n");
8803 rc = -ENOMEM;
8804 goto exit_del_cdev;
8805 }
8806
8807 /* register client for bus scaling */
8808 if (pdev->dev.of_node) {
8809 qseecom.pdev->of_node = pdev->dev.of_node;
8810 qseecom.support_bus_scaling =
8811 of_property_read_bool((&pdev->dev)->of_node,
8812 "qcom,support-bus-scaling");
8813 rc = qseecom_retrieve_ce_data(pdev);
8814 if (rc)
8815 goto exit_destroy_ion_client;
8816 qseecom.appsbl_qseecom_support =
8817 of_property_read_bool((&pdev->dev)->of_node,
8818 "qcom,appsbl-qseecom-support");
8819 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
8820 qseecom.appsbl_qseecom_support);
8821
8822 qseecom.commonlib64_loaded =
8823 of_property_read_bool((&pdev->dev)->of_node,
8824 "qcom,commonlib64-loaded-by-uefi");
8825 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
8826 qseecom.commonlib64_loaded);
8827 qseecom.fde_key_size =
8828 of_property_read_bool((&pdev->dev)->of_node,
8829 "qcom,fde-key-size");
8830 qseecom.no_clock_support =
8831 of_property_read_bool((&pdev->dev)->of_node,
8832 "qcom,no-clock-support");
8833 if (qseecom.no_clock_support) {
8834 pr_info("qseecom clocks handled by other subsystem\n");
8835 } else {
8836 pr_info("no-clock-support=0x%x\n",
8837 qseecom.no_clock_support);
8838 }
8839
8840 if (of_property_read_u32((&pdev->dev)->of_node,
8841 "qcom,qsee-reentrancy-support",
8842 &qseecom.qsee_reentrancy_support)) {
8843 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
8844 qseecom.qsee_reentrancy_support = 0;
8845 } else {
8846 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
8847 qseecom.qsee_reentrancy_support);
8848 }
8849
8850 qseecom.enable_key_wrap_in_ks =
8851 of_property_read_bool((&pdev->dev)->of_node,
8852 "qcom,enable-key-wrap-in-ks");
8853 if (qseecom.enable_key_wrap_in_ks) {
8854 pr_warn("qseecom.enable_key_wrap_in_ks = %d\n",
8855 qseecom.enable_key_wrap_in_ks);
8856 }
8857
8858 /*
8859 * The qseecom bus scaling flag can not be enabled when
8860 * crypto clock is not handled by HLOS.
8861 */
8862 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
8863 pr_err("support_bus_scaling flag can not be enabled.\n");
8864 rc = -EINVAL;
8865 goto exit_destroy_ion_client;
8866 }
8867
8868 if (of_property_read_u32((&pdev->dev)->of_node,
8869 "qcom,ce-opp-freq",
8870 &qseecom.ce_opp_freq_hz)) {
8871 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
8872 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
8873 }
8874 rc = __qseecom_init_clk(CLK_QSEE);
8875 if (rc)
8876 goto exit_destroy_ion_client;
8877
8878 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8879 (qseecom.support_pfe || qseecom.support_fde)) {
8880 rc = __qseecom_init_clk(CLK_CE_DRV);
8881 if (rc) {
8882 __qseecom_deinit_clk(CLK_QSEE);
8883 goto exit_destroy_ion_client;
8884 }
8885 } else {
8886 struct qseecom_clk *qclk;
8887
8888 qclk = &qseecom.qsee;
8889 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
8890 qseecom.ce_drv.ce_clk = qclk->ce_clk;
8891 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
8892 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
8893 }
8894
8895 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8896 msm_bus_cl_get_pdata(pdev);
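/*
 * On QSEE >= 2.0, if the apps region is neither protected nor already
 * registered by appsbl, notify TZ of the "secapp-region" memory resource,
 * using the 32-bit or 64-bit request layout depending on the QSEE version.
 */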
8897 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
8898 (!qseecom.is_apps_region_protected &&
8899 !qseecom.appsbl_qseecom_support)) {
8900 struct resource *resource = NULL;
8901 struct qsee_apps_region_info_ireq req;
8902 struct qsee_apps_region_info_64bit_ireq req_64bit;
8903 struct qseecom_command_scm_resp resp;
8904 void *cmd_buf = NULL;
8905 size_t cmd_len;
8906
8907 resource = platform_get_resource_byname(pdev,
8908 IORESOURCE_MEM, "secapp-region");
8909 if (resource) {
8910 if (qseecom.qsee_version < QSEE_VERSION_40) {
8911 req.qsee_cmd_id =
8912 QSEOS_APP_REGION_NOTIFICATION;
8913 req.addr = (uint32_t)resource->start;
8914 req.size = resource_size(resource);
8915 cmd_buf = (void *)&req;
8916 cmd_len = sizeof(struct
8917 qsee_apps_region_info_ireq);
8918 pr_warn("secure app region addr=0x%x size=0x%x",
8919 req.addr, req.size);
8920 } else {
8921 req_64bit.qsee_cmd_id =
8922 QSEOS_APP_REGION_NOTIFICATION;
8923 req_64bit.addr = resource->start;
8924 req_64bit.size = resource_size(
8925 resource);
8926 cmd_buf = (void *)&req_64bit;
8927 cmd_len = sizeof(struct
8928 qsee_apps_region_info_64bit_ireq);
8929 pr_warn("secure app region addr=0x%llx size=0x%x",
8930 req_64bit.addr, req_64bit.size);
8931 }
8932 } else {
8933 pr_err("Fail to get secure app region info\n");
8934 rc = -EINVAL;
8935 goto exit_deinit_clock;
8936 }
8937 rc = __qseecom_enable_clk(CLK_QSEE);
8938 if (rc) {
8939 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
8940 rc = -EIO;
8941 goto exit_deinit_clock;
8942 }
8943 mutex_lock(&app_access_lock);
8944 rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
8945 cmd_buf, cmd_len,
8946 &resp, sizeof(resp));
8947 mutex_unlock(&app_access_lock);
8948 __qseecom_disable_clk(CLK_QSEE);
8949 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
8950 pr_err("send secapp reg fail %d resp.res %d\n",
8951 rc, resp.result);
8952 rc = -EINVAL;
8953 goto exit_deinit_clock;
8954 }
8955 }
8956 /*
8957 * By default, appsbl loads only cmnlib. If an OEM changes appsbl to also
8958 * load cmnlib64 while the cmnlib64 image is not present in non_hlos.bin,
8959 * please also set "qseecom.commonlib64_loaded = true" here.
8960 */
8961 if (qseecom.is_apps_region_protected ||
8962 qseecom.appsbl_qseecom_support)
8963 qseecom.commonlib_loaded = true;
8964 } else {
8965 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8966 pdev->dev.platform_data;
8967 }
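/*
 * With bus scaling enabled, an inactivity timer and work item are set up
 * here to scale the crypto bus bandwidth back down when requests go idle.
 */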
8968 if (qseecom.support_bus_scaling) {
8969 init_timer(&(qseecom.bw_scale_down_timer));
8970 INIT_WORK(&qseecom.bw_inactive_req_ws,
8971 qseecom_bw_inactive_req_work);
8972 qseecom.bw_scale_down_timer.function =
8973 qseecom_scale_bus_bandwidth_timer_callback;
8974 }
8975 qseecom.timer_running = false;
8976 qseecom.qsee_perf_client = msm_bus_scale_register_client(
8977 qseecom_platform_support);
8978
8979 qseecom.whitelist_support = qseecom_check_whitelist_feature();
8980 pr_warn("qseecom.whitelist_support = %d\n",
8981 qseecom.whitelist_support);
8982
8983 if (!qseecom.qsee_perf_client)
8984 pr_err("Unable to register bus client\n");
8985
8986 /* create a kthread to process pending listener unregistration tasks */
8987 qseecom.unregister_lsnr_kthread_task = kthread_run(
8988 __qseecom_unregister_listener_kthread_func,
8989 NULL, "qseecom-unreg-lsnr");
8990 if (IS_ERR(qseecom.unregister_lsnr_kthread_task)) {
8991 pr_err("failed to create kthread to unregister listener\n");
8992 rc = -EINVAL;
8993 goto exit_deinit_clock;
8994 }
8995 atomic_set(&qseecom.unregister_lsnr_kthread_state,
8996 LSNR_UNREG_KT_SLEEP);
8997 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8998 return 0;
8999
9000exit_deinit_clock:
9001 __qseecom_deinit_clk(CLK_QSEE);
9002 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9003 (qseecom.support_pfe || qseecom.support_fde))
9004 __qseecom_deinit_clk(CLK_CE_DRV);
9005exit_destroy_ion_client:
9006 if (qseecom.ce_info.fde) {
9007 pce_info_use = qseecom.ce_info.fde;
9008 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
9009 kzfree(pce_info_use->ce_pipe_entry);
9010 pce_info_use++;
9011 }
9012 kfree(qseecom.ce_info.fde);
9013 }
9014 if (qseecom.ce_info.pfe) {
9015 pce_info_use = qseecom.ce_info.pfe;
9016 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
9017 kzfree(pce_info_use->ce_pipe_entry);
9018 pce_info_use++;
9019 }
9020 kfree(qseecom.ce_info.pfe);
9021 }
9022 ion_client_destroy(qseecom.ion_clnt);
9023exit_del_cdev:
9024 cdev_del(&qseecom.cdev);
9025exit_destroy_device:
9026 device_destroy(driver_class, qseecom_device_no);
9027exit_destroy_class:
9028 class_destroy(driver_class);
9029exit_unreg_chrdev_region:
9030 unregister_chrdev_region(qseecom_device_no, 1);
9031 return rc;
9032}
9033
9034static int qseecom_remove(struct platform_device *pdev)
9035{
9036 struct qseecom_registered_kclient_list *kclient = NULL;
9037 struct qseecom_registered_kclient_list *kclient_tmp = NULL;
9038 unsigned long flags = 0;
9039 int ret = 0;
9040 int i;
9041 struct qseecom_ce_pipe_entry *pce_entry;
9042 struct qseecom_ce_info_use *pce_info_use;
9043
9044 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
9045 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
9046
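/*
 * Unload every trusted app that was started through the kernel-client API
 * and free the bookkeeping for each registered kernel client.
 */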
9047 list_for_each_entry_safe(kclient, kclient_tmp,
9048 &qseecom.registered_kclient_list_head, list) {
9049
9050 /* Break the loop if client handle is NULL */
9051 if (!kclient->handle) {
9052 list_del(&kclient->list);
9053 kzfree(kclient);
9054 break;
9055 }
9056
9057 list_del(&kclient->list);
9058 mutex_lock(&app_access_lock);
9059 ret = qseecom_unload_app(kclient->handle->dev, false);
9060 mutex_unlock(&app_access_lock);
9061 if (!ret) {
9062 kzfree(kclient->handle->dev);
9063 kzfree(kclient->handle);
9064 kzfree(kclient);
9065 }
9066 }
9067
9068 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
9069
9070 if (qseecom.qseos_version > QSEEE_VERSION_00)
9071 qseecom_unload_commonlib_image();
9072
9073 if (qseecom.qsee_perf_client)
9074 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
9075 0);
9076 if (pdev->dev.platform_data != NULL)
9077 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
9078
9079 if (qseecom.support_bus_scaling) {
9080 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9081 del_timer_sync(&qseecom.bw_scale_down_timer);
9082 }
9083
9084 if (qseecom.ce_info.fde) {
9085 pce_info_use = qseecom.ce_info.fde;
9086 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
9087 pce_entry = pce_info_use->ce_pipe_entry;
9088 kfree(pce_entry);
9089 pce_info_use++;
9090 }
9091 }
9092 kfree(qseecom.ce_info.fde);
9093 if (qseecom.ce_info.pfe) {
9094 pce_info_use = qseecom.ce_info.pfe;
9095 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
9096 pce_entry = pce_info_use->ce_pipe_entry;
9097 kfree(pce_entry);
9098 pce_info_use++;
9099 }
9100 }
9101 kfree(qseecom.ce_info.pfe);
9102
9103 /* release the crypto clocks acquired during probe */
9104 if (pdev->dev.of_node) {
9105 __qseecom_deinit_clk(CLK_QSEE);
9106 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9107 (qseecom.support_pfe || qseecom.support_fde))
9108 __qseecom_deinit_clk(CLK_CE_DRV);
9109 }
9110
9111 ion_client_destroy(qseecom.ion_clnt);
9112
9113 kthread_stop(qseecom.unregister_lsnr_kthread_task);
9114
9115 cdev_del(&qseecom.cdev);
9116
9117 device_destroy(driver_class, qseecom_device_no);
9118
9119 class_destroy(driver_class);
9120
9121 unregister_chrdev_region(qseecom_device_no, 1);
9122
9123 return ret;
9124}
9125
9126static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
9127{
9128 int ret = 0;
9129 struct qseecom_clk *qclk;
9130
9131 qclk = &qseecom.qsee;
9132 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
9133 if (qseecom.no_clock_support)
9134 return 0;
9135
9136 mutex_lock(&qsee_bw_mutex);
9137 mutex_lock(&clk_access_lock);
9138
9139 if (qseecom.current_mode != INACTIVE) {
9140 ret = msm_bus_scale_client_update_request(
9141 qseecom.qsee_perf_client, INACTIVE);
9142 if (ret)
9143 pr_err("Fail to scale down bus\n");
9144 else
9145 qseecom.current_mode = INACTIVE;
9146 }
9147
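/*
 * If clients still hold a clock vote, gate the CE clocks for the duration
 * of the suspend; qseecom_resume() re-enables them.
 */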
9148 if (qclk->clk_access_cnt) {
9149 if (qclk->ce_clk != NULL)
9150 clk_disable_unprepare(qclk->ce_clk);
9151 if (qclk->ce_core_clk != NULL)
9152 clk_disable_unprepare(qclk->ce_core_clk);
9153 if (qclk->ce_bus_clk != NULL)
9154 clk_disable_unprepare(qclk->ce_bus_clk);
9155 }
9156
9157 del_timer_sync(&(qseecom.bw_scale_down_timer));
9158 qseecom.timer_running = false;
9159
9160 mutex_unlock(&clk_access_lock);
9161 mutex_unlock(&qsee_bw_mutex);
9162 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9163
9164 return 0;
9165}
9166
9167static int qseecom_resume(struct platform_device *pdev)
9168{
9169 int mode = 0;
9170 int ret = 0;
9171 struct qseecom_clk *qclk;
9172
9173 qclk = &qseecom.qsee;
9174 if (qseecom.no_clock_support)
9175 goto exit;
9176
9177 mutex_lock(&qsee_bw_mutex);
9178 mutex_lock(&clk_access_lock);
9179 if (qseecom.cumulative_mode >= HIGH)
9180 mode = HIGH;
9181 else
9182 mode = qseecom.cumulative_mode;
9183
9184 if (qseecom.cumulative_mode != INACTIVE) {
9185 ret = msm_bus_scale_client_update_request(
9186 qseecom.qsee_perf_client, mode);
9187 if (ret)
9188 pr_err("Fail to scale up bus to %d\n", mode);
9189 else
9190 qseecom.current_mode = mode;
9191 }
9192
9193 if (qclk->clk_access_cnt) {
9194 if (qclk->ce_core_clk != NULL) {
9195 ret = clk_prepare_enable(qclk->ce_core_clk);
9196 if (ret) {
9197 pr_err("Unable to enable/prep CE core clk\n");
9198 qclk->clk_access_cnt = 0;
9199 goto err;
9200 }
9201 }
9202 if (qclk->ce_clk != NULL) {
9203 ret = clk_prepare_enable(qclk->ce_clk);
9204 if (ret) {
9205 pr_err("Unable to enable/prep CE iface clk\n");
9206 qclk->clk_access_cnt = 0;
9207 goto ce_clk_err;
9208 }
9209 }
9210 if (qclk->ce_bus_clk != NULL) {
9211 ret = clk_prepare_enable(qclk->ce_bus_clk);
9212 if (ret) {
9213 pr_err("Unable to enable/prep CE bus clk\n");
9214 qclk->clk_access_cnt = 0;
9215 goto ce_bus_clk_err;
9216 }
9217 }
9218 }
9219
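/*
 * Re-arm the scale-down timer so the bandwidth/clock votes restored above
 * are dropped again after the crypto timeout.
 */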
9220 if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
9221 qseecom.bw_scale_down_timer.expires = jiffies +
9222 msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
9223 mod_timer(&(qseecom.bw_scale_down_timer),
9224 qseecom.bw_scale_down_timer.expires);
9225 qseecom.timer_running = true;
9226 }
9227
9228 mutex_unlock(&clk_access_lock);
9229 mutex_unlock(&qsee_bw_mutex);
9230 goto exit;
9231
9232ce_bus_clk_err:
9233 if (qclk->ce_clk)
9234 clk_disable_unprepare(qclk->ce_clk);
9235ce_clk_err:
9236 if (qclk->ce_core_clk)
9237 clk_disable_unprepare(qclk->ce_core_clk);
9238err:
9239 mutex_unlock(&clk_access_lock);
9240 mutex_unlock(&qsee_bw_mutex);
9241 ret = -EIO;
9242exit:
9243 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
9244 return ret;
9245}
9246
9247static const struct of_device_id qseecom_match[] = {
9248 {
9249 .compatible = "qcom,qseecom",
9250 },
9251 {}
9252};
9253
9254static struct platform_driver qseecom_plat_driver = {
9255 .probe = qseecom_probe,
9256 .remove = qseecom_remove,
9257 .suspend = qseecom_suspend,
9258 .resume = qseecom_resume,
9259 .driver = {
9260 .name = "qseecom",
9261 .owner = THIS_MODULE,
9262 .of_match_table = qseecom_match,
9263 },
9264};
9265
9266static int qseecom_init(void)
9267{
9268 return platform_driver_register(&qseecom_plat_driver);
9269}
9270
9271static void qseecom_exit(void)
9272{
9273 platform_driver_unregister(&qseecom_plat_driver);
9274}
9275
9276MODULE_LICENSE("GPL v2");
9277MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9278
9279module_init(qseecom_init);
9280module_exit(qseecom_exit);